git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge tag 'linux-can-next-for-4.4-20150917' of git://git.kernel.org/pub/scm/linux...
author David S. Miller <davem@davemloft.net>
Mon, 21 Sep 2015 04:58:23 +0000 (21:58 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 21 Sep 2015 04:58:23 +0000 (21:58 -0700)
Marc Kleine-Budde says:

====================
pull-request: can-next 2015-09-17

this is a pull request of two patches for net-next/master.

Gerhard Bertelsmann adds support for the CAN controller found on the
Allwinner A10/A20 SoC.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
140 files changed:
Documentation/devicetree/bindings/net/hisilicon-hip04-net.txt
Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt [new file with mode: 0644]
arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi [new file with mode: 0644]
drivers/net/bonding/bond_main.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/hisilicon/Makefile
drivers/net/ethernet/hisilicon/hip04_mdio.c [deleted file]
drivers/net/ethernet/hisilicon/hns/Makefile [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hnae.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hnae.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_enet.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_enet.h [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/hisilicon/hns_mdio.c [new file with mode: 0644]
drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
drivers/net/ethernet/intel/fm10k/fm10k_iov.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/fm10k/fm10k_type.h
drivers/net/ethernet/intel/fm10k/fm10k_vf.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smc91x.h
drivers/net/usb/Kconfig
drivers/net/usb/lan78xx.c
drivers/net/usb/lan78xx.h
drivers/net/vrf.c
drivers/net/xen-netfront.c
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter_bridge.h
include/linux/netfilter_ingress.h
include/linux/tcp.h
include/net/dn_neigh.h
include/net/dst.h
include/net/ipv6.h
include/net/netfilter/br_netfilter.h
include/net/sch_generic.h
include/net/sock.h
include/net/xfrm.h
include/uapi/linux/bpf.h
include/uapi/linux/pkt_cls.h
net/bridge/br_forward.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netfilter_ipv6.c
net/bridge/br_private.h
net/bridge/br_stp_bpdu.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/core/dev.c
net/core/filter.c
net/core/net-sysfs.c
net/decnet/dn_neigh.c
net/decnet/dn_nsp_in.c
net/decnet/dn_nsp_out.c
net/decnet/dn_route.c
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/ip_forward.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ip_vti.c
net/ipv4/ipmr.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_output.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/output_core.c
net/ipv6/raw.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_output.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_queue.c
net/netfilter/nfnetlink_queue_core.c
net/rxrpc/ar-connection.c
net/rxrpc/ar-internal.h
net/rxrpc/ar-transport.c
net/sched/act_bpf.c
net/sched/cls_bpf.c
net/sched/sch_dsmark.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
samples/bpf/bpf_helpers.h
samples/bpf/tcbpf1_kern.c

index 988fc694b663fdde71b23e73051085f0c034c6b5..d1df8a00e1f3b7828cfb66d2706dd81316343384 100644 (file)
@@ -32,13 +32,13 @@ Required properties:
 
 Required properties:
 
-- compatible: should be "hisilicon,hip04-mdio".
+- compatible: should be "hisilicon,mdio".
 - Inherits from MDIO bus node binding [2]
 [2] Documentation/devicetree/bindings/net/phy.txt
 
 Example:
        mdio {
-               compatible = "hisilicon,hip04-mdio";
+               compatible = "hisilicon,mdio";
                reg = <0x28f1000 0x1000>;
                #address-cells = <1>;
                #size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
new file mode 100644 (file)
index 0000000..80411b2
--- /dev/null
@@ -0,0 +1,49 @@
+Hisilicon DSA Fabric device controller
+
+Required properties:
+- compatible: should be "hisilicon,hns-dsaf-v1" or "hisilicon,hns-dsaf-v2".
+  "hisilicon,hns-dsaf-v1" is for hip05.
+  "hisilicon,hns-dsaf-v2" is for Hi1610 and Hi1612.
+- dsa-name: the name of the DSA fabric that provides this interface.
+  should be "dsafX", where X is the dsaf id.
+- mode: dsa fabric mode string. Only one of the following dsaf modes is supported:
+               "2port-64vf",
+               "6port-16rss",
+               "6port-16vf".
+- interrupt-parent: the interrupt parent of this device.
+- interrupts: should contain the DSA Fabric and rcb interrupts.
+- reg: specifies base physical address(es) and size of the device registers.
+  The first region is external interface control register base and size.
+  The second region is SerDes base register and size.
+  The third region is the PPE register base and size.
+  The fourth region is dsa fabric base register and size.
+  The fifth region is the cpld base register and size; it is not required if no cpld is used.
+- phy-handle: phy handle of the physical port, 0 if there is no phy device. See ethernet.txt [1].
+- buf-size: rx buffer size, should be 16-1024.
+- desc-num: number of descriptors in the TX and RX queues, should be 512, 1024, 2048 or 4096.
+
+[1] Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+
+dsa: dsa@c7000000 {
+       compatible = "hisilicon,hns-dsaf-v1";
+       dsa_name = "dsaf0";
+       mode = "6port-16rss";
+       interrupt-parent = <&mbigen_dsa>;
+       reg = <0x0 0xC0000000 0x0 0x420000
+              0x0 0xC2000000 0x0 0x300000
+              0x0 0xc5000000 0x0 0x890000
+              0x0 0xc7000000 0x0 0x60000>;
+       phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>;
+       interrupts = <131 4>,<132 4>, <133 4>,<134 4>,
+                    <135 4>,<136 4>, <137 4>,<138 4>,
+                    <139 4>,<140 4>, <141 4>,<142 4>,
+                    <143 4>,<144 4>, <145 4>,<146 4>,
+                    <147 4>,<148 4>, <384 1>,<385 1>,
+                    <386 1>,<387 1>, <388 1>,<389 1>,
+                    <390 1>,<391 1>,
+       buf-size = <4096>;
+       desc-num = <1024>;
+       dma-coherent;
+};
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
new file mode 100644 (file)
index 0000000..9940aa0
--- /dev/null
@@ -0,0 +1,22 @@
+Hisilicon MDIO bus controller
+
+Properties:
+- compatible: "hisilicon,mdio","hisilicon,hns-mdio".
+- reg: The base address of the MDIO bus controller register bank.
+- #address-cells: Must be <1>.
+- #size-cells: Must be <0>.  MDIO addresses have no size component.
+
+Typically an MDIO bus might have several children.
+
+Example:
+         mdio@803c0000 {
+                   #address-cells = <1>;
+                   #size-cells = <0>;
+                   compatible = "hisilicon,mdio","hisilicon,hns-mdio";
+                   reg = <0x0 0x803c0000 0x0 0x10000>;
+
+                   ethernet-phy@0 {
+                            ...
+                            reg = <0>;
+                   };
+         };
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt
new file mode 100644 (file)
index 0000000..41d19be
--- /dev/null
@@ -0,0 +1,47 @@
+Hisilicon Network Subsystem NIC controller
+
+Required properties:
+- compatible: "hisilicon,hns-nic-v1" or "hisilicon,hns-nic-v2".
+  "hisilicon,hns-nic-v1" is for hip05.
+  "hisilicon,hns-nic-v2" is for Hi1610 and Hi1612.
+- ae-name: the name of the accelerator that provides this interface;
+  it is simply a name referring to the name given in the accelerator node.
+- port-id: the index of the port provided by DSAF (the accelerator). DSAF can
+  connect to 8 PHYs. Ports 0 and 1 are both used for administration purposes. They
+  are called debug ports.
+
+  The remaining 6 PHYs are taken according to the mode of DSAF.
+
+  In NIC mode of DSAF, all 6 PHYs are taken as ethernet ports to the CPU. The
+  port-id can be 2 to 7. Here is the diagram:
+            +-----+---------------+
+            |            CPU      |
+            +-+-+-+---+-+-+-+-+-+-+
+              | |     | | | | | |
+             debug       service
+             port         port
+             (0,1)       (2-7)
+
+  In Switch mode of DSAF, all 6 PHYs are used as physical ports connected to a
+  LAN switch, while the CPU side assumes it has one single NIC connected to
+  this switch. In this case, the port-id can only be 2.
+            +-----+---------------+
+            |            CPU      |
+            +-+-+-+---+-+-+-+-+-+-+
+              | |   service| port(2)
+             debug   +------------+
+             port    |   switch   |
+             (0,1)   +-+-+-+-+-+-++
+                       | | | | | |
+                      external port
+
+- local-mac-address: MAC address of the ethernet interface
+
+Example:
+
+       ethernet@0{
+               compatible = "hisilicon,hns-nic-v1";
+               ae-name = "dsaf0";
+               port-id = <0>;
+               local-mac-address = [a2 14 e4 4b 56 76];
+       };
diff --git a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
new file mode 100644 (file)
index 0000000..3500586
--- /dev/null
@@ -0,0 +1,193 @@
+soc0: soc@000000000 {
+       #address-cells = <2>;
+       #size-cells = <2>;
+       device_type = "soc";
+       compatible = "simple-bus";
+       ranges = <0x0 0x0 0x0 0x0 0x1 0x0>;
+       chip-id = <0>;
+
+       soc0_mdio0: mdio@803c0000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "hisilicon,hns-mdio";
+               reg = <0x0 0x803c0000 0x0 0x10000
+                      0x0 0x80000000 0x0 0x10000>;
+
+               soc0_phy4: ethernet-phy@4 {
+                       reg = <0x0>;
+                       device_type = "ethernet-phy";
+                       compatible = "ethernet-phy-ieee802.3-c22";
+               };
+               soc0_phy5: ethernet-phy@5 {
+                       reg = <0x1>;
+                       device_type = "ethernet-phy";
+                       compatible = "ethernet-phy-ieee802.3-c22";
+               };
+       };
+
+       dsa: dsa@c7000000 {
+               compatible = "hisilicon,hns-dsaf-v1";
+               dsa_name = "dsaf0";
+               mode = "6port-16rss";
+               interrupt-parent = <&mbigen_dsa>;
+
+               reg = <0x0 0xC0000000 0x0 0x420000
+                      0x0 0xC2000000 0x0 0x300000
+                      0x0 0xc5000000 0x0 0x890000
+                      0x0 0xc7000000 0x0 0x60000
+                      >;
+
+               phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>;
+               interrupts = <
+                       /* [14] ge fifo err 8 / xge 6**/
+                       149 0x4 150 0x4 151 0x4 152 0x4
+                       153 0x4 154 0x4  26 0x4 27 0x4
+                       155 0x4 156 0x4 157 0x4 158 0x4 159 0x4 160 0x4
+                       /* [12] rcb com 4*3**/
+                       0x6 0x4 0x7 0x4 0x8 0x4 0x9 0x4
+                        16 0x4  17 0x4  18 0x4  19 0x4
+                        22 0x4  23 0x4  24 0x4  25 0x4
+                       /* [8] ppe tnl 0-7***/
+                       0x0 0x4 0x1 0x4 0x2 0x4 0x3 0x4
+                       0x4 0x4 0x5 0x4 12 0x4 13 0x4
+                       /* [21] dsaf event int 3+18**/
+                        128 0x4  129 0x4  130 0x4
+                       0x83 0x4 0x84 0x4 0x85 0x4 0x86 0x4 0x87 0x4 0x88 0x4
+                       0x89 0x4 0x8a 0x4 0x8b 0x4 0x8c 0x4 0x8d 0x4 0x8e 0x4
+                       0x8f 0x4 0x90 0x4 0x91 0x4 0x92 0x4 0x93 0x4 0x94 0x4
+                       /* [4] debug rcb 2*2*/
+                       0xe 0x1 0xf 0x1 0x14 0x1 0x15 0x1
+                       /* [256] service rcb 2*128*/
+                       0x180 0x1 0x181 0x1 0x182 0x1 0x183 0x1
+                       0x184 0x1 0x185 0x1 0x186 0x1 0x187 0x1
+                       0x188 0x1 0x189 0x1 0x18a 0x1 0x18b 0x1
+                       0x18c 0x1 0x18d 0x1 0x18e 0x1 0x18f 0x1
+                       0x190 0x1 0x191 0x1 0x192 0x1 0x193 0x1
+                       0x194 0x1 0x195 0x1 0x196 0x1 0x197 0x1
+                       0x198 0x1 0x199 0x1 0x19a 0x1 0x19b 0x1
+                       0x19c 0x1 0x19d 0x1 0x19e 0x1 0x19f 0x1
+                       0x1a0 0x1 0x1a1 0x1 0x1a2 0x1 0x1a3 0x1
+                       0x1a4 0x1 0x1a5 0x1 0x1a6 0x1 0x1a7 0x1
+                       0x1a8 0x1 0x1a9 0x1 0x1aa 0x1 0x1ab 0x1
+                       0x1ac 0x1 0x1ad 0x1 0x1ae 0x1 0x1af 0x1
+                       0x1b0 0x1 0x1b1 0x1 0x1b2 0x1 0x1b3 0x1
+                       0x1b4 0x1 0x1b5 0x1 0x1b6 0x1 0x1b7 0x1
+                       0x1b8 0x1 0x1b9 0x1 0x1ba 0x1 0x1bb 0x1
+                       0x1bc 0x1 0x1bd 0x1 0x1be 0x1 0x1bf 0x1
+                       0x1c0 0x1 0x1c1 0x1 0x1c2 0x1 0x1c3 0x1
+                       0x1c4 0x1 0x1c5 0x1 0x1c6 0x1 0x1c7 0x1
+                       0x1c8 0x1 0x1c9 0x1 0x1ca 0x1 0x1cb 0x1
+                       0x1cc 0x1 0x1cd 0x1 0x1ce 0x1 0x1cf 0x1
+                       0x1d0 0x1 0x1d1 0x1 0x1d2 0x1 0x1d3 0x1
+                       0x1d4 0x1 0x1d5 0x1 0x1d6 0x1 0x1d7 0x1
+                       0x1d8 0x1 0x1d9 0x1 0x1da 0x1 0x1db 0x1
+                       0x1dc 0x1 0x1dd 0x1 0x1de 0x1 0x1df 0x1
+                       0x1e0 0x1 0x1e1 0x1 0x1e2 0x1 0x1e3 0x1
+                       0x1e4 0x1 0x1e5 0x1 0x1e6 0x1 0x1e7 0x1
+                       0x1e8 0x1 0x1e9 0x1 0x1ea 0x1 0x1eb 0x1
+                       0x1ec 0x1 0x1ed 0x1 0x1ee 0x1 0x1ef 0x1
+                       0x1f0 0x1 0x1f1 0x1 0x1f2 0x1 0x1f3 0x1
+                       0x1f4 0x1 0x1f5 0x1 0x1f6 0x1 0x1f7 0x1
+                       0x1f8 0x1 0x1f9 0x1 0x1fa 0x1 0x1fb 0x1
+                       0x1fc 0x1 0x1fd 0x1 0x1fe 0x1 0x1ff 0x1
+                       0x200 0x1 0x201 0x1 0x202 0x1 0x203 0x1
+                       0x204 0x1 0x205 0x1 0x206 0x1 0x207 0x1
+                       0x208 0x1 0x209 0x1 0x20a 0x1 0x20b 0x1
+                       0x20c 0x1 0x20d 0x1 0x20e 0x1 0x20f 0x1
+                       0x210 0x1 0x211 0x1 0x212 0x1 0x213 0x1
+                       0x214 0x1 0x215 0x1 0x216 0x1 0x217 0x1
+                       0x218 0x1 0x219 0x1 0x21a 0x1 0x21b 0x1
+                       0x21c 0x1 0x21d 0x1 0x21e 0x1 0x21f 0x1
+                       0x220 0x1 0x221 0x1 0x222 0x1 0x223 0x1
+                       0x224 0x1 0x225 0x1 0x226 0x1 0x227 0x1
+                       0x228 0x1 0x229 0x1 0x22a 0x1 0x22b 0x1
+                       0x22c 0x1 0x22d 0x1 0x22e 0x1 0x22f 0x1
+                       0x230 0x1 0x231 0x1 0x232 0x1 0x233 0x1
+                       0x234 0x1 0x235 0x1 0x236 0x1 0x237 0x1
+                       0x238 0x1 0x239 0x1 0x23a 0x1 0x23b 0x1
+                       0x23c 0x1 0x23d 0x1 0x23e 0x1 0x23f 0x1
+                       0x240 0x1 0x241 0x1 0x242 0x1 0x243 0x1
+                       0x244 0x1 0x245 0x1 0x246 0x1 0x247 0x1
+                       0x248 0x1 0x249 0x1 0x24a 0x1 0x24b 0x1
+                       0x24c 0x1 0x24d 0x1 0x24e 0x1 0x24f 0x1
+                       0x250 0x1 0x251 0x1 0x252 0x1 0x253 0x1
+                       0x254 0x1 0x255 0x1 0x256 0x1 0x257 0x1
+                       0x258 0x1 0x259 0x1 0x25a 0x1 0x25b 0x1
+                       0x25c 0x1 0x25d 0x1 0x25e 0x1 0x25f 0x1
+                       0x260 0x1 0x261 0x1 0x262 0x1 0x263 0x1
+                       0x264 0x1 0x265 0x1 0x266 0x1 0x267 0x1
+                       0x268 0x1 0x269 0x1 0x26a 0x1 0x26b 0x1
+                       0x26c 0x1 0x26d 0x1 0x26e 0x1 0x26f 0x1
+                       0x270 0x1 0x271 0x1 0x272 0x1 0x273 0x1
+                       0x274 0x1 0x275 0x1 0x276 0x1 0x277 0x1
+                       0x278 0x1 0x279 0x1 0x27a 0x1 0x27b 0x1
+                       0x27c 0x1 0x27d 0x1 0x27e 0x1 0x27f 0x1>;
+               buf-size = <4096>;
+               desc-num = <1024>;
+               dma-coherent;
+       };
+
+       eth0: ethernet@0{
+               compatible = "hisilicon,hns-nic-v1";
+               ae-name = "dsaf0";
+               port-id = <0>;
+               local-mac-address = [00 00 00 01 00 58];
+               status = "disabled";
+               dma-coherent;
+       };
+       eth1: ethernet@1{
+               compatible = "hisilicon,hns-nic-v1";
+               ae-name = "dsaf0";
+               port-id = <1>;
+               status = "disabled";
+               dma-coherent;
+       };
+       eth2: ethernet@2{
+               compatible = "hisilicon,hns-nic-v1";
+               ae-name = "dsaf0";
+               port-id = <2>;
+               local-mac-address = [00 00 00 01 00 5a];
+               status = "disabled";
+               dma-coherent;
+       };
+       eth3: ethernet@3{
+               compatible = "hisilicon,hns-nic-v1";
+               ae-name = "dsaf0";
+               port-id = <3>;
+               local-mac-address = [00 00 00 01 00 5b];
+               status = "disabled";
+               dma-coherent;
+       };
+       eth4: ethernet@4{
+               compatible = "hisilicon,hns-nic-v1";
+               ae-name = "dsaf0";
+               port-id = <4>;
+               local-mac-address = [00 00 00 01 00 5c];
+               status = "disabled";
+               dma-coherent;
+       };
+       eth5: ethernet@5{
+               compatible = "hisilicon,hns-nic-v1";
+               ae-name = "dsaf0";
+               port-id = <5>;
+               local-mac-address = [00 00 00 01 00 5d];
+               status = "disabled";
+               dma-coherent;
+       };
+       eth6: ethernet@6{
+               compatible = "hisilicon,hns-nic-v1";
+               ae-name = "dsaf0";
+               port-id = <6>;
+               local-mac-address = [00 00 00 01 00 5e];
+               status = "disabled";
+               dma-coherent;
+       };
+       eth7: ethernet@7{
+               compatible = "hisilicon,hns-nic-v1";
+               ae-name = "dsaf0";
+               port-id = <7>;
+               local-mac-address = [00 00 00 01 00 5f];
+               status = "disabled";
+               dma-coherent;
+       };
+};
index 771a449d2f56384bae4a5409ffb588dfb40e1f61..90f2615428c017f6616231e110a49ec3b914901a 100644 (file)
@@ -3136,6 +3136,10 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
        struct flow_keys flow;
        u32 hash;
 
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
+           skb->l4_hash)
+               return skb->hash;
+
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
            !bond_flow_dissect(bond, skb, &flow))
                return bond_eth_hash(skb);
index fadbd0088d3e6b3fa6b748c419e2586393d7da6f..1e72722abbf1265716790bc25ea24376e7e7e22f 100644 (file)
@@ -205,6 +205,23 @@ enum dma_reg {
        DMA_INDEX2RING_5,
        DMA_INDEX2RING_6,
        DMA_INDEX2RING_7,
+       DMA_RING0_TIMEOUT,
+       DMA_RING1_TIMEOUT,
+       DMA_RING2_TIMEOUT,
+       DMA_RING3_TIMEOUT,
+       DMA_RING4_TIMEOUT,
+       DMA_RING5_TIMEOUT,
+       DMA_RING6_TIMEOUT,
+       DMA_RING7_TIMEOUT,
+       DMA_RING8_TIMEOUT,
+       DMA_RING9_TIMEOUT,
+       DMA_RING10_TIMEOUT,
+       DMA_RING11_TIMEOUT,
+       DMA_RING12_TIMEOUT,
+       DMA_RING13_TIMEOUT,
+       DMA_RING14_TIMEOUT,
+       DMA_RING15_TIMEOUT,
+       DMA_RING16_TIMEOUT,
 };
 
 static const u8 bcmgenet_dma_regs_v3plus[] = {
@@ -216,6 +233,23 @@ static const u8 bcmgenet_dma_regs_v3plus[] = {
        [DMA_PRIORITY_0]        = 0x30,
        [DMA_PRIORITY_1]        = 0x34,
        [DMA_PRIORITY_2]        = 0x38,
+       [DMA_RING0_TIMEOUT]     = 0x2C,
+       [DMA_RING1_TIMEOUT]     = 0x30,
+       [DMA_RING2_TIMEOUT]     = 0x34,
+       [DMA_RING3_TIMEOUT]     = 0x38,
+       [DMA_RING4_TIMEOUT]     = 0x3c,
+       [DMA_RING5_TIMEOUT]     = 0x40,
+       [DMA_RING6_TIMEOUT]     = 0x44,
+       [DMA_RING7_TIMEOUT]     = 0x48,
+       [DMA_RING8_TIMEOUT]     = 0x4c,
+       [DMA_RING9_TIMEOUT]     = 0x50,
+       [DMA_RING10_TIMEOUT]    = 0x54,
+       [DMA_RING11_TIMEOUT]    = 0x58,
+       [DMA_RING12_TIMEOUT]    = 0x5c,
+       [DMA_RING13_TIMEOUT]    = 0x60,
+       [DMA_RING14_TIMEOUT]    = 0x64,
+       [DMA_RING15_TIMEOUT]    = 0x68,
+       [DMA_RING16_TIMEOUT]    = 0x6C,
        [DMA_INDEX2RING_0]      = 0x70,
        [DMA_INDEX2RING_1]      = 0x74,
        [DMA_INDEX2RING_2]      = 0x78,
@@ -235,6 +269,23 @@ static const u8 bcmgenet_dma_regs_v2[] = {
        [DMA_PRIORITY_0]        = 0x34,
        [DMA_PRIORITY_1]        = 0x38,
        [DMA_PRIORITY_2]        = 0x3C,
+       [DMA_RING0_TIMEOUT]     = 0x2C,
+       [DMA_RING1_TIMEOUT]     = 0x30,
+       [DMA_RING2_TIMEOUT]     = 0x34,
+       [DMA_RING3_TIMEOUT]     = 0x38,
+       [DMA_RING4_TIMEOUT]     = 0x3c,
+       [DMA_RING5_TIMEOUT]     = 0x40,
+       [DMA_RING6_TIMEOUT]     = 0x44,
+       [DMA_RING7_TIMEOUT]     = 0x48,
+       [DMA_RING8_TIMEOUT]     = 0x4c,
+       [DMA_RING9_TIMEOUT]     = 0x50,
+       [DMA_RING10_TIMEOUT]    = 0x54,
+       [DMA_RING11_TIMEOUT]    = 0x58,
+       [DMA_RING12_TIMEOUT]    = 0x5c,
+       [DMA_RING13_TIMEOUT]    = 0x60,
+       [DMA_RING14_TIMEOUT]    = 0x64,
+       [DMA_RING15_TIMEOUT]    = 0x68,
+       [DMA_RING16_TIMEOUT]    = 0x6C,
 };
 
 static const u8 bcmgenet_dma_regs_v1[] = {
@@ -245,6 +296,23 @@ static const u8 bcmgenet_dma_regs_v1[] = {
        [DMA_PRIORITY_0]        = 0x34,
        [DMA_PRIORITY_1]        = 0x38,
        [DMA_PRIORITY_2]        = 0x3C,
+       [DMA_RING0_TIMEOUT]     = 0x2C,
+       [DMA_RING1_TIMEOUT]     = 0x30,
+       [DMA_RING2_TIMEOUT]     = 0x34,
+       [DMA_RING3_TIMEOUT]     = 0x38,
+       [DMA_RING4_TIMEOUT]     = 0x3c,
+       [DMA_RING5_TIMEOUT]     = 0x40,
+       [DMA_RING6_TIMEOUT]     = 0x44,
+       [DMA_RING7_TIMEOUT]     = 0x48,
+       [DMA_RING8_TIMEOUT]     = 0x4c,
+       [DMA_RING9_TIMEOUT]     = 0x50,
+       [DMA_RING10_TIMEOUT]    = 0x54,
+       [DMA_RING11_TIMEOUT]    = 0x58,
+       [DMA_RING12_TIMEOUT]    = 0x5c,
+       [DMA_RING13_TIMEOUT]    = 0x60,
+       [DMA_RING14_TIMEOUT]    = 0x64,
+       [DMA_RING15_TIMEOUT]    = 0x68,
+       [DMA_RING16_TIMEOUT]    = 0x6C,
 };
 
 /* Set at runtime once bcmgenet version is known */
@@ -498,6 +566,86 @@ static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
        priv->msg_enable = level;
 }
 
+static int bcmgenet_get_coalesce(struct net_device *dev,
+                                struct ethtool_coalesce *ec)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       ec->tx_max_coalesced_frames =
+               bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
+                                        DMA_MBUF_DONE_THRESH);
+       ec->rx_max_coalesced_frames =
+               bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
+                                        DMA_MBUF_DONE_THRESH);
+       ec->rx_coalesce_usecs =
+               bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+
+       return 0;
+}
+
+static int bcmgenet_set_coalesce(struct net_device *dev,
+                                struct ethtool_coalesce *ec)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       u32 reg;
+
+       /* Base system clock is 125MHz; the DMA timeout is this reference clock
+        * divided by 1024, which yields roughly 8.192us. Our maximum value
+        * has to fit in the DMA_TIMEOUT_MASK (16 bits).
+        */
+       if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
+           ec->tx_max_coalesced_frames == 0 ||
+           ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
+           ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
+               return -EINVAL;
+
+       if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
+               return -EINVAL;
+
+       /* GENET TDMA hardware does not support a configurable timeout, but will
+        * always generate an interrupt either after MBDONE packets have been
+        * transmitted, or when the ring is empty.
+        */
+       if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
+           ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_high ||
+           ec->tx_coalesce_usecs_low)
+               return -EOPNOTSUPP;
+
+       /* Program all TX queues with the same values, as there is no
+        * ethtool knob to do coalescing on a per-queue basis
+        */
+       for (i = 0; i < priv->hw_params->tx_queues; i++)
+               bcmgenet_tdma_ring_writel(priv, i,
+                                         ec->tx_max_coalesced_frames,
+                                         DMA_MBUF_DONE_THRESH);
+       bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
+                                 ec->tx_max_coalesced_frames,
+                                 DMA_MBUF_DONE_THRESH);
+
+       for (i = 0; i < priv->hw_params->rx_queues; i++) {
+               bcmgenet_rdma_ring_writel(priv, i,
+                                         ec->rx_max_coalesced_frames,
+                                         DMA_MBUF_DONE_THRESH);
+
+               reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
+               reg &= ~DMA_TIMEOUT_MASK;
+               reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
+               bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
+       }
+
+       bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+                                 ec->rx_max_coalesced_frames,
+                                 DMA_MBUF_DONE_THRESH);
+
+       reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
+       reg &= ~DMA_TIMEOUT_MASK;
+       reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
+       bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);
+
+       return 0;
+}
+
 /* standard ethtool support functions. */
 enum bcmgenet_stat_type {
        BCMGENET_STAT_NETDEV = -1,
@@ -844,6 +992,8 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
        .get_eee                = bcmgenet_get_eee,
        .set_eee                = bcmgenet_set_eee,
        .nway_reset             = bcmgenet_nway_reset,
+       .get_coalesce           = bcmgenet_get_coalesce,
+       .set_coalesce           = bcmgenet_set_coalesce,
 };
 
 /* Power down the unimac, based on mode. */
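
The coalescing code above converts between ethtool's microsecond knob and the RDMA ring timeout registers, whose unit is the 125 MHz reference clock divided by 1024, i.e. roughly 8.192 us per register increment. Below is a minimal, standalone sketch of that conversion only; the helper names and the 50 us example are illustrative and not part of the patch.

#include <stdio.h>

/* One DMA timeout unit: 1024 / 125 MHz = 8.192 us, expressed here in ns. */
#define GENET_DMA_TIMEOUT_UNIT_NS 8192
#define GENET_DMA_TIMEOUT_MASK    0xffff  /* 16-bit field, like DMA_TIMEOUT_MASK */

/* Requested delay in microseconds -> register units, rounded up the same
 * way bcmgenet_set_coalesce() uses DIV_ROUND_UP().
 */
static unsigned int usecs_to_timeout_units(unsigned int usecs)
{
        return (usecs * 1000 + GENET_DMA_TIMEOUT_UNIT_NS - 1) /
               GENET_DMA_TIMEOUT_UNIT_NS;
}

/* Register units -> microseconds, as bcmgenet_get_coalesce() reports them. */
static unsigned int timeout_units_to_usecs(unsigned int units)
{
        return units * GENET_DMA_TIMEOUT_UNIT_NS / 1000;
}

int main(void)
{
        unsigned int usecs = 50;        /* example request: 50 us */
        unsigned int units = usecs_to_timeout_units(usecs);

        /* 50 us -> 7 units -> reads back as 57 us (granularity ~8.192 us). */
        printf("%u us -> %u units -> %u us\n",
               usecs, units, timeout_units_to_usecs(units));
        return units <= GENET_DMA_TIMEOUT_MASK ? 0 : 1;
}

Because the timeout field is only 16 bits wide, bcmgenet_set_coalesce() also rejects rx_coalesce_usecs values above (DMA_TIMEOUT_MASK * 8) + 1 before programming the rings.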
index 7299d10754226680e71ace26cbe4f996c8867127..29dc2f1bbb199a5a94cccb33b4a020f5991fb66e 100644 (file)
@@ -304,13 +304,12 @@ struct bcmgenet_mib_counters {
 #define UMAC_IRQ_RXDMA_MBDONE          (1 << 13)
 #define UMAC_IRQ_RXDMA_PDONE           (1 << 14)
 #define UMAC_IRQ_RXDMA_BDONE           (1 << 15)
-#define UMAC_IRQ_RXDMA_DONE            (UMAC_IRQ_RXDMA_PDONE | \
-                                        UMAC_IRQ_RXDMA_BDONE)
+#define UMAC_IRQ_RXDMA_DONE            UMAC_IRQ_RXDMA_MBDONE
 #define UMAC_IRQ_TXDMA_MBDONE          (1 << 16)
 #define UMAC_IRQ_TXDMA_PDONE           (1 << 17)
 #define UMAC_IRQ_TXDMA_BDONE           (1 << 18)
-#define UMAC_IRQ_TXDMA_DONE            (UMAC_IRQ_TXDMA_PDONE | \
-                                        UMAC_IRQ_TXDMA_BDONE)
+#define UMAC_IRQ_TXDMA_DONE            UMAC_IRQ_TXDMA_MBDONE
+
 /* Only valid for GENETv3+ */
 #define UMAC_IRQ_MDIO_DONE             (1 << 23)
 #define UMAC_IRQ_MDIO_ERROR            (1 << 24)
@@ -386,7 +385,7 @@ struct bcmgenet_mib_counters {
 #define DMA_RING_BUFFER_SIZE_MASK      0xFFFF
 
 /* DMA interrupt threshold register */
-#define DMA_INTR_THRESHOLD_MASK                0x00FF
+#define DMA_INTR_THRESHOLD_MASK                0x01FF
 
 /* DMA XON/XOFF register */
 #define DMA_XON_THREHOLD_MASK          0xFFFF
index dead17b5d769ca9639b879b9ef48b4e4e3679d44..165b5a8aa2ea5a7abeb8424fd8a5eeeb5e0f48e5 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_HISILICON
        bool "Hisilicon devices"
        default y
-       depends on ARM
+       depends on ARM || ARM64
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -27,8 +27,40 @@ config HIP04_ETH
        select PHYLIB
        select MARVELL_PHY
        select MFD_SYSCON
+       select HNS_MDIO
        ---help---
          If you wish to compile a kernel for a hardware with hisilicon p04 SoC and
          want to use the internal ethernet then you should answer Y to this.
 
+config HNS_MDIO
+       tristate "Hisilicon HNS MDIO device Support"
+       select MDIO
+       ---help---
+         This selects the HNS MDIO support. It is needed by HNS_DSAF to access
+         the PHY.
+
+config HNS
+       tristate "Hisilicon Network Subsystem Support (Framework)"
+       ---help---
+         This selects the framework support for the Hisilicon Network Subsystem. It
+         is needed by any driver which provides an HNS acceleration engine or makes
+         use of the engine.
+
+config HNS_DSAF
+       tristate "Hisilicon HNS DSAF device Support"
+       select HNS
+       select HNS_MDIO
+       ---help---
+         This selects the DSAF (Distributed System Area Fabric) network
+         acceleration engine support. The engine is used in Hisilicon hip05,
+         Hi1610 and later ICT SoCs.
+
+config HNS_ENET
+       tristate "Hisilicon HNS Ethernet Device Support"
+       select PHYLIB
+       select HNS
+       ---help---
+         This selects the general ethernet driver for HNS.  This module makes
+         use of any HNS AE driver, such as HNS_DSAF.
+
 endif # NET_VENDOR_HISILICON
index 6c14540a4dc5e18c6049aaa24e6e8184af99ddd7..390b71fb300062f18ffe0621fa20ded653aef11e 100644 (file)
@@ -3,4 +3,6 @@
 #
 
 obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
-obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o
+obj-$(CONFIG_HIP04_ETH) += hip04_eth.o
+obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
+obj-$(CONFIG_HNS) += hns/
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
deleted file mode 100644 (file)
index fca0a5b..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-/* Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/of_mdio.h>
-#include <linux/delay.h>
-
-#define MDIO_CMD_REG           0x0
-#define MDIO_ADDR_REG          0x4
-#define MDIO_WDATA_REG         0x8
-#define MDIO_RDATA_REG         0xc
-#define MDIO_STA_REG           0x10
-
-#define MDIO_START             BIT(14)
-#define MDIO_R_VALID           BIT(1)
-#define MDIO_READ              (BIT(12) | BIT(11) | MDIO_START)
-#define MDIO_WRITE             (BIT(12) | BIT(10) | MDIO_START)
-
-struct hip04_mdio_priv {
-       void __iomem *base;
-};
-
-#define WAIT_TIMEOUT 10
-static int hip04_mdio_wait_ready(struct mii_bus *bus)
-{
-       struct hip04_mdio_priv *priv = bus->priv;
-       int i;
-
-       for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) {
-               if (i == WAIT_TIMEOUT)
-                       return -ETIMEDOUT;
-               msleep(20);
-       }
-
-       return 0;
-}
-
-static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
-       struct hip04_mdio_priv *priv = bus->priv;
-       u32 val;
-       int ret;
-
-       ret = hip04_mdio_wait_ready(bus);
-       if (ret < 0)
-               goto out;
-
-       val = regnum | (mii_id << 5) | MDIO_READ;
-       writel_relaxed(val, priv->base + MDIO_CMD_REG);
-
-       ret = hip04_mdio_wait_ready(bus);
-       if (ret < 0)
-               goto out;
-
-       val = readl_relaxed(priv->base + MDIO_STA_REG);
-       if (val & MDIO_R_VALID) {
-               dev_err(bus->parent, "SMI bus read not valid\n");
-               ret = -ENODEV;
-               goto out;
-       }
-
-       val = readl_relaxed(priv->base + MDIO_RDATA_REG);
-       ret = val & 0xFFFF;
-out:
-       return ret;
-}
-
-static int hip04_mdio_write(struct mii_bus *bus, int mii_id,
-                           int regnum, u16 value)
-{
-       struct hip04_mdio_priv *priv = bus->priv;
-       u32 val;
-       int ret;
-
-       ret = hip04_mdio_wait_ready(bus);
-       if (ret < 0)
-               goto out;
-
-       writel_relaxed(value, priv->base + MDIO_WDATA_REG);
-       val = regnum | (mii_id << 5) | MDIO_WRITE;
-       writel_relaxed(val, priv->base + MDIO_CMD_REG);
-out:
-       return ret;
-}
-
-static int hip04_mdio_reset(struct mii_bus *bus)
-{
-       int temp, i;
-
-       for (i = 0; i < PHY_MAX_ADDR; i++) {
-               hip04_mdio_write(bus, i, 22, 0);
-               temp = hip04_mdio_read(bus, i, MII_BMCR);
-               if (temp < 0)
-                       continue;
-
-               temp |= BMCR_RESET;
-               if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0)
-                       continue;
-       }
-
-       mdelay(500);
-       return 0;
-}
-
-static int hip04_mdio_probe(struct platform_device *pdev)
-{
-       struct resource *r;
-       struct mii_bus *bus;
-       struct hip04_mdio_priv *priv;
-       int ret;
-
-       bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv));
-       if (!bus) {
-               dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
-               return -ENOMEM;
-       }
-
-       bus->name = "hip04_mdio_bus";
-       bus->read = hip04_mdio_read;
-       bus->write = hip04_mdio_write;
-       bus->reset = hip04_mdio_reset;
-       snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
-       bus->parent = &pdev->dev;
-       priv = bus->priv;
-
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       priv->base = devm_ioremap_resource(&pdev->dev, r);
-       if (IS_ERR(priv->base)) {
-               ret = PTR_ERR(priv->base);
-               goto out_mdio;
-       }
-
-       ret = of_mdiobus_register(bus, pdev->dev.of_node);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
-               goto out_mdio;
-       }
-
-       platform_set_drvdata(pdev, bus);
-
-       return 0;
-
-out_mdio:
-       mdiobus_free(bus);
-       return ret;
-}
-
-static int hip04_mdio_remove(struct platform_device *pdev)
-{
-       struct mii_bus *bus = platform_get_drvdata(pdev);
-
-       mdiobus_unregister(bus);
-       mdiobus_free(bus);
-
-       return 0;
-}
-
-static const struct of_device_id hip04_mdio_match[] = {
-       { .compatible = "hisilicon,hip04-mdio" },
-       { }
-};
-MODULE_DEVICE_TABLE(of, hip04_mdio_match);
-
-static struct platform_driver hip04_mdio_driver = {
-       .probe = hip04_mdio_probe,
-       .remove = hip04_mdio_remove,
-       .driver = {
-               .name = "hip04-mdio",
-               .of_match_table = hip04_mdio_match,
-       },
-};
-
-module_platform_driver(hip04_mdio_driver);
-
-MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hip04-mdio");
diff --git a/drivers/net/ethernet/hisilicon/hns/Makefile b/drivers/net/ethernet/hisilicon/hns/Makefile
new file mode 100644 (file)
index 0000000..6010c83
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HNS) += hnae.o
+
+obj-$(CONFIG_HNS_DSAF) += hns_dsaf.o
+hns_dsaf-objs = hns_ae_adapt.o hns_dsaf_gmac.o hns_dsaf_mac.o hns_dsaf_misc.o \
+       hns_dsaf_main.o hns_dsaf_ppe.o hns_dsaf_rcb.o hns_dsaf_xgmac.o
+
+obj-$(CONFIG_HNS_ENET) += hns_enet_drv.o
+hns_enet_drv-objs = hns_enet.o hns_ethtool.o
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
new file mode 100644 (file)
index 0000000..0a0a9e8
--- /dev/null
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+
+#include "hnae.h"
+
+#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
+
+static struct class *hnae_class;
+
+static void
+hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(lock, flags);
+       list_add_tail_rcu(node, head);
+       spin_unlock_irqrestore(lock, flags);
+}
+
+static void hnae_list_del(spinlock_t *lock, struct list_head *node)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(lock, flags);
+       list_del_rcu(node);
+       spin_unlock_irqrestore(lock, flags);
+}
+
+static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+       unsigned int order = hnae_page_order(ring);
+       struct page *p = dev_alloc_pages(order);
+
+       if (!p)
+               return -ENOMEM;
+
+       cb->priv = p;
+       cb->page_offset = 0;
+       cb->reuse_flag = 0;
+       cb->buf  = page_address(p);
+       cb->length = hnae_page_size(ring);
+       cb->type = DESC_TYPE_PAGE;
+
+       return 0;
+}
+
+static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+       if (cb->type == DESC_TYPE_SKB)
+               dev_kfree_skb_any((struct sk_buff *)cb->priv);
+       else if (unlikely(is_rx_ring(ring)))
+               put_page((struct page *)cb->priv);
+       memset(cb, 0, sizeof(*cb));
+}
+
+static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+       cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
+                              cb->length, ring_to_dma_dir(ring));
+
+       if (dma_mapping_error(ring_to_dev(ring), cb->dma))
+               return -EIO;
+
+       return 0;
+}
+
+static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+       if (cb->type == DESC_TYPE_SKB)
+               dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
+                                ring_to_dma_dir(ring));
+       else
+               dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
+                              ring_to_dma_dir(ring));
+}
+
+static struct hnae_buf_ops hnae_bops = {
+       .alloc_buffer = hnae_alloc_buffer,
+       .free_buffer = hnae_free_buffer,
+       .map_buffer = hnae_map_buffer,
+       .unmap_buffer = hnae_unmap_buffer,
+};
+
+static int __ae_match(struct device *dev, const void *data)
+{
+       struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
+       const char *ae_id = data;
+
+       if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE))
+               return 1;
+
+       return 0;
+}
+
+static struct hnae_ae_dev *find_ae(const char *ae_id)
+{
+       struct device *dev;
+
+       WARN_ON(!ae_id);
+
+       dev = class_find_device(hnae_class, NULL, ae_id, __ae_match);
+
+       return dev ? cls_to_ae_dev(dev) : NULL;
+}
+
+static void hnae_free_buffers(struct hnae_ring *ring)
+{
+       int i;
+
+       for (i = 0; i < ring->desc_num; i++)
+               hnae_free_buffer_detach(ring, i);
+}
+
+/* Allocate memory for raw pkg, and map with dma */
+static int hnae_alloc_buffers(struct hnae_ring *ring)
+{
+       int i, j, ret;
+
+       for (i = 0; i < ring->desc_num; i++) {
+               ret = hnae_alloc_buffer_attach(ring, i);
+               if (ret)
+                       goto out_buffer_fail;
+       }
+
+       return 0;
+
+out_buffer_fail:
+       for (j = i - 1; j >= 0; j--)
+               hnae_free_buffer_detach(ring, j);
+       return ret;
+}
+
+/* free desc along with its attached buffer */
+static void hnae_free_desc(struct hnae_ring *ring)
+{
+       hnae_free_buffers(ring);
+       dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
+                        ring->desc_num * sizeof(ring->desc[0]),
+                        ring_to_dma_dir(ring));
+       ring->desc_dma_addr = 0;
+       kfree(ring->desc);
+       ring->desc = NULL;
+}
+
+/* alloc desc, without buffer attached */
+static int hnae_alloc_desc(struct hnae_ring *ring)
+{
+       int size = ring->desc_num * sizeof(ring->desc[0]);
+
+       ring->desc = kzalloc(size, GFP_KERNEL);
+       if (!ring->desc)
+               return -ENOMEM;
+
+       ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
+               ring->desc, size, ring_to_dma_dir(ring));
+       if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
+               ring->desc_dma_addr = 0;
+               kfree(ring->desc);
+               ring->desc = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* fini ring, also free the buffer for the ring */
+static void hnae_fini_ring(struct hnae_ring *ring)
+{
+       hnae_free_desc(ring);
+       kfree(ring->desc_cb);
+       ring->desc_cb = NULL;
+       ring->next_to_clean = 0;
+       ring->next_to_use = 0;
+}
+
+/* init ring, and with buffer for rx ring */
+static int
+hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
+{
+       int ret;
+
+       if (ring->desc_num <= 0 || ring->buf_size <= 0)
+               return -EINVAL;
+
+       ring->q = q;
+       ring->flags = flags;
+       assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
+
+       /* no matter whether it is a tx or rx ring, the ntu and ntc start from 0 */
+       assert(ring->next_to_use == 0);
+       assert(ring->next_to_clean == 0);
+
+       ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
+                       GFP_KERNEL);
+       if (!ring->desc_cb) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = hnae_alloc_desc(ring);
+       if (ret)
+               goto out_with_desc_cb;
+
+       if (is_rx_ring(ring)) {
+               ret = hnae_alloc_buffers(ring);
+               if (ret)
+                       goto out_with_desc;
+       }
+
+       return 0;
+
+out_with_desc:
+       hnae_free_desc(ring);
+out_with_desc_cb:
+       kfree(ring->desc_cb);
+       ring->desc_cb = NULL;
+out:
+       return ret;
+}
+
+static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
+                          struct hnae_ae_dev *dev)
+{
+       int ret;
+
+       q->dev = dev;
+       q->handle = h;
+
+       ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
+       if (ret)
+               goto out;
+
+       ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
+       if (ret)
+               goto out_with_tx_ring;
+
+       if (dev->ops->init_queue)
+               dev->ops->init_queue(q);
+
+       return 0;
+
+out_with_tx_ring:
+       hnae_fini_ring(&q->tx_ring);
+out:
+       return ret;
+}
+
+static void hnae_fini_queue(struct hnae_queue *q)
+{
+       if (q->dev->ops->fini_queue)
+               q->dev->ops->fini_queue(q);
+
+       hnae_fini_ring(&q->tx_ring);
+       hnae_fini_ring(&q->rx_ring);
+}
+
+/**
+ * ae_chain - define ae chain head
+ */
+static RAW_NOTIFIER_HEAD(ae_chain);
+
+int hnae_register_notifier(struct notifier_block *nb)
+{
+       return raw_notifier_chain_register(&ae_chain, nb);
+}
+EXPORT_SYMBOL(hnae_register_notifier);
+
+void hnae_unregister_notifier(struct notifier_block *nb)
+{
+       if (raw_notifier_chain_unregister(&ae_chain, nb))
+               dev_err(NULL, "notifier chain unregister fail\n");
+}
+EXPORT_SYMBOL(hnae_unregister_notifier);
+
+int hnae_reinit_handle(struct hnae_handle *handle)
+{
+       int i, j;
+       int ret;
+
+       for (i = 0; i < handle->q_num; i++) /* free ring*/
+               hnae_fini_queue(handle->qs[i]);
+
+       if (handle->dev->ops->reset)
+               handle->dev->ops->reset(handle);
+
+       for (i = 0; i < handle->q_num; i++) {/* reinit ring*/
+               ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
+               if (ret)
+                       goto out_when_init_queue;
+       }
+       return 0;
+out_when_init_queue:
+       for (j = i - 1; j >= 0; j--)
+               hnae_fini_queue(handle->qs[j]);
+       return ret;
+}
+EXPORT_SYMBOL(hnae_reinit_handle);
+
+/* hnae_get_handle - get a handle from the AE
+ * @owner_dev: the dev that uses this handle
+ * @ae_id: the id of the ae to be used
+ * @port_id: the port id on the AE device
+ * @bops: the callbacks for buffer management
+ *
+ * return handle ptr or ERR_PTR
+ */
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+                                   const char *ae_id, u32 port_id,
+                                   struct hnae_buf_ops *bops)
+{
+       struct hnae_ae_dev *dev;
+       struct hnae_handle *handle;
+       int i, j;
+       int ret;
+
+       dev = find_ae(ae_id);
+       if (!dev)
+               return ERR_PTR(-ENODEV);
+
+       handle = dev->ops->get_handle(dev, port_id);
+       if (IS_ERR(handle))
+               return handle;
+
+       handle->dev = dev;
+       handle->owner_dev = owner_dev;
+       handle->bops = bops ? bops : &hnae_bops;
+       handle->eport_id = port_id;
+
+       for (i = 0; i < handle->q_num; i++) {
+               ret = hnae_init_queue(handle, handle->qs[i], dev);
+               if (ret)
+                       goto out_when_init_queue;
+       }
+
+       __module_get(dev->owner);
+
+       hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);
+
+       return handle;
+
+out_when_init_queue:
+       for (j = i - 1; j >= 0; j--)
+               hnae_fini_queue(handle->qs[j]);
+
+       return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(hnae_get_handle);
+
+void hnae_put_handle(struct hnae_handle *h)
+{
+       struct hnae_ae_dev *dev = h->dev;
+       int i;
+
+       for (i = 0; i < h->q_num; i++)
+               hnae_fini_queue(h->qs[i]);
+
+       if (h->dev->ops->reset)
+               h->dev->ops->reset(h);
+
+       hnae_list_del(&dev->lock, &h->node);
+
+       if (dev->ops->put_handle)
+               dev->ops->put_handle(h);
+
+       module_put(dev->owner);
+}
+EXPORT_SYMBOL(hnae_put_handle);
+
+static void hnae_release(struct device *dev)
+{
+}
+
+/**
+ * hnae_ae_register - register an AE engine with the hnae framework
+ * @hdev: the hnae ae engine device
+ * @owner:  the module who provides this dev
+ * NOTE: the duplicated name will not be checked
+ */
+int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
+{
+       static atomic_t id = ATOMIC_INIT(-1);
+       int ret;
+
+       if (!hdev->dev)
+               return -ENODEV;
+
+       if (!hdev->ops || !hdev->ops->get_handle ||
+           !hdev->ops->toggle_ring_irq ||
+           !hdev->ops->toggle_queue_status ||
+           !hdev->ops->get_status || !hdev->ops->adjust_link)
+               return -EINVAL;
+
+       hdev->owner = owner;
+       hdev->id = (int)atomic_inc_return(&id);
+       hdev->cls_dev.parent = hdev->dev;
+       hdev->cls_dev.class = hnae_class;
+       hdev->cls_dev.release = hnae_release;
+       (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
+       ret = device_register(&hdev->cls_dev);
+       if (ret)
+               return ret;
+
+       __module_get(THIS_MODULE);
+
+       INIT_LIST_HEAD(&hdev->handle_list);
+       spin_lock_init(&hdev->lock);
+
+       ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
+       if (ret)
+               dev_dbg(hdev->dev,
+                       "has not notifier for AE: %s\n", hdev->name);
+
+       return 0;
+}
+EXPORT_SYMBOL(hnae_ae_register);
+
+/**
+ * hnae_ae_unregister - unregisters an HNAE AE engine
+ * @hdev: the device to unregister
+ */
+void hnae_ae_unregister(struct hnae_ae_dev *hdev)
+{
+       device_unregister(&hdev->cls_dev);
+       module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL(hnae_ae_unregister);
+
+static ssize_t handles_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       ssize_t s = 0;
+       struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
+       struct hnae_handle *h;
+       int i = 0, j;
+
+       list_for_each_entry_rcu(h, &hdev->handle_list, node) {
+               s += sprintf(buf + s, "handle %d (eport_id=%u from %s):\n",
+                           i++, h->eport_id, h->dev->name);
+               for (j = 0; j < h->q_num; j++) {
+                       s += sprintf(buf + s, "\tqueue[%d] on 0x%llx\n",
+                                    j, (u64)h->qs[j]->io_base);
+#define HANDEL_TX_MSG "\t\ttx_ring on 0x%llx:%u,%u,%u,%u,%u,%llu,%llu\n"
+                       s += sprintf(buf + s,
+                                    HANDEL_TX_MSG,
+                                    (u64)h->qs[j]->tx_ring.io_base,
+                                    h->qs[j]->tx_ring.buf_size,
+                                    h->qs[j]->tx_ring.desc_num,
+                                    h->qs[j]->tx_ring.max_desc_num_per_pkt,
+                                    h->qs[j]->tx_ring.max_raw_data_sz_per_desc,
+                                    h->qs[j]->tx_ring.max_pkt_size,
+                                    h->qs[j]->tx_ring.stats.sw_err_cnt,
+                                    h->qs[j]->tx_ring.stats.io_err_cnt);
+                       s += sprintf(buf + s,
+                               "\t\trx_ring on 0x%llx:%u,%u,%llu,%llu,%llu\n",
+                               (u64)h->qs[j]->rx_ring.io_base,
+                               h->qs[j]->rx_ring.buf_size,
+                               h->qs[j]->rx_ring.desc_num,
+                               h->qs[j]->rx_ring.stats.sw_err_cnt,
+                               h->qs[j]->rx_ring.stats.io_err_cnt,
+                               h->qs[j]->rx_ring.stats.seg_pkt_cnt);
+               }
+       }
+
+       return s;
+}
+
+static DEVICE_ATTR_RO(handles);
+static struct attribute *hnae_class_attrs[] = {
+       &dev_attr_handles.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(hnae_class);
+
+static int __init hnae_init(void)
+{
+       hnae_class = class_create(THIS_MODULE, "hnae");
+       if (IS_ERR(hnae_class))
+               return PTR_ERR(hnae_class);
+
+       hnae_class->dev_groups = hnae_class_groups;
+       return 0;
+}
+
+static void __exit hnae_exit(void)
+{
+       class_destroy(hnae_class);
+}
+
+subsys_initcall(hnae_init);
+module_exit(hnae_exit);
+
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");
+
+/* vi: set tw=78 noet: */
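
For orientation, here is a minimal consumer-side sketch of the framework defined above: an upper-layer driver looks up an accelerator by name, obtains a handle for one port, and releases it again. The function name, device pointer, "dsaf0" string and port number are placeholders chosen for illustration (the in-tree consumer is the hns_enet driver added elsewhere in this commit); passing a NULL buf_ops selects the framework's default page-based hnae_bops.

#include <linux/device.h>
#include <linux/err.h>

#include "hnae.h"

/* example_attach() is hypothetical; "dsaf0" and port 2 are placeholders
 * for illustration only.
 */
static int example_attach(struct device *dev)
{
        struct hnae_handle *h;
        int i;

        /* find the AE registered under "dsaf0" and request its port 2 */
        h = hnae_get_handle(dev, "dsaf0", 2, NULL);
        if (IS_ERR(h))
                return PTR_ERR(h);

        /* a handle is a set of queues; each queue has one tx and one rx ring */
        for (i = 0; i < h->q_num; i++)
                dev_info(dev, "queue %d: %d rx descriptors\n",
                         i, h->qs[i]->rx_ring.desc_num);

        /* finalize the queues and drop the AE's module reference */
        hnae_put_handle(h);
        return 0;
}

hnae_get_handle() initializes the tx and rx rings of every queue up front; if any hnae_init_queue() call fails, the queues that were already set up are torn down and ERR_PTR(-ENOMEM) is returned, so a caller only ever sees a fully usable handle or an error.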
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
new file mode 100644 (file)
index 0000000..5edd8cd
--- /dev/null
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNAE_H
+#define __HNAE_H
+
+/* Names used in this framework:
+ *      ae handle (handle):
+ *        a set of queues provided by AE
+ *      ring buffer queue (rbq):
+ *        the channel between upper layer and the AE, can do tx and rx
+ *      ring:
+ *        a tx or rx channel within a rbq
+ *      ring description (desc):
+ *        an element in the ring with packet information
+ *      buffer:
+ *        a memory region referred by desc with the full packet payload
+ *
+ * "num" means a static number set as a parameter, "count" mean a dynamic
+ *   number set while running
+ * "cb" means control block
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+#define HNAE_DRIVER_VERSION "1.3.0"
+#define HNAE_DRIVER_NAME "hns"
+#define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver"
+#define HNAE_DEFAULT_DEVICE_DESCR "Hisilicon Network Subsystem"
+
+#ifdef DEBUG
+
+#ifndef assert
+#define assert(expr) \
+do { \
+       if (!(expr)) { \
+               pr_err("Assertion failed! %s, %s, %s, line %d\n", \
+                          #expr, __FILE__, __func__, __LINE__); \
+       } \
+} while (0)
+#endif
+
+#else
+
+#ifndef assert
+#define assert(expr)
+#endif
+
+#endif
+
+#define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0')
+#define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0')
+#define AE_NAME_SIZE 16
+
+/* some said the RX and TX RCB format should not be the same in the future. But
+ * it is the same now...
+ */
+#define RCB_REG_BASEADDR_L         0x00 /* P660 support only 32bit accessing */
+#define RCB_REG_BASEADDR_H         0x04
+#define RCB_REG_BD_NUM             0x08
+#define RCB_REG_BD_LEN             0x0C
+#define RCB_REG_PKTLINE            0x10
+#define RCB_REG_TAIL               0x18
+#define RCB_REG_HEAD               0x1C
+#define RCB_REG_FBDNUM             0x20
+#define RCB_REG_OFFSET             0x24 /* pkt num to be handled */
+#define RCB_REG_PKTNUM_RECORD      0x2C /* total pkt received */
+
+#define HNS_RX_HEAD_SIZE 256
+
+#define HNAE_AE_REGISTER 0x1
+
+#define RCB_RING_NAME_LEN 16
+
+enum hnae_led_state {
+       HNAE_LED_INACTIVE,
+       HNAE_LED_ACTIVE,
+       HNAE_LED_ON,
+       HNAE_LED_OFF
+};
+
+#define HNS_RX_FLAG_VLAN_PRESENT 0x1
+#define HNS_RX_FLAG_L3ID_IPV4 0x0
+#define HNS_RX_FLAG_L3ID_IPV6 0x1
+#define HNS_RX_FLAG_L4ID_UDP 0x0
+#define HNS_RX_FLAG_L4ID_TCP 0x1
+
+#define HNS_TXD_ASID_S 0
+#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
+#define HNS_TXD_BUFNUM_S 8
+#define HNS_TXD_BUFNUM_M (0x3 << HNS_TXD_BUFNUM_S)
+#define HNS_TXD_PORTID_S 10
+#define HNS_TXD_PORTID_M (0x7 << HNS_TXD_PORTID_S)
+
+#define HNS_TXD_RA_B 8
+#define HNS_TXD_RI_B 9
+#define HNS_TXD_L4CS_B 10
+#define HNS_TXD_L3CS_B 11
+#define HNS_TXD_FE_B 12
+#define HNS_TXD_VLD_B 13
+#define HNS_TXD_IPOFFSET_S 14
+#define HNS_TXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)
+
+#define HNS_RXD_IPOFFSET_S 0
+#define HNS_RXD_IPOFFSET_M (0xff << HNS_RXD_IPOFFSET_S)
+#define HNS_RXD_BUFNUM_S 8
+#define HNS_RXD_BUFNUM_M (0x3 << HNS_RXD_BUFNUM_S)
+#define HNS_RXD_PORTID_S 10
+#define HNS_RXD_PORTID_M (0x7 << HNS_RXD_PORTID_S)
+#define HNS_RXD_DMAC_S 13
+#define HNS_RXD_DMAC_M (0x3 << HNS_RXD_DMAC_S)
+#define HNS_RXD_VLAN_S 15
+#define HNS_RXD_VLAN_M (0x3 << HNS_RXD_VLAN_S)
+#define HNS_RXD_L3ID_S 17
+#define HNS_RXD_L3ID_M (0xf << HNS_RXD_L3ID_S)
+#define HNS_RXD_L4ID_S 21
+#define HNS_RXD_L4ID_M (0xf << HNS_RXD_L4ID_S)
+#define HNS_RXD_FE_B 25
+#define HNS_RXD_FRAG_B 26
+#define HNS_RXD_VLD_B 27
+#define HNS_RXD_L2E_B 28
+#define HNS_RXD_L3E_B 29
+#define HNS_RXD_L4E_B 30
+#define HNS_RXD_DROP_B 31
+
+#define HNS_RXD_VLANID_S 8
+#define HNS_RXD_VLANID_M (0xfff << HNS_RXD_VLANID_S)
+#define HNS_RXD_CFI_B 20
+#define HNS_RXD_PRI_S 21
+#define HNS_RXD_PRI_M (0x7 << HNS_RXD_PRI_S)
+#define HNS_RXD_ASID_S 24
+#define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S)
+
+/* hardware spec ring buffer format */
+struct __packed hnae_desc {
+       __le64 addr;
+       union {
+               struct {
+                       __le16 asid_bufnum_pid;
+                       __le16 send_size;
+                       __le32 flag_ipoffset;
+                       __le32 reserved_3[4];
+               } tx;
+
+               struct {
+                       __le32 ipoff_bnum_pid_flag;
+                       __le16 pkt_len;
+                       __le16 size;
+                       __le32 vlan_pri_asid;
+                       __le32 reserved_2[3];
+               } rx;
+       };
+};
+
+struct hnae_desc_cb {
+       dma_addr_t dma; /* dma address of this desc */
+       void *buf;      /* cpu addr for a desc */
+
+       /* priv data for the desc, e.g. skb when used with the IP stack */
+       void *priv;
+       u16 page_offset;
+       u16 reuse_flag;
+
+       u16 length;     /* length of the buffer */
+
+       /* desc type, used by the ring user to mark the type of the priv data */
+       u16 type;
+};
+
+#define setflags(flags, bits) ((flags) |= (bits))
+#define unsetflags(flags, bits) ((flags) &= ~(bits))
+
+/* hnae_ring->flags fields */
+#define RINGF_DIR 0x1      /* TX or RX ring, set if TX */
+#define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
+#define is_rx_ring(ring) (!is_tx_ring(ring))
+#define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
+       DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+struct ring_stats {
+       u64 io_err_cnt;
+       u64 sw_err_cnt;
+       u64 seg_pkt_cnt;
+       union {
+               struct {
+                       u64 tx_pkts;
+                       u64 tx_bytes;
+                       u64 tx_err_cnt;
+                       u64 restart_queue;
+                       u64 tx_busy;
+               };
+               struct {
+                       u64 rx_pkts;
+                       u64 rx_bytes;
+                       u64 rx_err_cnt;
+                       u64 reuse_pg_cnt;
+                       u64 err_pkt_len;
+                       u64 non_vld_descs;
+                       u64 err_bd_num;
+                       u64 l2_err;
+                       u64 l3l4_csum_err;
+               };
+       };
+};
+
+struct hnae_queue;
+
+struct hnae_ring {
+       u8 __iomem *io_base; /* base io address for the ring */
+       struct hnae_desc *desc; /* dma map address space */
+       struct hnae_desc_cb *desc_cb;
+       struct hnae_queue *q;
+       int irq;
+       char ring_name[RCB_RING_NAME_LEN];
+
+       /* statistic */
+       struct ring_stats stats;
+
+       dma_addr_t desc_dma_addr;
+       u32 buf_size;       /* size for hnae_desc->addr, preset by AE */
+       u16 desc_num;       /* total number of desc */
+       u16 max_desc_num_per_pkt;
+       u16 max_raw_data_sz_per_desc;
+       u16 max_pkt_size;
+       int next_to_use;    /* idx of next spare desc */
+
+       /* idx of the latest sent desc; the ring is empty when it equals
+        * next_to_use
+        */
+       int next_to_clean;
+
+       int flags;          /* ring attribute */
+       int irq_init_flag;
+};
+
+#define ring_ptr_move_fw(ring, p) \
+       ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
+#define ring_ptr_move_bw(ring, p) \
+       ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
+
+enum hns_desc_type {
+       DESC_TYPE_SKB,
+       DESC_TYPE_PAGE,
+};
+
+#define assert_is_ring_idx(ring, idx) \
+       assert((idx) >= 0 && (idx) < (ring)->desc_num)
+
+/* the distance between [begin, end) in a ring buffer
+ * note: there is an unused slot between the begin and the end
+ */
+static inline int ring_dist(struct hnae_ring *ring, int begin, int end)
+{
+       assert_is_ring_idx(ring, begin);
+       assert_is_ring_idx(ring, end);
+
+       return (end - begin + ring->desc_num) % ring->desc_num;
+}
+
+static inline int ring_space(struct hnae_ring *ring)
+{
+       return ring->desc_num -
+               ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+}
+
+static inline int is_ring_empty(struct hnae_ring *ring)
+{
+       assert_is_ring_idx(ring, ring->next_to_use);
+       assert_is_ring_idx(ring, ring->next_to_clean);
+
+       return ring->next_to_use == ring->next_to_clean;
+}
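A worked example of the ring arithmetic above (editorial illustration; the numbers are arbitrary):

/* With desc_num = 8, next_to_clean = 2 and next_to_use = 6:
 *   ring_dist()  = (6 - 2 + 8) % 8 = 4 descriptors are in flight
 *   ring_space() = 8 - 4 - 1      = 3 descriptors are still free
 * The "- 1" keeps one slot permanently unused, so next_to_use == next_to_clean
 * can unambiguously mean "empty".
 */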
+
+#define hnae_buf_size(_ring) ((_ring)->buf_size)
+#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
+#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+
+struct hnae_handle;
+
+/* allocate and dma map space for hnae desc */
+struct hnae_buf_ops {
+       int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+       void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+       int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+       void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+};
+
+struct hnae_queue {
+       void __iomem *io_base;
+       phys_addr_t phy_base;
+       struct hnae_ae_dev *dev;        /* the device that uses this queue */
+       struct hnae_ring rx_ring, tx_ring;
+       struct hnae_handle *handle;
+};
+
+/*hnae loop mode*/
+enum hnae_loop {
+       MAC_INTERNALLOOP_MAC = 0,
+       MAC_INTERNALLOOP_SERDES,
+       MAC_INTERNALLOOP_PHY,
+       MAC_LOOP_NONE,
+};
+
+/*hnae port type*/
+enum hnae_port_type {
+       HNAE_PORT_SERVICE = 0,
+       HNAE_PORT_DEBUG
+};
+
+/* This struct defines the operation on the handle.
+ *
+ * get_handle(): (mandatory)
+ *   Get a handle from AE according to its name and options.
+ *   The AE driver should manage the space used by the handle and its queues,
+ *   while the HNAE framework allocates desc and desc_cb for all rings in the
+ *   queues.
+ * put_handle():
+ *   Release the handle.
+ * start():
+ *   Enable the hardware, including all queues
+ * stop():
+ *   Disable the hardware
+ * set_opts(): (mandatory)
+ *   Set options to the AE
+ * get_opts(): (mandatory)
+ *   Get options from the AE
+ * get_status():
+ *   Get the carrier state of the back channel of the handle: 1 for link up,
+ *   0 for link down
+ * toggle_ring_irq(): (mandatory)
+ *   Set the ring irq to be enabled (0) or disabled (1)
+ * toggle_queue_status(): (mandatory)
+ *   Set the queue to be enabled (1) or disabled (0); this does not change the
+ *   ring irq state
+ * adjust_link()
+ *   adjust link status
+ * set_loopback()
+ *   set loopback
+ * get_ring_bdnum_limit()
+ *   get ring bd number limit
+ * get_pauseparam()
+ *   get the tx and rx pause frame settings
+ * set_autoneg()
+ *   set pause frame autonegotiation
+ * get_autoneg()
+ *   get pause frame autonegotiation
+ * set_pauseparam()
+ *   set the tx and rx pause frame settings
+ * get_coalesce_usecs()
+ *   get usecs to delay a TX interrupt after a packet is sent
+ * get_rx_max_coalesced_frames()
+ *   get the maximum number of packets to be handled before an interrupt
+ * set_coalesce_usecs()
+ *   set usecs to delay a TX interrupt after a packet is sent
+ * set_coalesce_frames()
+ *   set the maximum number of packets to be sent before a TX interrupt
+ * get_ringnum()
+ *   get RX/TX ring number
+ * get_max_ringnum()
+ *   get RX/TX ring maximum number
+ * get_mac_addr()
+ *   get mac address
+ * set_mac_addr()
+ *   set mac address
+ * set_mc_addr()
+ *   add a multicast mac address
+ * set_mtu()
+ *   set mtu
+ * update_stats()
+ *   update the net_device statistics
+ * get_ethtool_stats()
+ *   get ethtool network device statistics
+ * get_strings()
+ *   get a set of strings that describe the requested objects
+ * get_sset_count()
+ *   get number of strings that @get_strings will write
+ * update_led_status()
+ *   update the led status
+ * set_led_id()
+ *   set led id
+ * get_regs()
+ *   get regs dump
+ * get_regs_len()
+ *   get the len of the regs dump
+ */
+struct hnae_ae_ops {
+       struct hnae_handle *(*get_handle)(struct hnae_ae_dev *dev,
+                                         u32 port_id);
+       void (*put_handle)(struct hnae_handle *handle);
+       void (*init_queue)(struct hnae_queue *q);
+       void (*fini_queue)(struct hnae_queue *q);
+       int (*start)(struct hnae_handle *handle);
+       void (*stop)(struct hnae_handle *handle);
+       void (*reset)(struct hnae_handle *handle);
+       int (*set_opts)(struct hnae_handle *handle, int type, void *opts);
+       int (*get_opts)(struct hnae_handle *handle, int type, void **opts);
+       int (*get_status)(struct hnae_handle *handle);
+       int (*get_info)(struct hnae_handle *handle,
+                       u8 *auto_neg, u16 *speed, u8 *duplex);
+       void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
+       void (*toggle_queue_status)(struct hnae_queue *queue, u32 val);
+       void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+       int (*set_loopback)(struct hnae_handle *handle,
+                           enum hnae_loop loop_mode, int en);
+       void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
+                                    u32 *uplimit);
+       void (*get_pauseparam)(struct hnae_handle *handle,
+                              u32 *auto_neg, u32 *rx_en, u32 *tx_en);
+       int (*set_autoneg)(struct hnae_handle *handle, u8 enable);
+       int (*get_autoneg)(struct hnae_handle *handle);
+       int (*set_pauseparam)(struct hnae_handle *handle,
+                             u32 auto_neg, u32 rx_en, u32 tx_en);
+       void (*get_coalesce_usecs)(struct hnae_handle *handle,
+                                  u32 *tx_usecs, u32 *rx_usecs);
+       void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
+                                           u32 *tx_frames, u32 *rx_frames);
+       void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+       int (*set_coalesce_frames)(struct hnae_handle *handle,
+                                  u32 coalesce_frames);
+       int (*get_mac_addr)(struct hnae_handle *handle, void **p);
+       int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+       int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
+       int (*set_mtu)(struct hnae_handle *handle, int new_mtu);
+       void (*update_stats)(struct hnae_handle *handle,
+                            struct net_device_stats *net_stats);
+       void (*get_stats)(struct hnae_handle *handle, u64 *data);
+       void (*get_strings)(struct hnae_handle *handle,
+                           u32 stringset, u8 *data);
+       int (*get_sset_count)(struct hnae_handle *handle, int stringset);
+       void (*update_led_status)(struct hnae_handle *handle);
+       int (*set_led_id)(struct hnae_handle *handle,
+                         enum hnae_led_state status);
+       void (*get_regs)(struct hnae_handle *handle, void *data);
+       int (*get_regs_len)(struct hnae_handle *handle);
+};
+
+struct hnae_ae_dev {
+       struct device cls_dev; /* the class dev */
+       struct device *dev; /* the presented dev */
+       struct hnae_ae_ops *ops;
+       struct list_head node;
+       struct module *owner; /* the module who provides this dev */
+       int id;
+       char name[AE_NAME_SIZE];
+       struct list_head handle_list;
+       spinlock_t lock; /* lock to protect the handle_list */
+};
+
+struct hnae_handle {
+       struct device *owner_dev; /* the device which makes use of this handle */
+       struct hnae_ae_dev *dev;  /* the device that provides this handle */
+       struct device_node *phy_node;
+       phy_interface_t phy_if;
+       u32 if_support;
+       int q_num;
+       int vf_id;
+       u32 eport_id;
+       enum hnae_port_type port_type;
+       struct list_head node;    /* list to hnae_ae_dev->handle_list */
+       struct hnae_buf_ops *bops; /* operation for the buffer */
+       struct hnae_queue **qs;  /* array base of all queues */
+};
+
+#define ring_to_dev(ring) ((ring)->q->dev->dev)
+
+struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id,
+                                   u32 port_id, struct hnae_buf_ops *bops);
+void hnae_put_handle(struct hnae_handle *handle);
+int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
+void hnae_ae_unregister(struct hnae_ae_dev *dev);
+
+int hnae_register_notifier(struct notifier_block *nb);
+void hnae_unregister_notifier(struct notifier_block *nb);
+int hnae_reinit_handle(struct hnae_handle *handle);
+
+#define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
+       (q)->tx_ring.io_base + RCB_REG_TAIL)
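For context, a minimal sketch of how an upper-layer driver is expected to use this API (editorial illustration only; the ae_id string, port number and function name are invented, and error handling is trimmed):

static int example_bringup(struct device *owner, struct hnae_buf_ops *bops)
{
        struct hnae_handle *h;
        int ret;

        h = hnae_get_handle(owner, "hisi-dsaf0", 0, bops);      /* port 0 */
        if (IS_ERR_OR_NULL(h))
                return -ENODEV;

        ret = h->dev->ops->start(h);    /* enable the MAC and all queues */
        if (ret) {
                hnae_put_handle(h);
                return ret;
        }

        hnae_queue_xmit(h->qs[0], 1);   /* tell the RCB one new BD is queued */
        return 0;
}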
+
+#ifndef assert
+#define assert(cond)
+#endif
+
+static inline int hnae_reserve_buffer_map(struct hnae_ring *ring,
+                                         struct hnae_desc_cb *cb)
+{
+       struct hnae_buf_ops *bops = ring->q->handle->bops;
+       int ret;
+
+       ret = bops->alloc_buffer(ring, cb);
+       if (ret)
+               goto out;
+
+       ret = bops->map_buffer(ring, cb);
+       if (ret)
+               goto out_with_buf;
+
+       return 0;
+
+out_with_buf:
+       bops->free_buffer(ring, cb);
+out:
+       return ret;
+}
+
+static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
+{
+       int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]);
+
+       if (ret)
+               return ret;
+
+       ring->desc[i].addr = (__le64)ring->desc_cb[i].dma;
+
+       return 0;
+}
+
+static inline void hnae_buffer_detach(struct hnae_ring *ring, int i)
+{
+       ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
+       ring->desc[i].addr = 0;
+}
+
+static inline void hnae_free_buffer_detach(struct hnae_ring *ring, int i)
+{
+       struct hnae_buf_ops *bops = ring->q->handle->bops;
+       struct hnae_desc_cb *cb = &ring->desc_cb[i];
+
+       if (!ring->desc_cb[i].dma)
+               return;
+
+       hnae_buffer_detach(ring, i);
+       bops->free_buffer(ring, cb);
+}
+
+/* detach an in-use buffer and replace it with a reserved one */
+static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
+                                      struct hnae_desc_cb *res_cb)
+{
+       struct hnae_buf_ops *bops = ring->q->handle->bops;
+       struct hnae_desc_cb tmp_cb = ring->desc_cb[i];
+
+       bops->unmap_buffer(ring, &ring->desc_cb[i]);
+       ring->desc_cb[i] = *res_cb;
+       *res_cb = tmp_cb;
+       ring->desc[i].addr = (__le64)ring->desc_cb[i].dma;
+       ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
+{
+       ring->desc_cb[i].reuse_flag = 0;
+       ring->desc[i].addr = (__le64)(ring->desc_cb[i].dma
+               + ring->desc_cb[i].page_offset);
+       ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+#define hnae_set_field(origin, mask, shift, val) \
+       do { \
+               (origin) &= (~(mask)); \
+               (origin) |= ((val) << (shift)) & (mask); \
+       } while (0)
+
+#define hnae_set_bit(origin, shift, val) \
+       hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
+
+#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define hnae_get_bit(origin, shift) \
+       hnae_get_field((origin), (0x1 << (shift)), (shift))
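As an illustration of how these helpers combine with the HNS_TXD_* shift/mask pairs defined earlier in this header (editorial sketch; the field values and the function name are invented):

static inline __le32 example_tx_flag_ipoffset(u8 ip_offset, bool last_frag)
{
        u32 flag = 0;

        hnae_set_field(flag, HNS_TXD_IPOFFSET_M, HNS_TXD_IPOFFSET_S, ip_offset);
        hnae_set_bit(flag, HNS_TXD_VLD_B, 1);           /* descriptor is valid */
        hnae_set_bit(flag, HNS_TXD_FE_B, last_frag);    /* frame end marker */

        return cpu_to_le32(flag);       /* fills hnae_desc.tx.flag_ipoffset */
}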
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
new file mode 100644 (file)
index 0000000..a2c72f8
--- /dev/null
@@ -0,0 +1,777 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "hnae.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define AE_NAME_PORT_ID_IDX 6
+#define ETH_STATIC_REG  1
+#define ETH_DUMP_REG    5
+#define ETH_GSTRING_LEN        32
+
+static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
+{
+       struct  hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+       return vf_cb->mac_cb;
+}
+
+/**
+ * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
+ * @port_id: enet port id; debug ports 0-1, service ports 2-7 (dsaf mode only 2)
+ *
+ * Return: dsaf port id; service ports 0-5, debug ports 6-7
+ */
+static int hns_ae_map_eport_to_dport(u32 port_id)
+{
+       int port_index;
+
+       if (port_id < DSAF_DEBUG_NW_NUM)
+               port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
+       else
+               port_index = port_id - DSAF_DEBUG_NW_NUM;
+
+       return port_index;
+}
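A worked example of the mapping above (editorial illustration, assuming DSAF_DEBUG_NW_NUM is 2 and DSAF_SERVICE_PORT_NUM_PER_DSAF is 6, as the function's comment implies):

/* enet port 0 (debug)   -> dsaf port 0 + 6 = 6
 * enet port 1 (debug)   -> dsaf port 1 + 6 = 7
 * enet port 2 (service) -> dsaf port 2 - 2 = 0
 * enet port 7 (service) -> dsaf port 7 - 2 = 5
 */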
+
+static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
+{
+       return container_of(dev, struct dsaf_device, ae_dev);
+}
+
+static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
+{
+       int ppe_index;
+       int ppe_common_index;
+       struct ppe_common_cb *ppe_comm;
+       struct  hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+       if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
+               ppe_index = vf_cb->port_index;
+               ppe_common_index = 0;
+       } else {
+               ppe_index = 0;
+               ppe_common_index =
+                       vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
+       }
+       ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
+       return &ppe_comm->ppe_cb[ppe_index];
+}
+
+static int hns_ae_get_q_num_per_vf(
+       struct dsaf_device *dsaf_dev, int port)
+{
+       int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+
+       return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
+}
+
+static int hns_ae_get_vf_num_per_port(
+       struct dsaf_device *dsaf_dev, int port)
+{
+       int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+
+       return dsaf_dev->rcb_common[common_idx]->max_vfn;
+}
+
+static struct ring_pair_cb *hns_ae_get_base_ring_pair(
+       struct dsaf_device *dsaf_dev, int port)
+{
+       int common_idx = hns_dsaf_get_comm_idx_by_port(port);
+       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
+       int q_num = rcb_comm->max_q_per_vf;
+       int vf_num = rcb_comm->max_vfn;
+
+       if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
+               return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
+       else
+               return &rcb_comm->ring_pair_cb[0];
+}
+
+static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
+{
+       return container_of(q, struct ring_pair_cb, q);
+}
+
+struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+                                     u32 port_id)
+{
+       int port_idx;
+       int vfnum_per_port;
+       int qnum_per_vf;
+       int i;
+       struct dsaf_device *dsaf_dev;
+       struct hnae_handle *ae_handle;
+       struct ring_pair_cb *ring_pair_cb;
+       struct hnae_vf_cb *vf_cb;
+
+       dsaf_dev = hns_ae_get_dsaf_dev(dev);
+       port_idx = hns_ae_map_eport_to_dport(port_id);
+
+       ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
+       vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
+       qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);
+
+       vf_cb = kzalloc(sizeof(*vf_cb) +
+                       qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
+       if (unlikely(!vf_cb)) {
+               dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
+               ae_handle = ERR_PTR(-ENOMEM);
+               goto handle_err;
+       }
+       ae_handle = &vf_cb->ae_handle;
+       /* ae_handle Init  */
+       ae_handle->owner_dev = dsaf_dev->dev;
+       ae_handle->dev = dev;
+       ae_handle->q_num = qnum_per_vf;
+
+       /* find ring pair, and set vf id*/
+       for (ae_handle->vf_id = 0;
+               ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
+               if (!ring_pair_cb->used_by_vf)
+                       break;
+               ring_pair_cb += qnum_per_vf;
+       }
+       if (ae_handle->vf_id >= vfnum_per_port) {
+               dev_err(dsaf_dev->dev, "malloc queue fail!\n");
+               ae_handle = ERR_PTR(-EINVAL);
+               goto vf_id_err;
+       }
+
+       ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
+       for (i = 0; i < qnum_per_vf; i++) {
+               ae_handle->qs[i] = &ring_pair_cb->q;
+               ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
+               ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
+
+               ring_pair_cb->used_by_vf = 1;
+               if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+                       ring_pair_cb->port_id_in_dsa = port_idx;
+               else
+                       ring_pair_cb->port_id_in_dsa = 0;
+
+               ring_pair_cb++;
+       }
+
+       vf_cb->dsaf_dev = dsaf_dev;
+       vf_cb->port_index = port_idx;
+       vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];
+
+       ae_handle->phy_if = vf_cb->mac_cb->phy_if;
+       ae_handle->phy_node = vf_cb->mac_cb->phy_node;
+       ae_handle->if_support = vf_cb->mac_cb->if_support;
+       ae_handle->port_type = vf_cb->mac_cb->mac_type;
+
+       return ae_handle;
+vf_id_err:
+       kfree(vf_cb);
+handle_err:
+       return ae_handle;
+}
+
+static void hns_ae_put_handle(struct hnae_handle *handle)
+{
+       struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+       int i;
+
+       /* release the ring pairs before freeing vf_cb, which embeds the handle */
+       for (i = 0; i < handle->q_num; i++)
+               hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+       vf_cb->mac_cb = NULL;
+
+       kfree(vf_cb);
+}
+
+static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
+{
+       int q_num = handle->q_num;
+       int i;
+
+       for (i = 0; i < q_num; i++)
+               hns_rcb_ring_enable_hw(handle->qs[i], val);
+}
+
+static void hns_ae_init_queue(struct hnae_queue *q)
+{
+       struct ring_pair_cb *ring =
+               container_of(q, struct ring_pair_cb, q);
+
+       hns_rcb_init_hw(ring);
+}
+
+static void hns_ae_fini_queue(struct hnae_queue *q)
+{
+       struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
+
+       if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+               hns_rcb_reset_ring_hw(q);
+}
+
+static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
+{
+       int ret;
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       if (!p || !is_valid_ether_addr((const u8 *)p)) {
+               dev_err(handle->owner_dev, "is not valid ether addr !\n");
+               return -EADDRNOTAVAIL;
+       }
+
+       ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
+       if (ret != 0) {
+               dev_err(handle->owner_dev,
+                       "set_mac_address fail, ret=%d!\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
+{
+       int ret;
+       char *mac_addr = (char *)addr;
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       assert(mac_cb);
+
+       if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+               return 0;
+
+       ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, ENABLE);
+       if (ret) {
+               dev_err(handle->owner_dev,
+                       "mac add mul_mac:%pM port%d  fail, ret = %#x!\n",
+                       mac_addr, mac_cb->mac_id, ret);
+               return ret;
+       }
+
+       ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
+                               mac_addr, ENABLE);
+       if (ret)
+               dev_err(handle->owner_dev,
+                       "mac add mul_mac:%pM port%d  fail, ret = %#x!\n",
+                       mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);
+
+       return ret;
+}
+
+static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
+{
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       return hns_mac_set_mtu(mac_cb, new_mtu);
+}
+
+static int hns_ae_start(struct hnae_handle *handle)
+{
+       int ret;
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       ret = hns_mac_vm_config_bc_en(mac_cb, 0, ENABLE);
+       if (ret)
+               return ret;
+
+       hns_ae_ring_enable_all(handle, 1);
+       msleep(100);
+
+       hns_mac_start(mac_cb);
+
+       return 0;
+}
+
+void hns_ae_stop(struct hnae_handle *handle)
+{
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       /* just clean tx fbd, no need to clean rx fbd */
+       hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);
+
+       msleep(20);
+
+       hns_mac_stop(mac_cb);
+
+       usleep_range(10000, 20000);
+
+       hns_ae_ring_enable_all(handle, 0);
+
+       (void)hns_mac_vm_config_bc_en(mac_cb, 0, DISABLE);
+}
+
+static void hns_ae_reset(struct hnae_handle *handle)
+{
+       struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+       if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
+               u8 ppe_common_index =
+                       vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
+
+               hns_mac_reset(vf_cb->mac_cb);
+               hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
+       }
+}
+
+void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+{
+       u32 flag;
+
+       if (is_tx_ring(ring))
+               flag = RCB_INT_FLAG_TX;
+       else
+               flag = RCB_INT_FLAG_RX;
+
+       hns_rcb_int_clr_hw(ring->q, flag);
+       hns_rcb_int_ctrl_hw(ring->q, flag, mask);
+}
+
+static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
+{
+       hns_rcb_start(queue, val);
+}
+
+static int hns_ae_get_link_status(struct hnae_handle *handle)
+{
+       u32 link_status;
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       hns_mac_get_link_status(mac_cb, &link_status);
+
+       return !!link_status;
+}
+
+static int hns_ae_get_mac_info(struct hnae_handle *handle,
+                              u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
+}
+
+static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
+                              int duplex)
+{
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       hns_mac_adjust_link(mac_cb, speed, duplex);
+}
+
+static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
+                                       u32 *uplimit)
+{
+       *uplimit = HNS_RCB_RING_MAX_PENDING_BD;
+}
+
+static void hns_ae_get_pauseparam(struct hnae_handle *handle,
+                                 u32 *auto_neg, u32 *rx_en, u32 *tx_en)
+{
+       assert(handle);
+
+       hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg);
+
+       hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en);
+}
+
+static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
+{
+       assert(handle);
+
+       return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
+}
+
+static int hns_ae_get_autoneg(struct hnae_handle *handle)
+{
+       u32     auto_neg;
+
+       assert(handle);
+
+       hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg);
+
+       return auto_neg;
+}
+
+static int hns_ae_set_pauseparam(struct hnae_handle *handle,
+                                u32 autoneg, u32 rx_en, u32 tx_en)
+{
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+       int ret;
+
+       ret = hns_mac_set_autoneg(mac_cb, autoneg);
+       if (ret)
+               return ret;
+
+       return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
+}
+
+static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
+                                     u32 *tx_usecs, u32 *rx_usecs)
+{
+       int port;
+
+       port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+       *tx_usecs = hns_rcb_get_coalesce_usecs(
+               hns_ae_get_dsaf_dev(handle->dev),
+               hns_dsaf_get_comm_idx_by_port(port));
+       *rx_usecs = hns_rcb_get_coalesce_usecs(
+               hns_ae_get_dsaf_dev(handle->dev),
+               hns_dsaf_get_comm_idx_by_port(port));
+}
+
+static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
+                                              u32 *tx_frames, u32 *rx_frames)
+{
+       int port;
+
+       assert(handle);
+
+       port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+       *tx_frames = hns_rcb_get_coalesced_frames(
+               hns_ae_get_dsaf_dev(handle->dev), port);
+       *rx_frames = hns_rcb_get_coalesced_frames(
+               hns_ae_get_dsaf_dev(handle->dev), port);
+}
+
+static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+                                     u32 timeout)
+{
+       int port;
+
+       assert(handle);
+
+       port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+       hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
+                                  port, timeout);
+}
+
+static int  hns_ae_set_coalesce_frames(struct hnae_handle *handle,
+                                      u32 coalesce_frames)
+{
+       int port;
+       int ret;
+
+       assert(handle);
+
+       port = hns_ae_map_eport_to_dport(handle->eport_id);
+
+       ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
+                                          port, coalesce_frames);
+       return ret;
+}
+
+void hns_ae_update_stats(struct hnae_handle *handle,
+                        struct net_device_stats *net_stats)
+{
+       int port;
+       int idx;
+       struct dsaf_device *dsaf_dev;
+       struct hns_mac_cb *mac_cb;
+       struct hns_ppe_cb *ppe_cb;
+       struct hnae_queue *queue;
+       struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+       u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
+       u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
+       u64 rx_missed_errors = 0;
+
+       dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+       if (!dsaf_dev)
+               return;
+       port = vf_cb->port_index;
+       ppe_cb = hns_get_ppe_cb(handle);
+       mac_cb = hns_get_mac_cb(handle);
+
+       for (idx = 0; idx < handle->q_num; idx++) {
+               queue = handle->qs[idx];
+               hns_rcb_update_stats(queue);
+
+               tx_bytes += queue->tx_ring.stats.tx_bytes;
+               tx_packets += queue->tx_ring.stats.tx_pkts;
+               rx_bytes += queue->rx_ring.stats.rx_bytes;
+               rx_packets += queue->rx_ring.stats.rx_pkts;
+
+               rx_errors += queue->rx_ring.stats.err_pkt_len
+                               + queue->rx_ring.stats.l2_err
+                               + queue->rx_ring.stats.l3l4_csum_err;
+       }
+
+       hns_ppe_update_stats(ppe_cb);
+       rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
+       tx_errors += ppe_cb->hw_stats.tx_err_checksum
+               + ppe_cb->hw_stats.tx_err_fifo_empty;
+
+       if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+               hns_dsaf_update_stats(dsaf_dev, port);
+               /* for port upline direction, i.e., rx. */
+               rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
+               rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
+               rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;
+
+               /* for port downline direction, i.e., tx. */
+               port = port + DSAF_PPE_INODE_BASE;
+               hns_dsaf_update_stats(dsaf_dev, port);
+               tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
+               tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
+               tx_dropped += dsaf_dev->hw_stats[port].crc_false;
+               tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
+               tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
+               tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
+       }
+
+       hns_mac_update_stats(mac_cb);
+       rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;
+
+       tx_errors += mac_cb->hw_stats.tx_bad_pkts
+               + mac_cb->hw_stats.tx_fragment_err
+               + mac_cb->hw_stats.tx_jabber_err
+               + mac_cb->hw_stats.tx_underrun_err
+               + mac_cb->hw_stats.tx_crc_err;
+
+       net_stats->tx_bytes = tx_bytes;
+       net_stats->tx_packets = tx_packets;
+       net_stats->rx_bytes = rx_bytes;
+       net_stats->rx_dropped = 0;
+       net_stats->rx_packets = rx_packets;
+       net_stats->rx_errors = rx_errors;
+       net_stats->tx_errors = tx_errors;
+       net_stats->tx_dropped = tx_dropped;
+       net_stats->rx_missed_errors = rx_missed_errors;
+       net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
+       net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
+       net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
+       net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
+       net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
+}
+
+void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+{
+       int idx;
+       struct hns_mac_cb *mac_cb;
+       struct hns_ppe_cb *ppe_cb;
+       u64 *p = data;
+       struct  hnae_vf_cb *vf_cb;
+
+       if (!handle || !data) {
+               pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
+               return;
+       }
+
+       vf_cb = hns_ae_get_vf_cb(handle);
+       mac_cb = hns_get_mac_cb(handle);
+       ppe_cb = hns_get_ppe_cb(handle);
+
+       for (idx = 0; idx < handle->q_num; idx++) {
+               hns_rcb_get_stats(handle->qs[idx], p);
+               p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
+       }
+
+       hns_ppe_get_stats(ppe_cb, p);
+       p += hns_ppe_get_sset_count((int)ETH_SS_STATS);
+
+       hns_mac_get_stats(mac_cb, p);
+       p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);
+
+       if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+               hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
+}
+
+void hns_ae_get_strings(struct hnae_handle *handle,
+                       u32 stringset, u8 *data)
+{
+       int port;
+       int idx;
+       struct hns_mac_cb *mac_cb;
+       struct hns_ppe_cb *ppe_cb;
+       u8 *p = data;
+       struct  hnae_vf_cb *vf_cb;
+
+       assert(handle);
+
+       vf_cb = hns_ae_get_vf_cb(handle);
+       port = vf_cb->port_index;
+       mac_cb = hns_get_mac_cb(handle);
+       ppe_cb = hns_get_ppe_cb(handle);
+
+       for (idx = 0; idx < handle->q_num; idx++) {
+               hns_rcb_get_strings(stringset, p, idx);
+               p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
+       }
+
+       hns_ppe_get_strings(ppe_cb, stringset, p);
+       p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
+
+       hns_mac_get_strings(mac_cb, stringset, p);
+       p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
+
+       if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+               hns_dsaf_get_strings(stringset, p, port);
+}
+
+int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+{
+       u32 sset_count = 0;
+       struct hns_mac_cb *mac_cb;
+
+       assert(handle);
+
+       mac_cb = hns_get_mac_cb(handle);
+
+       sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num;
+       sset_count += hns_ppe_get_sset_count(stringset);
+       sset_count += hns_mac_get_sset_count(mac_cb, stringset);
+
+       if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+               sset_count += hns_dsaf_get_sset_count(stringset);
+
+       return sset_count;
+}
+
+static int hns_ae_config_loopback(struct hnae_handle *handle,
+                                 enum hnae_loop loop, int en)
+{
+       int ret;
+       struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+       switch (loop) {
+       case MAC_INTERNALLOOP_SERDES:
+               ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
+               break;
+       case MAC_INTERNALLOOP_MAC:
+               ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       return ret;
+}
+
+void hns_ae_update_led_status(struct hnae_handle *handle)
+{
+       struct hns_mac_cb *mac_cb;
+
+       assert(handle);
+       mac_cb = hns_get_mac_cb(handle);
+       if (!mac_cb->cpld_vaddr)
+               return;
+       hns_set_led_opt(mac_cb);
+}
+
+int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+                          enum hnae_led_state status)
+{
+       struct hns_mac_cb *mac_cb;
+
+       assert(handle);
+
+       mac_cb = hns_get_mac_cb(handle);
+
+       return hns_cpld_led_set_id(mac_cb, status);
+}
+
+void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+{
+       u32 *p = data;
+       u32 rcb_com_idx;
+       int i;
+       struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+       struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+       hns_ppe_get_regs(ppe_cb, p);
+       p += hns_ppe_get_regs_count();
+
+       rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
+       hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
+       p += hns_rcb_get_common_regs_count();
+
+       for (i = 0; i < handle->q_num; i++) {
+               hns_rcb_get_ring_regs(handle->qs[i], p);
+               p += hns_rcb_get_ring_regs_count();
+       }
+
+       hns_mac_get_regs(vf_cb->mac_cb, p);
+       p += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+       if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+               hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
+}
+
+int hns_ae_get_regs_len(struct hnae_handle *handle)
+{
+       u32 total_num;
+       struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+       total_num = hns_ppe_get_regs_count();
+       total_num += hns_rcb_get_common_regs_count();
+       total_num += hns_rcb_get_ring_regs_count() * handle->q_num;
+       total_num += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+       if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+               total_num += hns_dsaf_get_regs_count();
+
+       return total_num;
+}
+
+static struct hnae_ae_ops hns_dsaf_ops = {
+       .get_handle = hns_ae_get_handle,
+       .put_handle = hns_ae_put_handle,
+       .init_queue = hns_ae_init_queue,
+       .fini_queue = hns_ae_fini_queue,
+       .start = hns_ae_start,
+       .stop = hns_ae_stop,
+       .reset = hns_ae_reset,
+       .toggle_ring_irq = hns_ae_toggle_ring_irq,
+       .toggle_queue_status = hns_ae_toggle_queue_status,
+       .get_status = hns_ae_get_link_status,
+       .get_info = hns_ae_get_mac_info,
+       .adjust_link = hns_ae_adjust_link,
+       .set_loopback = hns_ae_config_loopback,
+       .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
+       .get_pauseparam = hns_ae_get_pauseparam,
+       .set_autoneg = hns_ae_set_autoneg,
+       .get_autoneg = hns_ae_get_autoneg,
+       .set_pauseparam = hns_ae_set_pauseparam,
+       .get_coalesce_usecs = hns_ae_get_coalesce_usecs,
+       .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
+       .set_coalesce_usecs = hns_ae_set_coalesce_usecs,
+       .set_coalesce_frames = hns_ae_set_coalesce_frames,
+       .set_mac_addr = hns_ae_set_mac_address,
+       .set_mc_addr = hns_ae_set_multicast_one,
+       .set_mtu = hns_ae_set_mtu,
+       .update_stats = hns_ae_update_stats,
+       .get_stats = hns_ae_get_stats,
+       .get_strings = hns_ae_get_strings,
+       .get_sset_count = hns_ae_get_sset_count,
+       .update_led_status = hns_ae_update_led_status,
+       .set_led_id = hns_ae_cpld_set_led_id,
+       .get_regs = hns_ae_get_regs,
+       .get_regs_len = hns_ae_get_regs_len
+};
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
+{
+       struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+
+       ae_dev->ops = &hns_dsaf_ops;
+       ae_dev->dev = dsaf_dev->dev;
+
+       return hnae_ae_register(ae_dev, THIS_MODULE);
+}
+
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
+{
+       hnae_ae_unregister(&dsaf_dev->ae_dev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
new file mode 100644 (file)
index 0000000..b8517b0
--- /dev/null
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_gmac.h"
+
+static const struct mac_stats_string g_gmac_stats_string[] = {
+       {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+       {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
+       {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+       {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+       {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+       {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
+       {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+       {"gmac_rx_pkts_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+       {"gmac_rx_pkts_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+       {"gmac_rx_pkts_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+       {"gmac_rx_pkts_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+       {"gmac_rx_pkts_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+       {"gmac_rx_fcs_errors", MAC_STATS_FIELD_OFF(rx_fcs_err)},
+       {"gmac_rx_tagged", MAC_STATS_FIELD_OFF(rx_vlan_pkts)},
+       {"gmac_rx_data_err", MAC_STATS_FIELD_OFF(rx_data_err)},
+       {"gmac_rx_align_errors", MAC_STATS_FIELD_OFF(rx_align_err)},
+       {"gmac_rx_long_errors", MAC_STATS_FIELD_OFF(rx_oversize)},
+       {"gmac_rx_jabber_errors", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+       {"gmac_rx_pause_maccontrol", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+       {"gmac_rx_unknown_maccontrol", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+       {"gmac_rx_very_long_err", MAC_STATS_FIELD_OFF(rx_long_err)},
+       {"gmac_rx_runt_err", MAC_STATS_FIELD_OFF(rx_minto64)},
+       {"gmac_rx_short_err", MAC_STATS_FIELD_OFF(rx_under_min)},
+       {"gmac_rx_filt_pkt", MAC_STATS_FIELD_OFF(rx_filter_bytes)},
+       {"gmac_rx_octets_total_filt", MAC_STATS_FIELD_OFF(rx_filter_pkts)},
+       {"gmac_rx_overrun_cnt", MAC_STATS_FIELD_OFF(rx_fifo_overrun_err)},
+       {"gmac_rx_length_err", MAC_STATS_FIELD_OFF(rx_len_err)},
+       {"gmac_rx_fail_comma", MAC_STATS_FIELD_OFF(rx_comma_err)},
+
+       {"gmac_tx_octets_ok", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+       {"gmac_tx_octets_bad", MAC_STATS_FIELD_OFF(tx_bad_bytes)},
+       {"gmac_tx_uc_pkts", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+       {"gmac_tx_mc_pkts", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+       {"gmac_tx_bc_pkts", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+       {"gmac_tx_pkts_64octets", MAC_STATS_FIELD_OFF(tx_64bytes)},
+       {"gmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+       {"gmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+       {"gmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+       {"gmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+       {"gmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+       {"gmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+       {"gmac_tx_excessive_length_drop", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+       {"gmac_tx_underrun", MAC_STATS_FIELD_OFF(tx_underrun_err)},
+       {"gmac_tx_tagged", MAC_STATS_FIELD_OFF(tx_vlan)},
+       {"gmac_tx_crc_error", MAC_STATS_FIELD_OFF(tx_crc_err)},
+       {"gmac_tx_pause_frames", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}
+};
+
+static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       /* enable GE RX/TX */
+       if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+               dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
+
+       if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+               dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+}
+
+static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       /* disable GE RX/TX */
+       if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+               dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
+
+       if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+               dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+}
+
+/**
+ * hns_gmac_get_en - get port enable state
+ * @mac_drv: mac device
+ * @rx: rx enable
+ * @tx: tx enable
+ */
+static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       u32 porten;
+
+       porten = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+       *tx = dsaf_get_bit(porten, GMAC_PORT_TX_EN_B);
+       *rx = dsaf_get_bit(porten, GMAC_PORT_RX_EN_B);
+}
+
+static void hns_gmac_free(void *mac_drv)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct dsaf_device *dsaf_dev
+               = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+       u32 mac_id = drv->mac_id;
+
+       hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0);
+}
+
+static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       dsaf_set_dev_field(drv, GMAC_FC_TX_TIMER_REG, GMAC_FC_TX_TIMER_M,
+                          GMAC_FC_TX_TIMER_S, newval);
+}
+
+static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       *newval = dsaf_get_dev_field(drv, GMAC_FC_TX_TIMER_REG,
+                                    GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S);
+}
+
+static void hns_gmac_set_rx_auto_pause_frames(void *mac_drv, u32 newval)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       dsaf_set_dev_bit(drv, GMAC_PAUSE_EN_REG,
+                        GMAC_PAUSE_EN_RX_FDFC_B, !!newval);
+}
+
+static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       dsaf_set_dev_field(drv, GMAC_MAX_FRM_SIZE_REG, GMAC_MAX_FRM_SIZE_M,
+                          GMAC_MAX_FRM_SIZE_S, newval);
+
+       dsaf_set_dev_field(drv, GAMC_RX_MAX_FRAME, GMAC_MAX_FRM_SIZE_M,
+                          GMAC_MAX_FRM_SIZE_S, newval);
+}
+
+static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       dsaf_set_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+                        GMAC_TX_AN_EN_B, !!newval);
+}
+
+static void hns_gmac_tx_loop_pkt_dis(void *mac_drv)
+{
+       u32 tx_loop_pkt_pri;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       tx_loop_pkt_pri = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+       dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_EN_B, 1);
+       dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_HIG_PRI_B, 0);
+       dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri);
+}
+
+static void hns_gmac_set_duplex_type(void *mac_drv, u8 newval)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+                        GMAC_DUPLEX_TYPE_B, !!newval);
+}
+
+static void hns_gmac_get_duplex_type(void *mac_drv,
+                                    enum hns_gmac_duplex_mdoe *duplex_mode)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       *duplex_mode = (enum hns_gmac_duplex_mdoe)dsaf_get_dev_bit(
+               drv, GMAC_DUPLEX_TYPE_REG, GMAC_DUPLEX_TYPE_B);
+}
+
+static void hns_gmac_get_port_mode(void *mac_drv, enum hns_port_mode *port_mode)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       *port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+               drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+}
+
+static void hns_gmac_port_mode_get(void *mac_drv,
+                                  struct hns_gmac_port_mode_cfg *port_mode)
+{
+       u32 tx_ctrl;
+       u32 recv_ctrl;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       port_mode->port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+               drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+
+       tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+       recv_ctrl = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+
+       port_mode->max_frm_size =
+               dsaf_get_dev_field(drv, GMAC_MAX_FRM_SIZE_REG,
+                                  GMAC_MAX_FRM_SIZE_M, GMAC_MAX_FRM_SIZE_S);
+       port_mode->short_runts_thr =
+               dsaf_get_dev_field(drv, GMAC_SHORT_RUNTS_THR_REG,
+                                  GMAC_SHORT_RUNTS_THR_M,
+                                  GMAC_SHORT_RUNTS_THR_S);
+
+       port_mode->pad_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_PAD_EN_B);
+       port_mode->crc_add = dsaf_get_bit(tx_ctrl, GMAC_TX_CRC_ADD_B);
+       port_mode->an_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_AN_EN_B);
+
+       port_mode->runt_pkt_en =
+               dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_RUNT_PKT_EN_B);
+       port_mode->strip_pad_en =
+               dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_STRIP_PAD_EN_B);
+}
+
+static void hns_gmac_pause_frm_cfg(void *mac_drv, u32 rx_pause_en,
+                                  u32 tx_pause_en)
+{
+       u32 pause_en;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+       dsaf_set_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B, !!rx_pause_en);
+       dsaf_set_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B, !!tx_pause_en);
+       dsaf_write_dev(drv, GMAC_PAUSE_EN_REG, pause_en);
+}
+
+static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
+                                     u32 *tx_pause_en)
+{
+       u32 pause_en;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+
+       *rx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B);
+       *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
+}
+
+static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
+                               u32 full_duplex)
+{
+       u32 tx_ctrl;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+                        GMAC_DUPLEX_TYPE_B, !!full_duplex);
+
+       switch (speed) {
+       case MAC_SPEED_10:
+               dsaf_set_dev_field(
+                       drv, GMAC_PORT_MODE_REG,
+                       GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x6);
+               break;
+       case MAC_SPEED_100:
+               dsaf_set_dev_field(
+                       drv, GMAC_PORT_MODE_REG,
+                       GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x7);
+               break;
+       case MAC_SPEED_1000:
+               dsaf_set_dev_field(
+                       drv, GMAC_PORT_MODE_REG,
+                       GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x8);
+               break;
+       default:
+               dev_err(drv->dev,
+                       "hns_gmac_adjust_link fail, speed%d mac%d\n",
+                       speed, drv->mac_id);
+               return -EINVAL;
+       }
+
+       tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+       dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, 1);
+       dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, 1);
+       dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+
+       dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
+                        GMAC_MODE_CHANGE_EB_B, 1);
+
+       return 0;
+}
+
+static void hns_gmac_init(void *mac_drv)
+{
+       u32 port;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct dsaf_device *dsaf_dev
+               = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+       port = drv->mac_id;
+
+       hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0);
+       mdelay(10);
+       hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1);
+       mdelay(10);
+       hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+       hns_gmac_tx_loop_pkt_dis(mac_drv);
+}
+
+void hns_gmac_update_stats(void *mac_drv)
+{
+       struct mac_hw_stats *hw_stats = NULL;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       hw_stats = &drv->mac_cb->hw_stats;
+
+       /* RX */
+       hw_stats->rx_good_bytes
+               += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+       hw_stats->rx_bad_bytes
+               += dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+       hw_stats->rx_uc_pkts += dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+       hw_stats->rx_mc_pkts += dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+       hw_stats->rx_bc_pkts += dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+       hw_stats->rx_64bytes
+               += dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+       hw_stats->rx_65to127
+               += dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+       hw_stats->rx_128to255
+               += dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+       hw_stats->rx_256to511
+               += dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+       hw_stats->rx_512to1023
+               += dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+       hw_stats->rx_1024to1518
+               += dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+       hw_stats->rx_1519tomax
+               += dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+       hw_stats->rx_fcs_err += dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+       hw_stats->rx_vlan_pkts += dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+       hw_stats->rx_data_err += dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+       hw_stats->rx_align_err
+               += dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+       hw_stats->rx_oversize
+               += dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+       hw_stats->rx_jabber_err
+               += dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+       hw_stats->rx_pfc_tc0
+               += dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+       hw_stats->rx_unknown_ctrl
+               += dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+       hw_stats->rx_long_err
+               += dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+       hw_stats->rx_minto64
+               += dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+       hw_stats->rx_under_min
+               += dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+       hw_stats->rx_filter_pkts
+               += dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+       hw_stats->rx_filter_bytes
+               += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+       hw_stats->rx_fifo_overrun_err
+               += dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+       hw_stats->rx_len_err
+               += dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+       hw_stats->rx_comma_err
+               += dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+       /* TX */
+       hw_stats->tx_good_bytes
+               += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+       hw_stats->tx_bad_bytes
+               += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+       hw_stats->tx_uc_pkts += dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+       hw_stats->tx_mc_pkts += dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+       hw_stats->tx_bc_pkts += dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+       hw_stats->tx_64bytes
+               += dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+       hw_stats->tx_65to127
+               += dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+       hw_stats->tx_128to255
+               += dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+       hw_stats->tx_256to511
+               += dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+       hw_stats->tx_512to1023
+               += dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+       hw_stats->tx_1024to1518
+               += dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+       hw_stats->tx_1519tomax
+               += dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+       hw_stats->tx_jabber_err
+               += dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+       hw_stats->tx_underrun_err
+               += dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+       hw_stats->tx_vlan += dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+       hw_stats->tx_crc_err += dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+       hw_stats->tx_pfc_tc0
+               += dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+}
+
+static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       if (drv->mac_id >= DSAF_SERVICE_NW_NUM) {
+               u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+
+               u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+                       | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+               dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
+               dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG, high_val);
+       }
+}
+
+static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
+                                   u8 enable)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       switch (loop_mode) {
+       case MAC_INTERNALLOOP_MAC:
+               dsaf_set_dev_bit(drv, GMAC_LOOP_REG, GMAC_LP_REG_CF2MI_LP_EN_B,
+                                !!enable);
+               break;
+       default:
+               dev_err(drv->dev, "loop_mode error\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+       u32 tx_ctrl;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+       dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
+       dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
+       dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+}
+
+static void hns_gmac_get_id(void *mac_drv, u8 *mac_id)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       *mac_id = drv->mac_id;
+}
+
+static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+       enum hns_gmac_duplex_mdoe duplex;
+       enum hns_port_mode speed;
+       u32 rx_pause;
+       u32 tx_pause;
+       u32 rx;
+       u32 tx;
+       u16 fc_tx_timer;
+       struct hns_gmac_port_mode_cfg port_mode = { GMAC_10M_MII, 0 };
+
+       hns_gmac_port_mode_get(mac_drv, &port_mode);
+       mac_info->pad_and_crc_en = port_mode.crc_add && port_mode.pad_enable;
+       mac_info->auto_neg = port_mode.an_enable;
+
+       hns_gmac_get_tx_auto_pause_frames(mac_drv, &fc_tx_timer);
+       mac_info->tx_pause_time = fc_tx_timer;
+
+       hns_gmac_get_en(mac_drv, &rx, &tx);
+       mac_info->port_en = rx && tx;
+
+       hns_gmac_get_duplex_type(mac_drv, &duplex);
+       mac_info->duplex = duplex;
+
+       hns_gmac_get_port_mode(mac_drv, &speed);
+       switch (speed) {
+       case GMAC_10M_SGMII:
+               mac_info->speed = MAC_SPEED_10;
+               break;
+       case GMAC_100M_SGMII:
+               mac_info->speed = MAC_SPEED_100;
+               break;
+       case GMAC_1000M_SGMII:
+               mac_info->speed = MAC_SPEED_1000;
+               break;
+       default:
+               mac_info->speed = 0;
+               break;
+       }
+
+       hns_gmac_get_pausefrm_cfg(mac_drv, &rx_pause, &tx_pause);
+       mac_info->rx_pause_en = rx_pause;
+       mac_info->tx_pause_en = tx_pause;
+}
+
+static void hns_gmac_autoneg_stat(void *mac_drv, u32 *enable)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       *enable = dsaf_get_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+                                  GMAC_TX_AN_EN_B);
+}
+
+static void hns_gmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       *link_stat = dsaf_get_dev_bit(drv, GMAC_AN_NEG_STATE_REG,
+                                     GMAC_AN_NEG_STAT_RX_SYNC_OK_B);
+}
+
+static void hns_gmac_get_regs(void *mac_drv, void *data)
+{
+       u32 *regs = data;
+       int i;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       /* base config registers */
+       regs[0] = dsaf_read_dev(drv, GMAC_DUPLEX_TYPE_REG);
+       regs[1] = dsaf_read_dev(drv, GMAC_FD_FC_TYPE_REG);
+       regs[2] = dsaf_read_dev(drv, GMAC_FC_TX_TIMER_REG);
+       regs[3] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_LOW_REG);
+       regs[4] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_HIGH_REG);
+       regs[5] = dsaf_read_dev(drv, GMAC_IPG_TX_TIMER_REG);
+       regs[6] = dsaf_read_dev(drv, GMAC_PAUSE_THR_REG);
+       regs[7] = dsaf_read_dev(drv, GMAC_MAX_FRM_SIZE_REG);
+       regs[8] = dsaf_read_dev(drv, GMAC_PORT_MODE_REG);
+       regs[9] = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+       regs[10] = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+       regs[11] = dsaf_read_dev(drv, GMAC_SHORT_RUNTS_THR_REG);
+       regs[12] = dsaf_read_dev(drv, GMAC_AN_NEG_STATE_REG);
+       regs[13] = dsaf_read_dev(drv, GMAC_TX_LOCAL_PAGE_REG);
+       regs[14] = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+       regs[15] = dsaf_read_dev(drv, GMAC_REC_FILT_CONTROL_REG);
+       regs[16] = dsaf_read_dev(drv, GMAC_PTP_CONFIG_REG);
+
+       /* rx static registers */
+       regs[17] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+       regs[18] = dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+       regs[19] = dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+       regs[20] = dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+       regs[21] = dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+       regs[22] = dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+       regs[23] = dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+       regs[24] = dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+       regs[25] = dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+       regs[26] = dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+       regs[27] = dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+       regs[28] = dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+       regs[29] = dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+       regs[30] = dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+       regs[31] = dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+       regs[32] = dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+       regs[33] = dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+       regs[34] = dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+       regs[35] = dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+       regs[36] = dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+       regs[37] = dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+       regs[38] = dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+       regs[39] = dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+       regs[40] = dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+       regs[41] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+
+       /* tx static registers */
+       regs[42] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+       regs[43] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+       regs[44] = dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+       regs[45] = dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+       regs[46] = dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+       regs[47] = dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+       regs[48] = dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+       regs[49] = dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+       regs[50] = dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+       regs[51] = dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+       regs[52] = dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+       regs[53] = dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+       regs[54] = dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+       regs[55] = dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+       regs[56] = dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+       regs[57] = dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+       regs[58] = dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+
+       regs[59] = dsaf_read_dev(drv, GAMC_RX_MAX_FRAME);
+       regs[60] = dsaf_read_dev(drv, GMAC_LINE_LOOP_BACK_REG);
+       regs[61] = dsaf_read_dev(drv, GMAC_CF_CRC_STRIP_REG);
+       regs[62] = dsaf_read_dev(drv, GMAC_MODE_CHANGE_EN_REG);
+       regs[63] = dsaf_read_dev(drv, GMAC_SIXTEEN_BIT_CNTR_REG);
+       regs[64] = dsaf_read_dev(drv, GMAC_LD_LINK_COUNTER_REG);
+       regs[65] = dsaf_read_dev(drv, GMAC_LOOP_REG);
+       regs[66] = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+       regs[67] = dsaf_read_dev(drv, GMAC_VLAN_CODE_REG);
+       regs[68] = dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+       regs[69] = dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+       regs[70] = dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+       regs[71] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_0_REG);
+       regs[72] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_0_REG);
+       regs[73] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_1_REG);
+       regs[74] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_1_REG);
+       regs[75] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_2_REG);
+       regs[76] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+       regs[77] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_3_REG);
+       regs[78] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_3_REG);
+       regs[79] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_4_REG);
+       regs[80] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_4_REG);
+       regs[81] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_5_REG);
+       regs[82] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_5_REG);
+       regs[83] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_0_REG);
+       regs[84] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_0_REG);
+       regs[85] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_1_REG);
+       regs[86] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_1_REG);
+       regs[87] = dsaf_read_dev(drv, GMAC_MAC_SKIP_LEN_REG);
+       regs[88] = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+
+       /* mark end of mac regs */
+       for (i = 89; i < 96; i++)
+               regs[i] = 0xaaaaaaaa;
+}
+
+static void hns_gmac_get_stats(void *mac_drv, u64 *data)
+{
+       u32 i;
+       u64 *buf = data;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct mac_hw_stats *hw_stats = NULL;
+
+       hw_stats = &drv->mac_cb->hw_stats;
+
+       for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+               buf[i] = DSAF_STATS_READ(hw_stats,
+                       g_gmac_stats_string[i].offset);
+       }
+}
+
+static void hns_gmac_get_strings(u32 stringset, u8 *data)
+{
+       char *buff = (char *)data;
+       u32 i;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+               snprintf(buff, ETH_GSTRING_LEN, "%s",
+                        g_gmac_stats_string[i].desc);
+               buff = buff + ETH_GSTRING_LEN;
+       }
+}
+
+static int hns_gmac_get_sset_count(int stringset)
+{
+       if (stringset == ETH_SS_STATS)
+               return ARRAY_SIZE(g_gmac_stats_string);
+
+       return 0;
+}
+
+static int hns_gmac_get_regs_count(void)
+{
+       return ETH_GMAC_DUMP_NUM;
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+       struct mac_driver *mac_drv;
+
+       mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+       if (!mac_drv)
+               return NULL;
+
+       mac_drv->mac_init = hns_gmac_init;
+       mac_drv->mac_enable = hns_gmac_enable;
+       mac_drv->mac_disable = hns_gmac_disable;
+       mac_drv->mac_free = hns_gmac_free;
+       mac_drv->adjust_link = hns_gmac_adjust_link;
+       mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
+       mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
+       mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
+
+       mac_drv->mac_id = mac_param->mac_id;
+       mac_drv->mac_mode = mac_param->mac_mode;
+       mac_drv->io_base = mac_param->vaddr;
+       mac_drv->dev = mac_param->dev;
+       mac_drv->mac_cb = mac_cb;
+
+       mac_drv->set_mac_addr = hns_gmac_set_mac_addr;
+       mac_drv->set_an_mode = hns_gmac_config_an_mode;
+       mac_drv->config_loopback = hns_gmac_config_loopback;
+       mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
+       mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
+       mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
+       mac_drv->mac_get_id = hns_gmac_get_id;
+       mac_drv->get_info = hns_gmac_get_info;
+       mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
+       mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
+       mac_drv->get_link_status = hns_gmac_get_link_status;
+       mac_drv->get_regs = hns_gmac_get_regs;
+       mac_drv->get_regs_count = hns_gmac_get_regs_count;
+       mac_drv->get_ethtool_stats = hns_gmac_get_stats;
+       mac_drv->get_sset_count = hns_gmac_get_sset_count;
+       mac_drv->get_strings = hns_gmac_get_strings;
+       mac_drv->update_stats = hns_gmac_update_stats;
+
+       return (void *)mac_drv;
+}
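
The constructor above only fills a table of function pointers that the common MAC layer later calls through (see hns_mac_init_ex() and struct mac_driver further down), so GMAC and XGMAC back ends stay interchangeable. A minimal, self-contained C sketch of that ops-table pattern follows; every demo_* name is an illustrative stand-in, not part of this patch or of the real driver API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the driver's ops table (struct mac_driver). */
struct demo_mac_ops {
        void (*mac_init)(void *priv);
        void (*mac_enable)(void *priv);
        int  (*get_sset_count)(int stringset);
};

static void demo_gmac_init(void *priv)   { (void)priv; printf("gmac init\n"); }
static void demo_gmac_enable(void *priv) { (void)priv; printf("gmac enable\n"); }
static int  demo_gmac_sset_count(int ss) { (void)ss; return 3; /* placeholder */ }

/* Mirrors the shape of hns_gmac_config(): allocate the table, fill the
 * hooks, hand it back to the common MAC layer.
 */
static struct demo_mac_ops *demo_gmac_config(void)
{
        struct demo_mac_ops *ops = calloc(1, sizeof(*ops));

        if (!ops)
                return NULL;
        ops->mac_init = demo_gmac_init;
        ops->mac_enable = demo_gmac_enable;
        ops->get_sset_count = demo_gmac_sset_count;
        return ops;
}

int main(void)
{
        /* The common layer only ever calls through the table, the way
         * hns_mac_init_ex()/hns_mac_start() do below, so different MAC
         * back ends sit behind the same interface.
         */
        struct demo_mac_ops *ops = demo_gmac_config();

        if (!ops)
                return 1;
        ops->mac_init(NULL);
        ops->mac_enable(NULL);
        printf("stat count: %d\n", ops->get_sset_count(0));
        free(ops);
        return 0;
}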
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
new file mode 100644 (file)
index 0000000..44fe301
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_GMAC_H
+#define _HNS_GMAC_H
+
+#include "hns_dsaf_mac.h"
+
+enum hns_port_mode {
+       GMAC_10M_MII = 0,
+       GMAC_100M_MII,
+       GMAC_1000M_GMII,
+       GMAC_10M_RGMII,
+       GMAC_100M_RGMII,
+       GMAC_1000M_RGMII,
+       GMAC_10M_SGMII,
+       GMAC_100M_SGMII,
+       GMAC_1000M_SGMII,
+       GMAC_10000M_SGMII       /* 10GE */
+};
+
+enum hns_gmac_duplex_mdoe {
+       GMAC_HALF_DUPLEX_MODE = 0,
+       GMAC_FULL_DUPLEX_MODE
+};
+
+struct hns_gmac_port_mode_cfg {
+       enum hns_port_mode port_mode;
+       u32 max_frm_size;
+       u32 short_runts_thr;
+       u32 pad_enable;
+       u32 crc_add;
+       u32 an_enable;  /* auto-negotiation enable */
+       u32 runt_pkt_en;
+       u32 strip_pad_en;
+};
+
+#define ETH_GMAC_DUMP_NUM              96
+#endif                         /* _HNS_GMAC_H */
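
ETH_GMAC_DUMP_NUM above sizes the ethtool register dump that hns_gmac_get_regs() fills: 89 register words followed by 0xaaaaaaaa end markers up to 96 entries. A small, self-contained sketch of that layout (demo_* names are stand-ins, and demo_read_reg() fakes dsaf_read_dev()):

#include <stdio.h>
#include <stdint.h>

#define DEMO_GMAC_DUMP_NUM 96   /* mirrors ETH_GMAC_DUMP_NUM above */

/* Stand-in for dsaf_read_dev(): returns fake register contents. */
static uint32_t demo_read_reg(int idx)
{
        return 0x1000u + (uint32_t)idx;
}

/* Fill a dump buffer the way hns_gmac_get_regs() does: real register
 * values first, then 0xaaaaaaaa words marking the end of the MAC regs.
 */
static void demo_get_regs(uint32_t *regs)
{
        int i;

        for (i = 0; i < 89; i++)             /* indices 0..88: register values */
                regs[i] = demo_read_reg(i);
        for (; i < DEMO_GMAC_DUMP_NUM; i++)  /* indices 89..95: end marker */
                regs[i] = 0xaaaaaaaa;
}

int main(void)
{
        uint32_t regs[DEMO_GMAC_DUMP_NUM];

        demo_get_regs(regs);
        printf("regs[0]=0x%08x regs[95]=0x%08x\n", regs[0], regs[95]);
        return 0;
}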
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
new file mode 100644 (file)
index 0000000..a8bd27b
--- /dev/null
@@ -0,0 +1,900 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/phy_fixed.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_rcb.h"
+
+#define MAC_EN_FLAG_V          0xada0328
+
+static const u16 mac_phy_to_speed[] = {
+       [PHY_INTERFACE_MODE_MII] = MAC_SPEED_100,
+       [PHY_INTERFACE_MODE_GMII] = MAC_SPEED_1000,
+       [PHY_INTERFACE_MODE_SGMII] = MAC_SPEED_1000,
+       [PHY_INTERFACE_MODE_TBI] = MAC_SPEED_1000,
+       [PHY_INTERFACE_MODE_RMII] = MAC_SPEED_100,
+       [PHY_INTERFACE_MODE_RGMII] = MAC_SPEED_1000,
+       [PHY_INTERFACE_MODE_RGMII_ID] = MAC_SPEED_1000,
+       [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_SPEED_1000,
+       [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_SPEED_1000,
+       [PHY_INTERFACE_MODE_RTBI] = MAC_SPEED_1000,
+       [PHY_INTERFACE_MODE_XGMII] = MAC_SPEED_10000
+};
+
+static const enum mac_mode g_mac_mode_100[] = {
+       [PHY_INTERFACE_MODE_MII]        = MAC_MODE_MII_100,
+       [PHY_INTERFACE_MODE_RMII]   = MAC_MODE_RMII_100
+};
+
+static const enum mac_mode g_mac_mode_1000[] = {
+       [PHY_INTERFACE_MODE_GMII]   = MAC_MODE_GMII_1000,
+       [PHY_INTERFACE_MODE_SGMII]  = MAC_MODE_SGMII_1000,
+       [PHY_INTERFACE_MODE_TBI]        = MAC_MODE_TBI_1000,
+       [PHY_INTERFACE_MODE_RGMII]  = MAC_MODE_RGMII_1000,
+       [PHY_INTERFACE_MODE_RGMII_ID]   = MAC_MODE_RGMII_1000,
+       [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_MODE_RGMII_1000,
+       [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_MODE_RGMII_1000,
+       [PHY_INTERFACE_MODE_RTBI]   = MAC_MODE_RTBI_1000
+};
+
+static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb)
+{
+       switch (mac_cb->max_speed) {
+       case MAC_SPEED_100:
+               return g_mac_mode_100[mac_cb->phy_if];
+       case MAC_SPEED_1000:
+               return g_mac_mode_1000[mac_cb->phy_if];
+       case MAC_SPEED_10000:
+               return MAC_MODE_XGMII_10000;
+       default:
+               return MAC_MODE_MII_100;
+       }
+}
+
+static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
+{
+       switch (mac_cb->max_speed) {
+       case MAC_SPEED_100:
+               return g_mac_mode_100[mac_cb->phy_if];
+       case MAC_SPEED_1000:
+               return g_mac_mode_1000[mac_cb->phy_if];
+       case MAC_SPEED_10000:
+               return MAC_MODE_XGMII_10000;
+       default:
+               return MAC_MODE_MII_100;
+       }
+}
+
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+       if (!mac_cb->cpld_vaddr)
+               return -ENODEV;
+
+       *sfp_prsnt = !dsaf_read_b((u64)mac_cb->cpld_vaddr
+                                       + MAC_SFP_PORT_OFFSET);
+
+       return 0;
+}
+
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+{
+       struct mac_driver *mac_ctrl_drv;
+       int ret, sfp_prsnt;
+
+       mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       if (mac_ctrl_drv->get_link_status)
+               mac_ctrl_drv->get_link_status(mac_ctrl_drv, link_status);
+       else
+               *link_status = 0;
+
+       ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+       if (!ret)
+               *link_status = *link_status && sfp_prsnt;
+
+       mac_cb->link = *link_status;
+}
+
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+                         u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+       struct mac_driver *mac_ctrl_drv;
+       struct mac_info    info;
+
+       mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       if (!mac_ctrl_drv->get_info)
+               return -ENODEV;
+
+       mac_ctrl_drv->get_info(mac_ctrl_drv, &info);
+       if (auto_neg)
+               *auto_neg = info.auto_neg;
+       if (speed)
+               *speed = info.speed;
+       if (duplex)
+               *duplex = info.duplex;
+
+       return 0;
+}
+
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+       int ret;
+       struct mac_driver *mac_ctrl_drv;
+
+       mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+       mac_cb->speed = speed;
+       mac_cb->half_duplex = !duplex;
+       mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb);
+
+       if (mac_ctrl_drv->adjust_link) {
+               ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
+                       (enum mac_speed)speed, duplex);
+               if (ret) {
+                       dev_err(mac_cb->dev,
+                               "adjust_link failed,%s mac%d ret = %#x!\n",
+                               mac_cb->dsaf_dev->ae_dev.name,
+                               mac_cb->mac_id, ret);
+                       return;
+               }
+       }
+}
+
+/**
+ * hns_mac_get_inner_port_num - get mac table inner port number
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @port_num: port number
+ */
+static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+                                     u8 vmid, u8 *port_num)
+{
+       u8 tmp_port;
+       u32 comm_idx;
+
+       if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
+               if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) {
+                       dev_err(mac_cb->dev,
+                               "input invalid,%s mac%d vmid%d !\n",
+                               mac_cb->dsaf_dev->ae_dev.name,
+                               mac_cb->mac_id, vmid);
+                       return -EINVAL;
+               }
+       } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
+               if (mac_cb->mac_id <= DSAF_MAX_PORT_NUM_PER_CHIP) {
+                       dev_err(mac_cb->dev,
+                               "input invalid,%s mac%d vmid%d!\n",
+                               mac_cb->dsaf_dev->ae_dev.name,
+                               mac_cb->mac_id, vmid);
+                       return -EINVAL;
+               }
+       } else {
+               dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+                       mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+               return -EINVAL;
+       }
+
+       comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id);
+
+       if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) {
+               dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
+                       mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
+               return -EINVAL;
+       }
+
+       switch (mac_cb->dsaf_dev->dsaf_mode) {
+       case DSAF_MODE_ENABLE_FIX:
+               tmp_port = 0;
+               break;
+       case DSAF_MODE_DISABLE_FIX:
+               tmp_port = 0;
+               break;
+       case DSAF_MODE_ENABLE_0VM:
+       case DSAF_MODE_ENABLE_8VM:
+       case DSAF_MODE_ENABLE_16VM:
+       case DSAF_MODE_ENABLE_32VM:
+       case DSAF_MODE_ENABLE_128VM:
+       case DSAF_MODE_DISABLE_2PORT_8VM:
+       case DSAF_MODE_DISABLE_2PORT_16VM:
+       case DSAF_MODE_DISABLE_2PORT_64VM:
+       case DSAF_MODE_DISABLE_6PORT_0VM:
+       case DSAF_MODE_DISABLE_6PORT_2VM:
+       case DSAF_MODE_DISABLE_6PORT_4VM:
+       case DSAF_MODE_DISABLE_6PORT_16VM:
+               tmp_port = vmid;
+               break;
+       default:
+               dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+                       mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+               return -EINVAL;
+       }
+       tmp_port += DSAF_BASE_INNER_PORT_NUM;
+
+       *port_num = tmp_port;
+
+       return 0;
+}
+
+/**
+ * hns_mac_change_vf_addr - change vf mac address
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @addr: mac address
+ */
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
+                          u32 vmid, char *addr)
+{
+       int ret;
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+       struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+       struct dsaf_drv_mac_single_dest_entry mac_entry;
+       struct mac_entry_idx *old_entry;
+
+       old_entry = &mac_cb->addr_entry_idx[vmid];
+       if (dsaf_dev) {
+               memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+               mac_entry.in_vlan_id = old_entry->vlan_id;
+               mac_entry.in_port_num = mac_cb->mac_id;
+               ret = hns_mac_get_inner_port_num(mac_cb, (u8)vmid,
+                                                &mac_entry.port_num);
+               if (ret)
+                       return ret;
+
+               if ((old_entry->valid != 0) &&
+                   (memcmp(old_entry->addr,
+                   addr, sizeof(mac_entry.addr)) != 0)) {
+                       ret = hns_dsaf_del_mac_entry(dsaf_dev,
+                                                    old_entry->vlan_id,
+                                                    mac_cb->mac_id,
+                                                    old_entry->addr);
+                       if (ret)
+                               return ret;
+               }
+
+               ret = hns_dsaf_set_mac_uc_entry(dsaf_dev, &mac_entry);
+               if (ret)
+                       return ret;
+       }
+
+       if ((mac_ctrl_drv->set_mac_addr) && (vmid == 0))
+               mac_ctrl_drv->set_mac_addr(mac_cb->priv.mac, addr);
+
+       memcpy(old_entry->addr, addr, sizeof(old_entry->addr));
+       old_entry->valid = 1;
+       return 0;
+}
+
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+                     u32 port_num, char *addr, u8 en)
+{
+       int ret;
+       struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+       struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+       if (dsaf_dev && addr) {
+               memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+               mac_entry.in_vlan_id = 0;/*vlan_id;*/
+               mac_entry.in_port_num = mac_cb->mac_id;
+               mac_entry.port_num = port_num;
+
+               if (en == DISABLE)
+                       ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+               else
+                       ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+               if (ret) {
+                       dev_err(dsaf_dev->dev,
+                               "set mac mc port failed,%s mac%d ret = %#x!\n",
+                               mac_cb->dsaf_dev->ae_dev.name,
+                               mac_cb->mac_id, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * hns_mac_del_mac - delete a mac address from the dsaf table; the same
+ *                   address can't be deleted twice
+ * @mac_cb: mac device
+ * @vfn: vf number
+ * @mac: mac address
+ * return status
+ */
+int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
+{
+       struct mac_entry_idx *old_mac;
+       struct dsaf_device *dsaf_dev;
+       u32 ret;
+
+       dsaf_dev = mac_cb->dsaf_dev;
+
+       if (vfn < DSAF_MAX_VM_NUM) {
+               old_mac = &mac_cb->addr_entry_idx[vfn];
+       } else {
+               dev_err(mac_cb->dev,
+                       "vf queue is too large,%s mac%d queue = %#x!\n",
+                       mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
+               return -EINVAL;
+       }
+
+       if (dsaf_dev) {
+               ret = hns_dsaf_del_mac_entry(dsaf_dev, old_mac->vlan_id,
+                                            mac_cb->mac_id, old_mac->addr);
+               if (ret)
+                       return ret;
+
+               if (memcmp(old_mac->addr, mac, sizeof(old_mac->addr)) == 0)
+                       old_mac->valid = 0;
+       }
+
+       return 0;
+}
+
+static void hns_mac_param_get(struct mac_params *param,
+                             struct hns_mac_cb *mac_cb)
+{
+       param->vaddr = (void *)mac_cb->vaddr;
+       param->mac_mode = hns_get_enet_interface(mac_cb);
+       memcpy(param->addr, mac_cb->addr_entry_idx[0].addr,
+              MAC_NUM_OCTETS_PER_ADDR);
+       param->mac_id = mac_cb->mac_id;
+       param->dev = mac_cb->dev;
+}
+
+/**
+ * hns_mac_port_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @port_num: port number
+ * @vlan_id: vlan id
+ * @en: enable
+ * return 0 - success, negative - fail
+ */
+static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
+                                    u32 port_num, u16 vlan_id, u8 en)
+{
+       int ret;
+       struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+       u8 addr[MAC_NUM_OCTETS_PER_ADDR]
+               = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+       /* directly return ok in debug network mode */
+       if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+               return 0;
+
+       if (dsaf_dev) {
+               memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+               mac_entry.in_vlan_id = vlan_id;
+               mac_entry.in_port_num = mac_cb->mac_id;
+               mac_entry.port_num = port_num;
+
+               if (en == DISABLE)
+                       ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+               else
+                       ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * hns_mac_vm_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @en: enable
+ * return 0 - success, negative - fail
+ */
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en)
+{
+       int ret;
+       struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+       u8 port_num;
+       u8 addr[MAC_NUM_OCTETS_PER_ADDR]
+               = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       struct mac_entry_idx *uc_mac_entry;
+       struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+       if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+               return 0;
+
+       uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
+
+       if (dsaf_dev)  {
+               memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+               mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
+               mac_entry.in_port_num = mac_cb->mac_id;
+               ret = hns_mac_get_inner_port_num(mac_cb, vmid, &port_num);
+               if (ret)
+                       return ret;
+               mac_entry.port_num = port_num;
+
+               if (en == DISABLE)
+                       ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+               else
+                       ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+               return ret;
+       }
+
+       return 0;
+}
+
+void hns_mac_reset(struct hns_mac_cb *mac_cb)
+{
+       struct mac_driver *drv;
+
+       drv = hns_mac_get_drv(mac_cb);
+
+       drv->mac_init(drv);
+
+       if (drv->config_max_frame_length)
+               drv->config_max_frame_length(drv, mac_cb->max_frm);
+
+       if (drv->set_tx_auto_pause_frames)
+               drv->set_tx_auto_pause_frames(drv, mac_cb->tx_pause_frm_time);
+
+       if (drv->set_an_mode)
+               drv->set_an_mode(drv, 1);
+
+       if (drv->mac_pausefrm_cfg) {
+               if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+                       drv->mac_pausefrm_cfg(drv, 0, 0);
+               else /* mac rx pause must stay disabled; dsaf pfc handles it instead */
+                       drv->mac_pausefrm_cfg(drv, 0, 1);
+       }
+}
+
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
+{
+       struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+       u32 buf_size = mac_cb->dsaf_dev->buf_size;
+       u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+       if ((new_mtu < MAC_MIN_MTU) || (new_frm > MAC_MAX_MTU) ||
+           (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size))
+               return -EINVAL;
+
+       if (!drv->config_max_frame_length)
+               return -ECHILD;
+
+       /* adjust max frame to be at least the size of a standard frame */
+       if (new_frm < (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN))
+               new_frm = (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN);
+
+       drv->config_max_frame_length(drv, new_frm);
+
+       mac_cb->max_frm = new_frm;
+
+       return 0;
+}
+
+void hns_mac_start(struct hns_mac_cb *mac_cb)
+{
+       struct mac_driver *mac_drv = hns_mac_get_drv(mac_cb);
+
+       /* for virtualization */
+       if (mac_drv->mac_en_flg == MAC_EN_FLAG_V) {
+               /* mac already enabled: just count one more virtual user */
+               mac_drv->virt_dev_num += 1;
+               return;
+       }
+
+       if (mac_drv->mac_enable) {
+               mac_drv->mac_enable(mac_cb->priv.mac, MAC_COMM_MODE_RX_AND_TX);
+               mac_drv->mac_en_flg = MAC_EN_FLAG_V;
+       }
+}
+
+void hns_mac_stop(struct hns_mac_cb *mac_cb)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       /*modified for virtualization */
+       if (mac_ctrl_drv->virt_dev_num > 0) {
+               mac_ctrl_drv->virt_dev_num -= 1;
+               if (mac_ctrl_drv->virt_dev_num > 0)
+                       return;
+       }
+
+       if (mac_ctrl_drv->mac_disable)
+               mac_ctrl_drv->mac_disable(mac_cb->priv.mac,
+                       MAC_COMM_MODE_RX_AND_TX);
+
+       mac_ctrl_drv->mac_en_flg = 0;
+       mac_cb->link = 0;
+       cpld_led_reset(mac_cb);
+}
+
+/**
+ * hns_mac_get_autoneg - get autonegotiation state
+ * @mac_cb: mac control block
+ * @auto_neg: autonegotiation state, enabled or not
+ */
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       if (mac_ctrl_drv->autoneg_stat)
+               mac_ctrl_drv->autoneg_stat(mac_ctrl_drv, auto_neg);
+       else
+               *auto_neg = 0;
+}
+
+/**
+ * hns_mac_get_pauseparam - get rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable status
+ * @tx_en: tx enable status
+ */
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       if (mac_ctrl_drv->get_pause_enable) {
+               mac_ctrl_drv->get_pause_enable(mac_ctrl_drv, rx_en, tx_en);
+       } else {
+               *rx_en = 0;
+               *tx_en = 0;
+       }
+
+       /* Due to a chip defect, the service mac's rx pause CAN'T be enabled.
+        * We always report rx pause as true (1), because the DSAF handles rx
+        * pause frames on behalf of the service mac, so rx pause frames are
+        * still effectively supported.
+        */
+       if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+               *rx_en = 1;
+}
+
+/**
+ * hns_mac_set_autoneg - enable or disable autonegotiation
+ * @mac_cb: mac control block
+ * @enable: enable or not
+ * return 0 - success, negative - fail
+ */
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) {
+               dev_err(mac_cb->dev, "enable autoneg is not allowed!");
+               return -ENOTSUPP;
+       }
+
+       if (mac_ctrl_drv->set_an_mode)
+               mac_ctrl_drv->set_an_mode(mac_ctrl_drv, enable);
+
+       return 0;
+}
+
+/**
+ * hns_mac_set_pauseparam - set rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable or not
+ * @tx_en: tx enable or not
+ * return 0 - success, negative - fail
+ */
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+               if (!rx_en) {
+                       dev_err(mac_cb->dev, "disable rx_pause is not allowed!");
+                       return -EINVAL;
+               }
+       } else if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+               if (tx_en || rx_en) {
+                       dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!");
+                       return -EINVAL;
+               }
+       } else {
+               dev_err(mac_cb->dev, "Unsupport this operation!");
+               return -EINVAL;
+       }
+
+       if (mac_ctrl_drv->mac_pausefrm_cfg)
+               mac_ctrl_drv->mac_pausefrm_cfg(mac_ctrl_drv, rx_en, tx_en);
+
+       return 0;
+}
+
+/**
+ * hns_mac_init_ex - mac init
+ * @mac_cb: mac control block
+ * return 0 - success, negative - fail
+ */
+static int hns_mac_init_ex(struct hns_mac_cb *mac_cb)
+{
+       int ret;
+       struct mac_params param;
+       struct mac_driver *drv;
+
+       hns_dsaf_fix_mac_mode(mac_cb);
+
+       memset(&param, 0, sizeof(struct mac_params));
+       hns_mac_param_get(&param, mac_cb);
+
+       if (MAC_SPEED_FROM_MODE(param.mac_mode) < MAC_SPEED_10000)
+               drv = (struct mac_driver *)hns_gmac_config(mac_cb, &param);
+       else
+               drv = (struct mac_driver *)hns_xgmac_config(mac_cb, &param);
+
+       if (!drv)
+               return -ENOMEM;
+
+       mac_cb->priv.mac = (void *)drv;
+       hns_mac_reset(mac_cb);
+
+       hns_mac_adjust_link(mac_cb, mac_cb->speed, !mac_cb->half_duplex);
+
+       ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, ENABLE);
+       if (ret)
+               goto free_mac_drv;
+
+       return 0;
+
+free_mac_drv:
+       drv->mac_free(mac_cb->priv.mac);
+       mac_cb->priv.mac = NULL;
+
+       return ret;
+}
+
+/**
+ * hns_mac_get_info - get mac information from device node
+ * @mac_cb: mac device
+ * @np: device node
+ * @mac_mode_idx: mac mode index
+ */
+static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
+                            struct device_node *np, u32 mac_mode_idx)
+{
+       mac_cb->link = false;
+       mac_cb->half_duplex = false;
+       mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
+       mac_cb->max_speed = mac_cb->speed;
+
+       if (mac_cb->phy_if == PHY_INTERFACE_MODE_SGMII) {
+               mac_cb->if_support = MAC_GMAC_SUPPORTED;
+               mac_cb->if_support |= SUPPORTED_1000baseT_Full;
+       } else if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII) {
+               mac_cb->if_support = SUPPORTED_10000baseR_FEC;
+               mac_cb->if_support |= SUPPORTED_10000baseKR_Full;
+       }
+
+       mac_cb->max_frm = MAC_DEFAULT_MTU;
+       mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+
+       /* Get the rest of the PHY information */
+       mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id);
+       if (mac_cb->phy_node)
+               dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+                       mac_cb->mac_id, mac_cb->phy_node->name);
+}
+
+/**
+ * hns_mac_get_mode - get mac mode
+ * @phy_if: phy interface
+ * return 0 - gmac, 1 - xgmac, negative - fail
+ */
+static int hns_mac_get_mode(phy_interface_t phy_if)
+{
+       switch (phy_if) {
+       case PHY_INTERFACE_MODE_SGMII:
+               return MAC_GMAC_IDX;
+       case PHY_INTERFACE_MODE_XGMII:
+               return MAC_XGMAC_IDX;
+       default:
+               return -EINVAL;
+       }
+}
+
+u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+                             struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+{
+       u8 __iomem *base = dsaf_dev->io_base;
+       int mac_id = mac_cb->mac_id;
+
+       if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+               return base + 0x40000 + mac_id * 0x4000 -
+                               mac_mode_idx * 0x20000;
+       else
+               return mac_cb->serdes_vaddr + 0x1000
+                       + (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000;
+}
+
+/**
+ * hns_mac_get_cfg - get mac cfg from dtb or acpi table
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_idx: mac index
+ * return 0 - success, negative - fail
+ */
+int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
+{
+       int ret;
+       u32 mac_mode_idx;
+       struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx];
+
+       mac_cb->dsaf_dev = dsaf_dev;
+       mac_cb->dev = dsaf_dev->dev;
+       mac_cb->mac_id = mac_idx;
+
+       mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base;
+       mac_cb->serdes_vaddr = dsaf_dev->sds_base;
+
+       if (dsaf_dev->cpld_base &&
+           mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+               mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
+                       mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
+       mac_cb->sfp_prsnt = 0;
+       mac_cb->txpkt_for_led = 0;
+       mac_cb->rxpkt_for_led = 0;
+
+       if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+               mac_cb->mac_type = HNAE_PORT_SERVICE;
+       else
+               mac_cb->mac_type = HNAE_PORT_DEBUG;
+
+       mac_cb->phy_if = hns_mac_get_phy_if(mac_cb);
+
+       ret = hns_mac_get_mode(mac_cb->phy_if);
+       if (ret < 0) {
+               dev_err(dsaf_dev->dev,
+                       "hns_mac_get_mode failed,mac%d ret = %#x!\n",
+                       mac_cb->mac_id, ret);
+               return ret;
+       }
+       mac_mode_idx = (u32)ret;
+
+       hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx);
+
+       mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
+
+       return 0;
+}
+
+/**
+ * hns_mac_init - init mac
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success, negative - fail
+ */
+int hns_mac_init(struct dsaf_device *dsaf_dev)
+{
+       int i;
+       int ret;
+       size_t size;
+       struct hns_mac_cb *mac_cb;
+
+       size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP;
+       dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL);
+       if (!dsaf_dev->mac_cb)
+               return -ENOMEM;
+
+       for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) {
+               ret = hns_mac_get_cfg(dsaf_dev, i);
+               if (ret)
+                       goto free_mac_cb;
+
+               mac_cb = &dsaf_dev->mac_cb[i];
+               ret = hns_mac_init_ex(mac_cb);
+               if (ret)
+                       goto free_mac_cb;
+       }
+
+       return 0;
+
+free_mac_cb:
+       dsaf_dev->mac_cb = NULL;
+
+       return ret;
+}
+
+void hns_mac_uninit(struct dsaf_device *dsaf_dev)
+{
+       cpld_led_reset(dsaf_dev->mac_cb);
+       dsaf_dev->mac_cb = NULL;
+}
+
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+                               enum hnae_loop loop, int en)
+{
+       int ret;
+       struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+       if (drv->config_loopback)
+               ret = drv->config_loopback(drv, loop, en);
+       else
+               ret = -ENOTSUPP;
+
+       return ret;
+}
+
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->update_stats(mac_ctrl_drv);
+}
+
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data);
+}
+
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb,
+                        int stringset, u8 *data)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->get_strings(stringset, data);
+}
+
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       return mac_ctrl_drv->get_sset_count(stringset);
+}
+
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       return mac_ctrl_drv->get_regs_count();
+}
+
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->get_regs(mac_ctrl_drv, data);
+}
+
+void hns_set_led_opt(struct hns_mac_cb *mac_cb)
+{
+       int nic_data = 0;
+       int txpkts, rxpkts;
+
+       txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts;
+       rxpkts = mac_cb->rxpkt_for_led - mac_cb->hw_stats.rx_good_pkts;
+       if (txpkts || rxpkts)
+               nic_data = 1;
+       else
+               nic_data = 0;
+       mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
+       mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
+       hns_cpld_set_led(mac_cb, (int)mac_cb->link,
+                        mac_cb->speed, nic_data);
+}
+
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+                       enum hnae_led_state status)
+{
+       if (!mac_cb || !mac_cb->cpld_vaddr)
+               return 0;
+
+       return cpld_set_led_id(mac_cb, status);
+}
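
hns_mac_set_mtu() above converts an MTU into the max frame length programmed into the MAC: header, FCS and one VLAN tag are added, the result is clamped to at least a standard tagged frame, and out-of-range values are rejected. A minimal, self-contained sketch of the same arithmetic (DEMO_* constants stand in for the kernel and driver macros; the ring buf_size check is omitted):

#include <stdio.h>

/* Values of ETH_HLEN, ETH_FCS_LEN, VLAN_HLEN, ETH_FRAME_LEN from the
 * kernel headers, plus the driver limits defined in hns_dsaf_mac.h.
 */
#define DEMO_ETH_HLEN       14
#define DEMO_ETH_FCS_LEN    4
#define DEMO_VLAN_HLEN      4
#define DEMO_ETH_FRAME_LEN  1514
#define DEMO_MAC_MIN_MTU    68
#define DEMO_MAC_MAX_MTU    9600

/* Mirrors the arithmetic in hns_mac_set_mtu(): translate an MTU into the
 * max frame length written to the MAC. Returns the frame length, or -1
 * for an invalid MTU.
 */
static int demo_mtu_to_frame_len(int new_mtu)
{
        int new_frm = new_mtu + DEMO_ETH_HLEN + DEMO_ETH_FCS_LEN + DEMO_VLAN_HLEN;

        if (new_mtu < DEMO_MAC_MIN_MTU || new_frm > DEMO_MAC_MAX_MTU)
                return -1;

        /* never program less than one standard VLAN-tagged frame */
        if (new_frm < DEMO_ETH_FRAME_LEN + DEMO_ETH_FCS_LEN + DEMO_VLAN_HLEN)
                new_frm = DEMO_ETH_FRAME_LEN + DEMO_ETH_FCS_LEN + DEMO_VLAN_HLEN;

        return new_frm;
}

int main(void)
{
        printf("MTU 1500 -> frame %d\n", demo_mtu_to_frame_len(1500)); /* 1522 */
        printf("MTU 9000 -> frame %d\n", demo_mtu_to_frame_len(9000)); /* 9022 */
        printf("MTU 9600 -> frame %d\n", demo_mtu_to_frame_len(9600)); /* -1: too big */
        return 0;
}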
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
new file mode 100644 (file)
index 0000000..7da95a7
--- /dev/null
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_MAC_H
+#define _HNS_DSAF_MAC_H
+
+#include <linux/phy.h>
+#include <linux/kernel.h>
+#include <linux/if_vlan.h>
+#include "hns_dsaf_main.h"
+
+struct dsaf_device;
+
+#define MAC_GMAC_SUPPORTED \
+       (SUPPORTED_10baseT_Half \
+       | SUPPORTED_10baseT_Full \
+       | SUPPORTED_100baseT_Half \
+       | SUPPORTED_100baseT_Full \
+       | SUPPORTED_Autoneg)
+
+#define MAC_DEFAULT_MTU        (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define MAC_MAX_MTU            9600
+#define MAC_MIN_MTU            68
+
+#define MAC_DEFAULT_PAUSE_TIME 0xff
+
+#define MAC_GMAC_IDX 0
+#define MAC_XGMAC_IDX 1
+
+#define ETH_STATIC_REG  1
+#define ETH_DUMP_REG    5
+/* check mac addr broadcast */
+#define MAC_IS_BROADCAST(p)    ((*(p) == 0xff) && (*((p) + 1) == 0xff) && \
+               (*((p) + 2) == 0xff) &&  (*((p) + 3) == 0xff)  && \
+               (*((p) + 4) == 0xff) && (*((p) + 5) == 0xff))
+
+/* check mac addr is 01-00-5e-xx-xx-xx*/
+#define MAC_IS_L3_MULTICAST(p) ((*((p) + 0) == 0x01) && \
+                       (*((p) + 1) == 0x00)   && \
+                       (*((p) + 2) == 0x5e))
+
+/*check the mac addr is 0 in all bit*/
+#define MAC_IS_ALL_ZEROS(p)   ((*(p) == 0) && (*((p) + 1) == 0) && \
+       (*((p) + 2) == 0) && (*((p) + 3) == 0) && \
+       (*((p) + 4) == 0) && (*((p) + 5) == 0))
+
+/*check mac addr multicast*/
+#define MAC_IS_MULTICAST(p)    ((*((u8 *)((p) + 0)) & 0x01) ? (1) : (0))
+
+/**< Number of octets (8-bit bytes) in an ethernet address */
+#define MAC_NUM_OCTETS_PER_ADDR 6
+
+struct mac_priv {
+       void *mac;
+};
+
+/* net speed */
+enum mac_speed {
+       MAC_SPEED_10    = 10,      /**< 10 Mbps */
+       MAC_SPEED_100   = 100,    /**< 100 Mbps */
+       MAC_SPEED_1000  = 1000,  /**< 1000 Mbps = 1 Gbps */
+       MAC_SPEED_10000 = 10000  /**< 10000 Mbps = 10 Gbps */
+};
+
+/*mac interface keyword        */
+enum mac_intf {
+       MAC_IF_NONE  = 0x00000000,   /**< interface not valid */
+       MAC_IF_MII   = 0x00010000,   /**< MII interface */
+       MAC_IF_RMII  = 0x00020000,   /**< RMII interface */
+       MAC_IF_SMII  = 0x00030000,   /**< SMII interface */
+       MAC_IF_GMII  = 0x00040000,   /**< GMII interface */
+       MAC_IF_RGMII = 0x00050000,   /**< RGMII interface */
+       MAC_IF_TBI   = 0x00060000,   /**< TBI interface */
+       MAC_IF_RTBI  = 0x00070000,   /**< RTBI interface */
+       MAC_IF_SGMII = 0x00080000,   /**< SGMII interface */
+       MAC_IF_XGMII = 0x00090000,   /**< XGMII interface */
+       MAC_IF_QSGMII = 0x000a0000      /**< QSGMII interface */
+};
+
+/*mac mode */
+enum mac_mode {
+       /**< Invalid Ethernet mode */
+       MAC_MODE_INVALID         = 0,
+       /**<    10 Mbps MII   */
+       MAC_MODE_MII_10   = (MAC_IF_MII   | MAC_SPEED_10),
+       /**<   100 Mbps MII   */
+       MAC_MODE_MII_100         = (MAC_IF_MII   | MAC_SPEED_100),
+       /**<    10 Mbps RMII  */
+       MAC_MODE_RMII_10         = (MAC_IF_RMII  | MAC_SPEED_10),
+       /**<   100 Mbps RMII  */
+       MAC_MODE_RMII_100       = (MAC_IF_RMII  | MAC_SPEED_100),
+       /**<    10 Mbps SMII  */
+       MAC_MODE_SMII_10         = (MAC_IF_SMII  | MAC_SPEED_10),
+       /**<   100 Mbps SMII  */
+       MAC_MODE_SMII_100       = (MAC_IF_SMII  | MAC_SPEED_100),
+       /**<  1000 Mbps GMII  */
+       MAC_MODE_GMII_1000   = (MAC_IF_GMII  | MAC_SPEED_1000),
+       /**<    10 Mbps RGMII */
+       MAC_MODE_RGMII_10       = (MAC_IF_RGMII | MAC_SPEED_10),
+       /**<   100 Mbps RGMII */
+       MAC_MODE_RGMII_100   = (MAC_IF_RGMII | MAC_SPEED_100),
+       /**<  1000 Mbps RGMII */
+       MAC_MODE_RGMII_1000  = (MAC_IF_RGMII | MAC_SPEED_1000),
+       /**<  1000 Mbps TBI   */
+       MAC_MODE_TBI_1000       = (MAC_IF_TBI   | MAC_SPEED_1000),
+       /**<  1000 Mbps RTBI  */
+       MAC_MODE_RTBI_1000   = (MAC_IF_RTBI  | MAC_SPEED_1000),
+       /**<    10 Mbps SGMII */
+       MAC_MODE_SGMII_10       = (MAC_IF_SGMII | MAC_SPEED_10),
+       /**<   100 Mbps SGMII */
+       MAC_MODE_SGMII_100   = (MAC_IF_SGMII | MAC_SPEED_100),
+       /**<  1000 Mbps SGMII */
+       MAC_MODE_SGMII_1000  = (MAC_IF_SGMII | MAC_SPEED_1000),
+       /**< 10000 Mbps XGMII */
+       MAC_MODE_XGMII_10000 = (MAC_IF_XGMII | MAC_SPEED_10000),
+       /**<  1000 Mbps QSGMII */
+       MAC_MODE_QSGMII_1000 = (MAC_IF_QSGMII | MAC_SPEED_1000)
+};
+
+/*mac communicate mode*/
+enum mac_commom_mode {
+       MAC_COMM_MODE_NONE        = 0, /**< No transmit/receive communication */
+       MAC_COMM_MODE_RX                = 1, /**< Only receive communication */
+       MAC_COMM_MODE_TX                = 2, /**< Only transmit communication */
+       MAC_COMM_MODE_RX_AND_TX = 3  /**< Both tx and rx communication */
+};
+
+/*mac statistics */
+struct mac_statistics {
+       u64  stat_pkts64; /* r-10G tr-DT 64 byte frame counter */
+       u64  stat_pkts65to127; /* r-10G 65 to 127 byte frame counter */
+       u64  stat_pkts128to255; /* r-10G 128 to 255 byte frame counter */
+       u64  stat_pkts256to511; /*r-10G 256 to 511 byte frame counter */
+       u64  stat_pkts512to1023;/* r-10G 512 to 1023 byte frame counter */
+       u64  stat_pkts1024to1518; /* r-10G 1024 to 1518 byte frame counter */
+       u64  stat_pkts1519to1522; /* r-10G 1519 to 1522 byte good frame count*/
+       /* Total number of packets that were less than 64 octets */
+       /*                      long with a wrong CRC.*/
+       u64  stat_fragments;
+       /* Total number of packets longer than valid maximum length octets */
+       u64  stat_jabbers;
+       /* number of dropped packets due to internal errors of */
+       /*                      the MAC Client. */
+       u64  stat_drop_events;
+       /* Incremented when frames of correct length but with */
+       /*                      CRC error are received.*/
+       u64  stat_crc_align_errors;
+       /* Total number of packets that were less than 64 octets */
+       /*                      long with a good CRC.*/
+       u64  stat_undersize_pkts;
+       u64  stat_oversize_pkts;  /**< T,B.D*/
+
+       u64  stat_rx_pause;                /**< Pause MAC Control received */
+       u64  stat_tx_pause;                /**< Pause MAC Control sent */
+
+       u64  in_octets;         /**< Total number of byte received. */
+       u64  in_pkts;           /* Total number of packets received.*/
+       u64  in_mcast_pkts;     /* Total number of multicast frame received */
+       u64  in_bcast_pkts;     /* Total number of broadcast frame received */
+                               /* Frames received, but discarded due to */
+                               /* problems within the MAC RX. */
+       u64  in_discards;
+       u64  in_errors;         /* Number of frames received with error: */
+                               /*      - FIFO Overflow Error */
+                               /*      - CRC Error */
+                               /*      - Frame Too Long Error */
+                               /*      - Alignment Error */
+       u64  out_octets; /*Total number of byte sent. */
+       u64  out_pkts;  /**< Total number of packets sent .*/
+       u64  out_mcast_pkts; /* Total number of multicast frame sent */
+       u64  out_bcast_pkts; /* Total number of broadcast frames sent */
+       /* Frames received, but discarded due to problems within */
+       /*                      the MAC TX N/A!.*/
+       u64  out_discards;
+       u64  out_errors;        /*Number of frames transmitted with error: */
+                       /*      - FIFO Overflow Error */
+                       /*      - FIFO Underflow Error */
+                       /*       - Other */
+};
+
+/* mac parameter struct: the mac gets its parameters from the nic or dsaf at init time */
+struct mac_params {
+       char addr[MAC_NUM_OCTETS_PER_ADDR];
+       void *vaddr; /*virtual address*/
+       struct device *dev;
+       u8 mac_id;
+       /**< Ethernet operation mode (MAC-PHY interface and speed) */
+       enum mac_mode mac_mode;
+};
+
+struct mac_info {
+       u16 speed;      /* The forced speed (lower bits) in Mbps. Please use
+                        * ethtool_cmd_speed()/_set() to access it.
+                        */
+       u8 duplex;              /* Duplex, half or full */
+       u8 auto_neg;    /* Enable or disable autonegotiation */
+       enum hnae_loop loop_mode;
+       u8 tx_pause_en;
+       u8 tx_pause_time;
+       u8 rx_pause_en;
+       u8 pad_and_crc_en;
+       u8 promiscuous_en;
+       u8 port_en;      /*port enable*/
+};
+
+struct mac_entry_idx {
+       u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+       u16 vlan_id:12;
+       u16 valid:1;
+       u16 qos:3;
+};
+
+struct mac_hw_stats {
+       u64 rx_good_pkts;       /* only for xgmac */
+       u64 rx_good_bytes;
+       u64 rx_total_pkts;      /* only for xgmac */
+       u64 rx_total_bytes;     /* only for xgmac */
+       u64 rx_bad_bytes;       /* only for gmac */
+       u64 rx_uc_pkts;
+       u64 rx_mc_pkts;
+       u64 rx_bc_pkts;
+       u64 rx_fragment_err;    /* only for xgmac */
+       u64 rx_undersize;       /* only for xgmac */
+       u64 rx_under_min;
+       u64 rx_minto64;         /* only for gmac */
+       u64 rx_64bytes;
+       u64 rx_65to127;
+       u64 rx_128to255;
+       u64 rx_256to511;
+       u64 rx_512to1023;
+       u64 rx_1024to1518;
+       u64 rx_1519tomax;
+       u64 rx_1519tomax_good;  /* only for xgmac */
+       u64 rx_oversize;
+       u64 rx_jabber_err;
+       u64 rx_fcs_err;
+       u64 rx_vlan_pkts;       /* only for gmac */
+       u64 rx_data_err;        /* only for gmac */
+       u64 rx_align_err;       /* only for gmac */
+       u64 rx_long_err;        /* only for gmac */
+       u64 rx_pfc_tc0;
+       u64 rx_pfc_tc1;         /* only for xgmac */
+       u64 rx_pfc_tc2;         /* only for xgmac */
+       u64 rx_pfc_tc3;         /* only for xgmac */
+       u64 rx_pfc_tc4;         /* only for xgmac */
+       u64 rx_pfc_tc5;         /* only for xgmac */
+       u64 rx_pfc_tc6;         /* only for xgmac */
+       u64 rx_pfc_tc7;         /* only for xgmac */
+       u64 rx_unknown_ctrl;
+       u64 rx_filter_pkts;     /* only for gmac */
+       u64 rx_filter_bytes;    /* only for gmac */
+       u64 rx_fifo_overrun_err;/* only for gmac */
+       u64 rx_len_err;         /* only for gmac */
+       u64 rx_comma_err;       /* only for gmac */
+       u64 rx_symbol_err;      /* only for xgmac */
+       u64 tx_good_to_sw;      /* only for xgmac */
+       u64 tx_bad_to_sw;       /* only for xgmac */
+       u64 rx_1731_pkts;       /* only for xgmac */
+
+       u64 tx_good_bytes;
+       u64 tx_good_pkts;       /* only for xgmac */
+       u64 tx_total_bytes;     /* only for xgmac */
+       u64 tx_total_pkts;      /* only for xgmac */
+       u64 tx_bad_bytes;       /* only for gmac */
+       u64 tx_bad_pkts;        /* only for xgmac */
+       u64 tx_uc_pkts;
+       u64 tx_mc_pkts;
+       u64 tx_bc_pkts;
+       u64 tx_undersize;       /* only for xgmac */
+       u64 tx_fragment_err;    /* only for xgmac */
+       u64 tx_under_min_pkts;  /* only for gmac */
+       u64 tx_64bytes;
+       u64 tx_65to127;
+       u64 tx_128to255;
+       u64 tx_256to511;
+       u64 tx_512to1023;
+       u64 tx_1024to1518;
+       u64 tx_1519tomax;
+       u64 tx_1519tomax_good;  /* only for xgmac */
+       u64 tx_oversize;        /* only for xgmac */
+       u64 tx_jabber_err;
+       u64 tx_underrun_err;    /* only for gmac */
+       u64 tx_vlan;            /* only for gmac */
+       u64 tx_crc_err;         /* only for gmac */
+       u64 tx_pfc_tc0;
+       u64 tx_pfc_tc1;         /* only for xgmac */
+       u64 tx_pfc_tc2;         /* only for xgmac */
+       u64 tx_pfc_tc3;         /* only for xgmac */
+       u64 tx_pfc_tc4;         /* only for xgmac */
+       u64 tx_pfc_tc5;         /* only for xgmac */
+       u64 tx_pfc_tc6;         /* only for xgmac */
+       u64 tx_pfc_tc7;         /* only for xgmac */
+       u64 tx_ctrl;            /* only for xgmac */
+       u64 tx_1731_pkts;       /* only for xgmac */
+       u64 tx_1588_pkts;       /* only for xgmac */
+       u64 rx_good_from_sw;    /* only for xgmac */
+       u64 rx_bad_from_sw;     /* only for xgmac */
+};
+
+struct hns_mac_cb {
+       struct device *dev;
+       struct dsaf_device *dsaf_dev;
+       struct mac_priv priv;
+       u8 __iomem *vaddr;
+       u8 __iomem *cpld_vaddr;
+       u8 __iomem *sys_ctl_vaddr;
+       u8 __iomem *serdes_vaddr;
+       struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
+       u8 sfp_prsnt;
+       u8 cpld_led_value;
+       u8 mac_id;
+
+       u8 link;
+       u8 half_duplex;
+       u16 speed;
+       u16 max_speed;
+       u16 max_frm;
+       u16 tx_pause_frm_time;
+       u32 if_support;
+       u64 txpkt_for_led;
+       u64 rxpkt_for_led;
+       enum hnae_port_type mac_type;
+       phy_interface_t phy_if;
+       enum hnae_loop loop_mode;
+
+       struct device_node *phy_node;
+
+       struct mac_hw_stats hw_stats;
+};
+
+struct mac_driver {
+       /*init Mac when init nic or dsaf*/
+       void (*mac_init)(void *mac_drv);
+       /*remove mac when remove nic or dsaf*/
+       void (*mac_free)(void *mac_drv);
+       /*enable mac when enable nic or dsaf*/
+       void (*mac_enable)(void *mac_drv, enum mac_commom_mode mode);
+       /*disable mac when disable nic or dsaf*/
+       void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
+       /* config mac address*/
+       void (*set_mac_addr)(void *mac_drv,     char *mac_addr);
+       /* adjust mac mode of port, including speed and duplex */
+       int (*adjust_link)(void *mac_drv, enum mac_speed speed,
+                          u32 full_duplex);
+       /* config autonegotiation mode of port */
+       void (*set_an_mode)(void *mac_drv, u8 enable);
+       /* config loopback mode */
+       int (*config_loopback)(void *mac_drv, enum hnae_loop loop_mode,
+                              u8 enable);
+       /* config mtu*/
+       void (*config_max_frame_length)(void *mac_drv, u16 newval);
+       /*config PAD and CRC enable */
+       void (*config_pad_and_crc)(void *mac_drv, u8 newval);
+       /* config duplex mode*/
+       void (*config_half_duplex)(void *mac_drv, u8 newval);
+       /* config tx pause time; if pause_time is zero, tx pause is disabled */
+       void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time);
+       /*config rx pause enable*/
+       void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
+       /* config rx mode for promiscuous*/
+       int (*set_promiscuous)(void *mac_drv, u8 enable);
+       /* get mac id */
+       void (*mac_get_id)(void *mac_drv, u8 *mac_id);
+       void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
+
+       void (*autoneg_stat)(void *mac_drv, u32 *enable);
+       int (*set_pause_enable)(void *mac_drv, u32 rx_en, u32 tx_en);
+       void (*get_pause_enable)(void *mac_drv, u32 *rx_en, u32 *tx_en);
+       void (*get_link_status)(void *mac_drv, u32 *link_stat);
+       /* get the important regs */
+       void (*get_regs)(void *mac_drv, void *data);
+       int (*get_regs_count)(void);
+       /* get strings name for ethtool statistic */
+       void (*get_strings)(u32 stringset, u8 *data);
+       /* get the number of strings*/
+       int (*get_sset_count)(int stringset);
+
+       /* get the statistic by ethtools*/
+       void (*get_ethtool_stats)(void *mac_drv, u64 *data);
+
+       /* get mac information */
+       void (*get_info)(void *mac_drv, struct mac_info *mac_info);
+
+       void (*update_stats)(void *mac_drv);
+
+       enum mac_mode mac_mode;
+       u8 mac_id;
+       struct hns_mac_cb *mac_cb;
+       void __iomem *io_base;
+       unsigned int mac_en_flg;/* the mac must not be enabled twice */
+       unsigned int virt_dev_num;
+       struct device *dev;
+};
+
+struct mac_stats_string {
+       char desc[64];
+       unsigned long offset;
+};
+
+#define MAC_MAKE_MODE(interface, speed) (enum mac_mode)((interface) | (speed))
+#define MAC_INTERFACE_FROM_MODE(mode) (enum mac_intf)((mode) & 0xFFFF0000)
+#define MAC_SPEED_FROM_MODE(mode) (enum mac_speed)((mode) & 0x0000FFFF)
+#define MAC_STATS_FIELD_OFF(field) (offsetof(struct mac_hw_stats, field))
+
+static inline struct mac_driver *hns_mac_get_drv(
+       const struct hns_mac_cb *mac_cb)
+{
+       return (struct mac_driver *)(mac_cb->priv.mac);
+}
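MAC_STATS_FIELD_OFF() and struct mac_stats_string above are the usual offsetof-based ethtool statistics table: each row pairs a display name with the byte offset of one mac_hw_stats counter, so a single generic loop can export any subset of counters. A minimal sketch of that pattern (the table contents and the function name below are illustrative, not taken from this patch):

static const struct mac_stats_string g_example_stats[] = {
        {"rx_good_bytes", MAC_STATS_FIELD_OFF(rx_good_bytes)},
        {"tx_good_bytes", MAC_STATS_FIELD_OFF(tx_good_bytes)},
};

static void example_copy_stats(const struct mac_hw_stats *hw, u64 *data)
{
        unsigned int i;

        /* for each row, copy the counter found at the recorded offset */
        for (i = 0; i < ARRAY_SIZE(g_example_stats); i++)
                data[i] = *(const u64 *)((const char *)hw +
                                         g_example_stats[i].offset);
}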
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb,
+                     struct mac_params *mac_param);
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
+                      struct mac_params *mac_param);
+
+int hns_mac_init(struct dsaf_device *dsaf_dev);
+void mac_adjust_link(struct net_device *net_dev);
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb,        u32 *link_status);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+                     u32 port_num, char *addr, u8 en);
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, u8 en);
+void hns_mac_start(struct hns_mac_cb *mac_cb);
+void hns_mac_stop(struct hns_mac_cb *mac_cb);
+int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac);
+void hns_mac_uninit(struct dsaf_device *dsaf_dev);
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
+void hns_mac_reset(struct hns_mac_cb *mac_cb);
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+                         u8 *auto_neg, u16 *speed, u8 *duplex);
+phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb);
+int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+                               enum hnae_loop loop, int en);
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data);
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data);
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset);
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data);
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
+void hns_set_led_opt(struct hns_mac_cb *mac_cb);
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+                       enum hnae_led_state status);
+#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
new file mode 100644 (file)
index 0000000..26ae6c6
--- /dev/null
@@ -0,0 +1,2445 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/device.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_rcb.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_mac.h"
+
+const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+       [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
+       [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
+       [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+};
+
+int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+{
+       int ret, i;
+       u32 desc_num;
+       u32 buf_size;
+       const char *name, *mode_str;
+       struct device_node *np = dsaf_dev->dev->of_node;
+
+       if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v2"))
+               dsaf_dev->dsaf_ver = AE_VERSION_2;
+       else
+               dsaf_dev->dsaf_ver = AE_VERSION_1;
+
+       ret = of_property_read_string(np, "dsa_name", &name);
+       if (ret) {
+               dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret);
+               return ret;
+       }
+       strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE);
+       dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0';
+
+       ret = of_property_read_string(np, "mode", &mode_str);
+       if (ret) {
+               dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
+               return ret;
+       }
+       for (i = 0; i < DSAF_MODE_MAX; i++) {
+               if (g_dsaf_mode_match[i] &&
+                   !strcmp(mode_str, g_dsaf_mode_match[i]))
+                       break;
+       }
+       if (i >= DSAF_MODE_MAX ||
+           i == DSAF_MODE_INVALID || i == DSAF_MODE_ENABLE) {
+               dev_err(dsaf_dev->dev,
+                       "%s prs mode str fail!\n", dsaf_dev->ae_dev.name);
+               return -EINVAL;
+       }
+       dsaf_dev->dsaf_mode = (enum dsaf_mode)i;
+
+       if (dsaf_dev->dsaf_mode > DSAF_MODE_ENABLE)
+               dsaf_dev->dsaf_en = HRD_DSAF_NO_DSAF_MODE;
+       else
+               dsaf_dev->dsaf_en = HRD_DSAF_MODE;
+
+       if ((i == DSAF_MODE_ENABLE_16VM) ||
+           (i == DSAF_MODE_DISABLE_2PORT_8VM) ||
+           (i == DSAF_MODE_DISABLE_6PORT_2VM))
+               dsaf_dev->dsaf_tc_mode = HRD_DSAF_8TC_MODE;
+       else
+               dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
+
+       dsaf_dev->sc_base = of_iomap(np, 0);
+       if (!dsaf_dev->sc_base) {
+               dev_err(dsaf_dev->dev,
+                       "%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name);
+               ret = -ENOMEM;
+               goto unmap_base_addr;
+       }
+
+       dsaf_dev->sds_base = of_iomap(np, 1);
+       if (!dsaf_dev->sds_base) {
+               dev_err(dsaf_dev->dev,
+                       "%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name);
+               ret = -ENOMEM;
+               goto unmap_base_addr;
+       }
+
+       dsaf_dev->ppe_base = of_iomap(np, 2);
+       if (!dsaf_dev->ppe_base) {
+               dev_err(dsaf_dev->dev,
+                       "%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name);
+               ret = -ENOMEM;
+               goto unmap_base_addr;
+       }
+
+       dsaf_dev->io_base = of_iomap(np, 3);
+       if (!dsaf_dev->io_base) {
+               dev_err(dsaf_dev->dev,
+                       "%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name);
+               ret = -ENOMEM;
+               goto unmap_base_addr;
+       }
+
+       dsaf_dev->cpld_base = of_iomap(np, 4);
+       if (!dsaf_dev->cpld_base)
+               dev_dbg(dsaf_dev->dev, "NO CPLD ADDR");
+
+       ret = of_property_read_u32(np, "desc-num", &desc_num);
+       if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
+           desc_num > HNS_DSAF_MAX_DESC_CNT) {
+               dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
+                       desc_num, ret);
+               ret = ret ? ret : -EINVAL;
+               goto unmap_base_addr;
+       }
+       dsaf_dev->desc_num = desc_num;
+
+       ret = of_property_read_u32(np, "buf-size", &buf_size);
+       if (ret < 0) {
+               dev_err(dsaf_dev->dev,
+                       "get buf-size fail, ret=%d!\n", ret);
+               goto unmap_base_addr;
+       }
+       dsaf_dev->buf_size = buf_size;
+
+       dsaf_dev->buf_size_type = hns_rcb_buf_size2type(buf_size);
+       if (dsaf_dev->buf_size_type < 0) {
+               dev_err(dsaf_dev->dev,
+                       "buf_size(%d) is wrong!\n", buf_size);
+               ret = -EINVAL;
+               goto unmap_base_addr;
+       }
+
+       if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL)))
+               dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
+       else
+               dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
+
+       return 0;
+
+unmap_base_addr:
+       if (dsaf_dev->io_base)
+               iounmap(dsaf_dev->io_base);
+       if (dsaf_dev->ppe_base)
+               iounmap(dsaf_dev->ppe_base);
+       if (dsaf_dev->sds_base)
+               iounmap(dsaf_dev->sds_base);
+       if (dsaf_dev->sc_base)
+               iounmap(dsaf_dev->sc_base);
+       if (dsaf_dev->cpld_base)
+               iounmap(dsaf_dev->cpld_base);
+       return ret;
+}
+
+static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
+{
+       if (dsaf_dev->io_base)
+               iounmap(dsaf_dev->io_base);
+
+       if (dsaf_dev->ppe_base)
+               iounmap(dsaf_dev->ppe_base);
+
+       if (dsaf_dev->sds_base)
+               iounmap(dsaf_dev->sds_base);
+
+       if (dsaf_dev->sc_base)
+               iounmap(dsaf_dev->sc_base);
+
+       if (dsaf_dev->cpld_base)
+               iounmap(dsaf_dev->cpld_base);
+}
+
+/**
+ * hns_dsaf_sbm_link_sram_init_en - config dsaf_sbm_init_en
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
+{
+       dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_SBM_INIT_S, 1);
+}
+
+/**
+ * hns_dsaf_reg_cnt_clr_ce - config hns_dsaf_reg_cnt_clr_ce
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @reg_cnt_clr_ce: config value
+ */
+static void
+hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
+{
+       dsaf_set_dev_bit(dsaf_dev, DSAF_DSA_REG_CNT_CLR_CE_REG,
+                        DSAF_CNT_CLR_CE_S, reg_cnt_clr_ce);
+}
+
+/**
+ * hns_dsaf_ppe_qid_cfg - config ppe qid
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @qid_cfg: qid config value
+ */
+static void
+hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg)
+{
+       u32 i;
+
+       for (i = 0; i < DSAF_COMM_CHN; i++) {
+               dsaf_set_dev_field(dsaf_dev,
+                                  DSAF_PPE_QID_CFG_0_REG + 0x0004 * i,
+                                  DSAF_PPE_QID_CFG_M, DSAF_PPE_QID_CFG_S,
+                                  qid_cfg);
+       }
+}
+
+/**
+ * hns_dsaf_sw_port_type_cfg - cfg sw type
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @port_type: sw port type
+ */
+static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
+                                     enum dsaf_sw_port_type port_type)
+{
+       u32 i;
+
+       for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+               dsaf_set_dev_field(dsaf_dev,
+                                  DSAF_SW_PORT_TYPE_0_REG + 0x0004 * i,
+                                  DSAF_SW_PORT_TYPE_M, DSAF_SW_PORT_TYPE_S,
+                                  port_type);
+       }
+}
+
+/**
+ * hns_dsaf_stp_port_type_cfg - cfg stp type
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @port_type: stp port type
+ */
+static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
+                                      enum dsaf_stp_port_type port_type)
+{
+       u32 i;
+
+       for (i = 0; i < DSAF_COMM_CHN; i++) {
+               dsaf_set_dev_field(dsaf_dev,
+                                  DSAF_STP_PORT_TYPE_0_REG + 0x0004 * i,
+                                  DSAF_STP_PORT_TYPE_M, DSAF_STP_PORT_TYPE_S,
+                                  port_type);
+       }
+}
+
+/**
+ * hns_dsaf_sbm_cfg - config sbm
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
+{
+       u32 o_sbm_cfg;
+       u32 i;
+
+       for (i = 0; i < DSAF_SBM_NUM; i++) {
+               o_sbm_cfg = dsaf_read_dev(dsaf_dev,
+                                         DSAF_SBM_CFG_REG_0_REG + 0x80 * i);
+               dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1);
+               dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_SHCUT_EN_S, 0);
+               dsaf_write_dev(dsaf_dev,
+                              DSAF_SBM_CFG_REG_0_REG + 0x80 * i, o_sbm_cfg);
+       }
+}
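The loop above shows the register-access idiom used throughout this file: read a register with dsaf_read_dev(), update a bit or field in the local copy with dsaf_set_bit()/dsaf_set_field(), and write it back with dsaf_write_dev(). A rough sketch of that read-modify-write shape, assuming plain readl()/writel() semantics for the accessors (the real helpers are defined in hns_dsaf_reg.h, which is not part of this hunk):

/* illustrative only; the name and accessor details are assumptions */
static inline void example_rmw_bit(u8 __iomem *io_base, u32 reg,
                                   u32 bit, u32 val)
{
        u32 v = readl(io_base + reg);   /* like dsaf_read_dev() */

        if (val)
                v |= BIT(bit);          /* like dsaf_set_bit(v, bit, 1) */
        else
                v &= ~BIT(bit);         /* like dsaf_set_bit(v, bit, 0) */

        writel(v, io_base + reg);       /* like dsaf_write_dev() */
}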
+
+/**
+ * hns_dsaf_sbm_cfg_mib_en - enable sbm mib
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
+{
+       u32 sbm_cfg_mib_en;
+       u32 i;
+       u32 reg;
+       u32 read_cnt;
+
+       for (i = 0; i < DSAF_SBM_NUM; i++) {
+               reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+               dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1);
+       }
+
+       /* wait for all sbm enables to finish */
+       for (i = 0; i < DSAF_SBM_NUM; i++) {
+               read_cnt = 0;
+               reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+               do {
+                       udelay(1);
+                       sbm_cfg_mib_en = dsaf_get_dev_bit(
+                                       dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S);
+                       read_cnt++;
+               } while (sbm_cfg_mib_en == 0 &&
+                       read_cnt < DSAF_CFG_READ_CNT);
+
+               if (sbm_cfg_mib_en == 0) {
+                       dev_err(dsaf_dev->dev,
+                               "sbm_cfg_mib_en fail,%s,sbm_num=%d\n",
+                               dsaf_dev->ae_dev.name, i);
+                       return -ENODEV;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * hns_dsaf_sbm_bp_wl_cfg - config sbm back-pressure waterline
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
+{
+       u32 o_sbm_bp_cfg0;
+       u32 o_sbm_bp_cfg1;
+       u32 o_sbm_bp_cfg2;
+       u32 o_sbm_bp_cfg3;
+       u32 reg;
+       u32 i;
+
+       /* XGE */
+       for (i = 0; i < DSAF_XGE_NUM; i++) {
+               reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i;
+               o_sbm_bp_cfg0 = dsaf_read_dev(dsaf_dev, reg);
+               dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M,
+                              DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512);
+               dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M,
+                              DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0);
+               dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M,
+                              DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0);
+               dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg0);
+
+               reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i;
+               o_sbm_bp_cfg1 = dsaf_read_dev(dsaf_dev, reg);
+               dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M,
+                              DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0);
+               dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M,
+                              DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0);
+               dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg1);
+
+               reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i;
+               o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+               dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+                              DSAF_SBM_CFG2_SET_BUF_NUM_S, 104);
+               dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+                              DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128);
+               dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+
+               reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i;
+               o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg);
+               dsaf_set_field(o_sbm_bp_cfg3,
+                              DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+                              DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110);
+               dsaf_set_field(o_sbm_bp_cfg3,
+                              DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+                              DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160);
+               dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3);
+
+               /* for no enable pfc mode */
+               reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i;
+               o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg);
+               dsaf_set_field(o_sbm_bp_cfg3,
+                              DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+                              DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128);
+               dsaf_set_field(o_sbm_bp_cfg3,
+                              DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+                              DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192);
+               dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3);
+       }
+
+       /* PPE */
+       for (i = 0; i < DSAF_COMM_CHN; i++) {
+               reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+               o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+               dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+                              DSAF_SBM_CFG2_SET_BUF_NUM_S, 10);
+               dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+                              DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12);
+               dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+       }
+
+       /* RoCEE */
+       for (i = 0; i < DSAF_COMM_CHN; i++) {
+               reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
+               o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg);
+               dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+                              DSAF_SBM_CFG2_SET_BUF_NUM_S, 2);
+               dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+                              DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4);
+               dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2);
+       }
+}
+
+/**
+ * hns_dsaf_voq_bp_all_thrd_cfg - config voq back-pressure thresholds
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev)
+{
+       u32 voq_bp_all_thrd;
+       u32 i;
+
+       for (i = 0; i < DSAF_VOQ_NUM; i++) {
+               voq_bp_all_thrd = dsaf_read_dev(
+                       dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i);
+               if (i < DSAF_XGE_NUM) {
+                       dsaf_set_field(voq_bp_all_thrd,
+                                      DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+                                      DSAF_VOQ_BP_ALL_DOWNTHRD_S, 930);
+                       dsaf_set_field(voq_bp_all_thrd,
+                                      DSAF_VOQ_BP_ALL_UPTHRD_M,
+                                      DSAF_VOQ_BP_ALL_UPTHRD_S, 950);
+               } else {
+                       dsaf_set_field(voq_bp_all_thrd,
+                                      DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+                                      DSAF_VOQ_BP_ALL_DOWNTHRD_S, 220);
+                       dsaf_set_field(voq_bp_all_thrd,
+                                      DSAF_VOQ_BP_ALL_UPTHRD_M,
+                                      DSAF_VOQ_BP_ALL_UPTHRD_S, 230);
+               }
+               dsaf_write_dev(
+                       dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i,
+                       voq_bp_all_thrd);
+       }
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_cfg - write tcam data registers
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @ptbl_tcam_data: tcam data to write
+ */
+static void hns_dsaf_tbl_tcam_data_cfg(
+       struct dsaf_device *dsaf_dev,
+       struct dsaf_tbl_tcam_data *ptbl_tcam_data)
+{
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_LOW_0_REG,
+                      ptbl_tcam_data->tbl_tcam_data_low);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_HIGH_0_REG,
+                      ptbl_tcam_data->tbl_tcam_data_high);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_cfg - write tcam mcast config
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mcast: mcast config to write
+ */
+static void hns_dsaf_tbl_tcam_mcast_cfg(
+       struct dsaf_device *dsaf_dev,
+       struct dsaf_tbl_tcam_mcast_cfg *mcast)
+{
+       u32 mcast_cfg4;
+
+       mcast_cfg4 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+       dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S,
+                    mcast->tbl_mcast_item_vld);
+       dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_OLD_EN_S,
+                    mcast->tbl_mcast_old_en);
+       dsaf_set_field(mcast_cfg4, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+                      DSAF_TBL_MCAST_CFG4_VM128_112_S,
+                      mcast->tbl_mcast_port_msk[4]);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, mcast_cfg4);
+
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG,
+                      mcast->tbl_mcast_port_msk[3]);
+
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG,
+                      mcast->tbl_mcast_port_msk[2]);
+
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG,
+                      mcast->tbl_mcast_port_msk[1]);
+
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG,
+                      mcast->tbl_mcast_port_msk[0]);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_ucast_cfg - write tcam ucast config
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @tbl_tcam_ucast: ucast config to write
+ */
+static void hns_dsaf_tbl_tcam_ucast_cfg(
+       struct dsaf_device *dsaf_dev,
+       struct dsaf_tbl_tcam_ucast_cfg *tbl_tcam_ucast)
+{
+       u32 ucast_cfg1;
+
+       ucast_cfg1 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+       dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S,
+                    tbl_tcam_ucast->tbl_ucast_mac_discard);
+       dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_ITEM_VLD_S,
+                    tbl_tcam_ucast->tbl_ucast_item_vld);
+       dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OLD_EN_S,
+                    tbl_tcam_ucast->tbl_ucast_old_en);
+       dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_DVC_S,
+                    tbl_tcam_ucast->tbl_ucast_dvc);
+       dsaf_set_field(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+                      DSAF_TBL_UCAST_CFG1_OUT_PORT_S,
+                      tbl_tcam_ucast->tbl_ucast_out_port);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG, ucast_cfg1);
+}
+
+/**
+ * hns_dsaf_tbl_line_cfg - write line table config
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @tbl_lin: line config to write
+ */
+static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
+                                 struct dsaf_tbl_line_cfg *tbl_lin)
+{
+       u32 tbl_line;
+
+       tbl_line = dsaf_read_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG);
+       dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_MAC_DISCARD_S,
+                    tbl_lin->tbl_line_mac_discard);
+       dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_DVC_S,
+                    tbl_lin->tbl_line_dvc);
+       dsaf_set_field(tbl_line, DSAF_TBL_LINE_CFG_OUT_PORT_M,
+                      DSAF_TBL_LINE_CFG_OUT_PORT_S,
+                      tbl_lin->tbl_line_out_port);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_pul - commit tcam mcast config (pulse)
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
+{
+       u32 o_tbl_pul;
+
+       o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_line_pul - commit line table config (pulse)
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
+{
+       u32 tbl_pul;
+
+       tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+       dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 1);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+       dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_mcast_pul - commit tcam data and mcast config (pulse)
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_data_mcast_pul(
+       struct dsaf_device *dsaf_dev)
+{
+       u32 o_tbl_pul;
+
+       o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_ucast_pul - commit tcam data and ucast config (pulse)
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_data_ucast_pul(
+       struct dsaf_device *dsaf_dev)
+{
+       u32 o_tbl_pul;
+
+       o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 1);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_stat_en - enable table lookup statistics
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
+{
+       u32 o_tbl_ctrl;
+
+       o_tbl_ctrl = dsaf_read_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG);
+       dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S, 1);
+       dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_UC_LKUP_NUM_EN_S, 1);
+       dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_MC_LKUP_NUM_EN_S, 1);
+       dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_BC_LKUP_NUM_EN_S, 1);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG, o_tbl_ctrl);
+}
+
+/**
+ * hns_dsaf_rocee_bp_en - rocee back press enable
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
+{
+       dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
+                        DSAF_FC_XGE_TX_PAUSE_S, 1);
+}
+
+/* set msk for dsaf exception irq*/
+static void hns_dsaf_int_xge_msk_set(struct dsaf_device *dsaf_dev,
+                                    u32 chnn_num, u32 mask_set)
+{
+       dsaf_write_dev(dsaf_dev,
+                      DSAF_XGE_INT_MSK_0_REG + 0x4 * chnn_num, mask_set);
+}
+
+static void hns_dsaf_int_ppe_msk_set(struct dsaf_device *dsaf_dev,
+                                    u32 chnn_num, u32 msk_set)
+{
+       dsaf_write_dev(dsaf_dev,
+                      DSAF_PPE_INT_MSK_0_REG + 0x4 * chnn_num, msk_set);
+}
+
+static void hns_dsaf_int_rocee_msk_set(struct dsaf_device *dsaf_dev,
+                                      u32 chnn, u32 msk_set)
+{
+       dsaf_write_dev(dsaf_dev,
+                      DSAF_ROCEE_INT_MSK_0_REG + 0x4 * chnn, msk_set);
+}
+
+static void
+hns_dsaf_int_tbl_msk_set(struct dsaf_device *dsaf_dev, u32 msk_set)
+{
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_MSK_0_REG, msk_set);
+}
+
+/* clr dsaf exception irq*/
+static void hns_dsaf_int_xge_src_clr(struct dsaf_device *dsaf_dev,
+                                    u32 chnn_num, u32 int_src)
+{
+       dsaf_write_dev(dsaf_dev,
+                      DSAF_XGE_INT_SRC_0_REG + 0x4 * chnn_num, int_src);
+}
+
+static void hns_dsaf_int_ppe_src_clr(struct dsaf_device *dsaf_dev,
+                                    u32 chnn, u32 int_src)
+{
+       dsaf_write_dev(dsaf_dev,
+                      DSAF_PPE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_rocee_src_clr(struct dsaf_device *dsaf_dev,
+                                      u32 chnn, u32 int_src)
+{
+       dsaf_write_dev(dsaf_dev,
+                      DSAF_ROCEE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev,
+                                    u32 int_src)
+{
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_SRC_0_REG, int_src);
+}
+
+/**
+ * hns_dsaf_single_line_tbl_cfg - config one line table entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: line table address to write
+ * @ptbl_line: line entry to write
+ */
+static void hns_dsaf_single_line_tbl_cfg(
+       struct dsaf_device *dsaf_dev,
+       u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
+{
+       /*Write Addr*/
+       hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
+
+       /*Write Line*/
+       hns_dsaf_tbl_line_cfg(dsaf_dev, ptbl_line);
+
+       /*Write Pulse*/
+       hns_dsaf_tbl_line_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_uc_cfg - config one tcam ucast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry address
+ * @ptbl_tcam_data: tcam data to write
+ * @ptbl_tcam_ucast: ucast config to write
+ */
+static void hns_dsaf_tcam_uc_cfg(
+       struct dsaf_device *dsaf_dev, u32 address,
+       struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+       struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+       /*Write Addr*/
+       hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+       /*Write Tcam Data*/
+       hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+       /*Write Tcam Ucast*/
+       hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
+       /*Write Pulse*/
+       hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg - config one tcam mcast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry address
+ * @ptbl_tcam_data: tcam data to write
+ * @ptbl_tcam_mcast: mcast config to write
+ */
+static void hns_dsaf_tcam_mc_cfg(
+       struct dsaf_device *dsaf_dev, u32 address,
+       struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+       struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+       /*Write Addr*/
+       hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+       /*Write Tcam Data*/
+       hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+       /*Write Tcam Mcast*/
+       hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
+       /*Write Pulse*/
+       hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_mc_invld - invalidate one tcam mcast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry address
+ */
+static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
+{
+       /*Write Addr*/
+       hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+       /*write tcam mcast*/
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, 0);
+
+       /*Write Pulse*/
+       hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_tcam_uc_get - read one tcam ucast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry address
+ * @ptbl_tcam_data: tcam data read back
+ * @ptbl_tcam_ucast: ucast config read back
+ */
+static void hns_dsaf_tcam_uc_get(
+       struct dsaf_device *dsaf_dev, u32 address,
+       struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+       struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+       u32 tcam_read_data0;
+       u32 tcam_read_data4;
+
+       /*Write Addr*/
+       hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+       /*read tcam item pulse*/
+       hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+       /*read tcam data*/
+       ptbl_tcam_data->tbl_tcam_data_high
+               = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+       ptbl_tcam_data->tbl_tcam_data_low
+               = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+
+       /*read tcam mcast*/
+       tcam_read_data0 = dsaf_read_dev(dsaf_dev,
+                                       DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+       tcam_read_data4 = dsaf_read_dev(dsaf_dev,
+                                       DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+
+       ptbl_tcam_ucast->tbl_ucast_item_vld
+               = dsaf_get_bit(tcam_read_data4,
+                              DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+       ptbl_tcam_ucast->tbl_ucast_old_en
+               = dsaf_get_bit(tcam_read_data4, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+       ptbl_tcam_ucast->tbl_ucast_mac_discard
+               = dsaf_get_bit(tcam_read_data0,
+                              DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S);
+       ptbl_tcam_ucast->tbl_ucast_out_port
+               = dsaf_get_field(tcam_read_data0,
+                                DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+                                DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
+       ptbl_tcam_ucast->tbl_ucast_dvc
+               = dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
+}
+
+/**
+ * hns_dsaf_tcam_mc_get - read one tcam mcast entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam entry address
+ * @ptbl_tcam_data: tcam data read back
+ * @ptbl_tcam_mcast: mcast config read back
+ */
+static void hns_dsaf_tcam_mc_get(
+       struct dsaf_device *dsaf_dev, u32 address,
+       struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+       struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+       u32 data_tmp;
+
+       /*Write Addr*/
+       hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+       /*read tcam item pulse*/
+       hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+       /*read tcam data*/
+       ptbl_tcam_data->tbl_tcam_data_high =
+               dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+       ptbl_tcam_data->tbl_tcam_data_low =
+               dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+
+       /*read tcam mcast*/
+       ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
+               dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+       ptbl_tcam_mcast->tbl_mcast_port_msk[1] =
+               dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+       ptbl_tcam_mcast->tbl_mcast_port_msk[2] =
+               dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+       ptbl_tcam_mcast->tbl_mcast_port_msk[3] =
+               dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+
+       data_tmp = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+       ptbl_tcam_mcast->tbl_mcast_item_vld =
+               dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+       ptbl_tcam_mcast->tbl_mcast_old_en =
+               dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+       ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
+               dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+                              DSAF_TBL_MCAST_CFG4_VM128_112_S);
+}
+
+/**
+ * hns_dsaf_tbl_line_init - init the line table
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
+{
+       u32 i;
+       /* by default, set all line table entries to discard */
+       struct dsaf_tbl_line_cfg tbl_line[] = {{1, 0, 0} };
+
+       for (i = 0; i < DSAF_LINE_SUM; i++)
+               hns_dsaf_single_line_tbl_cfg(dsaf_dev, i, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_init - init the tcam table
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
+{
+       u32 i;
+       struct dsaf_tbl_tcam_data tcam_data[] = {{0, 0} };
+       struct dsaf_tbl_tcam_ucast_cfg tcam_ucast[] = {{0, 0, 0, 0, 0} };
+
+       /*tcam tbl*/
+       for (i = 0; i < DSAF_TCAM_SUM; i++)
+               hns_dsaf_tcam_uc_cfg(dsaf_dev, i, tcam_data, tcam_ucast);
+}
+
+/**
+ * hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac port id
+ * @en: enable or disable pfc
+ */
+static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
+                               int mac_id, int en)
+{
+       if (!en)
+               dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0);
+       else
+               dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff);
+}
+
+/**
+ * hns_dsaf_comm_init - common init of the dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
+{
+       u32 i;
+       u32 o_dsaf_cfg;
+
+       o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
+       dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
+       dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_TC_MODE_S, dsaf_dev->dsaf_tc_mode);
+       dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_CRC_EN_S, 0);
+       dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_MIX_MODE_S, 0);
+       dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_LOCA_ADDR_EN_S, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_CFG_0_REG, o_dsaf_cfg);
+
+       hns_dsaf_reg_cnt_clr_ce(dsaf_dev, 1);
+       hns_dsaf_stp_port_type_cfg(dsaf_dev, DSAF_STP_PORT_TYPE_FORWARD);
+
+       /* set 22 queues per tx ppe engine, only used in switch mode */
+       hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE);
+
+       /* in non-switch mode, set all ports to access mode */
+       hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
+
+       /* set dsaf pfc to 0 for parsing rx pause */
+       for (i = 0; i < DSAF_COMM_CHN; i++)
+               hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+
+       /* mask and clear exception irqs */
+       for (i = 0; i < DSAF_COMM_CHN; i++) {
+               hns_dsaf_int_xge_src_clr(dsaf_dev, i, 0xfffffffful);
+               hns_dsaf_int_ppe_src_clr(dsaf_dev, i, 0xfffffffful);
+               hns_dsaf_int_rocee_src_clr(dsaf_dev, i, 0xfffffffful);
+
+               hns_dsaf_int_xge_msk_set(dsaf_dev, i, 0xfffffffful);
+               hns_dsaf_int_ppe_msk_set(dsaf_dev, i, 0xfffffffful);
+               hns_dsaf_int_rocee_msk_set(dsaf_dev, i, 0xfffffffful);
+       }
+       hns_dsaf_int_tbl_src_clr(dsaf_dev, 0xfffffffful);
+       hns_dsaf_int_tbl_msk_set(dsaf_dev, 0xfffffffful);
+}
+
+/**
+ * hns_dsaf_inode_init - init the xbar inodes
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
+{
+       u32 reg;
+       u32 tc_cfg;
+       u32 i;
+
+       if (dsaf_dev->dsaf_tc_mode == HRD_DSAF_4TC_MODE)
+               tc_cfg = HNS_DSAF_I4TC_CFG;
+       else
+               tc_cfg = HNS_DSAF_I8TC_CFG;
+
+       for (i = 0; i < DSAF_INODE_NUM; i++) {
+               reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
+               dsaf_set_dev_field(dsaf_dev, reg, DSAF_INODE_IN_PORT_NUM_M,
+                                  DSAF_INODE_IN_PORT_NUM_S, i % DSAF_XGE_NUM);
+
+               reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i;
+               dsaf_write_dev(dsaf_dev, reg, tc_cfg);
+       }
+}
+
+/**
+ * hns_dsaf_sbm_init - init the sbm
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
+{
+       u32 flag;
+       u32 cnt = 0;
+       int ret;
+
+       hns_dsaf_sbm_bp_wl_cfg(dsaf_dev);
+
+       /* enable sbm channel, disable sbm channel shortcut function */
+       hns_dsaf_sbm_cfg(dsaf_dev);
+
+       /* enable sbm mib */
+       ret = hns_dsaf_sbm_cfg_mib_en(dsaf_dev);
+       if (ret) {
+               dev_err(dsaf_dev->dev,
+                       "hns_dsaf_sbm_cfg_mib_en fail,%s, ret=%d\n",
+                       dsaf_dev->ae_dev.name, ret);
+               return ret;
+       }
+
+       /* enable sbm initial link sram */
+       hns_dsaf_sbm_link_sram_init_en(dsaf_dev);
+
+       do {
+               usleep_range(200, 210);/*udelay(200);*/
+               flag = dsaf_read_dev(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG);
+               cnt++;
+       } while (flag != DSAF_SRAM_INIT_FINISH_FLAG && cnt < DSAF_CFG_READ_CNT);
+
+       if (flag != DSAF_SRAM_INIT_FINISH_FLAG) {
+               dev_err(dsaf_dev->dev,
+                       "hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n",
+                       dsaf_dev->ae_dev.name, flag, cnt);
+               return -ENODEV;
+       }
+
+       hns_dsaf_rocee_bp_en(dsaf_dev);
+
+       return 0;
+}
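The SRAM-init wait here and the MIB-enable wait earlier follow the same bounded-poll idiom: sleep briefly, re-read a status register, and give up after DSAF_CFG_READ_CNT attempts. The same idea as a hypothetical stand-alone helper, for illustration only:

static int example_poll_reg(struct dsaf_device *dsaf_dev, u32 reg,
                            u32 expected)
{
        u32 cnt = 0;
        u32 val;

        do {
                usleep_range(200, 210);         /* give the hardware time */
                val = dsaf_read_dev(dsaf_dev, reg);
                cnt++;
        } while (val != expected && cnt < DSAF_CFG_READ_CNT);

        return (val == expected) ? 0 : -ENODEV;
}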
+
+/**
+ * hns_dsaf_tbl_init - init the tables
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
+{
+       hns_dsaf_tbl_stat_en(dsaf_dev);
+
+       hns_dsaf_tbl_tcam_init(dsaf_dev);
+       hns_dsaf_tbl_line_init(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_voq_init - init the voq
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev)
+{
+       hns_dsaf_voq_bp_all_thrd_cfg(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_init_hw - init dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
+{
+       int ret;
+
+       dev_dbg(dsaf_dev->dev,
+               "hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
+
+       hns_dsaf_rst(dsaf_dev, 0);
+       mdelay(10);
+       hns_dsaf_rst(dsaf_dev, 1);
+
+       hns_dsaf_comm_init(dsaf_dev);
+
+       /*init XBAR_INODE*/
+       hns_dsaf_inode_init(dsaf_dev);
+
+       /*init SBM*/
+       ret = hns_dsaf_sbm_init(dsaf_dev);
+       if (ret)
+               return ret;
+
+       /*init TBL*/
+       hns_dsaf_tbl_init(dsaf_dev);
+
+       /*init VOQ*/
+       hns_dsaf_voq_init(dsaf_dev);
+
+       return 0;
+}
+
+/**
+ * hns_dsaf_remove_hw - uninit dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
+{
+       /*reset*/
+       hns_dsaf_rst(dsaf_dev, 0);
+}
+
+/**
+ * hns_dsaf_init - init dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
+{
+       struct dsaf_drv_priv *priv =
+           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+       u32 i;
+       int ret;
+
+       ret = hns_dsaf_init_hw(dsaf_dev);
+       if (ret)
+               return ret;
+
+       /* malloc mem for tcam mac key(vlan+mac) */
+       priv->soft_mac_tbl = vzalloc(sizeof(*priv->soft_mac_tbl)
+                 * DSAF_TCAM_SUM);
+       if (!priv->soft_mac_tbl) {
+               ret = -ENOMEM;
+               goto remove_hw;
+       }
+
+       /* mark all entries invalid */
+       for (i = 0; i < DSAF_TCAM_SUM; i++)
+               (priv->soft_mac_tbl + i)->index = DSAF_INVALID_ENTRY_IDX;
+
+       return 0;
+
+remove_hw:
+       hns_dsaf_remove_hw(dsaf_dev);
+       return ret;
+}
+
+/**
+ * hns_dsaf_free - free dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free(struct dsaf_device *dsaf_dev)
+{
+       struct dsaf_drv_priv *priv =
+           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+
+       hns_dsaf_remove_hw(dsaf_dev);
+
+       /* free all mac mem */
+       vfree(priv->soft_mac_tbl);
+       priv->soft_mac_tbl = NULL;
+}
+
+/**
+ * hns_dsaf_find_soft_mac_entry - find dsa fabric soft entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: tcam key of the entry to find
+ */
+static u16 hns_dsaf_find_soft_mac_entry(
+       struct dsaf_device *dsaf_dev,
+       struct dsaf_drv_tbl_tcam_key *mac_key)
+{
+       struct dsaf_drv_priv *priv =
+           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+       u32 i;
+
+       soft_mac_entry = priv->soft_mac_tbl;
+       for (i = 0; i < DSAF_TCAM_SUM; i++) {
+               /* a valid entry whose key matches? */
+               if ((soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX) &&
+                   (soft_mac_entry->tcam_key.high.val == mac_key->high.val) &&
+                   (soft_mac_entry->tcam_key.low.val == mac_key->low.val))
+                       /* return find result --soft index */
+                       return soft_mac_entry->index;
+
+               soft_mac_entry++;
+       }
+       return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_find_empty_mac_entry - search dsa fabric soft empty-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
+{
+       struct dsaf_drv_priv *priv =
+           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+       u32 i;
+
+       soft_mac_entry = priv->soft_mac_tbl;
+       for (i = 0; i < DSAF_TCAM_SUM; i++) {
+               /* an invalid (free) entry? */
+               if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+                       /* return find result --soft index */
+                       return i;
+
+               soft_mac_entry++;
+       }
+       return DSAF_INVALID_ENTRY_IDX;
+}
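The two lookups above walk a software shadow of the TCAM. Judging from the fields this file touches, each shadow entry holds the tcam key plus an index that stays DSAF_INVALID_ENTRY_IDX while the slot is free; the entry setters below first look for a matching key and only then fall back to a free slot. A sketch of that assumed layout and flow (names here are illustrative):

struct example_soft_mac_entry {                 /* assumed shape of a shadow entry */
        struct dsaf_drv_tbl_tcam_key tcam_key;  /* vlan + port + mac key */
        u16 index;                              /* DSAF_INVALID_ENTRY_IDX when free */
};

static int example_find_or_alloc(struct dsaf_device *dsaf_dev,
                                 struct dsaf_drv_tbl_tcam_key *key)
{
        u16 idx = hns_dsaf_find_soft_mac_entry(dsaf_dev, key);

        if (idx == DSAF_INVALID_ENTRY_IDX)      /* no match: take a free slot */
                idx = hns_dsaf_find_empty_mac_entry(dsaf_dev);

        return (idx == DSAF_INVALID_ENTRY_IDX) ? -ENOSPC : idx;
}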
+
+/**
+ * hns_dsaf_set_mac_key - set mac key
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: tcam key pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr: mac addr
+ */
+static void hns_dsaf_set_mac_key(
+       struct dsaf_device *dsaf_dev,
+       struct dsaf_drv_tbl_tcam_key *mac_key, u16 vlan_id, u8 in_port_num,
+       u8 *addr)
+{
+       u8 port;
+
+       if (dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE)
+               /*DSAF mode : in port id fixed 0*/
+               port = 0;
+       else
+               /*non-dsaf mode*/
+               port = in_port_num;
+
+       mac_key->high.bits.mac_0 = addr[0];
+       mac_key->high.bits.mac_1 = addr[1];
+       mac_key->high.bits.mac_2 = addr[2];
+       mac_key->high.bits.mac_3 = addr[3];
+       mac_key->low.bits.mac_4 = addr[4];
+       mac_key->low.bits.mac_5 = addr[5];
+       mac_key->low.bits.vlan = vlan_id;
+       mac_key->low.bits.port = port;
+}
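hns_dsaf_set_mac_key() packs the six MAC octets, the VLAN id and the ingress port into the two 32-bit words of the TCAM key (with the port forced to 0 in DSAF mode). A short usage sketch with arbitrary values:

static void example_build_key(struct dsaf_device *dsaf_dev,
                              struct dsaf_drv_tbl_tcam_key *key)
{
        u8 addr[6] = {0x00, 0x18, 0x2d, 0xaa, 0xbb, 0xcc};

        /* MAC above, VLAN 10, ingress port 2 */
        hns_dsaf_set_mac_key(dsaf_dev, key, 10, 2, addr);

        /* key->high.val and key->low.val are then used as a pair by the
         * soft-table lookup and the TCAM write helpers in this file
         */
}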
+
+/**
+ * hns_dsaf_set_mac_uc_entry - set mac uc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: uc-mac entry
+ */
+int hns_dsaf_set_mac_uc_entry(
+       struct dsaf_device *dsaf_dev,
+       struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+       struct dsaf_tbl_tcam_ucast_cfg mac_data;
+       struct dsaf_drv_priv *priv =
+           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+       /* mac addr check */
+       if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+           MAC_IS_BROADCAST(mac_entry->addr) ||
+           MAC_IS_MULTICAST(mac_entry->addr)) {
+               dev_err(dsaf_dev->dev,
+                       "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
+                       dsaf_dev->ae_dev.name, mac_entry->addr[0],
+                       mac_entry->addr[1], mac_entry->addr[2],
+                       mac_entry->addr[3], mac_entry->addr[4],
+                       mac_entry->addr[5]);
+               return -EINVAL;
+       }
+
+       /* config key */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+                            mac_entry->in_port_num, mac_entry->addr);
+
+       /* does the entry already exist? */
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               /* if not found, find an empty entry */
+               entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+               if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+                       /* no empty entry, return error */
+                       dev_err(dsaf_dev->dev,
+                               "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+                               dsaf_dev->ae_dev.name,
+                               mac_key.high.val, mac_key.low.val);
+                       return -EINVAL;
+               }
+       }
+
+       dev_dbg(dsaf_dev->dev,
+               "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+               dsaf_dev->ae_dev.name, mac_key.high.val,
+               mac_key.low.val, entry_index);
+
+       /* config hardware entry */
+       mac_data.tbl_ucast_item_vld = 1;
+       mac_data.tbl_ucast_mac_discard = 0;
+       mac_data.tbl_ucast_old_en = 0;
+       /* default config dvc to 0 */
+       mac_data.tbl_ucast_dvc = 0;
+       mac_data.tbl_ucast_out_port = mac_entry->port_num;
+       hns_dsaf_tcam_uc_cfg(
+               dsaf_dev, entry_index,
+               (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+       /* config software entry */
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = entry_index;
+       soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+       soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+       return 0;
+}
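A caller of hns_dsaf_set_mac_uc_entry() only fills in the ingress VLAN and port, the MAC address and the egress port; the function derives the key, finds or allocates a TCAM slot, and programs both the hardware table and the software shadow. An illustrative caller (addr is assumed to be the 6-byte array the error prints above imply):

static int example_add_uc_entry(struct dsaf_device *dsaf_dev)
{
        struct dsaf_drv_mac_single_dest_entry entry = {
                .in_vlan_id = 10,
                .in_port_num = 2,       /* ingress port used in the key */
                .port_num = 3,          /* egress port written to the TCAM */
        };
        static const u8 addr[6] = {0x00, 0x18, 0x2d, 0x11, 0x22, 0x33};

        memcpy(entry.addr, addr, sizeof(addr));
        return hns_dsaf_set_mac_uc_entry(dsaf_dev, &entry);
}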
+
+/**
+ * hns_dsaf_set_mac_mc_entry - set mac mc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_set_mac_mc_entry(
+       struct dsaf_device *dsaf_dev,
+       struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+       struct dsaf_tbl_tcam_mcast_cfg mac_data;
+       struct dsaf_drv_priv *priv =
+           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+       struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+
+       /* mac addr check */
+       if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+               dev_err(dsaf_dev->dev,
+                       "set_mc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
+                       dsaf_dev->ae_dev.name, mac_entry->addr[0],
+                       mac_entry->addr[1], mac_entry->addr[2],
+                       mac_entry->addr[3],
+                       mac_entry->addr[4], mac_entry->addr[5]);
+               return -EINVAL;
+       }
+
+       /*config key */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key,
+                            mac_entry->in_vlan_id,
+                            mac_entry->in_port_num, mac_entry->addr);
+
+       /* does the entry already exist? */
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               /* if not found, find an empty entry */
+               entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+               if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+                       /* no empty entry, return error */
+                       dev_err(dsaf_dev->dev,
+                               "set_mc_entry failed, %s Mac key(%#x:%#x)\n",
+                               dsaf_dev->ae_dev.name,
+                               mac_key.high.val, mac_key.low.val);
+                       return -EINVAL;
+               }
+
+               /* config hardware entry */
+               memset(mac_data.tbl_mcast_port_msk,
+                      0, sizeof(mac_data.tbl_mcast_port_msk));
+       } else {
+               /* config hardware entry */
+               hns_dsaf_tcam_mc_get(
+                       dsaf_dev, entry_index,
+                       (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+       }
+       mac_data.tbl_mcast_old_en = 0;
+       mac_data.tbl_mcast_item_vld = 1;
+       dsaf_set_field(mac_data.tbl_mcast_port_msk[0],
+                      0x3F, 0, mac_entry->port_mask[0]);
+
+       dev_dbg(dsaf_dev->dev,
+               "set_mc_entry, %s key(%#x:%#x) entry_index%d\n",
+               dsaf_dev->ae_dev.name, mac_key.high.val,
+               mac_key.low.val, entry_index);
+
+       hns_dsaf_tcam_mc_cfg(
+               dsaf_dev, entry_index,
+               (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+       /* config software entry */
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = entry_index;
+       soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+       soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+       return 0;
+}
+
+/**
+ * hns_dsaf_add_mac_mc_port - add mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+                            struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+       struct dsaf_tbl_tcam_mcast_cfg mac_data;
+       struct dsaf_drv_priv *priv =
+           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+       struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+       int mskid;
+
+       /* check mac addr */
+       if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+               dev_err(dsaf_dev->dev,
+                       "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+                       mac_entry->addr[0], mac_entry->addr[1],
+                       mac_entry->addr[2], mac_entry->addr[3],
+                       mac_entry->addr[4], mac_entry->addr[5]);
+               return -EINVAL;
+       }
+
+       /*config key */
+       hns_dsaf_set_mac_key(
+               dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+               mac_entry->in_port_num, mac_entry->addr);
+
+       memset(&mac_data, 0, sizeof(struct dsaf_tbl_tcam_mcast_cfg));
+
+       /* does the entry already exist? */
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               /* if not found, find an empty entry */
+               entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+               if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+                       /* no empty entry, return error */
+                       dev_err(dsaf_dev->dev,
+                               "add_mc_port failed, %s Mac key(%#x:%#x)\n",
+                               dsaf_dev->ae_dev.name, mac_key.high.val,
+                               mac_key.low.val);
+                       return -EINVAL;
+               }
+       } else {
+               /* if it exists, add to the existing port mask */
+               hns_dsaf_tcam_mc_get(
+                       dsaf_dev, entry_index,
+                       (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+       }
+       /* config hardware entry */
+       if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+               mskid = mac_entry->port_num;
+       } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+               mskid = mac_entry->port_num -
+                       DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+       } else {
+               dev_err(dsaf_dev->dev,
+                       "%s,pnum(%d)error,key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name, mac_entry->port_num,
+                       mac_key.high.val, mac_key.low.val);
+               return -EINVAL;
+       }
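+       /* mskid is the bit position of the output port in the multicast
+        * port mask (32 bits per mask word)
+        */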
+       dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1);
+       mac_data.tbl_mcast_old_en = 0;
+       mac_data.tbl_mcast_item_vld = 1;
+
+       dev_dbg(dsaf_dev->dev,
+               "add_mac_mc_port, %s Mac key(%#x:%#x) entry_index%d\n",
+               dsaf_dev->ae_dev.name, mac_key.high.val,
+               mac_key.low.val, entry_index);
+
+       hns_dsaf_tcam_mc_cfg(
+               dsaf_dev, entry_index,
+               (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+
+       /*config software entry */
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = entry_index;
+       soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+       soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+       return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_entry - del mac entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr : mac addr
+ */
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+                          u8 in_port_num, u8 *addr)
+{
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+       struct dsaf_drv_priv *priv =
+           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+       /*check mac addr */
+       if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
+               dev_err(dsaf_dev->dev,
+                       "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+                       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+               return -EINVAL;
+       }
+
+       /*config key */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, addr);
+
+       /*exist ?*/
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               /*not exist, error */
+               dev_err(dsaf_dev->dev,
+                       "del_mac_entry failed, %s Mac key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name,
+                       mac_key.high.val, mac_key.low.val);
+               return -EINVAL;
+       }
+       dev_dbg(dsaf_dev->dev,
+               "del_mac_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+               dsaf_dev->ae_dev.name, mac_key.high.val,
+               mac_key.low.val, entry_index);
+
+       /* do the delete operation */
+       hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+       /* del soft entry */
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+
+       return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_mc_port - del mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+                            struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+       struct dsaf_drv_priv *priv =
+           (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+       u16 vlan_id;
+       u8 in_port_num;
+       struct dsaf_tbl_tcam_mcast_cfg mac_data;
+       struct dsaf_drv_tbl_tcam_key tmp_mac_key;
+       int mskid;
+       const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
+
+       if (!mac_entry) {
+               dev_err(dsaf_dev->dev,
+                       "hns_dsaf_del_mac_mc_port mac_entry is NULL\n");
+               return -EINVAL;
+       }
+
+       /*get key info*/
+       vlan_id = mac_entry->in_vlan_id;
+       in_port_num = mac_entry->in_port_num;
+
+       /*check mac addr */
+       if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+               dev_err(dsaf_dev->dev,
+                       "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
+                       mac_entry->addr[0], mac_entry->addr[1],
+                       mac_entry->addr[2], mac_entry->addr[3],
+                       mac_entry->addr[4], mac_entry->addr[5]);
+               return -EINVAL;
+       }
+
+       /*config key */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num,
+                            mac_entry->addr);
+
+       /*check is exist? */
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               /*find none */
+               dev_err(dsaf_dev->dev,
+                       "find_soft_mac_entry failed, %s Mac key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name,
+                       mac_key.high.val, mac_key.low.val);
+               return -EINVAL;
+       }
+
+       dev_dbg(dsaf_dev->dev,
+               "del_mac_mc_port, %s key(%#x:%#x) index%d\n",
+               dsaf_dev->ae_dev.name, mac_key.high.val,
+               mac_key.low.val, entry_index);
+
+       /*read entry*/
+       hns_dsaf_tcam_mc_get(
+               dsaf_dev, entry_index,
+               (struct dsaf_tbl_tcam_data *)(&tmp_mac_key), &mac_data);
+
+       /*del the port*/
+       if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+               mskid = mac_entry->port_num;
+       } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+               mskid = mac_entry->port_num -
+                       DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+       } else {
+               dev_err(dsaf_dev->dev,
+                       "%s,pnum(%d)error,key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name, mac_entry->port_num,
+                       mac_key.high.val, mac_key.low.val);
+               return -EINVAL;
+       }
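+       /* clear this port's bit in the multicast port mask */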
+       dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 0);
+
+       /* no port left in the mask, delete the whole entry */
+       if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
+                   sizeof(mac_data.tbl_mcast_port_msk))) {
+               hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+               /* del soft entry */
+               soft_mac_entry += entry_index;
+               soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+       } else { /* ports remain, just update the entry with the port cleared */
+               hns_dsaf_tcam_mc_cfg(
+                       dsaf_dev, entry_index,
+                       (struct dsaf_tbl_tcam_data *)(&mac_key), &mac_data);
+       }
+
+       return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_uc_entry - get mac uc entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
+                             struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+
+       struct dsaf_tbl_tcam_ucast_cfg mac_data;
+
+       /* check macaddr */
+       if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+           MAC_IS_BROADCAST(mac_entry->addr)) {
+               dev_err(dsaf_dev->dev,
+                       "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+                       mac_entry->addr[0], mac_entry->addr[1],
+                       mac_entry->addr[2], mac_entry->addr[3],
+                       mac_entry->addr[4], mac_entry->addr[5]);
+               return -EINVAL;
+       }
+
+       /*config key */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+                            mac_entry->in_port_num, mac_entry->addr);
+
+       /*check exist? */
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               /*find none, error */
+               dev_err(dsaf_dev->dev,
+                       "get_uc_entry failed, %s Mac key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name,
+                       mac_key.high.val, mac_key.low.val);
+               return -EINVAL;
+       }
+       dev_dbg(dsaf_dev->dev,
+               "get_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+               dsaf_dev->ae_dev.name, mac_key.high.val,
+               mac_key.low.val, entry_index);
+
+       /*read entry*/
+       hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
+                            (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+       mac_entry->port_num = mac_data.tbl_ucast_out_port;
+
+       return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_mc_entry - get mac mc entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
+                             struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+
+       struct dsaf_tbl_tcam_mcast_cfg mac_data;
+
+       /*check mac addr */
+       if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+           MAC_IS_BROADCAST(mac_entry->addr)) {
+               dev_err(dsaf_dev->dev,
+                       "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+                       mac_entry->addr[0], mac_entry->addr[1],
+                       mac_entry->addr[2], mac_entry->addr[3],
+                       mac_entry->addr[4], mac_entry->addr[5]);
+               return -EINVAL;
+       }
+
+       /*config key */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+                            mac_entry->in_port_num, mac_entry->addr);
+
+       /*check exist? */
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               /* find none, error */
+               dev_err(dsaf_dev->dev,
+                       "get_mac_mc_entry failed, %s Mac key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name, mac_key.high.val,
+                       mac_key.low.val);
+               return -EINVAL;
+       }
+       dev_dbg(dsaf_dev->dev,
+               "get_mac_mc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+               dsaf_dev->ae_dev.name, mac_key.high.val,
+               mac_key.low.val, entry_index);
+
+       /*read entry */
+       hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
+                            (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+
+       mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
+       return 0;
+}
+
+/**
+ * hns_dsaf_get_mac_entry_by_index - get mac entry by tab index
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @entry_index: tab entry index
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_get_mac_entry_by_index(
+       struct dsaf_device *dsaf_dev,
+       u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry)
+{
+       struct dsaf_drv_tbl_tcam_key mac_key;
+
+       struct dsaf_tbl_tcam_mcast_cfg mac_data;
+       struct dsaf_tbl_tcam_ucast_cfg mac_uc_data;
+       char mac_addr[MAC_NUM_OCTETS_PER_ADDR] = {0};
+
+       if (entry_index >= DSAF_TCAM_SUM) {
+               /* index out of range, error */
+               dev_err(dsaf_dev->dev, "get_mac_entry_by_index failed, %s\n",
+                       dsaf_dev->ae_dev.name);
+               return -EINVAL;
+       }
+
+       /* mc entry, do read opt */
+       hns_dsaf_tcam_mc_get(dsaf_dev, entry_index,
+                            (struct dsaf_tbl_tcam_data *)&mac_key, &mac_data);
+
+       mac_entry->port_mask[0] = mac_data.tbl_mcast_port_msk[0] & 0x3F;
+
+       /* get mac addr */
+       mac_addr[0] = mac_key.high.bits.mac_0;
+       mac_addr[1] = mac_key.high.bits.mac_1;
+       mac_addr[2] = mac_key.high.bits.mac_2;
+       mac_addr[3] = mac_key.high.bits.mac_3;
+       mac_addr[4] = mac_key.low.bits.mac_4;
+       mac_addr[5] = mac_key.low.bits.mac_5;
+       /* is it mc or uc? */
+       if (MAC_IS_MULTICAST((u8 *)mac_addr) ||
+           MAC_IS_L3_MULTICAST((u8 *)mac_addr)) {
+               /* mc entry: the port mask read above is already correct */
+       } else {
+               /* not mc, read the uc entry instead */
+               hns_dsaf_tcam_uc_get(dsaf_dev, entry_index,
+                                    (struct dsaf_tbl_tcam_data *)&mac_key,
+                                    &mac_uc_data);
+               mac_entry->port_mask[0] = (1 << mac_uc_data.tbl_ucast_out_port);
+       }
+
+       return 0;
+}
+
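+/**
+ * hns_dsaf_alloc_dev - allocate a dsaf device with trailing private data
+ * @dev: struct device pointer
+ * @sizeof_priv: size of the private data placed after the device struct
+ * return dsaf_device pointer on success, ERR_PTR(-ENOMEM) on failure
+ */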
+static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
+                                             size_t sizeof_priv)
+{
+       struct dsaf_device *dsaf_dev;
+
+       dsaf_dev = devm_kzalloc(dev,
+                               sizeof(*dsaf_dev) + sizeof_priv, GFP_KERNEL);
+       if (unlikely(!dsaf_dev)) {
+               dsaf_dev = ERR_PTR(-ENOMEM);
+       } else {
+               dsaf_dev->dev = dev;
+               dev_set_drvdata(dev, dsaf_dev);
+       }
+
+       return dsaf_dev;
+}
+
+/**
+ * hns_dsaf_free_dev - free dev mem
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
+{
+       (void)dev_set_drvdata(dsaf_dev->dev, NULL);
+}
+
+/**
+ * hns_dsaf_pfc_unit_cnt - set pfc unit count
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac id
+ * @rate: port rate mode
+ */
+static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
+                                 enum dsaf_port_rate_mode rate)
+{
+       u32 unit_cnt;
+
+       switch (rate) {
+       case DSAF_PORT_RATE_10000:
+               unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+               break;
+       case DSAF_PORT_RATE_1000:
+               unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+               break;
+       case DSAF_PORT_RATE_2500:
+               unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+               break;
+       default:
+               unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+       }
+
+       dsaf_set_dev_field(dsaf_dev,
+                          (DSAF_PFC_UNIT_CNT_0_REG + 0x4 * (u64)mac_id),
+                          DSAF_PFC_UNINT_CNT_M, DSAF_PFC_UNINT_CNT_S,
+                          unit_cnt);
+}
+
+/**
+ * hns_dsaf_port_work_rate_cfg - set the xge/ge work mode and pfc unit count
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac id
+ * @rate_mode: port rate mode
+ */
+void hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+                                enum dsaf_port_rate_mode rate_mode)
+{
+       u32 port_work_mode;
+
+       port_work_mode = dsaf_read_dev(
+               dsaf_dev, DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id);
+
+       if (rate_mode == DSAF_PORT_RATE_10000)
+               dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 1);
+       else
+               dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 0);
+
+       dsaf_write_dev(dsaf_dev,
+                      DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id,
+                      port_work_mode);
+
+       hns_dsaf_pfc_unit_cnt(dsaf_dev, mac_id, rate_mode);
+}
+
+/**
+ * hns_dsaf_fix_mac_mode - dsaf modify mac mode
+ * @mac_cb: mac control block
+ */
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
+{
+       enum dsaf_port_rate_mode mode;
+       struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+       int mac_id = mac_cb->mac_id;
+
+       if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+               return;
+       if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII)
+               mode = DSAF_PORT_RATE_10000;
+       else
+               mode = DSAF_PORT_RATE_1000;
+
+       hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
+}
+
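+/**
+ * hns_dsaf_update_stats - accumulate hardware counters into soft stats
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @node_num: inode whose hardware counters are added to hw_stats
+ */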
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
+{
+       struct dsaf_hw_stats *hw_stats
+               = &dsaf_dev->hw_stats[node_num];
+
+       hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->man_pkts += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->rx_pkts += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_SBM_DROP_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->crc_false += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_CRC_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->bp_drop += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_BP_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->rslt_drop += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_RSLT_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+       hw_stats->local_addr_false += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+       hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+       hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
+               DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+
+       hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
+               DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
+}
+
+/**
+ * hns_dsaf_get_regs - dump dsaf regs
+ * @ddev: dsaf device
+ * @port: port index
+ * @data: data for value of regs
+ */
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
+{
+       u32 i = 0;
+       u32 j;
+       u32 *p = data;
+
+       /* dsaf common registers */
+       p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
+       p[1] = dsaf_read_dev(ddev, DSAF_CFG_0_REG);
+       p[2] = dsaf_read_dev(ddev, DSAF_ECC_ERR_INVERT_0_REG);
+       p[3] = dsaf_read_dev(ddev, DSAF_ABNORMAL_TIMEOUT_0_REG);
+       p[4] = dsaf_read_dev(ddev, DSAF_FSM_TIMEOUT_0_REG);
+       p[5] = dsaf_read_dev(ddev, DSAF_DSA_REG_CNT_CLR_CE_REG);
+       p[6] = dsaf_read_dev(ddev, DSAF_DSA_SBM_INF_FIFO_THRD_REG);
+       p[7] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_SEL_REG);
+       p[8] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_CNT_REG);
+
+       p[9] = dsaf_read_dev(ddev, DSAF_PFC_EN_0_REG + port * 4);
+       p[10] = dsaf_read_dev(ddev, DSAF_PFC_UNIT_CNT_0_REG + port * 4);
+       p[11] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+       p[12] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+       p[13] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+       p[14] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+       p[15] = dsaf_read_dev(ddev, DSAF_PPE_INT_MSK_0_REG + port * 4);
+       p[16] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_MSK_0_REG + port * 4);
+       p[17] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+       p[18] = dsaf_read_dev(ddev, DSAF_PPE_INT_SRC_0_REG + port * 4);
+       p[19] =  dsaf_read_dev(ddev, DSAF_ROCEE_INT_SRC_0_REG + port * 4);
+       p[20] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+       p[21] = dsaf_read_dev(ddev, DSAF_PPE_INT_STS_0_REG + port * 4);
+       p[22] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_STS_0_REG + port * 4);
+       p[23] = dsaf_read_dev(ddev, DSAF_PPE_QID_CFG_0_REG + port * 4);
+
+       for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+               p[24 + i] = dsaf_read_dev(ddev,
+                               DSAF_SW_PORT_TYPE_0_REG + i * 4);
+
+       p[32] = dsaf_read_dev(ddev, DSAF_MIX_DEF_QID_0_REG + port * 4);
+
+       for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+               p[33 + i] = dsaf_read_dev(ddev,
+                               DSAF_PORT_DEF_VLAN_0_REG + i * 4);
+
+       for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++)
+               p[41 + i] = dsaf_read_dev(ddev,
+                               DSAF_VM_DEF_VLAN_0_REG + i * 4);
+
+       /* dsaf inode registers */
+       p[170] = dsaf_read_dev(ddev, DSAF_INODE_CUT_THROUGH_CFG_0_REG);
+
+       p[171] = dsaf_read_dev(ddev,
+                       DSAF_INODE_ECC_ERR_ADDR_0_REG + port * 0x80);
+
+       for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+               j = i * DSAF_COMM_CHN + port;
+               p[172 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_IN_PORT_NUM_0_REG + j * 0x80);
+               p[175 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_PRI_TC_CFG_0_REG + j * 0x80);
+               p[178 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_BP_STATUS_0_REG + j * 0x80);
+               p[181 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_PAD_DISCARD_NUM_0_REG + j * 0x80);
+               p[184 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + j * 0x80);
+               p[187 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
+               p[190 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
+               p[193 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80);
+               p[196 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
+               p[199 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_SBM_DROP_NUM_0_REG + j * 0x80);
+               p[202 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_CRC_FALSE_NUM_0_REG + j * 0x80);
+               p[205 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_BP_DISCARD_NUM_0_REG + j * 0x80);
+               p[208 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_RSLT_DISCARD_NUM_0_REG + j * 0x80);
+               p[211 + i] = dsaf_read_dev(ddev,
+                       DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + j * 0x80);
+               p[214 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_VOQ_OVER_NUM_0_REG + j * 0x80);
+               p[217 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_BD_SAVE_STATUS_0_REG + j * 4);
+               p[220 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
+               p[223 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
+               p[224 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
+       }
+
+       p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+
+       for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+               j = i * DSAF_COMM_CHN + port;
+               p[228 + i] = dsaf_read_dev(ddev,
+                               DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
+       }
+
+       p[231] = dsaf_read_dev(ddev,
+               DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+
+       /* dsaf inode registers */
+       for (i = 0; i < DSAF_SBM_NUM / DSAF_COMM_CHN; i++) {
+               j = i * DSAF_COMM_CHN + port;
+               p[232 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_CFG_REG_0_REG + j * 0x80);
+               p[235 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
+               p[238 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
+               p[241 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
+               p[244 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
+               p[245 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
+               p[248 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
+               p[251 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
+               p[254 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
+               p[257 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
+               p[260 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_INER_ST_0_REG + j * 0x80);
+               p[263 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
+               p[266 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
+               p[269 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
+               p[272 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
+               p[275 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
+               p[278 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
+               p[281 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
+               p[284 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
+               p[287 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
+               p[290 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
+               p[293 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
+               p[296 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
+               p[299 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
+               p[302 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
+               p[305 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
+               p[308 + i] = dsaf_read_dev(ddev,
+                               DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
+       }
+
+       /* dsaf onode registers */
+       for (i = 0; i < DSAF_XOD_NUM; i++) {
+               p[311 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90);
+               p[319 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90);
+               p[327 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90);
+               p[335 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90);
+               p[343 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90);
+               p[351 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90);
+       }
+
+       p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+       p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+       p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+
+       for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
+               j = i * DSAF_COMM_CHN + port;
+               p[362 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_GNT_L_0_REG + j * 0x90);
+               p[365 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_GNT_H_0_REG + j * 0x90);
+               p[368 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
+               p[371 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
+               p[374 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
+               p[377 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
+               p[380 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
+               p[383 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
+               p[386 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
+               p[389 + i] = dsaf_read_dev(ddev,
+                               DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
+       }
+
+       p[392] = dsaf_read_dev(ddev,
+               DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
+       p[393] = dsaf_read_dev(ddev,
+               DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
+       p[394] = dsaf_read_dev(ddev,
+               DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
+       p[395] = dsaf_read_dev(ddev,
+               DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
+       p[396] = dsaf_read_dev(ddev,
+               DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
+       p[397] = dsaf_read_dev(ddev,
+               DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
+       p[398] = dsaf_read_dev(ddev,
+               DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
+       p[399] = dsaf_read_dev(ddev,
+               DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
+       p[400] = dsaf_read_dev(ddev,
+               DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+       p[401] = dsaf_read_dev(ddev,
+               DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+       p[402] = dsaf_read_dev(ddev,
+               DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+       p[403] = dsaf_read_dev(ddev,
+               DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+       p[404] = dsaf_read_dev(ddev,
+               DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
+
+       /* dsaf voq registers */
+       for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
+               j = (i * DSAF_COMM_CHN + port) * 0x90;
+               p[405 + i] = dsaf_read_dev(ddev,
+                       DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
+               p[408 + i] = dsaf_read_dev(ddev,
+                       DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
+               p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+               p[414 + i] = dsaf_read_dev(ddev,
+                       DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
+               p[417 + i] = dsaf_read_dev(ddev,
+                       DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
+               p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+               p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+               p[426 + i] = dsaf_read_dev(ddev,
+                       DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
+               p[429 + i] = dsaf_read_dev(ddev,
+                       DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
+               p[432 + i] = dsaf_read_dev(ddev,
+                       DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
+               p[435 + i] = dsaf_read_dev(ddev,
+                       DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
+               p[438 + i] = dsaf_read_dev(ddev,
+                       DSAF_VOQ_BP_ALL_THRD_0_REG + j);
+       }
+
+       /* dsaf tbl registers */
+       p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+       p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+       p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+       p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+       p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+       p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+       p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+       p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+       p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+       p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+       p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+       p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+       p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+       p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+       p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+       p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+       p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+       p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+       p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+       p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+       p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+       p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+       p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+
+       for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+               j = i * 0x8;
+               p[464 + 2 * i] = dsaf_read_dev(ddev,
+                       DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
+               p[465 + 2 * i] = dsaf_read_dev(ddev,
+                       DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
+       }
+
+       p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+       p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+       p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+       p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+       p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+       p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+       p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+       p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+       p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+       p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+       p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+       p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+
+       /* dsaf other registers */
+       p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+       p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+       p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+       p[495] = dsaf_read_dev(ddev,
+               DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
+       p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+       p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+
+       /* mark end of dsaf regs */
+       for (i = 498; i < 504; i++)
+               p[i] = 0xdddddddd;
+}
+
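+/* write the per-node ethtool stat names, one every ETH_GSTRING_LEN bytes,
+ * and return the buffer position after the last name
+ */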
+static char *hns_dsaf_get_node_stats_strings(char *data, int node)
+{
+       char *buff = data;
+
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
+       buff = buff + ETH_GSTRING_LEN;
+
+       return buff;
+}
+
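+/* copy one node's accumulated hw_stats into the ethtool data array and
+ * return the position after the last value
+ */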
+static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
+                                   int node_num)
+{
+       u64 *p = data;
+       struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
+
+       p[0] = hw_stats->pad_drop;
+       p[1] = hw_stats->man_pkts;
+       p[2] = hw_stats->rx_pkts;
+       p[3] = hw_stats->rx_pkt_id;
+       p[4] = hw_stats->rx_pause_frame;
+       p[5] = hw_stats->release_buf_num;
+       p[6] = hw_stats->sbm_drop;
+       p[7] = hw_stats->crc_false;
+       p[8] = hw_stats->bp_drop;
+       p[9] = hw_stats->rslt_drop;
+       p[10] = hw_stats->local_addr_false;
+       p[11] = hw_stats->vlan_drop;
+       p[12] = hw_stats->stp_drop;
+       p[13] = hw_stats->tx_pkts;
+
+       return &p[14];
+}
+
+/**
+ *hns_dsaf_get_stats - get dsaf statistic
+ *@ddev: dsaf device
+ *@data:statistic value
+ *@port: port num
+ */
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
+{
+       u64 *p = data;
+       int node_num = port;
+
+       /* for ge/xge node info */
+       p = hns_dsaf_get_node_stats(ddev, p, node_num);
+
+       /* for ppe node info */
+       node_num = port + DSAF_PPE_INODE_BASE;
+       (void)hns_dsaf_get_node_stats(ddev, p, node_num);
+}
+
+/**
+ *hns_dsaf_get_sset_count - get dsaf string set count
+ *@stringset: type of values in data
+ *return dsaf string name count
+ */
+int hns_dsaf_get_sset_count(int stringset)
+{
+       if (stringset == ETH_SS_STATS)
+               return DSAF_STATIC_NUM;
+
+       return 0;
+}
+
+/**
+ * hns_dsaf_get_strings - get dsaf string set
+ * @stringset: string set index
+ * @data: strings name value
+ * @port: port index
+ */
+void hns_dsaf_get_strings(int stringset, u8 *data, int port)
+{
+       char *buff = (char *)data;
+       int node = port;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       /* for ge/xge node info */
+       buff = hns_dsaf_get_node_stats_strings(buff, node);
+
+       /* for ppe node info */
+       node = port + DSAF_PPE_INODE_BASE;
+       (void)hns_dsaf_get_node_stats_strings(buff, node);
+}
+
+/**
+ * hns_dsaf_get_regs_count - get dsaf regs count
+ * return dsaf regs count
+ */
+int hns_dsaf_get_regs_count(void)
+{
+       return DSAF_DUMP_REGS_NUM;
+}
+
+/**
+ * hns_dsaf_probe - probe dsaf dev
+ * @pdev: dsaf platform device
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_probe(struct platform_device *pdev)
+{
+       struct dsaf_device *dsaf_dev;
+       int ret;
+
+       dsaf_dev = hns_dsaf_alloc_dev(&pdev->dev, sizeof(struct dsaf_drv_priv));
+       if (IS_ERR(dsaf_dev)) {
+               ret = PTR_ERR(dsaf_dev);
+               dev_err(&pdev->dev,
+                       "dsaf_probe dsaf_alloc_dev failed, ret = %#x!\n", ret);
+               return ret;
+       }
+
+       ret = hns_dsaf_get_cfg(dsaf_dev);
+       if (ret)
+               goto free_dev;
+
+       ret = hns_dsaf_init(dsaf_dev);
+       if (ret)
+               goto free_cfg;
+
+       ret = hns_mac_init(dsaf_dev);
+       if (ret)
+               goto uninit_dsaf;
+
+       ret = hns_ppe_init(dsaf_dev);
+       if (ret)
+               goto uninit_mac;
+
+       ret = hns_dsaf_ae_init(dsaf_dev);
+       if (ret)
+               goto uninit_ppe;
+
+       return 0;
+
+uninit_ppe:
+       hns_ppe_uninit(dsaf_dev);
+
+uninit_mac:
+       hns_mac_uninit(dsaf_dev);
+
+uninit_dsaf:
+       hns_dsaf_free(dsaf_dev);
+
+free_cfg:
+       hns_dsaf_free_cfg(dsaf_dev);
+
+free_dev:
+       hns_dsaf_free_dev(dsaf_dev);
+
+       return ret;
+}
+
+/**
+ * hns_dsaf_remove - remove dsaf dev
+ * @pdev: dsaf platform device
+ */
+static int hns_dsaf_remove(struct platform_device *pdev)
+{
+       struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev);
+
+       hns_dsaf_ae_uninit(dsaf_dev);
+
+       hns_ppe_uninit(dsaf_dev);
+
+       hns_mac_uninit(dsaf_dev);
+
+       hns_dsaf_free(dsaf_dev);
+
+       hns_dsaf_free_cfg(dsaf_dev);
+
+       hns_dsaf_free_dev(dsaf_dev);
+
+       return 0;
+}
+
+static const struct of_device_id g_dsaf_match[] = {
+       {.compatible = "hisilicon,hns-dsaf-v1"},
+       {.compatible = "hisilicon,hns-dsaf-v2"},
+       {}
+};
+
+static struct platform_driver g_dsaf_driver = {
+       .probe = hns_dsaf_probe,
+       .remove = hns_dsaf_remove,
+       .driver = {
+               .name = DSAF_DRV_NAME,
+               .of_match_table = g_dsaf_match,
+       },
+};
+
+module_platform_driver(g_dsaf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HNS DSAF driver");
+MODULE_VERSION(DSAF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
new file mode 100644 (file)
index 0000000..e0417c0
--- /dev/null
@@ -0,0 +1,427 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNS_DSAF_MAIN_H
+#define __HNS_DSAF_MAIN_H
+#include "hnae.h"
+
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_mac.h"
+
+struct hns_mac_cb;
+
+#define DSAF_DRV_NAME "hns_dsaf"
+#define DSAF_MOD_VERSION "v1.0"
+
+#define ENABLE         (0x1)
+#define DISABLE                (0x0)
+
+#define HNS_DSAF_DEBUG_NW_REG_OFFSET (0x100000)
+
+#define DSAF_BASE_INNER_PORT_NUM (127)  /* mac tbl qid*/
+
+#define DSAF_MAX_CHIP_NUM (2)  /*max 2 chips */
+
+#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE (22)
+
+#define HNS_DSAF_MAX_DESC_CNT (1024)
+#define HNS_DSAF_MIN_DESC_CNT (16)
+
+#define DSAF_INVALID_ENTRY_IDX (0xffff)
+
+#define DSAF_CFG_READ_CNT   (30)
+#define DSAF_SRAM_INIT_FINISH_FLAG (0xff)
+
+#define MAC_NUM_OCTETS_PER_ADDR 6
+
+#define DSAF_DUMP_REGS_NUM 504
+#define DSAF_STATIC_NUM 28
+
+#define DSAF_STATS_READ(p, offset) (*((u64 *)((u64)(p) + (offset))))
+
+enum hal_dsaf_mode {
+       HRD_DSAF_NO_DSAF_MODE   = 0x0,
+       HRD_DSAF_MODE           = 0x1,
+};
+
+enum hal_dsaf_tc_mode {
+       HRD_DSAF_4TC_MODE               = 0x0,
+       HRD_DSAF_8TC_MODE               = 0x1,
+};
+
+struct dsaf_vm_def_vlan {
+       u32 vm_def_vlan_id;
+       u32 vm_def_vlan_cfi;
+       u32 vm_def_vlan_pri;
+};
+
+struct dsaf_tbl_tcam_data {
+       u32 tbl_tcam_data_high;
+       u32 tbl_tcam_data_low;
+};
+
+#define DSAF_PORT_MSK_NUM \
+       ((DSAF_TOTAL_QUEUE_NUM + DSAF_SERVICE_NW_NUM - 1) / 32 + 1)
+struct dsaf_tbl_tcam_mcast_cfg {
+       u8 tbl_mcast_old_en;
+       u8 tbl_mcast_item_vld;
+       u32 tbl_mcast_port_msk[DSAF_PORT_MSK_NUM];
+};
+
+struct dsaf_tbl_tcam_ucast_cfg {
+       u32 tbl_ucast_old_en;
+       u32 tbl_ucast_item_vld;
+       u32 tbl_ucast_mac_discard;
+       u32 tbl_ucast_dvc;
+       u32 tbl_ucast_out_port;
+};
+
+struct dsaf_tbl_line_cfg {
+       u32 tbl_line_mac_discard;
+       u32 tbl_line_dvc;
+       u32 tbl_line_out_port;
+};
+
+enum dsaf_port_rate_mode {
+       DSAF_PORT_RATE_1000 = 0,
+       DSAF_PORT_RATE_2500,
+       DSAF_PORT_RATE_10000
+};
+
+enum dsaf_stp_port_type {
+       DSAF_STP_PORT_TYPE_DISCARD = 0,
+       DSAF_STP_PORT_TYPE_BLOCK = 1,
+       DSAF_STP_PORT_TYPE_LISTEN = 2,
+       DSAF_STP_PORT_TYPE_LEARN = 3,
+       DSAF_STP_PORT_TYPE_FORWARD = 4
+};
+
+enum dsaf_sw_port_type {
+       DSAF_SW_PORT_TYPE_NON_VLAN = 0,
+       DSAF_SW_PORT_TYPE_ACCESS = 1,
+       DSAF_SW_PORT_TYPE_TRUNK = 2,
+};
+
+#define DSAF_SUB_BASE_SIZE                        (0x10000)
+
+/* dsaf mode define */
+enum dsaf_mode {
+       DSAF_MODE_INVALID = 0,  /**< Invalid dsaf mode */
+       DSAF_MODE_ENABLE_FIX,   /**< en DSAF-mode, fixed to queue*/
+       DSAF_MODE_ENABLE_0VM,   /**< en DSAF-mode, support 0 VM */
+       DSAF_MODE_ENABLE_8VM,   /**< en DSAF-mode, support 8 VM */
+       DSAF_MODE_ENABLE_16VM,  /**< en DSAF-mode, support 16 VM */
+       DSAF_MODE_ENABLE_32VM,  /**< en DSAF-mode, support 32 VM */
+       DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */
+       DSAF_MODE_ENABLE,               /**< modes above enable DSAF */
+       DSAF_MODE_DISABLE_FIX,  /**< non-dsaf, fixed to queue*/
+       DSAF_MODE_DISABLE_2PORT_8VM,    /**< non-dsaf, 2port 8VM */
+       DSAF_MODE_DISABLE_2PORT_16VM,   /**< non-dsaf, 2port 16VM */
+       DSAF_MODE_DISABLE_2PORT_64VM,   /**< non-dsaf, 2port 64VM */
+       DSAF_MODE_DISABLE_6PORT_0VM,    /**< non-dsaf, 6port 0VM */
+       DSAF_MODE_DISABLE_6PORT_2VM,    /**< non-dsaf, 6port 2VM */
+       DSAF_MODE_DISABLE_6PORT_4VM,    /**< non-dsaf, 6port 4VM */
+       DSAF_MODE_DISABLE_6PORT_16VM,   /**< non-dsaf, 6port 16VM */
+       DSAF_MODE_MAX           /**< the last one, use as the num */
+};
+
+#define DSAF_DEST_PORT_NUM 256 /* DSAF max port num */
+#define DSAF_WORD_BIT_CNT 32  /* the num bit of word */
+
+/*mac entry, mc or uc entry*/
+struct dsaf_drv_mac_single_dest_entry {
+       /* mac addr, match the entry*/
+       u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+       u16 in_vlan_id; /* value of VlanId */
+
+       /* the valid input port num; fixed to 0 in dsaf mode, */
+       /*      in non-dsaf mode it is the port this entry is valid for */
+       u8 in_port_num;
+
+       u8 port_num; /*output port num*/
+       u8 rsv[6];
+};
+
+/*only mc entry*/
+struct dsaf_drv_mac_multi_dest_entry {
+       /* mac addr, match the entry*/
+       u8 addr[MAC_NUM_OCTETS_PER_ADDR];
+       u16 in_vlan_id;
+       /* this mac addr output port,*/
+       /*      bit0-bit5 means Port0-Port5 (1 bit per port) */
+       u32 port_mask[DSAF_DEST_PORT_NUM / DSAF_WORD_BIT_CNT];
+
+       /* the valid input port num; fixed to 0 in dsaf mode,*/
+       /*      in non-dsaf mode it is the port this entry is valid for */
+       u8 in_port_num;
+       u8 rsv[7];
+};
+
+struct dsaf_hw_stats {
+       u64 pad_drop;
+       u64 man_pkts;
+       u64 rx_pkts;
+       u64 rx_pkt_id;
+       u64 rx_pause_frame;
+       u64 release_buf_num;
+       u64 sbm_drop;
+       u64 crc_false;
+       u64 bp_drop;
+       u64 rslt_drop;
+       u64 local_addr_false;
+       u64 vlan_drop;
+       u64 stp_drop;
+       u64 tx_pkts;
+};
+
+struct hnae_vf_cb {
+       u8 port_index;
+       struct hns_mac_cb *mac_cb;
+       struct dsaf_device *dsaf_dev;
+       struct hnae_handle  ae_handle; /* must be the last member */
+};
+
+struct dsaf_int_xge_src {
+       u32    xid_xge_ecc_err_int_src;
+       u32    xid_xge_fsm_timout_int_src;
+       u32    sbm_xge_lnk_fsm_timout_int_src;
+       u32    sbm_xge_lnk_ecc_2bit_int_src;
+       u32    sbm_xge_mib_req_failed_int_src;
+       u32    sbm_xge_mib_req_fsm_timout_int_src;
+       u32    sbm_xge_mib_rels_fsm_timout_int_src;
+       u32    sbm_xge_sram_ecc_2bit_int_src;
+       u32    sbm_xge_mib_buf_sum_err_int_src;
+       u32    sbm_xge_mib_req_extra_int_src;
+       u32    sbm_xge_mib_rels_extra_int_src;
+       u32    voq_xge_start_to_over_0_int_src;
+       u32    voq_xge_start_to_over_1_int_src;
+       u32    voq_xge_ecc_err_int_src;
+};
+
+struct dsaf_int_ppe_src {
+       u32    xid_ppe_fsm_timout_int_src;
+       u32    sbm_ppe_lnk_fsm_timout_int_src;
+       u32    sbm_ppe_lnk_ecc_2bit_int_src;
+       u32    sbm_ppe_mib_req_failed_int_src;
+       u32    sbm_ppe_mib_req_fsm_timout_int_src;
+       u32    sbm_ppe_mib_rels_fsm_timout_int_src;
+       u32    sbm_ppe_sram_ecc_2bit_int_src;
+       u32    sbm_ppe_mib_buf_sum_err_int_src;
+       u32    sbm_ppe_mib_req_extra_int_src;
+       u32    sbm_ppe_mib_rels_extra_int_src;
+       u32    voq_ppe_start_to_over_0_int_src;
+       u32    voq_ppe_ecc_err_int_src;
+       u32    xod_ppe_fifo_rd_empty_int_src;
+       u32    xod_ppe_fifo_wr_full_int_src;
+};
+
+struct dsaf_int_rocee_src {
+       u32    xid_rocee_fsm_timout_int_src;
+       u32    sbm_rocee_lnk_fsm_timout_int_src;
+       u32    sbm_rocee_lnk_ecc_2bit_int_src;
+       u32    sbm_rocee_mib_req_failed_int_src;
+       u32    sbm_rocee_mib_req_fsm_timout_int_src;
+       u32    sbm_rocee_mib_rels_fsm_timout_int_src;
+       u32    sbm_rocee_sram_ecc_2bit_int_src;
+       u32    sbm_rocee_mib_buf_sum_err_int_src;
+       u32    sbm_rocee_mib_req_extra_int_src;
+       u32    sbm_rocee_mib_rels_extra_int_src;
+       u32    voq_rocee_start_to_over_0_int_src;
+       u32    voq_rocee_ecc_err_int_src;
+};
+
+struct dsaf_int_tbl_src {
+       u32    tbl_da0_mis_src;
+       u32    tbl_da1_mis_src;
+       u32    tbl_da2_mis_src;
+       u32    tbl_da3_mis_src;
+       u32    tbl_da4_mis_src;
+       u32    tbl_da5_mis_src;
+       u32    tbl_da6_mis_src;
+       u32    tbl_da7_mis_src;
+       u32    tbl_sa_mis_src;
+       u32    tbl_old_sech_end_src;
+       u32    lram_ecc_err1_src;
+       u32    lram_ecc_err2_src;
+       u32    tram_ecc_err1_src;
+       u32    tram_ecc_err2_src;
+       u32    tbl_ucast_bcast_xge0_src;
+       u32    tbl_ucast_bcast_xge1_src;
+       u32    tbl_ucast_bcast_xge2_src;
+       u32    tbl_ucast_bcast_xge3_src;
+       u32    tbl_ucast_bcast_xge4_src;
+       u32    tbl_ucast_bcast_xge5_src;
+       u32    tbl_ucast_bcast_ppe_src;
+       u32    tbl_ucast_bcast_rocee_src;
+};
+
+struct dsaf_int_stat {
+       struct dsaf_int_xge_src dsaf_int_xge_stat[DSAF_COMM_CHN];
+       struct dsaf_int_ppe_src dsaf_int_ppe_stat[DSAF_COMM_CHN];
+       struct dsaf_int_rocee_src dsaf_int_rocee_stat[DSAF_COMM_CHN];
+       struct dsaf_int_tbl_src dsaf_int_tbl_stat[1];
+
+};
+
+/* dsaf device struct define, and mac -> dsaf */
+struct dsaf_device {
+       struct device *dev;
+       struct hnae_ae_dev ae_dev;
+
+       void *priv;
+
+       int virq[DSAF_IRQ_NUM];
+
+       u8 __iomem *sc_base;
+       u8 __iomem *sds_base;
+       u8 __iomem *ppe_base;
+       u8 __iomem *io_base;
+       u8 __iomem *cpld_base;
+
+       u32 desc_num; /*  desc num per queue*/
+       u32 buf_size; /*  ring buffer size */
+       int buf_size_type; /* ring buffer size-type */
+       enum dsaf_mode dsaf_mode;        /* dsaf mode  */
+       enum hal_dsaf_mode dsaf_en;
+       enum hal_dsaf_tc_mode dsaf_tc_mode;
+       u32 dsaf_ver;
+
+       struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
+       struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
+       struct hns_mac_cb *mac_cb;
+
+       struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
+       struct dsaf_int_stat int_stat;
+};
+
+static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
+{
+       return (void *)((u64)dsaf_dev + sizeof(*dsaf_dev));
+}
+
+struct dsaf_drv_tbl_tcam_key {
+       union {
+               struct {
+                       u8 mac_3;
+                       u8 mac_2;
+                       u8 mac_1;
+                       u8 mac_0;
+               } bits;
+
+               u32 val;
+       } high;
+       union {
+               struct {
+                       u32 port:4; /* port id, */
+                       /* dsaf-mode fixed 0, non-dsaf-mode port id*/
+                       u32 vlan:12; /* vlan id */
+                       u32 mac_5:8;
+                       u32 mac_4:8;
+               } bits;
+
+               u32 val;
+       } low;
+};
+
+struct dsaf_drv_soft_mac_tbl {
+       struct dsaf_drv_tbl_tcam_key tcam_key;
+       u16 index; /*the entry's index in tcam tab*/
+};
+
+struct dsaf_drv_priv {
+       /* soft tab Mac key, for hardware tab*/
+       struct dsaf_drv_soft_mac_tbl *soft_mac_tbl;
+};
+
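+/* select the tcam entry address used by subsequent tcam table accesses */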
+static inline void hns_dsaf_tbl_tcam_addr_cfg(struct dsaf_device *dsaf_dev,
+                                             u32 tab_tcam_addr)
+{
+       dsaf_set_dev_field(dsaf_dev, DSAF_TBL_TCAM_ADDR_0_REG,
+                          DSAF_TBL_TCAM_ADDR_M, DSAF_TBL_TCAM_ADDR_S,
+                          tab_tcam_addr);
+}
+
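+/* pulse the tcam load bit in the table pull register (set, then clear) */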
+static inline void hns_dsaf_tbl_tcam_load_pul(struct dsaf_device *dsaf_dev)
+{
+       u32 o_tbl_pul;
+
+       o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 1);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+       dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 0);
+       dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
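+/* select the line table address used by subsequent line table accesses */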
+static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
+                                             u32 tab_line_addr)
+{
+       dsaf_set_dev_field(dsaf_dev, DSAF_TBL_LINE_ADDR_0_REG,
+                          DSAF_TBL_LINE_ADDR_M, DSAF_TBL_LINE_ADDR_S,
+                          tab_line_addr);
+}
+
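+/* map a port to its communication channel index: ports below DSAF_COMM_CHN
+ * and port DSAF_MAX_PORT_NUM_PER_CHIP map to 0, the rest follow in order
+ */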
+static inline int hns_dsaf_get_comm_idx_by_port(int port)
+{
+       if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP))
+               return 0;
+       else
+               return (port - DSAF_COMM_CHN + 1);
+}
+
+static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
+       struct hnae_handle *handle)
+{
+       return container_of(handle, struct hnae_vf_cb, ae_handle);
+}
+
+int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
+                             struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_set_mac_mc_entry(struct dsaf_device *dsaf_dev,
+                             struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+                            struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+                          u8 in_port_num, u8 *addr);
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+                            struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
+                             struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
+                             struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+int hns_dsaf_get_mac_entry_by_index(
+       struct dsaf_device *dsaf_dev,
+       u16 entry_index,
+       struct dsaf_drv_mac_multi_dest_entry *mac_entry);
+
+void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val);
+
+void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+
+void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val);
+
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
+void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+                                   u32 port, u32 val);
+
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
+
+int hns_dsaf_get_sset_count(int stringset);
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
+void hns_dsaf_get_strings(int stringset, u8 *data, int port);
+
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
+int hns_dsaf_get_regs_count(void);
+
+#endif /* __HNS_DSAF_MAIN_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
new file mode 100644 (file)
index 0000000..d611388
--- /dev/null
@@ -0,0 +1,317 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_ppe.h"
+
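+/* update the cpld led register for a mac port: when the link is up the
+ * link bit, speed field and data bit are written; otherwise the default
+ * led value is restored
+ */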
+void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+                     u16 speed, int data)
+{
+       int speed_reg = 0;
+       u8 value;
+
+       if (!mac_cb) {
+               pr_err("sfp_led_opt mac_dev is null!\n");
+               return;
+       }
+       if (!mac_cb->cpld_vaddr) {
+               dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n",
+                       mac_cb->mac_id);
+               return;
+       }
+
+       if (speed == MAC_SPEED_10000)
+               speed_reg = 1;
+
+       value = mac_cb->cpld_led_value;
+
+       if (link_status) {
+               dsaf_set_bit(value, DSAF_LED_LINK_B, link_status);
+               dsaf_set_field(value, DSAF_LED_SPEED_M,
+                              DSAF_LED_SPEED_S, speed_reg);
+               dsaf_set_bit(value, DSAF_LED_DATA_B, data);
+
+               if (value != mac_cb->cpld_led_value) {
+                       dsaf_write_b(mac_cb->cpld_vaddr, value);
+                       mac_cb->cpld_led_value = value;
+               }
+       } else {
+               dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+               mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+       }
+}
+
+void cpld_led_reset(struct hns_mac_cb *mac_cb)
+{
+       if (!mac_cb || !mac_cb->cpld_vaddr)
+               return;
+
+       dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+       mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+}
+
+int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+                   enum hnae_led_state status)
+{
+       switch (status) {
+       case HNAE_LED_ACTIVE:
+               mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
+               return 2;
+       case HNAE_LED_ON:
+               dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+                            CPLD_LED_ON_VALUE);
+               dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+               break;
+       case HNAE_LED_OFF:
+               dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+                            CPLD_LED_DEFAULT_VALUE);
+               dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+               break;
+       case HNAE_LED_INACTIVE:
+               dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+                            CPLD_LED_DEFAULT_VALUE);
+               dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+#define RESET_REQ_OR_DREQ 1
+
+void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
+{
+       u32 xbar_reg_addr;
+       u32 nt_reg_addr;
+
+       if (!val) {
+               xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
+               nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
+       } else {
+               xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_DREQ_REG;
+               nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
+       }
+
+       dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr,
+                      RESET_REQ_OR_DREQ);
+       dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr,
+                      RESET_REQ_OR_DREQ);
+}
+
+void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+       u32 reg_val = 0;
+       u32 reg_addr;
+
+       if (port >= DSAF_XGE_NUM)
+               return;
+
+       reg_val |= RESET_REQ_OR_DREQ;
+       reg_val |= 0x2082082 << port;
+
+       if (val == 0)
+               reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+       else
+               reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+       dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+                                   u32 port, u32 val)
+{
+       u32 reg_val = 0;
+       u32 reg_addr;
+
+       if (port >= DSAF_XGE_NUM)
+               return;
+
+       reg_val |= XGMAC_TRX_CORE_SRST_M << port;
+
+       if (val == 0)
+               reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+       else
+               reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+       dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+       u32 reg_val_1;
+       u32 reg_val_2;
+
+       if (port >= DSAF_GE_NUM)
+               return;
+
+       if (port < DSAF_SERVICE_NW_NUM) {
+               reg_val_1  = 0x1 << port;
+               reg_val_2  = 0x1041041 << port;
+
+               if (val == 0) {
+                       dsaf_write_reg(dsaf_dev->sc_base,
+                                      DSAF_SUB_SC_GE_RESET_REQ1_REG,
+                                      reg_val_1);
+
+                       dsaf_write_reg(dsaf_dev->sc_base,
+                                      DSAF_SUB_SC_GE_RESET_REQ0_REG,
+                                      reg_val_2);
+               } else {
+                       dsaf_write_reg(dsaf_dev->sc_base,
+                                      DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+                                      reg_val_2);
+
+                       dsaf_write_reg(dsaf_dev->sc_base,
+                                      DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+                                      reg_val_1);
+               }
+       } else {
+               reg_val_1 = 0x15540 << (port - 6);
+               reg_val_2 = 0x100 << (port - 6);
+
+               if (val == 0) {
+                       dsaf_write_reg(dsaf_dev->sc_base,
+                                      DSAF_SUB_SC_GE_RESET_REQ1_REG,
+                                      reg_val_1);
+
+                       dsaf_write_reg(dsaf_dev->sc_base,
+                                      DSAF_SUB_SC_PPE_RESET_REQ_REG,
+                                      reg_val_2);
+               } else {
+                       dsaf_write_reg(dsaf_dev->sc_base,
+                                      DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+                                      reg_val_1);
+
+                       dsaf_write_reg(dsaf_dev->sc_base,
+                                      DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+                                      reg_val_2);
+               }
+       }
+}
+
+void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+{
+       u32 reg_val = 0;
+       u32 reg_addr;
+
+       reg_val |= RESET_REQ_OR_DREQ << port;
+
+       if (val == 0)
+               reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+       else
+               reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+
+       dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
+{
+       int comm_index = ppe_common->comm_index;
+       struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
+       u32 reg_val;
+       u32 reg_addr;
+
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+               reg_val = RESET_REQ_OR_DREQ;
+               if (val == 0)
+                       reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
+               else
+                       reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
+
+       } else {
+               reg_val = 0x100 << (comm_index - 1);
+
+               if (val == 0)
+                       reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+               else
+                       reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+       }
+
+       dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+}
+
+/**
+ * hns_mac_get_phy_if - get phy interface from serdes mode
+ * @mac_cb: mac control block
+ * return phy interface
+ */
+phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
+{
+       u32 hilink3_mode;
+       u32 hilink4_mode;
+       void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
+       int dev_id = mac_cb->mac_id;
+       phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+
+       hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG);
+       hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG);
+       if (dev_id >= 0 && dev_id <= 3) {
+               if (hilink4_mode == 0)
+                       phy_if = PHY_INTERFACE_MODE_SGMII;
+               else
+                       phy_if = PHY_INTERFACE_MODE_XGMII;
+       } else if (dev_id >= 4 && dev_id <= 5) {
+               if (hilink3_mode == 0)
+                       phy_if = PHY_INTERFACE_MODE_SGMII;
+               else
+                       phy_if = PHY_INTERFACE_MODE_XGMII;
+       } else {
+               phy_if = PHY_INTERFACE_MODE_SGMII;
+       }
+
+       dev_dbg(mac_cb->dev,
+               "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
+               hilink3_mode, hilink4_mode, dev_id, phy_if);
+       return phy_if;
+}
+
+/**
+ * hns_mac_config_sds_loopback - set loopback for serdes
+ * @mac_cb: mac control block
+ * @en: enable (1) or disable (0) loopback
+ * return 0 == success
+ */
+int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
+{
+       /* port 0-3 hilink4 base is serdes_vaddr + 0x00280000
+        * port 4-7 hilink3 base is serdes_vaddr + 0x00200000
+        */
+       u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+                      (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
+       const u8 lane_id[] = {
+               0,      /* mac 0 -> lane 0 */
+               1,      /* mac 1 -> lane 1 */
+               2,      /* mac 2 -> lane 2 */
+               3,      /* mac 3 -> lane 3 */
+               2,      /* mac 4 -> lane 2 */
+               3,      /* mac 5 -> lane 3 */
+               0,      /* mac 6 -> lane 0 */
+               1       /* mac 7 -> lane 1 */
+       };
+#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
+       u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
+
+       int sfp_prsnt;
+       int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+
+       if (!mac_cb->phy_node) {
+               if (ret)
+                       pr_info("please confirm sfp is present or not\n");
+               else
+                       if (!sfp_prsnt)
+                               pr_info("no sfp in this eth\n");
+       }
+
+       dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+
+       return 0;
+}
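
The address arithmetic in hns_mac_config_sds_loopback() above is easier to follow in isolation. A minimal stand-alone sketch mirroring the lane table and the RX_CSR() macro (the field at bit 10 that the driver writes is the loopback enable):

    #include <stdio.h>
    #include <stdint.h>

    /* mac id -> serdes lane, copied from the table in the driver above */
    static const unsigned int lane_id[8] = { 0, 1, 2, 3, 2, 3, 0, 1 };

    /* same shape as the RX_CSR(lane, reg) macro above */
    static uint64_t rx_csr(unsigned int lane, unsigned int reg)
    {
            return (0x4080 + reg * 0x0002 + lane * 0x0200) * 2;
    }

    int main(void)
    {
            unsigned int mac;

            for (mac = 0; mac < 8; mac++) {
                    /* macs 0-3 sit behind hilink4, macs 4-7 behind hilink3 */
                    unsigned int hilink_off = mac <= 3 ? 0x00280000 : 0x00200000;

                    printf("mac %u: serdes base +0x%06x, loopback reg offset 0x%llx\n",
                           mac, hilink_off,
                           (unsigned long long)rx_csr(lane_id[mac], 0));
            }
            return 0;
    }
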
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
new file mode 100644 (file)
index 0000000..419f07a
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_MISC_H
+#define _HNS_DSAF_MISC_H
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_mac.h"
+
+#define CPLD_ADDR_PORT_OFFSET  0x4
+
+#define HS_LED_ON              0xE
+#define HS_LED_OFF             0xF
+
+#define CPLD_LED_ON_VALUE      1
+#define CPLD_LED_DEFAULT_VALUE 0
+
+#define MAC_SFP_PORT_OFFSET    0x2
+
+#define DSAF_LED_SPEED_S 0
+#define DSAF_LED_SPEED_M (0x3 << DSAF_LED_SPEED_S)
+
+#define DSAF_LED_LINK_B 2
+#define DSAF_LED_DATA_B 4
+#define DSAF_LED_ANCHOR_B 5
+
+void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+                     u16 speed, int data);
+void cpld_led_reset(struct hns_mac_cb *mac_cb);
+int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+                   enum hnae_led_state status);
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
new file mode 100644 (file)
index 0000000..67f33f1
--- /dev/null
@@ -0,0 +1,583 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include "hns_dsaf_ppe.h"
+
+static void __iomem *hns_ppe_common_get_ioaddr(
+       struct ppe_common_cb *ppe_common)
+{
+       void __iomem *base_addr;
+
+       int idx = ppe_common->comm_index;
+
+       if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
+               base_addr = ppe_common->dsaf_dev->ppe_base
+                       + PPE_COMMON_REG_OFFSET;
+       else
+               base_addr = ppe_common->dsaf_dev->sds_base
+                       + (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
+                       + PPE_COMMON_REG_OFFSET;
+
+       return base_addr;
+}
+
+/**
+ * hns_ppe_common_get_cfg - get ppe common config
+ * @dsaf_dev: dsaf device
+ * @comm_index: common index
+ * return 0 - success, negative - fail
+ */
+int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+{
+       struct ppe_common_cb *ppe_common;
+       int ppe_num;
+
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+               ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
+       else
+               ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
+
+       ppe_common = devm_kzalloc(dsaf_dev->dev, sizeof(*ppe_common) +
+               ppe_num * sizeof(struct hns_ppe_cb), GFP_KERNEL);
+       if (!ppe_common)
+               return -ENOMEM;
+
+       ppe_common->ppe_num = ppe_num;
+       ppe_common->dsaf_dev = dsaf_dev;
+       ppe_common->comm_index = comm_index;
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+               ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
+       else
+               ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
+       ppe_common->dev = dsaf_dev->dev;
+
+       ppe_common->io_base = hns_ppe_common_get_ioaddr(ppe_common);
+
+       dsaf_dev->ppe_common[comm_index] = ppe_common;
+
+       return 0;
+}
+
+void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+{
+       dsaf_dev->ppe_common[comm_index] = NULL;
+}
+
+static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+                                       int ppe_idx)
+{
+       void __iomem *base_addr;
+       int common_idx = ppe_common->comm_index;
+
+       if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+               base_addr = ppe_common->dsaf_dev->ppe_base +
+                       ppe_idx * PPE_REG_OFFSET;
+
+       } else {
+               base_addr = ppe_common->dsaf_dev->sds_base +
+                       (common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET;
+       }
+
+       return base_addr;
+}
+
+static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx)
+{
+       int port;
+
+       if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE)
+               port = idx;
+       else
+               port = HNS_PPE_SERVICE_NW_ENGINE_NUM
+                       + ppe_common->comm_index - 1;
+
+       return port;
+}
+
+static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
+{
+       u32 i;
+       struct hns_ppe_cb *ppe_cb;
+       u32 ppe_num = ppe_common->ppe_num;
+
+       for (i = 0; i < ppe_num; i++) {
+               ppe_cb = &ppe_common->ppe_cb[i];
+               ppe_cb->dev = ppe_common->dev;
+               ppe_cb->next = NULL;
+               ppe_cb->ppe_common_cb = ppe_common;
+               ppe_cb->index = i;
+               ppe_cb->port = hns_ppe_get_port(ppe_common, i);
+               ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
+               ppe_cb->virq = 0;
+       }
+}
+
+static void hns_ppe_cnt_clr_ce(struct hns_ppe_cb *ppe_cb)
+{
+       dsaf_set_dev_bit(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG,
+                        PPE_CNT_CLR_CE_B, 1);
+}
+
+/**
+ * hns_ppe_checksum_hw - configure ppe checksum calculation
+ * @ppe_cb: ppe control block
+ * @value: checksum enable value
+ */
+static void hns_ppe_checksum_hw(struct hns_ppe_cb *ppe_cb, u32 value)
+{
+       dsaf_set_dev_field(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG,
+                          0xfffffff, 0, value);
+}
+
+static void hns_ppe_set_qid_mode(struct ppe_common_cb *ppe_common,
+                                enum ppe_qid_mode qid_mode)
+{
+       dsaf_set_dev_field(ppe_common, PPE_COM_CFG_QID_MODE_REG,
+                          PPE_CFG_QID_MODE_CF_QID_MODE_M,
+                          PPE_CFG_QID_MODE_CF_QID_MODE_S, qid_mode);
+}
+
+/**
+ * hns_ppe_set_qid - set ppe qid
+ * @ppe_common: ppe common device
+ * @qid: queue id
+ */
+static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
+{
+       u32 qid_mod = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+
+       if (!dsaf_get_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+                           PPE_CFG_QID_MODE_DEF_QID_S)) {
+               dsaf_set_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+                              PPE_CFG_QID_MODE_DEF_QID_S, qid);
+               dsaf_write_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG, qid_mod);
+       }
+}
+
+/**
+ * hns_ppe_set_port_mode - set port mode
+ * @ppe_cb: ppe control block
+ * @mode: port mode (GE or XGE)
+ */
+static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
+                                 enum ppe_port_mode mode)
+{
+       dsaf_write_dev(ppe_cb, PPE_CFG_XGE_MODE_REG, mode);
+}
+
+/**
+ * hns_ppe_common_init_hw - init ppe common device
+ * @ppe_common: ppe common device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
+{
+       enum ppe_qid_mode qid_mode;
+       enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode;
+
+       hns_ppe_com_srst(ppe_common, 0);
+       mdelay(100);
+       hns_ppe_com_srst(ppe_common, 1);
+       mdelay(100);
+
+       if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+               switch (dsaf_mode) {
+               case DSAF_MODE_ENABLE_FIX:
+               case DSAF_MODE_DISABLE_FIX:
+                       qid_mode = PPE_QID_MODE0;
+                       hns_ppe_set_qid(ppe_common, 0);
+                       break;
+               case DSAF_MODE_ENABLE_0VM:
+               case DSAF_MODE_DISABLE_2PORT_64VM:
+                       qid_mode = PPE_QID_MODE3;
+                       break;
+               case DSAF_MODE_ENABLE_8VM:
+               case DSAF_MODE_DISABLE_2PORT_16VM:
+                       qid_mode = PPE_QID_MODE4;
+                       break;
+               case DSAF_MODE_ENABLE_16VM:
+               case DSAF_MODE_DISABLE_6PORT_0VM:
+                       qid_mode = PPE_QID_MODE5;
+                       break;
+               case DSAF_MODE_ENABLE_32VM:
+               case DSAF_MODE_DISABLE_6PORT_16VM:
+                       qid_mode = PPE_QID_MODE2;
+                       break;
+               case DSAF_MODE_ENABLE_128VM:
+               case DSAF_MODE_DISABLE_6PORT_4VM:
+                       qid_mode = PPE_QID_MODE1;
+                       break;
+               case DSAF_MODE_DISABLE_2PORT_8VM:
+                       qid_mode = PPE_QID_MODE7;
+                       break;
+               case DSAF_MODE_DISABLE_6PORT_2VM:
+                       qid_mode = PPE_QID_MODE6;
+                       break;
+               default:
+                       dev_err(ppe_common->dev,
+                               "get ppe queue mode failed! dsaf_mode=%d\n",
+                               dsaf_mode);
+                       return -EINVAL;
+               }
+               hns_ppe_set_qid_mode(ppe_common, qid_mode);
+       }
+
+       dsaf_set_dev_bit(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG,
+                        PPE_COMMON_CNT_CLR_CE_B, 1);
+
+       return 0;
+}
+
+/* clear and (un)mask the ppe exception irqs */
+static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
+{
+       u32 clr_value = 0xfffffffful;
+       u32 msk_value = en ? 0xfffffffful : 0; /* 1 enables, 0 disables */
+       u32 vld_msk = 0;
+
+       /* only bits 0, 1 and 7 are valid */
+       dsaf_set_bit(vld_msk, 0, 1);
+       dsaf_set_bit(vld_msk, 1, 1);
+       dsaf_set_bit(vld_msk, 7, 1);
+
+       /* clear status */
+       dsaf_write_dev(ppe_cb, PPE_RINT_REG, clr_value);
+
+       /* the remaining bits are reserved, so keep them at 0 */
+       dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_value & vld_msk);
+}
+
+/**
+ * hns_ppe_init_hw - init a single ppe
+ * @ppe_cb: ppe control block
+ */
+static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
+{
+       struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
+       u32 port = ppe_cb->port;
+       struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
+
+       hns_ppe_srst_by_port(dsaf_dev, port, 0);
+       mdelay(10);
+       hns_ppe_srst_by_port(dsaf_dev, port, 1);
+
+       /* clr and msk except irq*/
+       hns_ppe_exc_irq_en(ppe_cb, 0);
+
+       if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG)
+               hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
+       else
+               hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+       hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
+       hns_ppe_cnt_clr_ce(ppe_cb);
+}
+
+/**
+ * hns_ppe_uninit_hw - uninit a single ppe
+ * @ppe_cb: ppe control block
+ */
+static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
+{
+       u32 port;
+
+       if (ppe_cb->ppe_common_cb) {
+               port = ppe_cb->index;
+               hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0);
+       }
+}
+
+void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+{
+       u32 i;
+
+       for (i = 0; i < ppe_common->ppe_num; i++) {
+               hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+               memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
+       }
+}
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev)
+{
+       u32 i;
+
+       for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+               if (dsaf_dev->ppe_common[i])
+                       hns_ppe_uninit_ex(dsaf_dev->ppe_common[i]);
+               hns_rcb_common_free_cfg(dsaf_dev, i);
+               hns_ppe_common_free_cfg(dsaf_dev, i);
+       }
+}
+
+/**
+ * hns_ppe_reset_common - reinit ppe/rcb hw
+ * @dsaf_dev: dsaf device
+ * @ppe_common_index: ppe common index
+ */
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
+{
+       u32 i;
+       int ret;
+       struct ppe_common_cb *ppe_common;
+
+       ppe_common = dsaf_dev->ppe_common[ppe_common_index];
+       ret = hns_ppe_common_init_hw(ppe_common);
+       if (ret)
+               return;
+
+       ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
+       if (ret)
+               return;
+
+       for (i = 0; i < ppe_common->ppe_num; i++)
+               hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+
+       hns_rcb_common_init_commit_hw(dsaf_dev->rcb_common[ppe_common_index]);
+}
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
+{
+       struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+       hw_stats->rx_pkts_from_sw
+               += dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+       hw_stats->rx_pkts
+               += dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+       hw_stats->rx_drop_no_bd
+               += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+       hw_stats->rx_alloc_buf_fail
+               += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+       hw_stats->rx_alloc_buf_wait
+               += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+       hw_stats->rx_drop_no_buf
+               += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+       hw_stats->rx_err_fifo_full
+               += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+       hw_stats->tx_bd_form_rcb
+               += dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+       hw_stats->tx_pkts_from_rcb
+               += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+       hw_stats->tx_pkts
+               += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+       hw_stats->tx_err_fifo_empty
+               += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+       hw_stats->tx_err_checksum
+               += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+}
+
+int hns_ppe_get_sset_count(int stringset)
+{
+       if (stringset == ETH_SS_STATS)
+               return ETH_PPE_STATIC_NUM;
+       return 0;
+}
+
+int hns_ppe_get_regs_count(void)
+{
+       return ETH_PPE_DUMP_NUM;
+}
+
+/**
+ * hns_ppe_get_strings - get ppe statistics strings
+ * @ppe_cb: ppe control block
+ * @stringset: string set type
+ * @data: output string
+ */
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
+{
+       char *buff = (char *)data;
+       int index = ppe_cb->index;
+
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_sw_pkt", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_ok", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_drop_pkt_no_bd", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_fail", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_wait", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_drop_no_buf", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_err_fifo_full", index);
+       buff = buff + ETH_GSTRING_LEN;
+
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_bd", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_ok", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_fifo_empty", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_csum_fail", index);
+}
+
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
+{
+       u64 *regs_buff = data;
+       struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+       regs_buff[0] = hw_stats->rx_pkts_from_sw;
+       regs_buff[1] = hw_stats->rx_pkts;
+       regs_buff[2] = hw_stats->rx_drop_no_bd;
+       regs_buff[3] = hw_stats->rx_alloc_buf_fail;
+       regs_buff[4] = hw_stats->rx_alloc_buf_wait;
+       regs_buff[5] = hw_stats->rx_drop_no_buf;
+       regs_buff[6] = hw_stats->rx_err_fifo_full;
+
+       regs_buff[7] = hw_stats->tx_bd_form_rcb;
+       regs_buff[8] = hw_stats->tx_pkts_from_rcb;
+       regs_buff[9] = hw_stats->tx_pkts;
+       regs_buff[10] = hw_stats->tx_err_fifo_empty;
+       regs_buff[11] = hw_stats->tx_err_checksum;
+}
+
+/**
+ * hns_ppe_init - init ppe device
+ * @dsaf_dev: dsaf device
+ * return 0 - success, negative - fail
+ */
+int hns_ppe_init(struct dsaf_device *dsaf_dev)
+{
+       int i, k;
+       int ret;
+
+       for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+               ret = hns_ppe_common_get_cfg(dsaf_dev, i);
+               if (ret)
+                       goto get_ppe_cfg_fail;
+
+               ret = hns_rcb_common_get_cfg(dsaf_dev, i);
+               if (ret)
+                       goto get_rcb_cfg_fail;
+
+               hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
+
+               hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+       }
+
+       for (i = 0; i < HNS_PPE_COM_NUM; i++)
+               hns_ppe_reset_common(dsaf_dev, i);
+
+       return 0;
+
+get_rcb_cfg_fail:
+       hns_ppe_common_free_cfg(dsaf_dev, i);
+get_ppe_cfg_fail:
+       for (k = i - 1; k >= 0; k--) {
+               hns_rcb_common_free_cfg(dsaf_dev, k);
+               hns_ppe_common_free_cfg(dsaf_dev, k);
+       }
+       return ret;
+}
+
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data)
+{
+       struct ppe_common_cb *ppe_common = ppe_cb->ppe_common_cb;
+       u32 *regs = data;
+       u32 i;
+       u32 offset;
+
+       /* ppe common registers */
+       regs[0] = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+       regs[1] = dsaf_read_dev(ppe_common, PPE_COM_INTEN_REG);
+       regs[2] = dsaf_read_dev(ppe_common, PPE_COM_RINT_REG);
+       regs[3] = dsaf_read_dev(ppe_common, PPE_COM_INTSTS_REG);
+       regs[4] = dsaf_read_dev(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG);
+
+       for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++) {
+               offset = PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 0x4 * i;
+               regs[5 + i] = dsaf_read_dev(ppe_common, offset);
+               offset = PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 0x4 * i;
+               regs[5 + i + DSAF_TOTAL_QUEUE_NUM]
+                               = dsaf_read_dev(ppe_common, offset);
+               offset = PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 0x4 * i;
+               regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 2]
+                               = dsaf_read_dev(ppe_common, offset);
+               offset = PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 0x4 * i;
+               regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 3]
+                               = dsaf_read_dev(ppe_common, offset);
+       }
+
+       /* mark end of ppe regs */
+       for (i = 521; i < 524; i++)
+               regs[i] = 0xeeeeeeee;
+
+       /* ppe channel registers */
+       regs[525] = dsaf_read_dev(ppe_cb, PPE_CFG_TX_FIFO_THRSLD_REG);
+       regs[526] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_THRSLD_REG);
+       regs[527] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG);
+       regs[528] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG);
+       regs[529] = dsaf_read_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG);
+       regs[530] = dsaf_read_dev(ppe_cb, PPE_CFG_BUS_CTRL_REG);
+       regs[531] = dsaf_read_dev(ppe_cb, PPE_CFG_TNL_TO_BE_RST_REG);
+       regs[532] = dsaf_read_dev(ppe_cb, PPE_CURR_TNL_CAN_RST_REG);
+
+       regs[533] = dsaf_read_dev(ppe_cb, PPE_CFG_XGE_MODE_REG);
+       regs[534] = dsaf_read_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG);
+       regs[535] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_MODE_REG);
+       regs[536] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_VLAN_TAG_REG);
+       regs[537] = dsaf_read_dev(ppe_cb, PPE_CFG_TAG_GEN_REG);
+       regs[538] = dsaf_read_dev(ppe_cb, PPE_CFG_PARSE_TAG_REG);
+       regs[539] = dsaf_read_dev(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG);
+
+       regs[540] = dsaf_read_dev(ppe_cb, PPE_INTEN_REG);
+       regs[541] = dsaf_read_dev(ppe_cb, PPE_RINT_REG);
+       regs[542] = dsaf_read_dev(ppe_cb, PPE_INTSTS_REG);
+       regs[543] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_INT_REG);
+
+       regs[544] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME0_REG);
+       regs[545] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME1_REG);
+
+       /* ppe static */
+       regs[546] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+       regs[547] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+       regs[548] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+       regs[549] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+       regs[550] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+       regs[551] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+       regs[552] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+       regs[553] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+       regs[554] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+       regs[555] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+       regs[556] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+       regs[557] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+       regs[558] = dsaf_read_dev(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG);
+       regs[559] = dsaf_read_dev(ppe_cb, PPE_CFG_AXI_DBG_REG);
+       regs[560] = dsaf_read_dev(ppe_cb, PPE_HIS_PRO_ERR_REG);
+       regs[561] = dsaf_read_dev(ppe_cb, PPE_HIS_TNL_FIFO_ERR_REG);
+       regs[562] = dsaf_read_dev(ppe_cb, PPE_CURR_CFF_DATA_NUM_REG);
+       regs[563] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_ST_REG);
+       regs[564] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_ST_REG);
+       regs[565] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO0_REG);
+       regs[566] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO1_REG);
+       regs[567] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG);
+       regs[568] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO1_REG);
+       regs[569] = dsaf_read_dev(ppe_cb, PPE_ECO0_REG);
+       regs[570] = dsaf_read_dev(ppe_cb, PPE_ECO1_REG);
+       regs[571] = dsaf_read_dev(ppe_cb, PPE_ECO2_REG);
+
+       /* mark end of ppe regs */
+       for (i = 572; i < 576; i++)
+               regs[i] = 0xeeeeeeee;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
new file mode 100644 (file)
index 0000000..4894f9a
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_PPE_H
+#define _HNS_DSAF_PPE_H
+
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
+
+#define HNS_PPE_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_PPE_DEBUG_NW_ENGINE_NUM 1
+#define HNS_PPE_COM_NUM DSAF_COMM_DEV_NUM
+
+#define PPE_COMMON_REG_OFFSET 0x70000
+#define PPE_REG_OFFSET 0x10000
+
+#define ETH_PPE_DUMP_NUM 576
+#define ETH_PPE_STATIC_NUM 12
+enum ppe_qid_mode {
+       PPE_QID_MODE0 = 0,      /* fixed queue id mode */
+       PPE_QID_MODE1,          /* switch:128VM non switch:6Port/4VM/4TC */
+       PPE_QID_MODE2,          /* switch:32VM/4TC non switch:6Port/16VM */
+       PPE_QID_MODE3,          /* switch:4TC/8TAG non switch:2Port/64VM */
+       PPE_QID_MODE4,          /* switch:8VM/16TAG non switch:2Port/16VM/4TC */
+       PPE_QID_MODE5,          /* non switch:6Port/16TAG */
+       PPE_QID_MODE6,          /* non switch:6Port/2VM/8TC */
+       PPE_QID_MODE7,          /* non switch:2Port/8VM/8TC */
+};
+
+enum ppe_port_mode {
+       PPE_MODE_GE = 0,
+       PPE_MODE_XGE,
+};
+
+enum ppe_common_mode {
+       PPE_COMMON_MODE_DEBUG = 0,
+       PPE_COMMON_MODE_SERVICE,
+       PPE_COMMON_MODE_MAX
+};
+
+struct hns_ppe_hw_stats {
+       u64 rx_pkts_from_sw;
+       u64 rx_pkts;
+       u64 rx_drop_no_bd;
+       u64 rx_alloc_buf_fail;
+       u64 rx_alloc_buf_wait;
+       u64 rx_drop_no_buf;
+       u64 rx_err_fifo_full;
+       u64 tx_bd_form_rcb;
+       u64 tx_pkts_from_rcb;
+       u64 tx_pkts;
+       u64 tx_err_fifo_empty;
+       u64 tx_err_checksum;
+};
+
+struct hns_ppe_cb {
+       struct device *dev;
+       struct hns_ppe_cb *next;        /* pointer to next ppe device */
+       struct ppe_common_cb *ppe_common_cb; /* belong to */
+       struct hns_ppe_hw_stats hw_stats;
+
+       u8 index;       /* index in a ppe common device */
+       u8 port;                         /* port id in dsaf  */
+       void __iomem *io_base;
+       int virq;
+};
+
+struct ppe_common_cb {
+       struct device *dev;
+       struct dsaf_device *dsaf_dev;
+       void __iomem *io_base;
+
+       enum ppe_common_mode ppe_mode;
+
+       u8 comm_index;   /*ppe_common index*/
+
+       u32 ppe_num;
+       struct hns_ppe_cb ppe_cb[0];
+
+};
+
+int hns_ppe_init(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index);
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb);
+
+int hns_ppe_get_sset_count(int stringset);
+int hns_ppe_get_regs_count(void);
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data);
+
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data);
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data);
+#endif /* _HNS_DSAF_PPE_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
new file mode 100644 (file)
index 0000000..50f3427
--- /dev/null
@@ -0,0 +1,1023 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/cacheflush.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define RCB_COMMON_REG_OFFSET 0x80000
+#define TX_RING 0
+#define RX_RING 1
+
+#define RCB_RESET_WAIT_TIMES 30
+#define RCB_RESET_TRY_TIMES 10
+
+/**
+ *hns_rcb_wait_fbd_clean - wait for the fbd to be cleaned
+ *@qs: ring struct pointer array
+ *@q_num: number of array entries
+ *@flag: tx or rx flag
+ */
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
+{
+       int i, wait_cnt;
+       u32 fbd_num;
+
+       for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
+               usleep_range(200, 300);
+               fbd_num = 0;
+               if (flag & RCB_INT_FLAG_TX)
+                       fbd_num += dsaf_read_dev(qs[i],
+                                                RCB_RING_TX_RING_FBDNUM_REG);
+               if (flag & RCB_INT_FLAG_RX)
+                       fbd_num += dsaf_read_dev(qs[i],
+                                                RCB_RING_RX_RING_FBDNUM_REG);
+               if (!fbd_num)
+                       i++;
+               if (wait_cnt >= 10000)
+                       break;
+       }
+
+       if (i < q_num)
+               dev_err(qs[i]->handle->owner_dev,
+                       "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
+}
+
+/**
+ *hns_rcb_reset_ring_hw - ring reset
+ *@q: ring struct pointer
+ */
+void hns_rcb_reset_ring_hw(struct hnae_queue *q)
+{
+       u32 wait_cnt;
+       u32 try_cnt = 0;
+       u32 could_ret;
+
+       u32 tx_fbd_num;
+
+       while (try_cnt++ < RCB_RESET_TRY_TIMES) {
+               usleep_range(100, 200);
+               tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
+               if (tx_fbd_num)
+                       continue;
+
+               dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);
+
+               dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+               msleep(20);
+               could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+               wait_cnt = 0;
+               while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
+                       dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+                       dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+                       msleep(20);
+                       could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+                       wait_cnt++;
+               }
+
+               dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+               if (could_ret)
+                       break;
+       }
+
+       if (try_cnt >= RCB_RESET_TRY_TIMES)
+               dev_err(q->dev->dev, "port%d reset ring fail\n",
+                       hns_ae_get_vf_cb(q->handle)->port_index);
+}
+
+/**
+ *hns_rcb_int_ctrl_hw - rcb irq enable control
+ *@q: hnae queue struct pointer
+ *@flag:ring flag tx or rx
+ *@mask:mask
+ */
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
+{
+       u32 int_mask_en = !!mask;
+
+       if (flag & RCB_INT_FLAG_TX) {
+               dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
+               dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
+                              int_mask_en);
+       }
+
+       if (flag & RCB_INT_FLAG_RX) {
+               dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
+               dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
+                              int_mask_en);
+       }
+}
+
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
+{
+       u32 clr = 1;
+
+       if (flag & RCB_INT_FLAG_TX) {
+               dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, clr);
+               dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, clr);
+       }
+
+       if (flag & RCB_INT_FLAG_RX) {
+               dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, clr);
+               dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, clr);
+       }
+}
+
+/**
+ *hns_rcb_ring_enable_hw - enable ring
+ *@q: hnae queue struct pointer
+ *@val: enable (non-zero) or disable (0)
+ */
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
+{
+       dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
+}
+
+void hns_rcb_start(struct hnae_queue *q, u32 val)
+{
+       hns_rcb_ring_enable_hw(q, val);
+}
+
+/**
+ *hns_rcb_common_init_commit_hw - make rcb common init completed
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
+{
+       wmb();  /* Sync point before breakpoint */
+       dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
+       wmb();  /* Sync point after breakpoint */
+}
+
+/**
+ *hns_rcb_ring_init - init rcb ring
+ *@ring_pair: ring pair control block
+ *@ring_type: ring type, RX_RING or TX_RING
+ */
+static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
+{
+       struct hnae_queue *q = &ring_pair->q;
+       struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
+       u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
+       struct hnae_ring *ring =
+               (ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
+       dma_addr_t dma = ring->desc_dma_addr;
+
+       if (ring_type == RX_RING) {
+               dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
+                              (u32)dma);
+               dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
+                              (u32)(dma >> 32));
+
+               dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+                              bd_size_type);
+               dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
+                              ring_pair->port_id_in_dsa);
+               dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
+                              ring_pair->port_id_in_dsa);
+       } else {
+               dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
+                              (u32)dma);
+               dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
+                              (u32)(dma >> 32));
+
+               dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+                              bd_size_type);
+               dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
+                              ring_pair->port_id_in_dsa);
+               dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
+                              ring_pair->port_id_in_dsa);
+       }
+}
+
+/**
+ *hns_rcb_init_hw - init rcb hardware
+ *@ring: rcb ring
+ */
+void hns_rcb_init_hw(struct ring_pair_cb *ring)
+{
+       hns_rcb_ring_init(ring, RX_RING);
+       hns_rcb_ring_init(ring, TX_RING);
+}
+
+/**
+ *hns_rcb_set_port_desc_cnt - set rcb port descriptor number
+ *@rcb_common: rcb_common device
+ *@port_idx:port index
+ *@desc_cnt:BD num
+ */
+static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
+                                     u32 port_idx, u32 desc_cnt)
+{
+       if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+               port_idx = 0;
+
+       dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
+                      desc_cnt);
+}
+
+/**
+ *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port index
+ *@coalesced_frames:BD num for coalesced frames
+ */
+static int  hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
+                                             u32 port_idx,
+                                             u32 coalesced_frames)
+{
+       if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+               port_idx = 0;
+       if (coalesced_frames >= rcb_common->desc_num ||
+           coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
+               return -EINVAL;
+
+       dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+                      coalesced_frames);
+       return 0;
+}
+
+/**
+ *hns_rcb_get_port_coalesced_frames - get rcb port coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx: port index
+ * return coalesced frames value
+ */
+static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
+                                            u32 port_idx)
+{
+       if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
+               port_idx = 0;
+
+       return dsaf_read_dev(rcb_common,
+                            RCB_CFG_PKTLINE_REG + port_idx * 4);
+}
+
+/**
+ *hns_rcb_set_timeout - set rcb port coalesced time_out
+ *@rcb_common: rcb_common device
+ *@timeout: time for coalesced time_out
+ */
+static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
+                               u32 timeout)
+{
+       dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
+}
+
+static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
+{
+       if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+               return HNS_RCB_SERVICE_NW_ENGINE_NUM;
+       else
+               return HNS_RCB_DEBUG_NW_ENGINE_NUM;
+}
+
+/* clear and (un)mask the rcb common exception irqs */
+static void hns_rcb_comm_exc_irq_en(
+                       struct rcb_common_cb *rcb_common, int en)
+{
+       u32 clr_value = 0xfffffffful;
+       u32 msk_value = en ? 0 : 0xfffffffful;
+
+       /* clear interrupt status */
+       dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_value);
+
+       dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_value);
+
+       dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_value);
+
+       dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_value);
+       dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_value);
+
+       /* set the interrupt masks */
+       dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_value);
+
+       dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_value);
+
+       /* tx bds do not need the cacheline interrupt, so always mask
+        * sf_txring_fbd_intmask (bit 1)
+        */
+       dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_value | 2);
+
+       dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_value);
+       dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_value);
+}
+
+/**
+ *hns_rcb_common_init_hw - init rcb common hardware
+ *@rcb_common: rcb_common device
+ *return 0 - success, negative - fail
+ */
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
+{
+       u32 reg_val;
+       int i;
+       int port_num = hns_rcb_common_get_port_num(rcb_common);
+
+       hns_rcb_comm_exc_irq_en(rcb_common, 0);
+
+       reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
+       if (0x1 != (reg_val & 0x1)) {
+               dev_err(rcb_common->dsaf_dev->dev,
+                       "RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
+               return -EBUSY;
+       }
+
+       for (i = 0; i < port_num; i++) {
+               hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
+               (void)hns_rcb_set_port_coalesced_frames(
+                       rcb_common, i, rcb_common->coalesced_frames);
+       }
+       hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
+
+       dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
+                      HNS_RCB_COMMON_ENDIAN);
+
+       return 0;
+}
+
+int hns_rcb_buf_size2type(u32 buf_size)
+{
+       int bd_size_type;
+
+       switch (buf_size) {
+       case 512:
+               bd_size_type = HNS_BD_SIZE_512_TYPE;
+               break;
+       case 1024:
+               bd_size_type = HNS_BD_SIZE_1024_TYPE;
+               break;
+       case 2048:
+               bd_size_type = HNS_BD_SIZE_2048_TYPE;
+               break;
+       case 4096:
+               bd_size_type = HNS_BD_SIZE_4096_TYPE;
+               break;
+       default:
+               bd_size_type = -EINVAL;
+       }
+
+       return bd_size_type;
+}
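
hns_rcb_buf_size2type() above is the single point where the supported RX buffer sizes are enumerated. A hedged stand-alone sketch of the same mapping; the numeric codes 0-3 are placeholders for HNS_BD_SIZE_512_TYPE..HNS_BD_SIZE_4096_TYPE, whose real values are defined elsewhere in the driver:

    #include <stdio.h>

    /* stand-alone rendering of hns_rcb_buf_size2type(); -22 == -EINVAL */
    static int buf_size2type(unsigned int buf_size)
    {
            switch (buf_size) {
            case 512:  return 0;    /* placeholder for HNS_BD_SIZE_512_TYPE */
            case 1024: return 1;    /* placeholder for HNS_BD_SIZE_1024_TYPE */
            case 2048: return 2;    /* placeholder for HNS_BD_SIZE_2048_TYPE */
            case 4096: return 3;    /* placeholder for HNS_BD_SIZE_4096_TYPE */
            default:   return -22;  /* unsupported size */
            }
    }

    int main(void)
    {
            const unsigned int sizes[] = { 512, 1024, 1500, 2048, 4096, 9000 };
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("buf_size %u -> bd size type %d\n",
                           sizes[i], buf_size2type(sizes[i]));
            return 0;
    }
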
+
+static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
+{
+       struct hnae_ring *ring;
+       struct rcb_common_cb *rcb_common;
+       struct ring_pair_cb *ring_pair_cb;
+       u32 buf_size;
+       u16 desc_num;
+       int irq_idx;
+
+       ring_pair_cb = container_of(q, struct ring_pair_cb, q);
+       if (ring_type == RX_RING) {
+               ring = &q->rx_ring;
+               ring->io_base = ring_pair_cb->q.io_base;
+               irq_idx = HNS_RCB_IRQ_IDX_RX;
+       } else {
+               ring = &q->tx_ring;
+               ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+                       HNS_RCB_TX_REG_OFFSET;
+               irq_idx = HNS_RCB_IRQ_IDX_TX;
+       }
+
+       rcb_common = ring_pair_cb->rcb_common;
+       buf_size = rcb_common->dsaf_dev->buf_size;
+       desc_num = rcb_common->dsaf_dev->desc_num;
+
+       ring->desc = NULL;
+       ring->desc_cb = NULL;
+
+       ring->irq = ring_pair_cb->virq[irq_idx];
+       ring->desc_dma_addr = 0;
+
+       ring->buf_size = buf_size;
+       ring->desc_num = desc_num;
+       ring->max_desc_num_per_pkt = HNS_RCB_RING_MAX_BD_PER_PKT;
+       ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
+       ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
+       ring->next_to_use = 0;
+       ring->next_to_clean = 0;
+}
+
+static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
+{
+       ring_pair_cb->q.handle = NULL;
+
+       hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
+       hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
+}
+
+static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
+{
+       int comm_index = rcb_common->comm_index;
+       int port;
+       int q_num;
+
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+               q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
+               port = ring_idx / q_num;
+       } else {
+               port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
+       }
+
+       return port;
+}
+
+static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
+{
+       int comm_index = rcb_common->comm_index;
+
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+               return HNS_SERVICE_RING_IRQ_IDX;
+       else
+               return HNS_DEBUG_RING_IRQ_IDX + (comm_index - 1) * 2;
+}
+
+#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
+       ((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
+/**
+ *hns_rcb_get_cfg - get rcb config
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
+{
+       struct ring_pair_cb *ring_pair_cb;
+       u32 i;
+       u32 ring_num = rcb_common->ring_num;
+       int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
+       struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
+
+       for (i = 0; i < ring_num; i++) {
+               ring_pair_cb = &rcb_common->ring_pair_cb[i];
+               ring_pair_cb->rcb_common = rcb_common;
+               ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
+               ring_pair_cb->index = i;
+               ring_pair_cb->q.io_base =
+                       RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
+               ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
+               ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX]
+                       = irq_of_parse_and_map(np, base_irq_idx + i * 2);
+               ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX]
+                       = irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1);
+               ring_pair_cb->q.phy_base =
+                       RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
+               hns_rcb_ring_pair_get_cfg(ring_pair_cb);
+       }
+}
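
hns_rcb_get_cfg() above places each ring pair at RCB_COMM_BASE_TO_RING_BASE(base, i), i.e. a fixed 0x10000 offset past the common block plus a per-ring stride. A small stand-alone sketch of that layout; the 0x1000 stride is only a placeholder, the real HNS_RCB_REG_OFFSET is defined in hns_dsaf_rcb.h and is not part of this hunk:

    #include <stdio.h>

    /* placeholder stride; the real value is HNS_RCB_REG_OFFSET */
    #define RING_STRIDE_EXAMPLE 0x1000UL

    /* same shape as RCB_COMM_BASE_TO_RING_BASE(base, ringid) */
    static unsigned long ring_base(unsigned long comm_base, unsigned int ringid)
    {
            return comm_base + 0x10000UL + RING_STRIDE_EXAMPLE * ringid;
    }

    int main(void)
    {
            unsigned long comm_base = 0xc2000000UL;         /* made-up base */
            unsigned int ring;

            for (ring = 0; ring < 4; ring++)
                    printf("ring %u -> register base 0x%lx\n",
                           ring, ring_base(comm_base, ring));
            return 0;
    }
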
+
+/**
+ *hns_rcb_get_coalesced_frames - get rcb port coalesced frames
+ *@dsaf_dev: dsaf device
+ *@port: port index
+ *return coalesced_frames
+ */
+u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
+{
+       int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
+       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+       return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+}
+
+/**
+ *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
+ *@dsaf_dev: dsaf device
+ *@comm_index: common index
+ *return time_out
+ */
+u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
+{
+       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+       return rcb_comm->timeout;
+}
+
+/**
+ *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
+ *@dsaf_dev: dsaf device
+ *@port: port index
+ *@timeout: coalesced time_out value
+ */
+void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
+                               int port, u32 timeout)
+{
+       int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
+       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+
+       if (rcb_comm->timeout == timeout)
+               return;
+
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+               dev_err(dsaf_dev->dev,
+                       "error: not support coalesce_usecs setting!\n");
+               return;
+       }
+       rcb_comm->timeout = timeout;
+       hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
+}
+
+/**
+ *hns_rcb_set_coalesced_frames - set rcb coalesced frames
+ *@dsaf_dev: dsaf device
+ *@port: port index
+ *@coalesced_frames: BD num for coalesced frames
+ *Return 0 on success, negative on failure
+ */
+int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
+                                int port, u32 coalesced_frames)
+{
+       int comm_index =  hns_dsaf_get_comm_idx_by_port(port);
+       struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+       u32 coalesced_reg_val;
+       int ret;
+
+       coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+
+       if (coalesced_reg_val == coalesced_frames)
+               return 0;
+
+       if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
+               ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
+                                                       coalesced_frames);
+               return ret;
+       } else {
+               return -EINVAL;
+       }
+}
+
+/**
+ *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
+ *                                             according to dsaf mode
+ *@dsaf_mode: dsaf mode
+ *@comm_index: common index
+ *@max_vfn: max vfn number
+ *@max_q_per_vf: max ring number per vm
+ */
+static void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+                                  u16 *max_vfn, u16 *max_q_per_vf)
+{
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+               switch (dsaf_mode) {
+               case DSAF_MODE_DISABLE_6PORT_0VM:
+                       *max_vfn = 1;
+                       *max_q_per_vf = 16;
+                       break;
+               case DSAF_MODE_DISABLE_FIX:
+                       *max_vfn = 1;
+                       *max_q_per_vf = 1;
+                       break;
+               case DSAF_MODE_DISABLE_2PORT_64VM:
+                       *max_vfn = 64;
+                       *max_q_per_vf = 1;
+                       break;
+               case DSAF_MODE_DISABLE_6PORT_16VM:
+                       *max_vfn = 16;
+                       *max_q_per_vf = 1;
+                       break;
+               default:
+                       *max_vfn = 1;
+                       *max_q_per_vf = 16;
+                       break;
+               }
+       } else {
+               *max_vfn = 1;
+               *max_q_per_vf = 1;
+       }
+}
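
For the service network common, the (max_vfn, max_q_per_vf) pairs picked above also determine how ring indexes map onto ports in hns_rcb_get_port(): q_num = max_vfn * max_q_per_vf rings belong to each port. A minimal stand-alone sketch of that relationship (mode names are labels only, not the real enum values):

    #include <stdio.h>

    struct queue_mode {
            const char *name;
            int max_vfn;
            int max_q_per_vf;
    };

    /* pairs as selected for the service common in hns_rcb_get_queue_mode() */
    static const struct queue_mode modes[] = {
            { "DISABLE_6PORT_0VM",   1, 16 },
            { "DISABLE_FIX",         1,  1 },
            { "DISABLE_2PORT_64VM", 64,  1 },
            { "DISABLE_6PORT_16VM", 16,  1 },
    };

    int main(void)
    {
            unsigned int m;

            for (m = 0; m < sizeof(modes) / sizeof(modes[0]); m++) {
                    int q_num = modes[m].max_vfn * modes[m].max_q_per_vf;

                    /* same arithmetic as hns_rcb_get_port(): port = ring_idx / q_num */
                    printf("%-20s q_num=%2d -> rings 0..%d map to port 0\n",
                           modes[m].name, q_num, q_num - 1);
            }
            return 0;
    }
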
+
+int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
+{
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+               switch (dsaf_dev->dsaf_mode) {
+               case DSAF_MODE_ENABLE_FIX:
+                       return 1;
+
+               case DSAF_MODE_DISABLE_FIX:
+                       return 6;
+
+               case DSAF_MODE_ENABLE_0VM:
+                       return 32;
+
+               case DSAF_MODE_DISABLE_6PORT_0VM:
+               case DSAF_MODE_ENABLE_16VM:
+               case DSAF_MODE_DISABLE_6PORT_2VM:
+               case DSAF_MODE_DISABLE_6PORT_16VM:
+               case DSAF_MODE_DISABLE_6PORT_4VM:
+               case DSAF_MODE_ENABLE_8VM:
+                       return 96;
+
+               case DSAF_MODE_DISABLE_2PORT_16VM:
+               case DSAF_MODE_DISABLE_2PORT_8VM:
+               case DSAF_MODE_ENABLE_32VM:
+               case DSAF_MODE_DISABLE_2PORT_64VM:
+               case DSAF_MODE_ENABLE_128VM:
+                       return 128;
+
+               default:
+                       dev_warn(dsaf_dev->dev,
+                                "get ring num failed, using default! dsaf_mode=%d\n",
+                                dsaf_dev->dsaf_mode);
+                       return 128;
+               }
+       } else {
+               return 1;
+       }
+}
+
+void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
+                                      int comm_index)
+{
+       void __iomem *base_addr;
+
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+               base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
+       else
+               base_addr = dsaf_dev->sds_base
+                       + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
+                       + RCB_COMMON_REG_OFFSET;
+
+       return base_addr;
+}
+
+static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
+                                           int comm_index)
+{
+       struct device_node *np = dsaf_dev->dev->of_node;
+       phys_addr_t phy_addr;
+       const __be32 *tmp_addr;
+       u64 addr_offset = 0;
+       u64 size = 0;
+       int index = 0;
+
+       if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+               index    = 2;
+               addr_offset = RCB_COMMON_REG_OFFSET;
+       } else {
+               index    = 1;
+               addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
+                               RCB_COMMON_REG_OFFSET;
+       }
+       tmp_addr  = of_get_address(np, index, &size, NULL);
+       phy_addr  = of_translate_address(np, tmp_addr);
+       return phy_addr + addr_offset;
+}
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
+                          int comm_index)
+{
+       struct rcb_common_cb *rcb_common;
+       enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
+       u16 max_vfn;
+       u16 max_q_per_vf;
+       int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);
+
+       rcb_common =
+               devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
+                       ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
+       if (!rcb_common) {
+               dev_err(dsaf_dev->dev, "rcb common devm_kzalloc failed!\n");
+               return -ENOMEM;
+       }
+       rcb_common->comm_index = comm_index;
+       rcb_common->ring_num = ring_num;
+       rcb_common->dsaf_dev = dsaf_dev;
+
+       rcb_common->desc_num = dsaf_dev->desc_num;
+       rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
+       rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
+
+       hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
+       rcb_common->max_vfn = max_vfn;
+       rcb_common->max_q_per_vf = max_q_per_vf;
+
+       rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
+       rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);
+
+       dsaf_dev->rcb_common[comm_index] = rcb_common;
+       return 0;
+}
+
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
+                            u32 comm_index)
+{
+       dsaf_dev->rcb_common[comm_index] = NULL;
+}
+
+void hns_rcb_update_stats(struct hnae_queue *queue)
+{
+       struct ring_pair_cb *ring =
+               container_of(queue, struct ring_pair_cb, q);
+       struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
+       struct ppe_common_cb *ppe_common
+               = dsaf_dev->ppe_common[ring->rcb_common->comm_index];
+       struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
+       hw_stats->rx_pkts += dsaf_read_dev(queue,
+                        RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+       dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);
+
+       hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
+                        PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+       hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
+                        PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);
+
+       hw_stats->tx_pkts += dsaf_read_dev(queue,
+                        RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+       dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);
+
+       hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
+                        PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+       hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
+                        PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
+}
+
+/**
+ *hns_rcb_get_stats - get rcb statistic
+ *@queue: rcb queue
+ *@data:statistic value
+ */
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
+{
+       u64 *regs_buff = data;
+       struct ring_pair_cb *ring =
+               container_of(queue, struct ring_pair_cb, q);
+       struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
+       regs_buff[0] = hw_stats->tx_pkts;
+       regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
+       regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
+       regs_buff[3] =
+               dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+
+       regs_buff[4] = queue->tx_ring.stats.tx_pkts;
+       regs_buff[5] = queue->tx_ring.stats.tx_bytes;
+       regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
+       regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
+       regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
+       regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
+       regs_buff[10] = queue->tx_ring.stats.restart_queue;
+       regs_buff[11] = queue->tx_ring.stats.tx_busy;
+
+       regs_buff[12] = hw_stats->rx_pkts;
+       regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
+       regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
+       regs_buff[15] =
+               dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+
+       regs_buff[16] = queue->rx_ring.stats.rx_pkts;
+       regs_buff[17] = queue->rx_ring.stats.rx_bytes;
+       regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
+       regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
+       regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
+       regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
+       regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
+       regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
+       regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
+       regs_buff[25] = queue->rx_ring.stats.err_bd_num;
+       regs_buff[26] = queue->rx_ring.stats.l2_err;
+       regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
+}
+
+/**
+ *hns_rcb_get_ring_sset_count - rcb string set count
+ *@stringset:ethtool cmd
+ *return rcb ring string set count
+ */
+int hns_rcb_get_ring_sset_count(int stringset)
+{
+       if (stringset == ETH_SS_STATS)
+               return HNS_RING_STATIC_REG_NUM;
+
+       return 0;
+}
+
+/**
+ *hns_rcb_get_common_regs_count - rcb common regs count
+ *return regs count
+ */
+int hns_rcb_get_common_regs_count(void)
+{
+       return HNS_RCB_COMMON_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_ring_regs_count - rcb ring regs count
+ *return regs count
+ */
+int hns_rcb_get_ring_regs_count(void)
+{
+       return HNS_RCB_RING_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_strings - get rcb string set
+ *@stringset:string set index
+ *@data:strings name value
+ *@index:queue index
+ */
+void hns_rcb_get_strings(int stringset, u8 *data, int index)
+{
+       char *buff = (char *)data;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
+       buff = buff + ETH_GSTRING_LEN;
+
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
+       buff = buff + ETH_GSTRING_LEN;
+       snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
+}
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
+{
+       u32 *regs = data;
+       u32 i = 0;
+
+       /*rcb common registers */
+       regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
+       regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
+       regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);
+
+       regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
+       regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
+       regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
+       regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
+       regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
+       regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);
+
+       regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
+       regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
+       regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
+       regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
+       regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
+       regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
+       regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
+       regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
+       regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
+       regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
+       regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
+       regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
+       regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
+       regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
+       regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
+       regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
+       regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
+       regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
+       regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);
+
+       regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
+       regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
+       regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
+       regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
+       regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
+       regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
+       regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
+       regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
+       regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
+       regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);
+
+       /* rcb common entry registers */
+       for (i = 0; i < 16; i++) { /* total 16 model registers */
+               regs[38 + i]
+                       = dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
+               regs[54 + i]
+                       = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
+       }
+
+       regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
+       regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+       regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+
+       /* mark end of rcb common regs */
+       for (i = 73; i < 80; i++)
+               regs[i] = 0xcccccccc;
+}
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
+{
+       u32 *regs = data;
+       struct ring_pair_cb *ring_pair
+               = container_of(queue, struct ring_pair_cb, q);
+       u32 i = 0;
+
+       /*rcb ring registers */
+       regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
+       regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
+       regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
+       regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
+       regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
+       regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
+       regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
+       regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+       regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+
+       regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
+       regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
+       regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
+       regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
+       regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
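+       /* the index jumps from 13 to 15 below; regs[14] is left unwritten */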
+       regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
+       regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
+       regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+       regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
+       regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+
+       regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
+       regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
+       regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
+       regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
+       regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
+       regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
+       regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);
+
+       regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
+       regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
+       regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
+       regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
+       regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
+       regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
+       regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
+       regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);
+
+       /* mark end of ring regs */
+       for (i = 35; i < 40; i++)
+               regs[i] = 0xcccccc00 + ring_pair->index;
+}
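The three ethtool helpers above are written to agree with one another:
hns_rcb_get_ring_sset_count() reports HNS_RING_STATIC_REG_NUM (28) entries per
queue, hns_rcb_get_stats() fills exactly regs_buff[0]..regs_buff[27], and
hns_rcb_get_strings() emits the matching 28 names of ETH_GSTRING_LEN bytes
each. A minimal sketch of how an ethtool-style caller could drive them per
queue is shown below; it is illustrative only, and the hnae_handle fields it
relies on (q_num, qs[]) are assumptions, not something defined by this patch.

#include <linux/ethtool.h>

#include "hnae.h"
#include "hns_dsaf_rcb.h"

/* sketch: walk all queues of a (hypothetical) handle and collect the
 * per-ring string names and statistic values using the RCB helpers.
 */
static void example_get_ring_strings(struct hnae_handle *h, u8 *data)
{
	u8 *buf = data;
	int i;

	for (i = 0; i < h->q_num; i++) {
		/* 28 names per queue, each ETH_GSTRING_LEN bytes long */
		hns_rcb_get_strings(ETH_SS_STATS, buf, i);
		buf += ETH_GSTRING_LEN *
		       hns_rcb_get_ring_sset_count(ETH_SS_STATS);
	}
}

static void example_get_ring_stats(struct hnae_handle *h, u64 *data)
{
	u64 *p = data;
	int i;

	for (i = 0; i < h->q_num; i++) {
		hns_rcb_update_stats(h->qs[i]);	/* refresh HW counters */
		hns_rcb_get_stats(h->qs[i], p);	/* 28 values per queue */
		p += hns_rcb_get_ring_sset_count(ETH_SS_STATS);
	}
}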
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
new file mode 100644 (file)
index 0000000..c7db613
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_DSAF_RCB_H
+#define _HNS_DSAF_RCB_H
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "hnae.h"
+#include "hns_dsaf_main.h"
+
+struct rcb_common_cb;
+
+#define HNS_RCB_IRQ_NUM_PER_QUEUE              2
+#define HNS_RCB_IRQ_IDX_TX                     0
+#define HNS_RCB_IRQ_IDX_RX                     1
+#define HNS_RCB_TX_REG_OFFSET                  0x40
+
+#define HNS_RCB_SERVICE_NW_ENGINE_NUM          DSAF_COMM_CHN
+#define HNS_RCB_DEBUG_NW_ENGINE_NUM            1
+#define HNS_RCB_RING_MAX_BD_PER_PKT            3
+#define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU
+
+#define HNS_RCB_RING_MAX_PENDING_BD            1024
+#define HNS_RCB_RING_MIN_PENDING_BD            16
+
+#define HNS_RCB_REG_OFFSET                     0x10000
+
+#define HNS_RCB_MAX_COALESCED_FRAMES           1023
+#define HNS_RCB_MIN_COALESCED_FRAMES           1
+#define HNS_RCB_DEF_COALESCED_FRAMES           50
+#define HNS_RCB_MAX_TIME_OUT                   0x500
+
+#define HNS_RCB_COMMON_ENDIAN                  1
+
+#define HNS_BD_SIZE_512_TYPE                   0
+#define HNS_BD_SIZE_1024_TYPE                  1
+#define HNS_BD_SIZE_2048_TYPE                  2
+#define HNS_BD_SIZE_4096_TYPE                  3
+
+#define HNS_RCB_COMMON_DUMP_REG_NUM 80
+#define HNS_RCB_RING_DUMP_REG_NUM 40
+#define HNS_RING_STATIC_REG_NUM 28
+
+#define HNS_DUMP_REG_NUM                       500
+#define HNS_STATIC_REG_NUM                     12
+
+enum rcb_int_flag {
+       RCB_INT_FLAG_TX = 0x1,
+       RCB_INT_FLAG_RX = (0x1 << 1),
+       RCB_INT_FLAG_MAX = (0x1 << 2),  /*must be the last element */
+};
+
+struct hns_ring_hw_stats {
+       u64 tx_pkts;
+       u64 ppe_tx_ok_pkts;
+       u64 ppe_tx_drop_pkts;
+       u64 rx_pkts;
+       u64 ppe_rx_ok_pkts;
+       u64 ppe_rx_drop_pkts;
+};
+
+struct ring_pair_cb {
+       struct rcb_common_cb *rcb_common;       /*  ring belongs to */
+       struct device *dev;     /*device for DMA mapping */
+       struct hnae_queue q;
+
+       u16 index;      /* global index in a rcb common device */
+       u16 buf_size;
+
+       int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
+
+       u8 port_id_in_dsa;
+       u8 used_by_vf;
+
+       struct hns_ring_hw_stats hw_stats;
+};
+
+struct rcb_common_cb {
+       u8 __iomem *io_base;
+       phys_addr_t phy_base;
+       struct dsaf_device *dsaf_dev;
+       u16 max_vfn;
+       u16 max_q_per_vf;
+
+       u8 comm_index;
+       u32 ring_num;
+       u32 coalesced_frames; /* frames threshold of rx interrupt */
+       u32 timeout; /* time threshold of rx interrupt */
+       u32 desc_num; /* desc num per queue */
+
+       struct ring_pair_cb ring_pair_cb[0];
+};
+
+int hns_rcb_buf_size2type(u32 buf_size);
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_start(struct hnae_queue *q, u32 val);
+void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
+
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable);
+void hns_rcb_init_hw(struct ring_pair_cb *ring);
+void hns_rcb_reset_ring_hw(struct hnae_queue *q);
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+
+u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index);
+u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
+                               int port, u32 timeout);
+int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
+                                int port, u32 coalesced_frames);
+void hns_rcb_update_stats(struct hnae_queue *queue);
+
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_common, void *data);
+
+int hns_rcb_get_ring_sset_count(int stringset);
+int hns_rcb_get_common_regs_count(void);
+int hns_rcb_get_ring_regs_count(void);
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
+
+void hns_rcb_get_strings(int stringset, u8 *data, int index);
+#endif /* _HNS_DSAF_RCB_H */
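The coalescing bounds declared above (HNS_RCB_MIN_COALESCED_FRAMES,
HNS_RCB_MAX_COALESCED_FRAMES, HNS_RCB_MAX_TIME_OUT) delimit what the
hns_rcb_set_coalesced_frames()/hns_rcb_set_coalesce_usecs() helpers are meant
to accept. The sketch below shows how a caller might range-check an
ethtool-style coalesce request before programming it; it is illustrative
only, and the assumption that the usecs value maps one-to-one onto the
hardware timeout units is not taken from this patch.

#include <linux/errno.h>

#include "hns_dsaf_rcb.h"

/* sketch: validate requested coalesce settings against the RCB limits
 * above, then hand them to the per-port helpers. "dsaf_dev" and "port"
 * are assumed to be supplied by the enclosing adapter code.
 */
static int example_set_ring_coalesce(struct dsaf_device *dsaf_dev, int port,
				     u32 usecs, u32 frames)
{
	if (frames < HNS_RCB_MIN_COALESCED_FRAMES ||
	    frames > HNS_RCB_MAX_COALESCED_FRAMES)
		return -EINVAL;

	if (usecs > HNS_RCB_MAX_TIME_OUT)	/* assumed unit mapping */
		return -EINVAL;

	hns_rcb_set_coalesce_usecs(dsaf_dev, port, usecs);
	return hns_rcb_set_coalesced_frames(dsaf_dev, port, frames);
}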
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
new file mode 100644 (file)
index 0000000..6fc58ba
--- /dev/null
@@ -0,0 +1,972 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DSAF_REG_H_
+#define _DSAF_REG_H_
+
+#define HNS_GE_FIFO_ERR_INTNUM 8
+#define HNS_XGE_ERR_INTNUM 6
+#define HNS_RCB_COMM_ERR_INTNUM 12
+#define HNS_PPE_TNL_ERR_INTNUM 8
+#define HNS_DSAF_EVENT_INTNUM 21
+#define HNS_DEBUG_RING_INTNUM 4
+#define HNS_SERVICE_RING_INTNUM 256
+
+#define HNS_DEBUG_RING_IRQ_IDX (HNS_GE_FIFO_ERR_INTNUM + HNS_XGE_ERR_INTNUM +\
+               HNS_RCB_COMM_ERR_INTNUM + HNS_PPE_TNL_ERR_INTNUM +\
+               HNS_DSAF_EVENT_INTNUM)
+#define HNS_SERVICE_RING_IRQ_IDX (HNS_DEBUG_RING_IRQ_IDX +\
+               HNS_DEBUG_RING_INTNUM)
+
+#define DSAF_IRQ_NUM 18
+
+#define DSAF_MAX_PORT_NUM_PER_CHIP 8
+#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6
+#define DSAF_MAX_VM_NUM 128
+
+#define DSAF_COMM_DEV_NUM 3
+#define DSAF_PPE_INODE_BASE 6
+#define HNS_DSAF_COMM_SERVICE_NW_IDX 0
+#define DSAF_DEBUG_NW_NUM      2
+#define DSAF_SERVICE_NW_NUM    6
+#define DSAF_COMM_CHN          DSAF_SERVICE_NW_NUM
+#define DSAF_GE_NUM            ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_PORT_NUM          ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_XGE_NUM           DSAF_SERVICE_NW_NUM
+#define DSAF_NODE_NUM          18
+#define DSAF_XOD_BIG_NUM       DSAF_NODE_NUM
+#define DSAF_SBM_NUM           DSAF_NODE_NUM
+#define DSAF_VOQ_NUM           DSAF_NODE_NUM
+#define DSAF_INODE_NUM         DSAF_NODE_NUM
+#define DSAF_XOD_NUM           8
+#define DSAF_TBL_NUM           8
+#define DSAF_SW_PORT_NUM       8
+#define DSAF_TOTAL_QUEUE_NUM   129
+
+#define DSAF_TCAM_SUM          512
+#define DSAF_LINE_SUM          (2048 * 14)
+
+#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG                0x100
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG              0x180
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG              0x184
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG              0x188
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG              0x18C
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG              0x190
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG              0x194
+#define DSAF_SUB_SC_DSAF_CLK_EN_REG                    0x300
+#define DSAF_SUB_SC_DSAF_CLK_DIS_REG                   0x304
+#define DSAF_SUB_SC_NT_CLK_EN_REG                      0x308
+#define DSAF_SUB_SC_NT_CLK_DIS_REG                     0x30C
+#define DSAF_SUB_SC_XGE_CLK_EN_REG                     0x310
+#define DSAF_SUB_SC_XGE_CLK_DIS_REG                    0x314
+#define DSAF_SUB_SC_GE_CLK_EN_REG                      0x318
+#define DSAF_SUB_SC_GE_CLK_DIS_REG                     0x31C
+#define DSAF_SUB_SC_PPE_CLK_EN_REG                     0x320
+#define DSAF_SUB_SC_PPE_CLK_DIS_REG                    0x324
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG             0x350
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG            0x354
+#define DSAF_SUB_SC_XBAR_RESET_REQ_REG                 0xA00
+#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG                0xA04
+#define DSAF_SUB_SC_NT_RESET_REQ_REG                   0xA08
+#define DSAF_SUB_SC_NT_RESET_DREQ_REG                  0xA0C
+#define DSAF_SUB_SC_XGE_RESET_REQ_REG                  0xA10
+#define DSAF_SUB_SC_XGE_RESET_DREQ_REG                 0xA14
+#define DSAF_SUB_SC_GE_RESET_REQ0_REG                  0xA18
+#define DSAF_SUB_SC_GE_RESET_DREQ0_REG                 0xA1C
+#define DSAF_SUB_SC_GE_RESET_REQ1_REG                  0xA20
+#define DSAF_SUB_SC_GE_RESET_DREQ1_REG                 0xA24
+#define DSAF_SUB_SC_PPE_RESET_REQ_REG                  0xA48
+#define DSAF_SUB_SC_PPE_RESET_DREQ_REG                 0xA4C
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG          0xA88
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG         0xA8C
+#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG         0x2060
+#define DSAF_SUB_SC_TCAM_MBIST_EN_REG                  0x2300
+#define DSAF_SUB_SC_DSAF_CLK_ST_REG                    0x5300
+#define DSAF_SUB_SC_NT_CLK_ST_REG                      0x5304
+#define DSAF_SUB_SC_XGE_CLK_ST_REG                     0x5308
+#define DSAF_SUB_SC_GE_CLK_ST_REG                      0x530C
+#define DSAF_SUB_SC_PPE_CLK_ST_REG                     0x5310
+#define DSAF_SUB_SC_ROCEE_CLK_ST_REG                   0x5314
+#define DSAF_SUB_SC_CPU_CLK_ST_REG                     0x5318
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG             0x5328
+#define DSAF_SUB_SC_XBAR_RESET_ST_REG                  0x5A00
+#define DSAF_SUB_SC_NT_RESET_ST_REG                    0x5A04
+#define DSAF_SUB_SC_XGE_RESET_ST_REG                   0x5A08
+#define DSAF_SUB_SC_GE_RESET_ST0_REG                   0x5A0C
+#define DSAF_SUB_SC_GE_RESET_ST1_REG                   0x5A10
+#define DSAF_SUB_SC_PPE_RESET_ST_REG                   0x5A24
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG           0x5A44
+
+/* serdes offset */
+#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
+#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
+#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
+#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
+#define HNS_MAC_LANE3_CTLEDFE_REG 0x000BFF9CULL
+#define HNS_MAC_LANE0_STATE_REG 0x000BFFD4ULL
+#define HNS_MAC_LANE1_STATE_REG 0x000BFFC4ULL
+#define HNS_MAC_LANE2_STATE_REG 0x000BFFB4ULL
+#define HNS_MAC_LANE3_STATE_REG 0x000BFFA4ULL
+
+#define HILINK_RESET_TIMOUT 10000
+
+#define DSAF_SRAM_INIT_OVER_0_REG      0x0
+#define DSAF_CFG_0_REG                 0x4
+#define DSAF_ECC_ERR_INVERT_0_REG      0x8
+#define DSAF_ABNORMAL_TIMEOUT_0_REG    0x1C
+#define DSAF_FSM_TIMEOUT_0_REG         0x20
+#define DSAF_DSA_REG_CNT_CLR_CE_REG    0x2C
+#define DSAF_DSA_SBM_INF_FIFO_THRD_REG 0x30
+#define DSAF_DSA_SRAM_1BIT_ECC_SEL_REG 0x34
+#define DSAF_DSA_SRAM_1BIT_ECC_CNT_REG 0x38
+#define DSAF_PFC_EN_0_REG              0x50
+#define DSAF_PFC_UNIT_CNT_0_REG                0x70
+#define DSAF_XGE_INT_MSK_0_REG         0x100
+#define DSAF_PPE_INT_MSK_0_REG         0x120
+#define DSAF_ROCEE_INT_MSK_0_REG       0x140
+#define DSAF_XGE_INT_SRC_0_REG         0x160
+#define DSAF_PPE_INT_SRC_0_REG         0x180
+#define DSAF_ROCEE_INT_SRC_0_REG       0x1A0
+#define DSAF_XGE_INT_STS_0_REG         0x1C0
+#define DSAF_PPE_INT_STS_0_REG         0x1E0
+#define DSAF_ROCEE_INT_STS_0_REG       0x200
+#define DSAF_PPE_QID_CFG_0_REG         0x300
+#define DSAF_SW_PORT_TYPE_0_REG                0x320
+#define DSAF_STP_PORT_TYPE_0_REG       0x340
+#define DSAF_MIX_DEF_QID_0_REG         0x360
+#define DSAF_PORT_DEF_VLAN_0_REG       0x380
+#define DSAF_VM_DEF_VLAN_0_REG         0x400
+
+#define DSAF_INODE_CUT_THROUGH_CFG_0_REG       0x1000
+#define DSAF_INODE_ECC_INVERT_EN_0_REG         0x1008
+#define DSAF_INODE_ECC_ERR_ADDR_0_REG          0x100C
+#define DSAF_INODE_IN_PORT_NUM_0_REG           0x1018
+#define DSAF_INODE_PRI_TC_CFG_0_REG            0x101C
+#define DSAF_INODE_BP_STATUS_0_REG             0x1020
+#define DSAF_INODE_PAD_DISCARD_NUM_0_REG       0x1028
+#define DSAF_INODE_FINAL_IN_MAN_NUM_0_REG      0x102C
+#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG      0x1030
+#define DSAF_INODE_SBM_PID_NUM_0_REG           0x1038
+#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG    0x103C
+#define DSAF_INODE_SBM_RELS_NUM_0_REG          0x104C
+#define DSAF_INODE_SBM_DROP_NUM_0_REG          0x1050
+#define DSAF_INODE_CRC_FALSE_NUM_0_REG         0x1054
+#define DSAF_INODE_BP_DISCARD_NUM_0_REG                0x1058
+#define DSAF_INODE_RSLT_DISCARD_NUM_0_REG      0x105C
+#define DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG  0x1060
+#define DSAF_INODE_VOQ_OVER_NUM_0_REG          0x1068
+#define DSAF_INODE_BD_SAVE_STATUS_0_REG                0x1900
+#define DSAF_INODE_BD_ORDER_STATUS_0_REG       0x1950
+#define DSAF_INODE_SW_VLAN_TAG_DISC_0_REG      0x1A00
+#define DSAF_INODE_IN_DATA_STP_DISC_0_REG      0x1A50
+#define DSAF_INODE_GE_FC_EN_0_REG              0x1B00
+#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG                0x1B50
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG                0x1C00
+
+#define DSAF_SBM_CFG_REG_0_REG                 0x2000
+#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG                0x2004
+#define DSAF_SBM_BP_CFG_0_PPE_REG_0_REG                0x2304
+#define DSAF_SBM_BP_CFG_0_ROCEE_REG_0_REG      0x2604
+#define DSAF_SBM_BP_CFG_1_REG_0_REG            0x2008
+#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG                0x200C
+#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG                0x230C
+#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG      0x260C
+#define DSAF_SBM_FREE_CNT_0_0_REG              0x2010
+#define DSAF_SBM_FREE_CNT_1_0_REG              0x2014
+#define DSAF_SBM_BP_CNT_0_0_REG                        0x2018
+#define DSAF_SBM_BP_CNT_1_0_REG                        0x201C
+#define DSAF_SBM_BP_CNT_2_0_REG                        0x2020
+#define DSAF_SBM_BP_CNT_3_0_REG                        0x2024
+#define DSAF_SBM_INER_ST_0_REG                 0x2028
+#define DSAF_SBM_MIB_REQ_FAILED_TC_0_REG       0x202C
+#define DSAF_SBM_LNK_INPORT_CNT_0_REG          0x2030
+#define DSAF_SBM_LNK_DROP_CNT_0_REG            0x2034
+#define DSAF_SBM_INF_OUTPORT_CNT_0_REG         0x2038
+#define DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG      0x203C
+#define DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG      0x2040
+#define DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG      0x2044
+#define DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG      0x2048
+#define DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG      0x204C
+#define DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG      0x2050
+#define DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG      0x2054
+#define DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG      0x2058
+#define DSAF_SBM_LNK_REQ_CNT_0_REG             0x205C
+#define DSAF_SBM_LNK_RELS_CNT_0_REG            0x2060
+#define DSAF_SBM_BP_CFG_3_REG_0_REG            0x2068
+#define DSAF_SBM_BP_CFG_4_REG_0_REG            0x206C
+
+#define DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG     0x3000
+#define DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG     0x3004
+#define DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG      0x3008
+#define DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG      0x300C
+#define DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG       0x3010
+#define DSAF_XOD_ETS_TOKEN_CFG_0_REG           0x3014
+#define DSAF_XOD_PFS_CFG_0_0_REG               0x3018
+#define DSAF_XOD_PFS_CFG_1_0_REG               0x301C
+#define DSAF_XOD_PFS_CFG_2_0_REG               0x3020
+#define DSAF_XOD_GNT_L_0_REG                   0x3024
+#define DSAF_XOD_GNT_H_0_REG                   0x3028
+#define DSAF_XOD_CONNECT_STATE_0_REG           0x302C
+#define DSAF_XOD_RCVPKT_CNT_0_REG              0x3030
+#define DSAF_XOD_RCVTC0_CNT_0_REG              0x3034
+#define DSAF_XOD_RCVTC1_CNT_0_REG              0x3038
+#define DSAF_XOD_RCVTC2_CNT_0_REG              0x303C
+#define DSAF_XOD_RCVTC3_CNT_0_REG              0x3040
+#define DSAF_XOD_RCVVC0_CNT_0_REG              0x3044
+#define DSAF_XOD_RCVVC1_CNT_0_REG              0x3048
+#define DSAF_XOD_XGE_RCVIN0_CNT_0_REG          0x304C
+#define DSAF_XOD_XGE_RCVIN1_CNT_0_REG          0x3050
+#define DSAF_XOD_XGE_RCVIN2_CNT_0_REG          0x3054
+#define DSAF_XOD_XGE_RCVIN3_CNT_0_REG          0x3058
+#define DSAF_XOD_XGE_RCVIN4_CNT_0_REG          0x305C
+#define DSAF_XOD_XGE_RCVIN5_CNT_0_REG          0x3060
+#define DSAF_XOD_XGE_RCVIN6_CNT_0_REG          0x3064
+#define DSAF_XOD_XGE_RCVIN7_CNT_0_REG          0x3068
+#define DSAF_XOD_PPE_RCVIN0_CNT_0_REG          0x306C
+#define DSAF_XOD_PPE_RCVIN1_CNT_0_REG          0x3070
+#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG                0x3074
+#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG                0x3078
+#define DSAF_XOD_FIFO_STATUS_0_REG             0x307C
+
+#define DSAF_VOQ_ECC_INVERT_EN_0_REG           0x4004
+#define DSAF_VOQ_SRAM_PKT_NUM_0_REG            0x4008
+#define DSAF_VOQ_IN_PKT_NUM_0_REG              0x400C
+#define DSAF_VOQ_OUT_PKT_NUM_0_REG             0x4010
+#define DSAF_VOQ_ECC_ERR_ADDR_0_REG            0x4014
+#define DSAF_VOQ_BP_STATUS_0_REG               0x4018
+#define DSAF_VOQ_SPUP_IDLE_0_REG               0x401C
+#define DSAF_VOQ_XGE_XOD_REQ_0_0_REG           0x4024
+#define DSAF_VOQ_XGE_XOD_REQ_1_0_REG           0x4028
+#define DSAF_VOQ_PPE_XOD_REQ_0_REG             0x402C
+#define DSAF_VOQ_ROCEE_XOD_REQ_0_REG           0x4030
+#define DSAF_VOQ_BP_ALL_THRD_0_REG             0x4034
+
+#define DSAF_TBL_CTRL_0_REG                    0x5000
+#define DSAF_TBL_INT_MSK_0_REG                 0x5004
+#define DSAF_TBL_INT_SRC_0_REG                 0x5008
+#define DSAF_TBL_INT_STS_0_REG                 0x5100
+#define DSAF_TBL_TCAM_ADDR_0_REG               0x500C
+#define DSAF_TBL_LINE_ADDR_0_REG               0x5010
+#define DSAF_TBL_TCAM_HIGH_0_REG               0x5014
+#define DSAF_TBL_TCAM_LOW_0_REG                        0x5018
+#define DSAF_TBL_TCAM_MCAST_CFG_4_0_REG                0x501C
+#define DSAF_TBL_TCAM_MCAST_CFG_3_0_REG                0x5020
+#define DSAF_TBL_TCAM_MCAST_CFG_2_0_REG                0x5024
+#define DSAF_TBL_TCAM_MCAST_CFG_1_0_REG                0x5028
+#define DSAF_TBL_TCAM_MCAST_CFG_0_0_REG                0x502C
+#define DSAF_TBL_TCAM_UCAST_CFG_0_REG          0x5030
+#define DSAF_TBL_LIN_CFG_0_REG                 0x5034
+#define DSAF_TBL_TCAM_RDATA_HIGH_0_REG         0x5038
+#define DSAF_TBL_TCAM_RDATA_LOW_0_REG          0x503C
+#define DSAF_TBL_TCAM_RAM_RDATA4_0_REG         0x5040
+#define DSAF_TBL_TCAM_RAM_RDATA3_0_REG         0x5044
+#define DSAF_TBL_TCAM_RAM_RDATA2_0_REG         0x5048
+#define DSAF_TBL_TCAM_RAM_RDATA1_0_REG         0x504C
+#define DSAF_TBL_TCAM_RAM_RDATA0_0_REG         0x5050
+#define DSAF_TBL_LIN_RDATA_0_REG               0x5054
+#define DSAF_TBL_DA0_MIS_INFO1_0_REG           0x5058
+#define DSAF_TBL_DA0_MIS_INFO0_0_REG           0x505C
+#define DSAF_TBL_SA_MIS_INFO2_0_REG            0x5104
+#define DSAF_TBL_SA_MIS_INFO1_0_REG            0x5098
+#define DSAF_TBL_SA_MIS_INFO0_0_REG            0x509C
+#define DSAF_TBL_PUL_0_REG                     0x50A0
+#define DSAF_TBL_OLD_RSLT_0_REG                        0x50A4
+#define DSAF_TBL_OLD_SCAN_VAL_0_REG            0x50A8
+#define DSAF_TBL_DFX_CTRL_0_REG                        0x50AC
+#define DSAF_TBL_DFX_STAT_0_REG                        0x50B0
+#define DSAF_TBL_DFX_STAT_2_0_REG              0x5108
+#define DSAF_TBL_LKUP_NUM_I_0_REG              0x50C0
+#define DSAF_TBL_LKUP_NUM_O_0_REG              0x50E0
+#define DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG  0x510C
+
+#define DSAF_INODE_FIFO_WL_0_REG               0x6000
+#define DSAF_ONODE_FIFO_WL_0_REG               0x6020
+#define DSAF_XGE_GE_WORK_MODE_0_REG            0x6040
+#define DSAF_XGE_APP_RX_LINK_UP_0_REG          0x6080
+#define DSAF_NETPORT_CTRL_SIG_0_REG            0x60A0
+#define DSAF_XGE_CTRL_SIG_CFG_0_REG            0x60C0
+
+#define PPE_COM_CFG_QID_MODE_REG               0x0
+#define PPE_COM_INTEN_REG                      0x110
+#define PPE_COM_RINT_REG                       0x114
+#define PPE_COM_INTSTS_REG                     0x118
+#define PPE_COM_COMMON_CNT_CLR_CE_REG          0x1120
+#define PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG    0x300
+#define PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG      0x600
+#define PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG     0x900
+#define PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG      0xC00
+
+#define PPE_CFG_TX_FIFO_THRSLD_REG             0x0
+#define PPE_CFG_RX_FIFO_THRSLD_REG             0x4
+#define PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG       0x8
+#define PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG       0xC
+#define PPE_CFG_PAUSE_IDLE_CNT_REG             0x10
+#define PPE_CFG_BUS_CTRL_REG                   0x40
+#define PPE_CFG_TNL_TO_BE_RST_REG              0x48
+#define PPE_CURR_TNL_CAN_RST_REG               0x4C
+#define PPE_CFG_XGE_MODE_REG                   0x80
+#define PPE_CFG_MAX_FRAME_LEN_REG              0x84
+#define PPE_CFG_RX_PKT_MODE_REG                        0x88
+#define PPE_CFG_RX_VLAN_TAG_REG                        0x8C
+#define PPE_CFG_TAG_GEN_REG                    0x90
+#define PPE_CFG_PARSE_TAG_REG                  0x94
+#define PPE_CFG_PRO_CHECK_EN_REG               0x98
+#define PPE_INTEN_REG                          0x100
+#define PPE_RINT_REG                           0x104
+#define PPE_INTSTS_REG                         0x108
+#define PPE_CFG_RX_PKT_INT_REG                 0x140
+#define PPE_CFG_HEAT_DECT_TIME0_REG            0x144
+#define PPE_CFG_HEAT_DECT_TIME1_REG            0x148
+#define PPE_HIS_RX_SW_PKT_CNT_REG              0x200
+#define PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG                0x204
+#define PPE_HIS_RX_PKT_NO_BUF_CNT_REG          0x208
+#define PPE_HIS_TX_BD_CNT_REG                  0x20C
+#define PPE_HIS_TX_PKT_CNT_REG                 0x210
+#define PPE_HIS_TX_PKT_OK_CNT_REG              0x214
+#define PPE_HIS_TX_PKT_EPT_CNT_REG             0x218
+#define PPE_HIS_TX_PKT_CS_FAIL_CNT_REG         0x21C
+#define PPE_HIS_RX_APP_BUF_FAIL_CNT_REG                0x220
+#define PPE_HIS_RX_APP_BUF_WAIT_CNT_REG                0x224
+#define PPE_HIS_RX_PKT_DROP_FUL_CNT_REG                0x228
+#define PPE_HIS_RX_PKT_DROP_PRT_CNT_REG                0x22C
+#define PPE_TNL_0_5_CNT_CLR_CE_REG             0x300
+#define PPE_CFG_AXI_DBG_REG                    0x304
+#define PPE_HIS_PRO_ERR_REG                    0x308
+#define PPE_HIS_TNL_FIFO_ERR_REG               0x30C
+#define PPE_CURR_CFF_DATA_NUM_REG              0x310
+#define PPE_CURR_RX_ST_REG                     0x314
+#define PPE_CURR_TX_ST_REG                     0x318
+#define PPE_CURR_RX_FIFO0_REG                  0x31C
+#define PPE_CURR_RX_FIFO1_REG                  0x320
+#define PPE_CURR_TX_FIFO0_REG                  0x324
+#define PPE_CURR_TX_FIFO1_REG                  0x328
+#define PPE_ECO0_REG                           0x32C
+#define PPE_ECO1_REG                           0x330
+#define PPE_ECO2_REG                           0x334
+
+#define RCB_COM_CFG_ENDIAN_REG                 0x0
+#define RCB_COM_CFG_SYS_FSH_REG                        0xC
+#define RCB_COM_CFG_INIT_FLAG_REG              0x10
+#define RCB_COM_CFG_PKT_REG                    0x30
+#define RCB_COM_CFG_RINVLD_REG                 0x34
+#define RCB_COM_CFG_FNA_REG                    0x38
+#define RCB_COM_CFG_FA_REG                     0x3C
+#define RCB_COM_CFG_PKT_TC_BP_REG              0x40
+#define RCB_COM_CFG_PPE_TNL_CLKEN_REG          0x44
+
+#define RCB_COM_INTMSK_TX_PKT_REG              0x3A0
+#define RCB_COM_RINT_TX_PKT_REG                        0x3A8
+#define RCB_COM_INTMASK_ECC_ERR_REG            0x400
+#define RCB_COM_INTSTS_ECC_ERR_REG             0x408
+#define RCB_COM_EBD_SRAM_ERR_REG               0x410
+#define RCB_COM_RXRING_ERR_REG                 0x41C
+#define RCB_COM_TXRING_ERR_REG                 0x420
+#define RCB_COM_TX_FBD_ERR_REG                 0x424
+#define RCB_SRAM_ECC_CHK_EN_REG                        0x428
+#define RCB_SRAM_ECC_CHK0_REG                  0x42C
+#define RCB_SRAM_ECC_CHK1_REG                  0x430
+#define RCB_SRAM_ECC_CHK2_REG                  0x434
+#define RCB_SRAM_ECC_CHK3_REG                  0x438
+#define RCB_SRAM_ECC_CHK4_REG                  0x43c
+#define RCB_SRAM_ECC_CHK5_REG                  0x440
+#define RCB_ECC_ERR_ADDR0_REG                  0x450
+#define RCB_ECC_ERR_ADDR3_REG                  0x45C
+#define RCB_ECC_ERR_ADDR4_REG                  0x460
+#define RCB_ECC_ERR_ADDR5_REG                  0x464
+
+#define RCB_COM_SF_CFG_INTMASK_RING            0x480
+#define RCB_COM_SF_CFG_RING_STS                        0x484
+#define RCB_COM_SF_CFG_RING                    0x488
+#define RCB_COM_SF_CFG_INTMASK_BD              0x48C
+#define RCB_COM_SF_CFG_BD_RINT_STS             0x470
+#define RCB_COM_RCB_RD_BD_BUSY                 0x490
+#define RCB_COM_RCB_FBD_CRT_EN                 0x494
+#define RCB_COM_AXI_WR_ERR_INTMASK             0x498
+#define RCB_COM_AXI_ERR_STS                    0x49C
+#define RCB_COM_CHK_TX_FBD_NUM_REG             0x4a0
+
+#define RCB_CFG_BD_NUM_REG                     0x9000
+#define RCB_CFG_PKTLINE_REG                    0x9050
+
+#define RCB_CFG_OVERTIME_REG                   0x9300
+#define RCB_CFG_PKTLINE_INT_NUM_REG            0x9304
+#define RCB_CFG_OVERTIME_INT_NUM_REG           0x9308
+
+#define RCB_RING_RX_RING_BASEADDR_L_REG                0x00000
+#define RCB_RING_RX_RING_BASEADDR_H_REG                0x00004
+#define RCB_RING_RX_RING_BD_NUM_REG            0x00008
+#define RCB_RING_RX_RING_BD_LEN_REG            0x0000C
+#define RCB_RING_RX_RING_PKTLINE_REG           0x00010
+#define RCB_RING_RX_RING_TAIL_REG              0x00018
+#define RCB_RING_RX_RING_HEAD_REG              0x0001C
+#define RCB_RING_RX_RING_FBDNUM_REG            0x00020
+#define RCB_RING_RX_RING_PKTNUM_RECORD_REG     0x0002C
+
+#define RCB_RING_TX_RING_BASEADDR_L_REG                0x00040
+#define RCB_RING_TX_RING_BASEADDR_H_REG                0x00044
+#define RCB_RING_TX_RING_BD_NUM_REG            0x00048
+#define RCB_RING_TX_RING_BD_LEN_REG            0x0004C
+#define RCB_RING_TX_RING_PKTLINE_REG           0x00050
+#define RCB_RING_TX_RING_TAIL_REG              0x00058
+#define RCB_RING_TX_RING_HEAD_REG              0x0005C
+#define RCB_RING_TX_RING_FBDNUM_REG            0x00060
+#define RCB_RING_TX_RING_OFFSET_REG            0x00064
+#define RCB_RING_TX_RING_PKTNUM_RECORD_REG     0x0006C
+
+#define RCB_RING_PREFETCH_EN_REG               0x0007C
+#define RCB_RING_CFG_VF_NUM_REG                        0x00080
+#define RCB_RING_ASID_REG                      0x0008C
+#define RCB_RING_RX_VM_REG                     0x00090
+#define RCB_RING_T0_BE_RST                     0x00094
+#define RCB_RING_COULD_BE_RST                  0x00098
+#define RCB_RING_WRR_WEIGHT_REG                        0x0009c
+
+#define RCB_RING_INTMSK_RXWL_REG               0x000A0
+#define RCB_RING_INTSTS_RX_RING_REG            0x000A4
+#define RCB_RING_INTMSK_TXWL_REG               0x000AC
+#define RCB_RING_INTSTS_TX_RING_REG            0x000B0
+#define RCB_RING_INTMSK_RX_OVERTIME_REG                0x000B8
+#define RCB_RING_INTSTS_RX_OVERTIME_REG                0x000BC
+#define RCB_RING_INTMSK_TX_OVERTIME_REG                0x000C4
+#define RCB_RING_INTSTS_TX_OVERTIME_REG                0x000C8
+
+#define GMAC_DUPLEX_TYPE_REG                   0x0008UL
+#define GMAC_FD_FC_TYPE_REG                    0x000CUL
+#define GMAC_FC_TX_TIMER_REG                   0x001CUL
+#define GMAC_FD_FC_ADDR_LOW_REG                        0x0020UL
+#define GMAC_FD_FC_ADDR_HIGH_REG               0x0024UL
+#define GMAC_IPG_TX_TIMER_REG                  0x0030UL
+#define GMAC_PAUSE_THR_REG                     0x0038UL
+#define GMAC_MAX_FRM_SIZE_REG                  0x003CUL
+#define GMAC_PORT_MODE_REG                     0x0040UL
+#define GMAC_PORT_EN_REG                       0x0044UL
+#define GMAC_PAUSE_EN_REG                      0x0048UL
+#define GMAC_SHORT_RUNTS_THR_REG               0x0050UL
+#define GMAC_AN_NEG_STATE_REG                  0x0058UL
+#define GMAC_TX_LOCAL_PAGE_REG                 0x005CUL
+#define GMAC_TRANSMIT_CONTROL_REG              0x0060UL
+#define GMAC_REC_FILT_CONTROL_REG              0x0064UL
+#define GMAC_PTP_CONFIG_REG                    0x0074UL
+
+#define GMAC_RX_OCTETS_TOTAL_OK_REG            0x0080UL
+#define GMAC_RX_OCTETS_BAD_REG                 0x0084UL
+#define GMAC_RX_UC_PKTS_REG                    0x0088UL
+#define GMAC_RX_MC_PKTS_REG                    0x008CUL
+#define GMAC_RX_BC_PKTS_REG                    0x0090UL
+#define GMAC_RX_PKTS_64OCTETS_REG              0x0094UL
+#define GMAC_RX_PKTS_65TO127OCTETS_REG         0x0098UL
+#define GMAC_RX_PKTS_128TO255OCTETS_REG                0x009CUL
+#define GMAC_RX_PKTS_255TO511OCTETS_REG                0x00A0UL
+#define GMAC_RX_PKTS_512TO1023OCTETS_REG       0x00A4UL
+#define GMAC_RX_PKTS_1024TO1518OCTETS_REG      0x00A8UL
+#define GMAC_RX_PKTS_1519TOMAXOCTETS_REG       0x00ACUL
+#define GMAC_RX_FCS_ERRORS_REG                 0x00B0UL
+#define GMAC_RX_TAGGED_REG                     0x00B4UL
+#define GMAC_RX_DATA_ERR_REG                   0x00B8UL
+#define GMAC_RX_ALIGN_ERRORS_REG               0x00BCUL
+#define GMAC_RX_LONG_ERRORS_REG                        0x00C0UL
+#define GMAC_RX_JABBER_ERRORS_REG              0x00C4UL
+#define GMAC_RX_PAUSE_MACCTRL_FRAM_REG         0x00C8UL
+#define GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG       0x00CCUL
+#define GMAC_RX_VERY_LONG_ERR_CNT_REG          0x00D0UL
+#define GMAC_RX_RUNT_ERR_CNT_REG               0x00D4UL
+#define GMAC_RX_SHORT_ERR_CNT_REG              0x00D8UL
+#define GMAC_RX_FILT_PKT_CNT_REG               0x00E8UL
+#define GMAC_RX_OCTETS_TOTAL_FILT_REG          0x00ECUL
+#define GMAC_OCTETS_TRANSMITTED_OK_REG         0x0100UL
+#define GMAC_OCTETS_TRANSMITTED_BAD_REG                0x0104UL
+#define GMAC_TX_UC_PKTS_REG                    0x0108UL
+#define GMAC_TX_MC_PKTS_REG                    0x010CUL
+#define GMAC_TX_BC_PKTS_REG                    0x0110UL
+#define GMAC_TX_PKTS_64OCTETS_REG              0x0114UL
+#define GMAC_TX_PKTS_65TO127OCTETS_REG         0x0118UL
+#define GMAC_TX_PKTS_128TO255OCTETS_REG                0x011CUL
+#define GMAC_TX_PKTS_255TO511OCTETS_REG                0x0120UL
+#define GMAC_TX_PKTS_512TO1023OCTETS_REG       0x0124UL
+#define GMAC_TX_PKTS_1024TO1518OCTETS_REG      0x0128UL
+#define GMAC_TX_PKTS_1519TOMAXOCTETS_REG       0x012CUL
+#define GMAC_TX_EXCESSIVE_LENGTH_DROP_REG      0x014CUL
+#define GMAC_TX_UNDERRUN_REG                   0x0150UL
+#define GMAC_TX_TAGGED_REG                     0x0154UL
+#define GMAC_TX_CRC_ERROR_REG                  0x0158UL
+#define GMAC_TX_PAUSE_FRAMES_REG               0x015CUL
+#define GAMC_RX_MAX_FRAME                      0x0170UL
+#define GMAC_LINE_LOOP_BACK_REG                        0x01A8UL
+#define GMAC_CF_CRC_STRIP_REG                  0x01B0UL
+#define GMAC_MODE_CHANGE_EN_REG                        0x01B4UL
+#define GMAC_SIXTEEN_BIT_CNTR_REG              0x01CCUL
+#define GMAC_LD_LINK_COUNTER_REG               0x01D0UL
+#define GMAC_LOOP_REG                          0x01DCUL
+#define GMAC_RECV_CONTROL_REG                  0x01E0UL
+#define GMAC_VLAN_CODE_REG                     0x01E8UL
+#define GMAC_RX_OVERRUN_CNT_REG                        0x01ECUL
+#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG                0x01F4UL
+#define GMAC_RX_FAIL_COMMA_CNT_REG             0x01F8UL
+#define GMAC_STATION_ADDR_LOW_0_REG            0x0200UL
+#define GMAC_STATION_ADDR_HIGH_0_REG           0x0204UL
+#define GMAC_STATION_ADDR_LOW_1_REG            0x0208UL
+#define GMAC_STATION_ADDR_HIGH_1_REG           0x020CUL
+#define GMAC_STATION_ADDR_LOW_2_REG            0x0210UL
+#define GMAC_STATION_ADDR_HIGH_2_REG           0x0214UL
+#define GMAC_STATION_ADDR_LOW_3_REG            0x0218UL
+#define GMAC_STATION_ADDR_HIGH_3_REG           0x021CUL
+#define GMAC_STATION_ADDR_LOW_4_REG            0x0220UL
+#define GMAC_STATION_ADDR_HIGH_4_REG           0x0224UL
+#define GMAC_STATION_ADDR_LOW_5_REG            0x0228UL
+#define GMAC_STATION_ADDR_HIGH_5_REG           0x022CUL
+#define GMAC_STATION_ADDR_LOW_MSK_0_REG                0x0230UL
+#define GMAC_STATION_ADDR_HIGH_MSK_0_REG       0x0234UL
+#define GMAC_STATION_ADDR_LOW_MSK_1_REG                0x0238UL
+#define GMAC_STATION_ADDR_HIGH_MSK_1_REG       0x023CUL
+#define GMAC_MAC_SKIP_LEN_REG                  0x0240UL
+#define GMAC_TX_LOOP_PKT_PRI_REG               0x0378UL
+
+#define XGMAC_INT_STATUS_REG                   0x0
+#define XGMAC_INT_ENABLE_REG                   0x4
+#define XGMAC_INT_SET_REG                      0x8
+#define XGMAC_IERR_U_INFO_REG                  0xC
+#define XGMAC_OVF_INFO_REG                     0x10
+#define XGMAC_OVF_CNT_REG                      0x14
+#define XGMAC_PORT_MODE_REG                    0x40
+#define XGMAC_CLK_ENABLE_REG                   0x44
+#define XGMAC_RESET_REG                                0x48
+#define XGMAC_LINK_CONTROL_REG                 0x50
+#define XGMAC_LINK_STATUS_REG                  0x54
+#define XGMAC_SPARE_REG                                0xC0
+#define XGMAC_SPARE_CNT_REG                    0xC4
+
+#define XGMAC_MAC_ENABLE_REG                   0x100
+#define XGMAC_MAC_CONTROL_REG                  0x104
+#define XGMAC_MAC_IPG_REG                      0x120
+#define XGMAC_MAC_MSG_CRC_EN_REG               0x124
+#define XGMAC_MAC_MSG_IMG_REG                  0x128
+#define XGMAC_MAC_MSG_FC_CFG_REG               0x12C
+#define XGMAC_MAC_MSG_TC_CFG_REG               0x130
+#define XGMAC_MAC_PAD_SIZE_REG                 0x134
+#define XGMAC_MAC_MIN_PKT_SIZE_REG             0x138
+#define XGMAC_MAC_MAX_PKT_SIZE_REG             0x13C
+#define XGMAC_MAC_PAUSE_CTRL_REG               0x160
+#define XGMAC_MAC_PAUSE_TIME_REG               0x164
+#define XGMAC_MAC_PAUSE_GAP_REG                        0x168
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG                0x16C
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG                0x170
+#define XGMAC_MAC_PAUSE_PEER_MAC_H_REG         0x174
+#define XGMAC_MAC_PAUSE_PEER_MAC_L_REG         0x178
+#define XGMAC_MAC_PFC_PRI_EN_REG               0x17C
+#define XGMAC_MAC_1588_CTRL_REG                        0x180
+#define XGMAC_MAC_1588_TX_PORT_DLY_REG         0x184
+#define XGMAC_MAC_1588_RX_PORT_DLY_REG         0x188
+#define XGMAC_MAC_1588_ASYM_DLY_REG            0x18C
+#define XGMAC_MAC_1588_ADJUST_CFG_REG          0x190
+#define XGMAC_MAC_Y1731_ETH_TYPE_REG           0x194
+#define XGMAC_MAC_MIB_CONTROL_REG              0x198
+#define XGMAC_MAC_WAN_RATE_ADJUST_REG          0x19C
+#define XGMAC_MAC_TX_ERR_MARK_REG              0x1A0
+#define XGMAC_MAC_TX_LF_RF_CONTROL_REG         0x1A4
+#define XGMAC_MAC_RX_LF_RF_STATUS_REG          0x1A8
+#define XGMAC_MAC_TX_RUNT_PKT_CNT_REG          0x1C0
+#define XGMAC_MAC_RX_RUNT_PKT_CNT_REG          0x1C4
+#define XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG     0x1C8
+#define XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG    0x1CC
+#define XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG   0x1D0
+#define XGMAC_MAC_RX_ERR_MSG_CNT_REG           0x1D4
+#define XGMAC_MAC_RX_ERR_EFD_CNT_REG           0x1D8
+#define XGMAC_MAC_ERR_INFO_REG                 0x1DC
+#define XGMAC_MAC_DBG_INFO_REG                 0x1E0
+
+#define XGMAC_PCS_BASER_SYNC_THD_REG           0x330
+#define XGMAC_PCS_STATUS1_REG                  0x404
+#define XGMAC_PCS_BASER_STATUS1_REG            0x410
+#define XGMAC_PCS_BASER_STATUS2_REG            0x414
+#define XGMAC_PCS_BASER_SEEDA_0_REG            0x420
+#define XGMAC_PCS_BASER_SEEDA_1_REG            0x424
+#define XGMAC_PCS_BASER_SEEDB_0_REG            0x428
+#define XGMAC_PCS_BASER_SEEDB_1_REG            0x42C
+#define XGMAC_PCS_BASER_TEST_CONTROL_REG       0x430
+#define XGMAC_PCS_BASER_TEST_ERR_CNT_REG       0x434
+#define XGMAC_PCS_DBG_INFO_REG                 0x4C0
+#define XGMAC_PCS_DBG_INFO1_REG                        0x4C4
+#define XGMAC_PCS_DBG_INFO2_REG                        0x4C8
+#define XGMAC_PCS_DBG_INFO3_REG                        0x4CC
+
+#define XGMAC_PMA_ENABLE_REG                   0x700
+#define XGMAC_PMA_CONTROL_REG                  0x704
+#define XGMAC_PMA_SIGNAL_STATUS_REG            0x708
+#define XGMAC_PMA_DBG_INFO_REG                 0x70C
+#define XGMAC_PMA_FEC_ABILITY_REG              0x740
+#define XGMAC_PMA_FEC_CONTROL_REG              0x744
+#define XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG      0x750
+#define XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG    0x760
+
+#define XGMAC_TX_PKTS_FRAGMENT                 0x0000
+#define XGMAC_TX_PKTS_UNDERSIZE                        0x0008
+#define XGMAC_TX_PKTS_UNDERMIN                 0x0010
+#define XGMAC_TX_PKTS_64OCTETS                 0x0018
+#define XGMAC_TX_PKTS_65TO127OCTETS            0x0020
+#define XGMAC_TX_PKTS_128TO255OCTETS           0x0028
+#define XGMAC_TX_PKTS_256TO511OCTETS           0x0030
+#define XGMAC_TX_PKTS_512TO1023OCTETS          0x0038
+#define XGMAC_TX_PKTS_1024TO1518OCTETS         0x0040
+#define XGMAC_TX_PKTS_1519TOMAXOCTETS          0x0048
+#define XGMAC_TX_PKTS_1519TOMAXOCTETSOK                0x0050
+#define XGMAC_TX_PKTS_OVERSIZE                 0x0058
+#define XGMAC_TX_PKTS_JABBER                   0x0060
+#define XGMAC_TX_GOODPKTS                      0x0068
+#define XGMAC_TX_GOODOCTETS                    0x0070
+#define XGMAC_TX_TOTAL_PKTS                    0x0078
+#define XGMAC_TX_TOTALOCTETS                   0x0080
+#define XGMAC_TX_UNICASTPKTS                   0x0088
+#define XGMAC_TX_MULTICASTPKTS                 0x0090
+#define XGMAC_TX_BROADCASTPKTS                 0x0098
+#define XGMAC_TX_PRI0PAUSEPKTS                 0x00a0
+#define XGMAC_TX_PRI1PAUSEPKTS                 0x00a8
+#define XGMAC_TX_PRI2PAUSEPKTS                 0x00b0
+#define XGMAC_TX_PRI3PAUSEPKTS                 0x00b8
+#define XGMAC_TX_PRI4PAUSEPKTS                 0x00c0
+#define XGMAC_TX_PRI5PAUSEPKTS                 0x00c8
+#define XGMAC_TX_PRI6PAUSEPKTS                 0x00d0
+#define XGMAC_TX_PRI7PAUSEPKTS                 0x00d8
+#define XGMAC_TX_MACCTRLPKTS                   0x00e0
+#define XGMAC_TX_1731PKTS                      0x00e8
+#define XGMAC_TX_1588PKTS                      0x00f0
+#define XGMAC_RX_FROMAPPGOODPKTS               0x00f8
+#define XGMAC_RX_FROMAPPBADPKTS                        0x0100
+#define XGMAC_TX_ERRALLPKTS                    0x0108
+
+#define XGMAC_RX_PKTS_FRAGMENT                 0x0110
+#define XGMAC_RX_PKTSUNDERSIZE                 0x0118
+#define XGMAC_RX_PKTS_UNDERMIN                 0x0120
+#define XGMAC_RX_PKTS_64OCTETS                 0x0128
+#define XGMAC_RX_PKTS_65TO127OCTETS            0x0130
+#define XGMAC_RX_PKTS_128TO255OCTETS           0x0138
+#define XGMAC_RX_PKTS_256TO511OCTETS           0x0140
+#define XGMAC_RX_PKTS_512TO1023OCTETS          0x0148
+#define XGMAC_RX_PKTS_1024TO1518OCTETS         0x0150
+#define XGMAC_RX_PKTS_1519TOMAXOCTETS          0x0158
+#define XGMAC_RX_PKTS_1519TOMAXOCTETSOK                0x0160
+#define XGMAC_RX_PKTS_OVERSIZE                 0x0168
+#define XGMAC_RX_PKTS_JABBER                   0x0170
+#define XGMAC_RX_GOODPKTS                      0x0178
+#define XGMAC_RX_GOODOCTETS                    0x0180
+#define XGMAC_RX_TOTAL_PKTS                    0x0188
+#define XGMAC_RX_TOTALOCTETS                   0x0190
+#define XGMAC_RX_UNICASTPKTS                   0x0198
+#define XGMAC_RX_MULTICASTPKTS                 0x01a0
+#define XGMAC_RX_BROADCASTPKTS                 0x01a8
+#define XGMAC_RX_PRI0PAUSEPKTS                 0x01b0
+#define XGMAC_RX_PRI1PAUSEPKTS                 0x01b8
+#define XGMAC_RX_PRI2PAUSEPKTS                 0x01c0
+#define XGMAC_RX_PRI3PAUSEPKTS                 0x01c8
+#define XGMAC_RX_PRI4PAUSEPKTS                 0x01d0
+#define XGMAC_RX_PRI5PAUSEPKTS                 0x01d8
+#define XGMAC_RX_PRI6PAUSEPKTS                 0x01e0
+#define XGMAC_RX_PRI7PAUSEPKTS                 0x01e8
+#define XGMAC_RX_MACCTRLPKTS                   0x01f0
+#define XGMAC_TX_SENDAPPGOODPKTS               0x01f8
+#define XGMAC_TX_SENDAPPBADPKTS                        0x0200
+#define XGMAC_RX_1731PKTS                      0x0208
+#define XGMAC_RX_SYMBOLERRPKTS                 0x0210
+#define XGMAC_RX_FCSERRPKTS                    0x0218
+
+#define XGMAC_TRX_CORE_SRST_M                  0x2080
+
+#define DSAF_CFG_EN_S 0
+#define DSAF_CFG_TC_MODE_S 1
+#define DSAF_CFG_CRC_EN_S 2
+#define DSAF_CFG_SBM_INIT_S 3
+#define DSAF_CFG_MIX_MODE_S 4
+#define DSAF_CFG_STP_MODE_S 5
+#define DSAF_CFG_LOCA_ADDR_EN_S 6
+
+#define DSAF_CNT_CLR_CE_S 0
+#define DSAF_SNAP_EN_S 1
+
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_XGE 41
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000 410
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_2500 103
+
+#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
+#define DSAF_PFC_UNINT_CNT_S 0
+
+#define DSAF_PPE_QID_CFG_M 0xFF
+#define DSAF_PPE_QID_CFG_S 0
+
+#define DSAF_SW_PORT_TYPE_M 3
+#define DSAF_SW_PORT_TYPE_S 0
+
+#define DSAF_STP_PORT_TYPE_M 7
+#define DSAF_STP_PORT_TYPE_S 0
+
+#define DSAF_INODE_IN_PORT_NUM_M 7
+#define DSAF_INODE_IN_PORT_NUM_S 0
+
+#define HNS_DSAF_I4TC_CFG 0x18688688
+#define HNS_DSAF_I8TC_CFG 0x18FAC688
+
+#define DSAF_SBM_CFG_SHCUT_EN_S 0
+#define DSAF_SBM_CFG_EN_S 1
+#define DSAF_SBM_CFG_MIB_EN_S 2
+#define DSAF_SBM_CFG_ECC_INVERT_EN_S 3
+
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S 20
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 11) - 1) << 20)
+
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG2_SET_BUF_NUM_S 0
+#define DSAF_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_S 10
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 10
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_TBL_TCAM_ADDR_S 0
+#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
+
+#define DSAF_TBL_LINE_ADDR_S 0
+#define DSAF_TBL_LINE_ADDR_M ((1ULL << 15) - 1)
+
+#define DSAF_TBL_MCAST_CFG4_VM128_112_S 0
+#define DSAF_TBL_MCAST_CFG4_VM128_112_M (((1ULL << 7) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG4_ITEM_VLD_S 7
+#define DSAF_TBL_MCAST_CFG4_OLD_EN_S 8
+
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_S 0
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_M (((1ULL << 6) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG0_VM25_0_S 6
+#define DSAF_TBL_MCAST_CFG0_VM25_0_M (((1ULL << 26) - 1) << 6)
+
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_S 0
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_UCAST_CFG1_DVC_S 8
+#define DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S 9
+#define DSAF_TBL_UCAST_CFG1_ITEM_VLD_S 10
+#define DSAF_TBL_UCAST_CFG1_OLD_EN_S 11
+
+#define DSAF_TBL_LINE_CFG_OUT_PORT_S 0
+#define DSAF_TBL_LINE_CFG_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_LINE_CFG_DVC_S 8
+#define DSAF_TBL_LINE_CFG_MAC_DISCARD_S 9
+
+#define DSAF_TBL_PUL_OLD_RSLT_RE_S 0
+#define DSAF_TBL_PUL_MCAST_VLD_S 1
+#define DSAF_TBL_PUL_TCAM_DATA_VLD_S 2
+#define DSAF_TBL_PUL_UCAST_VLD_S 3
+#define DSAF_TBL_PUL_LINE_VLD_S 4
+#define DSAF_TBL_PUL_TCAM_LOAD_S 5
+#define DSAF_TBL_PUL_LINE_LOAD_S 6
+
+#define DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S 0
+#define DSAF_TBL_DFX_UC_LKUP_NUM_EN_S 1
+#define DSAF_TBL_DFX_MC_LKUP_NUM_EN_S 2
+#define DSAF_TBL_DFX_BC_LKUP_NUM_EN_S 3
+#define DSAF_TBL_DFX_RAM_ERR_INJECT_EN_S 4
+
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_S 0
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_M (((1ULL << 10) - 1) << 0)
+#define DSAF_VOQ_BP_ALL_UPTHRD_S 10
+#define DSAF_VOQ_BP_ALL_UPTHRD_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_XGE_GE_WORK_MODE_S 0
+#define DSAF_XGE_GE_LOOPBACK_S 1
+
+#define DSAF_FC_XGE_TX_PAUSE_S 0
+#define DSAF_REGS_XGE_CNT_CAR_S 1
+
+#define PPE_CFG_QID_MODE_DEF_QID_S     0
+#define PPE_CFG_QID_MODE_DEF_QID_M     (0xff << PPE_CFG_QID_MODE_DEF_QID_S)
+
+#define PPE_CFG_QID_MODE_CF_QID_MODE_S 8
+#define PPE_CFG_QID_MODE_CF_QID_MODE_M (0x7 << PPE_CFG_QID_MODE_CF_QID_MODE_S)
+
+#define PPE_CNT_CLR_CE_B       0
+#define PPE_CNT_CLR_SNAP_EN_B  1
+
+#define PPE_COMMON_CNT_CLR_CE_B        0
+#define PPE_COMMON_CNT_CLR_SNAP_EN_B   1
+
+#define GMAC_DUPLEX_TYPE_B 0
+
+#define GMAC_FC_TX_TIMER_S 0
+#define GMAC_FC_TX_TIMER_M 0xffff
+
+#define GMAC_MAX_FRM_SIZE_S 0
+#define GMAC_MAX_FRM_SIZE_M 0xffff
+
+#define GMAC_PORT_MODE_S       0
+#define GMAC_PORT_MODE_M       0xf
+
+#define GMAC_RGMII_1000M_DELAY_B       4
+#define GMAC_MII_TX_EDGE_SEL_B         5
+#define GMAC_FIFO_ERR_AUTO_RST_B       6
+#define GMAC_DBG_CLK_LOS_MSK_B         7
+
+#define GMAC_PORT_RX_EN_B      1
+#define GMAC_PORT_TX_EN_B      2
+
+#define GMAC_PAUSE_EN_RX_FDFC_B 0
+#define GMAC_PAUSE_EN_TX_FDFC_B 1
+#define GMAC_PAUSE_EN_TX_HDFC_B 2
+
+#define GMAC_SHORT_RUNTS_THR_S 0
+#define GMAC_SHORT_RUNTS_THR_M 0x1f
+
+#define GMAC_AN_NEG_STAT_FD_B          5
+#define GMAC_AN_NEG_STAT_HD_B          6
+#define GMAC_AN_NEG_STAT_RF1_DUPLIEX_B 12
+#define GMAC_AN_NEG_STAT_RF2_B         13
+
+#define GMAC_AN_NEG_STAT_NP_LNK_OK_B   15
+#define GMAC_AN_NEG_STAT_RX_SYNC_OK_B  20
+#define GMAC_AN_NEG_STAT_AN_DONE_B     21
+
+#define GMAC_AN_NEG_STAT_PS_S          7
+#define GMAC_AN_NEG_STAT_PS_M          (0x3 << GMAC_AN_NEG_STAT_PS_S)
+
+#define GMAC_AN_NEG_STAT_SPEED_S       10
+#define GMAC_AN_NEG_STAT_SPEED_M       (0x3 << GMAC_AN_NEG_STAT_SPEED_S)
+
+#define GMAC_TX_AN_EN_B                5
+#define GMAC_TX_CRC_ADD_B      6
+#define GMAC_TX_PAD_EN_B       7
+
+#define GMAC_LINE_LOOPBACK_B   0
+
+#define GMAC_LP_REG_CF_EXT_DRV_LP_B    1
+#define GMAC_LP_REG_CF2MI_LP_EN_B      2
+
+#define GMAC_MODE_CHANGE_EB_B  0
+
+#define GMAC_RECV_CTRL_STRIP_PAD_EN_B  3
+#define GMAC_RECV_CTRL_RUNT_PKT_EN_B   4
+
+#define GMAC_TX_LOOP_PKT_HIG_PRI_B     0
+#define GMAC_TX_LOOP_PKT_EN_B          1
+
+#define XGMAC_PORT_MODE_TX_S           0x0
+#define XGMAC_PORT_MODE_TX_M           (0x3 << XGMAC_PORT_MODE_TX_S)
+#define XGMAC_PORT_MODE_TX_40G_B       0x3
+#define XGMAC_PORT_MODE_RX_S           0x4
+#define XGMAC_PORT_MODE_RX_M           (0x3 << XGMAC_PORT_MODE_RX_S)
+#define XGMAC_PORT_MODE_RX_40G_B       0x7
+
+#define XGMAC_ENABLE_TX_B              0
+#define XGMAC_ENABLE_RX_B              1
+
+#define XGMAC_CTL_TX_FCS_B             0
+#define XGMAC_CTL_TX_PAD_B             1
+#define XGMAC_CTL_TX_PREAMBLE_TRANS_B  3
+#define XGMAC_CTL_TX_UNDER_MIN_ERR_B   4
+#define XGMAC_CTL_TX_TRUNCATE_B                5
+#define XGMAC_CTL_TX_1588_B            8
+#define XGMAC_CTL_TX_1731_B            9
+#define XGMAC_CTL_TX_PFC_B             10
+#define XGMAC_CTL_RX_FCS_B             16
+#define XGMAC_CTL_RX_FCS_STRIP_B       17
+#define XGMAC_CTL_RX_PREAMBLE_TRANS_B  19
+#define XGMAC_CTL_RX_UNDER_MIN_ERR_B   20
+#define XGMAC_CTL_RX_TRUNCATE_B                21
+#define XGMAC_CTL_RX_1588_B            24
+#define XGMAC_CTL_RX_1731_B            25
+#define XGMAC_CTL_RX_PFC_B             26
+
+#define XGMAC_PMA_FEC_CTL_TX_B         0
+#define XGMAC_PMA_FEC_CTL_RX_B         1
+#define XGMAC_PMA_FEC_CTL_ERR_EN       2
+#define XGMAC_PMA_FEC_CTL_ERR_SH       3
+
+#define XGMAC_PAUSE_CTL_TX_B           0
+#define XGMAC_PAUSE_CTL_RX_B           1
+#define XGMAC_PAUSE_CTL_RSP_MODE_B     2
+#define XGMAC_PAUSE_CTL_TX_XOFF_B      3
+
+static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
+{
+       u8 __iomem *reg_addr = ACCESS_ONCE(base);
+
+       writel(value, reg_addr + reg);
+}
+
+#define dsaf_write_dev(a, reg, value) \
+       dsaf_write_reg((a)->io_base, (reg), (value))
+
+static inline u32 dsaf_read_reg(u8 *base, u32 reg)
+{
+       u8 __iomem *reg_addr = ACCESS_ONCE(base);
+
+       return readl(reg_addr + reg);
+}
+
+#define dsaf_read_dev(a, reg) \
+       dsaf_read_reg((a)->io_base, (reg))
+
+#define dsaf_set_field(origin, mask, shift, val) \
+       do { \
+               (origin) &= (~(mask)); \
+               (origin) |= (((val) << (shift)) & (mask)); \
+       } while (0)
+
+#define dsaf_set_bit(origin, shift, val) \
+       dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
+
+static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+                                     u32 val)
+{
+       u32 origin = dsaf_read_reg(base, reg);
+
+       dsaf_set_field(origin, mask, shift, val);
+       dsaf_write_reg(base, reg, origin);
+}
+
+#define dsaf_set_dev_field(dev, reg, mask, shift, val) \
+       dsaf_set_reg_field((dev)->io_base, (reg), (mask), (shift), (val))
+
+#define dsaf_set_dev_bit(dev, reg, bit, val) \
+       dsaf_set_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit), (val))
+
+#define dsaf_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define dsaf_get_bit(origin, shift) \
+       dsaf_get_field((origin), (1ull << (shift)), (shift))
+
+static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+{
+       u32 origin;
+
+       origin = dsaf_read_reg(base, reg);
+       return dsaf_get_field(origin, mask, shift);
+}
+
+#define dsaf_get_dev_field(dev, reg, mask, shift) \
+       dsaf_get_reg_field((dev)->io_base, (reg), (mask), (shift))
+
+#define dsaf_get_dev_bit(dev, reg, bit) \
+       dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
+
+#define dsaf_write_b(addr, data)\
+       writeb((data), (__iomem unsigned char *)(addr))
+#define dsaf_read_b(addr)\
+       readb((__iomem unsigned char *)(addr))
+
+#define hns_mac_reg_read64(drv, offset) \
+       readq((__iomem void *)(((u64)(drv)->io_base + 0xc00 + (offset))))
+
+#endif /* _DSAF_REG_H */
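
[editorial sketch, not part of the patch] As an aside on the helpers just defined: a minimal, stand-alone illustration of how the masked read-modify-write accessors are meant to compose. The field macros are the real ones from this header; the register offset and the wrapper name are placeholders invented for illustration only.

/* Illustrative only: program the 10-bit VC0 max-buffer-number field of an
 * SBM config word via dsaf_set_reg_field(), then read it back with
 * dsaf_get_reg_field().  "sbm_cfg0_off" is a made-up offset, not a real
 * SBM register address.
 */
static u32 example_set_vc0_max_buf(void *base, u32 sbm_cfg0_off, u32 buf_num)
{
	dsaf_set_reg_field(base, sbm_cfg0_off,
			   DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M,
			   DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, buf_num);

	return dsaf_get_reg_field(base, sbm_cfg0_off,
				  DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M,
				  DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S);
}
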
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
new file mode 100644 (file)
index 0000000..fe7fa1d
--- /dev/null
@@ -0,0 +1,836 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_xgmac.h"
+#include "hns_dsaf_reg.h"
+
+static const struct mac_stats_string g_xgmac_stats_string[] = {
+       {"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)},
+       {"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)},
+       {"xgmac_tx_total_pkts_minto64", MAC_STATS_FIELD_OFF(tx_under_min_pkts)},
+       {"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)},
+       {"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+       {"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+       {"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+       {"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+       {"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+       {"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+       {"xgmac_tx_good_pkts_1519tomax",
+               MAC_STATS_FIELD_OFF(tx_1519tomax_good)},
+       {"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)},
+       {"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+       {"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)},
+       {"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+       {"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)},
+       {"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)},
+       {"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+       {"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+       {"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+       {"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)},
+       {"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)},
+       {"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)},
+       {"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)},
+       {"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)},
+       {"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)},
+       {"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)},
+       {"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)},
+       {"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)},
+       {"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)},
+       {"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)},
+       {"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)},
+       {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)},
+       {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)},
+
+       {"xgmac_rx_not_well_pkt", MAC_STATS_FIELD_OFF(rx_fragment_err)},
+       {"xgmac_rx_good_well_pkt", MAC_STATS_FIELD_OFF(rx_undersize)},
+       {"xgmac_rx_total_pkt", MAC_STATS_FIELD_OFF(rx_under_min)},
+       {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)},
+       {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+       {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+       {"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+       {"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+       {"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+       {"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+       {"xgmac_rx_good_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax_good)},
+       {"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)},
+       {"xgmac_rx_bad_pkt_untramax", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+       {"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)},
+       {"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+       {"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)},
+       {"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)},
+       {"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+       {"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+       {"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+       {"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+       {"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)},
+       {"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)},
+       {"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)},
+       {"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)},
+       {"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)},
+       {"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)},
+       {"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)},
+       {"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+       {"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)},
+       {"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)},
+       {"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)},
+       {"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)},
+       {"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)}
+};
+
+/**
+ *hns_xgmac_tx_enable - xgmac port tx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value)
+{
+       dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value);
+}
+
+/**
+ *hns_xgmac_rx_enable - xgmac port rx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value)
+{
+       dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value);
+}
+
+/**
+ *hns_xgmac_enable - enable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct dsaf_device *dsaf_dev
+               = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+       u32 port = drv->mac_id;
+
+       hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1);
+       mdelay(10);
+
+       /*enable XGE RX/TX */
+       if (mode == MAC_COMM_MODE_TX) {
+               hns_xgmac_tx_enable(drv, 1);
+       } else if (mode == MAC_COMM_MODE_RX) {
+               hns_xgmac_rx_enable(drv, 1);
+       } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+               hns_xgmac_tx_enable(drv, 1);
+               hns_xgmac_rx_enable(drv, 1);
+       } else {
+               dev_err(drv->dev, "error mac mode:%d\n", mode);
+       }
+}
+
+/**
+ *hns_xgmac_disable - disable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct dsaf_device *dsaf_dev
+               = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+       u32 port = drv->mac_id;
+
+       if (mode == MAC_COMM_MODE_TX) {
+               hns_xgmac_tx_enable(drv, 0);
+       } else if (mode == MAC_COMM_MODE_RX) {
+               hns_xgmac_rx_enable(drv, 0);
+       } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+               hns_xgmac_tx_enable(drv, 0);
+               hns_xgmac_rx_enable(drv, 0);
+       }
+
+       mdelay(10);
+       hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0);
+}
+
+/**
+ *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable
+ *@drv: mac driver
+ *@tx_value: tx value
+ *@rx_value: rx value
+ */
+static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value,
+                                    u32 rx_value)
+{
+       u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+
+       dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value);
+       dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value);
+       dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin);
+}
+
+/* clear exception irq for xge */
+static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en)
+{
+       u32 clr_value = 0xfffffffful;
+       u32 msk_value = en ? 0xfffffffful : 0; /* 1 is enable, 0 is disable */
+
+       dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_value);
+       dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_value);
+}
+
+/**
+ *hns_xgmac_init - initialize XGE
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_init(void *mac_drv)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct dsaf_device *dsaf_dev
+               = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+       u32 port = drv->mac_id;
+
+       hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0);
+       mdelay(100);
+       hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1);
+
+       mdelay(100);
+       hns_xgmac_exc_irq_en(drv, 0);
+
+       hns_xgmac_pma_fec_enable(drv, 0x0, 0x0);
+
+       hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+}
+
+/**
+ *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable at the same time
+ *@mac_drv: mac driver
+ *@newval:enable of pad and crc
+ */
+static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+
+       dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval);
+       dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval);
+       dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval);
+       dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin);
+}
+
+/**
+ *hns_xgmac_pausefrm_cfg - set xgmac pause frame parameters
+ *@mac_drv: mac driver
+ *@rx_en:enable rx pause frames
+ *@tx_en:enable tx pause frames
+ */
+static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+
+       dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en);
+       dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en);
+       dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
+}
+
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+       u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+               | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+       dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val);
+       dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val);
+}
+
+/**
+ *hns_xgmac_set_rx_ignore_pause_frames - set xgmac rx pause parameter
+ *@mac_drv: mac driver
+ *@enable:enable rx pause param
+ */
+static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+                        XGMAC_PAUSE_CTL_RX_B, !!enable);
+}
+
+/**
+ *hns_xgmac_set_tx_auto_pause_frames - set xgmac tx pause parameter
+ *@mac_drv: mac driver
+ *@enable:enable tx pause param
+ */
+static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+                        XGMAC_PAUSE_CTL_TX_B, !!enable);
+
+       /*if enable is not zero, set tx pause time */
+       if (enable)
+               dsaf_write_dev(drv, XGMAC_MAC_PAUSE_TIME_REG, enable);
+}
+
+/**
+ *hns_xgmac_get_id - get xgmac port id
+ *@mac_drv: mac driver
+ *@mac_id:pointer used to return the xgmac port id
+ */
+static void hns_xgmac_get_id(void *mac_drv, u8 *mac_id)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       *mac_id = drv->mac_id;
+}
+
+/**
+ *hns_xgmac_config_max_frame_length - set xgmac max frame length
+ *@mac_drv: mac driver
+ *@newval:xgmac max frame length
+ */
+static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
+}
+
+void hns_xgmac_update_stats(void *mac_drv)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
+
+       /* TX */
+       hw_stats->tx_fragment_err
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+       hw_stats->tx_undersize
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+       hw_stats->tx_under_min_pkts
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+       hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+       hw_stats->tx_65to127
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+       hw_stats->tx_128to255
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+       hw_stats->tx_256to511
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+       hw_stats->tx_512to1023
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+       hw_stats->tx_1024to1518
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+       hw_stats->tx_1519tomax
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+       hw_stats->tx_1519tomax_good
+               = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+       hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+       hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+       hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+       hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+       hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+       hw_stats->tx_total_bytes
+               = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+       hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+       hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+       hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+       hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+       hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+       hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+       hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+       hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+       hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+       hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+       hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+       hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+       hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+       hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+       hw_stats->rx_good_from_sw
+               = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+       hw_stats->rx_bad_from_sw
+               = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+       hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+
+       /* RX */
+       hw_stats->rx_fragment_err
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+       hw_stats->rx_undersize
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+       hw_stats->rx_under_min
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+       hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+       hw_stats->rx_65to127
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+       hw_stats->rx_128to255
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+       hw_stats->rx_256to511
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+       hw_stats->rx_512to1023
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+       hw_stats->rx_1024to1518
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+       hw_stats->rx_1519tomax
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+       hw_stats->rx_1519tomax_good
+               = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+       hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+       hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+       hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+       hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+       hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+       hw_stats->rx_total_bytes
+               = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+       hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+       hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+       hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+       hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+       hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+       hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+       hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+       hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+       hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+       hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+       hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+
+       hw_stats->rx_unknown_ctrl
+               = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+       hw_stats->tx_good_to_sw
+               = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+       hw_stats->tx_bad_to_sw
+               = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+       hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+       hw_stats->rx_symbol_err
+               = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+       hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+}
+
+/**
+ *hns_xgmac_free - free xgmac driver
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_free(void *mac_drv)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct dsaf_device *dsaf_dev
+               = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+       u32 mac_id = drv->mac_id;
+
+       hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0);
+}
+
+/**
+ *hns_xgmac_get_info - get xgmac information
+ *@mac_drv: mac driver
+ *@mac_info:mac information
+ */
+static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       u32 pause_time, pause_ctrl, port_mode, ctrl_val;
+
+       ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+       mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B);
+       mac_info->auto_neg = 0;
+
+       pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+       mac_info->tx_pause_time = pause_time;
+
+       port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+       mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M,
+                                          XGMAC_PORT_MODE_TX_S) &&
+                               dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M,
+                                              XGMAC_PORT_MODE_RX_S);
+       mac_info->duplex = 1;
+       mac_info->speed = MAC_SPEED_10000;
+
+       pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+       mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+       mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_pausefrm_cfg - get xgmac pause param
+ *@mac_drv: mac driver
+ *@rx_en:xgmac rx pause enable
+ *@tx_en:xgmac tx pause enable
+ */
+static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_en, u32 *tx_en)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       u32 pause_ctrl;
+
+       pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+       *rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+       *tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_link_status - get xgmac link status
+ *@mac_drv: mac driver
+ *@link_stat: xgmac link stat
+ */
+static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+       *link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+}
+
+/**
+ *hns_xgmac_get_regs - dump xgmac regs
+ *@mac_drv: mac driver
+ *@data:data for value of regs
+ */
+static void hns_xgmac_get_regs(void *mac_drv, void *data)
+{
+       u32 i = 0;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       u32 *regs = data;
+       u64 qtmp;
+
+       /* base config registers */
+       regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG);
+       regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG);
+       regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG);
+       regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG);
+       regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG);
+       regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG);
+       regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+       regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG);
+       regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG);
+       regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG);
+       regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+
+       regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG);
+       regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG);
+       regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG);
+       regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+       regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG);
+       regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG);
+       regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG);
+       regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG);
+       regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG);
+       regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG);
+       regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG);
+       regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG);
+       regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+       regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+       regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG);
+       regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG);
+       regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG);
+       regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG);
+       regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG);
+       regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG);
+       regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG);
+       regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG);
+       regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG);
+       regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG);
+       regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG);
+
+       regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG);
+       regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG);
+       regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG);
+       regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG);
+       regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG);
+       regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG);
+       regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG);
+       regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG);
+       regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG);
+       regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG);
+       regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG);
+       regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG);
+       regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG);
+       regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG);
+       regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG);
+
+       regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG);
+       regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG);
+       regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG);
+       regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG);
+       regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG);
+       regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG);
+       regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG);
+       regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG);
+       regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG);
+       regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG);
+       regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG);
+       regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG);
+       regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG);
+       regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG);
+
+       regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG);
+       regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG);
+       regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG);
+       regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG);
+       regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG);
+       regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+       regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG);
+       regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG);
+
+       /* status registers */
+#define hns_xgmac_cpy_q(p, q) \
+       do {\
+               *(p) = (u32)(q);\
+               *((p) + 1) = (u32)((q) >> 32);\
+       } while (0)
+
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+       hns_xgmac_cpy_q(&regs[73], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+       hns_xgmac_cpy_q(&regs[75], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+       hns_xgmac_cpy_q(&regs[77], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+       hns_xgmac_cpy_q(&regs[79], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+       hns_xgmac_cpy_q(&regs[81], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+       hns_xgmac_cpy_q(&regs[83], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+       hns_xgmac_cpy_q(&regs[85], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+       hns_xgmac_cpy_q(&regs[87], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+       hns_xgmac_cpy_q(&regs[89], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+       hns_xgmac_cpy_q(&regs[91], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+       hns_xgmac_cpy_q(&regs[93], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+       hns_xgmac_cpy_q(&regs[95], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+       hns_xgmac_cpy_q(&regs[97], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+       hns_xgmac_cpy_q(&regs[99], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+       hns_xgmac_cpy_q(&regs[101], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+       hns_xgmac_cpy_q(&regs[103], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+       hns_xgmac_cpy_q(&regs[105], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+       hns_xgmac_cpy_q(&regs[107], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+       hns_xgmac_cpy_q(&regs[109], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+       hns_xgmac_cpy_q(&regs[111], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[113], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[115], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[117], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[119], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[121], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[123], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[125], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[127], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+       hns_xgmac_cpy_q(&regs[129], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+       hns_xgmac_cpy_q(&regs[131], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+       hns_xgmac_cpy_q(&regs[133], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+       hns_xgmac_cpy_q(&regs[135], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+       hns_xgmac_cpy_q(&regs[137], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+       hns_xgmac_cpy_q(&regs[139], qtmp);
+
+       /* RX */
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+       hns_xgmac_cpy_q(&regs[141], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+       hns_xgmac_cpy_q(&regs[143], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+       hns_xgmac_cpy_q(&regs[145], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+       hns_xgmac_cpy_q(&regs[147], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+       hns_xgmac_cpy_q(&regs[149], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+       hns_xgmac_cpy_q(&regs[151], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+       hns_xgmac_cpy_q(&regs[153], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+       hns_xgmac_cpy_q(&regs[155], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+       hns_xgmac_cpy_q(&regs[157], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+       hns_xgmac_cpy_q(&regs[159], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+       hns_xgmac_cpy_q(&regs[161], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+       hns_xgmac_cpy_q(&regs[163], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+       hns_xgmac_cpy_q(&regs[165], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+       hns_xgmac_cpy_q(&regs[167], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+       hns_xgmac_cpy_q(&regs[169], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+       hns_xgmac_cpy_q(&regs[171], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+       hns_xgmac_cpy_q(&regs[173], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+       hns_xgmac_cpy_q(&regs[175], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+       hns_xgmac_cpy_q(&regs[177], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+       hns_xgmac_cpy_q(&regs[179], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[181], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[183], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[185], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[187], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[189], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[191], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[193], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+       hns_xgmac_cpy_q(&regs[195], qtmp);
+
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+       hns_xgmac_cpy_q(&regs[197], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+       hns_xgmac_cpy_q(&regs[199], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+       hns_xgmac_cpy_q(&regs[201], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+       hns_xgmac_cpy_q(&regs[203], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+       hns_xgmac_cpy_q(&regs[205], qtmp);
+       qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+       hns_xgmac_cpy_q(&regs[207], qtmp);
+
+       /* mark end of mac regs */
+       for (i = 208; i < 214; i++)
+               regs[i] = 0xaaaaaaaa;
+}
+
+/**
+ *hns_xgmac_get_stats - get xgmac statistics
+ *@mac_drv: mac driver
+ *@data:data for value of stats regs
+ */
+static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
+{
+       u32 i;
+       u64 *buf = data;
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct mac_hw_stats *hw_stats = NULL;
+
+       hw_stats = &drv->mac_cb->hw_stats;
+
+       for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+               buf[i] = DSAF_STATS_READ(hw_stats,
+                       g_xgmac_stats_string[i].offset);
+       }
+}
+
+/**
+ *hns_xgmac_get_strings - get xgmac string names
+ *@stringset: type of values in data
+ *@data:data for value of string name
+ */
+static void hns_xgmac_get_strings(u32 stringset, u8 *data)
+{
+       char *buff = (char *)data;
+       u32 i;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+               snprintf(buff, ETH_GSTRING_LEN, "%s", g_xgmac_stats_string[i].desc);
+               buff = buff + ETH_GSTRING_LEN;
+       }
+}
+
+/**
+ *hns_xgmac_get_sset_count - get xgmac string set count
+ *@stringset: type of values in data
+ *return xgmac string set count
+ */
+static int hns_xgmac_get_sset_count(int stringset)
+{
+       if (stringset == ETH_SS_STATS)
+               return ARRAY_SIZE(g_xgmac_stats_string);
+
+       return 0;
+}
+
+/**
+ *hns_xgmac_get_regs_count - get xgmac regs count
+ *return xgmac regs count
+ */
+static int hns_xgmac_get_regs_count(void)
+{
+       return ETH_XGMAC_DUMP_NUM;
+}
+
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+       struct mac_driver *mac_drv;
+
+       mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+       if (!mac_drv)
+               return NULL;
+
+       mac_drv->mac_init = hns_xgmac_init;
+       mac_drv->mac_enable = hns_xgmac_enable;
+       mac_drv->mac_disable = hns_xgmac_disable;
+
+       mac_drv->mac_id = mac_param->mac_id;
+       mac_drv->mac_mode = mac_param->mac_mode;
+       mac_drv->io_base = mac_param->vaddr;
+       mac_drv->dev = mac_param->dev;
+       mac_drv->mac_cb = mac_cb;
+
+       mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr;
+       mac_drv->set_an_mode = NULL;
+       mac_drv->config_loopback = NULL;
+       mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc;
+       mac_drv->config_half_duplex = NULL;
+       mac_drv->set_rx_ignore_pause_frames =
+               hns_xgmac_set_rx_ignore_pause_frames;
+       mac_drv->mac_get_id = hns_xgmac_get_id;
+       mac_drv->mac_free = hns_xgmac_free;
+       mac_drv->adjust_link = NULL;
+       mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
+       mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length;
+       mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg;
+       mac_drv->autoneg_stat = NULL;
+       mac_drv->get_info = hns_xgmac_get_info;
+       mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg;
+       mac_drv->get_link_status = hns_xgmac_get_link_status;
+       mac_drv->get_regs = hns_xgmac_get_regs;
+       mac_drv->get_ethtool_stats = hns_xgmac_get_stats;
+       mac_drv->get_sset_count = hns_xgmac_get_sset_count;
+       mac_drv->get_regs_count = hns_xgmac_get_regs_count;
+       mac_drv->get_strings = hns_xgmac_get_strings;
+       mac_drv->update_stats = hns_xgmac_update_stats;
+
+       return (void *)mac_drv;
+}
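
[editorial sketch, not part of the patch] For readers new to the HNS layering, a rough illustration of how the ops table built by hns_xgmac_config() is consumed; the caller below is invented for illustration (the real consumer lives in hns_dsaf_mac.c elsewhere in this series), but the struct members and MAC_COMM_MODE_RX_AND_TX are the ones used above. Optional ops (set_an_mode, adjust_link, ...) are left NULL and must be checked before use; the 9600-byte max frame length is an arbitrary example value.

/* Illustrative consumer of the xgmac ops table; not the driver's real code. */
static int example_bring_up_xgmac(struct hns_mac_cb *mac_cb,
				  struct mac_params *param)
{
	struct mac_driver *drv = hns_xgmac_config(mac_cb, param);

	if (!drv)
		return -ENOMEM;

	drv->mac_init(drv);				/* reset and quiesce the port */
	if (drv->config_pad_and_crc)
		drv->config_pad_and_crc(drv, 1);	/* pad + FCS on */
	drv->config_max_frame_length(drv, 9600);	/* arbitrary example value */
	drv->mac_enable(drv, MAC_COMM_MODE_RX_AND_TX);

	return 0;
}
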
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
new file mode 100644 (file)
index 0000000..139f729
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_XGMAC_H
+#define _HNS_XGMAC_H
+
+#define ETH_XGMAC_DUMP_NUM             (214)
+
+#endif
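
[editorial sketch, not part of the patch] A note on ETH_XGMAC_DUMP_NUM: hns_xgmac_get_regs() above writes u32 words at indices 0..207 plus the 0xaaaaaaaa end markers at 208..213, so the dump buffer must hold exactly 214 words. The helper below is a hedged sketch of a suitably sized allocation; in the driver itself the sizing is done by the ethtool core from get_regs_count(), and the function name here is made up.

#include <linux/slab.h>

/* Illustrative only: allocate a buffer big enough for hns_xgmac_get_regs(). */
static u32 *example_alloc_xgmac_regs_buf(void)
{
	return kcalloc(ETH_XGMAC_DUMP_NUM, sizeof(u32), GFP_KERNEL);
}
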
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
new file mode 100644 (file)
index 0000000..0713ced
--- /dev/null
@@ -0,0 +1,1646 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "hnae.h"
+#include "hns_enet.h"
+
+#define NIC_MAX_Q_PER_VF 16
+#define HNS_NIC_TX_TIMEOUT (5 * HZ)
+
+#define SERVICE_TIMER_HZ (1 * HZ)
+
+#define NIC_TX_CLEAN_MAX_NUM 256
+#define NIC_RX_CLEAN_MAX_NUM 64
+
+#define RCB_ERR_PRINT_CYCLE 1000
+
+#define RCB_IRQ_NOT_INITED 0
+#define RCB_IRQ_INITED 1
+
+static void fill_desc(struct hnae_ring *ring, void *priv,
+                     int size, dma_addr_t dma, int frag_end,
+                     int buf_num, enum hns_desc_type type)
+{
+       struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+       struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+       struct sk_buff *skb;
+       __be16 protocol;
+       u32 ip_offset;
+       u32 asid_bufnum_pid = 0;
+       u32 flag_ipoffset = 0;
+
+       desc_cb->priv = priv;
+       desc_cb->length = size;
+       desc_cb->dma = dma;
+       desc_cb->type = type;
+
+       desc->addr = cpu_to_le64(dma);
+       desc->tx.send_size = cpu_to_le16((u16)size);
+
+       /*config bd buffer end */
+       flag_ipoffset |= 1 << HNS_TXD_VLD_B;
+
+       asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
+
+       if (type == DESC_TYPE_SKB) {
+               skb = (struct sk_buff *)priv;
+
+               if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                       protocol = skb->protocol;
+                       ip_offset = ETH_HLEN;
+
+                       /*if it is a SW VLAN, check the next protocol*/
+                       if (protocol == htons(ETH_P_8021Q)) {
+                               ip_offset += VLAN_HLEN;
+                               protocol = vlan_get_protocol(skb);
+                               skb->protocol = protocol;
+                       }
+
+                       if (skb->protocol == htons(ETH_P_IP)) {
+                               flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
+                               /* check for tcp/udp header */
+                               flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+
+                       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+                               /* ipv6 has no l3 checksum, check for L4 header */
+                               flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+                       }
+
+                       flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
+               }
+       }
+
+       flag_ipoffset |= frag_end << HNS_TXD_FE_B;
+
+       desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
+       desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
+
+       ring_ptr_move_fw(ring, next_to_use);
+}
+
+static void unfill_desc(struct hnae_ring *ring)
+{
+       ring_ptr_move_bw(ring, next_to_use);
+}
+
+int hns_nic_net_xmit_hw(struct net_device *ndev,
+                       struct sk_buff *skb,
+                       struct hns_nic_ring_data *ring_data)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct device *dev = priv->dev;
+       struct hnae_ring *ring = ring_data->ring;
+       struct netdev_queue *dev_queue;
+       struct skb_frag_struct *frag;
+       int buf_num;
+       dma_addr_t dma;
+       int size, next_to_use;
+       int i, j;
+       struct sk_buff *new_skb;
+
+       assert(ring->max_desc_num_per_pkt <= ring->desc_num);
+
+       /* no. of segments (plus a header) */
+       buf_num = skb_shinfo(skb)->nr_frags + 1;
+
+       if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
+               if (ring_space(ring) < 1) {
+                       ring->stats.tx_busy++;
+                       goto out_net_tx_busy;
+               }
+
+               new_skb = skb_copy(skb, GFP_ATOMIC);
+               if (!new_skb) {
+                       ring->stats.sw_err_cnt++;
+                       netdev_err(ndev, "no memory to xmit!\n");
+                       goto out_err_tx_ok;
+               }
+
+               dev_kfree_skb_any(skb);
+               skb = new_skb;
+               buf_num = 1;
+               assert(skb_shinfo(skb)->nr_frags == 1);
+       } else if (buf_num > ring_space(ring)) {
+               ring->stats.tx_busy++;
+               goto out_net_tx_busy;
+       }
+       next_to_use = ring->next_to_use;
+
+       /* fill the first part */
+       size = skb_headlen(skb);
+       dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, dma)) {
+               netdev_err(ndev, "TX head DMA map failed\n");
+               ring->stats.sw_err_cnt++;
+               goto out_err_tx_ok;
+       }
+       fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num,
+                 DESC_TYPE_SKB);
+
+       /* fill the fragments */
+       for (i = 1; i < buf_num; i++) {
+               frag = &skb_shinfo(skb)->frags[i - 1];
+               size = skb_frag_size(frag);
+               dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, dma)) {
+                       netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
+                       ring->stats.sw_err_cnt++;
+                       goto out_map_frag_fail;
+               }
+               fill_desc(ring, skb_frag_page(frag), size, dma,
+                         buf_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE);
+       }
+
+       /* finished translating the whole packet */
+       dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
+       netdev_tx_sent_queue(dev_queue, skb->len);
+
+       wmb(); /* commit all data before submit */
+       assert(skb->queue_mapping < priv->ae_handle->q_num);
+       hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
+       ring->stats.tx_pkts++;
+       ring->stats.tx_bytes += skb->len;
+
+       return NETDEV_TX_OK;
+
+out_map_frag_fail:
+
+       for (j = i - 1; j > 0; j--) {
+               unfill_desc(ring);
+               next_to_use = ring->next_to_use;
+               dma_unmap_page(dev, ring->desc_cb[next_to_use].dma,
+                              ring->desc_cb[next_to_use].length,
+                              DMA_TO_DEVICE);
+       }
+
+       unfill_desc(ring);
+       next_to_use = ring->next_to_use;
+       dma_unmap_single(dev, ring->desc_cb[next_to_use].dma,
+                        ring->desc_cb[next_to_use].length, DMA_TO_DEVICE);
+
+out_err_tx_ok:
+
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+
+out_net_tx_busy:
+
+       netif_stop_subqueue(ndev, skb->queue_mapping);
+
+       /* Herbert's original patch had:
+        *  smp_mb__after_netif_stop_queue();
+        * but since that doesn't exist yet, just open code it.
+        */
+       smp_mb();
+       return NETDEV_TX_BUSY;
+}
+
+/**
+ * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @flag: rx descriptor flags describing the packet headers
+ * @max_size: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
+                                       unsigned int max_size)
+{
+       unsigned char *network;
+       u8 hlen;
+
+       /* this should never happen, but better safe than sorry */
+       if (max_size < ETH_HLEN)
+               return max_size;
+
+       /* initialize network frame pointer */
+       network = data;
+
+       /* set first protocol and move network header forward */
+       network += ETH_HLEN;
+
+       /* handle any vlan tag if present */
+       if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
+               == HNS_RX_FLAG_VLAN_PRESENT) {
+               if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
+                       return max_size;
+
+               network += VLAN_HLEN;
+       }
+
+       /* handle L3 protocols */
+       if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
+               == HNS_RX_FLAG_L3ID_IPV4) {
+               if ((typeof(max_size))(network - data) >
+                   (max_size - sizeof(struct iphdr)))
+                       return max_size;
+
+               /* access ihl as a u8 to avoid unaligned access on ia64 */
+               hlen = (network[0] & 0x0F) << 2;
+
+               /* verify hlen meets minimum size requirements */
+               if (hlen < sizeof(struct iphdr))
+                       return network - data;
+
+               /* record next protocol if header is present */
+       } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
+               == HNS_RX_FLAG_L3ID_IPV6) {
+               if ((typeof(max_size))(network - data) >
+                   (max_size - sizeof(struct ipv6hdr)))
+                       return max_size;
+
+               /* record next protocol */
+               hlen = sizeof(struct ipv6hdr);
+       } else {
+               return network - data;
+       }
+
+       /* relocate pointer to start of L4 header */
+       network += hlen;
+
+       /* finally sort out TCP/UDP */
+       if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
+               == HNS_RX_FLAG_L4ID_TCP) {
+               if ((typeof(max_size))(network - data) >
+                   (max_size - sizeof(struct tcphdr)))
+                       return max_size;
+
+               /* access doff as a u8 to avoid unaligned access on ia64 */
+               hlen = (network[12] & 0xF0) >> 2;
+
+               /* verify hlen meets minimum size requirements */
+               if (hlen < sizeof(struct tcphdr))
+                       return network - data;
+
+               network += hlen;
+       } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
+               == HNS_RX_FLAG_L4ID_UDP) {
+               if ((typeof(max_size))(network - data) >
+                   (max_size - sizeof(struct udphdr)))
+                       return max_size;
+
+               network += sizeof(struct udphdr);
+       }
+
+       /* If everything has gone correctly network should be the
+        * data section of the packet and will be the end of the header.
+        * If not then it probably represents the end of the last recognized
+        * header.
+        */
+       if ((typeof(max_size))(network - data) < max_size)
+               return network - data;
+       else
+               return max_size;
+}
+
+static void
+hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset)
+{
+       /* avoid reusing pages from a remote NUMA node; by default do not reuse */
+       if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) {
+               /* move offset up to the next cache line */
+               desc_cb->page_offset += tsize;
+
+               if (desc_cb->page_offset <= last_offset) {
+                       desc_cb->reuse_flag = 1;
+                       /* bump ref count on page before it is given*/
+                       get_page(desc_cb->priv);
+               }
+       }
+}
+
+static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
+                              struct sk_buff **out_skb, int *out_bnum)
+{
+       struct hnae_ring *ring = ring_data->ring;
+       struct net_device *ndev = ring_data->napi.dev;
+       struct sk_buff *skb;
+       struct hnae_desc *desc;
+       struct hnae_desc_cb *desc_cb;
+       unsigned char *va;
+       int bnum, length, size, i, truesize, last_offset;
+       int pull_len;
+       u32 bnum_flag;
+
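+       /* last_offset: highest page offset at which a buffer still fits */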
+       last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+       desc = &ring->desc[ring->next_to_clean];
+       desc_cb = &ring->desc_cb[ring->next_to_clean];
+       length = le16_to_cpu(desc->rx.pkt_len);
+       bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+       bnum = hnae_get_field(bnum_flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
+       *out_bnum = bnum;
+       va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+
+       skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE);
+       if (unlikely(!skb)) {
+               netdev_err(ndev, "alloc rx skb fail\n");
+               ring->stats.sw_err_cnt++;
+               return -ENOMEM;
+       }
+
+       if (length <= HNS_RX_HEAD_SIZE) {
+               memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+               /* we can reuse buffer as-is, just make sure it is local */
+               if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+                       desc_cb->reuse_flag = 1;
+               else /* this page cannot be reused so discard it */
+                       put_page(desc_cb->priv);
+
+               ring_ptr_move_fw(ring, next_to_clean);
+
+               if (unlikely(bnum != 1)) { /* check err*/
+                       *out_bnum = 1;
+                       goto out_bnum_err;
+               }
+       } else {
+               ring->stats.seg_pkt_cnt++;
+
+               pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+               memcpy(__skb_put(skb, pull_len), va,
+                      ALIGN(pull_len, sizeof(long)));
+
+               size = le16_to_cpu(desc->rx.size);
+               truesize = ALIGN(size, L1_CACHE_BYTES);
+               skb_add_rx_frag(skb, 0, desc_cb->priv,
+                               desc_cb->page_offset + pull_len,
+                               size - pull_len, truesize - pull_len);
+
+               hns_nic_reuse_page(desc_cb, truesize, last_offset);
+               ring_ptr_move_fw(ring, next_to_clean);
+
+               if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/
+                       *out_bnum = 1;
+                       goto out_bnum_err;
+               }
+               for (i = 1; i < bnum; i++) {
+                       desc = &ring->desc[ring->next_to_clean];
+                       desc_cb = &ring->desc_cb[ring->next_to_clean];
+                       size = le16_to_cpu(desc->rx.size);
+                       truesize = ALIGN(size, L1_CACHE_BYTES);
+                       skb_add_rx_frag(skb, i, desc_cb->priv,
+                                       desc_cb->page_offset,
+                                       size, truesize);
+
+                       hns_nic_reuse_page(desc_cb, truesize, last_offset);
+                       ring_ptr_move_fw(ring, next_to_clean);
+               }
+       }
+
+       /* exception handling: free the skb and skip past its descriptors */
+       if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
+out_bnum_err:
+               *out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved; cannot be 0 */
+               netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
+                          bnum, ring->max_desc_num_per_pkt,
+                          length, (int)MAX_SKB_FRAGS,
+                          ((u64 *)desc)[0], ((u64 *)desc)[1]);
+               ring->stats.err_bd_num++;
+               dev_kfree_skb_any(skb);
+               return -EDOM;
+       }
+
+       bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+
+       if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
+               netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
+                          ((u64 *)desc)[0], ((u64 *)desc)[1]);
+               ring->stats.non_vld_descs++;
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+
+       if (unlikely((!desc->rx.pkt_len) ||
+                    hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
+               if (!(ring->stats.err_pkt_len % RCB_ERR_PRINT_CYCLE))
+                       netdev_dbg(ndev,
+                                  "pkt_len(%u),drop(%u),%#llx,%#llx\n",
+                                  le16_to_cpu(desc->rx.pkt_len),
+                                  hnae_get_bit(bnum_flag, HNS_RXD_DROP_B),
+                                  ((u64 *)desc)[0], ((u64 *)desc)[1]);
+               ring->stats.err_pkt_len++;
+               dev_kfree_skb_any(skb);
+               return -EFAULT;
+       }
+
+       if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
+               if (!(ring->stats.l2_err % RCB_ERR_PRINT_CYCLE))
+                       netdev_dbg(ndev, "L2 check err,%#llx,%#llx\n",
+                                  ((u64 *)desc)[0], ((u64 *)desc)[1]);
+               ring->stats.l2_err++;
+               dev_kfree_skb_any(skb);
+               return -EFAULT;
+       }
+
+       ring->stats.rx_pkts++;
+       ring->stats.rx_bytes += skb->len;
+
+       if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
+                    hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
+               if (!(ring->stats.l3l4_csum_err % RCB_ERR_PRINT_CYCLE))
+                       netdev_dbg(ndev,
+                                  "check err(%#x),%#llx,%#llx\n",
+                                  hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) |
+                                  hnae_get_bit(bnum_flag, HNS_RXD_L4E_B),
+                                  ((u64 *)desc)[0], ((u64 *)desc)[1]);
+               ring->stats.l3l4_csum_err++;
+               return 0;
+       }
+
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       return 0;
+}
+
+static void
+hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleaned_count)
+{
+       int i, ret;
+       struct hnae_desc_cb res_cbs;
+       struct hnae_desc_cb *desc_cb;
+       struct hnae_ring *ring = ring_data->ring;
+       struct net_device *ndev = ring_data->napi.dev;
+
+       for (i = 0; i < cleaned_count; i++) {
+               desc_cb = &ring->desc_cb[ring->next_to_use];
+               if (desc_cb->reuse_flag) {
+                       ring->stats.reuse_pg_cnt++;
+                       hnae_reuse_buffer(ring, ring->next_to_use);
+               } else {
+                       ret = hnae_reserve_buffer_map(ring, &res_cbs);
+                       if (ret) {
+                               ring->stats.sw_err_cnt++;
+                               netdev_err(ndev, "hnae reserve buffer map failed.\n");
+                               break;
+                       }
+                       hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
+               }
+
+               ring_ptr_move_fw(ring, next_to_use);
+       }
+
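+       /* return the refilled buffers to hardware via the head register */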
+       wmb(); /* ensure all descriptor writes land before notifying hardware */
+       writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
+}
+
+/* pass the received skb up to the network stack */
+static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
+                             struct sk_buff *skb)
+{
+       struct net_device *ndev = ring_data->napi.dev;
+
+       skb->protocol = eth_type_trans(skb, ndev);
+       (void)napi_gro_receive(&ring_data->napi, skb);
+       ndev->last_rx = jiffies;
+}
+
+static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
+                              int budget, void *v)
+{
+       struct hnae_ring *ring = ring_data->ring;
+       struct sk_buff *skb;
+       int num, bnum, ex_num;
+#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+       int recv_pkts, recv_bds, clean_count, err;
+
+       num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+       rmb(); /* make sure num taken effect before the other data is touched */
+
+       recv_pkts = 0, recv_bds = 0, clean_count = 0;
+recv:
+       while (recv_pkts < budget && recv_bds < num) {
+               /* reuse or realloc buffers*/
+               if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+                       hns_nic_alloc_rx_buffers(ring_data, clean_count);
+                       clean_count = 0;
+               }
+
+               /* poll one packet */
+               err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
+               if (unlikely(!skb)) /* this fault cannot be repaired */
+                       break;
+
+               recv_bds += bnum;
+               clean_count += bnum;
+               if (unlikely(err)) {  /* skip the errored packet */
+                       recv_pkts++;
+                       continue;
+               }
+
+               /* hand the packet up to the IP stack */
+               ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
+                                                       ring_data, skb);
+               recv_pkts++;
+       }
+
+       /* refill any remaining buffers before returning */
+       if (clean_count > 0) {
+               hns_nic_alloc_rx_buffers(ring_data, clean_count);
+               clean_count = 0;
+       }
+
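+       /* if budget remains, re-check for BDs that arrived while polling */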
+       if (recv_pkts < budget) {
+               ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+               rmb(); /* make sure the BD count is read before re-polling */
+               if (ex_num > 0) {
+                       num += ex_num;
+                       goto recv;
+               }
+       }
+
+       return recv_pkts;
+}
+
+static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+       struct hnae_ring *ring = ring_data->ring;
+       int num = 0;
+
+       /* re-check hardware for pending BDs (hardware issue workaround) */
+       num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+       if (num > 0) {
+               ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+                       ring_data->ring, 1);
+
+               napi_schedule(&ring_data->napi);
+       }
+}
+
+static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
+                                           int *bytes, int *pkts)
+{
+       struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+       (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+       (*bytes) += desc_cb->length;
+       /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
+       hnae_free_buffer_detach(ring, ring->next_to_clean);
+
+       ring_ptr_move_fw(ring, next_to_clean);
+}
+
+static int is_valid_clean_head(struct hnae_ring *ring, int h)
+{
+       int u = ring->next_to_use;
+       int c = ring->next_to_clean;
+
+       if (unlikely(h > ring->desc_num))
+               return 0;
+
+       assert(u > 0 && u < ring->desc_num);
+       assert(c > 0 && c < ring->desc_num);
+       assert(u != c && h != c); /* must be checked before calling this function */
+
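+       /* a valid head lies in (next_to_clean, next_to_use], with wraparound */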
+       return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
+
+/* netif_tx_lock degrades performance; take it only when necessary */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
+#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
+#else
+#define NETIF_TX_LOCK(ndev)
+#define NETIF_TX_UNLOCK(ndev)
+#endif
+/* reclaim all descriptors within one poll budget
+ * returns 0 on success or a negative error code
+ */
+static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
+                              int budget, void *v)
+{
+       struct hnae_ring *ring = ring_data->ring;
+       struct net_device *ndev = ring_data->napi.dev;
+       struct netdev_queue *dev_queue;
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       int head;
+       int bytes, pkts;
+
+       NETIF_TX_LOCK(ndev);
+
+       head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+       rmb(); /* make sure head is ready before touch any data */
+
+       if (is_ring_empty(ring) || head == ring->next_to_clean) {
+               NETIF_TX_UNLOCK(ndev);
+               return 0; /* no data to poll */
+       }
+
+       if (!is_valid_clean_head(ring, head)) {
+               netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
+                          ring->next_to_use, ring->next_to_clean);
+               ring->stats.io_err_cnt++;
+               NETIF_TX_UNLOCK(ndev);
+               return -EIO;
+       }
+
+       bytes = 0;
+       pkts = 0;
+       while (head != ring->next_to_clean)
+               hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+       NETIF_TX_UNLOCK(ndev);
+
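+       /* report reclaimed packets/bytes so BQL can adjust the queue limit */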
+       dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+       netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+       if (unlikely(pkts && netif_carrier_ok(ndev) &&
+                    (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
+               /* Make sure that anybody stopping the queue after this
+                * sees the new next_to_clean.
+                */
+               smp_mb();
+               if (netif_tx_queue_stopped(dev_queue) &&
+                   !test_bit(NIC_STATE_DOWN, &priv->state)) {
+                       netif_tx_wake_queue(dev_queue);
+                       ring->stats.restart_queue++;
+               }
+       }
+       return 0;
+}
+
+static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+       struct hnae_ring *ring = ring_data->ring;
+       int head = ring->next_to_clean;
+
+       /* read the real head from hardware (hardware issue workaround) */
+       head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+       if (head != ring->next_to_clean) {
+               ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+                       ring_data->ring, 1);
+
+               napi_schedule(&ring_data->napi);
+       }
+}
+
+static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
+{
+       struct hnae_ring *ring = ring_data->ring;
+       struct net_device *ndev = ring_data->napi.dev;
+       struct netdev_queue *dev_queue;
+       int head;
+       int bytes, pkts;
+
+       NETIF_TX_LOCK(ndev);
+
+       head = ring->next_to_use; /* ntu: position maintained by software */
+       bytes = 0;
+       pkts = 0;
+       while (head != ring->next_to_clean)
+               hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+       NETIF_TX_UNLOCK(ndev);
+
+       dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+       netdev_tx_reset_queue(dev_queue);
+}
+
+static int hns_nic_common_poll(struct napi_struct *napi, int budget)
+{
+       struct hns_nic_ring_data *ring_data =
+               container_of(napi, struct hns_nic_ring_data, napi);
+       int clean_complete = ring_data->poll_one(
+                               ring_data, budget, ring_data->ex_process);
+
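+       /* done within budget: complete NAPI and re-enable the ring IRQ */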
+       if (clean_complete >= 0 && clean_complete < budget) {
+               napi_complete(napi);
+               ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+                       ring_data->ring, 0);
+
+               ring_data->fini_process(ring_data);
+       }
+
+       return clean_complete;
+}
+
+static irqreturn_t hns_irq_handle(int irq, void *dev)
+{
+       struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
+
+       ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+               ring_data->ring, 1);
+       napi_schedule(&ring_data->napi);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ *hns_nic_adjust_link - adjust the link mode according to the PHY state
+ *@ndev: net device
+ */
+static void hns_nic_adjust_link(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+
+       h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
+}
+
+/**
+ *hns_nic_init_phy - init phy
+ *@ndev: net device
+ *@h: ae handle
+ * Return 0 on success, negative on failure
+ */
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct phy_device *phy_dev = NULL;
+
+       if (!h->phy_node)
+               return 0;
+
+       if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+               phy_dev = of_phy_connect(ndev, h->phy_node,
+                                        hns_nic_adjust_link, 0, h->phy_if);
+       else
+               phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);
+
+       if (unlikely(!phy_dev) || IS_ERR(phy_dev))
+               return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);
+
+       phy_dev->supported &= h->if_support;
+       phy_dev->advertising = phy_dev->supported;
+
+       if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+               phy_dev->autoneg = false;
+
+       priv->phy = phy_dev;
+
+       return 0;
+}
+
+static int hns_nic_ring_open(struct net_device *netdev, int idx)
+{
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct hnae_handle *h = priv->ae_handle;
+
+       napi_enable(&priv->ring_data[idx].napi);
+
+       enable_irq(priv->ring_data[idx].ring->irq);
+       h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
+
+       return 0;
+}
+
+static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       struct sockaddr *mac_addr = p;
+       int ret;
+
+       if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
+       if (ret) {
+               netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
+               return ret;
+       }
+
+       memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
+
+       return 0;
+}
+
+void hns_nic_update_stats(struct net_device *netdev)
+{
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct hnae_handle *h = priv->ae_handle;
+
+       h->dev->ops->update_stats(h, &netdev->stats);
+}
+
+/* set the MAC address if configured, otherwise use a random one */
+static void hns_init_mac_addr(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct device_node *node = priv->dev->of_node;
+       const void *mac_addr_temp;
+
+       mac_addr_temp = of_get_mac_address(node);
+       if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
+               memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
+       } else {
+               eth_hw_addr_random(ndev);
+               dev_warn(priv->dev, "No valid mac, use random mac %pM",
+                        ndev->dev_addr);
+       }
+}
+
+static void hns_nic_ring_close(struct net_device *netdev, int idx)
+{
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct hnae_handle *h = priv->ae_handle;
+
+       h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
+       disable_irq(priv->ring_data[idx].ring->irq);
+
+       napi_disable(&priv->ring_data[idx].napi);
+}
+
+static int hns_nic_init_irq(struct hns_nic_priv *priv)
+{
+       struct hnae_handle *h = priv->ae_handle;
+       struct hns_nic_ring_data *rd;
+       int i;
+       int ret;
+       int cpu;
+       cpumask_t mask;
+
+       for (i = 0; i < h->q_num * 2; i++) {
+               rd = &priv->ring_data[i];
+
+               if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
+                       break;
+
+               snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
+                        "%s-%s%d", priv->netdev->name,
+                        (i < h->q_num ? "tx" : "rx"), rd->queue_index);
+
+               rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
+
+               ret = request_irq(rd->ring->irq,
+                                 hns_irq_handle, 0, rd->ring->ring_name, rd);
+               if (ret) {
+                       netdev_err(priv->netdev, "request irq(%d) fail\n",
+                                  rd->ring->irq);
+                       return ret;
+               }
+               disable_irq(rd->ring->irq);
+               rd->ring->irq_init_flag = RCB_IRQ_INITED;
+
+               /*set cpu affinity*/
+               if (cpu_online(rd->queue_index)) {
+                       cpumask_clear(&mask);
+                       cpu = rd->queue_index;
+                       cpumask_set_cpu(cpu, &mask);
+                       irq_set_affinity_hint(rd->ring->irq, &mask);
+               }
+       }
+
+       return 0;
+}
+
+static int hns_nic_net_up(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       int i, j, k;
+       int ret;
+
+       ret = hns_nic_init_irq(priv);
+       if (ret != 0) {
+               netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
+               return ret;
+       }
+
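+       /* open all rings: the first q_num are tx rings, the rest rx rings */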
+       for (i = 0; i < h->q_num * 2; i++) {
+               ret = hns_nic_ring_open(ndev, i);
+               if (ret)
+                       goto out_has_some_queues;
+       }
+
+       for (k = 0; k < h->q_num; k++)
+               h->dev->ops->toggle_queue_status(h->qs[k], 1);
+
+       ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
+       if (ret)
+               goto out_set_mac_addr_err;
+
+       ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+       if (ret)
+               goto out_start_err;
+
+       if (priv->phy)
+               phy_start(priv->phy);
+
+       clear_bit(NIC_STATE_DOWN, &priv->state);
+       (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+       return 0;
+
+out_start_err:
+       netif_stop_queue(ndev);
+out_set_mac_addr_err:
+       for (k = 0; k < h->q_num; k++)
+               h->dev->ops->toggle_queue_status(h->qs[k], 0);
+out_has_some_queues:
+       for (j = i - 1; j >= 0; j--)
+               hns_nic_ring_close(ndev, j);
+
+       set_bit(NIC_STATE_DOWN, &priv->state);
+
+       return ret;
+}
+
+static void hns_nic_net_down(struct net_device *ndev)
+{
+       int i;
+       struct hnae_ae_ops *ops;
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+
+       if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
+               return;
+
+       (void)del_timer_sync(&priv->service_timer);
+       netif_tx_stop_all_queues(ndev);
+       netif_carrier_off(ndev);
+       netif_tx_disable(ndev);
+       priv->link = 0;
+
+       if (priv->phy)
+               phy_stop(priv->phy);
+
+       ops = priv->ae_handle->dev->ops;
+
+       if (ops->stop)
+               ops->stop(priv->ae_handle);
+
+       netif_tx_stop_all_queues(ndev);
+
+       for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
+               hns_nic_ring_close(ndev, i);
+               hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
+
+               /* clean tx buffers*/
+               hns_nic_tx_clr_all_bufs(priv->ring_data + i);
+       }
+}
+
+void hns_nic_net_reset(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *handle = priv->ae_handle;
+
+       while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
+               usleep_range(1000, 2000);
+
+       (void)hnae_reinit_handle(handle);
+
+       clear_bit(NIC_STATE_RESETTING, &priv->state);
+}
+
+void hns_nic_net_reinit(struct net_device *netdev)
+{
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+
+       priv->netdev->trans_start = jiffies;
+       while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
+               usleep_range(1000, 2000);
+
+       hns_nic_net_down(netdev);
+       hns_nic_net_reset(netdev);
+       (void)hns_nic_net_up(netdev);
+       clear_bit(NIC_STATE_REINITING, &priv->state);
+}
+
+static int hns_nic_net_open(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       int ret;
+
+       if (test_bit(NIC_STATE_TESTING, &priv->state))
+               return -EBUSY;
+
+       priv->link = 0;
+       netif_carrier_off(ndev);
+
+       ret = netif_set_real_num_tx_queues(ndev, h->q_num);
+       if (ret < 0) {
+               netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
+                          ret);
+               return ret;
+       }
+
+       ret = netif_set_real_num_rx_queues(ndev, h->q_num);
+       if (ret < 0) {
+               netdev_err(ndev,
+                          "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
+               return ret;
+       }
+
+       ret = hns_nic_net_up(ndev);
+       if (ret) {
+               netdev_err(ndev,
+                          "hns net up fail, ret=%d!\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int hns_nic_net_stop(struct net_device *ndev)
+{
+       hns_nic_net_down(ndev);
+
+       return 0;
+}
+
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+static void hns_nic_net_timeout(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+
+       hns_tx_timeout_reset(priv);
+}
+
+static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
+                           int cmd)
+{
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct phy_device *phy_dev = priv->phy;
+
+       if (!netif_running(netdev))
+               return -EINVAL;
+
+       if (!phy_dev)
+               return -ENOTSUPP;
+
+       return phy_mii_ioctl(phy_dev, ifr, cmd);
+}
+
+/* used only by netconsole to poll the device without interrupts */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void hns_nic_poll_controller(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       unsigned long flags;
+       int i;
+
+       local_irq_save(flags);
+       for (i = 0; i < priv->ae_handle->q_num * 2; i++)
+               napi_schedule(&priv->ring_data[i].napi);
+       local_irq_restore(flags);
+}
+#endif
+
+static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
+                                   struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       int ret;
+
+       assert(skb->queue_mapping < priv->ae_handle->q_num);
+       ret = hns_nic_net_xmit_hw(ndev, skb,
+                                 &tx_ring_data(priv, skb->queue_mapping));
+       if (ret == NETDEV_TX_OK) {
+               ndev->trans_start = jiffies;
+               ndev->stats.tx_bytes += skb->len;
+               ndev->stats.tx_packets++;
+       }
+       return (netdev_tx_t)ret;
+}
+
+static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       int ret;
+
+       /* MTU < 68 is an error and causes problems on some kernels */
+       if (new_mtu < 68)
+               return -EINVAL;
+
+       if (!h->dev->ops->set_mtu)
+               return -ENOTSUPP;
+
+       if (netif_running(ndev)) {
+               (void)hns_nic_net_stop(ndev);
+               msleep(100);
+
+               ret = h->dev->ops->set_mtu(h, new_mtu);
+               if (ret)
+                       netdev_err(ndev, "set mtu fail, return value %d\n",
+                                  ret);
+
+               if (hns_nic_net_open(ndev))
+                       netdev_err(ndev, "hns net open fail\n");
+       } else {
+               ret = h->dev->ops->set_mtu(h, new_mtu);
+       }
+
+       if (!ret)
+               ndev->mtu = new_mtu;
+
+       return ret;
+}
+
+/**
+ * hns_set_multicast_list - set multicast MAC addresses
+ * @ndev: net device
+ *
+ * return void
+ */
+void hns_set_multicast_list(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       struct netdev_hw_addr *ha = NULL;
+
+       if (!h) {
+               netdev_err(ndev, "hnae handle is null\n");
+               return;
+       }
+
+       if (h->dev->ops->set_mc_addr) {
+               netdev_for_each_mc_addr(ha, ndev)
+                       if (h->dev->ops->set_mc_addr(h, ha->addr))
+                               netdev_err(ndev, "set multicast fail\n");
+       }
+}
+
+struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
+                                             struct rtnl_link_stats64 *stats)
+{
+       int idx = 0;
+       u64 tx_bytes = 0;
+       u64 rx_bytes = 0;
+       u64 tx_pkts = 0;
+       u64 rx_pkts = 0;
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+
+       for (idx = 0; idx < h->q_num; idx++) {
+               tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
+               tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
+               rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
+               rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
+       }
+
+       stats->tx_bytes = tx_bytes;
+       stats->tx_packets = tx_pkts;
+       stats->rx_bytes = rx_bytes;
+       stats->rx_packets = rx_pkts;
+
+       stats->rx_errors = ndev->stats.rx_errors;
+       stats->multicast = ndev->stats.multicast;
+       stats->rx_length_errors = ndev->stats.rx_length_errors;
+       stats->rx_crc_errors = ndev->stats.rx_crc_errors;
+       stats->rx_missed_errors = ndev->stats.rx_missed_errors;
+
+       stats->tx_errors = ndev->stats.tx_errors;
+       stats->rx_dropped = ndev->stats.rx_dropped;
+       stats->tx_dropped = ndev->stats.tx_dropped;
+       stats->collisions = ndev->stats.collisions;
+       stats->rx_over_errors = ndev->stats.rx_over_errors;
+       stats->rx_frame_errors = ndev->stats.rx_frame_errors;
+       stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
+       stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
+       stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
+       stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
+       stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
+       stats->tx_window_errors = ndev->stats.tx_window_errors;
+       stats->rx_compressed = ndev->stats.rx_compressed;
+       stats->tx_compressed = ndev->stats.tx_compressed;
+
+       return stats;
+}
+
+static const struct net_device_ops hns_nic_netdev_ops = {
+       .ndo_open = hns_nic_net_open,
+       .ndo_stop = hns_nic_net_stop,
+       .ndo_start_xmit = hns_nic_net_xmit,
+       .ndo_tx_timeout = hns_nic_net_timeout,
+       .ndo_set_mac_address = hns_nic_net_set_mac_address,
+       .ndo_change_mtu = hns_nic_change_mtu,
+       .ndo_do_ioctl = hns_nic_do_ioctl,
+       .ndo_get_stats64 = hns_nic_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller = hns_nic_poll_controller,
+#endif
+       .ndo_set_rx_mode = hns_set_multicast_list,
+};
+
+static void hns_nic_update_link_status(struct net_device *netdev)
+{
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+
+       struct hnae_handle *h = priv->ae_handle;
+       int state = 1;
+
+       if (priv->phy) {
+               if (!genphy_update_link(priv->phy))
+                       state = priv->phy->link;
+               else
+                       state = 0;
+       }
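+       /* link is up only if both the PHY (if present) and the AE agree */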
+       state = state && h->dev->ops->get_status(h);
+
+       if (state != priv->link) {
+               if (state) {
+                       netif_carrier_on(netdev);
+                       netif_tx_wake_all_queues(netdev);
+                       netdev_info(netdev, "link up\n");
+               } else {
+                       netif_carrier_off(netdev);
+                       netdev_info(netdev, "link down\n");
+               }
+               priv->link = state;
+       }
+}
+
+/* for dumping key regs*/
+static void hns_nic_dump(struct hns_nic_priv *priv)
+{
+       struct hnae_handle *h = priv->ae_handle;
+       struct hnae_ae_ops *ops = h->dev->ops;
+       u32 *data, reg_num, i;
+
+       if (ops->get_regs_len && ops->get_regs) {
+               reg_num = ops->get_regs_len(priv->ae_handle);
+               reg_num = (reg_num + 3ul) & ~3ul;
+               data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
+               if (data) {
+                       ops->get_regs(priv->ae_handle, data);
+                       for (i = 0; i < reg_num; i += 4)
+                               pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                                       i, data[i], data[i + 1],
+                                       data[i + 2], data[i + 3]);
+                       kfree(data);
+               }
+       }
+
+       for (i = 0; i < h->q_num; i++) {
+               pr_info("tx_queue%d_next_to_clean:%d\n",
+                       i, h->qs[i]->tx_ring.next_to_clean);
+               pr_info("tx_queue%d_next_to_use:%d\n",
+                       i, h->qs[i]->tx_ring.next_to_use);
+               pr_info("rx_queue%d_next_to_clean:%d\n",
+                       i, h->qs[i]->rx_ring.next_to_clean);
+               pr_info("rx_queue%d_next_to_use:%d\n",
+                       i, h->qs[i]->rx_ring.next_to_use);
+       }
+}
+
+/* subtask for resetting the device */
+static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
+{
+       enum hnae_port_type type = priv->ae_handle->port_type;
+
+       if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
+               return;
+       clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+
+       /* If we're already down, removing or resetting, just bail */
+       if (test_bit(NIC_STATE_DOWN, &priv->state) ||
+           test_bit(NIC_STATE_REMOVING, &priv->state) ||
+           test_bit(NIC_STATE_RESETTING, &priv->state))
+               return;
+
+       hns_nic_dump(priv);
+       netdev_err(priv->netdev, "Reset %s port\n",
+                  (type == HNAE_PORT_DEBUG ? "debug" : "business"));
+
+       rtnl_lock();
+       if (type == HNAE_PORT_DEBUG) {
+               hns_nic_net_reinit(priv->netdev);
+       } else {
+               hns_nic_net_down(priv->netdev);
+               hns_nic_net_reset(priv->netdev);
+       }
+       rtnl_unlock();
+}
+
+/* mark the service task as completed */
+static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
+{
+       assert(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
+
+       smp_mb__before_atomic();
+       clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+}
+
+static void hns_nic_service_task(struct work_struct *work)
+{
+       struct hns_nic_priv *priv
+               = container_of(work, struct hns_nic_priv, service_task);
+       struct hnae_handle *h = priv->ae_handle;
+
+       hns_nic_update_link_status(priv->netdev);
+       h->dev->ops->update_led_status(h);
+       hns_nic_update_stats(priv->netdev);
+
+       hns_nic_reset_subtask(priv);
+       hns_nic_service_event_complete(priv);
+}
+
+static void hns_nic_task_schedule(struct hns_nic_priv *priv)
+{
+       if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
+           !test_bit(NIC_STATE_REMOVING, &priv->state) &&
+           !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
+               (void)schedule_work(&priv->service_task);
+}
+
+static void hns_nic_service_timer(unsigned long data)
+{
+       struct hns_nic_priv *priv = (struct hns_nic_priv *)data;
+
+       (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+       hns_nic_task_schedule(priv);
+}
+
+/**
+ * hns_tx_timeout_reset - initiate reset due to Tx timeout
+ * @priv: driver private struct
+ **/
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
+{
+       /* Do the reset outside of interrupt context */
+       if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
+               set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+               netdev_warn(priv->netdev,
+                           "initiating reset due to tx timeout(%llu,0x%lx)\n",
+                           priv->tx_timeout_count, priv->state);
+               priv->tx_timeout_count++;
+               hns_nic_task_schedule(priv);
+       }
+}
+
+static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
+{
+       struct hnae_handle *h = priv->ae_handle;
+       struct hns_nic_ring_data *rd;
+       int i;
+
+       if (h->q_num > NIC_MAX_Q_PER_VF) {
+               netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
+               return -EINVAL;
+       }
+
+       priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
+                                 GFP_KERNEL);
+       if (!priv->ring_data)
+               return -ENOMEM;
+
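+       /* the first q_num entries serve tx rings, the next q_num rx rings */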
+       for (i = 0; i < h->q_num; i++) {
+               rd = &priv->ring_data[i];
+               rd->queue_index = i;
+               rd->ring = &h->qs[i]->tx_ring;
+               rd->poll_one = hns_nic_tx_poll_one;
+               rd->fini_process = hns_nic_tx_fini_pro;
+
+               netif_napi_add(priv->netdev, &rd->napi,
+                              hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+               rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+       }
+       for (i = h->q_num; i < h->q_num * 2; i++) {
+               rd = &priv->ring_data[i];
+               rd->queue_index = i - h->q_num;
+               rd->ring = &h->qs[i - h->q_num]->rx_ring;
+               rd->poll_one = hns_nic_rx_poll_one;
+               rd->ex_process = hns_nic_rx_up_pro;
+               rd->fini_process = hns_nic_rx_fini_pro;
+
+               netif_napi_add(priv->netdev, &rd->napi,
+                              hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+               rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+       }
+
+       return 0;
+}
+
+static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
+{
+       struct hnae_handle *h = priv->ae_handle;
+       int i;
+
+       for (i = 0; i < h->q_num * 2; i++) {
+               netif_napi_del(&priv->ring_data[i].napi);
+               if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+                       irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+                                             NULL);
+                       free_irq(priv->ring_data[i].ring->irq,
+                                &priv->ring_data[i]);
+               }
+
+               priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+       }
+       kfree(priv->ring_data);
+}
+
+static int hns_nic_try_get_ae(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h;
+       int ret;
+
+       h = hnae_get_handle(&priv->netdev->dev,
+                           priv->ae_name, priv->port_id, NULL);
+       if (IS_ERR_OR_NULL(h)) {
+               ret = PTR_ERR(h);
+               dev_dbg(priv->dev, "no handle yet, register notifier!\n");
+               goto out;
+       }
+       priv->ae_handle = h;
+
+       ret = hns_nic_init_phy(ndev, h);
+       if (ret) {
+               dev_err(priv->dev, "probe phy device fail!\n");
+               goto out_init_phy;
+       }
+
+       ret = hns_nic_init_ring_data(priv);
+       if (ret) {
+               ret = -ENOMEM;
+               goto out_init_ring_data;
+       }
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               dev_err(priv->dev, "probe register netdev fail!\n");
+               goto out_reg_ndev_fail;
+       }
+       return 0;
+
+out_reg_ndev_fail:
+       hns_nic_uninit_ring_data(priv);
+       priv->ring_data = NULL;
+out_init_phy:
+out_init_ring_data:
+       hnae_put_handle(priv->ae_handle);
+       priv->ae_handle = NULL;
+out:
+       return ret;
+}
+
+static int hns_nic_notifier_action(struct notifier_block *nb,
+                                  unsigned long action, void *data)
+{
+       struct hns_nic_priv *priv =
+               container_of(nb, struct hns_nic_priv, notifier_block);
+
+       assert(action == HNAE_AE_REGISTER);
+
+       if (!hns_nic_try_get_ae(priv->netdev)) {
+               hnae_unregister_notifier(&priv->notifier_block);
+               priv->notifier_block.notifier_call = NULL;
+       }
+       return 0;
+}
+
+static int hns_nic_dev_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct net_device *ndev;
+       struct hns_nic_priv *priv;
+       struct device_node *node = dev->of_node;
+       int ret;
+
+       ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
+       if (!ndev)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, ndev);
+
+       priv = netdev_priv(ndev);
+       priv->dev = dev;
+       priv->netdev = ndev;
+
+       if (of_device_is_compatible(node, "hisilicon,hns-nic-v2"))
+               priv->enet_ver = AE_VERSION_2;
+       else
+               priv->enet_ver = AE_VERSION_1;
+
+       ret = of_property_read_string(node, "ae-name", &priv->ae_name);
+       if (ret)
+               goto out_read_string_fail;
+
+       ret = of_property_read_u32(node, "port-id", &priv->port_id);
+       if (ret)
+               goto out_read_string_fail;
+
+       hns_init_mac_addr(ndev);
+
+       ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+       ndev->priv_flags |= IFF_UNICAST_FLT;
+       ndev->netdev_ops = &hns_nic_netdev_ops;
+       hns_ethtool_set_ops(ndev);
+       ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+               NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+               NETIF_F_GRO;
+       ndev->vlan_features |=
+               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+       ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+       SET_NETDEV_DEV(ndev, dev);
+
+       if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+               dev_dbg(dev, "set mask to 64bit\n");
+       else
+               dev_err(dev, "set mask to 64bit fail!\n");
+
+       /* carrier off reporting is important to ethtool even BEFORE open */
+       netif_carrier_off(ndev);
+
+       setup_timer(&priv->service_timer, hns_nic_service_timer,
+                   (unsigned long)priv);
+       INIT_WORK(&priv->service_task, hns_nic_service_task);
+
+       set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
+       clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+       set_bit(NIC_STATE_DOWN, &priv->state);
+
+       if (hns_nic_try_get_ae(priv->netdev)) {
+               priv->notifier_block.notifier_call = hns_nic_notifier_action;
+               ret = hnae_register_notifier(&priv->notifier_block);
+               if (ret) {
+                       dev_err(dev, "register notifier fail!\n");
+                       goto out_notify_fail;
+               }
+               dev_dbg(dev, "no handle yet, register notifier!\n");
+       }
+
+       return 0;
+
+out_notify_fail:
+       (void)cancel_work_sync(&priv->service_task);
+out_read_string_fail:
+       free_netdev(ndev);
+       return ret;
+}
+
+static int hns_nic_dev_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+
+       if (ndev->reg_state != NETREG_UNINITIALIZED)
+               unregister_netdev(ndev);
+
+       if (priv->ring_data)
+               hns_nic_uninit_ring_data(priv);
+       priv->ring_data = NULL;
+
+       if (priv->phy)
+               phy_disconnect(priv->phy);
+       priv->phy = NULL;
+
+       if (!IS_ERR_OR_NULL(priv->ae_handle))
+               hnae_put_handle(priv->ae_handle);
+       priv->ae_handle = NULL;
+       if (priv->notifier_block.notifier_call)
+               hnae_unregister_notifier(&priv->notifier_block);
+       priv->notifier_block.notifier_call = NULL;
+
+       set_bit(NIC_STATE_REMOVING, &priv->state);
+       (void)cancel_work_sync(&priv->service_task);
+
+       free_netdev(ndev);
+       return 0;
+}
+
+static const struct of_device_id hns_enet_of_match[] = {
+       {.compatible = "hisilicon,hns-nic-v1",},
+       {.compatible = "hisilicon,hns-nic-v2",},
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, hns_enet_of_match);
+
+static struct platform_driver hns_nic_dev_driver = {
+       .driver = {
+               .name = "hns-nic",
+               .owner = THIS_MODULE,
+               .of_match_table = hns_enet_of_match,
+       },
+       .probe = hns_nic_dev_probe,
+       .remove = hns_nic_dev_remove,
+};
+
+module_platform_driver(hns_nic_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
new file mode 100644 (file)
index 0000000..dae0ed1
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HNS_ENET_H
+#define __HNS_ENET_H
+
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#include "hnae.h"
+
+enum hns_nic_state {
+       NIC_STATE_TESTING = 0,
+       NIC_STATE_RESETTING,
+       NIC_STATE_REINITING,
+       NIC_STATE_DOWN,
+       NIC_STATE_DISABLED,
+       NIC_STATE_REMOVING,
+       NIC_STATE_SERVICE_INITED,
+       NIC_STATE_SERVICE_SCHED,
+       NIC_STATE2_RESET_REQUESTED,
+       NIC_STATE_MAX
+};
+
+struct hns_nic_ring_data {
+       struct hnae_ring *ring;
+       struct napi_struct napi;
+       int queue_index;
+       int (*poll_one)(struct hns_nic_ring_data *, int, void *);
+       void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
+       void (*fini_process)(struct hns_nic_ring_data *);
+};
+
+struct hns_nic_priv {
+       const char *ae_name;
+       u32 enet_ver;
+       u32 port_id;
+       int phy_mode;
+       int phy_led_val;
+       struct phy_device *phy;
+       struct net_device *netdev;
+       struct device *dev;
+       struct hnae_handle *ae_handle;
+
+       /* the cb for nic to manage the ring buffer; the first half of the
+        * array is for tx rings and the second half for rx rings
+        */
+       struct hns_nic_ring_data *ring_data;
+
+       /* The most recently read link state */
+       int link;
+       u64 tx_timeout_count;
+
+       unsigned long state;
+
+       struct timer_list service_timer;
+
+       struct work_struct service_task;
+
+       struct notifier_block notifier_block;
+};
+
+#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
+#define rx_ring_data(priv, idx) \
+       ((priv)->ring_data[(priv)->ae_handle->q_num + (idx)])
+
+void hns_ethtool_set_ops(struct net_device *ndev);
+void hns_nic_net_reset(struct net_device *ndev);
+void hns_nic_net_reinit(struct net_device *netdev);
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
+int hns_nic_net_xmit_hw(struct net_device *ndev,
+                       struct sk_buff *skb,
+                       struct hns_nic_ring_data *ring_data);
+
+#endif /* __HNS_ENET_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
new file mode 100644 (file)
index 0000000..2550208
--- /dev/null
@@ -0,0 +1,1230 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "hns_enet.h"
+
+#define HNS_PHY_PAGE_MDIX      0
+#define HNS_PHY_PAGE_LED       3
+#define HNS_PHY_PAGE_COPPER    0
+
+#define HNS_PHY_PAGE_REG       22      /* Page Selection Reg. */
+#define HNS_PHY_CSC_REG                16      /* Copper Specific Control Register */
+#define HNS_PHY_CSS_REG                17      /* Copper Specific Status Register */
+#define HNS_LED_FC_REG         16      /* LED Function Control Reg. */
+#define HNS_LED_PC_REG         17      /* LED Polarity Control Reg. */
+
+#define HNS_LED_FORCE_ON       9
+#define HNS_LED_FORCE_OFF      8
+
+#define HNS_CHIP_VERSION 660
+#define HNS_NET_STATS_CNT 26
+
+#define PHY_MDIX_CTRL_S                (5)
+#define PHY_MDIX_CTRL_M                (3 << PHY_MDIX_CTRL_S)
+
+#define PHY_MDIX_STATUS_B      (6)
+#define PHY_SPEED_DUP_RESOLVE_B        (11)
+
+/**
+ *hns_nic_get_link - get current link status
+ *@net_dev: net_device
+ *return: current link status; non-zero means the link is up
+ */
+static u32 hns_nic_get_link(struct net_device *net_dev)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       u32 link_stat = priv->link;
+       struct hnae_handle *h;
+
+       assert(priv && priv->ae_handle);
+       h = priv->ae_handle;
+
+       if (priv->phy) {
+               if (!genphy_update_link(priv->phy))
+                       link_stat = priv->phy->link;
+               else
+                       link_stat = 0;
+       }
+
+       if (h->dev && h->dev->ops && h->dev->ops->get_status)
+               link_stat = link_stat && h->dev->ops->get_status(h);
+       else
+               link_stat = 0;
+
+       return link_stat;
+}
+
+static void hns_get_mdix_mode(struct net_device *net_dev,
+                             struct ethtool_cmd *cmd)
+{
+       int mdix_ctrl, mdix, retval, is_resolved;
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct phy_device *phy_dev = priv->phy;
+
+       if (!phy_dev || !phy_dev->bus) {
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+               cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+               return;
+       }
+
+       (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG,
+                           HNS_PHY_PAGE_MDIX);
+
+       retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSC_REG);
+       mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S);
+
+       retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSS_REG);
+       mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B);
+       is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B);
+
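+       /* restore the copper page now that the MDI-X registers are read */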
+       (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG,
+                           HNS_PHY_PAGE_COPPER);
+
+       switch (mdix_ctrl) {
+       case 0x0:
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+               break;
+       case 0x1:
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+               break;
+       case 0x3:
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+               break;
+       default:
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+               break;
+       }
+
+       if (!is_resolved)
+               cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+       else if (mdix)
+               cmd->eth_tp_mdix = ETH_TP_MDI_X;
+       else
+               cmd->eth_tp_mdix = ETH_TP_MDI;
+}
+
+/**
+ *hns_nic_get_settings - implement ethtool get settings
+ *@net_dev: net_device
+ *@cmd: ethtool_cmd
+ *return: 0 on success, negative on failure
+ */
+static int hns_nic_get_settings(struct net_device *net_dev,
+                               struct ethtool_cmd *cmd)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct hnae_handle *h;
+       u32 link_stat;
+       int ret;
+       u8 duplex;
+       u16 speed;
+
+       if (!priv || !priv->ae_handle)
+               return -ESRCH;
+
+       h = priv->ae_handle;
+       if (!h->dev || !h->dev->ops || !h->dev->ops->get_info)
+               return -ESRCH;
+
+       ret = h->dev->ops->get_info(h, NULL, &speed, &duplex);
+       if (ret < 0) {
+               netdev_err(net_dev, "%s get_info error!\n", __func__);
+               return -EINVAL;
+       }
+
+       /* When there is no phy, autoneg is off. */
+       cmd->autoneg = false;
+       ethtool_cmd_speed_set(cmd, speed);
+       cmd->duplex = duplex;
+
+       if (priv->phy)
+               (void)phy_ethtool_gset(priv->phy, cmd);
+
+       link_stat = hns_nic_get_link(net_dev);
+       if (!link_stat) {
+               ethtool_cmd_speed_set(cmd, (u32)SPEED_UNKNOWN);
+               cmd->duplex = DUPLEX_UNKNOWN;
+       }
+
+       if (cmd->autoneg)
+               cmd->advertising |= ADVERTISED_Autoneg;
+
+       cmd->supported |= h->if_support;
+       if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+               cmd->supported |= SUPPORTED_TP;
+               cmd->advertising |= ADVERTISED_1000baseT_Full;
+       } else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+               cmd->supported |= SUPPORTED_FIBRE;
+               cmd->advertising |= ADVERTISED_10000baseKR_Full;
+       }
+
+       if (h->port_type == HNAE_PORT_SERVICE) {
+               cmd->port = PORT_FIBRE;
+               cmd->supported |= SUPPORTED_Pause;
+       } else {
+               cmd->port = PORT_TP;
+       }
+
+       cmd->transceiver = XCVR_EXTERNAL;
+       cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
+       hns_get_mdix_mode(net_dev, cmd);
+
+       return 0;
+}
+
+/**
+ *hns_nic_set_settings - implement ethtool set settings
+ *@net_dev: net_device
+ *@cmd: ethtool_cmd
+ *return: 0 on success, negative on failure
+ */
+static int hns_nic_set_settings(struct net_device *net_dev,
+                               struct ethtool_cmd *cmd)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct hnae_handle *h;
+       int link_stat;
+       u32 speed;
+       u8 duplex, autoneg;
+
+       if (!netif_running(net_dev))
+               return -ESRCH;
+
+       if (!priv || !priv->ae_handle || !priv->ae_handle->dev ||
+           !priv->ae_handle->dev->ops)
+               return -ENODEV;
+
+       h = priv->ae_handle;
+       link_stat = hns_nic_get_link(net_dev);
+       duplex = cmd->duplex;
+       speed = ethtool_cmd_speed(cmd);
+       autoneg = cmd->autoneg;
+
+       if (!link_stat) {
+               if (duplex != (u8)DUPLEX_UNKNOWN || speed != (u32)SPEED_UNKNOWN)
+                       return -EINVAL;
+
+               if (h->phy_if == PHY_INTERFACE_MODE_SGMII && h->phy_node) {
+                       priv->phy->autoneg = autoneg;
+                       return phy_start_aneg(priv->phy);
+               }
+       }
+
+       if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+               if (autoneg != AUTONEG_DISABLE)
+                       return -EINVAL;
+
+               if (speed != SPEED_10000 || duplex != DUPLEX_FULL)
+                       return -EINVAL;
+       } else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+               if (!h->phy_node && autoneg != AUTONEG_DISABLE)
+                       return -EINVAL;
+
+               if (speed == SPEED_1000 && duplex == DUPLEX_HALF)
+                       return -EINVAL;
+
+               if (speed != SPEED_10 && speed != SPEED_100 &&
+                   speed != SPEED_1000)
+                       return -EINVAL;
+       } else {
+               netdev_err(net_dev, "Not supported!");
+               return -ENOTSUPP;
+       }
+
+       if (priv->phy) {
+               return phy_ethtool_sset(priv->phy, cmd);
+       } else if (h->dev->ops->adjust_link && link_stat) {
+               h->dev->ops->adjust_link(h, speed, duplex);
+               return 0;
+       }
+       netdev_err(net_dev, "Not supported!");
+       return -ENOTSUPP;
+}
+
+static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
+       "Mac    Loopback test",
+       "Serdes Loopback test",
+       "Phy    Loopback test"
+};
+
+static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
+{
+#define COPPER_CONTROL_REG 0
+#define PHY_LOOP_BACK BIT(14)
+       u16 val = 0;
+
+       if (phy_dev->is_c45) /* c45 support for XGE PHYs is not implemented yet */
+               return -ENOTSUPP;
+
+       if (en) {
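+               /* the page/register writes below appear to be vendor-specific
+                * paged PHY registers (an assumption); the values are kept
+                * exactly as in the original sequence
+                */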
+               /* speed : 1000M */
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   HNS_PHY_PAGE_REG, 2);
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   21, 0x1046);
+               /* Force Master */
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   9, 0x1F00);
+               /* Soft-reset */
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   0, 0x9140);
+               /* if autoneg is disabled, two soft-reset operations are needed */
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   0, 0x9140);
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   22, 0xFA);
+
+               /* Default is 0x0400 */
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   1, 0x418);
+
+               /* Force 1000M Link, Default is 0x0200 */
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   7, 0x20C);
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   22, 0);
+
+               /* Enable PHY loop-back */
+               val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr,
+                                       COPPER_CONTROL_REG);
+               val |= PHY_LOOP_BACK;
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   COPPER_CONTROL_REG, val);
+       } else {
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   22, 0xFA);
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   1, 0x400);
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   7, 0x200);
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   22, 0);
+
+               val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr,
+                                       COPPER_CONTROL_REG);
+               val &= ~PHY_LOOP_BACK;
+               (void)mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                   COPPER_CONTROL_REG, val);
+       }
+       return 0;
+}
+
+static int __lb_setup(struct net_device *ndev,
+                     enum hnae_loop loop)
+{
+       int ret = 0;
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct phy_device *phy_dev = priv->phy;
+       struct hnae_handle *h = priv->ae_handle;
+
+       switch (loop) {
+       case MAC_INTERNALLOOP_PHY:
+               if ((phy_dev) && (!phy_dev->is_c45))
+                       ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+               break;
+       case MAC_INTERNALLOOP_MAC:
+               if ((h->dev->ops->set_loopback) &&
+                   (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII))
+                       ret = h->dev->ops->set_loopback(h, loop, 0x1);
+               break;
+       case MAC_INTERNALLOOP_SERDES:
+               if (h->dev->ops->set_loopback)
+                       ret = h->dev->ops->set_loopback(h, loop, 0x1);
+               break;
+       case MAC_LOOP_NONE:
+               if ((phy_dev) && (!phy_dev->is_c45))
+                       ret |= hns_nic_config_phy_loopback(phy_dev, 0x0);
+
+               if (h->dev->ops->set_loopback) {
+                       if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+                               ret |= h->dev->ops->set_loopback(h,
+                                       MAC_INTERNALLOOP_MAC, 0x0);
+
+                       ret |= h->dev->ops->set_loopback(h,
+                               MAC_INTERNALLOOP_SERDES, 0x0);
+               }
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int __lb_up(struct net_device *ndev,
+                  enum hnae_loop loop_mode)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       int speed, duplex;
+       int ret;
+
+       hns_nic_net_reset(ndev);
+
+       if (priv->phy) {
+               phy_disconnect(priv->phy);
+               msleep(100);
+
+               ret = hns_nic_init_phy(ndev, h);
+               if (ret)
+                       return ret;
+       }
+
+       ret = __lb_setup(ndev, loop_mode);
+       if (ret)
+               return ret;
+
+       msleep(100);
+
+       ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+       if (ret)
+               return ret;
+
+       if (priv->phy)
+               phy_start(priv->phy);
+
+       /* adjust link: force speed and full duplex for the loopback test */
+       if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+               speed = 1000;
+       else
+               speed = 10000;
+       duplex = 1;
+
+       h->dev->ops->adjust_link(h, speed, duplex);
+
+       return 0;
+}
+
+static void __lb_other_process(struct hns_nic_ring_data *ring_data,
+                              struct sk_buff *skb)
+{
+       struct net_device *ndev;
+       struct hnae_ring *ring;
+       struct netdev_queue *dev_queue;
+       struct sk_buff *new_skb;
+       unsigned int frame_size;
+       int check_ok;
+       u32 i;
+       char buff[33]; /* 32B data and the last character '\0' */
+
+       if (!ring_data) { /* just build the test frame */
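+               /* build a recognizable pattern: 0xFF in the first half of the
+                * frame plus 0xBE/0xAF marker bytes in the second half, so the
+                * receive side can verify the looped-back frame
+                */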
+               frame_size = skb->len;
+               memset(skb->data, 0xFF, frame_size);
+               frame_size &= ~1ul;
+               memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+               memset(&skb->data[frame_size / 2 + 10], 0xBE,
+                      frame_size / 2 - 11);
+               memset(&skb->data[frame_size / 2 + 12], 0xAF,
+                      frame_size / 2 - 13);
+               return;
+       }
+
+       ring = ring_data->ring;
+       ndev = ring_data->napi.dev;
+       if (is_tx_ring(ring)) { /* reset the tx queue accounting */
+               dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+               netdev_tx_reset_queue(dev_queue);
+               return;
+       }
+
+       frame_size = skb->len;
+       frame_size &= ~1ul;
+       /* copy the skb in case the frame spans multiple buffers */
+       new_skb = skb_copy(skb, GFP_ATOMIC);
+       dev_kfree_skb_any(skb);
+       skb = new_skb;
+
+       check_ok = 0;
+       if (*(skb->data + 10) == 0xFF) { /* check the received test frame */
+               if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+                   (*(skb->data + frame_size / 2 + 12) == 0xAF))
+                       check_ok = 1;
+       }
+
+       if (check_ok) {
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += skb->len;
+       } else {
+               ndev->stats.rx_frame_errors++;
+               for (i = 0; i < skb->len; i++) {
+                       snprintf(buff + i % 16 * 2, 3, /* trailing '\0' */
+                                "%02x", *(skb->data + i));
+                       if ((i % 16 == 15) || (i == skb->len - 1))
+                               pr_info("%s\n", buff);
+               }
+       }
+       dev_kfree_skb_any(skb);
+}
+
+static int __lb_clean_rings(struct hns_nic_priv *priv,
+                           int ringid0, int ringid1, int budget)
+{
+       int i, ret;
+       struct hns_nic_ring_data *ring_data;
+       struct net_device *ndev = priv->netdev;
+       unsigned long rx_packets = ndev->stats.rx_packets;
+       unsigned long rx_bytes = ndev->stats.rx_bytes;
+       unsigned long rx_frame_errors = ndev->stats.rx_frame_errors;
+
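+       /* snapshot the rx counters; poll_one() feeds received frames through
+        * __lb_other_process(), which only bumps rx_packets for frames that
+        * match the test pattern, so the delta below is the good-frame count
+        */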
+       for (i = ringid0; i <= ringid1; i++) {
+               ring_data = &priv->ring_data[i];
+               (void)ring_data->poll_one(ring_data,
+                                         budget, __lb_other_process);
+       }
+       ret = (int)(ndev->stats.rx_packets - rx_packets);
+       ndev->stats.rx_packets = rx_packets;
+       ndev->stats.rx_bytes = rx_bytes;
+       ndev->stats.rx_frame_errors = rx_frame_errors;
+       return ret;
+}
+
+/**
+ * __lb_run_test - run loopback test
+ * @ndev: net device
+ * @loop_mode: loopback mode
+ */
+static int __lb_run_test(struct net_device *ndev,
+                        enum hnae_loop loop_mode)
+{
+#define NIC_LB_TEST_PKT_NUM_PER_CYCLE 1
+#define NIC_LB_TEST_RING_ID 0
+#define NIC_LB_TEST_FRAME_SIZE 128
+/* nic loopback test err  */
+#define NIC_LB_TEST_NO_MEM_ERR 1
+#define NIC_LB_TEST_TX_CNT_ERR 2
+#define NIC_LB_TEST_RX_CNT_ERR 3
+#define NIC_LB_TEST_RX_PKG_ERR 4
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       int i, j, lc, good_cnt, ret_val = 0;
+       unsigned int size;
+       netdev_tx_t tx_ret_val;
+       struct sk_buff *skb;
+
+       size = NIC_LB_TEST_FRAME_SIZE;
+       /* allocate test skb */
+       skb = alloc_skb(size, GFP_KERNEL);
+       if (!skb)
+               return NIC_LB_TEST_NO_MEM_ERR;
+
+       /* place data into test skb */
+       (void)skb_put(skb, size);
+       __lb_other_process(NULL, skb);
+       skb->queue_mapping = NIC_LB_TEST_RING_ID;
+
+       lc = 1;
+       for (j = 0; j < lc; j++) {
+               /* reset count of good packets */
+               good_cnt = 0;
+               /* place the test packets on the transmit queue */
+               for (i = 0; i < NIC_LB_TEST_PKT_NUM_PER_CYCLE; i++) {
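+                       /* take an extra reference so the skb survives being
+                        * consumed by the transmit path and can be reused
+                        */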
+                       (void)skb_get(skb);
+
+                       tx_ret_val = (netdev_tx_t)hns_nic_net_xmit_hw(
+                               ndev, skb,
+                               &tx_ring_data(priv, skb->queue_mapping));
+                       if (tx_ret_val == NETDEV_TX_OK)
+                               good_cnt++;
+                       else
+                               break;
+               }
+               if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+                       ret_val = NIC_LB_TEST_TX_CNT_ERR;
+                       dev_err(priv->dev, "%s sent fail, cnt=0x%x, budget=0x%x\n",
+                               hns_nic_test_strs[loop_mode], good_cnt,
+                               NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+                       break;
+               }
+
+               /* allow 100 milliseconds for packets to go from Tx to Rx */
+               msleep(100);
+
+               good_cnt = __lb_clean_rings(priv,
+                                           h->q_num, h->q_num * 2 - 1,
+                                           NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+               if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+                       ret_val = NIC_LB_TEST_RX_CNT_ERR;
+                       dev_err(priv->dev, "%s recv fail, cnt=0x%x, budget=0x%x\n",
+                               hns_nic_test_strs[loop_mode], good_cnt,
+                               NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+                       break;
+               }
+               (void)__lb_clean_rings(priv,
+                                      NIC_LB_TEST_RING_ID, NIC_LB_TEST_RING_ID,
+                                      NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+       }
+
+       /* free the original skb */
+       kfree_skb(skb);
+
+       return ret_val;
+}
+
+static int __lb_down(struct net_device *ndev)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       struct hnae_handle *h = priv->ae_handle;
+       int ret;
+
+       ret = __lb_setup(ndev, MAC_LOOP_NONE);
+       if (ret)
+               netdev_err(ndev, "%s: __lb_setup return error(%d)!\n",
+                          __func__,
+                          ret);
+
+       if (priv->phy)
+               phy_stop(priv->phy);
+
+       if (h->dev->ops->stop)
+               h->dev->ops->stop(h);
+
+       usleep_range(10000, 20000);
+       (void)__lb_clean_rings(priv, 0, h->q_num - 1, 256);
+
+       hns_nic_net_reset(ndev);
+
+       return 0;
+}
+
+/**
+ * hns_nic_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns_nic_self_test(struct net_device *ndev,
+                             struct ethtool_test *eth_test, u64 *data)
+{
+       struct hns_nic_priv *priv = netdev_priv(ndev);
+       bool if_running = netif_running(ndev);
+#define SELF_TEST_TYPE_NUM 3
+       int st_param[SELF_TEST_TYPE_NUM][2];
+       int i;
+       int test_index = 0;
+
+       st_param[0][0] = MAC_INTERNALLOOP_MAC; /* XGMII does not support MAC loopback */
+       st_param[0][1] = (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII);
+       st_param[1][0] = MAC_INTERNALLOOP_SERDES;
+       st_param[1][1] = 1; /* serdes always exists */
+       st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supported with a PHY node */
+       st_param[2][1] = ((!!(priv->ae_handle->phy_node)) &&
+               (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
+
+       if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+               set_bit(NIC_STATE_TESTING, &priv->state);
+
+               if (if_running)
+                       (void)dev_close(ndev);
+
+               for (i = 0; i < SELF_TEST_TYPE_NUM; i++) {
+                       if (!st_param[i][1])
+                               continue;       /* NEXT testing */
+
+                       data[test_index] = __lb_up(ndev,
+                               (enum hnae_loop)st_param[i][0]);
+                       if (!data[test_index]) {
+                               data[test_index] = __lb_run_test(
+                                       ndev, (enum hnae_loop)st_param[i][0]);
+                               (void)__lb_down(ndev);
+                       }
+
+                       if (data[test_index])
+                               eth_test->flags |= ETH_TEST_FL_FAILED;
+
+                       test_index++;
+               }
+
+               hns_nic_net_reset(priv->netdev);
+
+               clear_bit(NIC_STATE_TESTING, &priv->state);
+
+               if (if_running)
+                       (void)dev_open(ndev);
+       }
+       /* Online tests aren't run; pass by default */
+
+       (void)msleep_interruptible(4 * 1000);
+}
+
+/**
+ * hns_nic_get_drvinfo - get net driver info
+ * @net_dev: net device
+ * @drvinfo: driver info
+ */
+static void hns_nic_get_drvinfo(struct net_device *net_dev,
+                               struct ethtool_drvinfo *drvinfo)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+       assert(priv);
+
+       strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
+               sizeof(drvinfo->version));
+       drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
+
+       strncpy(drvinfo->driver, HNAE_DRIVER_NAME, sizeof(drvinfo->driver));
+       drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
+
+       strncpy(drvinfo->bus_info, priv->dev->bus->name,
+               sizeof(drvinfo->bus_info));
+       drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
+
+       strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
+       drvinfo->eedump_len = 0;
+}
+
+/**
+ * hns_get_ringparam - get ring parameter
+ * @net_dev: net device
+ * @param: ethtool parameter
+ */
+void hns_get_ringparam(struct net_device *net_dev,
+                      struct ethtool_ringparam *param)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct hnae_ae_ops *ops;
+       struct hnae_queue *queue;
+       u32 uplimit = 0;
+
+       queue = priv->ae_handle->qs[0];
+       ops = priv->ae_handle->dev->ops;
+
+       if (ops->get_ring_bdnum_limit)
+               ops->get_ring_bdnum_limit(queue, &uplimit);
+
+       param->rx_max_pending = uplimit;
+       param->tx_max_pending = uplimit;
+       param->rx_pending = queue->rx_ring.desc_num;
+       param->tx_pending = queue->tx_ring.desc_num;
+}
+
+/**
+ * hns_get_pauseparam - get pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ */
+static void hns_get_pauseparam(struct net_device *net_dev,
+                              struct ethtool_pauseparam *param)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct hnae_ae_ops *ops;
+
+       ops = priv->ae_handle->dev->ops;
+
+       if (ops->get_pauseparam)
+               ops->get_pauseparam(priv->ae_handle, &param->autoneg,
+                                           &param->rx_pause, &param->tx_pause);
+}
+
+/**
+ * hns_set_pauseparam - set pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_set_pauseparam(struct net_device *net_dev,
+                             struct ethtool_pauseparam *param)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct hnae_handle *h;
+       struct hnae_ae_ops *ops;
+
+       assert(priv && priv->ae_handle);
+
+       h = priv->ae_handle;
+       ops = h->dev->ops;
+
+       if (!ops->set_pauseparam)
+               return -ESRCH;
+
+       return ops->set_pauseparam(priv->ae_handle, param->autoneg,
+                                  param->rx_pause, param->tx_pause);
+}
+
+/**
+ * hns_get_coalesce - get coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_get_coalesce(struct net_device *net_dev,
+                           struct ethtool_coalesce *ec)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct hnae_ae_ops *ops;
+
+       ops = priv->ae_handle->dev->ops;
+
+       ec->use_adaptive_rx_coalesce = 1;
+       ec->use_adaptive_tx_coalesce = 1;
+
+       if ((!ops->get_coalesce_usecs) ||
+           (!ops->get_rx_max_coalesced_frames))
+               return -ESRCH;
+
+       ops->get_coalesce_usecs(priv->ae_handle,
+                                       &ec->tx_coalesce_usecs,
+                                       &ec->rx_coalesce_usecs);
+
+       ops->get_rx_max_coalesced_frames(
+               priv->ae_handle,
+               &ec->tx_max_coalesced_frames,
+               &ec->rx_max_coalesced_frames);
+
+       return 0;
+}
+
+/**
+ * hns_set_coalesce - set coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_set_coalesce(struct net_device *net_dev,
+                           struct ethtool_coalesce *ec)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct hnae_ae_ops *ops;
+       int ret;
+
+       assert(priv && priv->ae_handle);
+
+       ops = priv->ae_handle->dev->ops;
+
+       if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
+               return -EINVAL;
+
+       if (ec->rx_max_coalesced_frames != ec->tx_max_coalesced_frames)
+               return -EINVAL;
+
+       if ((!ops->set_coalesce_usecs) ||
+           (!ops->set_coalesce_frames))
+               return -ESRCH;
+
+       ops->set_coalesce_usecs(priv->ae_handle,
+                                       ec->rx_coalesce_usecs);
+
+       ret = ops->set_coalesce_frames(
+               priv->ae_handle,
+               ec->rx_max_coalesced_frames);
+
+       return ret;
+}
+
+/**
+ * hns_get_channels - get channel info.
+ * @net_dev: net device
+ * @ch: channel info.
+ */
+void hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+       ch->max_rx = priv->ae_handle->q_num;
+       ch->max_tx = priv->ae_handle->q_num;
+
+       ch->rx_count = priv->ae_handle->q_num;
+       ch->tx_count = priv->ae_handle->q_num;
+}
+
+/**
+ * hns_get_ethtool_stats - get detailed statistics
+ * @netdev: net device
+ * @stats: statistics info.
+ * @data: statistics data.
+ */
+void hns_get_ethtool_stats(struct net_device *netdev,
+                          struct ethtool_stats *stats, u64 *data)
+{
+       u64 *p = data;
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct hnae_handle *h = priv->ae_handle;
+       const struct rtnl_link_stats64 *net_stats;
+       struct rtnl_link_stats64 temp;
+
+       if (!h->dev->ops->get_stats || !h->dev->ops->update_stats) {
+               netdev_err(netdev, "get_stats or update_stats is null!\n");
+               return;
+       }
+
+       h->dev->ops->update_stats(h, &netdev->stats);
+
+       net_stats = dev_get_stats(netdev, &temp);
+
+       /* get netdev statistics */
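+       /* note: the fixed slots p[0]..p[25] must stay in step with the
+        * string names emitted by hns_get_strings() below
+        */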
+       p[0] = net_stats->rx_packets;
+       p[1] = net_stats->tx_packets;
+       p[2] = net_stats->rx_bytes;
+       p[3] = net_stats->tx_bytes;
+       p[4] = net_stats->rx_errors;
+       p[5] = net_stats->tx_errors;
+       p[6] = net_stats->rx_dropped;
+       p[7] = net_stats->tx_dropped;
+       p[8] = net_stats->multicast;
+       p[9] = net_stats->collisions;
+       p[10] = net_stats->rx_over_errors;
+       p[11] = net_stats->rx_crc_errors;
+       p[12] = net_stats->rx_frame_errors;
+       p[13] = net_stats->rx_fifo_errors;
+       p[14] = net_stats->rx_missed_errors;
+       p[15] = net_stats->tx_aborted_errors;
+       p[16] = net_stats->tx_carrier_errors;
+       p[17] = net_stats->tx_fifo_errors;
+       p[18] = net_stats->tx_heartbeat_errors;
+       p[19] = net_stats->rx_length_errors;
+       p[20] = net_stats->tx_window_errors;
+       p[21] = net_stats->rx_compressed;
+       p[22] = net_stats->tx_compressed;
+
+       p[23] = netdev->rx_dropped.counter;
+       p[24] = netdev->tx_dropped.counter;
+
+       p[25] = priv->tx_timeout_count;
+
+       /* get driver statistics */
+       h->dev->ops->get_stats(h, &p[26]);
+}
+
+/**
+ * hns_get_strings - return a set of strings that describe the requested objects
+ * @netdev: net device
+ * @stringset: string set ID
+ * @data: objects data
+ */
+void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct hnae_handle *h = priv->ae_handle;
+       char *buff = (char *)data;
+
+       if (!h->dev->ops->get_strings) {
+               netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
+               return;
+       }
+
+       if (stringset == ETH_SS_TEST) {
+               if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) {
+                       memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_MAC],
+                              ETH_GSTRING_LEN);
+                       buff += ETH_GSTRING_LEN;
+               }
+               memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
+                      ETH_GSTRING_LEN);
+               buff += ETH_GSTRING_LEN;
+               if ((priv->phy) && (!priv->phy->is_c45))
+                       memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
+                              ETH_GSTRING_LEN);
+
+       } else {
+               snprintf(buff, ETH_GSTRING_LEN, "rx_packets");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_packets");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_bytes");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_bytes");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_dropped");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_dropped");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "multicast");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "collisions");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_over_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_crc_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_frame_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_fifo_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_missed_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_aborted_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_carrier_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_fifo_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_heartbeat_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_length_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_window_errors");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "rx_compressed");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "tx_compressed");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "netdev_rx_dropped");
+               buff = buff + ETH_GSTRING_LEN;
+               snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_dropped");
+               buff = buff + ETH_GSTRING_LEN;
+
+               snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_timeout");
+               buff = buff + ETH_GSTRING_LEN;
+
+               h->dev->ops->get_strings(h, stringset, (u8 *)buff);
+       }
+}
+
+/**
+ * hns_get_sset_count - get the string set count, which is returned by hns_get_strings
+ * @netdev: net device
+ * @stringset: string set index, 0: self test strings; 1: statistics strings.
+ *
+ * Return string set count.
+ */
+int hns_get_sset_count(struct net_device *netdev, int stringset)
+{
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct hnae_handle *h = priv->ae_handle;
+       struct hnae_ae_ops *ops = h->dev->ops;
+
+       if (!ops->get_sset_count) {
+               netdev_err(netdev, "get_sset_count is null!\n");
+               return -EOPNOTSUPP;
+       }
+       if (stringset == ETH_SS_TEST) {
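+               /* start from all loopback test strings and drop the ones this
+                * port cannot run: no MAC loopback on XGMII and no PHY
+                * loopback without a clause-22 PHY (mirrors hns_get_strings)
+                */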
+               u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN);
+
+               if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
+                       cnt--;
+
+               if ((!priv->phy) || (priv->phy->is_c45))
+                       cnt--;
+
+               return cnt;
+       } else {
+               return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+       }
+}
+
+/**
+ * hns_phy_led_set - set phy LED status.
+ * @netdev: net device
+ * @value: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+int hns_phy_led_set(struct net_device *netdev, int value)
+{
+       int retval;
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct phy_device *phy_dev = priv->phy;
+
+       if (!phy_dev->bus) {
+               netdev_err(netdev, "phy_dev->bus is null!\n");
+               return -EINVAL;
+       }
+       retval = mdiobus_write(phy_dev->bus, phy_dev->addr,
+                              HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
+       retval = mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_LED_FC_REG,
+                              value);
+       retval = mdiobus_write(phy_dev->bus, phy_dev->addr,
+                              HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+       if (retval) {
+               netdev_err(netdev, "mdiobus_write fail !\n");
+               return retval;
+       }
+       return 0;
+}
+
+/**
+ * hns_set_phys_id - set phy identify LED
+ * @netdev: net device
+ * @state: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct hnae_handle *h = priv->ae_handle;
+       struct phy_device *phy_dev = priv->phy;
+       int ret;
+
+       if (phy_dev)
+               switch (state) {
+               case ETHTOOL_ID_ACTIVE:
+                       ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                           HNS_PHY_PAGE_REG,
+                                           HNS_PHY_PAGE_LED);
+                       if (ret)
+                               return ret;
+
+                       priv->phy_led_val = (u16)mdiobus_read(phy_dev->bus,
+                                                             phy_dev->addr,
+                                                             HNS_LED_FC_REG);
+
+                       ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                           HNS_PHY_PAGE_REG,
+                                           HNS_PHY_PAGE_COPPER);
+                       if (ret)
+                               return ret;
+                       return 2;
+               case ETHTOOL_ID_ON:
+                       ret = hns_phy_led_set(netdev, HNS_LED_FORCE_ON);
+                       if (ret)
+                               return ret;
+                       break;
+               case ETHTOOL_ID_OFF:
+                       ret = hns_phy_led_set(netdev, HNS_LED_FORCE_OFF);
+                       if (ret)
+                               return ret;
+                       break;
+               case ETHTOOL_ID_INACTIVE:
+                       ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                           HNS_PHY_PAGE_REG,
+                                           HNS_PHY_PAGE_LED);
+                       if (ret)
+                               return ret;
+
+                       ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                           HNS_LED_FC_REG, priv->phy_led_val);
+                       if (ret)
+                               return ret;
+
+                       ret = mdiobus_write(phy_dev->bus, phy_dev->addr,
+                                           HNS_PHY_PAGE_REG,
+                                           HNS_PHY_PAGE_COPPER);
+                       if (ret)
+                               return ret;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       else
+               switch (state) {
+               case ETHTOOL_ID_ACTIVE:
+                       return h->dev->ops->set_led_id(h, HNAE_LED_ACTIVE);
+               case ETHTOOL_ID_ON:
+                       return h->dev->ops->set_led_id(h, HNAE_LED_ON);
+               case ETHTOOL_ID_OFF:
+                       return h->dev->ops->set_led_id(h, HNAE_LED_OFF);
+               case ETHTOOL_ID_INACTIVE:
+                       return h->dev->ops->set_led_id(h, HNAE_LED_INACTIVE);
+               default:
+                       return -EINVAL;
+               }
+
+       return 0;
+}
+
+/**
+ * hns_get_regs - get net device register
+ * @net_dev: net device
+ * @cmd: ethtool cmd
+ * @data: register data
+ */
+void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+                 void *data)
+{
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct hnae_ae_ops *ops;
+
+       assert(priv && priv->ae_handle);
+
+       ops = priv->ae_handle->dev->ops;
+
+       cmd->version = HNS_CHIP_VERSION;
+       if (!ops->get_regs) {
+               netdev_err(net_dev, "ops->get_regs is null!\n");
+               return;
+       }
+       ops->get_regs(priv->ae_handle, data);
+}
+
+/**
+ * hns_get_regs_len - get the total register length
+ * @net_dev: net device
+ *
+ * Return total register len.
+ */
+static int hns_get_regs_len(struct net_device *net_dev)
+{
+       u32 reg_num;
+       struct hns_nic_priv *priv = netdev_priv(net_dev);
+       struct hnae_ae_ops *ops;
+
+       assert(priv && priv->ae_handle);
+
+       ops = priv->ae_handle->dev->ops;
+       if (!ops->get_regs_len) {
+               netdev_err(net_dev, "ops->get_regs_len is null!\n");
+               return -EOPNOTSUPP;
+       }
+
+       reg_num = ops->get_regs_len(priv->ae_handle);
+       if (reg_num > 0)
+               return reg_num * sizeof(u32);
+       else
+               return reg_num; /* error code */
+}
+
+/**
+ * hns_nic_nway_reset - nway reset
+ * @netdev: net device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_nway_reset(struct net_device *netdev)
+{
+       int ret = 0;
+       struct hns_nic_priv *priv = netdev_priv(netdev);
+       struct phy_device *phy = priv->phy;
+
+       if (netif_running(netdev)) {
+               if (phy)
+                       ret = genphy_restart_aneg(phy);
+       }
+
+       return ret;
+}
+
+static struct ethtool_ops hns_ethtool_ops = {
+       .get_drvinfo = hns_nic_get_drvinfo,
+       .get_link  = hns_nic_get_link,
+       .get_settings  = hns_nic_get_settings,
+       .set_settings  = hns_nic_set_settings,
+       .get_ringparam = hns_get_ringparam,
+       .get_pauseparam = hns_get_pauseparam,
+       .set_pauseparam = hns_set_pauseparam,
+       .get_coalesce = hns_get_coalesce,
+       .set_coalesce = hns_set_coalesce,
+       .get_channels = hns_get_channels,
+       .self_test = hns_nic_self_test,
+       .get_strings = hns_get_strings,
+       .get_sset_count = hns_get_sset_count,
+       .get_ethtool_stats = hns_get_ethtool_stats,
+       .set_phys_id = hns_set_phys_id,
+       .get_regs_len = hns_get_regs_len,
+       .get_regs = hns_get_regs,
+       .nway_reset = hns_nic_nway_reset,
+};
+
+void hns_ethtool_set_ops(struct net_device *ndev)
+{
+       ndev->ethtool_ops = &hns_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
new file mode 100644 (file)
index 0000000..e4ec52a
--- /dev/null
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock_types.h>
+
+#define MDIO_DRV_NAME "Hi-HNS_MDIO"
+#define MDIO_BUS_NAME "Hisilicon MII Bus"
+#define MDIO_DRV_VERSION "1.3.0"
+#define MDIO_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define MDIO_DRV_STRING MDIO_BUS_NAME
+#define MDIO_DEFAULT_DEVICE_DESCR MDIO_BUS_NAME
+
+#define MDIO_CTL_DEV_ADDR(x)   (x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x)  ((x & 0x1f) << 5)
+
+#define MDIO_TIMEOUT                   1000000
+
+struct hns_mdio_device {
+       void *vbase;            /* mdio reg base address */
+       void *sys_vbase;
+};
+
+/* mdio reg */
+#define MDIO_COMMAND_REG               0x0
+#define MDIO_ADDR_REG                  0x4
+#define MDIO_WDATA_REG                 0x8
+#define MDIO_RDATA_REG                 0xc
+#define MDIO_STA_REG                   0x10
+
+/* cfg phy bit map */
+#define MDIO_CMD_DEVAD_M       0x1f
+#define MDIO_CMD_DEVAD_S       0
+#define MDIO_CMD_PRTAD_M       0x1f
+#define MDIO_CMD_PRTAD_S       5
+#define MDIO_CMD_OP_M          0x3
+#define MDIO_CMD_OP_S          10
+#define MDIO_CMD_ST_M          0x3
+#define MDIO_CMD_ST_S          12
+#define MDIO_CMD_START_B       14
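+/* MDIO_COMMAND_REG layout, derived from the masks/shifts above:
+ * [4:0] device address, [9:5] port address, [11:10] opcode,
+ * [13:12] start-of-frame (ST) code, [14] start/busy bit
+ */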
+
+#define MDIO_ADDR_DATA_M       0xffff
+#define MDIO_ADDR_DATA_S       0
+
+#define MDIO_WDATA_DATA_M      0xffff
+#define MDIO_WDATA_DATA_S      0
+
+#define MDIO_RDATA_DATA_M      0xffff
+#define MDIO_RDATA_DATA_S      0
+
+#define MDIO_STATE_STA_B       0
+
+enum mdio_st_clause {
+       MDIO_ST_CLAUSE_45 = 0,
+       MDIO_ST_CLAUSE_22
+};
+
+enum mdio_c22_op_seq {
+       MDIO_C22_WRITE = 1,
+       MDIO_C22_READ = 2
+};
+
+enum mdio_c45_op_seq {
+       MDIO_C45_WRITE_ADDR = 0,
+       MDIO_C45_WRITE_DATA,
+       MDIO_C45_READ_INCREMENT,
+       MDIO_C45_READ
+};
+
+/* peri subctrl reg */
+#define MDIO_SC_CLK_EN         0x338
+#define MDIO_SC_CLK_DIS                0x33C
+#define MDIO_SC_RESET_REQ      0xA38
+#define MDIO_SC_RESET_DREQ     0xA3C
+#define MDIO_SC_CTRL           0x2010
+#define MDIO_SC_CLK_ST         0x531C
+#define MDIO_SC_RESET_ST       0x5A1C
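+/* these subctrl registers live in the separate system-controller region
+ * mapped at sys_vbase; they gate the MDIO block's clock and reset
+ */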
+
+static void mdio_write_reg(void *base, u32 reg, u32 value)
+{
+       u8 __iomem *reg_addr = (u8 __iomem *)base;
+
+       writel_relaxed(value, reg_addr + reg);
+}
+
+#define MDIO_WRITE_REG(a, reg, value) \
+       mdio_write_reg((a)->vbase, (reg), (value))
+
+static u32 mdio_read_reg(void *base, u32 reg)
+{
+       u8 __iomem *reg_addr = (u8 __iomem *)base;
+
+       return readl_relaxed(reg_addr + reg);
+}
+
+#define mdio_set_field(origin, mask, shift, val) \
+       do { \
+               (origin) &= (~((mask) << (shift))); \
+               (origin) |= (((val) & (mask)) << (shift)); \
+       } while (0)
+
+#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
+
+static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+                              u32 val)
+{
+       u32 origin = mdio_read_reg(base, reg);
+
+       mdio_set_field(origin, mask, shift, val);
+       mdio_write_reg(base, reg, origin);
+}
+
+#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
+       mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
+
+static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+{
+       u32 origin;
+
+       origin = mdio_read_reg(base, reg);
+       return mdio_get_field(origin, mask, shift);
+}
+
+#define MDIO_GET_REG_FIELD(dev, reg, mask, shift) \
+               mdio_get_reg_field((dev)->vbase, (reg), (mask), (shift))
+
+#define MDIO_GET_REG_BIT(dev, reg, bit) \
+               mdio_get_reg_field((dev)->vbase, (reg), 0x1ull, (bit))
+
+#define MDIO_CHECK_SET_ST      1
+#define MDIO_CHECK_CLR_ST      0
+
+static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
+                                u32 cfg_reg, u32 set_val,
+                                u32 st_reg, u32 st_msk, u8 check_st)
+{
+       u32 time_cnt;
+       u32 reg_value;
+
+       mdio_write_reg((void *)mdio_dev->sys_vbase, cfg_reg, set_val);
+
+       for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
+               reg_value = mdio_read_reg((void *)mdio_dev->sys_vbase, st_reg);
+               reg_value &= st_msk;
+               if ((!!check_st) == (!!reg_value))
+                       break;
+       }
+
+       if ((!!check_st) != (!!reg_value))
+               return -EBUSY;
+
+       return 0;
+}
+
+static int hns_mdio_wait_ready(struct mii_bus *bus)
+{
+       struct hns_mdio_device *mdio_dev = bus->priv;
+       int i;
+       u32 cmd_reg_value = 1;
+
+       /* wait for the mdio_start bit of MDIO_COMMAND_REG to clear to 0;
+        * only then can a read or write be issued
+        */
+       for (i = 0; cmd_reg_value; i++) {
+               cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
+                                                MDIO_COMMAND_REG,
+                                                MDIO_CMD_START_B);
+               if (i == MDIO_TIMEOUT)
+                       return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
+                              u8 is_c45, u8 op, u8 phy_id, u16 cmd)
+{
+       u32 cmd_reg_value;
+       u8 st = is_c45 ? MDIO_ST_CLAUSE_45 : MDIO_ST_CLAUSE_22;
+
+       cmd_reg_value = st << MDIO_CMD_ST_S;
+       cmd_reg_value |= op << MDIO_CMD_OP_S;
+       cmd_reg_value |=
+               (phy_id & MDIO_CMD_PRTAD_M) << MDIO_CMD_PRTAD_S;
+       cmd_reg_value |= (cmd & MDIO_CMD_DEVAD_M) << MDIO_CMD_DEVAD_S;
+       cmd_reg_value |= 1 << MDIO_CMD_START_B;
+
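+       /* setting the start bit kicks off the MDIO frame; the same bit is
+        * polled until it clears again by hns_mdio_wait_ready()
+        */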
+       MDIO_WRITE_REG(mdio_dev, MDIO_COMMAND_REG, cmd_reg_value);
+}
+
+/**
+ * hns_mdio_write - write a phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ * @data: value to write
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write(struct mii_bus *bus,
+                         int phy_id, int regnum, u16 data)
+{
+       int ret;
+       struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+       u8 devad = ((regnum >> 16) & 0x1f);
+       u8 is_c45 = !!(regnum & MII_ADDR_C45);
+       u16 reg = (u16)(regnum & 0xffff);
+       u8 op;
+       u16 cmd_reg_cfg;
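+       /* for clause-45 accesses the MDIO framework encodes MII_ADDR_C45 and
+        * the device address in the upper bits of regnum; the low 16 bits
+        * carry the register address
+        */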
+
+       dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+               bus->id, mdio_dev->vbase);
+       dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x, write data=%d\n",
+               phy_id, is_c45, devad, reg, data);
+
+       /* wait for ready */
+       ret = hns_mdio_wait_ready(bus);
+       if (ret) {
+               dev_err(&bus->dev, "MDIO bus is busy\n");
+               return ret;
+       }
+
+       if (!is_c45) {
+               cmd_reg_cfg = reg;
+               op = MDIO_C22_WRITE;
+       } else {
+               /* configure the command register to write the address */
+               MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+                                  MDIO_ADDR_DATA_S, reg);
+
+               hns_mdio_cmd_write(mdio_dev, is_c45,
+                                  MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+               /* wait until the address write has finished */
+               ret = hns_mdio_wait_ready(bus);
+               if (ret) {
+                       dev_err(&bus->dev, "MDIO bus is busy\n");
+                       return ret;
+               }
+
+               /* configure the data to be written */
+               cmd_reg_cfg = devad;
+               op = MDIO_C45_WRITE_DATA;
+       }
+
+       MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+                          MDIO_WDATA_DATA_S, data);
+
+       hns_mdio_cmd_write(mdio_dev, is_c45, op, phy_id, cmd_reg_cfg);
+
+       return 0;
+}
+
+/**
+ * hns_mdio_read - read a phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ *
+ * Return phy register value
+ */
+static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+       int ret;
+       u16 reg_val = 0;
+       u8 devad = ((regnum >> 16) & 0x1f);
+       u8 is_c45 = !!(regnum & MII_ADDR_C45);
+       u16 reg = (u16)(regnum & 0xffff);
+       struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+
+       dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+               bus->id, mdio_dev->vbase);
+       dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x!\n",
+               phy_id, is_c45, devad, reg);
+
+       /* Step 1: wait for ready */
+       ret = hns_mdio_wait_ready(bus);
+       if (ret) {
+               dev_err(&bus->dev, "MDIO bus is busy\n");
+               return ret;
+       }
+
+       if (!is_c45) {
+               hns_mdio_cmd_write(mdio_dev, is_c45,
+                                  MDIO_C22_READ, phy_id, reg);
+       } else {
+               MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+                                  MDIO_ADDR_DATA_S, reg);
+
+               /* Step 2: configure the command register to write the address */
+               hns_mdio_cmd_write(mdio_dev, is_c45,
+                                  MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+               /* Step 3: wait until the address write has finished */
+               ret = hns_mdio_wait_ready(bus);
+               if (ret) {
+                       dev_err(&bus->dev, "MDIO bus is busy\n");
+                       return ret;
+               }
+
+               /* Step 4: issue the clause-45 read command */
+               hns_mdio_cmd_write(mdio_dev, is_c45,
+                                  MDIO_C45_READ, phy_id, devad);
+       }
+
+       /* Step 5: wait for the mdio_start bit of MDIO_COMMAND_REG to clear,
+        * which indicates the read or write operation has finished
+        */
+       ret = hns_mdio_wait_ready(bus);
+       if (ret) {
+               dev_err(&bus->dev, "MDIO bus is busy\n");
+               return ret;
+       }
+
+       reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+       if (reg_val) {
+               dev_err(&bus->dev, " ERROR! MDIO Read failed!\n");
+               return -EBUSY;
+       }
+
+       /* Step 6: read out the data */
+       reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+                                         MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+       return reg_val;
+}
+
+/**
+ * hns_mdio_reset - reset mdio bus
+ * @bus: mdio bus
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_reset(struct mii_bus *bus)
+{
+       struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+       int ret;
+
+       if (!mdio_dev->sys_vbase) {
+               dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
+               return -ENODEV;
+       }
+
+       /* 1. assert the reset request and check the reset status */
+       ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
+                                   MDIO_SC_RESET_ST, 0x1,
+                                   MDIO_CHECK_SET_ST);
+       if (ret) {
+               dev_err(&bus->dev, "MDIO reset fail\n");
+               return ret;
+       }
+
+       /* 2. disable the clock and check the clock status */
+       ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
+                                   0x1, MDIO_SC_CLK_ST, 0x1,
+                                   MDIO_CHECK_CLR_ST);
+       if (ret) {
+               dev_err(&bus->dev, "MDIO dis clk fail\n");
+               return ret;
+       }
+
+       /* 3. de-assert the reset request and check the reset status */
+       ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
+                                   MDIO_SC_RESET_ST, 0x1,
+                                   MDIO_CHECK_CLR_ST);
+       if (ret) {
+               dev_err(&bus->dev, "MDIO dis clk fail\n");
+               return ret;
+       }
+
+       /* 4. enable the clock and check the clock status */
+       ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
+                                   0x1, MDIO_SC_CLK_ST, 0x1,
+                                   MDIO_CHECK_SET_ST);
+       if (ret)
+               dev_err(&bus->dev, "MDIO en clk fail\n");
+
+       return ret;
+}
+
+/**
+ * hns_mdio_bus_name - get mdio bus name
+ * @name: mdio bus name
+ * @np: mdio device node pointer
+ */
+static void hns_mdio_bus_name(char *name, struct device_node *np)
+{
+       const u32 *addr;
+       u64 taddr = OF_BAD_ADDR;
+
+       addr = of_get_address(np, 0, NULL, NULL);
+       if (addr)
+               taddr = of_translate_address(np, addr);
+
+       snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+                (unsigned long long)taddr);
+}
+
+/**
+ * hns_mdio_probe - probe mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_probe(struct platform_device *pdev)
+{
+       struct device_node *np;
+       struct hns_mdio_device *mdio_dev;
+       struct mii_bus *new_bus;
+       struct resource *res;
+       int ret;
+
+       if (!pdev) {
+               dev_err(NULL, "pdev is NULL!\r\n");
+               return -ENODEV;
+       }
+       np = pdev->dev.of_node;
+       mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
+       if (!mdio_dev)
+               return -ENOMEM;
+
+       new_bus = devm_mdiobus_alloc(&pdev->dev);
+       if (!new_bus) {
+               dev_err(&pdev->dev, "mdiobus_alloc fail!\n");
+               return -ENOMEM;
+       }
+
+       new_bus->name = MDIO_BUS_NAME;
+       new_bus->read = hns_mdio_read;
+       new_bus->write = hns_mdio_write;
+       new_bus->reset = hns_mdio_reset;
+       new_bus->priv = mdio_dev;
+       hns_mdio_bus_name(new_bus->id, np);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mdio_dev->vbase)) {
+               ret = PTR_ERR(mdio_dev->vbase);
+               return ret;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       mdio_dev->sys_vbase = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mdio_dev->sys_vbase)) {
+               ret = PTR_ERR(mdio_dev->sys_vbase);
+               return ret;
+       }
+
+       new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR,
+                                   sizeof(int), GFP_KERNEL);
+       if (!new_bus->irq)
+               return -ENOMEM;
+
+       new_bus->parent = &pdev->dev;
+       platform_set_drvdata(pdev, new_bus);
+
+       ret = of_mdiobus_register(new_bus, np);
+       if (ret) {
+               dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
+               platform_set_drvdata(pdev, NULL);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * hns_mdio_remove - remove mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_remove(struct platform_device *pdev)
+{
+       struct mii_bus *bus;
+
+       bus = platform_get_drvdata(pdev);
+
+       mdiobus_unregister(bus);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static const struct of_device_id hns_mdio_match[] = {
+       {.compatible = "hisilicon,mdio"},
+       {.compatible = "hisilicon,hns-mdio"},
+       {}
+};
+
+static struct platform_driver hns_mdio_driver = {
+       .probe = hns_mdio_probe,
+       .remove = hns_mdio_remove,
+       .driver = {
+                  .name = MDIO_DRV_NAME,
+                  .of_match_table = hns_mdio_match,
+                  },
+};
+
+module_platform_driver(hns_mdio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("Hisilicon HNS MDIO driver");
+MODULE_ALIAS("platform:" MDIO_DRV_NAME);
index f45b4d71adb85f4e3daf92cf83a42832a47d5723..08ecf43dffc77babaa86a9c3c5c98c950ce7423b 100644 (file)
@@ -37,7 +37,8 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos)
 }
 
 static void *fm10k_dbg_desc_seq_next(struct seq_file *s,
-                                    void __always_unused *v, loff_t *pos)
+                                    void __always_unused *v,
+                                    loff_t *pos)
 {
        struct fm10k_ring *ring = s->private;
 
@@ -45,7 +46,7 @@ static void *fm10k_dbg_desc_seq_next(struct seq_file *s,
 }
 
 static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s,
-                                   __always_unused void *v)
+                                   void __always_unused *v)
 {
        /* Do nothing. */
 }
index 94571e6e790c55f9d204535fd4cc364857551e75..0e25a80417b923c1dbee67e2ff91cce3d86bd103 100644 (file)
@@ -228,9 +228,6 @@ int fm10k_iov_resume(struct pci_dev *pdev)
                hw->iov.ops.set_lport(hw, vf_info, i,
                                      FM10K_VF_FLAG_MULTI_CAPABLE);
 
-               /* assign our default vid to the VF following reset */
-               vf_info->sw_vid = hw->mac.default_vid;
-
                /* mailbox is disconnected so we don't send a message */
                hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
 
index b5b2925103ec10c1e835429c2ab7d2efee093838..92d415584749e06cbe6068295f10401711786ce9 100644 (file)
@@ -497,8 +497,11 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
        if (rx_desc->w.vlan) {
                u16 vid = le16_to_cpu(rx_desc->w.vlan);
 
-               if (vid != rx_ring->vid)
+               if ((vid & VLAN_VID_MASK) != rx_ring->vid)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+               else if (vid & VLAN_PRIO_MASK)
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                              vid & VLAN_PRIO_MASK);
        }
 
        fm10k_type_trans(rx_ring, rx_desc, skb);
@@ -1079,9 +1082,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
        struct fm10k_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
        unsigned short f;
-#endif
        u16 count = TXD_USE_COUNT(skb_headlen(skb));
 
        /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
@@ -1089,12 +1090,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
         *       + 2 desc gap to keep tail from touching head
         * otherwise try next time
         */
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
-       count += skb_shinfo(skb)->nr_frags;
-#endif
+
        if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
index 99228bf46c1202170ef8eb934624e15d99a257d9..639263d5e833a04d19967f6df3cd20d8ce574a3b 100644 (file)
@@ -758,6 +758,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
        struct fm10k_intfc *interface = netdev_priv(netdev);
        struct fm10k_hw *hw = &interface->hw;
        s32 err;
+       int i;
 
        /* updates do not apply to VLAN 0 */
        if (!vid)
@@ -775,8 +776,25 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
        if (!set)
                clear_bit(vid, interface->active_vlans);
 
-       /* if default VLAN is already present do nothing */
-       if (vid == hw->mac.default_vid)
+       /* disable the default VID on ring if we have an active VLAN */
+       for (i = 0; i < interface->num_rx_queues; i++) {
+               struct fm10k_ring *rx_ring = interface->rx_ring[i];
+               u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1);
+
+               if (test_bit(rx_vid, interface->active_vlans))
+                       rx_ring->vid |= FM10K_VLAN_CLEAR;
+               else
+                       rx_ring->vid &= ~FM10K_VLAN_CLEAR;
+       }
+
+       /* Do not remove default VID related entries from VLAN and MAC tables */
+       if (!set && vid == hw->mac.default_vid)
+               return 0;
+
+       /* Do not throw an error if the interface is down. We will sync once
+        * we come up
+        */
+       if (test_bit(__FM10K_DOWN, &interface->state))
                return 0;
 
        fm10k_mbx_lock(interface);
@@ -996,21 +1014,6 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
        int xcast_mode;
        u16 vid, glort;
 
-       /* restore our address if perm_addr is set */
-       if (hw->mac.type == fm10k_mac_vf) {
-               if (is_valid_ether_addr(hw->mac.perm_addr)) {
-                       ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
-                       ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
-                       ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
-                       netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
-               }
-
-               if (hw->mac.vlan_override)
-                       netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
-               else
-                       netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
-       }
-
        /* record glort for this interface */
        glort = interface->glort;
 
@@ -1045,7 +1048,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
                                           vid, true, 0);
        }
 
-       /* update xcast mode before syncronizing addresses */
+       /* update xcast mode before synchronizing addresses */
        hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
 
        /* synchronize all of the addresses */
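
The fm10k_update_vid() hunk keeps each Rx ring's default-VID bookkeeping in step with the set of explicitly configured VLANs: when the default VID is also an active VLAN, the ring's vid field gets FM10K_VLAN_CLEAR set so the Rx path stops treating it as the implicit default (the ring setup in the PCI code further down applies the same rule at bring-up). A small standalone sketch of that toggle; the flag value here is a stand-in, since in the driver FM10K_VLAN_CLEAR lives outside the 12-bit VID field:

#include <stdio.h>
#include <stdbool.h>

#define VLAN_N_VID      4096
#define VLAN_CLEAR_FLAG 0x8000u  /* stand-in for FM10K_VLAN_CLEAR */

static bool active_vlans[VLAN_N_VID];  /* stands in for interface->active_vlans */

static void sync_ring_default_vid(unsigned int *ring_vid)
{
    unsigned int vid = *ring_vid & (VLAN_N_VID - 1);

    /* default VID also configured as an explicit VLAN -> flag the ring
     * so the Rx path no longer treats it as the implicit default */
    if (active_vlans[vid])
        *ring_vid |= VLAN_CLEAR_FLAG;
    else
        *ring_vid &= ~VLAN_CLEAR_FLAG;
}

int main(void)
{
    unsigned int ring_vid = 100;  /* hypothetical hw->mac.default_vid */

    sync_ring_default_vid(&ring_vid);
    printf("VLAN 100 not configured: ring vid = 0x%04x\n", ring_vid);

    active_vlans[100] = true;     /* user adds VLAN 100 explicitly */
    sync_ring_default_vid(&ring_vid);
    printf("VLAN 100 configured:     ring vid = 0x%04x\n", ring_vid);
    return 0;
}
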
index ce53ff25f88d2ad7ebe03c475ffcbeb082f980fc..3d71c520611018373b0d139638535ad2aabb74e6 100644 (file)
@@ -170,6 +170,21 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
        /* reassociate interrupts */
        fm10k_mbx_request_irq(interface);
 
+       /* update hardware address for VFs if perm_addr has changed */
+       if (hw->mac.type == fm10k_mac_vf) {
+               if (is_valid_ether_addr(hw->mac.perm_addr)) {
+                       ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
+                       ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
+                       ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
+                       netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
+               }
+
+               if (hw->mac.vlan_override)
+                       netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+               else
+                       netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+       }
+
        /* reset clock */
        fm10k_ts_reset(interface);
 
@@ -663,6 +678,10 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
        /* assign default VLAN to queue */
        ring->vid = hw->mac.default_vid;
 
+       /* if we have an active VLAN, disable default VID */
+       if (test_bit(hw->mac.default_vid, interface->active_vlans))
+               ring->vid |= FM10K_VLAN_CLEAR;
+
        /* Map interrupt */
        if (ring->q_vector) {
                rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
@@ -861,10 +880,12 @@ void fm10k_netpoll(struct net_device *netdev)
 
 #endif
 #define FM10K_ERR_MSG(type) case (type): error = #type; break
-static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
+static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
                              struct fm10k_fault *fault)
 {
        struct pci_dev *pdev = interface->pdev;
+       struct fm10k_hw *hw = &interface->hw;
+       struct fm10k_iov_data *iov_data = interface->iov_data;
        char *error;
 
        switch (type) {
@@ -918,6 +939,30 @@ static void fm10k_print_fault(struct fm10k_intfc *interface, int type,
                 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
                 error, fault->address, fault->specinfo,
                 PCI_SLOT(fault->func), PCI_FUNC(fault->func));
+
+       /* For VF faults, clear out the respective LPORT, reset the queue
+        * resources, and then reconnect to the mailbox. This allows the
+        * VF in question to resume behavior. For transient faults that are
+        * the result of non-malicious behavior this will log the fault and
+        * allow the VF to resume functionality. Obviously for malicious VFs
+        * they will be able to attempt malicious behavior again. In this
+        * case, the system administrator will need to step in and manually
+        * remove or disable the VF in question.
+        */
+       if (fault->func && iov_data) {
+               int vf = fault->func - 1;
+               struct fm10k_vf_info *vf_info = &iov_data->vf_info[vf];
+
+               hw->iov.ops.reset_lport(hw, vf_info);
+               hw->iov.ops.reset_resources(hw, vf_info);
+
+               /* reset_lport disables the VF, so re-enable it */
+               hw->iov.ops.set_lport(hw, vf_info, vf,
+                                     FM10K_VF_FLAG_MULTI_CAPABLE);
+
+               /* reset_resources will disconnect from the mbx  */
+               vf_info->mbx.ops.connect(hw, &vf_info->mbx);
+       }
 }
 
 static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
@@ -941,7 +986,7 @@ static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr)
                        continue;
                }
 
-               fm10k_print_fault(interface, type, &fault);
+               fm10k_handle_fault(interface, type, &fault);
        }
 }
 
@@ -1705,22 +1750,86 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
 
 static void fm10k_slot_warn(struct fm10k_intfc *interface)
 {
-       struct device *dev = &interface->pdev->dev;
+       enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+       enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
        struct fm10k_hw *hw = &interface->hw;
+       int max_gts = 0, expected_gts = 0;
 
-       if (hw->mac.ops.is_slot_appropriate(hw))
+       if (pcie_get_minimum_link(interface->pdev, &speed, &width) ||
+           speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+               dev_warn(&interface->pdev->dev,
+                        "Unable to determine PCI Express bandwidth.\n");
                return;
+       }
+
+       switch (speed) {
+       case PCIE_SPEED_2_5GT:
+               /* 8b/10b encoding reduces max throughput by 20% */
+               max_gts = 2 * width;
+               break;
+       case PCIE_SPEED_5_0GT:
+               /* 8b/10b encoding reduces max throughput by 20% */
+               max_gts = 4 * width;
+               break;
+       case PCIE_SPEED_8_0GT:
+               /* 128b/130b encoding has less than 2% impact on throughput */
+               max_gts = 8 * width;
+               break;
+       default:
+               dev_warn(&interface->pdev->dev,
+                        "Unable to determine PCI Express bandwidth.\n");
+               return;
+       }
+
+       dev_info(&interface->pdev->dev,
+                "PCI Express bandwidth of %dGT/s available\n",
+                max_gts);
+       dev_info(&interface->pdev->dev,
+                "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n",
+                (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+                 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+                 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+                 "Unknown"),
+                hw->bus.width,
+                (speed == PCIE_SPEED_2_5GT ? "20%" :
+                 speed == PCIE_SPEED_5_0GT ? "20%" :
+                 speed == PCIE_SPEED_8_0GT ? "<2%" :
+                 "Unknown"),
+                (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
+                 hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
+                 hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
+                 "Unknown"));
 
-       dev_warn(dev,
-                "For optimal performance, a %s %s slot is recommended.\n",
-                (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" :
-                 hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" :
-                 "x8"),
-                (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
-                 hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
-                 "8.0GT/s"));
-       dev_warn(dev,
-                "A slot with more lanes and/or higher speed is suggested.\n");
+       switch (hw->bus_caps.speed) {
+       case fm10k_bus_speed_2500:
+               /* 8b/10b encoding reduces max throughput by 20% */
+               expected_gts = 2 * hw->bus_caps.width;
+               break;
+       case fm10k_bus_speed_5000:
+               /* 8b/10b encoding reduces max throughput by 20% */
+               expected_gts = 4 * hw->bus_caps.width;
+               break;
+       case fm10k_bus_speed_8000:
+               /* 128b/130b encoding has less than 2% impact on throughput */
+               expected_gts = 8 * hw->bus_caps.width;
+               break;
+       default:
+               dev_warn(&interface->pdev->dev,
+                        "Unable to determine expected PCI Express bandwidth.\n");
+               return;
+       }
+
+       if (max_gts < expected_gts) {
+               dev_warn(&interface->pdev->dev,
+                        "This device requires %dGT/s of bandwidth for optimal performance.\n",
+                        expected_gts);
+               dev_warn(&interface->pdev->dev,
+                        "A %sslot with x%d lanes is suggested.\n",
+                        (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
+                         hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
+                         hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""),
+                        hw->bus_caps.width);
+       }
 }
 
 /**
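
Rather than asking the MAC layer whether the slot is appropriate (the is_slot_appropriate op is deleted further down), fm10k_slot_warn() now computes the bandwidth itself from the negotiated link: per-lane rate corrected for encoding overhead, times the link width, compared against the same figure derived from bus_caps. The arithmetic, as a standalone sketch with example numbers:

#include <stdio.h>

enum pcie_speed { GEN1_2_5GT, GEN2_5_0GT, GEN3_8_0GT };

/* effective GT/s = per-lane rate minus encoding overhead, times link width */
static int effective_gts(enum pcie_speed speed, int width)
{
    switch (speed) {
    case GEN1_2_5GT: return 2 * width; /* 2.5GT/s, 8b/10b: ~20% overhead */
    case GEN2_5_0GT: return 4 * width; /* 5.0GT/s, 8b/10b: ~20% overhead */
    case GEN3_8_0GT: return 8 * width; /* 8.0GT/s, 128b/130b: <2%, ignored */
    }
    return 0;
}

int main(void)
{
    /* e.g. a device capable of Gen3 x8 sitting in a Gen2 x4 slot */
    int max_gts      = effective_gts(GEN2_5_0GT, 4);  /* what the slot gives  */
    int expected_gts = effective_gts(GEN3_8_0GT, 8);  /* what the device wants */

    printf("available %dGT/s, expected %dGT/s%s\n",
           max_gts, expected_gts,
           max_gts < expected_gts ? " -> warn about the slot" : "");
    return 0;
}
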
@@ -1739,7 +1848,6 @@ static int fm10k_probe(struct pci_dev *pdev,
 {
        struct net_device *netdev;
        struct fm10k_intfc *interface;
-       struct fm10k_hw *hw;
        int err;
 
        err = pci_enable_device_mem(pdev);
@@ -1783,7 +1891,6 @@ static int fm10k_probe(struct pci_dev *pdev,
 
        interface->netdev = netdev;
        interface->pdev = pdev;
-       hw = &interface->hw;
 
        interface->uc_addr = ioremap(pci_resource_start(pdev, 0),
                                     FM10K_UC_ADDR_SIZE);
@@ -1825,24 +1932,12 @@ static int fm10k_probe(struct pci_dev *pdev,
        /* Register PTP interface */
        fm10k_ptp_register(interface);
 
-       /* print bus type/speed/width info */
-       dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n",
-                (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" :
-                 hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" :
-                 hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" :
-                 "Unknown"),
-                (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" :
-                 hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" :
-                 hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" :
-                 "Unknown"),
-                (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
-                 hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
-                 hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
-                 "Unknown"));
-
        /* print warning for non-optimal configurations */
        fm10k_slot_warn(interface);
 
+       /* report MAC address for logging */
+       dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
+
        /* enable SR-IOV after registering netdev to enforce PF/VF ordering */
        fm10k_iov_configure(pdev, 0);
 
@@ -1983,6 +2078,16 @@ static int fm10k_resume(struct pci_dev *pdev)
        if (err)
                return err;
 
+       /* assume host is not ready, to prevent race with watchdog in case we
+        * actually don't have connection to the switch
+        */
+       interface->host_ready = false;
+       fm10k_watchdog_host_not_ready(interface);
+
+       /* clear the service task disable bit to allow service task to start */
+       clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+       fm10k_service_event_schedule(interface);
+
        /* restore SR-IOV interface */
        fm10k_iov_resume(pdev);
 
@@ -2010,6 +2115,15 @@ static int fm10k_suspend(struct pci_dev *pdev,
 
        fm10k_iov_suspend(pdev);
 
+       /* the watchdog tasks may read registers, which will appear like a
+        * surprise-remove event once the PCI device is disabled. This will
+        * cause us to close the netdevice, so we don't retain the open/closed
+        * state post-resume. Prevent this by disabling the service task while
+        * suspended, until we actually resume.
+        */
+       set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+       cancel_work_sync(&interface->service_task);
+
        rtnl_lock();
 
        if (netif_running(netdev))
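
The suspend/resume hunks gate the service task across power transitions: suspend sets __FM10K_SERVICE_DISABLE and flushes the worker so it cannot touch registers on a device that is about to be disabled, while resume clears the bit, reschedules the task, and marks the host as not ready so the watchdog re-detects switch state. A toy sketch of the gating, assuming (as the comments imply) that fm10k_service_event_schedule() refuses to queue work while the disable bit is set:

#include <stdio.h>

#define SERVICE_DISABLE (1u << 0)  /* stands in for __FM10K_SERVICE_DISABLE */
#define SERVICE_SCHED   (1u << 1)  /* stands in for __FM10K_SERVICE_SCHED   */

static unsigned int state;

/* mirrors the assumed gate: do not queue the service task while disabled */
static void service_event_schedule(void)
{
    if (state & SERVICE_DISABLE) {
        printf("service task disabled, not scheduled\n");
        return;
    }
    state |= SERVICE_SCHED;
    printf("service task scheduled\n");
}

static void suspend(void)
{
    state |= SERVICE_DISABLE;  /* block any new scheduling ...           */
    state &= ~SERVICE_SCHED;   /* ... and pretend cancel_work_sync() ran */
}

static void resume(void)
{
    state &= ~SERVICE_DISABLE; /* allow the task again */
    service_event_schedule();  /* and kick it once, as the resume path does */
}

int main(void)
{
    suspend();
    service_event_schedule();  /* ignored while suspended */
    resume();
    return 0;
}
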
index 3ca0233b3ea23682c23126bc24357629e3a4eb9a..8c0bdc4e4edd44eb744810619ead6709fe760c06 100644 (file)
@@ -59,6 +59,11 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
        if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
                return FM10K_ERR_DMA_PENDING;
 
+       /* verify the switch is ready for reset */
+       reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
+       if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
+               goto out;
+
        /* Initiate data path reset */
        reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
        fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
@@ -72,6 +77,7 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
        if (!(reg & FM10K_IP_NOTINRESET))
                err = FM10K_ERR_RESET_FAILED;
 
+out:
        return err;
 }
 
@@ -184,19 +190,6 @@ static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
        return 0;
 }
 
-/**
- *  fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU
- *  @hw: pointer to hardware structure
- *
- *  Looks at the PCIe bus info to confirm whether or not this slot can support
- *  the necessary bandwidth for this device.
- **/
-static bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw)
-{
-       return (hw->bus.speed == hw->bus_caps.speed) &&
-              (hw->bus.width == hw->bus_caps.width);
-}
-
 /**
  *  fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
  *  @hw: pointer to hardware structure
@@ -1161,6 +1154,24 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
        return hw->iov.ops.assign_int_moderator(hw, vf_idx);
 }
 
+/**
+ * fm10k_iov_select_vid - Select correct default VID
+ * @hw: Pointer to hardware structure
+ * @vid: VID to correct
+ *
+ * Will report an error if VID is out of range. For VID = 0, it will return
+ * either the pf_vid or sw_vid depending on which one is set.
+ */
+static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
+{
+       if (!vid)
+               return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
+       else if (vf_info->pf_vid && vid != vf_info->pf_vid)
+               return FM10K_ERR_PARAM;
+       else
+               return vid;
+}
+
 /**
  *  fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
  *  @hw: Pointer to hardware structure
@@ -1175,9 +1186,10 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                              struct fm10k_mbx_info *mbx)
 {
        struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
-       int err = 0;
        u8 mac[ETH_ALEN];
        u32 *result;
+       int err = 0;
+       bool set;
        u16 vlan;
        u32 vid;
 
@@ -1193,19 +1205,21 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                if (err)
                        return err;
 
-               /* if VLAN ID is 0, set the default VLAN ID instead of 0 */
-               if (!vid || (vid == FM10K_VLAN_CLEAR)) {
-                       if (vf_info->pf_vid)
-                               vid |= vf_info->pf_vid;
-                       else
-                               vid |= vf_info->sw_vid;
-               } else if (vid != vf_info->pf_vid) {
+               /* verify upper 16 bits are zero */
+               if (vid >> 16)
                        return FM10K_ERR_PARAM;
-               }
+
+               set = !(vid & FM10K_VLAN_CLEAR);
+               vid &= ~FM10K_VLAN_CLEAR;
+
+               err = fm10k_iov_select_vid(vf_info, vid);
+               if (err < 0)
+                       return err;
+               else
+                       vid = err;
 
                /* update VSI info for VF in regards to VLAN table */
-               err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi,
-                                             !(vid & FM10K_VLAN_CLEAR));
+               err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
        }
 
        if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
@@ -1221,19 +1235,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                    memcmp(mac, vf_info->mac, ETH_ALEN))
                        return FM10K_ERR_PARAM;
 
-               /* if VLAN ID is 0, set the default VLAN ID instead of 0 */
-               if (!vlan || (vlan == FM10K_VLAN_CLEAR)) {
-                       if (vf_info->pf_vid)
-                               vlan |= vf_info->pf_vid;
-                       else
-                               vlan |= vf_info->sw_vid;
-               } else if (vf_info->pf_vid) {
-                       return FM10K_ERR_PARAM;
-               }
+               set = !(vlan & FM10K_VLAN_CLEAR);
+               vlan &= ~FM10K_VLAN_CLEAR;
+
+               err = fm10k_iov_select_vid(vf_info, vlan);
+               if (err < 0)
+                       return err;
+               else
+                       vlan = err;
 
                /* notify switch of request for new unicast address */
-               err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, mac, vlan,
-                                                !(vlan & FM10K_VLAN_CLEAR), 0);
+               err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
+                                                mac, vlan, set, 0);
        }
 
        if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
@@ -1248,19 +1261,18 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
                        return FM10K_ERR_PARAM;
 
-               /* if VLAN ID is 0, set the default VLAN ID instead of 0 */
-               if (!vlan || (vlan == FM10K_VLAN_CLEAR)) {
-                       if (vf_info->pf_vid)
-                               vlan |= vf_info->pf_vid;
-                       else
-                               vlan |= vf_info->sw_vid;
-               } else if (vf_info->pf_vid) {
-                       return FM10K_ERR_PARAM;
-               }
+               set = !(vlan & FM10K_VLAN_CLEAR);
+               vlan &= ~FM10K_VLAN_CLEAR;
+
+               err = fm10k_iov_select_vid(vf_info, vlan);
+               if (err < 0)
+                       return err;
+               else
+                       vlan = err;
 
                /* notify switch of request for new multicast address */
-               err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan,
-                                                !(vlan & FM10K_VLAN_CLEAR));
+               err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
+                                                mac, vlan, set);
        }
 
        return err;
@@ -1849,7 +1861,6 @@ static struct fm10k_mac_ops mac_ops_pf = {
        .init_hw                = &fm10k_init_hw_pf,
        .start_hw               = &fm10k_start_hw_generic,
        .stop_hw                = &fm10k_stop_hw_generic,
-       .is_slot_appropriate    = &fm10k_is_slot_appropriate_pf,
        .update_vlan            = &fm10k_update_vlan_pf,
        .read_mac_addr          = &fm10k_read_mac_addr_pf,
        .update_uc_addr         = &fm10k_update_uc_addr_pf,
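
The three open-coded VID fallbacks in fm10k_iov_msg_mac_vlan_pf() are folded into fm10k_iov_select_vid(), which the handler calls after splitting the FM10K_VLAN_CLEAR bit off into a set/clear flag. Its decision table, as a userspace sketch with the error code replaced by -1:

#include <stdio.h>

struct vf_info { int pf_vid; int sw_vid; };

static int iov_select_vid(const struct vf_info *vf, int vid)
{
    if (!vid)
        return vf->pf_vid ? vf->pf_vid : vf->sw_vid;
    else if (vf->pf_vid && vid != vf->pf_vid)
        return -1;               /* FM10K_ERR_PARAM */
    else
        return vid;
}

int main(void)
{
    struct vf_info pinned = { .pf_vid = 100, .sw_vid = 42 };
    struct vf_info loose  = { .pf_vid = 0,   .sw_vid = 42 };

    printf("%d\n", iov_select_vid(&pinned, 0));    /* 100: falls back to pf_vid  */
    printf("%d\n", iov_select_vid(&pinned, 200));  /* -1:  conflicts with pf_vid */
    printf("%d\n", iov_select_vid(&loose, 0));     /* 42:  falls back to sw_vid  */
    printf("%d\n", iov_select_vid(&loose, 200));   /* 200: accepted as-is        */
    return 0;
}
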
index 2a17d82fa37d47c14e7190a3bbe1f01db080f69e..bac8d486d75f335d60322adcd0f827a5de367392 100644 (file)
@@ -521,7 +521,6 @@ struct fm10k_mac_ops {
        s32 (*stop_hw)(struct fm10k_hw *);
        s32 (*get_bus_info)(struct fm10k_hw *);
        s32 (*get_host_state)(struct fm10k_hw *, bool *);
-       bool (*is_slot_appropriate)(struct fm10k_hw *);
        s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
        s32 (*read_mac_addr)(struct fm10k_hw *);
        s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
index 94f0f6a146d96e7e57fe9bf647bf6ae480a727b4..36c8b0aa08fd2eeadfbc7b87ee5ce9d8d619b589 100644 (file)
@@ -131,19 +131,6 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
        return 0;
 }
 
-/**
- *  fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU
- *  @hw: pointer to hardware structure
- *
- *  Looks at the PCIe bus info to confirm whether or not this slot can support
- *  the necessary bandwidth for this device. Since the VF has no control over
- *  the "slot" it is in, always indicate that the slot is appropriate.
- **/
-static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw)
-{
-       return true;
-}
-
 /* This structure defines the attributes to be parsed below */
 const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = {
        FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN),
@@ -552,7 +539,6 @@ static struct fm10k_mac_ops mac_ops_vf = {
        .init_hw                = &fm10k_init_hw_vf,
        .start_hw               = &fm10k_start_hw_generic,
        .stop_hw                = &fm10k_stop_hw_vf,
-       .is_slot_appropriate    = &fm10k_is_slot_appropriate_vf,
        .update_vlan            = &fm10k_update_vlan_vf,
        .read_mac_addr          = &fm10k_read_mac_addr_vf,
        .update_uc_addr         = &fm10k_update_uc_addr_vf,
index edf1fb91320967f98cdc3ba56a4ad984a48b1063..a699c991ad2cfe50af67c5249f799d4cfbf87ea6 100644 (file)
@@ -539,8 +539,7 @@ struct hwmon_buff {
 #define IXGBE_MIN_RSC_ITR      24
 #define IXGBE_100K_ITR         40
 #define IXGBE_20K_ITR          200
-#define IXGBE_10K_ITR          400
-#define IXGBE_8K_ITR           500
+#define IXGBE_12K_ITR          336
 
 /* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
 static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
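
IXGBE_10K_ITR and IXGBE_8K_ITR give way to a single IXGBE_12K_ITR, used for the Tx and bulk-latency paths in the later hunks. The stored value appears to be in quarter-microsecond units (ixgbe_update_itr() below converts with "timepassed_us = q_vector->itr >> 2"), so 336 works out to roughly 12,000 interrupts per second. A quick sketch of the conversion:

#include <stdio.h>

static int itr_to_ints_per_sec(int itr)
{
    int usecs = itr >> 2;          /* register value -> microseconds */

    return 1000000 / usecs;        /* interrupts per second */
}

int main(void)
{
    printf("IXGBE_100K_ITR=40    -> ~%d ints/s\n", itr_to_ints_per_sec(40));
    printf("IXGBE_20K_ITR=200    -> ~%d ints/s\n", itr_to_ints_per_sec(200));
    printf("IXGBE_12K_ITR=336    -> ~%d ints/s\n", itr_to_ints_per_sec(336));
    printf("old IXGBE_8K_ITR=500 -> ~%d ints/s\n", itr_to_ints_per_sec(500));
    return 0;
}
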
index ab2edc8e7703f014e0dc09f8d91de5dee1054083..94c4912b23308c5bbd0648d8b21a5bcaea9077aa 100644 (file)
@@ -2286,7 +2286,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
                adapter->tx_itr_setting = ec->tx_coalesce_usecs;
 
        if (adapter->tx_itr_setting == 1)
-               tx_itr_param = IXGBE_10K_ITR;
+               tx_itr_param = IXGBE_12K_ITR;
        else
                tx_itr_param = adapter->tx_itr_setting;
 
index 68e1e757ecefce38e456c5cf2ae017ddea2ed054..f3168bcc7d87905b2f3563ea64c0fbfeda01d1d4 100644 (file)
@@ -866,7 +866,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
        if (txr_count && !rxr_count) {
                /* tx only vector */
                if (adapter->tx_itr_setting == 1)
-                       q_vector->itr = IXGBE_10K_ITR;
+                       q_vector->itr = IXGBE_12K_ITR;
                else
                        q_vector->itr = adapter->tx_itr_setting;
        } else {
index 63b2cfe9416b22eaef239ea854852daa6ddcce03..acb1b91408ec4ff1fba129b37ea5e98ceed22b11 100644 (file)
@@ -2261,7 +2261,7 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
        /* simple throttlerate management
         *   0-10MB/s   lowest (100000 ints/s)
         *  10-20MB/s   low    (20000 ints/s)
-        *  20-1249MB/s bulk   (8000 ints/s)
+        *  20-1249MB/s bulk   (12000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = q_vector->itr >> 2;
@@ -2350,7 +2350,7 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
                new_itr = IXGBE_20K_ITR;
                break;
        case bulk_latency:
-               new_itr = IXGBE_8K_ITR;
+               new_itr = IXGBE_12K_ITR;
                break;
        default:
                break;
@@ -2495,17 +2495,26 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
 
-       if (eicr & IXGBE_EICR_GPI_SDP2(hw)) {
+       if (!ixgbe_is_sfp(hw))
+               return;
+
+       /* Later MACs use different SDP */
+       if (hw->mac.type >= ixgbe_mac_X540)
+               eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
+
+       if (eicr & eicr_mask) {
                /* Clear the interrupt */
-               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw));
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
                if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                        adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
                        ixgbe_service_event_schedule(adapter);
                }
        }
 
-       if (eicr & IXGBE_EICR_GPI_SDP1(hw)) {
+       if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
+           (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
                /* Clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
                if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
@@ -9019,12 +9028,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
        /* remove the added san mac */
        ixgbe_del_sanmac_netdev(netdev);
 
-       if (netdev->reg_state == NETREG_REGISTERED)
-               unregister_netdev(netdev);
-
 #ifdef CONFIG_PCI_IOV
        ixgbe_disable_sriov(adapter);
 #endif
+       if (netdev->reg_state == NETREG_REGISTERED)
+               unregister_netdev(netdev);
+
        ixgbe_clear_interrupt_scheme(adapter);
 
        ixgbe_release_hw_control(adapter);
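
ixgbe_check_sfp_event() now bails out early for non-SFP parts and picks which GPI SDP bit in EICR signals module presence from the MAC generation (SDP0 on X540 and later, SDP2 before that), with the SDP1 branch limited to 82599. A sketch of the mask selection, using placeholder bit values rather than the real register layout:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum mac_type { MAC_82598EB, MAC_82599EB, MAC_X540, MAC_X550 };

#define EICR_GPI_SDP2_LEGACY 0x01u  /* placeholder for the pre-X540 SDP2 bit */
#define EICR_GPI_SDP0_X540   0x02u  /* placeholder for the X540+ SDP0 bit    */

static bool sfp_event_pending(enum mac_type mac, uint32_t eicr)
{
    uint32_t mask = EICR_GPI_SDP2_LEGACY;

    if (mac >= MAC_X540)
        mask = EICR_GPI_SDP0_X540;   /* later MACs use a different SDP */

    return eicr & mask;
}

int main(void)
{
    printf("82599, SDP2 set: %d\n", sfp_event_pending(MAC_82599EB, 0x01));
    printf("X550,  SDP2 set: %d\n", sfp_event_pending(MAC_X550,   0x01));
    printf("X550,  SDP0 set: %d\n", sfp_event_pending(MAC_X550,   0x02));
    return 0;
}
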
index 630f0b7800e47e085c5ffb2db6ac23efef9ad08c..0e2fc1a844ab2b2453681750368e1cf9d288e779 100644 (file)
@@ -2018,10 +2018,18 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
        lp->cfg.flags |= SMC91X_USE_DMA;
 #  endif
        if (lp->cfg.flags & SMC91X_USE_DMA) {
-               int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
-                                         smc_pxa_dma_irq, NULL);
-               if (dma >= 0)
-                       dev->dma = dma;
+               dma_cap_mask_t mask;
+               struct pxad_param param;
+
+               dma_cap_zero(mask);
+               dma_cap_set(DMA_SLAVE, mask);
+               param.prio = PXAD_PRIO_LOWEST;
+               param.drcmr = -1UL;
+
+               lp->dma_chan =
+                       dma_request_slave_channel_compat(mask, pxad_filter_fn,
+                                                        &param, &dev->dev,
+                                                        "data");
        }
 #endif
 
@@ -2032,8 +2040,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
                            version_string, revision_register & 0x0f,
                            lp->base, dev->irq);
 
-               if (dev->dma != (unsigned char)-1)
-                       pr_cont(" DMA %d", dev->dma);
+               if (lp->dma_chan)
+                       pr_cont(" DMA %p", lp->dma_chan);
 
                pr_cont("%s%s\n",
                        lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
@@ -2058,8 +2066,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
 
 err_out:
 #ifdef CONFIG_ARCH_PXA
-       if (retval && dev->dma != (unsigned char)-1)
-               pxa_free_dma(dev->dma);
+       if (retval && lp->dma_chan)
+               dma_release_channel(lp->dma_chan);
 #endif
        return retval;
 }
@@ -2370,6 +2378,7 @@ static int smc_drv_probe(struct platform_device *pdev)
                struct smc_local *lp = netdev_priv(ndev);
                lp->device = &pdev->dev;
                lp->physaddr = res->start;
+
        }
 #endif
 
@@ -2406,8 +2415,8 @@ static int smc_drv_remove(struct platform_device *pdev)
        free_irq(ndev->irq, ndev);
 
 #ifdef CONFIG_ARCH_PXA
-       if (ndev->dma != (unsigned char)-1)
-               pxa_free_dma(ndev->dma);
+       if (lp->dma_chan)
+               dma_release_channel(lp->dma_chan);
 #endif
        iounmap(lp->base);
 
index 3a18501d1068c36816554f953e367ff1439c2a36..a3c129e1e40ac1e2c6785ff30fd4af41cb68ecb2 100644 (file)
@@ -33,6 +33,7 @@
 #ifndef _SMC91X_H_
 #define _SMC91X_H_
 
+#include <linux/dmaengine.h>
 #include <linux/smc91x.h>
 
 /*
@@ -244,6 +245,7 @@ struct smc_local {
        u_long physaddr;
        struct device *device;
 #endif
+       struct dma_chan *dma_chan;
        void __iomem *base;
        void __iomem *datacs;
 
@@ -265,21 +267,47 @@ struct smc_local {
  * as RX which can overrun memory and lose packets.
  */
 #include <linux/dma-mapping.h>
-#include <mach/dma.h>
+#include <linux/dma/pxa-dma.h>
 
 #ifdef SMC_insl
 #undef SMC_insl
 #define SMC_insl(a, r, p, l) \
        smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
+static inline void
+smc_pxa_dma_inpump(struct smc_local *lp, u_char *buf, int len)
+{
+       dma_addr_t dmabuf;
+       struct dma_async_tx_descriptor *tx;
+       dma_cookie_t cookie;
+       enum dma_status status;
+       struct dma_tx_state state;
+
+       dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
+       tx = dmaengine_prep_slave_single(lp->dma_chan, dmabuf, len,
+                                        DMA_DEV_TO_MEM, 0);
+       if (tx) {
+               cookie = dmaengine_submit(tx);
+               dma_async_issue_pending(lp->dma_chan);
+               do {
+                       status = dmaengine_tx_status(lp->dma_chan, cookie,
+                                                    &state);
+                       cpu_relax();
+               } while (status != DMA_COMPLETE && status != DMA_ERROR &&
+                        state.residue);
+               dmaengine_terminate_all(lp->dma_chan);
+       }
+       dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+}
+
 static inline void
 smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
                 u_char *buf, int len)
 {
-       u_long physaddr = lp->physaddr;
-       dma_addr_t dmabuf;
+       struct dma_slave_config config;
+       int ret;
 
        /* fallback if no DMA available */
-       if (dma == (unsigned char)-1) {
+       if (!lp->dma_chan) {
                readsl(ioaddr + reg, buf, len);
                return;
        }
@@ -291,18 +319,22 @@ smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
                len--;
        }
 
+       memset(&config, 0, sizeof(config));
+       config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       config.src_addr = lp->physaddr + reg;
+       config.dst_addr = lp->physaddr + reg;
+       config.src_maxburst = 32;
+       config.dst_maxburst = 32;
+       ret = dmaengine_slave_config(lp->dma_chan, &config);
+       if (ret) {
+               dev_err(lp->device, "dma channel configuration failed: %d\n",
+                       ret);
+               return;
+       }
+
        len *= 4;
-       dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
-       DCSR(dma) = DCSR_NODESC;
-       DTADR(dma) = dmabuf;
-       DSADR(dma) = physaddr + reg;
-       DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
-                    DCMD_WIDTH4 | (DCMD_LENGTH & len));
-       DCSR(dma) = DCSR_NODESC | DCSR_RUN;
-       while (!(DCSR(dma) & DCSR_STOPSTATE))
-               cpu_relax();
-       DCSR(dma) = 0;
-       dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+       smc_pxa_dma_inpump(lp, buf, len);
 }
 #endif
 
@@ -314,11 +346,11 @@ static inline void
 smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
                 u_char *buf, int len)
 {
-       u_long physaddr = lp->physaddr;
-       dma_addr_t dmabuf;
+       struct dma_slave_config config;
+       int ret;
 
        /* fallback if no DMA available */
-       if (dma == (unsigned char)-1) {
+       if (!lp->dma_chan) {
                readsw(ioaddr + reg, buf, len);
                return;
        }
@@ -330,26 +362,25 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
                len--;
        }
 
+       memset(&config, 0, sizeof(config));
+       config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+       config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+       config.src_addr = lp->physaddr + reg;
+       config.dst_addr = lp->physaddr + reg;
+       config.src_maxburst = 32;
+       config.dst_maxburst = 32;
+       ret = dmaengine_slave_config(lp->dma_chan, &config);
+       if (ret) {
+               dev_err(lp->device, "dma channel configuration failed: %d\n",
+                       ret);
+               return;
+       }
+
        len *= 2;
-       dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
-       DCSR(dma) = DCSR_NODESC;
-       DTADR(dma) = dmabuf;
-       DSADR(dma) = physaddr + reg;
-       DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
-                    DCMD_WIDTH2 | (DCMD_LENGTH & len));
-       DCSR(dma) = DCSR_NODESC | DCSR_RUN;
-       while (!(DCSR(dma) & DCSR_STOPSTATE))
-               cpu_relax();
-       DCSR(dma) = 0;
-       dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
+       smc_pxa_dma_inpump(lp, buf, len);
 }
 #endif
 
-static void
-smc_pxa_dma_irq(int dma, void *dummy)
-{
-       DCSR(dma) = 0;
-}
 #endif  /* CONFIG_ARCH_PXA */
 
 
index 975357ddeae62b56eb9e1627ae2c3c183ebc2ec6..167cfc503a783dbc0b9689ed3a0c3c6627323876 100644 (file)
@@ -109,6 +109,8 @@ config USB_RTL8152
 config USB_LAN78XX
        tristate "Microchip LAN78XX Based USB Ethernet Adapters"
        select MII
+       select PHYLIB
+       select MICROCHIP_PHY
        help
          This option adds support for Microchip LAN78XX based USB 2
          & USB 3 10/100/1000 Ethernet adapters.
index a39518fc93aadf82918fd944dcf88c6de5321f48..6a70a72b565d88e481b41a9c5c28d5a94f89473a 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/mii.h>
 #include <linux/usb.h>
 #include <linux/crc32.h>
 #include <linux/signal.h>
 #include <linux/ipv6.h>
 #include <linux/mdio.h>
 #include <net/ip6_checksum.h>
+#include <linux/microchipphy.h>
 #include "lan78xx.h"
 
 #define DRIVER_AUTHOR  "WOOJUNG HUH <woojung.huh@microchip.com>"
 #define DRIVER_DESC    "LAN78XX USB 3.0 Gigabit Ethernet Devices"
 #define DRIVER_NAME    "lan78xx"
-#define DRIVER_VERSION "1.0.0"
+#define DRIVER_VERSION "1.0.1"
 
 #define TX_TIMEOUT_JIFFIES             (5 * HZ)
 #define THROTTLE_JIFFIES               (HZ / 8)
@@ -57,7 +57,6 @@
 #define DEFAULT_RX_CSUM_ENABLE         (true)
 #define DEFAULT_TSO_CSUM_ENABLE                (true)
 #define DEFAULT_VLAN_FILTER_ENABLE     (true)
-#define INTERNAL_PHY_ID                        (2)     /* 2: GMII */
 #define TX_OVERHEAD                    (8)
 #define RXW_PADDING                    2
 
@@ -275,10 +274,12 @@ struct lan78xx_net {
        struct timer_list       delay;
 
        unsigned long           data[5];
-       struct mii_if_info      mii;
 
        int                     link_on;
        u8                      mdix_ctrl;
+
+       u32                     devid;
+       struct mii_bus          *mdiobus;
 };
 
 /* use ethtool to change the level for any given device */
@@ -411,222 +412,6 @@ static inline u32 mii_access(int id, int index, int read)
        return ret;
 }
 
-static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
-{
-       struct lan78xx_net *dev = netdev_priv(netdev);
-       u32 val, addr;
-       int ret;
-
-       ret = usb_autopm_get_interface(dev->intf);
-       if (ret < 0)
-               return ret;
-
-       mutex_lock(&dev->phy_mutex);
-
-       /* confirm MII not busy */
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       /* set the address, index & direction (read from PHY) */
-       phy_id &= dev->mii.phy_id_mask;
-       idx &= dev->mii.reg_num_mask;
-       addr = mii_access(phy_id, idx, MII_READ);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       ret = lan78xx_read_reg(dev, MII_DATA, &val);
-
-       ret = (int)(val & 0xFFFF);
-
-done:
-       mutex_unlock(&dev->phy_mutex);
-       usb_autopm_put_interface(dev->intf);
-       return ret;
-}
-
-static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
-                              int idx, int regval)
-{
-       struct lan78xx_net *dev = netdev_priv(netdev);
-       u32 val, addr;
-       int ret;
-
-       if (usb_autopm_get_interface(dev->intf) < 0)
-               return;
-
-       mutex_lock(&dev->phy_mutex);
-
-       /* confirm MII not busy */
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       val = regval;
-       ret = lan78xx_write_reg(dev, MII_DATA, val);
-
-       /* set the address, index & direction (write to PHY) */
-       phy_id &= dev->mii.phy_id_mask;
-       idx &= dev->mii.reg_num_mask;
-       addr = mii_access(phy_id, idx, MII_WRITE);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-done:
-       mutex_unlock(&dev->phy_mutex);
-       usb_autopm_put_interface(dev->intf);
-}
-
-static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
-                             int mmddev, int mmdidx, int regval)
-{
-       struct lan78xx_net *dev = netdev_priv(netdev);
-       u32 val, addr;
-       int ret;
-
-       if (usb_autopm_get_interface(dev->intf) < 0)
-               return;
-
-       mutex_lock(&dev->phy_mutex);
-
-       /* confirm MII not busy */
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       mmddev &= 0x1F;
-
-       /* set up device address for MMD */
-       ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
-
-       phy_id &= dev->mii.phy_id_mask;
-       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       /* select register of MMD */
-       val = mmdidx;
-       ret = lan78xx_write_reg(dev, MII_DATA, val);
-
-       phy_id &= dev->mii.phy_id_mask;
-       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       /* select register data for MMD */
-       val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
-       ret = lan78xx_write_reg(dev, MII_DATA, val);
-
-       phy_id &= dev->mii.phy_id_mask;
-       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       /* write to MMD */
-       val = regval;
-       ret = lan78xx_write_reg(dev, MII_DATA, val);
-
-       phy_id &= dev->mii.phy_id_mask;
-       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-done:
-       mutex_unlock(&dev->phy_mutex);
-       usb_autopm_put_interface(dev->intf);
-}
-
-static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
-                           int mmddev, int mmdidx)
-{
-       struct lan78xx_net *dev = netdev_priv(netdev);
-       u32 val, addr;
-       int ret;
-
-       ret = usb_autopm_get_interface(dev->intf);
-       if (ret < 0)
-               return ret;
-
-       mutex_lock(&dev->phy_mutex);
-
-       /* confirm MII not busy */
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       /* set up device address for MMD */
-       ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
-
-       phy_id &= dev->mii.phy_id_mask;
-       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       /* select register of MMD */
-       val = mmdidx;
-       ret = lan78xx_write_reg(dev, MII_DATA, val);
-
-       phy_id &= dev->mii.phy_id_mask;
-       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       /* select register data for MMD */
-       val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
-       ret = lan78xx_write_reg(dev, MII_DATA, val);
-
-       phy_id &= dev->mii.phy_id_mask;
-       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       /* set the address, index & direction (read from PHY) */
-       phy_id &= dev->mii.phy_id_mask;
-       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
-       ret = lan78xx_write_reg(dev, MII_ACC, addr);
-
-       ret = lan78xx_phy_wait_not_busy(dev);
-       if (ret < 0)
-               goto done;
-
-       /* read from MMD */
-       ret = lan78xx_read_reg(dev, MII_DATA, &val);
-
-       ret = (int)(val & 0xFFFF);
-
-done:
-       mutex_unlock(&dev->phy_mutex);
-       usb_autopm_put_interface(dev->intf);
-       return ret;
-}
-
 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
 {
        unsigned long start_time = jiffies;
@@ -1047,14 +832,13 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
 
 static int lan78xx_link_reset(struct lan78xx_net *dev)
 {
-       struct mii_if_info *mii = &dev->mii;
+       struct phy_device *phydev = dev->net->phydev;
        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
        int ladv, radv, ret;
        u32 buf;
 
        /* clear PHY interrupt status */
-       /* VTSE PHY */
-       ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
+       ret = phy_read(phydev, LAN88XX_INT_STS);
        if (unlikely(ret < 0))
                return -EIO;
 
@@ -1063,7 +847,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
        if (unlikely(ret < 0))
                return -EIO;
 
-       if (!mii_link_ok(mii) && dev->link_on) {
+       phy_read_status(phydev);
+
+       if (!phydev->link && dev->link_on) {
                dev->link_on = false;
                netif_carrier_off(dev->net);
 
@@ -1075,13 +861,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
                if (unlikely(ret < 0))
                        return -EIO;
-       } else if (mii_link_ok(mii) && !dev->link_on) {
+       } else if (phydev->link && !dev->link_on) {
                dev->link_on = true;
 
-               mii_check_media(mii, 1, 1);
-               mii_ethtool_gset(&dev->mii, &ecmd);
+               phy_ethtool_gset(phydev, &ecmd);
 
-               mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+               ret = phy_read(phydev, LAN88XX_INT_STS);
 
                if (dev->udev->speed == USB_SPEED_SUPER) {
                        if (ethtool_cmd_speed(&ecmd) == 1000) {
@@ -1102,11 +887,11 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                        }
                }
 
-               ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
+               ladv = phy_read(phydev, MII_ADVERTISE);
                if (ladv < 0)
                        return ladv;
 
-               radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
+               radv = phy_read(phydev, MII_LPA);
                if (radv < 0)
                        return radv;
 
@@ -1279,6 +1064,8 @@ static int lan78xx_set_wol(struct net_device *netdev,
 
        device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
 
+       phy_ethtool_set_wol(netdev->phydev, wol);
+
        usb_autopm_put_interface(dev->intf);
 
        return ret;
@@ -1287,49 +1074,39 @@ static int lan78xx_set_wol(struct net_device *netdev,
 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
 {
        struct lan78xx_net *dev = netdev_priv(net);
+       struct phy_device *phydev = net->phydev;
        int ret;
        u32 buf;
-       u32 adv, lpadv;
 
        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;
 
+       ret = phy_ethtool_get_eee(phydev, edata);
+       if (ret < 0)
+               goto exit;
+
        ret = lan78xx_read_reg(dev, MAC_CR, &buf);
        if (buf & MAC_CR_EEE_EN_) {
-               buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
-                                      PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
-               adv = mmd_eee_adv_to_ethtool_adv_t(buf);
-               buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
-                                      PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
-               lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
-
                edata->eee_enabled = true;
-               edata->supported = true;
-               edata->eee_active = !!(adv & lpadv);
-               edata->advertised = adv;
-               edata->lp_advertised = lpadv;
+               edata->eee_active = !!(edata->advertised &
+                                      edata->lp_advertised);
                edata->tx_lpi_enabled = true;
                /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
                ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
                edata->tx_lpi_timer = buf;
        } else {
-               buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
-                                      PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
-               lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
-
                edata->eee_enabled = false;
                edata->eee_active = false;
-               edata->supported = false;
-               edata->advertised = 0;
-               edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(lpadv);
                edata->tx_lpi_enabled = false;
                edata->tx_lpi_timer = 0;
        }
 
+       ret = 0;
+exit:
        usb_autopm_put_interface(dev->intf);
 
-       return 0;
+       return ret;
 }
 
 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
@@ -1347,9 +1124,10 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
                buf |= MAC_CR_EEE_EN_;
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
 
-               buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
-               lan78xx_mmd_write(dev->net, dev->mii.phy_id,
-                                 PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
+               phy_ethtool_set_eee(net->phydev, edata);
+
+               buf = (u32)edata->tx_lpi_timer;
+               ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
        } else {
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
                buf &= ~MAC_CR_EEE_EN_;
@@ -1363,19 +1141,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
 
 static u32 lan78xx_get_link(struct net_device *net)
 {
-       struct lan78xx_net *dev = netdev_priv(net);
+       phy_read_status(net->phydev);
 
-       return mii_link_ok(&dev->mii);
+       return net->phydev->link;
 }
 
 int lan78xx_nway_reset(struct net_device *net)
 {
-       struct lan78xx_net *dev = netdev_priv(net);
-
-       if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
-               return -EOPNOTSUPP;
-
-       return mii_nway_restart(&dev->mii);
+       return phy_start_aneg(net->phydev);
 }
 
 static void lan78xx_get_drvinfo(struct net_device *net,
@@ -1402,36 +1175,78 @@ static void lan78xx_set_msglevel(struct net_device *net, u32 level)
        dev->msg_enable = level;
 }
 
+static int lan78xx_get_mdix_status(struct net_device *net)
+{
+       struct phy_device *phydev = net->phydev;
+       int buf;
+
+       phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
+       buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
+       phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
+
+       return buf;
+}
+
+static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       struct phy_device *phydev = net->phydev;
+       int buf;
+
+       if (mdix_ctrl == ETH_TP_MDI) {
+               phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
+                         LAN88XX_EXT_PAGE_SPACE_1);
+               buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
+               buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
+               phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
+                         buf | LAN88XX_EXT_MODE_CTRL_MDI_);
+               phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
+                         LAN88XX_EXT_PAGE_SPACE_0);
+       } else if (mdix_ctrl == ETH_TP_MDI_X) {
+               phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
+                         LAN88XX_EXT_PAGE_SPACE_1);
+               buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
+               buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
+               phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
+                         buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
+               phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
+                         LAN88XX_EXT_PAGE_SPACE_0);
+       } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
+               phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
+                         LAN88XX_EXT_PAGE_SPACE_1);
+               buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
+               buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
+               phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
+                         buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
+               phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
+                         LAN88XX_EXT_PAGE_SPACE_0);
+       }
+       dev->mdix_ctrl = mdix_ctrl;
+}
+
 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
 {
        struct lan78xx_net *dev = netdev_priv(net);
-       struct mii_if_info *mii = &dev->mii;
+       struct phy_device *phydev = net->phydev;
        int ret;
        int buf;
 
-       if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
-               return -EOPNOTSUPP;
-
        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;
 
-       ret = mii_ethtool_gset(&dev->mii, cmd);
+       ret = phy_ethtool_gset(phydev, cmd);
 
-       mii->mdio_write(mii->dev, mii->phy_id,
-                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
-       buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
-       mii->mdio_write(mii->dev, mii->phy_id,
-                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+       buf = lan78xx_get_mdix_status(net);
 
-       buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
-       if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
+       buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
+       if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
                cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
-       } else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
+       } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
                cmd->eth_tp_mdix = ETH_TP_MDI;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
-       } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
+       } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
        }
@@ -1444,70 +1259,27 @@ static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
 {
        struct lan78xx_net *dev = netdev_priv(net);
-       struct mii_if_info *mii = &dev->mii;
+       struct phy_device *phydev = net->phydev;
        int ret = 0;
        int temp;
 
-       if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
-               return -EOPNOTSUPP;
-
        ret = usb_autopm_get_interface(dev->intf);
        if (ret < 0)
                return ret;
 
        if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
-               if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
-                       mii->mdio_write(mii->dev, mii->phy_id,
-                                       PHY_EXT_GPIO_PAGE,
-                                       PHY_EXT_GPIO_PAGE_SPACE_1);
-                       temp = mii->mdio_read(mii->dev, mii->phy_id,
-                                       PHY_EXT_MODE_CTRL);
-                       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
-                       mii->mdio_write(mii->dev, mii->phy_id,
-                                       PHY_EXT_MODE_CTRL,
-                                       temp | PHY_EXT_MODE_CTRL_MDI_);
-                       mii->mdio_write(mii->dev, mii->phy_id,
-                                       PHY_EXT_GPIO_PAGE,
-                                       PHY_EXT_GPIO_PAGE_SPACE_0);
-               } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
-                       mii->mdio_write(mii->dev, mii->phy_id,
-                                       PHY_EXT_GPIO_PAGE,
-                                       PHY_EXT_GPIO_PAGE_SPACE_1);
-                       temp = mii->mdio_read(mii->dev, mii->phy_id,
-                                       PHY_EXT_MODE_CTRL);
-                       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
-                       mii->mdio_write(mii->dev, mii->phy_id,
-                                       PHY_EXT_MODE_CTRL,
-                                       temp | PHY_EXT_MODE_CTRL_MDI_X_);
-                       mii->mdio_write(mii->dev, mii->phy_id,
-                                       PHY_EXT_GPIO_PAGE,
-                                       PHY_EXT_GPIO_PAGE_SPACE_0);
-               } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
-                       mii->mdio_write(mii->dev, mii->phy_id,
-                                       PHY_EXT_GPIO_PAGE,
-                                       PHY_EXT_GPIO_PAGE_SPACE_1);
-                       temp = mii->mdio_read(mii->dev, mii->phy_id,
-                                                       PHY_EXT_MODE_CTRL);
-                       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
-                       mii->mdio_write(mii->dev, mii->phy_id,
-                                       PHY_EXT_MODE_CTRL,
-                                       temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
-                       mii->mdio_write(mii->dev, mii->phy_id,
-                                       PHY_EXT_GPIO_PAGE,
-                                       PHY_EXT_GPIO_PAGE_SPACE_0);
-               }
+               lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
        }
 
        /* change speed & duplex */
-       ret = mii_ethtool_sset(&dev->mii, cmd);
+       ret = phy_ethtool_sset(phydev, cmd);
 
        if (!cmd->autoneg) {
                /* force link down */
-               temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
-               mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
-                               temp | BMCR_LOOPBACK);
+               temp = phy_read(phydev, MII_BMCR);
+               phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
                mdelay(1);
-               mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
+               phy_write(phydev, MII_BMCR, temp);
        }
 
        usb_autopm_put_interface(dev->intf);
@@ -1537,12 +1309,10 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
 
 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 {
-       struct lan78xx_net *dev = netdev_priv(netdev);
-
        if (!netif_running(netdev))
                return -EINVAL;
 
-       return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+       return phy_mii_ioctl(netdev->phydev, rq, cmd);
 }
 
 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
@@ -1598,53 +1368,183 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
        ether_addr_copy(dev->net->dev_addr, addr);
 }
 
-static void lan78xx_mii_init(struct lan78xx_net *dev)
+/* MDIO read and write wrappers for phylib */
+static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
 {
-       /* Initialize MII structure */
-       dev->mii.dev = dev->net;
-       dev->mii.mdio_read = lan78xx_mdio_read;
-       dev->mii.mdio_write = lan78xx_mdio_write;
-       dev->mii.phy_id_mask = 0x1f;
-       dev->mii.reg_num_mask = 0x1f;
-       dev->mii.phy_id = INTERNAL_PHY_ID;
-       dev->mii.supports_gmii = true;
+       struct lan78xx_net *dev = bus->priv;
+       u32 val, addr;
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* set the address, index & direction (read from PHY) */
+       addr = mii_access(phy_id, idx, MII_READ);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+       ret = (int)(val & 0xFFFF);
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+       return ret;
+}
+
+static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
+                                u16 regval)
+{
+       struct lan78xx_net *dev = bus->priv;
+       u32 val, addr;
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       val = (u32)regval;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       /* set the address, index & direction (write to PHY) */
+       addr = mii_access(phy_id, idx, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+       return 0;
+}
+
+static int lan78xx_mdio_init(struct lan78xx_net *dev)
+{
+       int ret;
+       int i;
+
+       dev->mdiobus = mdiobus_alloc();
+       if (!dev->mdiobus) {
+               netdev_err(dev->net, "can't allocate MDIO bus\n");
+               return -ENOMEM;
+       }
+
+       dev->mdiobus->priv = (void *)dev;
+       dev->mdiobus->read = lan78xx_mdiobus_read;
+       dev->mdiobus->write = lan78xx_mdiobus_write;
+       dev->mdiobus->name = "lan78xx-mdiobus";
+
+       snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+                dev->udev->bus->busnum, dev->udev->devnum);
+
+       dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+       if (!dev->mdiobus->irq) {
+               ret = -ENOMEM;
+               goto exit1;
+       }
+
+       /* handle our own interrupt */
+       for (i = 0; i < PHY_MAX_ADDR; i++)
+               dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
+
+       switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
+       case 0x78000000:
+       case 0x78500000:
+               /* set to internal PHY id */
+               dev->mdiobus->phy_mask = ~(1 << 1);
+               break;
+       }
+
+       ret = mdiobus_register(dev->mdiobus);
+       if (ret) {
+               netdev_err(dev->net, "can't register MDIO bus\n");
+               goto exit2;
+       }
+
+       netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
+       return 0;
+exit2:
+       kfree(dev->mdiobus->irq);
+exit1:
+       mdiobus_free(dev->mdiobus);
+       return ret;
+}
+
+static void lan78xx_remove_mdio(struct lan78xx_net *dev)
+{
+       mdiobus_unregister(dev->mdiobus);
+       kfree(dev->mdiobus->irq);
+       mdiobus_free(dev->mdiobus);
+}
+
+static void lan78xx_link_status_change(struct net_device *net)
+{
+       /* nothing to do */
 }
 
 static int lan78xx_phy_init(struct lan78xx_net *dev)
 {
-       int temp;
-       struct mii_if_info *mii = &dev->mii;
+       int ret;
+       struct phy_device *phydev = dev->net->phydev;
 
-       if ((!mii->mdio_write) || (!mii->mdio_read))
-               return -EOPNOTSUPP;
+       phydev = phy_find_first(dev->mdiobus);
+       if (!phydev) {
+               netdev_err(dev->net, "no PHY found\n");
+               return -EIO;
+       }
 
-       temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
-       temp |= ADVERTISE_ALL;
-       mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
-                       temp | ADVERTISE_CSMA |
-                       ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+       ret = phy_connect_direct(dev->net, phydev,
+                                lan78xx_link_status_change,
+                                PHY_INTERFACE_MODE_GMII);
+       if (ret) {
+               netdev_err(dev->net, "can't attach PHY to %s\n",
+                          dev->mdiobus->id);
+               return -EIO;
+       }
 
        /* set to AUTOMDIX */
-       mii->mdio_write(mii->dev, mii->phy_id,
-                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
-       temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
-       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
-       mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
-                       temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
-       mii->mdio_write(mii->dev, mii->phy_id,
-                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
-       dev->mdix_ctrl = ETH_TP_MDI_AUTO;
-
-       /* MAC doesn't support 1000HD */
-       temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
-       mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
-                       temp & ~ADVERTISE_1000HALF);
-
-       /* clear interrupt */
-       mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
-       mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
-                       PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
-                       PHY_VTSE_INT_MASK_LINK_CHANGE_);
+       lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
+
+       /* MAC doesn't support 1000T Half */
+       phydev->supported &= ~SUPPORTED_1000baseT_Half;
+       phydev->supported |= (SUPPORTED_10baseT_Half |
+                             SUPPORTED_10baseT_Full |
+                             SUPPORTED_100baseT_Half |
+                             SUPPORTED_100baseT_Full |
+                             SUPPORTED_1000baseT_Full |
+                             SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+       genphy_config_aneg(phydev);
+
+       /* Workaround to enable PHY interrupt.
+        * phy_start_interrupts() is API for requesting and enabling
+        * PHY interrupt. However, USB-to-Ethernet device can't use
+        * request_irq() called in phy_start_interrupts().
+        * Set PHY to PHY_HALTED and call phy_start()
+        * to make a call to phy_enable_interrupts()
+        */
+       phy_stop(phydev);
+       phy_start(phydev);
 
        netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
 
@@ -1930,6 +1830,10 @@ static int lan78xx_reset(struct lan78xx_net *dev)
 
        lan78xx_init_mac_address(dev);
 
+       /* save DEVID for later usage */
+       ret = lan78xx_read_reg(dev, ID_REV, &buf);
+       dev->devid = buf;
+
        /* Respond to the IN token with a NAK */
        ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
        buf |= USB_CFG_BIR_;
@@ -2002,23 +1906,12 @@ static int lan78xx_reset(struct lan78xx_net *dev)
                        netdev_warn(dev->net, "timeout waiting for PHY Reset");
                        return -EIO;
                }
-       } while (buf & PMT_CTL_PHY_RST_);
-
-       lan78xx_mii_init(dev);
-
-       ret = lan78xx_phy_init(dev);
+       } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
 
        ret = lan78xx_read_reg(dev, MAC_CR, &buf);
-
-       buf |= MAC_CR_GMII_EN_;
        buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
-
        ret = lan78xx_write_reg(dev, MAC_CR, buf);
 
-       /* enable on PHY */
-       if (buf & MAC_CR_EEE_EN_)
-               lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);
-
        /* enable PHY interrupts */
        ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
        buf |= INT_ENP_PHY_INT;
@@ -2042,9 +1935,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        buf |= FCT_RX_CTL_EN_;
        ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
 
-       if (!mii_nway_restart(&dev->mii))
-               netif_dbg(dev, link, dev->net, "autoneg initiated");
-
        return 0;
 }
 
@@ -2061,6 +1951,10 @@ static int lan78xx_open(struct net_device *net)
        if (ret < 0)
                goto done;
 
+       ret = lan78xx_phy_init(dev);
+       if (ret < 0)
+               goto done;
+
        /* for Link Check */
        if (dev->urb_intr) {
                ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
@@ -2115,6 +2009,10 @@ int lan78xx_stop(struct net_device *net)
 {
        struct lan78xx_net              *dev = netdev_priv(net);
 
+       phy_stop(net->phydev);
+       phy_disconnect(net->phydev);
+       net->phydev = NULL;
+
        clear_bit(EVENT_DEV_OPEN, &dev->flags);
        netif_stop_queue(net);
 
@@ -2395,6 +2293,8 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
        /* Init all registers */
        ret = lan78xx_reset(dev);
 
+       lan78xx_mdio_init(dev);
+
        dev->net->flags |= IFF_MULTICAST;
 
        pdata->wol = WAKE_MAGIC;
@@ -2406,6 +2306,8 @@ static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
 {
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
 
+       lan78xx_remove_mdio(dev);
+
        if (pdata) {
                netif_dbg(dev, ifdown, dev->net, "free pdata");
                kfree(pdata);
@@ -3459,6 +3361,9 @@ int lan78xx_reset_resume(struct usb_interface *intf)
        struct lan78xx_net *dev = usb_get_intfdata(intf);
 
        lan78xx_reset(dev);
+
+       lan78xx_phy_init(dev);
+
        return lan78xx_resume(intf);
 }
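As a hedged illustration of what the phylib conversion buys (not part of the patch): once the mii_bus registered by lan78xx_mdio_init() is in place, generic phy_read()/phy_write() calls are routed through lan78xx_mdiobus_read()/lan78xx_mdiobus_write(), so PHY register access no longer goes through the removed struct mii_if_info plumbing. example_check_link() below is a hypothetical helper, sketched against the standard linux/phy.h and linux/mii.h definitions.

#include <linux/mii.h>
#include <linux/phy.h>

/* hypothetical helper, for illustration only */
static int example_check_link(struct phy_device *phydev)
{
        int bmsr;

        /* resolves to bus->read(), i.e. lan78xx_mdiobus_read() here */
        bmsr = phy_read(phydev, MII_BMSR);
        if (bmsr < 0)
                return bmsr;

        /* resolves to bus->write(), i.e. lan78xx_mdiobus_write() here */
        phy_write(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);

        return !!(bmsr & BMSR_LSTATUS);
}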
 
index ae7562ee72ad01823a3df47721bb744d3152dec1..a93fb653e7c52f415bd0aae99f7cd2619c8cf47b 100644 (file)
 #define LTM_INACTIVE1_TIMER10_         (0x0000FFFF)
 
 #define MAC_CR                         (0x100)
-#define MAC_CR_GMII_EN_                        (0x00080000)
 #define MAC_CR_EEE_TX_CLK_STOP_EN_     (0x00040000)
 #define MAC_CR_EEE_EN_                 (0x00020000)
 #define MAC_CR_EEE_TLAR_EN_            (0x00010000)
 #define OTP_TPVSR_VAL                  (OTP_BASE_ADDR + 4 * 0x3A)
 #define OTP_TPVHR_VAL                  (OTP_BASE_ADDR + 4 * 0x3B)
 #define OTP_TPVSA_VAL                  (OTP_BASE_ADDR + 4 * 0x3C)
-
-#define PHY_ID1                                (0x02)
-#define PHY_ID2                                (0x03)
-
-#define PHY_DEV_ID_OUI_VTSE            (0x04001C)
-#define PHY_DEV_ID_MODEL_VTSE_8502     (0x23)
-
-#define PHY_AUTONEG_ADV                        (0x04)
-#define NWAY_AR_NEXT_PAGE_             (0x8000)
-#define NWAY_AR_REMOTE_FAULT_          (0x2000)
-#define NWAY_AR_ASM_DIR_               (0x0800)
-#define NWAY_AR_PAUSE_                 (0x0400)
-#define NWAY_AR_100T4_CAPS_            (0x0200)
-#define NWAY_AR_100TX_FD_CAPS_         (0x0100)
-#define NWAY_AR_SELECTOR_FIELD_                (0x001F)
-#define NWAY_AR_100TX_HD_CAPS_         (0x0080)
-#define NWAY_AR_10T_FD_CAPS_           (0x0040)
-#define NWAY_AR_10T_HD_CAPS_           (0x0020)
-#define NWAY_AR_ALL_CAPS_              (NWAY_AR_10T_HD_CAPS_ | \
-                                        NWAY_AR_10T_FD_CAPS_ | \
-                                        NWAY_AR_100TX_HD_CAPS_ | \
-                                        NWAY_AR_100TX_FD_CAPS_)
-#define NWAY_AR_PAUSE_MASK             (NWAY_AR_PAUSE_ | NWAY_AR_ASM_DIR_)
-
-#define PHY_LP_ABILITY                 (0x05)
-#define NWAY_LPAR_NEXT_PAGE_           (0x8000)
-#define NWAY_LPAR_ACKNOWLEDGE_         (0x4000)
-#define NWAY_LPAR_REMOTE_FAULT_                (0x2000)
-#define NWAY_LPAR_ASM_DIR_             (0x0800)
-#define NWAY_LPAR_PAUSE_               (0x0400)
-#define NWAY_LPAR_100T4_CAPS_          (0x0200)
-#define NWAY_LPAR_100TX_FD_CAPS_       (0x0100)
-#define NWAY_LPAR_100TX_HD_CAPS_       (0x0080)
-#define NWAY_LPAR_10T_FD_CAPS_         (0x0040)
-#define NWAY_LPAR_10T_HD_CAPS_         (0x0020)
-#define NWAY_LPAR_SELECTOR_FIELD_      (0x001F)
-
-#define PHY_AUTONEG_EXP                        (0x06)
-#define NWAY_ER_PAR_DETECT_FAULT_      (0x0010)
-#define NWAY_ER_LP_NEXT_PAGE_CAPS_     (0x0008)
-#define NWAY_ER_NEXT_PAGE_CAPS_                (0x0004)
-#define NWAY_ER_PAGE_RXD_              (0x0002)
-#define NWAY_ER_LP_NWAY_CAPS_          (0x0001)
-
-#define PHY_NEXT_PAGE_TX               (0x07)
-#define NPTX_NEXT_PAGE_                        (0x8000)
-#define NPTX_MSG_PAGE_                 (0x2000)
-#define NPTX_ACKNOWLDGE2_              (0x1000)
-#define NPTX_TOGGLE_                   (0x0800)
-#define NPTX_MSG_CODE_FIELD_           (0x0001)
-
-#define PHY_LP_NEXT_PAGE               (0x08)
-#define LP_RNPR_NEXT_PAGE_             (0x8000)
-#define LP_RNPR_ACKNOWLDGE_            (0x4000)
-#define LP_RNPR_MSG_PAGE_              (0x2000)
-#define LP_RNPR_ACKNOWLDGE2_           (0x1000)
-#define LP_RNPR_TOGGLE_                        (0x0800)
-#define LP_RNPR_MSG_CODE_FIELD_                (0x0001)
-
-#define PHY_1000T_CTRL                 (0x09)
-#define CR_1000T_TEST_MODE_4_          (0x8000)
-#define CR_1000T_TEST_MODE_3_          (0x6000)
-#define CR_1000T_TEST_MODE_2_          (0x4000)
-#define CR_1000T_TEST_MODE_1_          (0x2000)
-#define CR_1000T_MS_ENABLE_            (0x1000)
-#define CR_1000T_MS_VALUE_             (0x0800)
-#define CR_1000T_REPEATER_DTE_         (0x0400)
-#define CR_1000T_FD_CAPS_              (0x0200)
-#define CR_1000T_HD_CAPS_              (0x0100)
-#define CR_1000T_ASYM_PAUSE_           (0x0080)
-#define CR_1000T_TEST_MODE_NORMAL_     (0x0000)
-
-#define PHY_1000T_STATUS               (0x0A)
-#define SR_1000T_MS_CONFIG_FAULT_      (0x8000)
-#define SR_1000T_MS_CONFIG_RES_                (0x4000)
-#define SR_1000T_LOCAL_RX_STATUS_      (0x2000)
-#define SR_1000T_REMOTE_RX_STATUS_     (0x1000)
-#define SR_1000T_LP_FD_CAPS_           (0x0800)
-#define SR_1000T_LP_HD_CAPS_           (0x0400)
-#define SR_1000T_ASYM_PAUSE_DIR_       (0x0100)
-#define SR_1000T_IDLE_ERROR_CNT_       (0x00FF)
-#define SR_1000T_REMOTE_RX_STATUS_SHIFT                12
-#define SR_1000T_LOCAL_RX_STATUS_SHIFT         13
-#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT  5
-#define FFE_IDLE_ERR_COUNT_TIMEOUT_20          20
-#define FFE_IDLE_ERR_COUNT_TIMEOUT_100         100
-
-#define PHY_EXT_STATUS                 (0x0F)
-#define IEEE_ESR_1000X_FD_CAPS_                (0x8000)
-#define IEEE_ESR_1000X_HD_CAPS_                (0x4000)
-#define IEEE_ESR_1000T_FD_CAPS_                (0x2000)
-#define IEEE_ESR_1000T_HD_CAPS_                (0x1000)
-#define PHY_TX_POLARITY_MASK_          (0x0100)
-#define PHY_TX_NORMAL_POLARITY_                (0x0000)
-#define AUTO_POLARITY_DISABLE_         (0x0010)
-
-#define PHY_MMD_CTL                    (0x0D)
-#define PHY_MMD_CTRL_OP_MASK_          (0xC000)
-#define PHY_MMD_CTRL_OP_REG_           (0x0000)
-#define PHY_MMD_CTRL_OP_DNI_           (0x4000)
-#define PHY_MMD_CTRL_OP_DPIRW_         (0x8000)
-#define PHY_MMD_CTRL_OP_DPIWO_         (0xC000)
-#define PHY_MMD_CTRL_DEV_ADDR_MASK_    (0x001F)
-
-#define PHY_MMD_REG_DATA               (0x0E)
-
-/* VTSE Vendor Specific registers */
-#define PHY_VTSE_BYPASS                                (0x12)
-#define PHY_VTSE_BYPASS_DISABLE_PAIR_SWAP_     (0x0020)
-
-#define PHY_VTSE_INT_MASK                      (0x19)
-#define PHY_VTSE_INT_MASK_MDINTPIN_EN_         (0x8000)
-#define PHY_VTSE_INT_MASK_SPEED_CHANGE_                (0x4000)
-#define PHY_VTSE_INT_MASK_LINK_CHANGE_         (0x2000)
-#define PHY_VTSE_INT_MASK_FDX_CHANGE_          (0x1000)
-#define PHY_VTSE_INT_MASK_AUTONEG_ERR_         (0x0800)
-#define PHY_VTSE_INT_MASK_AUTONEG_DONE_                (0x0400)
-#define PHY_VTSE_INT_MASK_POE_DETECT_          (0x0200)
-#define PHY_VTSE_INT_MASK_SYMBOL_ERR_          (0x0100)
-#define PHY_VTSE_INT_MASK_FAST_LINK_FAIL_      (0x0080)
-#define PHY_VTSE_INT_MASK_WOL_EVENT_           (0x0040)
-#define PHY_VTSE_INT_MASK_EXTENDED_INT_                (0x0020)
-#define PHY_VTSE_INT_MASK_RESERVED_            (0x0010)
-#define PHY_VTSE_INT_MASK_FALSE_CARRIER_       (0x0008)
-#define PHY_VTSE_INT_MASK_LINK_SPEED_DS_       (0x0004)
-#define PHY_VTSE_INT_MASK_MASTER_SLAVE_DONE_   (0x0002)
-#define PHY_VTSE_INT_MASK_RX__ER_              (0x0001)
-
-#define PHY_VTSE_INT_STS                       (0x1A)
-#define PHY_VTSE_INT_STS_INT_ACTIVE_           (0x8000)
-#define PHY_VTSE_INT_STS_SPEED_CHANGE_         (0x4000)
-#define PHY_VTSE_INT_STS_LINK_CHANGE_          (0x2000)
-#define PHY_VTSE_INT_STS_FDX_CHANGE_           (0x1000)
-#define PHY_VTSE_INT_STS_AUTONEG_ERR_          (0x0800)
-#define PHY_VTSE_INT_STS_AUTONEG_DONE_         (0x0400)
-#define PHY_VTSE_INT_STS_POE_DETECT_           (0x0200)
-#define PHY_VTSE_INT_STS_SYMBOL_ERR_           (0x0100)
-#define PHY_VTSE_INT_STS_FAST_LINK_FAIL_       (0x0080)
-#define PHY_VTSE_INT_STS_WOL_EVENT_            (0x0040)
-#define PHY_VTSE_INT_STS_EXTENDED_INT_         (0x0020)
-#define PHY_VTSE_INT_STS_RESERVED_             (0x0010)
-#define PHY_VTSE_INT_STS_FALSE_CARRIER_                (0x0008)
-#define PHY_VTSE_INT_STS_LINK_SPEED_DS_                (0x0004)
-#define PHY_VTSE_INT_STS_MASTER_SLAVE_DONE_    (0x0002)
-#define PHY_VTSE_INT_STS_RX_ER_                        (0x0001)
-
-/* VTSE PHY registers */
-#define PHY_EXT_GPIO_PAGE              (0x1F)
-#define PHY_EXT_GPIO_PAGE_SPACE_0      (0x0000)
-#define PHY_EXT_GPIO_PAGE_SPACE_1      (0x0001)
-#define PHY_EXT_GPIO_PAGE_SPACE_2      (0x0002)
-
-/* Extended Register Page 1 space */
-#define PHY_EXT_MODE_CTRL              (0x13)
-#define PHY_EXT_MODE_CTRL_MDIX_MASK_   (0x000C)
-#define PHY_EXT_MODE_CTRL_AUTO_MDIX_   (0x0000)
-#define PHY_EXT_MODE_CTRL_MDI_         (0x0008)
-#define PHY_EXT_MODE_CTRL_MDI_X_       (0x000C)
-
-#define PHY_ANA_10BASE_T_HD            0x01
-#define PHY_ANA_10BASE_T_FD            0x02
-#define PHY_ANA_100BASE_TX_HD          0x04
-#define PHY_ANA_100BASE_TX_FD          0x08
-#define PHY_ANA_1000BASE_T_FD          0x10
-#define PHY_ANA_ALL_SUPPORTED_MEDIA    (PHY_ANA_10BASE_T_HD |   \
-                                        PHY_ANA_10BASE_T_FD |   \
-                                        PHY_ANA_100BASE_TX_HD | \
-                                        PHY_ANA_100BASE_TX_FD | \
-                                        PHY_ANA_1000BASE_T_FD)
-/* PHY MMD registers */
-#define PHY_MMD_DEV_3                          3
-
-#define PHY_EEE_PCS_STATUS                     (0x1)
-#define PHY_EEE_PCS_STATUS_TX_LPI_RCVD_                ((WORD)0x0800)
-#define PHY_EEE_PCS_STATUS_RX_LPI_RCVD_                ((WORD)0x0400)
-#define PHY_EEE_PCS_STATUS_TX_LPI_IND_         ((WORD)0x0200)
-#define PHY_EEE_PCS_STATUS_RX_LPI_IND_         ((WORD)0x0100)
-#define PHY_EEE_PCS_STATUS_PCS_RCV_LNK_STS_    ((WORD)0x0004)
-
-#define PHY_EEE_CAPABILITIES                   (0x14)
-#define PHY_EEE_CAPABILITIES_1000BT_EEE_       ((WORD)0x0004)
-#define PHY_EEE_CAPABILITIES_100BT_EEE_                ((WORD)0x0002)
-
-#define PHY_MMD_DEV_7                          7
-
-#define PHY_EEE_ADVERTISEMENT                  (0x3C)
-#define PHY_EEE_ADVERTISEMENT_1000BT_EEE_      ((WORD)0x0004)
-#define PHY_EEE_ADVERTISEMENT_100BT_EEE_       ((WORD)0x0002)
-
-#define PHY_EEE_LP_ADVERTISEMENT               (0x3D)
-#define PHY_EEE_1000BT_EEE_CAPABLE_            ((WORD)0x0004)
-#define PHY_EEE_100BT_EEE_CAPABLE_             ((WORD)0x0002)
 #endif /* _LAN78XX_H */
index 8c9ab5ebea2318f86e522a3c9129b36c95880b2e..637e9fd1e14ce87df0848caf5ec9f390d1ad883a 100644 (file)
@@ -253,7 +253,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 /* modelled after ip_finish_output2 */
-static int vrf_finish_output(struct sock *sk, struct sk_buff *skb)
+static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
@@ -298,14 +298,15 @@ err:
 static int vrf_output(struct sock *sk, struct sk_buff *skb)
 {
        struct net_device *dev = skb_dst(skb)->dev;
+       struct net *net = dev_net(dev);
 
-       IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
+       IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 
        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);
 
-       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
-                           NULL, dev,
+       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+                           net, sk, skb, NULL, dev,
                            vrf_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
index f821a97d78278feed765d08d886a4665d8795bd5..9bf63c27a9b7af80dc3ba46d2e7633c9f9391eb8 100644 (file)
@@ -1819,19 +1819,22 @@ again:
                goto destroy_ring;
        }
 
-       if (num_queues == 1) {
-               err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
-               if (err)
-                       goto abort_transaction_no_dev_fatal;
-       } else {
+       if (xenbus_exists(XBT_NIL,
+                         info->xbdev->otherend, "multi-queue-max-queues")) {
                /* Write the number of queues */
-               err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
-                                   "%u", num_queues);
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "multi-queue-num-queues", "%u", num_queues);
                if (err) {
                        message = "writing multi-queue-num-queues";
                        goto abort_transaction_no_dev_fatal;
                }
+       }
 
+       if (num_queues == 1) {
+               err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
+               if (err)
+                       goto abort_transaction_no_dev_fatal;
+       } else {
                /* Write the keys for each queue */
                for (i = 0; i < num_queues; ++i) {
                        queue = &info->queues[i];
index 88a00694eda5fc394bf4e1ef52ac1d51b9f02111..b791405958b4c7396e2b283f74ca280753451f34 100644 (file)
@@ -2212,12 +2212,8 @@ int dev_open(struct net_device *dev);
 int dev_close(struct net_device *dev);
 int dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
-int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb);
-int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb);
-static inline int dev_queue_xmit(struct sk_buff *skb)
-{
-       return dev_queue_xmit_sk(skb->sk, skb);
-}
+int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2989,11 +2985,7 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
 
 int netif_rx(struct sk_buff *skb);
 int netif_rx_ni(struct sk_buff *skb);
-int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb);
-static inline int netif_receive_skb(struct sk_buff *skb)
-{
-       return netif_receive_skb_sk(skb->sk, skb);
-}
+int netif_receive_skb(struct sk_buff *skb);
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
 struct sk_buff *napi_get_frags(struct napi_struct *napi);
index 36a652531791e7abfaf5fe55afada5a36425ea75..0b4d4560f33d30e46c9a04ffcc3fc0f268b12892 100644 (file)
@@ -54,8 +54,9 @@ struct nf_hook_state {
        struct net_device *in;
        struct net_device *out;
        struct sock *sk;
+       struct net *net;
        struct list_head *hook_list;
-       int (*okfn)(struct sock *, struct sk_buff *);
+       int (*okfn)(struct net *, struct sock *, struct sk_buff *);
 };
 
 static inline void nf_hook_state_init(struct nf_hook_state *p,
@@ -65,7 +66,8 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
                                      struct net_device *indev,
                                      struct net_device *outdev,
                                      struct sock *sk,
-                                     int (*okfn)(struct sock *, struct sk_buff *))
+                                     struct net *net,
+                                     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
 {
        p->hook = hook;
        p->thresh = thresh;
@@ -73,6 +75,7 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
        p->in = indev;
        p->out = outdev;
        p->sk = sk;
+       p->net = net;
        p->hook_list = hook_list;
        p->okfn = okfn;
 }
@@ -167,32 +170,32 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
  *     value indicates the packet has been consumed by the hook.
  */
 static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+                                struct net *net,
                                 struct sock *sk,
                                 struct sk_buff *skb,
                                 struct net_device *indev,
                                 struct net_device *outdev,
-                                int (*okfn)(struct sock *, struct sk_buff *),
+                                int (*okfn)(struct net *, struct sock *, struct sk_buff *),
                                 int thresh)
 {
-       struct net *net = dev_net(indev ? indev : outdev);
        struct list_head *hook_list = &net->nf.hooks[pf][hook];
 
        if (nf_hook_list_active(hook_list, pf, hook)) {
                struct nf_hook_state state;
 
                nf_hook_state_init(&state, hook_list, hook, thresh,
-                                  pf, indev, outdev, sk, okfn);
+                                  pf, indev, outdev, sk, net, okfn);
                return nf_hook_slow(skb, &state);
        }
        return 1;
 }
 
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
-                         struct sk_buff *skb, struct net_device *indev,
-                         struct net_device *outdev,
-                         int (*okfn)(struct sock *, struct sk_buff *))
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
+                         struct sock *sk, struct sk_buff *skb,
+                         struct net_device *indev, struct net_device *outdev,
+                         int (*okfn)(struct net *, struct sock *, struct sk_buff *))
 {
-       return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN);
+       return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN);
 }
                    
 /* Activate hook; either okfn or kfree_skb called, unless a hook
@@ -213,36 +216,38 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
 */
 
 static inline int
-NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk,
+NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
               struct sk_buff *skb, struct net_device *in,
               struct net_device *out,
-              int (*okfn)(struct sock *, struct sk_buff *), int thresh)
+              int (*okfn)(struct net *, struct sock *, struct sk_buff *),
+              int thresh)
 {
-       int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh);
+       int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh);
        if (ret == 1)
-               ret = okfn(sk, skb);
+               ret = okfn(net, sk, skb);
        return ret;
 }
 
 static inline int
-NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk,
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
-            int (*okfn)(struct sock *, struct sk_buff *), bool cond)
+            int (*okfn)(struct net *, struct sock *, struct sk_buff *),
+            bool cond)
 {
        int ret;
 
        if (!cond ||
-           ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1))
-               ret = okfn(sk, skb);
+           ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1))
+               ret = okfn(net, sk, skb);
        return ret;
 }
 
 static inline int
-NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb,
+NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
        struct net_device *in, struct net_device *out,
-       int (*okfn)(struct sock *, struct sk_buff *))
+       int (*okfn)(struct net *, struct sock *, struct sk_buff *))
 {
-       return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN);
+       return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN);
 }
 
 /* Call setsockopt() */
@@ -342,21 +347,12 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 }
 
 #else /* !CONFIG_NETFILTER */
-#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
-#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb)
-static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
-                                struct sock *sk,
-                                struct sk_buff *skb,
-                                struct net_device *indev,
-                                struct net_device *outdev,
-                                int (*okfn)(struct sock *sk, struct sk_buff *), int thresh)
-{
-       return okfn(sk, skb);
-}
-static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
-                         struct sk_buff *skb, struct net_device *indev,
-                         struct net_device *outdev,
-                         int (*okfn)(struct sock *, struct sk_buff *))
+#define NF_HOOK(pf, hook, net, sk, skb, indev, outdev, okfn) (okfn)(net, sk, skb)
+#define NF_HOOK_COND(pf, hook, net, sk, skb, indev, outdev, okfn, cond) (okfn)(net, sk, skb)
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
+                         struct sock *sk, struct sk_buff *skb,
+                         struct net_device *indev, struct net_device *outdev,
+                         int (*okfn)(struct net *, struct sock *, struct sk_buff *))
 {
        return 1;
 }
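To make the hook API change concrete, here is a minimal hedged sketch (not taken from this patch): the okfn now has a (struct net *, struct sock *, struct sk_buff *) signature, and NF_HOOK() callers pass the namespace explicitly instead of having nf_hook_thresh() derive it from indev/outdev. example_finish() and example_send() are hypothetical names.

#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/skbuff.h>

/* hypothetical okfn using the new three-argument signature */
static int example_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        return dev_queue_xmit(skb);
}

/* hypothetical caller: the net namespace is now passed explicitly */
static void example_send(struct net_device *dev, struct sk_buff *skb)
{
        skb->dev = dev;
        NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
                dev_net(dev), NULL, skb, NULL, dev,
                example_finish);
}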
index 2437b8a5d7a945d2a3ab5c4ff67dfd6f260bc95a..2ed40c402b5e446f9a06de983de8f9834d6c8b07 100644 (file)
@@ -17,7 +17,7 @@ enum nf_br_hook_priorities {
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 
-int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
+int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 static inline void br_drop_fake_rtable(struct sk_buff *skb)
 {
index cb0727fe2b3dbc5894b98867370615295edd3c87..187feabe557c379b2a78bb4467623722e7ed4996 100644 (file)
@@ -17,7 +17,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
 
        nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
                           NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
-                          skb->dev, NULL, NULL);
+                          skb->dev, NULL, dev_net(skb->dev), NULL);
        return nf_hook_slow(skb, &state);
 }
 
index 48c3696e86457172ff9e9a907db15e44b7e92bb5..937b97893d5f4877ecffc3f17de9bcb2fe3e3659 100644 (file)
@@ -113,6 +113,7 @@ struct tcp_request_sock {
        struct inet_request_sock        req;
        const struct tcp_request_sock_ops *af_specific;
        bool                            tfo_listener;
+       u32                             txhash;
        u32                             rcv_isn;
        u32                             snt_isn;
        u32                             snt_synack; /* synack sent time */
index d0424269313fe2a84df0db71e07271e033756c9a..5e902fc3f4ebc54d67329cb1430e5d873b3e15de 100644 (file)
@@ -18,11 +18,11 @@ struct dn_neigh {
 
 void dn_neigh_init(void);
 void dn_neigh_cleanup(void);
-int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb);
-int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb);
+int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
 void dn_neigh_pointopoint_hello(struct sk_buff *skb);
 int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
-int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb);
+int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 extern struct neigh_table dn_neigh_table;
 
index 9261d928303d475a8ef2772144f9f70e0febda49..df0481a070290ae5097b2b4931da55f997b158b2 100644 (file)
@@ -454,13 +454,13 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
 }
 
 /* Output packet to network from transport.  */
-static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
+static inline int dst_output(struct sock *sk, struct sk_buff *skb)
 {
        return skb_dst(skb)->output(sk, skb);
 }
-static inline int dst_output(struct sk_buff *skb)
+static inline int dst_output_okfn(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       return dst_output_sk(skb->sk, skb);
+       return dst_output(sk, skb);
 }
 
 /* Input packet from network to transport.  */
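A related hedged sketch (again not from this patch) of why dst_output_okfn() is introduced: it adapts the unchanged dst_output(sk, skb) path to the new three-argument okfn type, so output code can hand it straight to an NF_HOOK() invocation. example_local_out() is a hypothetical caller modelled on the pattern used elsewhere in this series.

#include <linux/netfilter.h>
#include <net/dst.h>

/* hypothetical caller, for illustration only */
static int example_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
                       net, sk, skb, NULL, skb_dst(skb)->dev,
                       dst_output_okfn);
}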
index 711cca428cc8cd56b40de704265ad73262a6d2a4..384a93cf07d601ff9c35ad7dca056be8897b8be6 100644 (file)
@@ -807,7 +807,7 @@ static inline u8 ip6_tclass(__be32 flowinfo)
 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
             struct packet_type *pt, struct net_device *orig_dev);
 
-int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb);
+int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 /*
  *     upper-layer output functions
index d4c6b5f30acd936d863b1a5a5e89ef2443ad3943..8fe266504900165daf1d749acc1e460d96f9e167 100644 (file)
@@ -31,7 +31,7 @@ static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
        skb->network_header -= len;
 }
 
-int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb);
+int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
 {
index 444faa89a55fdd5e2b581d0ae93c3553268fa7e5..4c79ce8c1f92f2d47eb87ffacd55fa01292e7378 100644 (file)
@@ -251,7 +251,7 @@ struct tcf_proto {
 struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
-       u16                     _pad;
+       u16                     tc_classid;
 #define QDISC_CB_PRIV_LEN 20
        unsigned char           data[QDISC_CB_PRIV_LEN];
 };
@@ -402,6 +402,7 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
 bool tcf_destroy(struct tcf_proto *tp, bool force);
 void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+int skb_do_redirect(struct sk_buff *);
 
 /* Reset all TX qdiscs greater then index of a device.  */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
index 7aa78440559a47db8e5ccc8ea69a34f87b90c125..94dff7f566f5cb2a7d2874f376f8d1a723ef7e13 100644 (file)
@@ -1654,12 +1654,16 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
 kuid_t sock_i_uid(struct sock *sk);
 unsigned long sock_i_ino(struct sock *sk);
 
-static inline void sk_set_txhash(struct sock *sk)
+static inline u32 net_tx_rndhash(void)
 {
-       sk->sk_txhash = prandom_u32();
+       u32 v = prandom_u32();
+
+       return v ?: 1;
+}
 
-       if (unlikely(!sk->sk_txhash))
-               sk->sk_txhash = 1;
+static inline void sk_set_txhash(struct sock *sk)
+{
+       sk->sk_txhash = net_tx_rndhash();
 }
 
 static inline void sk_rethink_txhash(struct sock *sk)
index 312e3fee9ccfc098f70cd8ec6edb87881e1d1f99..fd176106909a8111177250d70e684fa67e6f3f02 100644 (file)
@@ -296,8 +296,6 @@ struct xfrm_policy_afinfo {
                                                  struct flowi *fl,
                                                  int reverse);
        int                     (*get_tos)(const struct flowi *fl);
-       void                    (*init_dst)(struct net *net,
-                                           struct xfrm_dst *dst);
        int                     (*init_path)(struct xfrm_dst *path,
                                             struct dst_entry *dst,
                                             int nfheader_len);
index 92a48e2d54619f80113c13f9fff45f98ddfabe30..4ec0b5488294e26f3300c3980ab6f14ccd47ca55 100644 (file)
@@ -272,6 +272,14 @@ enum bpf_func_id {
        BPF_FUNC_skb_get_tunnel_key,
        BPF_FUNC_skb_set_tunnel_key,
        BPF_FUNC_perf_event_read,       /* u64 bpf_perf_event_read(&map, index) */
+       /**
+        * bpf_redirect(ifindex, flags) - redirect to another netdev
+        * @ifindex: ifindex of the net device
+        * @flags: bit 0 - if set, redirect to ingress instead of egress
+        *         other bits - reserved
+        * Return: TC_ACT_REDIRECT
+        */
+       BPF_FUNC_redirect,
        __BPF_FUNC_MAX_ID,
 };
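For the new helper, a hedged sketch (not part of this patch) of a minimal cls_bpf program that uses it. The helper-stub declaration follows the samples/bpf convention, and the ifindex value is made up; per the comment above, the helper returns TC_ACT_REDIRECT, which the ingress path then acts on.

#include <linux/bpf.h>

/* samples/bpf-style helper stub (assumption, not defined in this header) */
static int (*bpf_redirect)(int ifindex, int flags) = (void *) BPF_FUNC_redirect;

__attribute__((section("classifier"), used))
int redirect_example(struct __sk_buff *skb)
{
        /* flags bit 0 clear: redirect to the egress path of ifindex 2 */
        return bpf_redirect(2, 0);
}

char _license[] __attribute__((section("license"), used)) = "GPL";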
 
@@ -293,6 +301,7 @@ struct __sk_buff {
        __u32 tc_index;
        __u32 cb[5];
        __u32 hash;
+       __u32 tc_classid;
 };
 
 struct bpf_tunnel_key {
index 4f0d1bc3647dc266de5cf017efafe37a2f4f2e2a..439873775d49196a93b6f6b7dfaa820574fd462e 100644 (file)
@@ -87,6 +87,7 @@ enum {
 #define TC_ACT_STOLEN          4
 #define TC_ACT_QUEUED          5
 #define TC_ACT_REPEAT          6
+#define TC_ACT_REDIRECT                7
 #define TC_ACT_JUMP            0x10000000
 
 /* Action type identifiers*/
@@ -373,6 +374,8 @@ enum {
 
 /* BPF classifier */
 
+#define TCA_BPF_FLAG_ACT_DIRECT                (1 << 0)
+
 enum {
        TCA_BPF_UNSPEC,
        TCA_BPF_ACT,
@@ -382,6 +385,7 @@ enum {
        TCA_BPF_OPS,
        TCA_BPF_FD,
        TCA_BPF_NAME,
+       TCA_BPF_FLAGS,
        __TCA_BPF_MAX,
 };
 
index fa7bfced888ec0f3b7d8a5ceff17b597cbe9b79e..48afca729ed7ae3286d7c1c217bcc8b59894fe56 100644 (file)
@@ -35,7 +35,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
                p->state == BR_STATE_FORWARDING;
 }
 
-int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
+int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        if (!is_skb_forwardable(skb->dev, skb))
                goto drop;
@@ -65,10 +65,10 @@ drop:
 }
 EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
 
-int br_forward_finish(struct sock *sk, struct sk_buff *skb)
+int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, sk, skb,
-                      NULL, skb->dev,
+       return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
+                      net, sk, skb, NULL, skb->dev,
                       br_dev_queue_push_xmit);
 
 }
@@ -92,8 +92,8 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
                return;
        }
 
-       NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
-               NULL, skb->dev,
+       NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
+               dev_net(skb->dev), NULL, skb, NULL, skb->dev,
                br_forward_finish);
 }
 
@@ -114,8 +114,8 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
        skb->dev = to->dev;
        skb_forward_csum(skb);
 
-       NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, NULL, skb,
-               indev, skb->dev,
+       NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD,
+               dev_net(indev), NULL, skb, indev, skb->dev,
                br_forward_finish);
 }
 
index f921a5dce22dadf465b79dd93be733707d5d5d0d..223f4040d9df939059c7a67070663d710172a65e 100644 (file)
 br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
 EXPORT_SYMBOL(br_should_route_hook);
 
+static int
+br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+       return netif_receive_skb(skb);
+}
+
 static int br_pass_frame_up(struct sk_buff *skb)
 {
        struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
@@ -55,9 +61,9 @@ static int br_pass_frame_up(struct sk_buff *skb)
        if (!skb)
                return NET_RX_DROP;
 
-       return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb,
-                      indev, NULL,
-                      netif_receive_skb_sk);
+       return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+                      dev_net(indev), NULL, skb, indev, NULL,
+                      br_netif_receive_skb);
 }
 
 static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
@@ -120,7 +126,7 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
 }
 
 /* note: already called with rcu_read_lock */
-int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb)
+int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        const unsigned char *dest = eth_hdr(skb)->h_dest;
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
@@ -208,7 +214,7 @@ drop:
 EXPORT_SYMBOL_GPL(br_handle_frame_finish);
 
 /* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct sock *sk, struct sk_buff *skb)
+static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        u16 vid = 0;
@@ -278,8 +284,9 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                }
 
                /* Deliver packet to local host only */
-               if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, NULL, skb,
-                           skb->dev, NULL, br_handle_local_finish)) {
+               if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+                           dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+                           br_handle_local_finish)) {
                        return RX_HANDLER_CONSUMED; /* consumed by filter */
                } else {
                        *pskb = skb;
@@ -303,8 +310,8 @@ forward:
                if (ether_addr_equal(p->br->dev->dev_addr, dest))
                        skb->pkt_type = PACKET_HOST;
 
-               NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, NULL, skb,
-                       skb->dev, NULL,
+               NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
+                       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                        br_handle_frame_finish);
                break;
        default:
index 66efdc21f548524a19f3abc3ea5268b245f88dd2..b4d858a18eb65873810693fb32a256e5cc495b6d 100644 (file)
@@ -829,8 +829,8 @@ static void __br_multicast_send_query(struct net_bridge *br,
 
        if (port) {
                skb->dev = port->dev;
-               NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
-                       NULL, skb->dev,
+               NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
+                       dev_net(port->dev), NULL, skb, NULL, skb->dev,
                        br_dev_queue_push_xmit);
        } else {
                br_multicast_select_own_querier(br, ip, skb);
index 0a6f095bb0c9eef4cc6e6b543db0b0ba3967d67d..e6e76bbdc82fc2d879e4913c7d1a457abde89c9e 100644 (file)
@@ -256,7 +256,7 @@ void nf_bridge_update_protocol(struct sk_buff *skb)
  * don't, we use the neighbour framework to find out. In both cases, we make
  * sure that br_handle_frame_finish() is called afterwards.
  */
-int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
+int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct neighbour *neigh;
        struct dst_entry *dst;
@@ -273,7 +273,7 @@ int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
                if (neigh->hh.hh_len) {
                        neigh_hh_bridge(&neigh->hh, skb);
                        skb->dev = nf_bridge->physindev;
-                       ret = br_handle_frame_finish(sk, skb);
+                       ret = br_handle_frame_finish(net, sk, skb);
                } else {
                        /* the neighbour function below overwrites the complete
                         * MAC header, so we save the Ethernet source address and
@@ -342,7 +342,7 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
  * device, we proceed as if ip_route_input() succeeded. If it differs from the
  * logical bridge port or if ip_route_output_key() fails we drop the packet.
  */
-static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
+static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
        struct iphdr *iph = ip_hdr(skb);
@@ -371,7 +371,7 @@ static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
                        if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
                                goto free_skb;
 
-                       rt = ip_route_output(dev_net(dev), iph->daddr, 0,
+                       rt = ip_route_output(net, iph->daddr, 0,
                                             RT_TOS(iph->tos), 0);
                        if (!IS_ERR(rt)) {
                                /* - Bridged-and-DNAT'ed traffic doesn't
@@ -393,7 +393,7 @@ bridged_dnat:
                                nf_bridge_push_encap_header(skb);
                                NF_HOOK_THRESH(NFPROTO_BRIDGE,
                                               NF_BR_PRE_ROUTING,
-                                              sk, skb, skb->dev, NULL,
+                                              net, sk, skb, skb->dev, NULL,
                                               br_nf_pre_routing_finish_bridge,
                                               1);
                                return 0;
@@ -413,7 +413,7 @@ bridged_dnat:
        skb->dev = nf_bridge->physindev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
-       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
+       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, net, sk, skb,
                       skb->dev, NULL,
                       br_handle_frame_finish, 1);
 
@@ -511,7 +511,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
 
        skb->protocol = htons(ETH_P_IP);
 
-       NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb,
+       NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
                skb->dev, NULL,
                br_nf_pre_routing_finish);
 
@@ -535,7 +535,7 @@ static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
 }
 
 /* PF_BRIDGE/FORWARD *************************************************/
-static int br_nf_forward_finish(struct sock *sk, struct sk_buff *skb)
+static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
        struct net_device *in;
@@ -559,7 +559,7 @@ static int br_nf_forward_finish(struct sock *sk, struct sk_buff *skb)
        }
        nf_bridge_push_encap_header(skb);
 
-       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, sk, skb,
+       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, net, sk, skb,
                       in, skb->dev, br_forward_finish, 1);
        return 0;
 }
@@ -626,7 +626,7 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
        else
                skb->protocol = htons(ETH_P_IPV6);
 
-       NF_HOOK(pf, NF_INET_FORWARD, NULL, skb,
+       NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
                brnf_get_logical_dev(skb, state->in),
                parent, br_nf_forward_finish);
 
@@ -661,14 +661,14 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
                return NF_ACCEPT;
        }
        *d = state->in;
-       NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->sk, skb,
+       NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
                state->in, state->out, br_nf_forward_finish);
 
        return NF_STOLEN;
 }
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
-static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
+static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct brnf_frag_data *data;
        int err;
@@ -690,23 +690,27 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
        __skb_push(skb, data->encap_size);
 
        nf_bridge_info_free(skb);
-       return br_dev_queue_push_xmit(sk, skb);
+       return br_dev_queue_push_xmit(net, sk, skb);
+}
+static int br_nf_push_frag_xmit_sk(struct sock *sk, struct sk_buff *skb)
+{
+       struct net *net = dev_net(skb_dst(skb)->dev);
+       return br_nf_push_frag_xmit(net, sk, skb);
 }
 #endif
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
-static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
-                            int (*output)(struct sock *, struct sk_buff *))
+static int
+br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+                 int (*output)(struct sock *, struct sk_buff *))
 {
        unsigned int mtu = ip_skb_dst_mtu(skb);
        struct iphdr *iph = ip_hdr(skb);
-       struct rtable *rt = skb_rtable(skb);
-       struct net_device *dev = rt->dst.dev;
 
        if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
                     (IPCB(skb)->frag_max_size &&
                      IPCB(skb)->frag_max_size > mtu))) {
-               IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+               IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
                kfree_skb(skb);
                return -EMSGSIZE;
        }
@@ -722,7 +726,7 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
        return 0;
 }
 
-static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
+static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct nf_bridge_info *nf_bridge;
        unsigned int mtu_reserved;
@@ -731,7 +735,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 
        if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
                nf_bridge_info_free(skb);
-               return br_dev_queue_push_xmit(sk, skb);
+               return br_dev_queue_push_xmit(net, sk, skb);
        }
 
        nf_bridge = nf_bridge_info_get(skb);
@@ -760,7 +764,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
                skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
                                                 data->size);
 
-               return br_nf_ip_fragment(sk, skb, br_nf_push_frag_xmit);
+               return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit_sk);
        }
 #endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
@@ -783,14 +787,14 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
                                                 data->size);
 
                if (v6ops)
-                       return v6ops->fragment(sk, skb, br_nf_push_frag_xmit);
+                       return v6ops->fragment(sk, skb, br_nf_push_frag_xmit_sk);
 
                kfree_skb(skb);
                return -EMSGSIZE;
        }
 #endif
        nf_bridge_info_free(skb);
-       return br_dev_queue_push_xmit(sk, skb);
+       return br_dev_queue_push_xmit(net, sk, skb);
  drop:
        kfree_skb(skb);
        return 0;
@@ -836,7 +840,7 @@ static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
        else
                skb->protocol = htons(ETH_P_IPV6);
 
-       NF_HOOK(pf, NF_INET_POST_ROUTING, state->sk, skb,
+       NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
                NULL, realoutdev,
                br_nf_dev_queue_xmit);
 
@@ -880,7 +884,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
        skb->dev = nf_bridge->physindev;
 
        nf_bridge->physoutdev = NULL;
-       br_handle_frame_finish(NULL, skb);
+       br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
 }
 
 static int br_nf_dev_xmit(struct sk_buff *skb)
index 77383bfe7ea38f888ce67619d03d1b7856bab84b..e4dbbe44c7245cb415cf7463483739a4c60837ac 100644 (file)
@@ -161,7 +161,7 @@ br_nf_ipv6_daddr_was_changed(const struct sk_buff *skb,
  * for br_nf_pre_routing_finish(), same logic is used here but
  * equivalent IPv6 function ip6_route_input() called indirectly.
  */
-static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
+static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
        struct rtable *rt;
@@ -189,7 +189,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
                        nf_bridge_update_protocol(skb);
                        nf_bridge_push_encap_header(skb);
                        NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
-                                      sk, skb, skb->dev, NULL,
+                                      net, sk, skb, skb->dev, NULL,
                                       br_nf_pre_routing_finish_bridge,
                                       1);
                        return 0;
@@ -208,7 +208,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
        skb->dev = nf_bridge->physindev;
        nf_bridge_update_protocol(skb);
        nf_bridge_push_encap_header(skb);
-       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, sk, skb,
+       NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, net, sk, skb,
                       skb->dev, NULL,
                       br_handle_frame_finish, 1);
 
@@ -237,7 +237,7 @@ unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
        nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
 
        skb->protocol = htons(ETH_P_IPV6);
-       NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->sk, skb,
+       NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
                skb->dev, NULL,
                br_nf_pre_routing_finish_ipv6);
 
index 213baf7aaa930b6c730aed2251e2a1051cab87b7..74e99c75c8e4c0e39d6b6b85b65518fde19cd66a 100644 (file)
@@ -413,10 +413,10 @@ int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
 
 /* br_forward.c */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
-int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb);
+int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb);
 void br_forward(const struct net_bridge_port *to,
                struct sk_buff *skb, struct sk_buff *skb0);
-int br_forward_finish(struct sock *sk, struct sk_buff *skb);
+int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
 void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast);
 void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
                      struct sk_buff *skb2, bool unicast);
@@ -434,7 +434,7 @@ void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
 void br_manage_promisc(struct net_bridge *br);
 
 /* br_input.c */
-int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
+int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
 rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
 
 static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
index 534fc4cd263ef21dd517402efde9585e6f720f83..5881fbc114a9ec88612fd08f35a73d2794d56890 100644 (file)
 
 #define LLC_RESERVE sizeof(struct llc_pdu_un)
 
+static int br_send_bpdu_finish(struct net *net, struct sock *sk,
+                              struct sk_buff *skb)
+{
+       return dev_queue_xmit(skb);
+}
+
 static void br_send_bpdu(struct net_bridge_port *p,
                         const unsigned char *data, int length)
 {
@@ -54,9 +60,9 @@ static void br_send_bpdu(struct net_bridge_port *p,
 
        skb_reset_mac_header(skb);
 
-       NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
-               NULL, skb->dev,
-               dev_queue_xmit_sk);
+       NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
+               dev_net(p->dev), NULL, skb, NULL, skb->dev,
+               br_send_bpdu_finish);
 }
 
 static inline void br_set_ticks(unsigned char *dest, int j)
index 8a3f63b2e8073d8081df5fbaac3bf63c348c0447..ab20d6ed6e2f9a693cef4f1f017ad9d55637130c 100644 (file)
@@ -61,7 +61,7 @@ ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
            const struct nf_hook_state *state)
 {
        return ebt_do_table(ops->hooknum, skb, state->in, state->out,
-                           dev_net(state->in)->xt.frame_filter);
+                           state->net->xt.frame_filter);
 }
 
 static unsigned int
@@ -69,7 +69,7 @@ ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
             const struct nf_hook_state *state)
 {
        return ebt_do_table(ops->hooknum, skb, state->in, state->out,
-                           dev_net(state->out)->xt.frame_filter);
+                           state->net->xt.frame_filter);
 }
 
 static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
index c5ef5b1ab6786814830983d76ef46c6fd0051f51..ad81a5a65644ef23af545d5375beae883b60c9c3 100644 (file)
@@ -61,7 +61,7 @@ ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
           const struct nf_hook_state *state)
 {
        return ebt_do_table(ops->hooknum, skb, state->in, state->out,
-                           dev_net(state->in)->xt.frame_nat);
+                           state->net->xt.frame_nat);
 }
 
 static unsigned int
@@ -69,7 +69,7 @@ ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
            const struct nf_hook_state *state)
 {
        return ebt_do_table(ops->hooknum, skb, state->in, state->out,
-                           dev_net(state->out)->xt.frame_nat);
+                           state->net->xt.frame_nat);
 }
 
 static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
index 877c84834d81a601ee4934fb287d1afccd4ee598..ee0d6286f934ab383baa13f55f362fbf01d162f7 100644 (file)
@@ -2915,9 +2915,11 @@ EXPORT_SYMBOL(xmit_recursion);
 
 /**
  *     dev_loopback_xmit - loop back @skb
+ *     @net: network namespace this loopback is happening in
+ *     @sk:  sk needed to be a netfilter okfn
  *     @skb: buffer to transmit
  */
-int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
+int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        skb_reset_mac_header(skb);
        __skb_pull(skb, skb_network_offset(skb));
@@ -3143,11 +3145,11 @@ out:
        return rc;
 }
 
-int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
+int dev_queue_xmit(struct sk_buff *skb)
 {
        return __dev_queue_xmit(skb, NULL);
 }
-EXPORT_SYMBOL(dev_queue_xmit_sk);
+EXPORT_SYMBOL(dev_queue_xmit);
 
 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
 {
@@ -3668,6 +3670,14 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
        case TC_ACT_QUEUED:
                kfree_skb(skb);
                return NULL;
+       case TC_ACT_REDIRECT:
+               /* skb_mac_header check was done by cls/act_bpf, so
+                * we can safely push the L2 header back before
+                * redirecting to another netdev
+                */
+               __skb_push(skb, skb->mac_len);
+               skb_do_redirect(skb);
+               return NULL;
        default:
                break;
        }
@@ -3982,13 +3992,13 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
  *     NET_RX_SUCCESS: no congestion
  *     NET_RX_DROP: packet was dropped
  */
-int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
+int netif_receive_skb(struct sk_buff *skb)
 {
        trace_netif_receive_skb_entry(skb);
 
        return netif_receive_skb_internal(skb);
 }
-EXPORT_SYMBOL(netif_receive_skb_sk);
+EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending
  * Called with irqs disabled.
index 13079f03902e7674b7cd71bf01ca1d12844c758b..da3f3d94d6e93c7411a1f8369be9c180124fffc9 100644 (file)
@@ -1427,6 +1427,48 @@ const struct bpf_func_proto bpf_clone_redirect_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
+struct redirect_info {
+       u32 ifindex;
+       u32 flags;
+};
+
+static DEFINE_PER_CPU(struct redirect_info, redirect_info);
+static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5)
+{
+       struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+
+       ri->ifindex = ifindex;
+       ri->flags = flags;
+       return TC_ACT_REDIRECT;
+}
+
+int skb_do_redirect(struct sk_buff *skb)
+{
+       struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+       struct net_device *dev;
+
+       dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
+       ri->ifindex = 0;
+       if (unlikely(!dev)) {
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       if (BPF_IS_REDIRECT_INGRESS(ri->flags))
+               return dev_forward_skb(dev, skb);
+
+       skb->dev = dev;
+       return dev_queue_xmit(skb);
+}
+
+const struct bpf_func_proto bpf_redirect_proto = {
+       .func           = bpf_redirect,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_ANYTHING,
+       .arg2_type      = ARG_ANYTHING,
+};
+
 static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        return task_get_classid((struct sk_buff *) (unsigned long) r1);
@@ -1607,6 +1649,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
                return &bpf_skb_get_tunnel_key_proto;
        case BPF_FUNC_skb_set_tunnel_key:
                return bpf_get_skb_set_tunnel_key_proto();
+       case BPF_FUNC_redirect:
+               return &bpf_redirect_proto;
        default:
                return sk_filter_func_proto(func_id);
        }
@@ -1632,6 +1676,9 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 static bool sk_filter_is_valid_access(int off, int size,
                                      enum bpf_access_type type)
 {
+       if (off == offsetof(struct __sk_buff, tc_classid))
+               return false;
+
        if (type == BPF_WRITE) {
                switch (off) {
                case offsetof(struct __sk_buff, cb[0]) ...
@@ -1648,6 +1695,9 @@ static bool sk_filter_is_valid_access(int off, int size,
 static bool tc_cls_act_is_valid_access(int off, int size,
                                       enum bpf_access_type type)
 {
+       if (off == offsetof(struct __sk_buff, tc_classid))
+               return type == BPF_WRITE ? true : false;
+
        if (type == BPF_WRITE) {
                switch (off) {
                case offsetof(struct __sk_buff, mark):
@@ -1760,6 +1810,14 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                        *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
                break;
 
+       case offsetof(struct __sk_buff, tc_classid):
+               ctx_off -= offsetof(struct __sk_buff, tc_classid);
+               ctx_off += offsetof(struct sk_buff, cb);
+               ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
+               WARN_ON(type != BPF_WRITE);
+               *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
+               break;
+
        case offsetof(struct __sk_buff, tc_index):
 #ifdef CONFIG_NET_SCHED
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
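
The net/core/filter.c hunks above introduce the bpf_redirect() helper: it records a target ifindex and flags per cpu and returns TC_ACT_REDIRECT, which handle_ing() turns into a call to skb_do_redirect() — forwarding to the target device's receive path when the ingress flag is set, otherwise transmitting via dev_queue_xmit(). A hedged sketch of how a cls_bpf ingress program in restricted C might call the new helper; the helper-pointer idiom mirrors samples/bpf, and the section name and fixed ifindex are assumptions, not part of this commit:

/* compile with clang -O2 -target bpf against this tree's uapi headers */
#include <uapi/linux/bpf.h>

static int (*bpf_redirect)(int ifindex, int flags) =
	(void *) BPF_FUNC_redirect;

__attribute__((section("classifier"), used))
int redirect_all(struct __sk_buff *skb)
{
	/* flags == 0: egress redirect, i.e. skb_do_redirect() sets
	 * skb->dev to the device with ifindex 2 and queues it for
	 * transmission via dev_queue_xmit()
	 */
	return bpf_redirect(2, 0);
}

Such an object would then be attached as a cls_bpf filter (with a sufficiently recent iproute2, roughly "tc filter ... bpf obj ..."); the exact loader invocation is outside this patch set.
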
index b279077c30894dfeb69e2c21686d702cc809e678..49b599062af19b98a9b07e0651fb073e2cb1ebf3 100644 (file)
@@ -1004,15 +1004,12 @@ static ssize_t show_trans_timeout(struct netdev_queue *queue,
 }
 
 #ifdef CONFIG_XPS
-static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
 {
        struct net_device *dev = queue->dev;
-       int i;
-
-       for (i = 0; i < dev->num_tx_queues; i++)
-               if (queue == &dev->_tx[i])
-                       break;
+       unsigned int i;
 
+       i = queue - dev->_tx;
        BUG_ON(i >= dev->num_tx_queues);
 
        return i;
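
The net-sysfs hunk above replaces the linear scan in get_netdev_queue_index() with pointer subtraction: since queue points into the dev->_tx array, queue - dev->_tx is already the queue's index. A standalone illustration of the same arithmetic in plain C (purely illustrative, not kernel code):

#include <assert.h>
#include <stddef.h>

struct queue { int dummy; };

int main(void)
{
	struct queue tx[8];
	struct queue *q = &tx[5];

	/* subtracting the array base from an element pointer
	 * yields that element's index, here 5
	 */
	ptrdiff_t i = q - tx;

	assert(i == 5);
	return 0;
}
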
index 4507b188fc5109c6dced018c7b2159e9b1a8c3b2..482730cd8a562e048b08f275551361ca813a8792 100644 (file)
@@ -194,7 +194,7 @@ static int dn_neigh_output(struct neighbour *neigh, struct sk_buff *skb)
        return err;
 }
 
-static int dn_neigh_output_packet(struct sock *sk, struct sk_buff *skb)
+static int dn_neigh_output_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *)dst;
@@ -246,8 +246,9 @@ static int dn_long_output(struct neighbour *neigh, struct sock *sk,
 
        skb_reset_network_header(skb);
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
-                      NULL, neigh->dev, dn_neigh_output_packet);
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING,
+                      &init_net, sk, skb, NULL, neigh->dev,
+                      dn_neigh_output_packet);
 }
 
 /*
@@ -286,8 +287,9 @@ static int dn_short_output(struct neighbour *neigh, struct sock *sk,
 
        skb_reset_network_header(skb);
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
-                      NULL, neigh->dev, dn_neigh_output_packet);
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING,
+                      &init_net, sk, skb, NULL, neigh->dev,
+                      dn_neigh_output_packet);
 }
 
 /*
@@ -327,11 +329,12 @@ static int dn_phase3_output(struct neighbour *neigh, struct sock *sk,
 
        skb_reset_network_header(skb);
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING, sk, skb,
-                      NULL, neigh->dev, dn_neigh_output_packet);
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_POST_ROUTING,
+                      &init_net, sk, skb, NULL, neigh->dev,
+                      dn_neigh_output_packet);
 }
 
-int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb)
+int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *) dst;
@@ -375,7 +378,7 @@ void dn_neigh_pointopoint_hello(struct sk_buff *skb)
 /*
  * Ethernet router hello message received
  */
-int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb)
+int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data;
 
@@ -437,7 +440,7 @@ int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb)
 /*
  * Endnode hello message received
  */
-int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb)
+int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data;
        struct neighbour *neigh;
index a321eac9fd0c5755f2d902b468137b413b092d46..7ac086d5c0c017a54534a1a986fefd88e0bcb12f 100644 (file)
@@ -714,7 +714,8 @@ out:
        return ret;
 }
 
-static int dn_nsp_rx_packet(struct sock *sk2, struct sk_buff *skb)
+static int dn_nsp_rx_packet(struct net *net, struct sock *sk2,
+                           struct sk_buff *skb)
 {
        struct dn_skb_cb *cb = DN_SKB_CB(skb);
        struct sock *sk = NULL;
@@ -814,8 +815,8 @@ free_out:
 
 int dn_nsp_rx(struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN, NULL, skb,
-                      skb->dev, NULL,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_IN,
+                      &init_net, NULL, skb, skb->dev, NULL,
                       dn_nsp_rx_packet);
 }
 
index 1aaa51ebbda6ed1271b2a44820860f4d338b2f66..4b02dd300f5072f97e25aea310e6dea67d26cfee 100644 (file)
@@ -85,7 +85,7 @@ static void dn_nsp_send(struct sk_buff *skb)
        if (dst) {
 try_again:
                skb_dst_set(skb, dst);
-               dst_output(skb);
+               dst_output(skb->sk, skb);
                return;
        }
 
@@ -582,7 +582,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
         * associations.
         */
        skb_dst_set(skb, dst_clone(dst));
-       dst_output(skb);
+       dst_output(skb->sk, skb);
 }
 
 
index 03227ffd19ce02c1a506ebd606813edb853a3e75..e930321e2c1de264000eba75fa0a13a60cd821cc 100644 (file)
@@ -512,7 +512,7 @@ static int dn_return_long(struct sk_buff *skb)
  *
  * Returns: result of input function if route is found, error code otherwise
  */
-static int dn_route_rx_packet(struct sock *sk, struct sk_buff *skb)
+static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct dn_skb_cb *cb;
        int err;
@@ -573,8 +573,8 @@ static int dn_route_rx_long(struct sk_buff *skb)
        ptr++;
        cb->hops = *ptr++; /* Visit Count */
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb,
-                      skb->dev, NULL,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
+                      &init_net, NULL, skb, skb->dev, NULL,
                       dn_route_rx_packet);
 
 drop_it:
@@ -601,8 +601,8 @@ static int dn_route_rx_short(struct sk_buff *skb)
        ptr += 2;
        cb->hops = *ptr & 0x3f;
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, NULL, skb,
-                      skb->dev, NULL,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING,
+                      &init_net, NULL, skb, skb->dev, NULL,
                       dn_route_rx_packet);
 
 drop_it:
@@ -610,7 +610,7 @@ drop_it:
        return NET_RX_DROP;
 }
 
-static int dn_route_discard(struct sock *sk, struct sk_buff *skb)
+static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        /*
         * I know we drop the packet here, but thats considered success in
@@ -620,7 +620,7 @@ static int dn_route_discard(struct sock *sk, struct sk_buff *skb)
        return NET_RX_SUCCESS;
 }
 
-static int dn_route_ptp_hello(struct sock *sk, struct sk_buff *skb)
+static int dn_route_ptp_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        dn_dev_hello(skb);
        dn_neigh_pointopoint_hello(skb);
@@ -706,22 +706,22 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
                switch (flags & DN_RT_CNTL_MSK) {
                case DN_RT_PKT_HELO:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
-                                      NULL, skb, skb->dev, NULL,
+                                      &init_net, NULL, skb, skb->dev, NULL,
                                       dn_route_ptp_hello);
 
                case DN_RT_PKT_L1RT:
                case DN_RT_PKT_L2RT:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
-                                      NULL, skb, skb->dev, NULL,
+                                      &init_net, NULL, skb, skb->dev, NULL,
                                       dn_route_discard);
                case DN_RT_PKT_ERTH:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
-                                      NULL, skb, skb->dev, NULL,
+                                      &init_net, NULL, skb, skb->dev, NULL,
                                       dn_neigh_router_hello);
 
                case DN_RT_PKT_EEDH:
                        return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
-                                      NULL, skb, skb->dev, NULL,
+                                      &init_net, NULL, skb, skb->dev, NULL,
                                       dn_neigh_endnode_hello);
                }
        } else {
@@ -770,8 +770,8 @@ static int dn_output(struct sock *sk, struct sk_buff *skb)
        cb->rt_flags |= DN_RT_F_IE;
        cb->hops = 0;
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, sk, skb,
-                      NULL, dev,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT,
+                      &init_net, sk, skb, NULL, dev,
                       dn_to_neigh_output);
 
 error:
@@ -819,8 +819,8 @@ static int dn_forward(struct sk_buff *skb)
        if (rt->rt_flags & RTCF_DOREDIRECT)
                cb->rt_flags |= DN_RT_F_IE;
 
-       return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, NULL, skb,
-                      dev, skb->dev,
+       return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD,
+                      &init_net, NULL, skb, dev, skb->dev,
                       dn_to_neigh_output);
 
 drop:
index 1d0c3adb6f349b1db579085fd15d9a2ff30360b6..8a556643b8741a12646927ca5f05b3b78354ef79 100644 (file)
@@ -1043,22 +1043,16 @@ void inet_register_protosw(struct inet_protosw *p)
                goto out_illegal;
 
        /* If we are trying to override a permanent protocol, bail. */
-       answer = NULL;
        last_perm = &inetsw[p->type];
        list_for_each(lh, &inetsw[p->type]) {
                answer = list_entry(lh, struct inet_protosw, list);
-
                /* Check only the non-wild match. */
-               if (INET_PROTOSW_PERMANENT & answer->flags) {
-                       if (protocol == answer->protocol)
-                               break;
-                       last_perm = lh;
-               }
-
-               answer = NULL;
+               if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
+                       break;
+               if (protocol == answer->protocol)
+                       goto out_permanent;
+               last_perm = lh;
        }
-       if (answer)
-               goto out_permanent;
 
        /* Add the new entry after the last permanent entry if any, so that
         * the new entry does not override a permanent entry when matched with
index 30409b75e92503cca0daacc48010a63936c6aa20..61ff5ea312837fcd596beb770873193a14c396c0 100644 (file)
@@ -621,14 +621,20 @@ out:
 }
 EXPORT_SYMBOL(arp_create);
 
+static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+       return dev_queue_xmit(skb);
+}
+
 /*
  *     Send an arp packet.
  */
 void arp_xmit(struct sk_buff *skb)
 {
        /* Send it off, maybe filter it using firewalling first.  */
-       NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, NULL, skb,
-               NULL, skb->dev, dev_queue_xmit_sk);
+       NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
+               dev_net(skb->dev), NULL, skb, NULL, skb->dev,
+               arp_xmit_finish);
 }
 EXPORT_SYMBOL(arp_xmit);
 
@@ -636,7 +642,7 @@ EXPORT_SYMBOL(arp_xmit);
  *     Process an arp request.
  */
 
-static int arp_process(struct sock *sk, struct sk_buff *skb)
+static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -648,7 +654,6 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
        u16 dev_type = dev->type;
        int addr_type;
        struct neighbour *n;
-       struct net *net = dev_net(dev);
        bool is_garp = false;
 
        /* arp_rcv below verifies the ARP header and verifies the device
@@ -859,7 +864,7 @@ out:
 
 static void parp_redo(struct sk_buff *skb)
 {
-       arp_process(NULL, skb);
+       arp_process(dev_net(skb->dev), NULL, skb);
 }
 
 
@@ -892,8 +897,9 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
 
        memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
 
-       return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, NULL, skb,
-                      dev, NULL, arp_process);
+       return NF_HOOK(NFPROTO_ARP, NF_ARP_IN,
+                      dev_net(dev), NULL, skb, dev, NULL,
+                      arp_process);
 
 consumeskb:
        consume_skb(skb);
index 2d3aa408fbdca19230224269ebbcc7124dc7f5bb..d66cfb35ba74681f0a4cecb76f2bb72d9389efe9 100644 (file)
@@ -61,18 +61,18 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 }
 
 
-static int ip_forward_finish(struct sock *sk, struct sk_buff *skb)
+static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct ip_options *opt  = &(IPCB(skb)->opt);
 
-       IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
-       IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP_ADD_STATS_BH(net, IPSTATS_MIB_OUTOCTETS, skb->len);
 
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
        skb_sender_cpu_clear(skb);
-       return dst_output_sk(sk, skb);
+       return dst_output(sk, skb);
 }
 
 int ip_forward(struct sk_buff *skb)
@@ -81,6 +81,7 @@ int ip_forward(struct sk_buff *skb)
        struct iphdr *iph;      /* Our header */
        struct rtable *rt;      /* Route we use */
        struct ip_options *opt  = &(IPCB(skb)->opt);
+       struct net *net;
 
        /* that should never happen */
        if (skb->pkt_type != PACKET_HOST)
@@ -99,6 +100,7 @@ int ip_forward(struct sk_buff *skb)
                return NET_RX_SUCCESS;
 
        skb_forward_csum(skb);
+       net = dev_net(skb->dev);
 
        /*
         *      According to the RFC, we must first decrease the TTL field. If
@@ -119,7 +121,7 @@ int ip_forward(struct sk_buff *skb)
        IPCB(skb)->flags |= IPSKB_FORWARDED;
        mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
        if (ip_exceeds_mtu(skb, mtu)) {
-               IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
+               IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(mtu));
                goto drop;
@@ -143,8 +145,9 @@ int ip_forward(struct sk_buff *skb)
 
        skb->priority = rt_tos2priority(iph->tos);
 
-       return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb,
-                      skb->dev, rt->dst.dev, ip_forward_finish);
+       return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
+                      net, NULL, skb, skb->dev, rt->dst.dev,
+                      ip_forward_finish);
 
 sr_failed:
        /*
@@ -155,7 +158,7 @@ sr_failed:
 
 too_many_hops:
        /* Tell the sender its packet died... */
-       IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
        icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
 drop:
        kfree_skb(skb);
index f4fc8a77aaa79dcb5156bfb2de84efdf24808713..7cc9f7bb7fb778bcb895634a97ea9de0e88b0d66 100644 (file)
@@ -188,10 +188,8 @@ bool ip_call_ra_chain(struct sk_buff *skb)
        return false;
 }
 
-static int ip_local_deliver_finish(struct sock *sk, struct sk_buff *skb)
+static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       struct net *net = dev_net(skb->dev);
-
        __skb_pull(skb, skb_network_header_len(skb));
 
        rcu_read_lock();
@@ -254,8 +252,8 @@ int ip_local_deliver(struct sk_buff *skb)
                        return 0;
        }
 
-       return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN, NULL, skb,
-                      skb->dev, NULL,
+       return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
+                      dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                       ip_local_deliver_finish);
 }
 
@@ -311,7 +309,7 @@ drop:
 int sysctl_ip_early_demux __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_ip_early_demux);
 
-static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
+static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
@@ -337,8 +335,7 @@ static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
                                               iph->tos, skb->dev);
                if (unlikely(err)) {
                        if (err == -EXDEV)
-                               NET_INC_STATS_BH(dev_net(skb->dev),
-                                                LINUX_MIB_IPRPFILTER);
+                               NET_INC_STATS_BH(net, LINUX_MIB_IPRPFILTER);
                        goto drop;
                }
        }
@@ -359,11 +356,9 @@ static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
 
        rt = skb_rtable(skb);
        if (rt->rt_type == RTN_MULTICAST) {
-               IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST,
-                               skb->len);
+               IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST)
-               IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST,
-                               skb->len);
+               IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INBCAST, skb->len);
 
        return dst_input(skb);
 
@@ -378,6 +373,7 @@ drop:
 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
 {
        const struct iphdr *iph;
+       struct net *net;
        u32 len;
 
        /* When the interface is in promisc. mode, drop all the crap
@@ -387,11 +383,12 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
                goto drop;
 
 
-       IP_UPD_PO_STATS_BH(dev_net(dev), IPSTATS_MIB_IN, skb->len);
+       net = dev_net(dev);
+       IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_IN, skb->len);
 
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb) {
-               IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+               IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
                goto out;
        }
 
@@ -417,7 +414,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
        BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
        BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
        BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
-       IP_ADD_STATS_BH(dev_net(dev),
+       IP_ADD_STATS_BH(net,
                        IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
                        max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
 
@@ -431,7 +428,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 
        len = ntohs(iph->tot_len);
        if (skb->len < len) {
-               IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
+               IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
                goto drop;
        } else if (len < (iph->ihl*4))
                goto inhdr_error;
@@ -441,7 +438,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
         * Note this now means skb->len holds ntohs(iph->tot_len).
         */
        if (pskb_trim_rcsum(skb, len)) {
-               IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+               IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
                goto drop;
        }
 
@@ -453,14 +450,14 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
 
-       return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
-                      dev, NULL,
+       return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
+                      net, NULL, skb, dev, NULL,
                       ip_rcv_finish);
 
 csum_error:
-       IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_CSUMERRORS);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_CSUMERRORS);
 inhdr_error:
-       IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
 drop:
        kfree_skb(skb);
 out:
index 0138fada0951b17b175be0b215c248fc7224dc5a..09a6b7bb7ea389d078904a24c4ca2b8d8c2e9ccb 100644 (file)
@@ -97,12 +97,14 @@ EXPORT_SYMBOL(ip_send_check);
 
 static int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
 {
+       struct net *net = dev_net(skb_dst(skb)->dev);
        struct iphdr *iph = ip_hdr(skb);
 
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
-       return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb, NULL,
-                      skb_dst(skb)->dev, dst_output_sk);
+       return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
+                      net, sk, skb, NULL, skb_dst(skb)->dev,
+                      dst_output_okfn);
 }
 
 int __ip_local_out(struct sk_buff *skb)
@@ -116,7 +118,7 @@ int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
 
        err = __ip_local_out(skb);
        if (likely(err == 1))
-               err = dst_output_sk(sk, skb);
+               err = dst_output(sk, skb);
 
        return err;
 }
@@ -177,14 +179,15 @@ static int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
+       struct net *net = dev_net(dev);
        unsigned int hh_len = LL_RESERVED_SPACE(dev);
        struct neighbour *neigh;
        u32 nexthop;
 
        if (rt->rt_type == RTN_MULTICAST) {
-               IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
+               IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST)
-               IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
+               IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
 
        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
@@ -263,7 +266,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb,
        return ret;
 }
 
-static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
+static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        unsigned int mtu;
 
@@ -271,7 +274,7 @@ static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
-               return dst_output_sk(sk, skb);
+               return dst_output(sk, skb);
        }
 #endif
        mtu = ip_skb_dst_mtu(skb);
@@ -288,11 +291,12 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb)
 {
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = rt->dst.dev;
+       struct net *net = dev_net(dev);
 
        /*
         *      If the indicated interface is up and running, send the packet.
         */
-       IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
+       IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 
        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);
@@ -320,7 +324,7 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb)
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
-                                       sk, newskb, NULL, newskb->dev,
+                                       net, sk, newskb, NULL, newskb->dev,
                                        dev_loopback_xmit);
                }
 
@@ -335,26 +339,29 @@ int ip_mc_output(struct sock *sk, struct sk_buff *skb)
        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
-                       NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, newskb,
-                               NULL, newskb->dev, dev_loopback_xmit);
+                       NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+                               net, sk, newskb, NULL, newskb->dev,
+                               dev_loopback_xmit);
        }
 
-       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, NULL,
-                           skb->dev, ip_finish_output,
+       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+                           net, sk, skb, NULL, skb->dev,
+                           ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
 int ip_output(struct sock *sk, struct sk_buff *skb)
 {
        struct net_device *dev = skb_dst(skb)->dev;
+       struct net *net = dev_net(dev);
 
-       IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
+       IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 
        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);
 
-       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
-                           NULL, dev,
+       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+                           net, sk, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
@@ -498,10 +505,9 @@ static int ip_fragment(struct sock *sk, struct sk_buff *skb,
        if (unlikely(!skb->ignore_df ||
                     (IPCB(skb)->frag_max_size &&
                      IPCB(skb)->frag_max_size > mtu))) {
-               struct rtable *rt = skb_rtable(skb);
-               struct net_device *dev = rt->dst.dev;
+               struct net *net = dev_net(skb_rtable(skb)->dst.dev);
 
-               IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+               IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(mtu));
                kfree_skb(skb);
@@ -529,9 +535,11 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = skb_rtable(skb);
+       struct net *net;
        int err = 0;
 
        dev = rt->dst.dev;
+       net = dev_net(dev);
 
        /*
         *      Point into the IP datagram header.
@@ -624,7 +632,7 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
                        err = output(sk, skb);
 
                        if (!err)
-                               IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
+                               IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
                        if (err || !frag)
                                break;
 
@@ -634,7 +642,7 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
                }
 
                if (err == 0) {
-                       IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
+                       IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
                        return 0;
                }
 
@@ -643,7 +651,7 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
                        kfree_skb(frag);
                        frag = skb;
                }
-               IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+               IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
                return err;
 
 slow_path_clean:
@@ -765,15 +773,15 @@ slow_path:
                if (err)
                        goto fail;
 
-               IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
+               IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
        }
        consume_skb(skb);
-       IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
+       IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
        return err;
 
 fail:
        kfree_skb(skb);
-       IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+       IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
        return err;
 }
 EXPORT_SYMBOL(ip_do_fragment);
index 0c152087ca15dd3f97548d3c7123d42bd6626f0e..3b87ec5178f986ad17f53d23fcd8e01402fe13a4 100644 (file)
@@ -197,7 +197,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
 
-       err = dst_output(skb);
+       err = dst_output(skb->sk, skb);
        if (net_xmit_eval(err) == 0)
                err = skb->len;
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
index 866ee89f5254a4d6b401c4d9152d15640802b6b8..cfcb996ec51bac6843ae3f513e6a7ba2f3da3610 100644 (file)
@@ -1678,17 +1678,18 @@ static void ip_encap(struct net *net, struct sk_buff *skb,
        nf_reset(skb);
 }
 
-static inline int ipmr_forward_finish(struct sock *sk, struct sk_buff *skb)
+static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
+                                     struct sk_buff *skb)
 {
        struct ip_options *opt = &(IPCB(skb)->opt);
 
-       IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
-       IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
+       IP_ADD_STATS_BH(net, IPSTATS_MIB_OUTOCTETS, skb->len);
 
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
-       return dst_output_sk(sk, skb);
+       return dst_output(sk, skb);
 }
 
 /*
@@ -1745,7 +1746,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
                 * to blackhole.
                 */
 
-               IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+               IP_INC_STATS_BH(net, IPSTATS_MIB_FRAGFAILS);
                ip_rt_put(rt);
                goto out_free;
        }
@@ -1787,8 +1788,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
         * not mrouter) cannot join to more than one interface - it will
         * result in receiving multiple packets.
         */
-       NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb,
-               skb->dev, dev,
+       NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
+               net, NULL, skb, skb->dev, dev,
                ipmr_forward_finish);
        return;
 
index 93876d03120ca85fbc1e5aaa689d245d4508f01e..d217e4c196454e45e91eb1bf472614d3756961ca 100644 (file)
@@ -30,10 +30,8 @@ static unsigned int
 arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net(state->in ? state->in : state->out);
-
        return arpt_do_table(skb, ops->hooknum, state,
-                            net->ipv4.arptable_filter);
+                            state->net->ipv4.arptable_filter);
 }
 
 static struct nf_hook_ops *arpfilter_ops __read_mostly;
index b0a86e73451c1f2ee99e209eb8a6556414b02e46..5d514eac4c3131c1327f3561ac607fbf6fe52517 100644 (file)
@@ -246,7 +246,8 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
        return 0;
 }
 
-static void trace_packet(const struct sk_buff *skb,
+static void trace_packet(struct net *net,
+                        const struct sk_buff *skb,
                         unsigned int hook,
                         const struct net_device *in,
                         const struct net_device *out,
@@ -258,7 +259,6 @@ static void trace_packet(const struct sk_buff *skb,
        const char *hookname, *chainname, *comment;
        const struct ipt_entry *iter;
        unsigned int rulenum = 0;
-       struct net *net = dev_net(in ? in : out);
 
        root = get_entry(private->entries, private->hook_entry[hook]);
 
@@ -378,8 +378,8 @@ ipt_do_table(struct sk_buff *skb,
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
                /* The packet is traced: log it */
                if (unlikely(skb->nf_trace))
-                       trace_packet(skb, hook, state->in, state->out,
-                                    table->name, private, e);
+                       trace_packet(state->net, skb, hook, state->in,
+                                    state->out, table->name, private, e);
 #endif
                /* Standard target? */
                if (!t->u.kernel.target->target) {
index 45cb16a6a4a337c564d880485c45ca46229cb152..69157d8eba953bb7c175ecf57eebd789f6a91b3e 100644 (file)
@@ -514,7 +514,7 @@ arp_mangle(const struct nf_hook_ops *ops,
        struct arphdr *arp = arp_hdr(skb);
        struct arp_payload *payload;
        struct clusterip_config *c;
-       struct net *net = dev_net(state->in ? state->in : state->out);
+       struct net *net = state->net;
 
        /* we don't care about non-ethernet and non-ipv4 ARP */
        if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
index 95ea633e8356eb9b419e4027f9954810194aa23c..f471a0628c7507be9bdc003305031913b278934a 100644 (file)
@@ -303,7 +303,7 @@ static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
                                       const struct nf_hook_state *nhs)
 {
-       struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
+       struct synproxy_net *snet = synproxy_pernet(nhs->net);
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        struct nf_conn_synproxy *synproxy;
index a0f3beca52d2107b12ae748a4328d5491e7553c3..32feff32b116f92eb0526b4bc764d75e763dc1f1 100644 (file)
@@ -36,16 +36,14 @@ static unsigned int
 iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                    const struct nf_hook_state *state)
 {
-       const struct net *net;
-
        if (ops->hooknum == NF_INET_LOCAL_OUT &&
            (skb->len < sizeof(struct iphdr) ||
             ip_hdrlen(skb) < sizeof(struct iphdr)))
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
-       net = dev_net(state->in ? state->in : state->out);
-       return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_filter);
+       return ipt_do_table(skb, ops->hooknum, state,
+                           state->net->ipv4.iptable_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
index 62cbb8c5f4a8f246428599186e0e9b498ae460a6..4a5150fc9510ff617ab41998862a1495518e90cc 100644 (file)
@@ -39,7 +39,6 @@ static const struct xt_table packet_mangler = {
 static unsigned int
 ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
 {
-       struct net_device *out = state->out;
        unsigned int ret;
        const struct iphdr *iph;
        u_int8_t tos;
@@ -60,7 +59,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
        tos = iph->tos;
 
        ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, state,
-                          dev_net(out)->ipv4.iptable_mangle);
+                          state->net->ipv4.iptable_mangle);
        /* Reroute for ANY change. */
        if (ret != NF_DROP && ret != NF_STOLEN) {
                iph = ip_hdr(skb);
@@ -88,10 +87,10 @@ iptable_mangle_hook(const struct nf_hook_ops *ops,
                return ipt_mangle_out(skb, state);
        if (ops->hooknum == NF_INET_POST_ROUTING)
                return ipt_do_table(skb, ops->hooknum, state,
-                                   dev_net(state->out)->ipv4.iptable_mangle);
+                                   state->net->ipv4.iptable_mangle);
        /* PREROUTING/INPUT/FORWARD: */
        return ipt_do_table(skb, ops->hooknum, state,
-                           dev_net(state->in)->ipv4.iptable_mangle);
+                           state->net->ipv4.iptable_mangle);
 }
 
 static struct nf_hook_ops *mangle_ops __read_mostly;
index 0d4d9cdf98a4c0dcb2da3fbebe9264efeb99d4c7..4f4c64f81169ccea09f147834b5864d8ecdd08ee 100644 (file)
@@ -33,9 +33,8 @@ static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops,
                                         const struct nf_hook_state *state,
                                         struct nf_conn *ct)
 {
-       struct net *net = nf_ct_net(ct);
-
-       return ipt_do_table(skb, ops->hooknum, state, net->ipv4.nat_table);
+       return ipt_do_table(skb, ops->hooknum, state,
+                           state->net->ipv4.nat_table);
 }
 
 static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops,
index 0356e6da4bb749ba1dcfa07667dcac7b0aa92878..20126e469ffb41f547c0d13f3d69ba30f601281c 100644 (file)
@@ -23,16 +23,14 @@ static unsigned int
 iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                 const struct nf_hook_state *state)
 {
-       const struct net *net;
-
        if (ops->hooknum == NF_INET_LOCAL_OUT &&
            (skb->len < sizeof(struct iphdr) ||
             ip_hdrlen(skb) < sizeof(struct iphdr)))
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
-       net = dev_net(state->in ? state->in : state->out);
-       return ipt_do_table(skb, ops->hooknum, state, net->ipv4.iptable_raw);
+       return ipt_do_table(skb, ops->hooknum, state,
+                           state->net->ipv4.iptable_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index 4bce3980ccd935f891c55329c127478dc031ed77..82fefd609b85b3130583c4477cc51a34f0e6e458 100644 (file)
@@ -40,17 +40,14 @@ static unsigned int
 iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                      const struct nf_hook_state *state)
 {
-       const struct net *net;
-
        if (ops->hooknum == NF_INET_LOCAL_OUT &&
            (skb->len < sizeof(struct iphdr) ||
             ip_hdrlen(skb) < sizeof(struct iphdr)))
                /* Somebody is playing with raw sockets. */
                return NF_ACCEPT;
 
-       net = dev_net(state->in ? state->in : state->out);
        return ipt_do_table(skb, ops->hooknum, state,
-                           net->ipv4.iptable_security);
+                           state->net->ipv4.iptable_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
index 8a2caaf3940bedaa9abba13352594a29341d287e..9564684876c9729d5cf27cb7370d3d6b56afcbc7 100644 (file)
@@ -147,7 +147,7 @@ static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
                                      const struct nf_hook_state *state)
 {
-       return nf_conntrack_in(dev_net(state->in), PF_INET, ops->hooknum, skb);
+       return nf_conntrack_in(state->net, PF_INET, ops->hooknum, skb);
 }
 
 static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
@@ -158,7 +158,7 @@ static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
        if (skb->len < sizeof(struct iphdr) ||
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
-       return nf_conntrack_in(dev_net(state->out), PF_INET, ops->hooknum, skb);
+       return nf_conntrack_in(state->net, PF_INET, ops->hooknum, skb);
 }
 
 /* Connection tracking may drop packets, but never alters them, so
index 561cd4b8fc6e07b49222d788651e93c42ee3adcb..28ef8a913130b6915f22b1c6ec837ece145c84db 100644 (file)
@@ -411,8 +411,9 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);
 
-       err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb,
-                     NULL, rt->dst.dev, dst_output_sk);
+       err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
+                     net, sk, skb, NULL, rt->dst.dev,
+                     dst_output_okfn);
        if (err > 0)
                err = net_xmit_errno(err);
        if (err)
index da427a4a33feff4583cd8a21d5c58f37f65c3d02..80f7c5b7b832322d398d1ff93164effcad4104f2 100644 (file)
@@ -1712,6 +1712,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                goto martian_source;
 
        res.fi = NULL;
+       res.table = NULL;
        if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
                goto brd_input;
 
@@ -1834,6 +1835,7 @@ no_route:
        RT_CACHE_STAT_INC(in_no_route);
        res.type = RTN_UNREACHABLE;
        res.fi = NULL;
+       res.table = NULL;
        goto local_input;
 
        /*
index a8f515bb19c4bbd2d379c6f5c29f9a3d02e8bc96..a62e9c76d485fb1f316da1323a9982d3d3019e94 100644 (file)
@@ -6228,6 +6228,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        }
 
        tcp_rsk(req)->snt_isn = isn;
+       tcp_rsk(req)->txhash = net_tx_rndhash();
        tcp_openreq_init_rwin(req, sk, dst);
        fastopen = !want_cookie &&
                   tcp_try_fastopen(sk, skb, req, &foc, dst);
index 93898e093d4e655537665602337de3c6c35cab70..d671d742a2398d04ce2d6f5126ab941a36bbc1cc 100644 (file)
@@ -1276,8 +1276,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
+       newsk->sk_txhash      = tcp_rsk(req)->txhash;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
-       sk_set_txhash(newsk);
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;
index f9a8a12b62ee64d954ae9a4aab75bcdce687650b..d0ad3554c3332b5f32c48e7dcf28cf05484754a8 100644 (file)
@@ -2987,6 +2987,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        rcu_read_lock();
        md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
 #endif
+       skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
        tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
                                             foc) + sizeof(*th);
 
@@ -3505,6 +3506,7 @@ int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
        struct flowi fl;
        int res;
 
+       tcp_rsk(req)->txhash = net_tx_rndhash();
        res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
        if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
index 60b032f58ccc9ffca8617de21c72288b919889b7..62e1e72db4612d0aa5c912ac7ec7aeea24123a35 100644 (file)
@@ -22,7 +22,8 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
        return xfrm4_extract_header(skb);
 }
 
-static inline int xfrm4_rcv_encap_finish(struct sock *sk, struct sk_buff *skb)
+static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
+                                        struct sk_buff *skb)
 {
        if (!skb_dst(skb)) {
                const struct iphdr *iph = ip_hdr(skb);
@@ -52,8 +53,8 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
 
-       NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, NULL, skb,
-               skb->dev, NULL,
+       NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
+               dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                xfrm4_rcv_encap_finish);
        return 0;
 }
index 2878dbfffeb7e769a32079f1a6b80061136a7efc..cd6be736e19fcb06aa68f42e07eb51c5ce2c10d2 100644 (file)
@@ -80,14 +80,14 @@ int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
        return xfrm_output(sk, skb);
 }
 
-static int __xfrm4_output(struct sock *sk, struct sk_buff *skb)
+static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct xfrm_state *x = skb_dst(skb)->xfrm;
 
 #ifdef CONFIG_NETFILTER
        if (!x) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
-               return dst_output_sk(sk, skb);
+               return dst_output(sk, skb);
        }
 #endif
 
@@ -96,8 +96,11 @@ static int __xfrm4_output(struct sock *sk, struct sk_buff *skb)
 
 int xfrm4_output(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
-                           NULL, skb_dst(skb)->dev, __xfrm4_output,
+       struct net *net = dev_net(skb_dst(skb)->dev);
+
+       return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+                           net, sk, skb, NULL, skb_dst(skb)->dev,
+                           __xfrm4_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
index adba03ac7ce9671f6dba419d163672b9520743bd..9075acf081dda32e30185886714f34f61b0b1f7d 100644 (file)
@@ -47,7 +47,7 @@
 #include <net/inet_ecn.h>
 #include <net/dst_metadata.h>
 
-int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
+int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
                const struct inet6_protocol *ipprot;
@@ -109,7 +109,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        if (hdr->version != 6)
                goto err;
 
-       IP6_ADD_STATS_BH(dev_net(dev), idev,
+       IP6_ADD_STATS_BH(net, idev,
                         IPSTATS_MIB_NOECTPKTS +
                                (ipv6_get_dsfield(hdr) & INET_ECN_MASK),
                         max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
@@ -183,8 +183,8 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);
 
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb,
-                      dev, NULL,
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
+                      net, NULL, skb, dev, NULL,
                       ip6_rcv_finish);
 err:
        IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
@@ -199,9 +199,8 @@ drop:
  */
 
 
-static int ip6_input_finish(struct sock *sk, struct sk_buff *skb)
+static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       struct net *net = dev_net(skb_dst(skb)->dev);
        const struct inet6_protocol *ipprot;
        struct inet6_dev *idev;
        unsigned int nhoff;
@@ -278,8 +277,8 @@ discard:
 
 int ip6_input(struct sk_buff *skb)
 {
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, NULL, skb,
-                      skb->dev, NULL,
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
+                      dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                       ip6_input_finish);
 }
 
index 26ea4793074004d0af1026bb378860b53baa0ad2..291a07be5dfbe3be2834cd977eea288e67832628 100644 (file)
@@ -60,6 +60,7 @@ static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *dev = dst->dev;
+       struct net *net = dev_net(dev);
        struct neighbour *neigh;
        struct in6_addr *nexthop;
        int ret;
@@ -71,7 +72,7 @@ static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
                if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
-                   ((mroute6_socket(dev_net(dev), skb) &&
+                   ((mroute6_socket(net, skb) &&
                     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
                     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
                                         &ipv6_hdr(skb)->saddr))) {
@@ -82,19 +83,18 @@ static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
                         */
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
-                                       sk, newskb, NULL, newskb->dev,
+                                       net, sk, newskb, NULL, newskb->dev,
                                        dev_loopback_xmit);
 
                        if (ipv6_hdr(skb)->hop_limit == 0) {
-                               IP6_INC_STATS(dev_net(dev), idev,
+                               IP6_INC_STATS(net, idev,
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return 0;
                        }
                }
 
-               IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
-                               skb->len);
+               IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
 
                if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
                    IPV6_ADDR_SCOPE_NODELOCAL &&
@@ -116,13 +116,12 @@ static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
        }
        rcu_read_unlock_bh();
 
-       IP6_INC_STATS(dev_net(dst->dev),
-                     ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+       IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EINVAL;
 }
 
-static int ip6_finish_output(struct sock *sk, struct sk_buff *skb)
+static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
            dst_allfrag(skb_dst(skb)) ||
@@ -136,15 +135,16 @@ int ip6_output(struct sock *sk, struct sk_buff *skb)
 {
        struct net_device *dev = skb_dst(skb)->dev;
        struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+       struct net *net = dev_net(dev);
+
        if (unlikely(idev->cnf.disable_ipv6)) {
-               IP6_INC_STATS(dev_net(dev), idev,
-                             IPSTATS_MIB_OUTDISCARDS);
+               IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
                return 0;
        }
 
-       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
-                           NULL, dev,
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+                           net, sk, skb, NULL, dev,
                            ip6_finish_output,
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
@@ -224,8 +224,9 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_OUT, skb->len);
-               return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
-                              NULL, dst->dev, dst_output_sk);
+               return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+                              net, sk, skb, NULL, dst->dev,
+                              dst_output_okfn);
        }
 
        skb->dev = dst->dev;
@@ -317,10 +318,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
        return 0;
 }
 
-static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb)
+static inline int ip6_forward_finish(struct net *net, struct sock *sk,
+                                    struct sk_buff *skb)
 {
        skb_sender_cpu_clear(skb);
-       return dst_output_sk(sk, skb);
+       return dst_output(sk, skb);
 }
 
 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
@@ -512,8 +514,8 @@ int ip6_forward(struct sk_buff *skb)
 
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
        IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
-                      skb->dev, dst->dev,
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
+                      net, NULL, skb, skb->dev, dst->dev,
                       ip6_forward_finish);
 
 error:
index 0224c032dca5dca98ea0146bcdf52c179fa23f6d..f96f1c19b4a8842cbc29dbe1288e0d001f34bd7e 100644 (file)
@@ -482,7 +482,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                return -EMSGSIZE;
        }
 
-       err = dst_output(skb);
+       err = dst_output(skb->sk, skb);
        if (net_xmit_eval(err) == 0) {
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 
index 0e004cc42a22b1593fa308f3adac735568ddf2ee..5e5d16e7ce8532ea9599f6c4b006f4934b0e265e 100644 (file)
@@ -1985,13 +1985,13 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
 }
 #endif
 
-static inline int ip6mr_forward2_finish(struct sock *sk, struct sk_buff *skb)
+static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                         IPSTATS_MIB_OUTFORWDATAGRAMS);
-       IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+       IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
                         IPSTATS_MIB_OUTOCTETS, skb->len);
-       return dst_output_sk(sk, skb);
+       return dst_output(sk, skb);
 }
 
 /*
@@ -2063,8 +2063,8 @@ static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
 
        IP6CB(skb)->flags |= IP6SKB_FORWARDED;
 
-       return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
-                      skb->dev, dev,
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
+                      net, NULL, skb, skb->dev, dev,
                       ip6mr_forward2_finish);
 
 out_free:
index 083b2927fc67aaa3939fff42a1c6ee9f1ca70afe..a8bf57ca74d3a171d4a939a7e919760f9e117d7d 100644 (file)
@@ -1645,8 +1645,8 @@ static void mld_sendpack(struct sk_buff *skb)
        payload_len = skb->len;
 
        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
-                     net->ipv6.igmp_sk, skb, NULL, skb->dev,
-                     dst_output_sk);
+                     net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
+                     dst_output_okfn);
 out:
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
@@ -2008,8 +2008,9 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        }
 
        skb_dst_set(skb, dst);
-       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
-                     NULL, skb->dev, dst_output_sk);
+       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+                     net, sk, skb, NULL, skb->dev,
+                     dst_output_okfn);
 out:
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, type);
index 64a71354b069dd816199d1b1e23fe78082f2406a..dde5a1e5875add991ea57965045d672c2b80a81c 100644 (file)
@@ -463,9 +463,9 @@ static void ndisc_send_skb(struct sk_buff *skb,
        idev = __in6_dev_get(dst->dev);
        IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 
-       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
-                     NULL, dst->dev,
-                     dst_output_sk);
+       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+                     net, sk, skb, NULL, dst->dev,
+                     dst_output_okfn);
        if (!err) {
                ICMP6MSGOUT_INC_STATS(net, idev, type);
                ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
index 0771991ed812aebef403e60ace6e7df1dd6f550c..cd9b401231d30c5304d3c753631d45ef42786e23 100644 (file)
@@ -275,7 +275,8 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
        return 0;
 }
 
-static void trace_packet(const struct sk_buff *skb,
+static void trace_packet(struct net *net,
+                        const struct sk_buff *skb,
                         unsigned int hook,
                         const struct net_device *in,
                         const struct net_device *out,
@@ -287,7 +288,6 @@ static void trace_packet(const struct sk_buff *skb,
        const char *hookname, *chainname, *comment;
        const struct ip6t_entry *iter;
        unsigned int rulenum = 0;
-       struct net *net = dev_net(in ? in : out);
 
        root = get_entry(private->entries, private->hook_entry[hook]);
 
@@ -401,8 +401,8 @@ ip6t_do_table(struct sk_buff *skb,
 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
                /* The packet is traced: log it */
                if (unlikely(skb->nf_trace))
-                       trace_packet(skb, hook, state->in, state->out,
-                                    table->name, private, e);
+                       trace_packet(state->net, skb, hook, state->in,
+                                    state->out, table->name, private, e);
 #endif
                /* Standard target? */
                if (!t->u.kernel.target->target) {
index 1e4bf99ed16e68c68ca87e997895f66dbca4e789..4c9f3e79d75f1185d5ccec41184fec1b9ae4f83f 100644 (file)
@@ -320,7 +320,7 @@ static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
                                       const struct nf_hook_state *nhs)
 {
-       struct synproxy_net *snet = synproxy_pernet(dev_net(nhs->in ? : nhs->out));
+       struct synproxy_net *snet = synproxy_pernet(nhs->net);
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        struct nf_conn_synproxy *synproxy;
index 5c33d8abc0774e52a99c20273f3349b0b5374fc7..2449005fb5dc896667740a86f5daa3b643e4bac7 100644 (file)
@@ -35,9 +35,8 @@ static unsigned int
 ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net(state->in ? state->in : state->out);
-
-       return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_filter);
+       return ip6t_do_table(skb, ops->hooknum, state,
+                            state->net->ipv6.ip6table_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
index b551f5b79fe2b7fa62278ae1f7d9327e82795253..a46dbf097d29ce5b41a496310e90dac9d29301d4 100644 (file)
@@ -58,7 +58,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
        flowlabel = *((u_int32_t *)ipv6_hdr(skb));
 
        ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, state,
-                           dev_net(state->out)->ipv6.ip6table_mangle);
+                           state->net->ipv6.ip6table_mangle);
 
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
@@ -83,10 +83,10 @@ ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                return ip6t_mangle_out(skb, state);
        if (ops->hooknum == NF_INET_POST_ROUTING)
                return ip6t_do_table(skb, ops->hooknum, state,
-                                    dev_net(state->out)->ipv6.ip6table_mangle);
+                                    state->net->ipv6.ip6table_mangle);
        /* INPUT/FORWARD */
        return ip6t_do_table(skb, ops->hooknum, state,
-                            dev_net(state->in)->ipv6.ip6table_mangle);
+                            state->net->ipv6.ip6table_mangle);
 }
 
 static struct nf_hook_ops *mangle_ops __read_mostly;
index c3a7f7af0ed4d183d00a5f50307f44fa44399460..a56451de127f1ccaef91605c42eb7be2a1c2c2f2 100644 (file)
@@ -35,9 +35,8 @@ static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops,
                                          const struct nf_hook_state *state,
                                          struct nf_conn *ct)
 {
-       struct net *net = nf_ct_net(ct);
-
-       return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_nat);
+       return ip6t_do_table(skb, ops->hooknum, state,
+                            state->net->ipv6.ip6table_nat);
 }
 
 static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops,
index 0b33caad2b69254e29af5ff38484e37a0dc6c711..18e831e35782b37827d0909c8104f63480d4fe74 100644 (file)
@@ -22,9 +22,8 @@ static unsigned int
 ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                  const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net(state->in ? state->in : state->out);
-
-       return ip6t_do_table(skb, ops->hooknum, state, net->ipv6.ip6table_raw);
+       return ip6t_do_table(skb, ops->hooknum, state,
+                            state->net->ipv6.ip6table_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index fcef83c25f7b3281a92a2d5be27512e057dddfff..83bc96ae5d73845d8f6871c142168437623af61a 100644 (file)
@@ -39,10 +39,8 @@ static unsigned int
 ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                       const struct nf_hook_state *state)
 {
-       const struct net *net = dev_net(state->in ? state->in : state->out);
-
        return ip6t_do_table(skb, ops->hooknum, state,
-                            net->ipv6.ip6table_security);
+                            state->net->ipv6.ip6table_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
index 7302900c321aff58fcb7dc21794b50e04b1942d8..1ef1b79def5689b720c9ea23e1d060182569d57f 100644 (file)
@@ -169,7 +169,7 @@ static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
                                      const struct nf_hook_state *state)
 {
-       return nf_conntrack_in(dev_net(state->in), PF_INET6, ops->hooknum, skb);
+       return nf_conntrack_in(state->net, PF_INET6, ops->hooknum, skb);
 }
 
 static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
@@ -181,7 +181,7 @@ static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
                net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
                return NF_ACCEPT;
        }
-       return nf_conntrack_in(dev_net(state->out), PF_INET6, ops->hooknum, skb);
+       return nf_conntrack_in(state->net, PF_INET6, ops->hooknum, skb);
 }
 
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
index 6d9c0b3d5b8c49d111cca7bd70b9bc5229f0a263..6b576be3c83e802704d67f789ac8686a63ea64a6 100644 (file)
@@ -74,7 +74,7 @@ static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
 
        nf_ct_frag6_consume_orig(reasm);
 
-       NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, state->sk, reasm,
+       NF_HOOK_THRESH(NFPROTO_IPV6, ops->hooknum, state->net, state->sk, reasm,
                       state->in, state->out,
                       state->okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
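
[Note] The same namespace plumbing reaches the hook functions themselves: struct nf_hook_state carries the net pointer, so the ip6table_*, SYNPROXY, conntrack and defrag hooks above stop guessing it from whichever device happens to be non-NULL. A sketch showing only the members these hunks actually exercise (field order and the omitted members are an assumption):

    struct nf_hook_state {
            /* ... */
            struct net        *net;   /* set once when the hook chain is entered */
            struct net_device *in;
            struct net_device *out;
            struct sock       *sk;
            int (*okfn)(struct net *, struct sock *, struct sk_buff *);
    };

    /* which retires the fragile idiom repeated across the old hooks: */
    const struct net *net = dev_net(state->in ? state->in : state->out);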
 
index 928a0fb0b74406cdf32c89fe7db054b40476dc54..e77102c4f8045e881bf3bdf86d43dc5323f6ae31 100644 (file)
@@ -140,6 +140,7 @@ EXPORT_SYMBOL(ip6_dst_hoplimit);
 
 static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
 {
+       struct net *net = dev_net(skb_dst(skb)->dev);
        int len;
 
        len = skb->len - sizeof(struct ipv6hdr);
@@ -148,8 +149,9 @@ static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
        ipv6_hdr(skb)->payload_len = htons(len);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
-       return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
-                      NULL, skb_dst(skb)->dev, dst_output_sk);
+       return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+                      net, sk, skb, NULL, skb_dst(skb)->dev,
+                      dst_output_okfn);
 }
 
 int __ip6_local_out(struct sk_buff *skb)
@@ -164,7 +166,7 @@ int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
 
        err = __ip6_local_out_sk(sk, skb);
        if (likely(err == 1))
-               err = dst_output_sk(sk, skb);
+               err = dst_output(sk, skb);
 
        return err;
 }
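
[Note] The output-path hunks follow the same pattern for dst_output: the one-argument dst_output(skb) and the dst_output_sk(sk, skb) variant collapse into dst_output(sk, skb), with dst_output_okfn passed wherever a three-argument okfn is required (as in __ip6_local_out_sk above, mld_sendpack, ndisc_send_skb and the IPVS call sites). The adapter's definition is not part of this diff; the following is an inferred sketch of its likely shape, not a quote:

    static int dst_output_okfn(struct net *net, struct sock *sk, struct sk_buff *skb)
    {
            return dst_output(sk, skb);   /* dst_output() itself now takes (sk, skb) */
    }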
index fdbada1569a37348b47b60769f7d679741b21d0a..fec0151522a25d0bcd787c0353d23f5aba11c780 100644 (file)
@@ -614,6 +614,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
                        unsigned int flags)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
+       struct net *net = sock_net(sk);
        struct ipv6hdr *iph;
        struct sk_buff *skb;
        int err;
@@ -652,9 +653,9 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        if (err)
                goto error_fault;
 
-       IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
-       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
-                     NULL, rt->dst.dev, dst_output_sk);
+       IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
+       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
+                     NULL, rt->dst.dev, dst_output_okfn);
        if (err > 0)
                err = net_xmit_errno(err);
        if (err)
@@ -666,7 +667,7 @@ error_fault:
        err = -EFAULT;
        kfree_skb(skb);
 error:
-       IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+       IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
        if (err == -ENOBUFS && !np->recverr)
                err = 0;
        return err;
index 97d9314ea3611eeadd576d2fd8919ae60d468891..f9c0e264067104260ed586836364da10019851aa 100644 (file)
@@ -1090,7 +1090,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;
 
-       sk_set_txhash(newsk);
+       newsk->sk_txhash = tcp_rsk(req)->txhash;
 
        /* Now IPv6 options...
 
index 74bd17882a2fe5126012fae7625254a56f14e20d..0eaab1fa6be5751734d98d900ae4d296ca12651e 100644 (file)
@@ -42,8 +42,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
        ipv6_hdr(skb)->payload_len = htons(skb->len);
        __skb_push(skb, skb->data - skb_network_header(skb));
 
-       NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, NULL, skb,
-               skb->dev, NULL,
+       NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
+               dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                ip6_rcv_finish);
        return -1;
 }
index 09c76a7b474dbcb12cae8aeba6fcba375d0d329a..0c3e9ffcf23122b213b4c310838e0ab7dd0f748e 100644 (file)
@@ -131,7 +131,7 @@ int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
        return xfrm_output(sk, skb);
 }
 
-static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
+static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_state *x = dst->xfrm;
@@ -140,7 +140,7 @@ static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
 #ifdef CONFIG_NETFILTER
        if (!x) {
                IP6CB(skb)->flags |= IP6SKB_REROUTED;
-               return dst_output_sk(sk, skb);
+               return dst_output(sk, skb);
        }
 #endif
 
@@ -168,7 +168,10 @@ static int __xfrm6_output(struct sock *sk, struct sk_buff *skb)
 
 int xfrm6_output(struct sock *sk, struct sk_buff *skb)
 {
-       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
-                           NULL, skb_dst(skb)->dev, __xfrm6_output,
+       struct net *net = dev_net(skb_dst(skb)->dev);
+
+       return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+                           net, sk, skb,  NULL, skb_dst(skb)->dev,
+                           __xfrm6_output,
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
index 258a0b0e82a293db38533a114c5c3a0bb2c03b9f..cc7299033af808b5acfe447227c8194d1d06b809 100644 (file)
@@ -573,8 +573,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
                skb_forward_csum(skb);
                if (!skb->sk)
                        skb_sender_cpu_clear(skb);
-               NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
-                       NULL, skb_dst(skb)->dev, dst_output_sk);
+               NF_HOOK(pf, NF_INET_LOCAL_OUT, ip_vs_conn_net(cp), NULL, skb,
+                       NULL, skb_dst(skb)->dev, dst_output_okfn);
        } else
                ret = NF_ACCEPT;
 
@@ -595,8 +595,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
                skb_forward_csum(skb);
                if (!skb->sk)
                        skb_sender_cpu_clear(skb);
-               NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
-                       NULL, skb_dst(skb)->dev, dst_output_sk);
+               NF_HOOK(pf, NF_INET_LOCAL_OUT, ip_vs_conn_net(cp), NULL, skb,
+                       NULL, skb_dst(skb)->dev, dst_output_okfn);
        } else
                ret = NF_ACCEPT;
        return ret;
index 96777f9a9350b3a684ae56b50c77400fde3ad305..9f3c3c25fa733289adb735a095294f8237a3263d 100644 (file)
@@ -215,7 +215,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
-               entry->state.okfn(entry->state.sk, skb);
+               entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
index a5cd6d90b78b16ebd0c96c4d1d426ad7aea6e86b..41583e30051b823bd401b23392875f7fd8079d45 100644 (file)
@@ -670,8 +670,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
        struct nfqnl_instance *queue;
        struct sk_buff *skb, *segs;
        int err = -ENOBUFS;
-       struct net *net = dev_net(entry->state.in ?
-                                 entry->state.in : entry->state.out);
+       struct net *net = entry->state.net;
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 
        /* rcu_read_lock()ed by nf_hook_slow() */
index 6631f4f1e39be713029c8b9b504db4ea741fb3e6..692b3e67fb54418ffb143491b9e2f1dc82d8d503 100644 (file)
@@ -808,7 +808,7 @@ void rxrpc_put_connection(struct rxrpc_connection *conn)
 
        ASSERTCMP(atomic_read(&conn->usage), >, 0);
 
-       conn->put_time = get_seconds();
+       conn->put_time = ktime_get_seconds();
        if (atomic_dec_and_test(&conn->usage)) {
                _debug("zombie");
                rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
@@ -852,7 +852,7 @@ static void rxrpc_connection_reaper(struct work_struct *work)
 
        _enter("");
 
-       now = get_seconds();
+       now = ktime_get_seconds();
        earliest = ULONG_MAX;
 
        write_lock_bh(&rxrpc_connection_lock);
index aef1bd294e1796b68052936bdc8d0b62b52814fe..2934a73a5981ad154888904b67f8082fedac4a3c 100644 (file)
@@ -208,7 +208,7 @@ struct rxrpc_transport {
        struct rb_root          server_conns;   /* server connections on this transport */
        struct list_head        link;           /* link in master session list */
        struct sk_buff_head     error_queue;    /* error packets awaiting processing */
-       time_t                  put_time;       /* time at which to reap */
+       unsigned long           put_time;       /* time at which to reap */
        spinlock_t              client_lock;    /* client connection allocation lock */
        rwlock_t                conn_lock;      /* lock for active/dead connections */
        atomic_t                usage;
@@ -256,7 +256,7 @@ struct rxrpc_connection {
        struct rxrpc_crypt      csum_iv;        /* packet checksum base */
        unsigned long           events;
 #define RXRPC_CONN_CHALLENGE   0               /* send challenge packet */
-       time_t                  put_time;       /* time at which to reap */
+       unsigned long           put_time;       /* time at which to reap */
        rwlock_t                lock;           /* access lock */
        spinlock_t              state_lock;     /* state-change lock */
        atomic_t                usage;
index 1976dec84f297cfb126df6bcd53129f1d518001b..9946467f16b41a9f5acfc59865f085c04e007b77 100644 (file)
@@ -189,7 +189,7 @@ void rxrpc_put_transport(struct rxrpc_transport *trans)
 
        ASSERTCMP(atomic_read(&trans->usage), >, 0);
 
-       trans->put_time = get_seconds();
+       trans->put_time = ktime_get_seconds();
        if (unlikely(atomic_dec_and_test(&trans->usage))) {
                _debug("zombie");
                /* let the reaper determine the timeout to avoid a race with
@@ -226,7 +226,7 @@ static void rxrpc_transport_reaper(struct work_struct *work)
 
        _enter("");
 
-       now = get_seconds();
+       now = ktime_get_seconds();
        earliest = ULONG_MAX;
 
        /* extract all the transports that have been dead too long */
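
[Note] The rxrpc hunks switch the connection and transport reap timestamps from get_seconds() stored in a time_t to ktime_get_seconds() stored in an unsigned long. Reading between the lines (the commit message is not part of this diff): get_seconds() is wall-clock based and time_t is 32 bits on many platforms, while ktime_get_seconds() is monotonic, which suits an expiry comparison; the reaper logic itself is unchanged. The field and update as they read after these hunks:

    unsigned long put_time;                 /* time at which to reap */
    conn->put_time = ktime_get_seconds();   /* monotonic seconds, no wall-clock jumps */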
index 559bfa011bda7b7c6fed76ef7df8d4cd1d861bc3..0bc6f912f870297a6d91ef04acca76f6e1915339 100644 (file)
@@ -72,6 +72,7 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
        case TC_ACT_PIPE:
        case TC_ACT_RECLASSIFY:
        case TC_ACT_OK:
+       case TC_ACT_REDIRECT:
                action = filter_res;
                break;
        case TC_ACT_SHOT:
index e5168f8b9640964ce2dd95a896a24f6c986a959a..0590816ab7b03bf0571e81933b613fb4b96288d0 100644 (file)
@@ -38,6 +38,7 @@ struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct list_head link;
        struct tcf_result res;
+       bool exts_integrated;
        struct tcf_exts exts;
        u32 handle;
        union {
@@ -52,6 +53,7 @@ struct cls_bpf_prog {
 
 static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
+       [TCA_BPF_FLAGS]         = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
        [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
@@ -59,6 +61,23 @@ static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
 };
 
+static int cls_bpf_exec_opcode(int code)
+{
+       switch (code) {
+       case TC_ACT_OK:
+       case TC_ACT_RECLASSIFY:
+       case TC_ACT_SHOT:
+       case TC_ACT_PIPE:
+       case TC_ACT_STOLEN:
+       case TC_ACT_QUEUED:
+       case TC_ACT_REDIRECT:
+       case TC_ACT_UNSPEC:
+               return code;
+       default:
+               return TC_ACT_UNSPEC;
+       }
+}
+
 static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
 {
@@ -79,6 +98,8 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res;
 
+               qdisc_skb_cb(skb)->tc_classid = prog->res.classid;
+
                if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
@@ -88,6 +109,16 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                }
 
+               if (prog->exts_integrated) {
+                       res->class = prog->res.class;
+                       res->classid = qdisc_skb_cb(skb)->tc_classid;
+
+                       ret = cls_bpf_exec_opcode(filter_res);
+                       if (ret == TC_ACT_UNSPEC)
+                               continue;
+                       break;
+               }
+
                if (filter_res == 0)
                        continue;
 
@@ -195,8 +226,7 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
        return ret;
 }
 
-static int cls_bpf_prog_from_ops(struct nlattr **tb,
-                                struct cls_bpf_prog *prog, u32 classid)
+static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
 {
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
@@ -230,15 +260,13 @@ static int cls_bpf_prog_from_ops(struct nlattr **tb,
        prog->bpf_ops = bpf_ops;
        prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_name = NULL;
-
        prog->filter = fp;
-       prog->res.classid = classid;
 
        return 0;
 }
 
-static int cls_bpf_prog_from_efd(struct nlattr **tb,
-                                struct cls_bpf_prog *prog, u32 classid)
+static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
+                                const struct tcf_proto *tp)
 {
        struct bpf_prog *fp;
        char *name = NULL;
@@ -268,9 +296,7 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb,
        prog->bpf_ops = NULL;
        prog->bpf_fd = bpf_fd;
        prog->bpf_name = name;
-
        prog->filter = fp;
-       prog->res.classid = classid;
 
        return 0;
 }
@@ -280,8 +306,8 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   unsigned long base, struct nlattr **tb,
                                   struct nlattr *est, bool ovr)
 {
+       bool is_bpf, is_ebpf, have_exts = false;
        struct tcf_exts exts;
-       bool is_bpf, is_ebpf;
        u32 classid;
        int ret;
 
@@ -298,9 +324,22 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                return ret;
 
        classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
+       if (tb[TCA_BPF_FLAGS]) {
+               u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
+
+               if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
+                       tcf_exts_destroy(&exts);
+                       return -EINVAL;
+               }
+
+               have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
+       }
+
+       prog->res.classid = classid;
+       prog->exts_integrated = have_exts;
 
-       ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog, classid) :
-                      cls_bpf_prog_from_efd(tb, prog, classid);
+       ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
+                      cls_bpf_prog_from_efd(tb, prog, tp);
        if (ret < 0) {
                tcf_exts_destroy(&exts);
                return ret;
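
[Note] The cls_bpf changes add a direct-action mode: with TCA_BPF_FLAG_ACT_DIRECT set, prog->exts_integrated is true, the classid is seeded from prog->res.classid into qdisc_skb_cb(skb)->tc_classid before the program runs, and the program's return value is taken as the TC verdict. cls_bpf_exec_opcode() above whitelists the verdicts; anything else degrades to TC_ACT_UNSPEC, which the classify loop treats as "no decision, try the next program". A user-space restatement of that mapping, compiled against the uapi header rather than re-deriving the constant values:

    #include <stdio.h>
    #include <linux/pkt_cls.h>

    /* mirrors the cls_bpf_exec_opcode() whitelist added in the hunk above */
    static int exec_opcode(int code)
    {
            switch (code) {
            case TC_ACT_OK:
            case TC_ACT_RECLASSIFY:
            case TC_ACT_SHOT:
            case TC_ACT_PIPE:
            case TC_ACT_STOLEN:
            case TC_ACT_QUEUED:
            case TC_ACT_REDIRECT:   /* now also accepted by act_bpf, see its hunk */
            case TC_ACT_UNSPEC:
                    return code;
            default:
                    return TC_ACT_UNSPEC;   /* unknown code: fall through to next prog */
            }
    }

    int main(void)
    {
            printf("%d %d\n", exec_opcode(TC_ACT_SHOT), exec_opcode(12345));
            return 0;
    }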
index c4d45fd8c551ef5b2c42910459b06c35667590ae..f357f34d02d2a5e9f4ead0ebe77ec44dca79f89a 100644 (file)
 
 #define NO_DEFAULT_INDEX       (1 << 16)
 
+struct mask_value {
+       u8                      mask;
+       u8                      value;
+};
+
 struct dsmark_qdisc_data {
        struct Qdisc            *q;
        struct tcf_proto __rcu  *filter_list;
-       u8                      *mask;  /* "owns" the array */
-       u8                      *value;
+       struct mask_value       *mv;
        u16                     indices;
+       u8                      set_tc_index;
        u32                     default_index;  /* index range is 0...0xffff */
-       int                     set_tc_index;
+#define DSMARK_EMBEDDED_SZ     16
+       struct mask_value       embedded[DSMARK_EMBEDDED_SZ];
 };
 
 static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
@@ -116,7 +122,6 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_DSMARK_MAX + 1];
        int err = -EINVAL;
-       u8 mask = 0;
 
        pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
                 __func__, sch, p, classid, parent, *arg);
@@ -133,14 +138,11 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
        if (err < 0)
                goto errout;
 
-       if (tb[TCA_DSMARK_MASK])
-               mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
-
        if (tb[TCA_DSMARK_VALUE])
-               p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
+               p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);
 
        if (tb[TCA_DSMARK_MASK])
-               p->mask[*arg - 1] = mask;
+               p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
 
        err = 0;
 
@@ -155,8 +157,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
        if (!dsmark_valid_index(p, arg))
                return -EINVAL;
 
-       p->mask[arg - 1] = 0xff;
-       p->value[arg - 1] = 0;
+       p->mv[arg - 1].mask = 0xff;
+       p->mv[arg - 1].value = 0;
 
        return 0;
 }
@@ -173,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
                return;
 
        for (i = 0; i < p->indices; i++) {
-               if (p->mask[i] == 0xff && !p->value[i])
+               if (p->mv[i].mask == 0xff && !p->mv[i].value)
                        goto ignore;
                if (walker->count >= walker->skip) {
                        if (walker->fn(sch, i + 1, walker) < 0) {
@@ -291,12 +293,12 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
 
        switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
-               ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
-                                   p->value[index]);
+               ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
+                                   p->mv[index].value);
                        break;
        case htons(ETH_P_IPV6):
-               ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index],
-                                   p->value[index]);
+               ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
+                                   p->mv[index].value);
                        break;
        default:
                /*
@@ -304,7 +306,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
                 * This way, we can send non-IP traffic through dsmark
                 * and don't need yet another qdisc as a bypass.
                 */
-               if (p->mask[index] != 0xff || p->value[index])
+               if (p->mv[index].mask != 0xff || p->mv[index].value)
                        pr_warn("%s: unsupported protocol %d\n",
                                __func__, ntohs(tc_skb_protocol(skb)));
                break;
@@ -346,7 +348,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
        int err = -EINVAL;
        u32 default_index = NO_DEFAULT_INDEX;
        u16 indices;
-       u8 *mask;
+       int i;
 
        pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
 
@@ -366,18 +368,18 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_DSMARK_DEFAULT_INDEX])
                default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
 
-       mask = kmalloc(indices * 2, GFP_KERNEL);
-       if (mask == NULL) {
+       if (indices <= DSMARK_EMBEDDED_SZ)
+               p->mv = p->embedded;
+       else
+               p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
+       if (!p->mv) {
                err = -ENOMEM;
                goto errout;
        }
-
-       p->mask = mask;
-       memset(p->mask, 0xff, indices);
-
-       p->value = p->mask + indices;
-       memset(p->value, 0, indices);
-
+       for (i = 0; i < indices; i++) {
+               p->mv[i].mask = 0xff;
+               p->mv[i].value = 0;
+       }
        p->indices = indices;
        p->default_index = default_index;
        p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
@@ -410,7 +412,8 @@ static void dsmark_destroy(struct Qdisc *sch)
 
        tcf_destroy_chain(&p->filter_list);
        qdisc_destroy(p->q);
-       kfree(p->mask);
+       if (p->mv != p->embedded)
+               kfree(p->mv);
 }
 
 static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
@@ -430,8 +433,8 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
-       if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) ||
-           nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]))
+       if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
+           nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
                goto nla_put_failure;
 
        return nla_nest_end(skb, opts);
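
[Note] The sch_dsmark rework replaces the two parallel u8 arrays (mask[] and value[]) with a single array of struct mask_value, and skips the heap allocation entirely when indices fits in the 16-entry embedded array. The same pattern in isolation, as a runnable user-space sketch whose names and 0xff/0 defaults mirror the hunk:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct mask_value {
            uint8_t mask;
            uint8_t value;
    };

    #define EMBEDDED_SZ 16

    struct dsmark_like {
            struct mask_value *mv;          /* points at embedded[] or a heap array */
            uint16_t indices;
            struct mask_value embedded[EMBEDDED_SZ];
    };

    static int dsmark_like_init(struct dsmark_like *p, uint16_t indices)
    {
            int i;

            /* small tables live inside the object: one allocation, better locality */
            if (indices <= EMBEDDED_SZ)
                    p->mv = p->embedded;
            else
                    p->mv = malloc((size_t)indices * sizeof(*p->mv));
            if (!p->mv)
                    return -1;

            for (i = 0; i < indices; i++) {
                    p->mv[i].mask = 0xff;   /* default: pass the DS field through */
                    p->mv[i].value = 0;
            }
            p->indices = indices;
            return 0;
    }

    static void dsmark_like_destroy(struct dsmark_like *p)
    {
            if (p->mv != p->embedded)       /* only free what was heap-allocated */
                    free(p->mv);
    }

    int main(void)
    {
            struct dsmark_like p;

            if (dsmark_like_init(&p, 8))
                    return 1;
            printf("uses embedded array: %d\n", p.mv == p.embedded);
            dsmark_like_destroy(&p);
            return 0;
    }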
index 68ada2ca4b60707ac70b45aa6f0a9ea142a9dd16..c48a4b8582bb0970626a8c073b6068d8c5bddc19 100644 (file)
@@ -19,7 +19,7 @@
 #include <net/dst.h>
 #include <net/xfrm.h>
 
-static int xfrm_output2(struct sock *sk, struct sk_buff *skb);
+static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 static int xfrm_skb_check_space(struct sk_buff *skb)
 {
@@ -131,6 +131,8 @@ out:
 
 int xfrm_output_resume(struct sk_buff *skb, int err)
 {
+       struct net *net = xs_net(skb_dst(skb)->xfrm);
+
        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
                nf_reset(skb);
 
@@ -139,10 +141,10 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
                        goto out;
 
                if (!skb_dst(skb)->xfrm)
-                       return dst_output(skb);
+                       return dst_output(skb->sk, skb);
 
                err = nf_hook(skb_dst(skb)->ops->family,
-                             NF_INET_POST_ROUTING, skb->sk, skb,
+                             NF_INET_POST_ROUTING, net, skb->sk, skb,
                              NULL, skb_dst(skb)->dev, xfrm_output2);
                if (unlikely(err != 1))
                        goto out;
@@ -156,12 +158,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(xfrm_output_resume);
 
-static int xfrm_output2(struct sock *sk, struct sk_buff *skb)
+static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        return xfrm_output_resume(skb, 1);
 }
 
-static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb)
+static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct sk_buff *segs;
 
@@ -177,7 +179,7 @@ static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb)
                int err;
 
                segs->next = NULL;
-               err = xfrm_output2(sk, segs);
+               err = xfrm_output2(net, sk, segs);
 
                if (unlikely(err)) {
                        kfree_skb_list(nskb);
@@ -196,7 +198,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
        int err;
 
        if (skb_is_gso(skb))
-               return xfrm_output_gso(sk, skb);
+               return xfrm_output_gso(net, sk, skb);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                err = skb_checksum_help(skb);
@@ -207,7 +209,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       return xfrm_output2(sk, skb);
+       return xfrm_output2(net, sk, skb);
 }
 EXPORT_SYMBOL_GPL(xfrm_output);
 
index 94af3d0657859e98c61d3f9b2babebcab722aec8..e7f64bcb78a8e08eb5a41d167311a8a5e3748fef 100644 (file)
@@ -1583,8 +1583,6 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
 
                memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
                xdst->flo.ops = &xfrm_bundle_fc_ops;
-               if (afinfo->init_dst)
-                       afinfo->init_dst(net, xdst);
        } else
                xdst = ERR_PTR(-ENOBUFS);
 
@@ -1947,7 +1945,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
                skb_dst_drop(skb);
                skb_dst_set(skb, dst);
 
-               dst_output(skb);
+               dst_output(skb->sk, skb);
        }
 
 out:
index 3a44d3a272af40119b379fe1930244909499fd8d..21aa1b44c30ca1ff8ba369de84f7492df01aa518 100644 (file)
@@ -33,6 +33,10 @@ static int (*bpf_get_current_comm)(void *buf, int buf_size) =
        (void *) BPF_FUNC_get_current_comm;
 static int (*bpf_perf_event_read)(void *map, int index) =
        (void *) BPF_FUNC_perf_event_read;
+static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
+       (void *) BPF_FUNC_clone_redirect;
+static int (*bpf_redirect)(int ifindex, int flags) =
+       (void *) BPF_FUNC_redirect;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
index 9bfb2eb34563d0158dae21c97c1b37d640e8d62a..fa051b3d53ee0a8f18da0b0701e04d2962c3e4b6 100644 (file)
@@ -5,7 +5,7 @@
 #include <uapi/linux/in.h>
 #include <uapi/linux/tcp.h>
 #include <uapi/linux/filter.h>
-
+#include <uapi/linux/pkt_cls.h>
 #include "bpf_helpers.h"
 
 /* compiler workaround */
@@ -64,4 +64,26 @@ int bpf_prog1(struct __sk_buff *skb)
 
        return 0;
 }
+SEC("redirect_xmit")
+int _redirect_xmit(struct __sk_buff *skb)
+{
+       return bpf_redirect(skb->ifindex + 1, 0);
+}
+SEC("redirect_recv")
+int _redirect_recv(struct __sk_buff *skb)
+{
+       return bpf_redirect(skb->ifindex + 1, 1);
+}
+SEC("clone_redirect_xmit")
+int _clone_redirect_xmit(struct __sk_buff *skb)
+{
+       bpf_clone_redirect(skb, skb->ifindex + 1, 0);
+       return TC_ACT_SHOT;
+}
+SEC("clone_redirect_recv")
+int _clone_redirect_recv(struct __sk_buff *skb)
+{
+       bpf_clone_redirect(skb, skb->ifindex + 1, 1);
+       return TC_ACT_SHOT;
+}
 char _license[] SEC("license") = "GPL";
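
[Note] The two pairs of sample sections above illustrate the difference between the helpers declared in the bpf_helpers.h hunk: bpf_clone_redirect() clones and redirects the packet from inside the program (so the sample can drop the original with TC_ACT_SHOT afterwards), while bpf_redirect() only records the target and returns a verdict for the caller to act on, which is why TC_ACT_REDIRECT becomes a valid return code in the act_bpf and cls_bpf hunks of this same pull. The prototypes as the samples declare them; reading the final 0/1 argument as egress vs. ingress delivery is an inference from the section names, not a documented contract here:

    static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
            (void *) BPF_FUNC_clone_redirect;   /* clone skb, send it now, keep running */
    static int (*bpf_redirect)(int ifindex, int flags) =
            (void *) BPF_FUNC_redirect;         /* record target; return value is meant
                                                 * to be returned as the TC verdict */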