/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, },
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
static irqreturn_t hns3_irq_handle(int irq, void *dev)
{
	struct hns3_enet_tqp_vector *tqp_vector = dev;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}
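
/* Note: the hard IRQ handler above does no TX/RX work itself; it only
 * schedules the vector's NAPI context. The actual descriptor processing
 * happens later in hns3_nic_common_poll(), which re-enables the vector
 * interrupt via hns3_mask_vector_irq() once the ring work is done.
 */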
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}
static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
	 * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
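
/* Illustration (not part of the original source): GL0/GL1 set the minimum
 * gap between RX/TX interrupts for this vector, while RL caps the overall
 * interrupt rate. Assuming the GL value is expressed in microseconds, a
 * 20us gap corresponds to roughly 1000000 / 20 = 50000 interrupts/s per
 * direction, which matches the HNS3_INT_GL_50K default used below.
 */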
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	/* Default: disable RL */
	h->kinfo.int_rl_setting = 0;

	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}

static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int ret;

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 free_tqps, max_rss_size, max_tqps;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
	max_tqps = h->kinfo.num_tc * max_rss_size;

	return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
}
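
/* Example (added for clarity): with num_tc = 4 and a maximum RSS size of
 * 16, max_tqps = 4 * 16 = 64 queue pairs; the returned value is further
 * clamped to the TQPs actually available to this handle
 * (free_tqps + kinfo.num_tqps).
 */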
static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	return 0;

out_start_err:
	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}
static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	priv->ae_handle->last_reset_time = jiffies;
	return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	/* stop the ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}
static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->set_promisc_mode) {
		if (netdev->flags & IFF_PROMISC)
			h->ae_algo->ops->set_promisc_mode(h, 1);
		else
			h->ae_algo->ops->set_promisc_mode(h, 0);
	}
	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
		netdev_err(netdev, "sync uc address fail\n");
	if (netdev->flags & IFF_MULTICAST)
		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
			netdev_err(netdev, "sync mc address fail\n");
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type &
		    SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type &
		    SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae_set_bit(*type_cs_vlan_tso,
		     HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union l3_hdr_info l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}
546 static void hns3_set_l2l3l4_len(struct sk_buff
*skb
, u8 ol4_proto
,
547 u8 il4_proto
, u32
*type_cs_vlan_tso
,
548 u32
*ol_type_vlan_len_msec
)
558 struct gre_base_hdr
*gre
;
561 unsigned char *l2_hdr
;
562 u8 l4_proto
= ol4_proto
;
569 l3
.hdr
= skb_network_header(skb
);
570 l4
.hdr
= skb_transport_header(skb
);
572 /* compute L2 header size for normal packet, defined in 2 Bytes */
573 l2_len
= l3
.hdr
- skb
->data
;
574 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L2LEN_M
,
575 HNS3_TXD_L2LEN_S
, l2_len
>> 1);
578 if (skb
->encapsulation
) {
579 /* compute OL2 header size, defined in 2 Bytes */
581 hnae_set_field(*ol_type_vlan_len_msec
,
583 HNS3_TXD_L2LEN_S
, ol2_len
>> 1);
585 /* compute OL3 header size, defined in 4 Bytes */
586 ol3_len
= l4
.hdr
- l3
.hdr
;
587 hnae_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_L3LEN_M
,
588 HNS3_TXD_L3LEN_S
, ol3_len
>> 2);
590 /* MAC in UDP, MAC in GRE (0x6558)*/
591 if ((ol4_proto
== IPPROTO_UDP
) || (ol4_proto
== IPPROTO_GRE
)) {
592 /* switch MAC header ptr from outer to inner header.*/
593 l2_hdr
= skb_inner_mac_header(skb
);
595 /* compute OL4 header size, defined in 4 Bytes. */
596 ol4_len
= l2_hdr
- l4
.hdr
;
597 hnae_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_L4LEN_M
,
598 HNS3_TXD_L4LEN_S
, ol4_len
>> 2);
600 /* switch IP header ptr from outer to inner header */
601 l3
.hdr
= skb_inner_network_header(skb
);
603 /* compute inner l2 header size, defined in 2 Bytes. */
604 l2_len
= l3
.hdr
- l2_hdr
;
605 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L2LEN_M
,
606 HNS3_TXD_L2LEN_S
, l2_len
>> 1);
		/* skb packet types not supported by hardware,
		 * the txbd length field is not filled.
		 */
614 /* switch L4 header pointer from outer to inner */
615 l4
.hdr
= skb_inner_transport_header(skb
);
617 l4_proto
= il4_proto
;
620 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
621 l3_len
= l4
.hdr
- l3
.hdr
;
622 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3LEN_M
,
623 HNS3_TXD_L3LEN_S
, l3_len
>> 2);
625 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
628 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_M
,
629 HNS3_TXD_L4LEN_S
, l4
.tcp
->doff
);
632 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_M
,
633 HNS3_TXD_L4LEN_S
, (sizeof(struct sctphdr
) >> 2));
636 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_M
,
637 HNS3_TXD_L4LEN_S
, (sizeof(struct udphdr
) >> 2));
		/* skb packet types not supported by hardware,
		 * the txbd length field is not filled.
		 */
647 static int hns3_set_l3l4_type_csum(struct sk_buff
*skb
, u8 ol4_proto
,
648 u8 il4_proto
, u32
*type_cs_vlan_tso
,
649 u32
*ol_type_vlan_len_msec
)
656 u32 l4_proto
= ol4_proto
;
658 l3
.hdr
= skb_network_header(skb
);
660 /* define OL3 type and tunnel type(OL4).*/
661 if (skb
->encapsulation
) {
662 /* define outer network header type.*/
663 if (skb
->protocol
== htons(ETH_P_IP
)) {
665 hnae_set_field(*ol_type_vlan_len_msec
,
666 HNS3_TXD_OL3T_M
, HNS3_TXD_OL3T_S
,
667 HNS3_OL3T_IPV4_CSUM
);
669 hnae_set_field(*ol_type_vlan_len_msec
,
670 HNS3_TXD_OL3T_M
, HNS3_TXD_OL3T_S
,
671 HNS3_OL3T_IPV4_NO_CSUM
);
673 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
674 hnae_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_OL3T_M
,
675 HNS3_TXD_OL3T_S
, HNS3_OL3T_IPV6
);
678 /* define tunnel type(OL4).*/
681 hnae_set_field(*ol_type_vlan_len_msec
,
684 HNS3_TUN_MAC_IN_UDP
);
687 hnae_set_field(*ol_type_vlan_len_msec
,
			/* drop the skb tunnel packet if the hardware doesn't
			 * support it, because the hardware can't calculate
			 * the csum when doing TSO.
			 */
			/* the stack computes the IP header already,
			 * the driver calculates the L4 checksum when not
			 * doing TSO.
			 */
702 skb_checksum_help(skb
);
706 l3
.hdr
= skb_inner_network_header(skb
);
707 l4_proto
= il4_proto
;
710 if (l3
.v4
->version
== 4) {
711 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3T_M
,
712 HNS3_TXD_L3T_S
, HNS3_L3T_IPV4
);
714 /* the stack computes the IP header already, the only time we
715 * need the hardware to recompute it is in the case of TSO.
718 hnae_set_bit(*type_cs_vlan_tso
, HNS3_TXD_L3CS_B
, 1);
720 hnae_set_bit(*type_cs_vlan_tso
, HNS3_TXD_L4CS_B
, 1);
721 } else if (l3
.v6
->version
== 6) {
722 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3T_M
,
723 HNS3_TXD_L3T_S
, HNS3_L3T_IPV6
);
724 hnae_set_bit(*type_cs_vlan_tso
, HNS3_TXD_L4CS_B
, 1);
729 hnae_set_field(*type_cs_vlan_tso
,
735 hnae_set_field(*type_cs_vlan_tso
,
741 hnae_set_field(*type_cs_vlan_tso
,
		/* drop the skb tunnel packet if the hardware doesn't support
		 * it, because the hardware can't calculate the csum when
		 * doing TSO.
		 */
		/* the stack computes the IP header already,
		 * the driver calculates the L4 checksum when not doing TSO.
		 */
756 skb_checksum_help(skb
);
763 static void hns3_set_txbd_baseinfo(u16
*bdtp_fe_sc_vld_ra_ri
, int frag_end
)
765 /* Config bd buffer end */
766 hnae_set_field(*bdtp_fe_sc_vld_ra_ri
, HNS3_TXD_BDTYPE_M
,
767 HNS3_TXD_BDTYPE_S
, 0);
768 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri
, HNS3_TXD_FE_B
, !!frag_end
);
769 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri
, HNS3_TXD_VLD_B
, 1);
770 hnae_set_field(*bdtp_fe_sc_vld_ra_ri
, HNS3_TXD_SC_M
, HNS3_TXD_SC_S
, 0);
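
/* Added note: the bdtp_fe_sc_vld_ra_ri word built above carries the BD
 * type (BDTYPE), the frag-end flag (FE, set only on the last BD of a
 * packet), the valid bit (VLD) and the SC field; hns3_fill_desc() copies
 * it into every TX descriptor it writes.
 */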
773 static int hns3_fill_desc_vtags(struct sk_buff
*skb
,
774 struct hns3_enet_ring
*tx_ring
,
775 u32
*inner_vlan_flag
,
780 #define HNS3_TX_VLAN_PRIO_SHIFT 13
782 if (skb
->protocol
== htons(ETH_P_8021Q
) &&
783 !(tx_ring
->tqp
->handle
->kinfo
.netdev
->features
&
784 NETIF_F_HW_VLAN_CTAG_TX
)) {
785 /* When HW VLAN acceleration is turned off, and the stack
786 * sets the protocol to 802.1q, the driver just need to
787 * set the protocol to the encapsulated ethertype.
789 skb
->protocol
= vlan_get_protocol(skb
);
793 if (skb_vlan_tag_present(skb
)) {
796 vlan_tag
= skb_vlan_tag_get(skb
);
797 vlan_tag
|= (skb
->priority
& 0x7) << HNS3_TX_VLAN_PRIO_SHIFT
;
		/* Based on hw strategy, use out_vtag in the two-layer tag
		 * case, and inner_vtag in the single tag case.
		 */
802 if (skb
->protocol
== htons(ETH_P_8021Q
)) {
803 hnae_set_bit(*out_vlan_flag
, HNS3_TXD_OVLAN_B
, 1);
804 *out_vtag
= vlan_tag
;
806 hnae_set_bit(*inner_vlan_flag
, HNS3_TXD_VLAN_B
, 1);
807 *inner_vtag
= vlan_tag
;
809 } else if (skb
->protocol
== htons(ETH_P_8021Q
)) {
810 struct vlan_ethhdr
*vhdr
;
813 rc
= skb_cow_head(skb
, 0);
816 vhdr
= (struct vlan_ethhdr
*)skb
->data
;
817 vhdr
->h_vlan_TCI
|= cpu_to_be16((skb
->priority
& 0x7)
818 << HNS3_TX_VLAN_PRIO_SHIFT
);
821 skb
->protocol
= vlan_get_protocol(skb
);
825 static int hns3_fill_desc(struct hns3_enet_ring
*ring
, void *priv
,
826 int size
, dma_addr_t dma
, int frag_end
,
827 enum hns_desc_type type
)
829 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
830 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_use
];
831 u32 ol_type_vlan_len_msec
= 0;
832 u16 bdtp_fe_sc_vld_ra_ri
= 0;
833 u32 type_cs_vlan_tso
= 0;
844 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
845 desc_cb
->priv
= priv
;
846 desc_cb
->length
= size
;
848 desc_cb
->type
= type
;
850 /* now, fill the descriptor */
851 desc
->addr
= cpu_to_le64(dma
);
852 desc
->tx
.send_size
= cpu_to_le16((u16
)size
);
853 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri
, frag_end
);
854 desc
->tx
.bdtp_fe_sc_vld_ra_ri
= cpu_to_le16(bdtp_fe_sc_vld_ra_ri
);
856 if (type
== DESC_TYPE_SKB
) {
857 skb
= (struct sk_buff
*)priv
;
860 ret
= hns3_fill_desc_vtags(skb
, ring
, &type_cs_vlan_tso
,
861 &ol_type_vlan_len_msec
,
862 &inner_vtag
, &out_vtag
);
866 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
867 skb_reset_mac_len(skb
);
868 protocol
= skb
->protocol
;
870 ret
= hns3_get_l4_protocol(skb
, &ol4_proto
, &il4_proto
);
873 hns3_set_l2l3l4_len(skb
, ol4_proto
, il4_proto
,
875 &ol_type_vlan_len_msec
);
876 ret
= hns3_set_l3l4_type_csum(skb
, ol4_proto
, il4_proto
,
878 &ol_type_vlan_len_msec
);
882 ret
= hns3_set_tso(skb
, &paylen
, &mss
,
889 desc
->tx
.ol_type_vlan_len_msec
=
890 cpu_to_le32(ol_type_vlan_len_msec
);
891 desc
->tx
.type_cs_vlan_tso_len
=
892 cpu_to_le32(type_cs_vlan_tso
);
893 desc
->tx
.paylen
= cpu_to_le32(paylen
);
894 desc
->tx
.mss
= cpu_to_le16(mss
);
895 desc
->tx
.vlan_tag
= cpu_to_le16(inner_vtag
);
896 desc
->tx
.outer_vlan_tag
= cpu_to_le16(out_vtag
);
899 /* move ring pointer to next.*/
900 ring_ptr_move_fw(ring
, next_to_use
);
905 static int hns3_fill_desc_tso(struct hns3_enet_ring
*ring
, void *priv
,
906 int size
, dma_addr_t dma
, int frag_end
,
907 enum hns_desc_type type
)
909 unsigned int frag_buf_num
;
914 frag_buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
915 sizeoflast
= size
% HNS3_MAX_BD_SIZE
;
916 sizeoflast
= sizeoflast
? sizeoflast
: HNS3_MAX_BD_SIZE
;
	/* When the frag size is bigger than the hardware limit, split this frag */
919 for (k
= 0; k
< frag_buf_num
; k
++) {
920 ret
= hns3_fill_desc(ring
, priv
,
921 (k
== frag_buf_num
- 1) ?
922 sizeoflast
: HNS3_MAX_BD_SIZE
,
923 dma
+ HNS3_MAX_BD_SIZE
* k
,
924 frag_end
&& (k
== frag_buf_num
- 1) ? 1 : 0,
925 (type
== DESC_TYPE_SKB
&& !k
) ?
926 DESC_TYPE_SKB
: DESC_TYPE_PAGE
);
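
/* Added example: a fragment larger than HNS3_MAX_BD_SIZE is split into
 * ceil(size / HNS3_MAX_BD_SIZE) descriptors; only the last one may carry
 * the frag_end flag, and only the first descriptor of the skb keeps
 * DESC_TYPE_SKB. For illustration, if HNS3_MAX_BD_SIZE were 4096, a
 * 10000-byte fragment would be emitted as BDs of 4096, 4096 and 1808
 * bytes.
 */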
934 static int hns3_nic_maybe_stop_tso(struct sk_buff
**out_skb
, int *bnum
,
935 struct hns3_enet_ring
*ring
)
937 struct sk_buff
*skb
= *out_skb
;
938 struct skb_frag_struct
*frag
;
945 size
= skb_headlen(skb
);
946 buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
948 frag_num
= skb_shinfo(skb
)->nr_frags
;
949 for (i
= 0; i
< frag_num
; i
++) {
950 frag
= &skb_shinfo(skb
)->frags
[i
];
951 size
= skb_frag_size(frag
);
953 (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
954 if (bdnum_for_frag
> HNS3_MAX_BD_PER_FRAG
)
957 buf_num
+= bdnum_for_frag
;
960 if (buf_num
> ring_space(ring
))
967 static int hns3_nic_maybe_stop_tx(struct sk_buff
**out_skb
, int *bnum
,
968 struct hns3_enet_ring
*ring
)
970 struct sk_buff
*skb
= *out_skb
;
973 /* No. of segments (plus a header) */
974 buf_num
= skb_shinfo(skb
)->nr_frags
+ 1;
976 if (buf_num
> ring_space(ring
))
984 static void hns_nic_dma_unmap(struct hns3_enet_ring
*ring
, int next_to_use_orig
)
986 struct device
*dev
= ring_to_dev(ring
);
989 for (i
= 0; i
< ring
->desc_num
; i
++) {
990 /* check if this is where we started */
991 if (ring
->next_to_use
== next_to_use_orig
)
994 /* unmap the descriptor dma address */
995 if (ring
->desc_cb
[ring
->next_to_use
].type
== DESC_TYPE_SKB
)
996 dma_unmap_single(dev
,
997 ring
->desc_cb
[ring
->next_to_use
].dma
,
998 ring
->desc_cb
[ring
->next_to_use
].length
,
1002 ring
->desc_cb
[ring
->next_to_use
].dma
,
1003 ring
->desc_cb
[ring
->next_to_use
].length
,
1007 ring_ptr_move_bw(ring
, next_to_use
);
1011 netdev_tx_t
hns3_nic_net_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
1013 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1014 struct hns3_nic_ring_data
*ring_data
=
1015 &tx_ring_data(priv
, skb
->queue_mapping
);
1016 struct hns3_enet_ring
*ring
= ring_data
->ring
;
1017 struct device
*dev
= priv
->dev
;
1018 struct netdev_queue
*dev_queue
;
1019 struct skb_frag_struct
*frag
;
1020 int next_to_use_head
;
1021 int next_to_use_frag
;
1029 /* Prefetch the data used later */
1030 prefetch(skb
->data
);
1032 switch (priv
->ops
.maybe_stop_tx(&skb
, &buf_num
, ring
)) {
1034 u64_stats_update_begin(&ring
->syncp
);
1035 ring
->stats
.tx_busy
++;
1036 u64_stats_update_end(&ring
->syncp
);
1038 goto out_net_tx_busy
;
1040 u64_stats_update_begin(&ring
->syncp
);
1041 ring
->stats
.sw_err_cnt
++;
1042 u64_stats_update_end(&ring
->syncp
);
1043 netdev_err(netdev
, "no memory to xmit!\n");
1050 /* No. of segments (plus a header) */
1051 seg_num
= skb_shinfo(skb
)->nr_frags
+ 1;
1052 /* Fill the first part */
1053 size
= skb_headlen(skb
);
1055 next_to_use_head
= ring
->next_to_use
;
1057 dma
= dma_map_single(dev
, skb
->data
, size
, DMA_TO_DEVICE
);
1058 if (dma_mapping_error(dev
, dma
)) {
1059 netdev_err(netdev
, "TX head DMA map failed\n");
1060 ring
->stats
.sw_err_cnt
++;
1064 ret
= priv
->ops
.fill_desc(ring
, skb
, size
, dma
, seg_num
== 1 ? 1 : 0,
1067 goto head_dma_map_err
;
1069 next_to_use_frag
= ring
->next_to_use
;
1070 /* Fill the fragments */
1071 for (i
= 1; i
< seg_num
; i
++) {
1072 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1073 size
= skb_frag_size(frag
);
1074 dma
= skb_frag_dma_map(dev
, frag
, 0, size
, DMA_TO_DEVICE
);
1075 if (dma_mapping_error(dev
, dma
)) {
1076 netdev_err(netdev
, "TX frag(%d) DMA map failed\n", i
);
1077 ring
->stats
.sw_err_cnt
++;
1078 goto frag_dma_map_err
;
1080 ret
= priv
->ops
.fill_desc(ring
, skb_frag_page(frag
), size
, dma
,
1081 seg_num
- 1 == i
? 1 : 0,
1085 goto frag_dma_map_err
;
	/* All fragments have been translated into TX descriptors */
1089 dev_queue
= netdev_get_tx_queue(netdev
, ring_data
->queue_index
);
1090 netdev_tx_sent_queue(dev_queue
, skb
->len
);
1092 wmb(); /* Commit all data before submit */
1094 hnae_queue_xmit(ring
->tqp
, buf_num
);
1096 return NETDEV_TX_OK
;
1099 hns_nic_dma_unmap(ring
, next_to_use_frag
);
1102 hns_nic_dma_unmap(ring
, next_to_use_head
);
1105 dev_kfree_skb_any(skb
);
1106 return NETDEV_TX_OK
;
1109 netif_stop_subqueue(netdev
, ring_data
->queue_index
);
1110 smp_mb(); /* Commit all data before submit */
1112 return NETDEV_TX_BUSY
;
1115 static int hns3_nic_net_set_mac_address(struct net_device
*netdev
, void *p
)
1117 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1118 struct sockaddr
*mac_addr
= p
;
1121 if (!mac_addr
|| !is_valid_ether_addr((const u8
*)mac_addr
->sa_data
))
1122 return -EADDRNOTAVAIL
;
1124 ret
= h
->ae_algo
->ops
->set_mac_addr(h
, mac_addr
->sa_data
, false);
1126 netdev_err(netdev
, "set_mac_address fail, ret=%d!\n", ret
);
1130 ether_addr_copy(netdev
->dev_addr
, mac_addr
->sa_data
);
1135 static int hns3_nic_set_features(struct net_device
*netdev
,
1136 netdev_features_t features
)
1138 netdev_features_t changed
= netdev
->features
^ features
;
1139 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1140 struct hnae3_handle
*h
= priv
->ae_handle
;
1143 if (changed
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1144 if (features
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1145 priv
->ops
.fill_desc
= hns3_fill_desc_tso
;
1146 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tso
;
1148 priv
->ops
.fill_desc
= hns3_fill_desc
;
1149 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tx
;
1153 if ((changed
& NETIF_F_HW_VLAN_CTAG_FILTER
) &&
1154 h
->ae_algo
->ops
->enable_vlan_filter
) {
1155 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1156 h
->ae_algo
->ops
->enable_vlan_filter(h
, true);
1158 h
->ae_algo
->ops
->enable_vlan_filter(h
, false);
1161 if ((changed
& NETIF_F_HW_VLAN_CTAG_RX
) &&
1162 h
->ae_algo
->ops
->enable_hw_strip_rxvtag
) {
1163 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1164 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, true);
1166 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, false);
1172 netdev
->features
= features
;
1176 static void hns3_nic_get_stats64(struct net_device
*netdev
,
1177 struct rtnl_link_stats64
*stats
)
1179 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1180 int queue_num
= priv
->ae_handle
->kinfo
.num_tqps
;
1181 struct hnae3_handle
*handle
= priv
->ae_handle
;
1182 struct hns3_enet_ring
*ring
;
1192 if (test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
1195 handle
->ae_algo
->ops
->update_stats(handle
, &netdev
->stats
);
1197 for (idx
= 0; idx
< queue_num
; idx
++) {
1198 /* fetch the tx stats */
1199 ring
= priv
->ring_data
[idx
].ring
;
1201 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1202 tx_bytes
+= ring
->stats
.tx_bytes
;
1203 tx_pkts
+= ring
->stats
.tx_pkts
;
1204 tx_drop
+= ring
->stats
.tx_busy
;
1205 tx_drop
+= ring
->stats
.sw_err_cnt
;
1206 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1208 /* fetch the rx stats */
1209 ring
= priv
->ring_data
[idx
+ queue_num
].ring
;
1211 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1212 rx_bytes
+= ring
->stats
.rx_bytes
;
1213 rx_pkts
+= ring
->stats
.rx_pkts
;
1214 rx_drop
+= ring
->stats
.non_vld_descs
;
1215 rx_drop
+= ring
->stats
.err_pkt_len
;
1216 rx_drop
+= ring
->stats
.l2_err
;
1217 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
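
/* Added note: the u64_stats_fetch_begin_irq()/retry_irq() pair re-reads
 * the per-ring counters if a writer updated them concurrently, so the
 * 64-bit byte/packet totals stay consistent even on 32-bit systems where
 * the updates are not atomic.
 */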
1220 stats
->tx_bytes
= tx_bytes
;
1221 stats
->tx_packets
= tx_pkts
;
1222 stats
->rx_bytes
= rx_bytes
;
1223 stats
->rx_packets
= rx_pkts
;
1225 stats
->rx_errors
= netdev
->stats
.rx_errors
;
1226 stats
->multicast
= netdev
->stats
.multicast
;
1227 stats
->rx_length_errors
= netdev
->stats
.rx_length_errors
;
1228 stats
->rx_crc_errors
= netdev
->stats
.rx_crc_errors
;
1229 stats
->rx_missed_errors
= netdev
->stats
.rx_missed_errors
;
1231 stats
->tx_errors
= netdev
->stats
.tx_errors
;
1232 stats
->rx_dropped
= rx_drop
+ netdev
->stats
.rx_dropped
;
1233 stats
->tx_dropped
= tx_drop
+ netdev
->stats
.tx_dropped
;
1234 stats
->collisions
= netdev
->stats
.collisions
;
1235 stats
->rx_over_errors
= netdev
->stats
.rx_over_errors
;
1236 stats
->rx_frame_errors
= netdev
->stats
.rx_frame_errors
;
1237 stats
->rx_fifo_errors
= netdev
->stats
.rx_fifo_errors
;
1238 stats
->tx_aborted_errors
= netdev
->stats
.tx_aborted_errors
;
1239 stats
->tx_carrier_errors
= netdev
->stats
.tx_carrier_errors
;
1240 stats
->tx_fifo_errors
= netdev
->stats
.tx_fifo_errors
;
1241 stats
->tx_heartbeat_errors
= netdev
->stats
.tx_heartbeat_errors
;
1242 stats
->tx_window_errors
= netdev
->stats
.tx_window_errors
;
1243 stats
->rx_compressed
= netdev
->stats
.rx_compressed
;
1244 stats
->tx_compressed
= netdev
->stats
.tx_compressed
;
1247 static int hns3_setup_tc(struct net_device
*netdev
, void *type_data
)
1249 struct tc_mqprio_qopt_offload
*mqprio_qopt
= type_data
;
1250 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1251 struct hnae3_knic_private_info
*kinfo
= &h
->kinfo
;
1252 u8
*prio_tc
= mqprio_qopt
->qopt
.prio_tc_map
;
1253 u8 tc
= mqprio_qopt
->qopt
.num_tc
;
1254 u16 mode
= mqprio_qopt
->mode
;
1255 u8 hw
= mqprio_qopt
->qopt
.hw
;
1260 if (!((hw
== TC_MQPRIO_HW_OFFLOAD_TCS
&&
1261 mode
== TC_MQPRIO_MODE_CHANNEL
) || (!hw
&& tc
== 0)))
1264 if (tc
> HNAE3_MAX_TC
)
1270 if_running
= netif_running(netdev
);
1272 hns3_nic_net_stop(netdev
);
1276 ret
= (kinfo
->dcb_ops
&& kinfo
->dcb_ops
->setup_tc
) ?
1277 kinfo
->dcb_ops
->setup_tc(h
, tc
, prio_tc
) : -EOPNOTSUPP
;
1282 netdev_reset_tc(netdev
);
1284 ret
= netdev_set_num_tc(netdev
, tc
);
1288 for (i
= 0; i
< HNAE3_MAX_TC
; i
++) {
1289 if (!kinfo
->tc_info
[i
].enable
)
1292 netdev_set_tc_queue(netdev
,
1293 kinfo
->tc_info
[i
].tc
,
1294 kinfo
->tc_info
[i
].tqp_count
,
1295 kinfo
->tc_info
[i
].tqp_offset
);
1299 ret
= hns3_nic_set_real_num_queue(netdev
);
1303 hns3_nic_net_open(netdev
);
1308 static int hns3_nic_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
1311 if (type
!= TC_SETUP_QDISC_MQPRIO
)
1314 return hns3_setup_tc(dev
, type_data
);
1317 static int hns3_vlan_rx_add_vid(struct net_device
*netdev
,
1318 __be16 proto
, u16 vid
)
1320 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1321 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1324 if (h
->ae_algo
->ops
->set_vlan_filter
)
1325 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, false);
1328 set_bit(vid
, priv
->active_vlans
);
1333 static int hns3_vlan_rx_kill_vid(struct net_device
*netdev
,
1334 __be16 proto
, u16 vid
)
1336 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1337 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1340 if (h
->ae_algo
->ops
->set_vlan_filter
)
1341 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, true);
1344 clear_bit(vid
, priv
->active_vlans
);
1349 static void hns3_restore_vlan(struct net_device
*netdev
)
1351 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1355 for_each_set_bit(vid
, priv
->active_vlans
, VLAN_N_VID
) {
1356 ret
= hns3_vlan_rx_add_vid(netdev
, htons(ETH_P_8021Q
), vid
);
1358 netdev_warn(netdev
, "Restore vlan: %d filter, ret:%d\n",
1363 static int hns3_ndo_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
1364 u8 qos
, __be16 vlan_proto
)
1366 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1369 if (h
->ae_algo
->ops
->set_vf_vlan_filter
)
1370 ret
= h
->ae_algo
->ops
->set_vf_vlan_filter(h
, vf
, vlan
,
1376 static int hns3_nic_change_mtu(struct net_device
*netdev
, int new_mtu
)
1378 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1379 bool if_running
= netif_running(netdev
);
1382 if (!h
->ae_algo
->ops
->set_mtu
)
1385 /* if this was called with netdev up then bring netdevice down */
1387 (void)hns3_nic_net_stop(netdev
);
1391 ret
= h
->ae_algo
->ops
->set_mtu(h
, new_mtu
);
1393 netdev_err(netdev
, "failed to change MTU in hardware %d\n",
1398 netdev
->mtu
= new_mtu
;
1400 /* if the netdev was running earlier, bring it up again */
1401 if (if_running
&& hns3_nic_net_open(netdev
))
1407 static bool hns3_get_tx_timeo_queue_info(struct net_device
*ndev
)
1409 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1410 struct hns3_enet_ring
*tx_ring
= NULL
;
1411 int timeout_queue
= 0;
1412 int hw_head
, hw_tail
;
1415 /* Find the stopped queue the same way the stack does */
1416 for (i
= 0; i
< ndev
->real_num_tx_queues
; i
++) {
1417 struct netdev_queue
*q
;
1418 unsigned long trans_start
;
1420 q
= netdev_get_tx_queue(ndev
, i
);
1421 trans_start
= q
->trans_start
;
1422 if (netif_xmit_stopped(q
) &&
1424 (trans_start
+ ndev
->watchdog_timeo
))) {
1430 if (i
== ndev
->num_tx_queues
) {
1432 "no netdev TX timeout queue found, timeout count: %llu\n",
1433 priv
->tx_timeout_count
);
1437 tx_ring
= priv
->ring_data
[timeout_queue
].ring
;
1439 hw_head
= readl_relaxed(tx_ring
->tqp
->io_base
+
1440 HNS3_RING_TX_RING_HEAD_REG
);
1441 hw_tail
= readl_relaxed(tx_ring
->tqp
->io_base
+
1442 HNS3_RING_TX_RING_TAIL_REG
);
1444 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1445 priv
->tx_timeout_count
,
1447 tx_ring
->next_to_use
,
1448 tx_ring
->next_to_clean
,
1451 readl(tx_ring
->tqp_vector
->mask_addr
));
1456 static void hns3_nic_net_timeout(struct net_device
*ndev
)
1458 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1459 struct hnae3_handle
*h
= priv
->ae_handle
;
1461 if (!hns3_get_tx_timeo_queue_info(ndev
))
1464 priv
->tx_timeout_count
++;
1466 if (time_before(jiffies
, (h
->last_reset_time
+ ndev
->watchdog_timeo
)))
1469 /* request the reset */
1470 if (h
->ae_algo
->ops
->reset_event
)
1471 h
->ae_algo
->ops
->reset_event(h
);
1474 static const struct net_device_ops hns3_nic_netdev_ops
= {
1475 .ndo_open
= hns3_nic_net_open
,
1476 .ndo_stop
= hns3_nic_net_stop
,
1477 .ndo_start_xmit
= hns3_nic_net_xmit
,
1478 .ndo_tx_timeout
= hns3_nic_net_timeout
,
1479 .ndo_set_mac_address
= hns3_nic_net_set_mac_address
,
1480 .ndo_change_mtu
= hns3_nic_change_mtu
,
1481 .ndo_set_features
= hns3_nic_set_features
,
1482 .ndo_get_stats64
= hns3_nic_get_stats64
,
1483 .ndo_setup_tc
= hns3_nic_setup_tc
,
1484 .ndo_set_rx_mode
= hns3_nic_set_rx_mode
,
1485 .ndo_vlan_rx_add_vid
= hns3_vlan_rx_add_vid
,
1486 .ndo_vlan_rx_kill_vid
= hns3_vlan_rx_kill_vid
,
1487 .ndo_set_vf_vlan
= hns3_ndo_set_vf_vlan
,
1490 static bool hns3_is_phys_func(struct pci_dev
*pdev
)
1492 u32 dev_id
= pdev
->device
;
1495 case HNAE3_DEV_ID_GE
:
1496 case HNAE3_DEV_ID_25GE
:
1497 case HNAE3_DEV_ID_25GE_RDMA
:
1498 case HNAE3_DEV_ID_25GE_RDMA_MACSEC
:
1499 case HNAE3_DEV_ID_50GE_RDMA
:
1500 case HNAE3_DEV_ID_50GE_RDMA_MACSEC
:
1501 case HNAE3_DEV_ID_100G_RDMA_MACSEC
:
1503 case HNAE3_DEV_ID_100G_VF
:
1504 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF
:
1507 dev_warn(&pdev
->dev
, "un-recognized pci device-id %d",
1514 static void hns3_disable_sriov(struct pci_dev
*pdev
)
1516 /* If our VFs are assigned we cannot shut down SR-IOV
1517 * without causing issues, so just leave the hardware
1518 * available but disabled
1520 if (pci_vfs_assigned(pdev
)) {
1521 dev_warn(&pdev
->dev
,
1522 "disabling driver while VFs are assigned\n");
1526 pci_disable_sriov(pdev
);
1529 /* hns3_probe - Device initialization routine
1530 * @pdev: PCI device information struct
1531 * @ent: entry in hns3_pci_tbl
1533 * hns3_probe initializes a PF identified by a pci_dev structure.
1534 * The OS initialization, configuring of the PF private structure,
1535 * and a hardware reset occur.
1537 * Returns 0 on success, negative on failure
1539 static int hns3_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1541 struct hnae3_ae_dev
*ae_dev
;
1544 ae_dev
= devm_kzalloc(&pdev
->dev
, sizeof(*ae_dev
),
1551 ae_dev
->pdev
= pdev
;
1552 ae_dev
->flag
= ent
->driver_data
;
1553 ae_dev
->dev_type
= HNAE3_DEV_KNIC
;
1554 pci_set_drvdata(pdev
, ae_dev
);
1556 hnae3_register_ae_dev(ae_dev
);
1561 /* hns3_remove - Device removal routine
1562 * @pdev: PCI device information struct
1564 static void hns3_remove(struct pci_dev
*pdev
)
1566 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1568 if (hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))
1569 hns3_disable_sriov(pdev
);
1571 hnae3_unregister_ae_dev(ae_dev
);
1575 * hns3_pci_sriov_configure
1576 * @pdev: pointer to a pci_dev structure
1577 * @num_vfs: number of VFs to allocate
1579 * Enable or change the number of VFs. Called when the user updates the number
1582 static int hns3_pci_sriov_configure(struct pci_dev
*pdev
, int num_vfs
)
1586 if (!(hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))) {
1587 dev_warn(&pdev
->dev
, "Can not config SRIOV\n");
1592 ret
= pci_enable_sriov(pdev
, num_vfs
);
1594 dev_err(&pdev
->dev
, "SRIOV enable failed %d\n", ret
);
1597 } else if (!pci_vfs_assigned(pdev
)) {
1598 pci_disable_sriov(pdev
);
1600 dev_warn(&pdev
->dev
,
1601 "Unable to free VFs because some are assigned to VMs.\n");
1607 static struct pci_driver hns3_driver
= {
1608 .name
= hns3_driver_name
,
1609 .id_table
= hns3_pci_tbl
,
1610 .probe
= hns3_probe
,
1611 .remove
= hns3_remove
,
1612 .sriov_configure
= hns3_pci_sriov_configure
,
1615 /* set default feature to hns3 */
1616 static void hns3_set_default_feature(struct net_device
*netdev
)
1618 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
1620 netdev
->hw_enc_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1621 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1622 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1623 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1624 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1626 netdev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
1628 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
1630 netdev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1631 NETIF_F_HW_VLAN_CTAG_FILTER
|
1632 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1633 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1634 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1635 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1636 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1638 netdev
->vlan_features
|=
1639 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM
|
1640 NETIF_F_SG
| NETIF_F_GSO
| NETIF_F_GRO
|
1641 NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1642 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1643 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1645 netdev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1646 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1647 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1648 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1649 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1650 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1653 static int hns3_alloc_buffer(struct hns3_enet_ring
*ring
,
1654 struct hns3_desc_cb
*cb
)
1656 unsigned int order
= hnae_page_order(ring
);
1659 p
= dev_alloc_pages(order
);
1664 cb
->page_offset
= 0;
1666 cb
->buf
= page_address(p
);
1667 cb
->length
= hnae_page_size(ring
);
1668 cb
->type
= DESC_TYPE_PAGE
;
1673 static void hns3_free_buffer(struct hns3_enet_ring
*ring
,
1674 struct hns3_desc_cb
*cb
)
1676 if (cb
->type
== DESC_TYPE_SKB
)
1677 dev_kfree_skb_any((struct sk_buff
*)cb
->priv
);
1678 else if (!HNAE3_IS_TX_RING(ring
))
1679 put_page((struct page
*)cb
->priv
);
1680 memset(cb
, 0, sizeof(*cb
));
1683 static int hns3_map_buffer(struct hns3_enet_ring
*ring
, struct hns3_desc_cb
*cb
)
1685 cb
->dma
= dma_map_page(ring_to_dev(ring
), cb
->priv
, 0,
1686 cb
->length
, ring_to_dma_dir(ring
));
1688 if (dma_mapping_error(ring_to_dev(ring
), cb
->dma
))
1694 static void hns3_unmap_buffer(struct hns3_enet_ring
*ring
,
1695 struct hns3_desc_cb
*cb
)
1697 if (cb
->type
== DESC_TYPE_SKB
)
1698 dma_unmap_single(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1699 ring_to_dma_dir(ring
));
1701 dma_unmap_page(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1702 ring_to_dma_dir(ring
));
1705 static void hns3_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1707 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1708 ring
->desc
[i
].addr
= 0;
1711 static void hns3_free_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1713 struct hns3_desc_cb
*cb
= &ring
->desc_cb
[i
];
1715 if (!ring
->desc_cb
[i
].dma
)
1718 hns3_buffer_detach(ring
, i
);
1719 hns3_free_buffer(ring
, cb
);
1722 static void hns3_free_buffers(struct hns3_enet_ring
*ring
)
1726 for (i
= 0; i
< ring
->desc_num
; i
++)
1727 hns3_free_buffer_detach(ring
, i
);
1730 /* free desc along with its attached buffer */
1731 static void hns3_free_desc(struct hns3_enet_ring
*ring
)
1733 hns3_free_buffers(ring
);
1735 dma_unmap_single(ring_to_dev(ring
), ring
->desc_dma_addr
,
1736 ring
->desc_num
* sizeof(ring
->desc
[0]),
1738 ring
->desc_dma_addr
= 0;
1743 static int hns3_alloc_desc(struct hns3_enet_ring
*ring
)
1745 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
1747 ring
->desc
= kzalloc(size
, GFP_KERNEL
);
1751 ring
->desc_dma_addr
= dma_map_single(ring_to_dev(ring
), ring
->desc
,
1752 size
, DMA_BIDIRECTIONAL
);
1753 if (dma_mapping_error(ring_to_dev(ring
), ring
->desc_dma_addr
)) {
1754 ring
->desc_dma_addr
= 0;
1763 static int hns3_reserve_buffer_map(struct hns3_enet_ring
*ring
,
1764 struct hns3_desc_cb
*cb
)
1768 ret
= hns3_alloc_buffer(ring
, cb
);
1772 ret
= hns3_map_buffer(ring
, cb
);
1779 hns3_free_buffer(ring
, cb
);
1784 static int hns3_alloc_buffer_attach(struct hns3_enet_ring
*ring
, int i
)
1786 int ret
= hns3_reserve_buffer_map(ring
, &ring
->desc_cb
[i
]);
1791 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
/* Allocate memory for raw packets and map it for DMA */
1797 static int hns3_alloc_ring_buffers(struct hns3_enet_ring
*ring
)
1801 for (i
= 0; i
< ring
->desc_num
; i
++) {
1802 ret
= hns3_alloc_buffer_attach(ring
, i
);
1804 goto out_buffer_fail
;
1810 for (j
= i
- 1; j
>= 0; j
--)
1811 hns3_free_buffer_detach(ring
, j
);
/* detach an in-use buffer and replace it with a reserved one */
1816 static void hns3_replace_buffer(struct hns3_enet_ring
*ring
, int i
,
1817 struct hns3_desc_cb
*res_cb
)
1819 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1820 ring
->desc_cb
[i
] = *res_cb
;
1821 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
1824 static void hns3_reuse_buffer(struct hns3_enet_ring
*ring
, int i
)
1826 ring
->desc_cb
[i
].reuse_flag
= 0;
1827 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
1828 + ring
->desc_cb
[i
].page_offset
);
1831 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring
*ring
, int *bytes
,
1834 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
1836 (*pkts
) += (desc_cb
->type
== DESC_TYPE_SKB
);
1837 (*bytes
) += desc_cb
->length
;
1838 /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
1839 hns3_free_buffer_detach(ring
, ring
->next_to_clean
);
1841 ring_ptr_move_fw(ring
, next_to_clean
);
1844 static int is_valid_clean_head(struct hns3_enet_ring
*ring
, int h
)
1846 int u
= ring
->next_to_use
;
1847 int c
= ring
->next_to_clean
;
1849 if (unlikely(h
> ring
->desc_num
))
1852 return u
> c
? (h
> c
&& h
<= u
) : (h
> c
|| h
<= u
);
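
/* Added note: the check above validates the head value reported by
 * hardware. Descriptors awaiting cleanup live in (next_to_clean,
 * next_to_use]; when the ring has wrapped (u <= c) that window itself
 * wraps, hence the two cases of the ternary expression.
 */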
1855 bool hns3_clean_tx_ring(struct hns3_enet_ring
*ring
, int budget
)
1857 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
1858 struct netdev_queue
*dev_queue
;
1862 head
= readl_relaxed(ring
->tqp
->io_base
+ HNS3_RING_TX_RING_HEAD_REG
);
	rmb(); /* Make sure head is ready before touching any data */
1865 if (is_ring_empty(ring
) || head
== ring
->next_to_clean
)
1866 return true; /* no data to poll */
1868 if (!is_valid_clean_head(ring
, head
)) {
1869 netdev_err(netdev
, "wrong head (%d, %d-%d)\n", head
,
1870 ring
->next_to_use
, ring
->next_to_clean
);
1872 u64_stats_update_begin(&ring
->syncp
);
1873 ring
->stats
.io_err_cnt
++;
1874 u64_stats_update_end(&ring
->syncp
);
1880 while (head
!= ring
->next_to_clean
&& budget
) {
1881 hns3_nic_reclaim_one_desc(ring
, &bytes
, &pkts
);
1882 /* Issue prefetch for next Tx descriptor */
1883 prefetch(&ring
->desc_cb
[ring
->next_to_clean
]);
1887 ring
->tqp_vector
->tx_group
.total_bytes
+= bytes
;
1888 ring
->tqp_vector
->tx_group
.total_packets
+= pkts
;
1890 u64_stats_update_begin(&ring
->syncp
);
1891 ring
->stats
.tx_bytes
+= bytes
;
1892 ring
->stats
.tx_pkts
+= pkts
;
1893 u64_stats_update_end(&ring
->syncp
);
1895 dev_queue
= netdev_get_tx_queue(netdev
, ring
->tqp
->tqp_index
);
1896 netdev_tx_completed_queue(dev_queue
, pkts
, bytes
);
1898 if (unlikely(pkts
&& netif_carrier_ok(netdev
) &&
1899 (ring_space(ring
) > HNS3_MAX_BD_PER_PKT
))) {
1900 /* Make sure that anybody stopping the queue after this
1901 * sees the new next_to_clean.
1904 if (netif_tx_queue_stopped(dev_queue
)) {
1905 netif_tx_wake_queue(dev_queue
);
1906 ring
->stats
.restart_queue
++;
1913 static int hns3_desc_unused(struct hns3_enet_ring
*ring
)
1915 int ntc
= ring
->next_to_clean
;
1916 int ntu
= ring
->next_to_use
;
1918 return ((ntc
>= ntu
) ? 0 : ring
->desc_num
) + ntc
- ntu
;
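
/* Added example: hns3_desc_unused() returns how many RX descriptors the
 * driver still has to refill. With desc_num = 1024, next_to_use = 1000
 * and next_to_clean = 10, the free region wraps, so the result is
 * 1024 + 10 - 1000 = 34 unused descriptors.
 */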
1922 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring
*ring
, int cleand_count
)
1924 struct hns3_desc_cb
*desc_cb
;
1925 struct hns3_desc_cb res_cbs
;
1928 for (i
= 0; i
< cleand_count
; i
++) {
1929 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
1930 if (desc_cb
->reuse_flag
) {
1931 u64_stats_update_begin(&ring
->syncp
);
1932 ring
->stats
.reuse_pg_cnt
++;
1933 u64_stats_update_end(&ring
->syncp
);
1935 hns3_reuse_buffer(ring
, ring
->next_to_use
);
1937 ret
= hns3_reserve_buffer_map(ring
, &res_cbs
);
1939 u64_stats_update_begin(&ring
->syncp
);
1940 ring
->stats
.sw_err_cnt
++;
1941 u64_stats_update_end(&ring
->syncp
);
1943 netdev_err(ring
->tqp
->handle
->kinfo
.netdev
,
1944 "hnae reserve buffer map failed.\n");
1947 hns3_replace_buffer(ring
, ring
->next_to_use
, &res_cbs
);
1950 ring_ptr_move_fw(ring
, next_to_use
);
	wmb(); /* Make sure all data has been written before submit */
1954 writel_relaxed(i
, ring
->tqp
->io_base
+ HNS3_RING_RX_RING_HEAD_REG
);
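
/* Added note (assumption): the write to HNS3_RING_RX_RING_HEAD_REG above
 * appears to act as a doorbell advancing the RX head by i, i.e. telling
 * the hardware how many freshly mapped buffers were just made available.
 */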
1957 static void hns3_nic_reuse_page(struct sk_buff
*skb
, int i
,
1958 struct hns3_enet_ring
*ring
, int pull_len
,
1959 struct hns3_desc_cb
*desc_cb
)
1961 struct hns3_desc
*desc
;
1966 twobufs
= ((PAGE_SIZE
< 8192) &&
1967 hnae_buf_size(ring
) == HNS3_BUFFER_SIZE_2048
);
1969 desc
= &ring
->desc
[ring
->next_to_clean
];
1970 size
= le16_to_cpu(desc
->rx
.size
);
1972 truesize
= hnae_buf_size(ring
);
1975 last_offset
= hnae_page_size(ring
) - hnae_buf_size(ring
);
1977 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, desc_cb
->page_offset
+ pull_len
,
1978 size
- pull_len
, truesize
);
	/* Avoid re-using remote pages; flag as not reusable by default */
1981 if (unlikely(page_to_nid(desc_cb
->priv
) != numa_node_id()))
1985 /* If we are only owner of page we can reuse it */
1986 if (likely(page_count(desc_cb
->priv
) == 1)) {
1987 /* Flip page offset to other buffer */
1988 desc_cb
->page_offset
^= truesize
;
1990 desc_cb
->reuse_flag
= 1;
1991 /* bump ref count on page before it is given*/
1992 get_page(desc_cb
->priv
);
1997 /* Move offset up to the next cache line */
1998 desc_cb
->page_offset
+= truesize
;
2000 if (desc_cb
->page_offset
<= last_offset
) {
2001 desc_cb
->reuse_flag
= 1;
2002 /* Bump ref count on page before it is given*/
2003 get_page(desc_cb
->priv
);
2007 static void hns3_rx_checksum(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
,
2008 struct hns3_desc
*desc
)
2010 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2011 int l3_type
, l4_type
;
2016 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2017 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2019 skb
->ip_summed
= CHECKSUM_NONE
;
2021 skb_checksum_none_assert(skb
);
2023 if (!(netdev
->features
& NETIF_F_RXCSUM
))
2026 /* check if hardware has done checksum */
2027 if (!hnae_get_bit(bd_base_info
, HNS3_RXD_L3L4P_B
))
2030 if (unlikely(hnae_get_bit(l234info
, HNS3_RXD_L3E_B
) ||
2031 hnae_get_bit(l234info
, HNS3_RXD_L4E_B
) ||
2032 hnae_get_bit(l234info
, HNS3_RXD_OL3E_B
) ||
2033 hnae_get_bit(l234info
, HNS3_RXD_OL4E_B
))) {
2034 netdev_err(netdev
, "L3/L4 error pkt\n");
2035 u64_stats_update_begin(&ring
->syncp
);
2036 ring
->stats
.l3l4_csum_err
++;
2037 u64_stats_update_end(&ring
->syncp
);
2042 l3_type
= hnae_get_field(l234info
, HNS3_RXD_L3ID_M
,
2044 l4_type
= hnae_get_field(l234info
, HNS3_RXD_L4ID_M
,
2047 ol4_type
= hnae_get_field(l234info
, HNS3_RXD_OL4ID_M
, HNS3_RXD_OL4ID_S
);
2049 case HNS3_OL4_TYPE_MAC_IN_UDP
:
2050 case HNS3_OL4_TYPE_NVGRE
:
2051 skb
->csum_level
= 1;
2052 case HNS3_OL4_TYPE_NO_TUN
:
2053 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2054 if (l3_type
== HNS3_L3_TYPE_IPV4
||
2055 (l3_type
== HNS3_L3_TYPE_IPV6
&&
2056 (l4_type
== HNS3_L4_TYPE_UDP
||
2057 l4_type
== HNS3_L4_TYPE_TCP
||
2058 l4_type
== HNS3_L4_TYPE_SCTP
)))
2059 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2064 static void hns3_rx_skb(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
)
2066 napi_gro_receive(&ring
->tqp_vector
->napi
, skb
);
2069 static int hns3_handle_rx_bd(struct hns3_enet_ring
*ring
,
2070 struct sk_buff
**out_skb
, int *out_bnum
)
2072 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2073 struct hns3_desc_cb
*desc_cb
;
2074 struct hns3_desc
*desc
;
2075 struct sk_buff
*skb
;
2083 desc
= &ring
->desc
[ring
->next_to_clean
];
2084 desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
2088 length
= le16_to_cpu(desc
->rx
.pkt_len
);
2089 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2090 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2092 /* Check valid BD */
2093 if (!hnae_get_bit(bd_base_info
, HNS3_RXD_VLD_B
))
2096 va
= (unsigned char *)desc_cb
->buf
+ desc_cb
->page_offset
;
	/* Prefetch first cache line of first page.
	 * The idea is to cache a few bytes of the packet header. Our L1 cache
	 * line size is 64B, so we need to prefetch twice to cover 128B. But in
	 * practice we may have caches with 128B L1 cache lines; in such a case
	 * a single fetch suffices to cache the relevant part of the header.
	 */
2106 #if L1_CACHE_BYTES < 128
2107 prefetch(va
+ L1_CACHE_BYTES
);
2110 skb
= *out_skb
= napi_alloc_skb(&ring
->tqp_vector
->napi
,
2112 if (unlikely(!skb
)) {
2113 netdev_err(netdev
, "alloc rx skb fail\n");
2115 u64_stats_update_begin(&ring
->syncp
);
2116 ring
->stats
.sw_err_cnt
++;
2117 u64_stats_update_end(&ring
->syncp
);
2122 prefetchw(skb
->data
);
	/* Based on hw strategy, the offloaded tag is stored in ot_vlan_tag
	 * in the two-layer tag case, and in vlan_tag in the single-layer
	 * tag case.
	 */
2128 if (netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
) {
2131 vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
2132 if (!(vlan_tag
& VLAN_VID_MASK
))
2133 vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
2134 if (vlan_tag
& VLAN_VID_MASK
)
2135 __vlan_hwaccel_put_tag(skb
,
2141 if (length
<= HNS3_RX_HEAD_SIZE
) {
2142 memcpy(__skb_put(skb
, length
), va
, ALIGN(length
, sizeof(long)));
2144 /* We can reuse buffer as-is, just make sure it is local */
2145 if (likely(page_to_nid(desc_cb
->priv
) == numa_node_id()))
2146 desc_cb
->reuse_flag
= 1;
2147 else /* This page cannot be reused so discard it */
2148 put_page(desc_cb
->priv
);
2150 ring_ptr_move_fw(ring
, next_to_clean
);
2152 u64_stats_update_begin(&ring
->syncp
);
2153 ring
->stats
.seg_pkt_cnt
++;
2154 u64_stats_update_end(&ring
->syncp
);
2156 pull_len
= eth_get_headlen(va
, HNS3_RX_HEAD_SIZE
);
2158 memcpy(__skb_put(skb
, pull_len
), va
,
2159 ALIGN(pull_len
, sizeof(long)));
2161 hns3_nic_reuse_page(skb
, 0, ring
, pull_len
, desc_cb
);
2162 ring_ptr_move_fw(ring
, next_to_clean
);
2164 while (!hnae_get_bit(bd_base_info
, HNS3_RXD_FE_B
)) {
2165 desc
= &ring
->desc
[ring
->next_to_clean
];
2166 desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
2167 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2168 hns3_nic_reuse_page(skb
, bnum
, ring
, 0, desc_cb
);
2169 ring_ptr_move_fw(ring
, next_to_clean
);
2176 if (unlikely(!hnae_get_bit(bd_base_info
, HNS3_RXD_VLD_B
))) {
2177 netdev_err(netdev
, "no valid bd,%016llx,%016llx\n",
2178 ((u64
*)desc
)[0], ((u64
*)desc
)[1]);
2179 u64_stats_update_begin(&ring
->syncp
);
2180 ring
->stats
.non_vld_descs
++;
2181 u64_stats_update_end(&ring
->syncp
);
2183 dev_kfree_skb_any(skb
);
2187 if (unlikely((!desc
->rx
.pkt_len
) ||
2188 hnae_get_bit(l234info
, HNS3_RXD_TRUNCAT_B
))) {
2189 netdev_err(netdev
, "truncated pkt\n");
2190 u64_stats_update_begin(&ring
->syncp
);
2191 ring
->stats
.err_pkt_len
++;
2192 u64_stats_update_end(&ring
->syncp
);
2194 dev_kfree_skb_any(skb
);
2198 if (unlikely(hnae_get_bit(l234info
, HNS3_RXD_L2E_B
))) {
2199 netdev_err(netdev
, "L2 error pkt\n");
2200 u64_stats_update_begin(&ring
->syncp
);
2201 ring
->stats
.l2_err
++;
2202 u64_stats_update_end(&ring
->syncp
);
2204 dev_kfree_skb_any(skb
);
2208 u64_stats_update_begin(&ring
->syncp
);
2209 ring
->stats
.rx_pkts
++;
2210 ring
->stats
.rx_bytes
+= skb
->len
;
2211 u64_stats_update_end(&ring
->syncp
);
2213 ring
->tqp_vector
->rx_group
.total_bytes
+= skb
->len
;
2215 hns3_rx_checksum(ring
, skb
, desc
);
2219 int hns3_clean_rx_ring(
2220 struct hns3_enet_ring
*ring
, int budget
,
2221 void (*rx_fn
)(struct hns3_enet_ring
*, struct sk_buff
*))
2223 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2224 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2225 int recv_pkts
, recv_bds
, clean_count
, err
;
2226 int unused_count
= hns3_desc_unused(ring
);
2227 struct sk_buff
*skb
= NULL
;
2230 num
= readl_relaxed(ring
->tqp
->io_base
+ HNS3_RING_RX_RING_FBDNUM_REG
);
	rmb(); /* Make sure num has taken effect before other data is touched */
2233 recv_pkts
= 0, recv_bds
= 0, clean_count
= 0;
2234 num
-= unused_count
;
2236 while (recv_pkts
< budget
&& recv_bds
< num
) {
2237 /* Reuse or realloc buffers */
2238 if (clean_count
+ unused_count
>= RCB_NOF_ALLOC_RX_BUFF_ONCE
) {
2239 hns3_nic_alloc_rx_buffers(ring
,
2240 clean_count
+ unused_count
);
2242 unused_count
= hns3_desc_unused(ring
);
2246 err
= hns3_handle_rx_bd(ring
, &skb
, &bnum
);
2247 if (unlikely(!skb
)) /* This fault cannot be repaired */
2251 clean_count
+= bnum
;
2252 if (unlikely(err
)) { /* Do jump the err */
		/* pass the packet up to the IP stack */
2258 skb
->protocol
= eth_type_trans(skb
, netdev
);
	/* Make sure all data has been written before submit */
2266 if (clean_count
+ unused_count
> 0)
2267 hns3_nic_alloc_rx_buffers(ring
,
2268 clean_count
+ unused_count
);
2273 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group
*ring_group
)
2275 struct hns3_enet_tqp_vector
*tqp_vector
=
2276 ring_group
->ring
->tqp_vector
;
2277 enum hns3_flow_level_range new_flow_level
;
2278 int packets_per_msecs
;
2279 int bytes_per_msecs
;
2283 if (!ring_group
->coal
.int_gl
|| !tqp_vector
->last_jiffies
)
2286 if (ring_group
->total_packets
== 0) {
2287 ring_group
->coal
.int_gl
= HNS3_INT_GL_50K
;
2288 ring_group
->coal
.flow_level
= HNS3_FLOW_LOW
;
	/* Simple throttle rate management
2293 * 0-10MB/s lower (50000 ints/s)
2294 * 10-20MB/s middle (20000 ints/s)
2295 * 20-1249MB/s high (18000 ints/s)
2296 * > 40000pps ultra (8000 ints/s)
2298 new_flow_level
= ring_group
->coal
.flow_level
;
2299 new_int_gl
= ring_group
->coal
.int_gl
;
2301 jiffies_to_msecs(jiffies
- tqp_vector
->last_jiffies
);
2303 if (!time_passed_ms
)
2306 do_div(ring_group
->total_packets
, time_passed_ms
);
2307 packets_per_msecs
= ring_group
->total_packets
;
2309 do_div(ring_group
->total_bytes
, time_passed_ms
);
2310 bytes_per_msecs
= ring_group
->total_bytes
;
2312 #define HNS3_RX_LOW_BYTE_RATE 10000
2313 #define HNS3_RX_MID_BYTE_RATE 20000
2315 switch (new_flow_level
) {
2317 if (bytes_per_msecs
> HNS3_RX_LOW_BYTE_RATE
)
2318 new_flow_level
= HNS3_FLOW_MID
;
2321 if (bytes_per_msecs
> HNS3_RX_MID_BYTE_RATE
)
2322 new_flow_level
= HNS3_FLOW_HIGH
;
2323 else if (bytes_per_msecs
<= HNS3_RX_LOW_BYTE_RATE
)
2324 new_flow_level
= HNS3_FLOW_LOW
;
2326 case HNS3_FLOW_HIGH
:
2327 case HNS3_FLOW_ULTRA
:
2329 if (bytes_per_msecs
<= HNS3_RX_MID_BYTE_RATE
)
2330 new_flow_level
= HNS3_FLOW_MID
;
2334 #define HNS3_RX_ULTRA_PACKET_RATE 40
2336 if (packets_per_msecs
> HNS3_RX_ULTRA_PACKET_RATE
&&
2337 &tqp_vector
->rx_group
== ring_group
)
2338 new_flow_level
= HNS3_FLOW_ULTRA
;
2340 switch (new_flow_level
) {
2342 new_int_gl
= HNS3_INT_GL_50K
;
2345 new_int_gl
= HNS3_INT_GL_20K
;
2347 case HNS3_FLOW_HIGH
:
2348 new_int_gl
= HNS3_INT_GL_18K
;
2350 case HNS3_FLOW_ULTRA
:
2351 new_int_gl
= HNS3_INT_GL_8K
;
2357 ring_group
->total_bytes
= 0;
2358 ring_group
->total_packets
= 0;
2359 ring_group
->coal
.flow_level
= new_flow_level
;
2360 if (new_int_gl
!= ring_group
->coal
.int_gl
) {
2361 ring_group
->coal
.int_gl
= new_int_gl
;
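/* Re-evaluate the RX and TX GL values of a vector once its adapt-down
 * counter has expired, and program any changed value into hardware via
 * hns3_set_vector_coalesce_rx_gl()/hns3_set_vector_coalesce_tx_gl().
 */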
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
        struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
        struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
        bool rx_update, tx_update;

        if (tqp_vector->int_adapt_down > 0) {
                tqp_vector->int_adapt_down--;
                return;
        }

        if (rx_group->coal.gl_adapt_enable) {
                rx_update = hns3_get_new_int_gl(rx_group);
                if (rx_update)
                        hns3_set_vector_coalesce_rx_gl(tqp_vector,
                                                       rx_group->coal.int_gl);
        }

        if (tx_group->coal.gl_adapt_enable) {
                tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
                if (tx_update)
                        hns3_set_vector_coalesce_tx_gl(tqp_vector,
                                                       tx_group->coal.int_gl);
        }

        tqp_vector->last_jiffies = jiffies;
        tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
}

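/* NAPI poll callback shared by all TQP vectors.  TX rings are cleaned with
 * the full budget (TX completion work is minimal), while the RX budget is
 * split across the RX rings of this vector and never drops below one.  The
 * vector interrupt is re-enabled only when every ring was cleaned
 * completely.
 */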
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
        struct hns3_enet_ring *ring;
        int rx_pkt_total = 0;
        int rx_budget;

        struct hns3_enet_tqp_vector *tqp_vector =
                container_of(napi, struct hns3_enet_tqp_vector, napi);
        bool clean_complete = true;

        /* Since the actual Tx work is minimal, we can give the Tx a larger
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
        hns3_for_each_ring(ring, tqp_vector->tx_group) {
                if (!hns3_clean_tx_ring(ring, budget))
                        clean_complete = false;
        }

        /* make sure the rx ring budget is not smaller than 1 */
        rx_budget = max(budget / tqp_vector->num_tqps, 1);

        hns3_for_each_ring(ring, tqp_vector->rx_group) {
                int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
                                                    hns3_rx_skb);

                if (rx_cleaned >= rx_budget)
                        clean_complete = false;

                rx_pkt_total += rx_cleaned;
        }

        tqp_vector->rx_group.total_packets += rx_pkt_total;

        if (!clean_complete)
                return budget;

        napi_complete(napi);
        hns3_update_new_int_gl(tqp_vector);
        hns3_mask_vector_irq(tqp_vector, 1);

        return rx_pkt_total;
}

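/* Build the TX-then-RX chain of hnae3_ring_chain_node entries that
 * describes which rings are serviced by @tqp_vector.  The chain is handed
 * to the AE algo ops (map_ring_to_vector/unmap_ring_from_vector) and
 * released with hns3_free_vector_ring_chain().  Additional nodes are
 * devm-allocated, so -ENOMEM is returned if an allocation fails.
 */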
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
                                      struct hnae3_ring_chain_node *head)
{
        struct pci_dev *pdev = tqp_vector->handle->pdev;
        struct hnae3_ring_chain_node *cur_chain = head;
        struct hnae3_ring_chain_node *chain;
        struct hns3_enet_ring *tx_ring;
        struct hns3_enet_ring *rx_ring;

        tx_ring = tqp_vector->tx_group.ring;
        if (tx_ring) {
                cur_chain->tqp_index = tx_ring->tqp->tqp_index;
                hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
                             HNAE3_RING_TYPE_TX);
                hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                               HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

                cur_chain->next = NULL;

                while (tx_ring->next) {
                        tx_ring = tx_ring->next;

                        chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
                                             GFP_KERNEL);
                        if (!chain)
                                return -ENOMEM;

                        cur_chain->next = chain;
                        chain->tqp_index = tx_ring->tqp->tqp_index;
                        hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
                                     HNAE3_RING_TYPE_TX);
                        hnae_set_field(chain->int_gl_idx,
                                       HNAE3_RING_GL_IDX_M,
                                       HNAE3_RING_GL_IDX_S,
                                       HNAE3_RING_GL_TX);

                        cur_chain = chain;
                }
        }

        rx_ring = tqp_vector->rx_group.ring;
        if (!tx_ring && rx_ring) {
                cur_chain->next = NULL;
                cur_chain->tqp_index = rx_ring->tqp->tqp_index;
                hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
                             HNAE3_RING_TYPE_RX);
                hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                               HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

                rx_ring = rx_ring->next;
        }

        while (rx_ring) {
                chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
                if (!chain)
                        return -ENOMEM;

                cur_chain->next = chain;
                chain->tqp_index = rx_ring->tqp->tqp_index;
                hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
                             HNAE3_RING_TYPE_RX);
                hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                               HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

                cur_chain = chain;

                rx_ring = rx_ring->next;
        }

        return 0;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
                                        struct hnae3_ring_chain_node *head)
{
        struct pci_dev *pdev = tqp_vector->handle->pdev;
        struct hnae3_ring_chain_node *chain_tmp, *chain;

        chain = head->next;

        while (chain) {
                chain_tmp = chain->next;
                devm_kfree(&pdev->dev, chain);
                chain = chain_tmp;
        }
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
                                   struct hns3_enet_ring *ring)
{
        ring->next = group->ring;
        group->ring = ring;
        group->count++;
}

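/* Distribute the TX/RX rings across the allocated vectors (ring i goes to
 * vector i % vector_num), then build the ring chain for each vector, ask
 * the AE algo to map it to the vector's interrupt, and register the NAPI
 * instance that will poll it.
 */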
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
        struct hnae3_ring_chain_node vector_ring_chain;
        struct hnae3_handle *h = priv->ae_handle;
        struct hns3_enet_tqp_vector *tqp_vector;
        int ret = 0;
        u16 i;

        for (i = 0; i < priv->vector_num; i++) {
                tqp_vector = &priv->tqp_vector[i];
                hns3_vector_gl_rl_init_hw(tqp_vector, priv);
                tqp_vector->num_tqps = 0;
        }

        for (i = 0; i < h->kinfo.num_tqps; i++) {
                u16 vector_i = i % priv->vector_num;
                u16 tqp_num = h->kinfo.num_tqps;

                tqp_vector = &priv->tqp_vector[vector_i];

                hns3_add_ring_to_group(&tqp_vector->tx_group,
                                       priv->ring_data[i].ring);

                hns3_add_ring_to_group(&tqp_vector->rx_group,
                                       priv->ring_data[i + tqp_num].ring);

                priv->ring_data[i].ring->tqp_vector = tqp_vector;
                priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
                tqp_vector->num_tqps++;
        }

        for (i = 0; i < priv->vector_num; i++) {
                tqp_vector = &priv->tqp_vector[i];

                tqp_vector->rx_group.total_bytes = 0;
                tqp_vector->rx_group.total_packets = 0;
                tqp_vector->tx_group.total_bytes = 0;
                tqp_vector->tx_group.total_packets = 0;
                tqp_vector->handle = h;

                ret = hns3_get_vector_ring_chain(tqp_vector,
                                                 &vector_ring_chain);
                if (ret)
                        return ret;

                ret = h->ae_algo->ops->map_ring_to_vector(h,
                        tqp_vector->vector_irq, &vector_ring_chain);

                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

                if (ret)
                        return ret;

                netif_napi_add(priv->netdev, &tqp_vector->napi,
                               hns3_nic_common_poll, NAPI_POLL_WEIGHT);
        }

        return 0;
}

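/* Allocate the TQP vectors for this netdev.  The requested vector count is
 * min(online CPUs, number of TQPs); the AE algo may grant fewer via
 * ops->get_vector(), and the number actually granted is what is stored in
 * priv->vector_num.
 */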
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
        struct hnae3_handle *h = priv->ae_handle;
        struct hns3_enet_tqp_vector *tqp_vector;
        struct hnae3_vector_info *vector;
        struct pci_dev *pdev = h->pdev;
        u16 tqp_num = h->kinfo.num_tqps;
        u16 vector_num;
        int ret = 0;
        u16 i;

        /* RSS size, cpu online and vector_num should be the same */
        /* Should consider 2p/4p later */
        vector_num = min_t(u16, num_online_cpus(), tqp_num);
        vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
                              GFP_KERNEL);
        if (!vector)
                return -ENOMEM;

        vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

        priv->vector_num = vector_num;
        priv->tqp_vector = (struct hns3_enet_tqp_vector *)
                devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
                             GFP_KERNEL);
        if (!priv->tqp_vector) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < priv->vector_num; i++) {
                tqp_vector = &priv->tqp_vector[i];
                tqp_vector->idx = i;
                tqp_vector->mask_addr = vector[i].io_addr;
                tqp_vector->vector_irq = vector[i].vector;
                hns3_vector_gl_rl_init(tqp_vector, priv);
        }

out:
        devm_kfree(&pdev->dev, vector);
        return ret;
}

static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
        group->ring = NULL;
        group->count = 0;
}

static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
        struct hnae3_ring_chain_node vector_ring_chain;
        struct hnae3_handle *h = priv->ae_handle;
        struct hns3_enet_tqp_vector *tqp_vector;
        int i, ret;

        for (i = 0; i < priv->vector_num; i++) {
                tqp_vector = &priv->tqp_vector[i];

                ret = hns3_get_vector_ring_chain(tqp_vector,
                                                 &vector_ring_chain);
                if (ret)
                        return ret;

                ret = h->ae_algo->ops->unmap_ring_from_vector(h,
                        tqp_vector->vector_irq, &vector_ring_chain);
                if (ret)
                        return ret;

                ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
                if (ret)
                        return ret;

                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

                if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
                        (void)irq_set_affinity_hint(
                                priv->tqp_vector[i].vector_irq,
                                NULL);
                        free_irq(priv->tqp_vector[i].vector_irq,
                                 &priv->tqp_vector[i]);
                }

                priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
                hns3_clear_ring_group(&tqp_vector->rx_group);
                hns3_clear_ring_group(&tqp_vector->tx_group);
                netif_napi_del(&priv->tqp_vector[i].napi);
        }

        return 0;
}

static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
        struct hnae3_handle *h = priv->ae_handle;
        struct pci_dev *pdev = h->pdev;
        int i, ret;

        for (i = 0; i < priv->vector_num; i++) {
                struct hns3_enet_tqp_vector *tqp_vector;

                tqp_vector = &priv->tqp_vector[i];
                ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
                if (ret)
                        return ret;
        }

        devm_kfree(&pdev->dev, priv->tqp_vector);
        return 0;
}

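/* Create one hns3_enet_ring for queue @q.  TX rings occupy the first
 * kinfo.num_tqps slots of priv->ring_data and use the TX register window
 * (HNS3_TX_REG_OFFSET); RX rings occupy the second half and use the queue
 * base registers directly.
 */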
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
                             int ring_type)
{
        struct hns3_nic_ring_data *ring_data = priv->ring_data;
        int queue_num = priv->ae_handle->kinfo.num_tqps;
        struct pci_dev *pdev = priv->ae_handle->pdev;
        struct hns3_enet_ring *ring;

        ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        if (ring_type == HNAE3_RING_TYPE_TX) {
                ring_data[q->tqp_index].ring = ring;
                ring_data[q->tqp_index].queue_index = q->tqp_index;
                ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
        } else {
                ring_data[q->tqp_index + queue_num].ring = ring;
                ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
                ring->io_base = q->io_base;
        }

        hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

        ring->tqp = q;
        ring->desc = NULL;
        ring->desc_cb = NULL;
        ring->dev = priv->dev;
        ring->desc_dma_addr = 0;
        ring->buf_size = q->buf_size;
        ring->desc_num = q->desc_num;
        ring->next_to_use = 0;
        ring->next_to_clean = 0;

        return 0;
}

static int hns3_queue_to_ring(struct hnae3_queue *tqp,
                              struct hns3_nic_priv *priv)
{
        int ret;

        ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
        if (ret)
                return ret;

        ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
        if (ret)
                return ret;

        return 0;
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
        struct hnae3_handle *h = priv->ae_handle;
        struct pci_dev *pdev = h->pdev;
        int i, ret;

        priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
                                       sizeof(*priv->ring_data) * 2,
                                       GFP_KERNEL);
        if (!priv->ring_data)
                return -ENOMEM;

        for (i = 0; i < h->kinfo.num_tqps; i++) {
                ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
                if (ret)
                        goto err;
        }

        return 0;
err:
        devm_kfree(&pdev->dev, priv->ring_data);
        return ret;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
        struct hnae3_handle *h = priv->ae_handle;
        int i;

        for (i = 0; i < h->kinfo.num_tqps; i++) {
                devm_kfree(priv->dev, priv->ring_data[i].ring);
                devm_kfree(priv->dev,
                           priv->ring_data[i + h->kinfo.num_tqps].ring);
        }
        devm_kfree(priv->dev, priv->ring_data);
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
        int ret;

        if (ring->desc_num <= 0 || ring->buf_size <= 0)
                return -EINVAL;

        ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
                                GFP_KERNEL);
        if (!ring->desc_cb) {
                ret = -ENOMEM;
                goto out;
        }

        ret = hns3_alloc_desc(ring);
        if (ret)
                goto out_with_desc_cb;

        if (!HNAE3_IS_TX_RING(ring)) {
                ret = hns3_alloc_ring_buffers(ring);
                if (ret)
                        goto out_with_desc;
        }

        return 0;

out_with_desc:
        hns3_free_desc(ring);
out_with_desc_cb:
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
out:
        return ret;
}

static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
        hns3_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
}

static int hns3_buf_size2type(u32 buf_size)
{
        int bd_size_type;

        switch (buf_size) {
        case 512:
                bd_size_type = HNS3_BD_SIZE_512_TYPE;
                break;
        case 1024:
                bd_size_type = HNS3_BD_SIZE_1024_TYPE;
                break;
        case 2048:
                bd_size_type = HNS3_BD_SIZE_2048_TYPE;
                break;
        case 4096:
                bd_size_type = HNS3_BD_SIZE_4096_TYPE;
                break;
        default:
                bd_size_type = HNS3_BD_SIZE_2048_TYPE;
        }

        return bd_size_type;
}

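/* Program the descriptor ring into hardware: the BD base address (split
 * into low/high 32-bit registers), the BD length type derived from the
 * buffer size, and the BD number register (written as desc_num / 8 - 1).
 */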
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
        dma_addr_t dma = ring->desc_dma_addr;
        struct hnae3_queue *q = ring->tqp;

        if (!HNAE3_IS_TX_RING(ring)) {
                hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
                               (u32)dma);
                hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
                               (u32)((dma >> 31) >> 1));

                hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
                               hns3_buf_size2type(ring->buf_size));
                hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
                               ring->desc_num / 8 - 1);

        } else {
                hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
                               (u32)dma);
                hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
                               (u32)((dma >> 31) >> 1));

                hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
                               hns3_buf_size2type(ring->buf_size));
                hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
                               ring->desc_num / 8 - 1);
        }
}

int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
        struct hnae3_handle *h = priv->ae_handle;
        int ring_num = h->kinfo.num_tqps * 2;
        int i, j;
        int ret;

        for (i = 0; i < ring_num; i++) {
                ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
                if (ret) {
                        dev_err(priv->dev,
                                "Alloc ring memory fail! ret=%d\n", ret);
                        goto out_when_alloc_ring_memory;
                }

                hns3_init_ring_hw(priv->ring_data[i].ring);

                u64_stats_init(&priv->ring_data[i].ring->syncp);
        }

        return 0;

out_when_alloc_ring_memory:
        for (j = i - 1; j >= 0; j--)
                hns3_fini_ring(priv->ring_data[j].ring);

        return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
        struct hnae3_handle *h = priv->ae_handle;
        int i;

        for (i = 0; i < h->kinfo.num_tqps; i++) {
                if (h->ae_algo->ops->reset_queue)
                        h->ae_algo->ops->reset_queue(h, i);

                hns3_fini_ring(priv->ring_data[i].ring);
                hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
        }

        return 0;
}

/* Set the MAC address if it is configured, otherwise leave it to the AE driver */
static void hns3_init_mac_addr(struct net_device *netdev, bool init)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = priv->ae_handle;
        u8 mac_addr_temp[ETH_ALEN];

        if (h->ae_algo->ops->get_mac_addr && init) {
                h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
                ether_addr_copy(netdev->dev_addr, mac_addr_temp);
        }

        /* Check if the MAC address is valid, if not get a random one */
        if (!is_valid_ether_addr(netdev->dev_addr)) {
                eth_hw_addr_random(netdev);
                dev_warn(priv->dev, "using random MAC address %pM\n",
                         netdev->dev_addr);
        }

        if (h->ae_algo->ops->set_mac_addr)
                h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
}

static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);

        if ((netdev->features & NETIF_F_TSO) ||
            (netdev->features & NETIF_F_TSO6)) {
                priv->ops.fill_desc = hns3_fill_desc_tso;
                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
        } else {
                priv->ops.fill_desc = hns3_fill_desc;
                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
        }
}

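/* Probe-time initialisation for one hnae3 handle: allocate the multiqueue
 * netdev, wire up the netdev_ops and ethtool ops, build the ring and
 * vector data structures, and finally register the netdev.  The error
 * labels unwind in reverse order of the steps above.
 */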
static int hns3_client_init(struct hnae3_handle *handle)
{
        struct pci_dev *pdev = handle->pdev;
        struct hns3_nic_priv *priv;
        struct net_device *netdev;
        int ret;

        netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
                                   hns3_get_max_available_channels(handle));
        if (!netdev)
                return -ENOMEM;

        priv = netdev_priv(netdev);
        priv->dev = &pdev->dev;
        priv->netdev = netdev;
        priv->ae_handle = handle;
        priv->ae_handle->reset_level = HNAE3_NONE_RESET;
        priv->ae_handle->last_reset_time = jiffies;
        priv->tx_timeout_count = 0;

        handle->kinfo.netdev = netdev;
        handle->priv = (void *)priv;

        hns3_init_mac_addr(netdev, true);

        hns3_set_default_feature(netdev);

        netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->netdev_ops = &hns3_nic_netdev_ops;
        SET_NETDEV_DEV(netdev, &pdev->dev);
        hns3_ethtool_set_ops(netdev);
        hns3_nic_set_priv_ops(netdev);

        /* Carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

        ret = hns3_get_ring_config(priv);
        if (ret) {
                ret = -ENOMEM;
                goto out_get_ring_cfg;
        }

        ret = hns3_nic_alloc_vector_data(priv);
        if (ret) {
                ret = -ENOMEM;
                goto out_alloc_vector_data;
        }

        ret = hns3_nic_init_vector_data(priv);
        if (ret) {
                ret = -ENOMEM;
                goto out_init_vector_data;
        }

        ret = hns3_init_all_ring(priv);
        if (ret) {
                ret = -ENOMEM;
                goto out_init_ring_data;
        }

        ret = register_netdev(netdev);
        if (ret) {
                dev_err(priv->dev, "probe register netdev fail!\n");
                goto out_reg_netdev_fail;
        }

        hns3_dcbnl_setup(handle);

        /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
        netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

        return ret;

out_reg_netdev_fail:
out_init_ring_data:
        (void)hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
        hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
        priv->ring_data = NULL;
out_get_ring_cfg:
        priv->ae_handle = NULL;
        free_netdev(netdev);
        return ret;
}

static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
        struct net_device *netdev = handle->kinfo.netdev;
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret;

        if (netdev->reg_state != NETREG_UNINITIALIZED)
                unregister_netdev(netdev);

        ret = hns3_nic_uninit_vector_data(priv);
        if (ret)
                netdev_err(netdev, "uninit vector error\n");

        ret = hns3_nic_dealloc_vector_data(priv);
        if (ret)
                netdev_err(netdev, "dealloc vector error\n");

        ret = hns3_uninit_all_ring(priv);
        if (ret)
                netdev_err(netdev, "uninit ring error\n");

        hns3_put_ring_config(priv);

        priv->ring_data = NULL;

        free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
        struct net_device *netdev = handle->kinfo.netdev;

        if (!netdev)
                return;

        if (linkup) {
                netif_carrier_on(netdev);
                netif_tx_wake_all_queues(netdev);
                netdev_info(netdev, "link up\n");
        } else {
                netif_carrier_off(netdev);
                netif_tx_stop_all_queues(netdev);
                netdev_info(netdev, "link down\n");
        }
}

static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct net_device *ndev = kinfo->netdev;
        bool if_running;
        int ret;
        u8 i;

        if (tc > HNAE3_MAX_TC)
                return -EINVAL;

        if (!ndev)
                return -ENODEV;

        if_running = netif_running(ndev);

        ret = netdev_set_num_tc(ndev, tc);
        if (ret)
                return ret;

        if (if_running) {
                (void)hns3_nic_net_stop(ndev);
                msleep(100);
        }

        ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
                kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
        if (ret)
                goto err_out;

        if (tc <= 1) {
                netdev_reset_tc(ndev);
                goto out;
        }

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];

                if (tc_info->enable)
                        netdev_set_tc_queue(ndev,
                                            tc_info->tc,
                                            tc_info->tqp_count,
                                            tc_info->tqp_offset);
        }

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                netdev_set_prio_tc_map(ndev, i,
                                       kinfo->prio_tc[i]);
        }

out:
        ret = hns3_nic_set_real_num_queue(ndev);

err_out:
        if (if_running)
                (void)hns3_nic_net_open(ndev);

        return ret;
}

static void hns3_recover_hw_addr(struct net_device *ndev)
{
        struct netdev_hw_addr_list *list;
        struct netdev_hw_addr *ha, *tmp;

        /* go through and sync uc_addr entries to the device */
        list = &ndev->uc;
        list_for_each_entry_safe(ha, tmp, &list->list, list)
                hns3_nic_uc_sync(ndev, ha->addr);

        /* go through and sync mc_addr entries to the device */
        list = &ndev->mc;
        list_for_each_entry_safe(ha, tmp, &list->list, list)
                hns3_nic_mc_sync(ndev, ha->addr);
}

static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
        if (!HNAE3_IS_TX_RING(ring))
                return;

        while (ring->next_to_clean != ring->next_to_use) {
                hns3_free_buffer_detach(ring, ring->next_to_clean);
                ring_ptr_move_fw(ring, next_to_clean);
        }
}

static void hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
        if (HNAE3_IS_TX_RING(ring))
                return;

        while (ring->next_to_use != ring->next_to_clean) {
                /* When a buffer is not reused, its memory has been
                 * freed in hns3_handle_rx_bd or will be freed by
                 * the stack, so we only need to unmap the buffer here.
                 */
                if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
                        hns3_unmap_buffer(ring,
                                          &ring->desc_cb[ring->next_to_use]);
                        ring->desc_cb[ring->next_to_use].dma = 0;
                }

                ring_ptr_move_fw(ring, next_to_use);
        }
}

static void hns3_clear_all_ring(struct hnae3_handle *h)
{
        struct net_device *ndev = h->kinfo.netdev;
        struct hns3_nic_priv *priv = netdev_priv(ndev);
        u32 i;

        for (i = 0; i < h->kinfo.num_tqps; i++) {
                struct netdev_queue *dev_queue;
                struct hns3_enet_ring *ring;

                ring = priv->ring_data[i].ring;
                hns3_clear_tx_ring(ring);
                dev_queue = netdev_get_tx_queue(ndev,
                                                priv->ring_data[i].queue_index);
                netdev_tx_reset_queue(dev_queue);

                ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
                hns3_clear_rx_ring(ring);
        }
}

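/* The hns3_reset_notify_*() helpers below back the reset_notify client op:
 * DOWN/UP stop and restart the interface, while UNINIT/INIT tear down and
 * rebuild the rings and vector mappings around a hardware reset driven by
 * the AE layer.
 */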
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct net_device *ndev = kinfo->netdev;

        if (!netif_running(ndev))
                return 0;

        return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        int ret = 0;

        if (netif_running(kinfo->netdev)) {
                ret = hns3_nic_net_up(kinfo->netdev);
                if (ret) {
                        netdev_err(kinfo->netdev,
                                   "hns net up fail, ret=%d!\n", ret);
                        return ret;
                }
                handle->last_reset_time = jiffies;
        }

        return ret;
}

static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
        struct net_device *netdev = handle->kinfo.netdev;
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret;

        hns3_init_mac_addr(netdev, false);
        hns3_nic_set_rx_mode(netdev);
        hns3_recover_hw_addr(netdev);

        /* The hardware table is only cleared when the PF resets */
        if (!(handle->flags & HNAE3_SUPPORT_VF))
                hns3_restore_vlan(netdev);

        /* Carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);

        ret = hns3_get_ring_config(priv);
        if (ret)
                return ret;

        ret = hns3_nic_init_vector_data(priv);
        if (ret)
                return ret;

        ret = hns3_init_all_ring(priv);
        if (ret) {
                hns3_nic_uninit_vector_data(priv);
                priv->ring_data = NULL;
        }

        return ret;
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
        struct net_device *netdev = handle->kinfo.netdev;
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret;

        hns3_clear_all_ring(handle);

        ret = hns3_nic_uninit_vector_data(priv);
        if (ret) {
                netdev_err(netdev, "uninit vector error\n");
                return ret;
        }

        ret = hns3_uninit_all_ring(priv);
        if (ret)
                netdev_err(netdev, "uninit ring error\n");

        hns3_put_ring_config(priv);

        priv->ring_data = NULL;

        return ret;
}

static int hns3_reset_notify(struct hnae3_handle *handle,
                             enum hnae3_reset_notify_type type)
{
        int ret = 0;

        switch (type) {
        case HNAE3_UP_CLIENT:
                ret = hns3_reset_notify_up_enet(handle);
                break;
        case HNAE3_DOWN_CLIENT:
                ret = hns3_reset_notify_down_enet(handle);
                break;
        case HNAE3_INIT_CLIENT:
                ret = hns3_reset_notify_init_enet(handle);
                break;
        case HNAE3_UNINIT_CLIENT:
                ret = hns3_reset_notify_uninit_enet(handle);
                break;
        default:
                break;
        }

        return ret;
}

static void hns3_restore_coal(struct hns3_nic_priv *priv,
                              struct hns3_enet_coalesce *tx,
                              struct hns3_enet_coalesce *rx)
{
        u16 vector_num = priv->vector_num;
        int i;

        for (i = 0; i < vector_num; i++) {
                memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
                       sizeof(struct hns3_enet_coalesce));
                memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
                       sizeof(struct hns3_enet_coalesce));
        }
}

static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
                               struct hns3_enet_coalesce *tx,
                               struct hns3_enet_coalesce *rx)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = hns3_get_handle(netdev);
        int ret;

        ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
        if (ret)
                return ret;

        ret = hns3_get_ring_config(priv);
        if (ret)
                return ret;

        ret = hns3_nic_alloc_vector_data(priv);
        if (ret)
                goto err_alloc_vector;

        hns3_restore_coal(priv, tx, rx);

        ret = hns3_nic_init_vector_data(priv);
        if (ret)
                goto err_uninit_vector;

        ret = hns3_init_all_ring(priv);
        if (ret)
                goto err_put_ring;

        return 0;

err_put_ring:
        hns3_put_ring_config(priv);
err_uninit_vector:
        hns3_nic_uninit_vector_data(priv);
err_alloc_vector:
        hns3_nic_dealloc_vector_data(priv);
        return ret;
}

static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
        return (new_tqp_num / num_tc) * num_tc;
}

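/* ethtool -L handler (e.g. "ethtool -L eth0 combined 8").  Only the
 * combined channel count may be changed; the requested value is rounded
 * down to a multiple of the TC number by hns3_adjust_tqps_num(), the
 * vector 0 coalesce settings are saved, the old rings/vectors are torn
 * down, and hns3_modify_tqp_num() rebuilds everything with the new TQP
 * count, reverting to the old count if that fails.
 */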
int hns3_set_channels(struct net_device *netdev,
                      struct ethtool_channels *ch)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct hnae3_knic_private_info *kinfo = &h->kinfo;
        struct hns3_enet_coalesce tx_coal, rx_coal;
        bool if_running = netif_running(netdev);
        u32 new_tqp_num = ch->combined_count;
        u16 org_tqp_num;
        int ret;

        if (ch->rx_count || ch->tx_count)
                return -EOPNOTSUPP;

        if (new_tqp_num > hns3_get_max_available_channels(h) ||
            new_tqp_num < kinfo->num_tc) {
                dev_err(&netdev->dev,
                        "Change tqps fail, the tqp range is from %d to %d",
                        kinfo->num_tc,
                        hns3_get_max_available_channels(h));
                return -EINVAL;
        }

        new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
        if (kinfo->num_tqps == new_tqp_num)
                return 0;

        if (if_running)
                hns3_nic_net_stop(netdev);

        hns3_clear_all_ring(h);

        ret = hns3_nic_uninit_vector_data(priv);
        if (ret) {
                dev_err(&netdev->dev,
                        "Unbind vector with tqp fail, nothing is changed");
                goto open_netdev;
        }

        /* Changing the tqp num may also change the vector num,
         * ethtool only supports setting and querying one coalesce
         * configuration for now, so save the vector 0 coalesce
         * configuration here in order to restore it.
         */
        memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
               sizeof(struct hns3_enet_coalesce));
        memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
               sizeof(struct hns3_enet_coalesce));

        hns3_nic_dealloc_vector_data(priv);

        hns3_uninit_all_ring(priv);
        hns3_put_ring_config(priv);

        org_tqp_num = h->kinfo.num_tqps;
        ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
        if (ret) {
                ret = hns3_modify_tqp_num(netdev, org_tqp_num,
                                          &tx_coal, &rx_coal);
                if (ret) {
                        /* If reverting to the old tqp num failed, a fatal error occurred */
                        dev_err(&netdev->dev,
                                "Revert to old tqp num fail, ret=%d", ret);
                        return ret;
                }
                dev_info(&netdev->dev,
                         "Change tqp num fail, reverted to old tqp num");
        }

open_netdev:
        if (if_running)
                hns3_nic_net_open(netdev);

        return ret;
}

= {
3488 .init_instance
= hns3_client_init
,
3489 .uninit_instance
= hns3_client_uninit
,
3490 .link_status_change
= hns3_link_status_change
,
3491 .setup_tc
= hns3_client_setup_tc
,
3492 .reset_notify
= hns3_reset_notify
,
/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. It registers the client with the HNAE3 framework and the
 * driver with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
        int ret;

        pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
        pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

        client.type = HNAE3_CLIENT_KNIC;
        snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
                 hns3_driver_name);

        client.ops = &client_ops;

        INIT_LIST_HEAD(&client.node);

        ret = hnae3_register_client(&client);
        if (ret)
                return ret;

        ret = pci_register_driver(&hns3_driver);
        if (ret)
                hnae3_unregister_client(&client);

        return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
        pci_unregister_driver(&hns3_driver);
        hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
MODULE_VERSION(HNS3_MOD_VERSION);