/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"
const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
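/* Exporting the ID table with MODULE_DEVICE_TABLE() lets the module loader
 * (udev/modprobe) match the PCI vendor/device IDs above against newly probed
 * devices and autoload this driver for them.
 */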
static irqreturn_t hns3_irq_handle(int irq, void *dev)
{
	struct hns3_enet_tqp_vector *tqp_vector = dev;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}
static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}
static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
				       u32 gl_value)
{
	/* this defines the configuration for GL (Interrupt Gap Limiter)
	 * GL defines the inter-interrupt gap.
	 * GL and RL (Rate Limiter) are the two ways to achieve interrupt
	 * coalescing.
	 */
	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
}

static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
				       u32 rl_value)
{
	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second.
	 * GL and RL (Rate Limiter) are the two ways to achieve interrupt
	 * coalescing.
	 */
	writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing */
	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
	tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
	hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
	/* for now we are disabling Interrupt RL - we
	 * will re-enable later
	 */
	hns3_set_vector_coalesc_rl(tqp_vector, 0);
	tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}
static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	return 0;

out_start_err:
	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}
static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int ret;

	netif_carrier_off(netdev);

	ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}
void hns3_set_multicast_list(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (h->ae_algo->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, netdev)
			if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
				netdev_err(netdev, "set multicast fail\n");
	}
}
static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}
void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->set_promisc_mode) {
		if (netdev->flags & IFF_PROMISC)
			h->ae_algo->ops->set_promisc_mode(h, 1);
		else
			h->ae_algo->ops->set_promisc_mode(h, 0);
	}
	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
		netdev_err(netdev, "sync uc address fail\n");
	if (netdev->flags & IFF_MULTICAST)
		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
			netdev_err(netdev, "sync mc address fail\n");
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type &
		    SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type &
		    SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae_set_bit(*type_cs_vlan_tso,
		     HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union l3_hdr_info l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}
static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	union {
		struct tcphdr *tcp;
		struct sctphdr *sctp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
		       HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet */
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae_set_field(*ol_type_vlan_len_msec,
			       HNS3_TXD_L2LEN_M,
			       HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
			       HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header. */
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes. */
			ol4_len = l2_hdr - l4.hdr;
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
				       HNS3_TXD_L4LEN_S, ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes. */
			l2_len = l3.hdr - l2_hdr;
			hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
				       HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
			 * the txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
		       HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware,
		 * the txbd len field is not filled.
		 */
		return;
	}
}
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4). */
	if (skb->encapsulation) {
		/* define outer network header type. */
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_CSUM);
			else
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_NO_CSUM);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4). */
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_M,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_M,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate csum
			 * when TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already,
			 * driver calculates l4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
			       HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);

		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
			       HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb tunnel packet if hardware doesn't support it,
		 * because hardware can't calculate csum when TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already,
		 * driver calculates l4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
		       HNS3_TXD_BDTYPE_M, 0);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  enum hns_desc_type type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	u32 ol_type_vlan_len_msec = 0;
	u16 bdtp_fe_sc_vld_ra_ri = 0;
	u32 type_cs_vlan_tso = 0;
	struct sk_buff *skb;
	u32 paylen = 0;
	u16 mss = 0;
	__be16 protocol;
	u8 ol4_proto;
	u8 il4_proto;
	int ret;

	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	/* now, fill the descriptor */
	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);
	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;
		paylen = cpu_to_le16(skb->len);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;

			/* vlan packet */
			if (protocol == htons(ETH_P_8021Q)) {
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}
			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
			if (ret)
				return ret;
			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
					    &type_cs_vlan_tso,
					    &ol_type_vlan_len_msec);
			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
						      &type_cs_vlan_tso,
						      &ol_type_vlan_len_msec);
			if (ret)
				return ret;

			ret = hns3_set_tso(skb, &paylen, &mss,
					   &type_cs_vlan_tso);
			if (ret)
				return ret;
		}

		/* Set txbd */
		desc->tx.ol_type_vlan_len_msec =
			cpu_to_le32(ol_type_vlan_len_msec);
		desc->tx.type_cs_vlan_tso_len =
			cpu_to_le32(type_cs_vlan_tso);
		desc->tx.paylen = cpu_to_le16(paylen);
		desc->tx.mss = cpu_to_le16(mss);
	}

	/* move ring pointer to next. */
	ring_ptr_move_fw(ring, next_to_use);

	return 0;
}
static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
			      int size, dma_addr_t dma, int frag_end,
			      enum hns_desc_type type)
{
	unsigned int frag_buf_num;
	unsigned int k;
	int sizeoflast;
	int ret;

	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		ret = hns3_fill_desc(ring, priv,
				     (k == frag_buf_num - 1) ?
				     sizeoflast : HNS3_MAX_BD_SIZE,
				     dma + HNS3_MAX_BD_SIZE * k,
				     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				     (type == DESC_TYPE_SKB && !k) ?
				     DESC_TYPE_SKB : DESC_TYPE_PAGE);
		if (ret)
			return ret;
	}

	return 0;
}
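/* Worked example for the BD split above: for a fragment of
 * (2 * HNS3_MAX_BD_SIZE + 1) bytes, frag_buf_num is 3 and the loop emits
 * three descriptors of HNS3_MAX_BD_SIZE, HNS3_MAX_BD_SIZE and 1 bytes;
 * only the last one may carry the frag_end flag, and only the first one
 * keeps the DESC_TYPE_SKB type.
 */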
static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
				   struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct skb_frag_struct *frag;
	int bdnum_for_frag;
	int frag_num;
	int buf_num;
	int size;
	int i;

	size = skb_headlen(skb);
	buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		bdnum_for_frag =
			(size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
		if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
			return -ENOMEM;

		buf_num += bdnum_for_frag;
	}

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}
static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
				  struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	int buf_num;

	/* No. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}
static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					 ring->desc_cb[ring->next_to_use].length,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);
	}
}
static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
				     struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_nic_ring_data *ring_data =
		&tx_ring_data(priv, skb->queue_mapping);
	struct hns3_enet_ring *ring = ring_data->ring;
	struct device *dev = priv->dev;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int next_to_use_head;
	int next_to_use_frag;
	dma_addr_t dma;
	int buf_num;
	int seg_num;
	int size;
	int ret;
	int i;

	/* Prefetch the data used later */
	prefetch(skb->data);

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_busy++;
		u64_stats_update_end(&ring->syncp);

		goto out_net_tx_busy;
	case -ENOMEM:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		netdev_err(netdev, "no memory to xmit!\n");

		goto out_err_tx_ok;
	default:
		break;
	}

	/* No. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	/* Fill the first part */
	size = skb_headlen(skb);

	next_to_use_head = ring->next_to_use;

	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(netdev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}

	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
				  DESC_TYPE_SKB);
	if (ret)
		goto head_dma_map_err;

	next_to_use_frag = ring->next_to_use;
	/* Fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto frag_dma_map_err;
		}
		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
					  seg_num - 1 == i ? 1 : 0,
					  DESC_TYPE_PAGE);
		if (ret)
			goto frag_dma_map_err;
	}

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* Commit all data before submit */

	hnae_queue_xmit(ring->tqp, buf_num);

	return NETDEV_TX_OK;

frag_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_frag);

head_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(netdev, ring_data->queue_index);
	smp_mb(); /* Commit all data before submit */

	return NETDEV_TX_BUSY;
}
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}
static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}

	netdev->features = features;

	return 0;
}
static void
hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hns3_enet_ring *ring;
	unsigned int start;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = priv->ring_data[idx].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = priv->ring_data[idx + queue_num].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = netdev->stats.rx_errors;
	stats->multicast = netdev->stats.multicast;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = netdev->stats.tx_errors;
	stats->rx_dropped = netdev->stats.rx_dropped;
	stats->tx_dropped = netdev->stats.tx_dropped;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}
static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
				 enum hns3_udp_tnl_type type)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
	struct hnae3_handle *h = priv->ae_handle;

	if (udp_tnl->used && udp_tnl->dst_port == port) {
		udp_tnl->used++;
		return;
	}

	if (udp_tnl->used) {
		netdev_warn(netdev,
			    "UDP tunnel [%d], port [%d] offload\n", type, port);
		return;
	}

	udp_tnl->dst_port = port;
	udp_tnl->used = 1;
	/* TBD send command to hardware to add port */
	if (h->ae_algo->ops->add_tunnel_udp)
		h->ae_algo->ops->add_tunnel_udp(h, port);
}
static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
				 enum hns3_udp_tnl_type type)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
	struct hnae3_handle *h = priv->ae_handle;

	if (!udp_tnl->used || udp_tnl->dst_port != port) {
		netdev_warn(netdev,
			    "Invalid UDP tunnel port %d\n", port);
		return;
	}

	udp_tnl->used--;
	if (udp_tnl->used)
		return;

	udp_tnl->dst_port = 0;
	/* TBD send command to hardware to del port */
	if (h->ae_algo->ops->del_tunnel_udp)
		h->ae_algo->ops->del_tunnel_udp(h, port);
}
/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
 * @netdev: This physical port's netdev
 * @ti: Tunnel information
 */
static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	u16 port_n = ntohs(ti->port);

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
		break;
	default:
		netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
		break;
	}
}
static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	u16 port_n = ntohs(ti->port);

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
		break;
	default:
		break;
	}
}
static int hns3_setup_tc(struct net_device *netdev, u8 tc)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int i;
	int ret;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (kinfo->num_tc == tc)
		return 0;

	if (!netdev)
		return -EINVAL;

	if (!tc) {
		netdev_reset_tc(netdev);
		return 0;
	}

	/* Set num_tc for netdev */
	ret = netdev_set_num_tc(netdev, tc);
	if (ret)
		return ret;

	/* Set per TC queues for the VSI */
	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (kinfo->tc_info[i].enable)
			netdev_set_tc_queue(netdev,
					    kinfo->tc_info[i].tc,
					    kinfo->tc_info[i].tqp_count,
					    kinfo->tc_info[i].tqp_offset);
	}

	return 0;
}
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_MQPRIO)
		return -EOPNOTSUPP;

	return hns3_setup_tc(dev, mqprio->num_tc);
}
static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	return ret;
}
static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int ret = -EIO;

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool if_running = netif_running(netdev);
	int ret;

	if (!h->ae_algo->ops->set_mtu)
		return -EOPNOTSUPP;

	/* if this was called with netdev up then bring netdevice down */
	if (if_running)
		(void)hns3_nic_net_stop(netdev);

	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
	if (ret) {
		netdev_err(netdev, "failed to change MTU in hardware %d\n",
			   ret);
		return ret;
	}

	netdev->mtu = new_mtu;

	/* if the netdev was running earlier, bring it up again */
	if (if_running && hns3_nic_net_open(netdev))
		ret = -EINVAL;

	return ret;
}
static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_udp_tunnel_add	= hns3_nic_udp_tunnel_add,
	.ndo_udp_tunnel_del	= hns3_nic_udp_tunnel_del,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
};
/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
			      GFP_KERNEL);
	if (!ae_dev) {
		ret = -ENOMEM;
		return ret;
	}

	ae_dev->pdev = pdev;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	pci_set_drvdata(pdev, ae_dev);

	return hnae3_register_ae_dev(ae_dev);
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	hnae3_unregister_ae_dev(ae_dev);

	devm_kfree(&pdev->dev, ae_dev);

	pci_set_drvdata(pdev, NULL);
}
static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
};
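/* Illustrative note (not part of this excerpt): a pci_driver such as
 * hns3_driver is normally made loadable by registering it from the module
 * init path.  The real hns3 module init/exit code (which also registers an
 * hnae3 client) is not shown here, so the line below is only an assumed,
 * minimal sketch of such a registration:
 *
 *	module_pci_driver(hns3_driver);
 */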
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p;

	p = dev_alloc_pages(order);
	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	memset(cb->buf, 0, cb->length);

	return 0;
}
static void hns3_free_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (!HNAE3_IS_TX_RING(ring))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}
static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}
static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
			      struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}
static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc[i].addr = 0;
}

static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	struct hns3_desc_cb *cb = &ring->desc_cb[i];

	if (!ring->desc_cb[i].dma)
		return;

	hns3_buffer_detach(ring, i);
	hns3_free_buffer(ring, cb);
}
static void hns3_free_buffers(struct hns3_enet_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hns3_free_buffer_detach(ring, i);
}
/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
	hns3_free_buffers(ring);

	if (ring->desc) {
		dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
				 ring->desc_num * sizeof(ring->desc[0]),
				 DMA_BIDIRECTIONAL);
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
	}
}
static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
					     size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}
static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
				   struct hns3_desc_cb *cb)
{
	int ret;

	ret = hns3_alloc_buffer(ring, cb);
	if (ret)
		goto out;

	ret = hns3_map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	hns3_free_buffers(ring);
out:
	return ret;
}
static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
{
	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}
/* Allocate memory for raw pkg, and map with dma */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hns3_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hns3_free_buffer_detach(ring, j);
	return ret;
}
/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
				struct hns3_desc_cb *res_cb)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
}
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
		+ ring->desc_cb[i].page_offset);
}
static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
				      int *pkts)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach */
	hns3_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
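/* Example of the wrap-around check above: with desc_num = 8, next_to_clean = 6
 * and next_to_use = 2 (the ring has wrapped), a hardware head of 7 or 1 is
 * valid (h > c || h <= u), while a head of 4 falls outside the region owned
 * by hardware and is rejected.
 */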
int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct netdev_queue *dev_queue;
	int bytes, pkts;
	int head;

	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
	rmb(); /* Make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean)
		return 0; /* no data to poll */

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);

		u64_stats_update_begin(&ring->syncp);
		ring->stats.io_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean && budget) {
		hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* Issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
		budget--;
	}

	ring->tqp_vector->tx_group.total_bytes += bytes;
	ring->tqp_vector->tx_group.total_packets += pkts;

	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_bytes += bytes;
	ring->stats.tx_pkts += pkts;
	u64_stats_update_end(&ring->syncp);

	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(pkts && netif_carrier_ok(netdev) &&
		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}

	return !!budget;
}
static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
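/* Example of the unused-descriptor arithmetic above: with desc_num = 512,
 * next_to_use = 10 and next_to_clean = 5 (ntc < ntu), the result is
 * 512 + 5 - 10 = 507 descriptors available for new Rx buffers.
 */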
static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
{
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb res_cbs;
	int i, ret;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.reuse_pg_cnt++;
			u64_stats_update_end(&ring->syncp);

			hns3_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);

				netdev_err(ring->tqp->handle->kinfo.netdev,
					   "hnae reserve buffer map failed.\n");
				break;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* Make all data has been write before submit */
	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
/* hns3_nic_get_headlen - determine size of header for LRO/GRO
 * @data: pointer to the start of the headers
 * @max: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 */
static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
					 unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* This should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* Initialize network frame pointer */
	network = data;

	/* Set first protocol and move network header forward */
	network += ETH_HLEN;

	/* Handle any vlan tag if present */
	if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
		== HNS3_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* Handle L3 protocols */
	if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
		== HNS3_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* Access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* Verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* Record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
		== HNS3_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* Record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* Relocate pointer to start of L4 header */
	network += hlen;

	/* Finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
		== HNS3_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* Access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* Verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
		== HNS3_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* Avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* If we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* Flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* Move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* Bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     struct hns3_desc *desc)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int l3_type, l4_type;
	u32 bd_base_info;
	int ol4_type;
	u32 l234info;

	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if hardware has done checksum */
	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
		return;

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
		netdev_err(netdev, "L3/L4 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l3l4_csum_err++;
		u64_stats_update_end(&ring->syncp);

		return;
	}

	l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
				 HNS3_RXD_L3ID_S);
	l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
				 HNS3_RXD_L4ID_S);

	ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
	switch (ol4_type) {
	case HNS3_OL4_TYPE_MAC_IN_UDP:
	case HNS3_OL4_TYPE_NVGRE:
		skb->csum_level = 1;
	case HNS3_OL4_TYPE_NO_TUN:
		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
		if (l3_type == HNS3_L3_TYPE_IPV4 ||
		    (l3_type == HNS3_L3_TYPE_IPV6 &&
		     (l4_type == HNS3_L4_TYPE_UDP ||
		      l4_type == HNS3_L4_TYPE_TCP ||
		      l4_type == HNS3_L4_TYPE_SCTP)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}
}
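/* The missing break after the tunnel cases above is intentional: tunnelled
 * packets first record csum_level = 1 and then fall through to the same
 * inner L3/L4 type check as plain packets before CHECKSUM_UNNECESSARY is set.
 */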
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb, int *out_bnum)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	struct sk_buff *skb;
	unsigned char *va;
	u32 bd_base_info;
	int pull_len;
	u32 l234info;
	int length;
	int bnum;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	length = le16_to_cpu(desc->rx.pkt_len);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	/* Check valid BD */
	if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
		return -EFAULT;

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* Prefetch first cache line of first page
	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
	 * line size is 64B so need to prefetch twice to make it 128B. But in
	 * actual we can have greater size of caches with 128B Level 1 cache
	 * lines. In such a case, single fetch would suffice to cache in the
	 * relevant part of the header.
	 */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
					HNS3_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(netdev, "alloc rx skb fail\n");

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -ENOMEM;
	}

	prefetchw(skb->data);

	bnum = 1;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* We can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* This page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);
	} else {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.seg_pkt_cnt++;
		u64_stats_update_end(&ring->syncp);

		pull_len = hns3_nic_get_headlen(va, l234info,
						HNS3_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];
			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
			hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
			bnum++;
		}
	}

	*out_bnum = bnum;

	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
		netdev_err(netdev, "truncated pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
		netdev_err(netdev, "L2 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l2_err++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;
	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += skb->len;

	hns3_rx_checksum(ring, skb, desc);

	return 0;
}
static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring);
	struct sk_buff *skb = NULL;
	int num, bnum = 0;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring);
		}

		/* Poll one pkt */
		err = hns3_handle_rx_bd(ring, &skb, &bnum);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) {  /* Do jump the err */
			recv_pkts++;
			continue;
		}

		/* Do update ip stack process */
		skb->protocol = eth_type_trans(skb, netdev);
		(void)napi_gro_receive(&ring->tqp_vector->napi, skb);

		recv_pkts++;
	}

out:
	/* Make all data has been write before submit */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
#define HNS3_RX_ULTRA_PACKET_RATE 40000
	enum hns3_flow_level_range new_flow_level;
	struct hns3_enet_tqp_vector *tqp_vector;
	int packets_per_secs;
	int bytes_per_usecs;
	u16 new_int_gl;
	int usecs;

	if (!ring_group->int_gl)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->int_gl = HNS3_INT_GL_50K;
		ring_group->flow_level = HNS3_FLOW_LOW;
		return false;
	}

	/* Simple throttlerate management
	 * 0-10MB/s    lower  (50000 ints/s)
	 * 10-20MB/s   middle (20000 ints/s)
	 * 20-1249MB/s high   (18000 ints/s)
	 * > 40000pps  ultra  (8000 ints/s)
	 */
	new_flow_level = ring_group->flow_level;
	new_int_gl = ring_group->int_gl;
	tqp_vector = ring_group->ring->tqp_vector;
	usecs = (ring_group->int_gl << 1);
	bytes_per_usecs = ring_group->total_bytes / usecs;
	/* 1000000 microseconds */
	packets_per_secs = ring_group->total_packets * 1000000 / usecs;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_usecs > 10)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_usecs > 20)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_usecs <= 10)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_usecs <= 20)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

	if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
	    (&tqp_vector->rx_group == ring_group))
		new_flow_level = HNS3_FLOW_ULTRA;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->flow_level = new_flow_level;
	if (new_int_gl != ring_group->int_gl) {
		ring_group->int_gl = new_int_gl;
		return true;
	}

	return false;
}
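/* Summary of the self-adaptive coalescing above: the observed byte rate moves
 * flow_level between LOW/MID/HIGH, a packet rate above 40000 pps promotes an
 * Rx group to ULTRA, and each level maps to one GL value (50K/20K/18K/8K
 * interrupts per second); the return value reports whether GL changed.
 */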
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	u16 rx_int_gl, tx_int_gl;
	bool rx, tx;

	rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
	tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
	rx_int_gl = tqp_vector->rx_group.int_gl;
	tx_int_gl = tqp_vector->tx_group.int_gl;
	if (rx && tx) {
		if (rx_int_gl > tx_int_gl) {
			tqp_vector->tx_group.int_gl = rx_int_gl;
			tqp_vector->tx_group.flow_level =
				tqp_vector->rx_group.flow_level;
			hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
		} else {
			tqp_vector->rx_group.int_gl = tx_int_gl;
			tqp_vector->rx_group.flow_level =
				tqp_vector->tx_group.flow_level;
			hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
		}
	}
}
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group) {
		if (!hns3_clean_tx_ring(ring, budget))
			clean_complete = false;
	}

	/* make sure rx ring budget not smaller than 1 */
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	napi_complete(napi);
	hns3_update_new_int_gl(tqp_vector);
	hns3_mask_vector_irq(tqp_vector, 1);

	return rx_pkt_total;
}
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				return -ENOMEM;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				     HNAE3_RING_TYPE_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			return -ENOMEM;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);
		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;
}
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;
}

static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector)
		return -ENOMEM;

	for (i = 0; i < tqp_num; i++) {
		u16 vector_i = i % vector_num;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		tqp_vector->idx = vector_i;
		tqp_vector->mask_addr = vector[vector_i].io_addr;
		tqp_vector->vector_irq = vector[vector_i].vector;
		tqp_vector->num_tqps++;

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
	}

	for (i = 0; i < vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		hns3_vector_gl_rl_init(tqp_vector);
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			goto out;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			goto out;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}

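/* A small worked example of the assignment above (illustrative numbers):
 * with tqp_num = 8 and vector_num = 4, vector_i = i % vector_num maps the
 * TX/RX rings of queues 0 and 4 onto vector 0, queues 1 and 5 onto vector 1,
 * and so on, leaving each vector with num_tqps = 2.
 */
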
static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			return ret;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
			(void)irq_set_affinity_hint(
				priv->tqp_vector[i].vector_irq, NULL);
			devm_free_irq(&pdev->dev,
				      priv->tqp_vector[i].vector_irq,
				      &priv->tqp_vector[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;

		netif_napi_del(&priv->tqp_vector[i].napi);
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);

	return 0;
}

static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring_data[q->tqp_index].ring = ring;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring->io_base = q->io_base;
	}

	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring_data[q->tqp_index].queue_index = q->tqp_index;

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = q->desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}

static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret)
		return ret;

	return 0;
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
				       sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}

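/* The (dma >> 31) >> 1 above presumably extracts the upper 32 bits of the
 * descriptor base address in two steps, since a single shift by 32 would be
 * undefined when dma_addr_t is only 32 bits wide.
 */
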
static int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		hns3_init_ring_hw(priv->ring_data[i].ring);

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	/* unwind only the rings that were successfully allocated */
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}

static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		if (h->ae_algo->ops->reset_queue)
			h->ae_algo->ops->reset_queue(h, i);

		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	return 0;
}

/* Set mac addr if it is configured, or leave it to the AE driver */
static void hns3_init_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];

	if (h->ae_algo->ops->get_mac_addr) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
		/* Also copy this new MAC address into hdev */
		if (h->ae_algo->ops->set_mac_addr)
			h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
	}
}

static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}
}

static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
				   handle->kinfo.num_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

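	/* Assuming HNS3_MAX_MTU is 9728 bytes, the assignment below works out
	 * to 9728 - (14 + 4 + 4) = 9706, which matches the upper bound quoted
	 * in the MTU range comment.
	 */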
	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return ret;

out_reg_netdev_fail:
out_init_ring_data:
	(void)hns3_nic_uninit_vector_data(priv);
	priv->ring_data = NULL;
out_init_vector_data:
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}

static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret)
		netdev_err(netdev, "uninit vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	priv->ring_data = NULL;

	free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}

const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

2888 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2889 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2890 MODULE_LICENSE("GPL");
2891 MODULE_ALIAS("pci:hns-nic");