/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

static irqreturn_t hns3_irq_handle(int irq, void *dev)
{
	struct hns3_enet_tqp_vector *tqp_vector = dev;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name,
				  tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
				       u32 gl_value)
{
	/* this defines the configuration for GL (Interrupt Gap Limiter)
	 * GL defines the inter-interrupt gap.
	 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
	 */
	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
}

static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
				       u32 rl_value)
{
	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. interrupts-per-second.
	 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
	 */
	writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing */
	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
	tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
	hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
	/* for now we are disabling Interrupt RL - we
	 * will re-enable later
	 */
	hns3_set_vector_coalesc_rl(tqp_vector, 0);
	tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}

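/* A rough sketch of how the two limiters combine (assuming the usual
 * coalescing semantics; the exact hardware behaviour is not spelled out
 * here): with GL at HNS3_INT_GL_50K and RL written as 0, only the
 * inter-interrupt gap is enforced, i.e. roughly 50K interrupts/s at most
 * per vector under continuous traffic:
 *
 *	hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
 *	hns3_set_vector_coalesc_rl(tqp_vector, 0);
 *
 * Re-enabling RL with a non-zero value would add an absolute cap on
 * interrupts-per-second on top of the gap; whichever limit is reached
 * first would apply.
 */
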
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int ret;

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	return 0;

out_start_err:
	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	priv->last_reset_time = jiffies;
	return 0;
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->set_promisc_mode) {
		if (netdev->flags & IFF_PROMISC)
			h->ae_algo->ops->set_promisc_mode(h, 1);
		else
			h->ae_algo->ops->set_promisc_mode(h, 0);
	}
	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
		netdev_err(netdev, "sync uc address fail\n");
	if (netdev->flags & IFF_MULTICAST)
		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
			netdev_err(netdev, "sync mc address fail\n");
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet.*/
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type &
		    SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type &
		    SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet*/
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso*/
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae_set_bit(*type_cs_vlan_tso,
		     HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}

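/* Worked example for the csum_replace_by_diff() call above (illustrative
 * numbers, not from a datasheet): for an untunnelled IPv4 TCP GSO skb,
 * l4_offset = 14 (Ethernet) + 20 (IPv4) = 34, so l4_paylen = skb->len - 34
 * covers the TCP header plus payload. The stack seeded l4.tcp->check with
 * a pseudo-header checksum that already folds in this total length, but
 * the hardware re-adds a per-segment length while segmenting, so the
 * driver subtracts htonl(l4_paylen) back out, leaving a length-free
 * partial checksum for the hardware to complete.
 */
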
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}

static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
		       HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet*/
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae_set_field(*ol_type_vlan_len_msec,
			       HNS3_TXD_L2LEN_M,
			       HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
			       HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558)*/
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header.*/
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes. */
			ol4_len = l2_hdr - l4.hdr;
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
				       HNS3_TXD_L4LEN_S, ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes. */
			l2_len = l3.hdr - l2_hdr;
			hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
				       HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware;
			 * the txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
		       HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	if (l4_proto == IPPROTO_TCP) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, l4.tcp->doff);
	} else if (l4_proto == IPPROTO_SCTP) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
	} else if (l4_proto == IPPROTO_UDP) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
	} else {
		/* skb packet types not supported by hardware;
		 * the txbd len field is not filled.
		 */
		return;
	}
}

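/* The shifts used above encode header lengths in the units the TX BD
 * expects (as the field names in hns3_enet.h suggest): L2 lengths in
 * 2-byte units (l2_len >> 1), L3/L4 lengths in 4-byte units (>> 2).
 * For a plain Ethernet + IPv4 + TCP frame that gives l2_len = 14 ->
 * field value 7 and l3_len = 20 -> field value 5, while l4.tcp->doff is
 * already a count of 4-byte words and is written unshifted.
 */
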
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4).*/
	if (skb->encapsulation) {
		/* define outer network header type.*/
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_CSUM);
			else
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4).*/
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_M,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_M,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate the
			 * csum when doing TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already;
			 * the driver calculates the L4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
			       HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);

		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
			       HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M,
			       HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M,
			       HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M,
			       HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb tunnel packet if hardware doesn't support it,
		 * because hardware can't calculate the csum when doing TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already;
		 * the driver calculates the L4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}

static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
		       HNS3_TXD_BDTYPE_S, 0);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}

static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  enum hns_desc_type type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	u32 ol_type_vlan_len_msec = 0;
	u16 bdtp_fe_sc_vld_ra_ri = 0;
	u32 type_cs_vlan_tso = 0;
	struct sk_buff *skb;
	u32 paylen = 0;
	u16 mss = 0;
	__be16 protocol;
	u8 ol4_proto;
	u8 il4_proto;
	int ret;

	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	/* now, fill the descriptor */
	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);
	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;
		paylen = skb->len;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;

			/* vlan packet*/
			if (protocol == htons(ETH_P_8021Q)) {
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}
			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
			if (ret)
				return ret;
			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
					    &type_cs_vlan_tso,
					    &ol_type_vlan_len_msec);
			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
						      &type_cs_vlan_tso,
						      &ol_type_vlan_len_msec);
			if (ret)
				return ret;

			ret = hns3_set_tso(skb, &paylen, &mss,
					   &type_cs_vlan_tso);
			if (ret)
				return ret;
		}

		/* Set txbd */
		desc->tx.ol_type_vlan_len_msec =
			cpu_to_le32(ol_type_vlan_len_msec);
		desc->tx.type_cs_vlan_tso_len =
			cpu_to_le32(type_cs_vlan_tso);
		desc->tx.paylen = cpu_to_le32(paylen);
		desc->tx.mss = cpu_to_le16(mss);
	}

	/* move ring pointer to next.*/
	ring_ptr_move_fw(ring, next_to_use);

	return 0;
}

static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
			      int size, dma_addr_t dma, int frag_end,
			      enum hns_desc_type type)
{
	unsigned int frag_buf_num;
	unsigned int k;
	int sizeoflast;
	int ret;

	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		ret = hns3_fill_desc(ring, priv,
				     (k == frag_buf_num - 1) ?
				     sizeoflast : HNS3_MAX_BD_SIZE,
				     dma + HNS3_MAX_BD_SIZE * k,
				     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				     (type == DESC_TYPE_SKB && !k) ?
				     DESC_TYPE_SKB : DESC_TYPE_PAGE);
		if (ret)
			return ret;
	}

	return 0;
}

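/* Split arithmetic by example, taking HNS3_MAX_BD_SIZE as 65535 for
 * illustration: a 100000-byte buffer gives frag_buf_num = 2 and
 * sizeoflast = 100000 % 65535 = 34465, so the loop emits one full-sized
 * BD followed by one 34465-byte BD. Only BD 0 keeps DESC_TYPE_SKB, and
 * only the last BD of the last fragment carries the frag_end flag.
 */
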
static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
				   struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct skb_frag_struct *frag;
	int bdnum_for_frag;
	int frag_num;
	int buf_num;
	int size;
	int i;

	size = skb_headlen(skb);
	buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		bdnum_for_frag =
			(size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
		if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
			return -ENOMEM;

		buf_num += bdnum_for_frag;
	}

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;
	return 0;
}

static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
				  struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	int buf_num;

	/* No. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}

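/* The non-TSO estimate can be this cheap because each fragment is then
 * guaranteed to fit in a single BD: a linear skb with 3 frags always
 * needs exactly 4 BDs here. The TSO variant above must walk the frags
 * instead, since a single large frag may span several BDs.
 */
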
static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					 ring->desc_cb[ring->next_to_use].length,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);
	}
}

netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_nic_ring_data *ring_data =
		&tx_ring_data(priv, skb->queue_mapping);
	struct hns3_enet_ring *ring = ring_data->ring;
	struct device *dev = priv->dev;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int next_to_use_head;
	int next_to_use_frag;
	dma_addr_t dma;
	int buf_num;
	int seg_num;
	int size;
	int ret;
	int i;

	/* Prefetch the data used later */
	prefetch(skb->data);

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_busy++;
		u64_stats_update_end(&ring->syncp);

		goto out_net_tx_busy;
	case -ENOMEM:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		netdev_err(netdev, "no memory to xmit!\n");

		goto out_err_tx_ok;
	default:
		break;
	}

	/* No. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	/* Fill the first part */
	size = skb_headlen(skb);

	next_to_use_head = ring->next_to_use;

	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(netdev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}

	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
				  DESC_TYPE_SKB);
	if (ret)
		goto head_dma_map_err;

	next_to_use_frag = ring->next_to_use;
	/* Fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto frag_dma_map_err;
		}
		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
					  seg_num - 1 == i ? 1 : 0,
					  DESC_TYPE_PAGE);

		if (ret)
			goto frag_dma_map_err;
	}

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* Commit all data before submit */

	hnae_queue_xmit(ring->tqp, buf_num);

	return NETDEV_TX_OK;

frag_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_frag);

head_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(netdev, ring_data->queue_index);
	smp_mb(); /* Commit all data before submit */

	return NETDEV_TX_BUSY;
}

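/* The error unwinding above hinges on the two checkpoints taken in the
 * hot path: next_to_use_head is sampled before the head BD is filled and
 * next_to_use_frag right after it. A failure while mapping fragment i
 * first rolls the ring back to the frag checkpoint, then falls through
 * and rolls back to the head checkpoint, so hns_nic_dma_unmap() always
 * walks next_to_use backwards exactly over the BDs this skb consumed.
 */
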
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}

static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}

	netdev->features = features;
	return 0;
}

static void
hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hns3_enet_ring *ring;
	unsigned int start;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = priv->ring_data[idx].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = priv->ring_data[idx + queue_num].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = netdev->stats.rx_errors;
	stats->multicast = netdev->stats.multicast;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = netdev->stats.tx_errors;
	stats->rx_dropped = netdev->stats.rx_dropped;
	stats->tx_dropped = netdev->stats.tx_dropped;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}

static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
				 enum hns3_udp_tnl_type type)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
	struct hnae3_handle *h = priv->ae_handle;

	if (udp_tnl->used && udp_tnl->dst_port == port) {
		udp_tnl->used++;
		return;
	}

	if (udp_tnl->used) {
		netdev_warn(netdev,
			    "UDP tunnel [%d], port [%d] offload\n", type, port);
		return;
	}

	udp_tnl->dst_port = port;
	udp_tnl->used = 1;
	/* TBD send command to hardware to add port */
	if (h->ae_algo->ops->add_tunnel_udp)
		h->ae_algo->ops->add_tunnel_udp(h, port);
}

static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
				 enum hns3_udp_tnl_type type)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
	struct hnae3_handle *h = priv->ae_handle;

	if (!udp_tnl->used || udp_tnl->dst_port != port) {
		netdev_warn(netdev,
			    "Invalid UDP tunnel port %d\n", port);
		return;
	}

	udp_tnl->used--;
	if (udp_tnl->used)
		return;

	udp_tnl->dst_port = 0;
	/* TBD send command to hardware to del port */
	if (h->ae_algo->ops->del_tunnel_udp)
		h->ae_algo->ops->del_tunnel_udp(h, port);
}

/* hns3_nic_udp_tunnel_add - Get notifications about UDP tunnel ports
 * @netdev: This physical port's netdev
 * @ti: Tunnel information
 */
static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	u16 port_n = ntohs(ti->port);

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
		break;
	default:
		netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
		break;
	}
}

static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	u16 port_n = ntohs(ti->port);

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
		break;
	default:
		break;
	}
}

static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
	u8 tc = mqprio_qopt->qopt.num_tc;
	u16 mode = mqprio_qopt->mode;
	u8 hw = mqprio_qopt->qopt.hw;
	bool if_running;
	unsigned int i;
	int ret;

	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
		return -EOPNOTSUPP;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!netdev)
		return -EINVAL;

	if_running = netif_running(netdev);
	if (if_running) {
		hns3_nic_net_stop(netdev);
		msleep(100);
	}

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
	if (ret)
		goto out;

	if (tc <= 1) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, tc);
		if (ret)
			goto out;

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!kinfo->tc_info[i].enable)
				continue;

			netdev_set_tc_queue(netdev,
					    kinfo->tc_info[i].tc,
					    kinfo->tc_info[i].tqp_count,
					    kinfo->tc_info[i].tqp_offset);
		}
	}

	ret = hns3_nic_set_real_num_queue(netdev);

out:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}

static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	return hns3_setup_tc(dev, type_data);
}

static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	return ret;
}

static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}

static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	bool if_running = netif_running(netdev);
	int ret;

	if (!h->ae_algo->ops->set_mtu)
		return -EOPNOTSUPP;

	/* if this was called with netdev up then bring netdevice down */
	if (if_running) {
		(void)hns3_nic_net_stop(netdev);
		msleep(100);
	}

	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
	if (ret) {
		netdev_err(netdev, "failed to change MTU in hardware %d\n",
			   ret);
		return ret;
	}

	/* if the netdev was running earlier, bring it up again */
	if (if_running && hns3_nic_net_open(netdev))
		ret = -EINVAL;

	return ret;
}

static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *tx_ring = NULL;
	int timeout_queue = 0;
	int hw_head, hw_tail;
	int i;

	/* Find the stopped queue the same way the stack does */
	for (i = 0; i < ndev->real_num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(ndev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + ndev->watchdog_timeo))) {
			timeout_queue = i;
			break;
		}
	}

	if (i == ndev->num_tx_queues) {
		netdev_info(ndev,
			    "no netdev TX timeout queue found, timeout count: %llu\n",
			    priv->tx_timeout_count);
		return false;
	}

	tx_ring = priv->ring_data[timeout_queue].ring;

	hw_head = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_HEAD_REG);
	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_TAIL_REG);
	netdev_info(ndev,
		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
		    priv->tx_timeout_count,
		    timeout_queue,
		    tx_ring->next_to_use,
		    tx_ring->next_to_clean,
		    hw_head,
		    hw_tail,
		    readl(tx_ring->tqp_vector->mask_addr));

	return true;
}

static void hns3_nic_net_timeout(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	unsigned long last_reset_time = priv->last_reset_time;
	struct hnae3_handle *h = priv->ae_handle;

	if (!hns3_get_tx_timeo_queue_info(ndev))
		return;

	priv->tx_timeout_count++;

	/* This timeout is far away enough from the last timeout;
	 * if we time out again, set the reset type to PF reset.
	 */
	if (time_after(jiffies, (last_reset_time + 20 * HZ)))
		priv->reset_level = HNAE3_FUNC_RESET;

	/* Don't do any new action before the next timeout */
	else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo)))
		return;

	priv->last_reset_time = jiffies;

	if (h->ae_algo->ops->reset_event)
		h->ae_algo->ops->reset_event(h, priv->reset_level);

	priv->reset_level++;
	if (priv->reset_level > HNAE3_GLOBAL_RESET)
		priv->reset_level = HNAE3_GLOBAL_RESET;
}

static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_udp_tunnel_add	= hns3_nic_udp_tunnel_add,
	.ndo_udp_tunnel_del	= hns3_nic_udp_tunnel_del,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
};

/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
			      GFP_KERNEL);
	if (!ae_dev) {
		ret = -ENOMEM;
		return ret;
	}

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	pci_set_drvdata(pdev, ae_dev);

	return hnae3_register_ae_dev(ae_dev);
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	hnae3_unregister_ae_dev(ae_dev);

	devm_kfree(&pdev->dev, ae_dev);

	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
};

/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;
}

static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p;

	p = dev_alloc_pages(order);
	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf  = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hns3_free_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (!HNAE3_IS_TX_RING(ring))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}

static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}

static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
			      struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc[i].addr = 0;
}

static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	struct hns3_desc_cb *cb = &ring->desc_cb[i];

	if (!ring->desc_cb[i].dma)
		return;

	hns3_buffer_detach(ring, i);
	hns3_free_buffer(ring, cb);
}

static void hns3_free_buffers(struct hns3_enet_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hns3_free_buffer_detach(ring, i);
}

/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
	hns3_free_buffers(ring);

	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 DMA_BIDIRECTIONAL);
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
					     size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
				   struct hns3_desc_cb *cb)
{
	int ret;

	ret = hns3_alloc_buffer(ring, cb);
	if (ret)
		goto out;

	ret = hns3_map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	hns3_free_buffer(ring, cb);
out:
	return ret;
}

static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
{
	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}

/* Allocate memory for raw pkg, and map with dma */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hns3_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hns3_free_buffer_detach(ring, j);
	return ret;
}

/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
				struct hns3_desc_cb *res_cb)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
}

static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
		+ ring->desc_cb[i].page_offset);
}

static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
				      int *pkts)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach*/
	hns3_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

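/* Example with ring->desc_num = 8: if c = next_to_clean = 6 and
 * u = next_to_use = 2 the outstanding BDs wrap around, and the
 * u > c ? ... : ... expression accepts a hardware head h of 7, 8, 0, 1
 * or 2; with c = 2 and u = 6 (no wrap) it accepts 3..6. Anything else
 * means hardware reported a head outside the region software handed it.
 */
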
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct netdev_queue *dev_queue;
	int bytes, pkts;
	int head;

	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
	rmb(); /* Make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean)
		return true; /* no data to poll */

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);

		u64_stats_update_begin(&ring->syncp);
		ring->stats.io_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return true;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean && budget) {
		hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* Issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
		budget--;
	}

	ring->tqp_vector->tx_group.total_bytes += bytes;
	ring->tqp_vector->tx_group.total_packets += pkts;

	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_bytes += bytes;
	ring->stats.tx_pkts += pkts;
	u64_stats_update_end(&ring->syncp);

	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(pkts && netif_carrier_ok(netdev) &&
		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}

	return !!budget;
}

static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

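/* Example: with desc_num = 512, next_to_clean = 100 and next_to_use = 300
 * this evaluates to 512 + 100 - 300 = 312 slots available for refill;
 * with the pointers the other way round (ntc = 300, ntu = 100) the
 * unwrapped case gives 0 + 300 - 100 = 200.
 */
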
static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
{
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb res_cbs;
	int i, ret;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.reuse_pg_cnt++;
			u64_stats_update_end(&ring->syncp);

			hns3_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);

				netdev_err(ring->tqp->handle->kinfo.netdev,
					   "hnae reserve buffer map failed.\n");
				break;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* Make all data has been write before submit */
	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}

/* hns3_nic_get_headlen - determine size of header for LRO/GRO
 * @data: pointer to the start of the headers
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 */
static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
					 unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* This should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* Initialize network frame pointer */
	network = data;

	/* Set first protocol and move network header forward */
	network += ETH_HLEN;

	/* Handle any vlan tag if present */
	if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
		== HNS3_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* Handle L3 protocols */
	if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
		== HNS3_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* Access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* Verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* Record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
		== HNS3_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* Record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* Relocate pointer to start of L4 header */
	network += hlen;

	/* Finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
		== HNS3_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* Access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* Verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
		== HNS3_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}

static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* Avoid re-using remote pages; flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* If we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* Flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given*/
			get_page(desc_cb->priv);
		}
		return;
	}

	/* Move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* Bump ref count on page before it is given*/
		get_page(desc_cb->priv);
	}
}

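/* In the two-buffer layout (e.g. a 4K page split into two 2048-byte
 * buffers) the XOR above simply ping-pongs page_offset between 0 and
 * 2048, so one page keeps alternating between the two hardware buffers
 * for as long as the stack drops its reference in time (page_count == 1).
 * In the multi-buffer layout the offset instead slides forward by
 * truesize until last_offset is passed and a fresh page must be used.
 */
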
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     struct hns3_desc *desc)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int l3_type, l4_type;
	u32 bd_base_info;
	int ol4_type;
	u32 l234info;

	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if hardware has done checksum */
	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
		return;

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
		netdev_err(netdev, "L3/L4 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l3l4_csum_err++;
		u64_stats_update_end(&ring->syncp);

		return;
	}

	l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
				 HNS3_RXD_L3ID_S);
	l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
				 HNS3_RXD_L4ID_S);

	ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
	switch (ol4_type) {
	case HNS3_OL4_TYPE_MAC_IN_UDP:
	case HNS3_OL4_TYPE_NVGRE:
		skb->csum_level = 1;
		/* fall through */
	case HNS3_OL4_TYPE_NO_TUN:
		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
		if (l3_type == HNS3_L3_TYPE_IPV4 ||
		    (l3_type == HNS3_L3_TYPE_IPV6 &&
		     (l4_type == HNS3_L4_TYPE_UDP ||
		      l4_type == HNS3_L4_TYPE_TCP ||
		      l4_type == HNS3_L4_TYPE_SCTP)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}
}

static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	napi_gro_receive(&ring->tqp_vector->napi, skb);
}

static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb, int *out_bnum)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	struct sk_buff *skb;
	unsigned char *va;
	u32 bd_base_info;
	int pull_len;
	u32 l234info;
	int length;
	int bnum;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	length = le16_to_cpu(desc->rx.pkt_len);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	/* Check valid BD */
	if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
		return -EFAULT;

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* Prefetch first cache line of first page
	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
	 * line size is 64B so need to prefetch twice to make it 128B. But in
	 * actual we can have greater size of caches with 128B Level 1 cache
	 * lines. In such a case, single fetch would suffice to cache in the
	 * relevant part of the header.
	 */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
					HNS3_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(netdev, "alloc rx skb fail\n");

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -ENOMEM;
	}

	prefetchw(skb->data);

	bnum = 1;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* We can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* This page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);
	} else {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.seg_pkt_cnt++;
		u64_stats_update_end(&ring->syncp);

		pull_len = hns3_nic_get_headlen(va, l234info,
						HNS3_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];
			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
			hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
			bnum++;
		}
	}

	*out_bnum = bnum;

	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
		netdev_err(netdev, "truncated pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
		netdev_err(netdev, "L2 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l2_err++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;
	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += skb->len;

	hns3_rx_checksum(ring, skb, desc);
	return 0;
}

int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring);
	struct sk_buff *skb = NULL;
	int num, bnum = 0;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring);
		}

		/* Poll one pkt */
		err = hns3_handle_rx_bd(ring, &skb, &bnum);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) {  /* Do jump the err */
			recv_pkts++;
			continue;
		}

		/* Do update ip stack process */
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);

		recv_pkts++;
	}

out:
	/* Make all data has been write before submit */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}

static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
#define HNS3_RX_ULTRA_PACKET_RATE 40000
	enum hns3_flow_level_range new_flow_level;
	struct hns3_enet_tqp_vector *tqp_vector;
	int packets_per_secs;
	int bytes_per_usecs;
	u16 new_int_gl;
	int usecs;

	if (!ring_group->int_gl)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->int_gl = HNS3_INT_GL_50K;
		ring_group->flow_level = HNS3_FLOW_LOW;
		return false;
	}

	/* Simple throttle rate management
	 * 0-10MB/s    lower   (50000 ints/s)
	 * 10-20MB/s   middle  (20000 ints/s)
	 * 20-1249MB/s high    (18000 ints/s)
	 * > 40000pps  ultra   (8000 ints/s)
	 */
	new_flow_level = ring_group->flow_level;
	new_int_gl = ring_group->int_gl;
	tqp_vector = ring_group->ring->tqp_vector;
	usecs = (ring_group->int_gl << 1);
	bytes_per_usecs = ring_group->total_bytes / usecs;
	/* 1000000 microseconds */
	packets_per_secs = ring_group->total_packets * 1000000 / usecs;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_usecs > 10)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_usecs > 20)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_usecs <= 10)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_usecs <= 20)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

	if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
	    (&tqp_vector->rx_group == ring_group))
		new_flow_level = HNS3_FLOW_ULTRA;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->flow_level = new_flow_level;
	if (new_int_gl != ring_group->int_gl) {
		ring_group->int_gl = new_int_gl;
		return true;
	}
	return false;
}

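/* Sanity check of the rate arithmetic above (illustrative figures):
 * int_gl is treated as half the sampling window in microseconds, so
 * usecs = int_gl << 1. If a group moved 30000 bytes and 250 packets in a
 * 2000us window, bytes_per_usecs = 15 (enough to leave HNS3_FLOW_LOW for
 * HNS3_FLOW_MID) and packets_per_secs = 250 * 1000000 / 2000 = 125000,
 * which exceeds HNS3_RX_ULTRA_PACKET_RATE and lifts an rx group to
 * HNS3_FLOW_ULTRA.
 */
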
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	u16 rx_int_gl, tx_int_gl;
	bool rx, tx;

	rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
	tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
	rx_int_gl = tqp_vector->rx_group.int_gl;
	tx_int_gl = tqp_vector->tx_group.int_gl;
	if (rx && tx) {
		if (rx_int_gl > tx_int_gl) {
			tqp_vector->tx_group.int_gl = rx_int_gl;
			tqp_vector->tx_group.flow_level =
				tqp_vector->rx_group.flow_level;
			hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
		} else {
			tqp_vector->rx_group.int_gl = tx_int_gl;
			tqp_vector->rx_group.flow_level =
				tqp_vector->tx_group.flow_level;
			hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
		}
	}
}

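/* NAPI poll callback: drain the TX groups first, then the RX groups with
 * a per-ring share of @budget. Only when everything was cleaned within
 * budget do we complete NAPI, retune GL and re-enable the vector IRQ.
 */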
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group) {
		if (!hns3_clean_tx_ring(ring, budget))
			clean_complete = false;
	}

	/* make sure rx ring budget not smaller than 1 */
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	napi_complete(napi);
	hns3_update_new_int_gl(tqp_vector);
	hns3_mask_vector_irq(tqp_vector, 1);

	return rx_pkt_total;
}

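/* Build the TX-then-RX chain of ring nodes that describes every ring
 * attached to @tqp_vector; the chain is handed to the AE layer to map
 * rings onto the vector. Nodes after @head are devm-allocated and must
 * be released with hns3_free_vector_ring_chain().
 */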
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				return -ENOMEM;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				     HNAE3_RING_TYPE_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			return -ENOMEM;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);
		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;
}

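/* Distribute the TQPs round-robin over the vectors obtained from the AE
 * layer, build the ring chain for each vector, map it in hardware and
 * register the NAPI context. ring_data[0..tqp_num) holds the TX rings
 * and ring_data[tqp_num..2*tqp_num) the RX rings.
 */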
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector)
		return -ENOMEM;

	for (i = 0; i < tqp_num; i++) {
		u16 vector_i = i % vector_num;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		tqp_vector->idx = vector_i;
		tqp_vector->mask_addr = vector[vector_i].io_addr;
		tqp_vector->vector_irq = vector[vector_i].vector;
		tqp_vector->num_tqps++;

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
	}

	for (i = 0; i < vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		hns3_vector_gl_rl_init(tqp_vector);
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			goto out;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			goto out;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}

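/* Undo hns3_nic_init_vector_data(): unmap every ring chain from its
 * vector, release the IRQs that were requested and delete the NAPI
 * contexts.
 */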
static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			return ret;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
			(void)irq_set_affinity_hint(
				priv->tqp_vector[i].vector_irq,
				NULL);
			free_irq(priv->tqp_vector[i].vector_irq,
				 &priv->tqp_vector[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;

		netif_napi_del(&priv->tqp_vector[i].napi);
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);

	return 0;
}

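/* Allocate and initialize the software ring for one queue direction.
 * TX rings live at ring_data[tqp_index], RX rings at
 * ring_data[tqp_index + queue_num]; only the TX ring uses the
 * HNS3_TX_REG_OFFSET register window within the queue's register space.
 */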
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = q->desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}

static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret)
		return ret;

	return 0;
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
				       sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}

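/* Allocate the descriptor control blocks and the descriptor memory for
 * one ring; RX rings additionally get their data buffers here. On
 * failure everything allocated so far is rolled back.
 */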
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

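/* Program one ring's base address, buffer-size type and BD number into
 * the queue registers. The BD number register takes the descriptor count
 * in units of eight, minus one.
 */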
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}

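/* Allocate memory for all TX and RX rings and program them into
 * hardware; on failure, rings set up so far are torn down again.
 */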
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		hns3_init_ring_hw(priv->ring_data[i].ring);

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		if (h->ae_algo->ops->reset_queue)
			h->ae_algo->ops->reset_queue(h, i);

		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	return 0;
}

/* Set the MAC address if it is configured, otherwise leave it to the AE
 * driver.
 */
static void hns3_init_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];

	if (h->ae_algo->ops->get_mac_addr) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
}

static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}
}

static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
				   handle->kinfo.num_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->last_reset_time = jiffies;
	priv->reset_level = HNAE3_FUNC_RESET;
	priv->tx_timeout_count = 0;

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	hns3_dcbnl_setup(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return ret;

out_reg_netdev_fail:
out_init_ring_data:
	(void)hns3_nic_uninit_vector_data(priv);
	priv->ring_data = NULL;
out_init_vector_data:
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}

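/* Tear down a client instance: unregister the netdev, release vectors
 * and rings, and free the netdev itself.
 */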
static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret)
		netdev_err(netdev, "uninit vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	priv->ring_data = NULL;

	free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}

static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	bool if_running;
	int ret;
	u8 i;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	if_running = netif_running(ndev);

	ret = netdev_set_num_tc(ndev, tc);
	if (ret)
		return ret;

	if (if_running) {
		(void)hns3_nic_net_stop(ndev);
		msleep(100);
	}

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
		kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
	if (ret)
		goto err_out;

	if (tc <= 1) {
		netdev_reset_tc(ndev);
		goto out;
	}

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];

		if (tc_info->enable)
			netdev_set_tc_queue(ndev,
					    tc_info->tc,
					    tc_info->tqp_count,
					    tc_info->tqp_offset);
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(ndev, i,
				       kinfo->prio_tc[i]);
	}

out:
	ret = hns3_nic_set_real_num_queue(ndev);

err_out:
	if (if_running)
		(void)hns3_nic_net_open(ndev);

	return ret;
}

static void hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	/* go through and sync uc_addr entries to the device */
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_sync(ndev, ha->addr);

	/* go through and sync mc_addr entries to the device */
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_mc_sync(ndev, ha->addr);
}

static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clean_tx_ring(ring, ring->desc_num);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
	}
}

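/* Reset notification path: before a reset the netdev is stopped, after
 * it the rings and vectors are rebuilt and the device brought back up.
 */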
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (!netif_running(ndev))
		return -EIO;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_up(kinfo->netdev);
		if (ret) {
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}

		priv->last_reset_time = jiffies;
	}

	return ret;
}

static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	priv->reset_level = 1;
	hns3_init_mac_addr(netdev);
	hns3_nic_set_rx_mode(netdev);
	hns3_recover_hw_addr(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		return ret;

	ret = hns3_init_all_ring(priv);
	if (ret) {
		hns3_nic_uninit_vector_data(priv);
		priv->ring_data = NULL;
	}

	return ret;
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_clear_all_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		netdev_err(netdev, "uninit vector error\n");
		return ret;
	}

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	priv->ring_data = NULL;

	return ret;
}

static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}

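/* Client callbacks registered with the HNAE3 framework */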
static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");