/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

static irqreturn_t hns3_irq_handle(int irq, void *dev)
{
	struct hns3_enet_tqp_vector *tqp_vector = dev;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
				       u32 gl_value)
{
	/* this defines the configuration for GL (Interrupt Gap Limiter)
	 * GL defines inter interrupt gap.
	 * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
	 */
	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
}

static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
				       u32 rl_value)
{
	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of
	 * interrupts-per-second.
	 * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
	 */
	writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing */
	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
	tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
	hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
	/* for now we are disabling Interrupt RL - we
	 * will re-enable later
	 */
	hns3_set_vector_coalesc_rl(tqp_vector, 0);
	tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}

static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int ret;

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	return 0;

out_start_err:
	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	priv->last_reset_time = jiffies;
	return 0;
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	/* stop the ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->set_promisc_mode) {
		if (netdev->flags & IFF_PROMISC)
			h->ae_algo->ops->set_promisc_mode(h, 1);
		else
			h->ae_algo->ops->set_promisc_mode(h, 0);
	}
	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
		netdev_err(netdev, "sync uc address fail\n");
	if (netdev->flags & IFF_MULTICAST)
		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
			netdev_err(netdev, "sync mc address fail\n");
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}

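/* hns3_get_l4_protocol - parse the outer and (for encapsulated packets)
 * inner L4 protocol numbers of a Tx skb
 * @skb: the packet being transmitted
 * @ol4_proto: filled with the outer L4 protocol (IPPROTO_*)
 * @il4_proto: filled with the inner L4 protocol for tunnel packets
 *
 * IPv6 extension headers are skipped with ipv6_skip_exthdr() so that the
 * reported protocol is the real transport protocol.
 */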
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}

static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
		       HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet */
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae_set_field(*ol_type_vlan_len_msec,
			       HNS3_TXD_L2LEN_M,
			       HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
			       HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header. */
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes. */
			ol4_len = l2_hdr - l4.hdr;
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
				       HNS3_TXD_L4LEN_S, ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes. */
			l2_len = l3.hdr - l2_hdr;
			hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
				       HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware;
			 * the txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
		       HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware;
		 * the txbd len field is not filled.
		 */
		return;
	}
}

static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4). */
	if (skb->encapsulation) {
		/* define outer network header type. */
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_CSUM);
			else
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_NO_CSUM);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4). */
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_M,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_M,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate the
			 * csum when doing TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already; the
			 * driver calculates the l4 checksum when not doing
			 * TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
			       HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);

		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
			       HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb tunnel packet if hardware doesn't support it,
		 * because hardware can't calculate the csum when doing TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already; the driver
		 * calculates the l4 checksum when not doing TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}

static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
		       HNS3_TXD_BDTYPE_S, 0);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}

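/* hns3_fill_desc_vtags - fill the VLAN related fields of a Tx descriptor
 *
 * Based on the hardware strategy, the out_vtag field is used when the frame
 * already carries an 802.1Q tag (double tag case) and the inner_vtag field
 * is used in the single tag case.  When HW VLAN tag insertion is disabled,
 * the tag stays in the payload and only skb->protocol is adjusted.
 */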
static int hns3_fill_desc_vtags(struct sk_buff *skb,
				struct hns3_enet_ring *tx_ring,
				u32 *inner_vlan_flag,
				u32 *out_vlan_flag,
				u16 *inner_vtag,
				u16 *out_vtag)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->tqp->handle->kinfo.netdev->features &
	    NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		u16 vlan_tag;

		vlan_tag = skb_vlan_tag_get(skb);
		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;

		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q)) {
			hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			*out_vtag = vlan_tag;
		} else {
			hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			*inner_vtag = vlan_tag;
		}
	} else if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vhdr;
		int rc;

		rc = skb_cow_head(skb, 0);
		if (rc < 0)
			return rc;
		vhdr = (struct vlan_ethhdr *)skb->data;
		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
					<< HNS3_TX_VLAN_PRIO_SHIFT);
	}

	skb->protocol = vlan_get_protocol(skb);

	return 0;
}

783 static int hns3_fill_desc(struct hns3_enet_ring
*ring
, void *priv
,
784 int size
, dma_addr_t dma
, int frag_end
,
785 enum hns_desc_type type
)
787 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
788 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_use
];
789 u32 ol_type_vlan_len_msec
= 0;
790 u16 bdtp_fe_sc_vld_ra_ri
= 0;
791 u32 type_cs_vlan_tso
= 0;
802 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
803 desc_cb
->priv
= priv
;
804 desc_cb
->length
= size
;
806 desc_cb
->type
= type
;
808 /* now, fill the descriptor */
809 desc
->addr
= cpu_to_le64(dma
);
810 desc
->tx
.send_size
= cpu_to_le16((u16
)size
);
811 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri
, frag_end
);
812 desc
->tx
.bdtp_fe_sc_vld_ra_ri
= cpu_to_le16(bdtp_fe_sc_vld_ra_ri
);
814 if (type
== DESC_TYPE_SKB
) {
815 skb
= (struct sk_buff
*)priv
;
818 ret
= hns3_fill_desc_vtags(skb
, ring
, &type_cs_vlan_tso
,
819 &ol_type_vlan_len_msec
,
820 &inner_vtag
, &out_vtag
);
824 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
825 skb_reset_mac_len(skb
);
826 protocol
= skb
->protocol
;
828 ret
= hns3_get_l4_protocol(skb
, &ol4_proto
, &il4_proto
);
831 hns3_set_l2l3l4_len(skb
, ol4_proto
, il4_proto
,
833 &ol_type_vlan_len_msec
);
834 ret
= hns3_set_l3l4_type_csum(skb
, ol4_proto
, il4_proto
,
836 &ol_type_vlan_len_msec
);
840 ret
= hns3_set_tso(skb
, &paylen
, &mss
,
847 desc
->tx
.ol_type_vlan_len_msec
=
848 cpu_to_le32(ol_type_vlan_len_msec
);
849 desc
->tx
.type_cs_vlan_tso_len
=
850 cpu_to_le32(type_cs_vlan_tso
);
851 desc
->tx
.paylen
= cpu_to_le32(paylen
);
852 desc
->tx
.mss
= cpu_to_le16(mss
);
853 desc
->tx
.vlan_tag
= cpu_to_le16(inner_vtag
);
854 desc
->tx
.outer_vlan_tag
= cpu_to_le16(out_vtag
);
857 /* move ring pointer to next.*/
858 ring_ptr_move_fw(ring
, next_to_use
);
863 static int hns3_fill_desc_tso(struct hns3_enet_ring
*ring
, void *priv
,
864 int size
, dma_addr_t dma
, int frag_end
,
865 enum hns_desc_type type
)
867 unsigned int frag_buf_num
;
872 frag_buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
873 sizeoflast
= size
% HNS3_MAX_BD_SIZE
;
874 sizeoflast
= sizeoflast
? sizeoflast
: HNS3_MAX_BD_SIZE
;
876 /* When the frag size is bigger than hardware, split this frag */
877 for (k
= 0; k
< frag_buf_num
; k
++) {
878 ret
= hns3_fill_desc(ring
, priv
,
879 (k
== frag_buf_num
- 1) ?
880 sizeoflast
: HNS3_MAX_BD_SIZE
,
881 dma
+ HNS3_MAX_BD_SIZE
* k
,
882 frag_end
&& (k
== frag_buf_num
- 1) ? 1 : 0,
883 (type
== DESC_TYPE_SKB
&& !k
) ?
884 DESC_TYPE_SKB
: DESC_TYPE_PAGE
);
892 static int hns3_nic_maybe_stop_tso(struct sk_buff
**out_skb
, int *bnum
,
893 struct hns3_enet_ring
*ring
)
895 struct sk_buff
*skb
= *out_skb
;
896 struct skb_frag_struct
*frag
;
903 size
= skb_headlen(skb
);
904 buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
906 frag_num
= skb_shinfo(skb
)->nr_frags
;
907 for (i
= 0; i
< frag_num
; i
++) {
908 frag
= &skb_shinfo(skb
)->frags
[i
];
909 size
= skb_frag_size(frag
);
911 (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
912 if (bdnum_for_frag
> HNS3_MAX_BD_PER_FRAG
)
915 buf_num
+= bdnum_for_frag
;
918 if (buf_num
> ring_space(ring
))
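
/* hns3_nic_maybe_stop_tx - check whether the ring has room for a new skb
 *
 * In the non-TSO case one buffer descriptor is needed per fragment plus one
 * for the linear part, so the required BD count is nr_frags + 1 and the
 * queue is stopped when that exceeds the free ring space.
 */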
static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
				  struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	int buf_num;

	/* No. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}

942 static void hns_nic_dma_unmap(struct hns3_enet_ring
*ring
, int next_to_use_orig
)
944 struct device
*dev
= ring_to_dev(ring
);
947 for (i
= 0; i
< ring
->desc_num
; i
++) {
948 /* check if this is where we started */
949 if (ring
->next_to_use
== next_to_use_orig
)
952 /* unmap the descriptor dma address */
953 if (ring
->desc_cb
[ring
->next_to_use
].type
== DESC_TYPE_SKB
)
954 dma_unmap_single(dev
,
955 ring
->desc_cb
[ring
->next_to_use
].dma
,
956 ring
->desc_cb
[ring
->next_to_use
].length
,
960 ring
->desc_cb
[ring
->next_to_use
].dma
,
961 ring
->desc_cb
[ring
->next_to_use
].length
,
965 ring_ptr_move_bw(ring
, next_to_use
);
969 netdev_tx_t
hns3_nic_net_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
971 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
972 struct hns3_nic_ring_data
*ring_data
=
973 &tx_ring_data(priv
, skb
->queue_mapping
);
974 struct hns3_enet_ring
*ring
= ring_data
->ring
;
975 struct device
*dev
= priv
->dev
;
976 struct netdev_queue
*dev_queue
;
977 struct skb_frag_struct
*frag
;
978 int next_to_use_head
;
979 int next_to_use_frag
;
987 /* Prefetch the data used later */
990 switch (priv
->ops
.maybe_stop_tx(&skb
, &buf_num
, ring
)) {
992 u64_stats_update_begin(&ring
->syncp
);
993 ring
->stats
.tx_busy
++;
994 u64_stats_update_end(&ring
->syncp
);
996 goto out_net_tx_busy
;
998 u64_stats_update_begin(&ring
->syncp
);
999 ring
->stats
.sw_err_cnt
++;
1000 u64_stats_update_end(&ring
->syncp
);
1001 netdev_err(netdev
, "no memory to xmit!\n");
1008 /* No. of segments (plus a header) */
1009 seg_num
= skb_shinfo(skb
)->nr_frags
+ 1;
1010 /* Fill the first part */
1011 size
= skb_headlen(skb
);
1013 next_to_use_head
= ring
->next_to_use
;
1015 dma
= dma_map_single(dev
, skb
->data
, size
, DMA_TO_DEVICE
);
1016 if (dma_mapping_error(dev
, dma
)) {
1017 netdev_err(netdev
, "TX head DMA map failed\n");
1018 ring
->stats
.sw_err_cnt
++;
1022 ret
= priv
->ops
.fill_desc(ring
, skb
, size
, dma
, seg_num
== 1 ? 1 : 0,
1025 goto head_dma_map_err
;
1027 next_to_use_frag
= ring
->next_to_use
;
1028 /* Fill the fragments */
1029 for (i
= 1; i
< seg_num
; i
++) {
1030 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1031 size
= skb_frag_size(frag
);
1032 dma
= skb_frag_dma_map(dev
, frag
, 0, size
, DMA_TO_DEVICE
);
1033 if (dma_mapping_error(dev
, dma
)) {
1034 netdev_err(netdev
, "TX frag(%d) DMA map failed\n", i
);
1035 ring
->stats
.sw_err_cnt
++;
1036 goto frag_dma_map_err
;
1038 ret
= priv
->ops
.fill_desc(ring
, skb_frag_page(frag
), size
, dma
,
1039 seg_num
- 1 == i
? 1 : 0,
1043 goto frag_dma_map_err
;
1046 /* Complete translate all packets */
1047 dev_queue
= netdev_get_tx_queue(netdev
, ring_data
->queue_index
);
1048 netdev_tx_sent_queue(dev_queue
, skb
->len
);
1050 wmb(); /* Commit all data before submit */
1052 hnae_queue_xmit(ring
->tqp
, buf_num
);
1054 return NETDEV_TX_OK
;
1057 hns_nic_dma_unmap(ring
, next_to_use_frag
);
1060 hns_nic_dma_unmap(ring
, next_to_use_head
);
1063 dev_kfree_skb_any(skb
);
1064 return NETDEV_TX_OK
;
1067 netif_stop_subqueue(netdev
, ring_data
->queue_index
);
1068 smp_mb(); /* Commit all data before submit */
1070 return NETDEV_TX_BUSY
;
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}

1093 static int hns3_nic_set_features(struct net_device
*netdev
,
1094 netdev_features_t features
)
1096 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1097 struct hnae3_handle
*h
= priv
->ae_handle
;
1098 netdev_features_t changed
;
1101 if (features
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1102 priv
->ops
.fill_desc
= hns3_fill_desc_tso
;
1103 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tso
;
1105 priv
->ops
.fill_desc
= hns3_fill_desc
;
1106 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tx
;
1109 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1110 h
->ae_algo
->ops
->enable_vlan_filter(h
, true);
1112 h
->ae_algo
->ops
->enable_vlan_filter(h
, false);
1114 changed
= netdev
->features
^ features
;
1115 if (changed
& NETIF_F_HW_VLAN_CTAG_RX
) {
1116 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1117 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, true);
1119 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, false);
1125 netdev
->features
= features
;
1129 static void hns3_nic_get_stats64(struct net_device
*netdev
,
1130 struct rtnl_link_stats64
*stats
)
1132 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1133 int queue_num
= priv
->ae_handle
->kinfo
.num_tqps
;
1134 struct hnae3_handle
*handle
= priv
->ae_handle
;
1135 struct hns3_enet_ring
*ring
;
1145 if (test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
1148 handle
->ae_algo
->ops
->update_stats(handle
, &netdev
->stats
);
1150 for (idx
= 0; idx
< queue_num
; idx
++) {
1151 /* fetch the tx stats */
1152 ring
= priv
->ring_data
[idx
].ring
;
1154 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1155 tx_bytes
+= ring
->stats
.tx_bytes
;
1156 tx_pkts
+= ring
->stats
.tx_pkts
;
1157 tx_drop
+= ring
->stats
.tx_busy
;
1158 tx_drop
+= ring
->stats
.sw_err_cnt
;
1159 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1161 /* fetch the rx stats */
1162 ring
= priv
->ring_data
[idx
+ queue_num
].ring
;
1164 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1165 rx_bytes
+= ring
->stats
.rx_bytes
;
1166 rx_pkts
+= ring
->stats
.rx_pkts
;
1167 rx_drop
+= ring
->stats
.non_vld_descs
;
1168 rx_drop
+= ring
->stats
.err_pkt_len
;
1169 rx_drop
+= ring
->stats
.l2_err
;
1170 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1173 stats
->tx_bytes
= tx_bytes
;
1174 stats
->tx_packets
= tx_pkts
;
1175 stats
->rx_bytes
= rx_bytes
;
1176 stats
->rx_packets
= rx_pkts
;
1178 stats
->rx_errors
= netdev
->stats
.rx_errors
;
1179 stats
->multicast
= netdev
->stats
.multicast
;
1180 stats
->rx_length_errors
= netdev
->stats
.rx_length_errors
;
1181 stats
->rx_crc_errors
= netdev
->stats
.rx_crc_errors
;
1182 stats
->rx_missed_errors
= netdev
->stats
.rx_missed_errors
;
1184 stats
->tx_errors
= netdev
->stats
.tx_errors
;
1185 stats
->rx_dropped
= rx_drop
+ netdev
->stats
.rx_dropped
;
1186 stats
->tx_dropped
= tx_drop
+ netdev
->stats
.tx_dropped
;
1187 stats
->collisions
= netdev
->stats
.collisions
;
1188 stats
->rx_over_errors
= netdev
->stats
.rx_over_errors
;
1189 stats
->rx_frame_errors
= netdev
->stats
.rx_frame_errors
;
1190 stats
->rx_fifo_errors
= netdev
->stats
.rx_fifo_errors
;
1191 stats
->tx_aborted_errors
= netdev
->stats
.tx_aborted_errors
;
1192 stats
->tx_carrier_errors
= netdev
->stats
.tx_carrier_errors
;
1193 stats
->tx_fifo_errors
= netdev
->stats
.tx_fifo_errors
;
1194 stats
->tx_heartbeat_errors
= netdev
->stats
.tx_heartbeat_errors
;
1195 stats
->tx_window_errors
= netdev
->stats
.tx_window_errors
;
1196 stats
->rx_compressed
= netdev
->stats
.rx_compressed
;
1197 stats
->tx_compressed
= netdev
->stats
.tx_compressed
;
1200 static void hns3_add_tunnel_port(struct net_device
*netdev
, u16 port
,
1201 enum hns3_udp_tnl_type type
)
1203 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1204 struct hns3_udp_tunnel
*udp_tnl
= &priv
->udp_tnl
[type
];
1205 struct hnae3_handle
*h
= priv
->ae_handle
;
1207 if (udp_tnl
->used
&& udp_tnl
->dst_port
== port
) {
1212 if (udp_tnl
->used
) {
1214 "UDP tunnel [%d], port [%d] offload\n", type
, port
);
1218 udp_tnl
->dst_port
= port
;
1220 /* TBD send command to hardware to add port */
1221 if (h
->ae_algo
->ops
->add_tunnel_udp
)
1222 h
->ae_algo
->ops
->add_tunnel_udp(h
, port
);
1225 static void hns3_del_tunnel_port(struct net_device
*netdev
, u16 port
,
1226 enum hns3_udp_tnl_type type
)
1228 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1229 struct hns3_udp_tunnel
*udp_tnl
= &priv
->udp_tnl
[type
];
1230 struct hnae3_handle
*h
= priv
->ae_handle
;
1232 if (!udp_tnl
->used
|| udp_tnl
->dst_port
!= port
) {
1234 "Invalid UDP tunnel port %d\n", port
);
1242 udp_tnl
->dst_port
= 0;
1243 /* TBD send command to hardware to del port */
1244 if (h
->ae_algo
->ops
->del_tunnel_udp
)
1245 h
->ae_algo
->ops
->del_tunnel_udp(h
, port
);
/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
 * @netdev: This physical port's netdev
 * @ti: Tunnel information
 */
1252 static void hns3_nic_udp_tunnel_add(struct net_device
*netdev
,
1253 struct udp_tunnel_info
*ti
)
1255 u16 port_n
= ntohs(ti
->port
);
1258 case UDP_TUNNEL_TYPE_VXLAN
:
1259 hns3_add_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_VXLAN
);
1261 case UDP_TUNNEL_TYPE_GENEVE
:
1262 hns3_add_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_GENEVE
);
1265 netdev_err(netdev
, "unsupported tunnel type %d\n", ti
->type
);
1270 static void hns3_nic_udp_tunnel_del(struct net_device
*netdev
,
1271 struct udp_tunnel_info
*ti
)
1273 u16 port_n
= ntohs(ti
->port
);
1276 case UDP_TUNNEL_TYPE_VXLAN
:
1277 hns3_del_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_VXLAN
);
1279 case UDP_TUNNEL_TYPE_GENEVE
:
1280 hns3_del_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_GENEVE
);
1287 static int hns3_setup_tc(struct net_device
*netdev
, void *type_data
)
1289 struct tc_mqprio_qopt_offload
*mqprio_qopt
= type_data
;
1290 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1291 struct hnae3_knic_private_info
*kinfo
= &h
->kinfo
;
1292 u8
*prio_tc
= mqprio_qopt
->qopt
.prio_tc_map
;
1293 u8 tc
= mqprio_qopt
->qopt
.num_tc
;
1294 u16 mode
= mqprio_qopt
->mode
;
1295 u8 hw
= mqprio_qopt
->qopt
.hw
;
1300 if (!((hw
== TC_MQPRIO_HW_OFFLOAD_TCS
&&
1301 mode
== TC_MQPRIO_MODE_CHANNEL
) || (!hw
&& tc
== 0)))
1304 if (tc
> HNAE3_MAX_TC
)
1310 if_running
= netif_running(netdev
);
1312 hns3_nic_net_stop(netdev
);
1316 ret
= (kinfo
->dcb_ops
&& kinfo
->dcb_ops
->setup_tc
) ?
1317 kinfo
->dcb_ops
->setup_tc(h
, tc
, prio_tc
) : -EOPNOTSUPP
;
1322 netdev_reset_tc(netdev
);
1324 ret
= netdev_set_num_tc(netdev
, tc
);
1328 for (i
= 0; i
< HNAE3_MAX_TC
; i
++) {
1329 if (!kinfo
->tc_info
[i
].enable
)
1332 netdev_set_tc_queue(netdev
,
1333 kinfo
->tc_info
[i
].tc
,
1334 kinfo
->tc_info
[i
].tqp_count
,
1335 kinfo
->tc_info
[i
].tqp_offset
);
1339 ret
= hns3_nic_set_real_num_queue(netdev
);
1343 hns3_nic_net_open(netdev
);
1348 static int hns3_nic_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
1351 if (type
!= TC_SETUP_QDISC_MQPRIO
)
1354 return hns3_setup_tc(dev
, type_data
);
1357 static int hns3_vlan_rx_add_vid(struct net_device
*netdev
,
1358 __be16 proto
, u16 vid
)
1360 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1363 if (h
->ae_algo
->ops
->set_vlan_filter
)
1364 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, false);
1369 static int hns3_vlan_rx_kill_vid(struct net_device
*netdev
,
1370 __be16 proto
, u16 vid
)
1372 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1375 if (h
->ae_algo
->ops
->set_vlan_filter
)
1376 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, true);
1381 static int hns3_ndo_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
1382 u8 qos
, __be16 vlan_proto
)
1384 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1387 if (h
->ae_algo
->ops
->set_vf_vlan_filter
)
1388 ret
= h
->ae_algo
->ops
->set_vf_vlan_filter(h
, vf
, vlan
,
1394 static int hns3_nic_change_mtu(struct net_device
*netdev
, int new_mtu
)
1396 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1397 bool if_running
= netif_running(netdev
);
1400 if (!h
->ae_algo
->ops
->set_mtu
)
1403 /* if this was called with netdev up then bring netdevice down */
1405 (void)hns3_nic_net_stop(netdev
);
1409 ret
= h
->ae_algo
->ops
->set_mtu(h
, new_mtu
);
1411 netdev_err(netdev
, "failed to change MTU in hardware %d\n",
1416 netdev
->mtu
= new_mtu
;
1418 /* if the netdev was running earlier, bring it up again */
1419 if (if_running
&& hns3_nic_net_open(netdev
))
1425 static bool hns3_get_tx_timeo_queue_info(struct net_device
*ndev
)
1427 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1428 struct hns3_enet_ring
*tx_ring
= NULL
;
1429 int timeout_queue
= 0;
1430 int hw_head
, hw_tail
;
1433 /* Find the stopped queue the same way the stack does */
1434 for (i
= 0; i
< ndev
->real_num_tx_queues
; i
++) {
1435 struct netdev_queue
*q
;
1436 unsigned long trans_start
;
1438 q
= netdev_get_tx_queue(ndev
, i
);
1439 trans_start
= q
->trans_start
;
1440 if (netif_xmit_stopped(q
) &&
1442 (trans_start
+ ndev
->watchdog_timeo
))) {
1448 if (i
== ndev
->num_tx_queues
) {
1450 "no netdev TX timeout queue found, timeout count: %llu\n",
1451 priv
->tx_timeout_count
);
1455 tx_ring
= priv
->ring_data
[timeout_queue
].ring
;
1457 hw_head
= readl_relaxed(tx_ring
->tqp
->io_base
+
1458 HNS3_RING_TX_RING_HEAD_REG
);
1459 hw_tail
= readl_relaxed(tx_ring
->tqp
->io_base
+
1460 HNS3_RING_TX_RING_TAIL_REG
);
1462 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1463 priv
->tx_timeout_count
,
1465 tx_ring
->next_to_use
,
1466 tx_ring
->next_to_clean
,
1469 readl(tx_ring
->tqp_vector
->mask_addr
));
1474 static void hns3_nic_net_timeout(struct net_device
*ndev
)
1476 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1477 unsigned long last_reset_time
= priv
->last_reset_time
;
1478 struct hnae3_handle
*h
= priv
->ae_handle
;
1480 if (!hns3_get_tx_timeo_queue_info(ndev
))
1483 priv
->tx_timeout_count
++;
1485 /* This timeout is far away enough from last timeout,
1486 * if timeout again,set the reset type to PF reset
1488 if (time_after(jiffies
, (last_reset_time
+ 20 * HZ
)))
1489 priv
->reset_level
= HNAE3_FUNC_RESET
;
1491 /* Don't do any new action before the next timeout */
1492 else if (time_before(jiffies
, (last_reset_time
+ ndev
->watchdog_timeo
)))
1495 priv
->last_reset_time
= jiffies
;
1497 if (h
->ae_algo
->ops
->reset_event
)
1498 h
->ae_algo
->ops
->reset_event(h
, priv
->reset_level
);
1500 priv
->reset_level
++;
1501 if (priv
->reset_level
> HNAE3_GLOBAL_RESET
)
1502 priv
->reset_level
= HNAE3_GLOBAL_RESET
;
static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_udp_tunnel_add	= hns3_nic_udp_tunnel_add,
	.ndo_udp_tunnel_del	= hns3_nic_udp_tunnel_del,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
};

/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
	if (!ae_dev)
		return -ENOMEM;

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	pci_set_drvdata(pdev, ae_dev);

	return hnae3_register_ae_dev(ae_dev);
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	hnae3_unregister_ae_dev(ae_dev);

	devm_kfree(&pdev->dev, ae_dev);

	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
};

1574 /* set default feature to hns3 */
1575 static void hns3_set_default_feature(struct net_device
*netdev
)
1577 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1579 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
1581 netdev
->hw_enc_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1582 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1583 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1584 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1585 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1587 netdev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
1589 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
1591 netdev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1592 NETIF_F_HW_VLAN_CTAG_FILTER
|
1593 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1594 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1595 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1596 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1597 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1599 netdev
->vlan_features
|=
1600 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM
|
1601 NETIF_F_SG
| NETIF_F_GSO
| NETIF_F_GRO
|
1602 NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1603 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1604 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1606 netdev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1607 NETIF_F_HW_VLAN_CTAG_TX
|
1608 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1609 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1610 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1611 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1613 if (!(h
->flags
& HNAE3_SUPPORT_VF
))
1614 netdev
->hw_features
|=
1615 NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_VLAN_CTAG_RX
;
1618 static int hns3_alloc_buffer(struct hns3_enet_ring
*ring
,
1619 struct hns3_desc_cb
*cb
)
1621 unsigned int order
= hnae_page_order(ring
);
1624 p
= dev_alloc_pages(order
);
1629 cb
->page_offset
= 0;
1631 cb
->buf
= page_address(p
);
1632 cb
->length
= hnae_page_size(ring
);
1633 cb
->type
= DESC_TYPE_PAGE
;
1638 static void hns3_free_buffer(struct hns3_enet_ring
*ring
,
1639 struct hns3_desc_cb
*cb
)
1641 if (cb
->type
== DESC_TYPE_SKB
)
1642 dev_kfree_skb_any((struct sk_buff
*)cb
->priv
);
1643 else if (!HNAE3_IS_TX_RING(ring
))
1644 put_page((struct page
*)cb
->priv
);
1645 memset(cb
, 0, sizeof(*cb
));
1648 static int hns3_map_buffer(struct hns3_enet_ring
*ring
, struct hns3_desc_cb
*cb
)
1650 cb
->dma
= dma_map_page(ring_to_dev(ring
), cb
->priv
, 0,
1651 cb
->length
, ring_to_dma_dir(ring
));
1653 if (dma_mapping_error(ring_to_dev(ring
), cb
->dma
))
1659 static void hns3_unmap_buffer(struct hns3_enet_ring
*ring
,
1660 struct hns3_desc_cb
*cb
)
1662 if (cb
->type
== DESC_TYPE_SKB
)
1663 dma_unmap_single(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1664 ring_to_dma_dir(ring
));
1666 dma_unmap_page(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1667 ring_to_dma_dir(ring
));
1670 static void hns3_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1672 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1673 ring
->desc
[i
].addr
= 0;
1676 static void hns3_free_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1678 struct hns3_desc_cb
*cb
= &ring
->desc_cb
[i
];
1680 if (!ring
->desc_cb
[i
].dma
)
1683 hns3_buffer_detach(ring
, i
);
1684 hns3_free_buffer(ring
, cb
);
1687 static void hns3_free_buffers(struct hns3_enet_ring
*ring
)
1691 for (i
= 0; i
< ring
->desc_num
; i
++)
1692 hns3_free_buffer_detach(ring
, i
);
1695 /* free desc along with its attached buffer */
1696 static void hns3_free_desc(struct hns3_enet_ring
*ring
)
1698 hns3_free_buffers(ring
);
1700 dma_unmap_single(ring_to_dev(ring
), ring
->desc_dma_addr
,
1701 ring
->desc_num
* sizeof(ring
->desc
[0]),
1703 ring
->desc_dma_addr
= 0;
1708 static int hns3_alloc_desc(struct hns3_enet_ring
*ring
)
1710 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
1712 ring
->desc
= kzalloc(size
, GFP_KERNEL
);
1716 ring
->desc_dma_addr
= dma_map_single(ring_to_dev(ring
), ring
->desc
,
1717 size
, DMA_BIDIRECTIONAL
);
1718 if (dma_mapping_error(ring_to_dev(ring
), ring
->desc_dma_addr
)) {
1719 ring
->desc_dma_addr
= 0;
1728 static int hns3_reserve_buffer_map(struct hns3_enet_ring
*ring
,
1729 struct hns3_desc_cb
*cb
)
1733 ret
= hns3_alloc_buffer(ring
, cb
);
1737 ret
= hns3_map_buffer(ring
, cb
);
1744 hns3_free_buffer(ring
, cb
);
1749 static int hns3_alloc_buffer_attach(struct hns3_enet_ring
*ring
, int i
)
1751 int ret
= hns3_reserve_buffer_map(ring
, &ring
->desc_cb
[i
]);
1756 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
1761 /* Allocate memory for raw pkg, and map with dma */
1762 static int hns3_alloc_ring_buffers(struct hns3_enet_ring
*ring
)
1766 for (i
= 0; i
< ring
->desc_num
; i
++) {
1767 ret
= hns3_alloc_buffer_attach(ring
, i
);
1769 goto out_buffer_fail
;
1775 for (j
= i
- 1; j
>= 0; j
--)
1776 hns3_free_buffer_detach(ring
, j
);
1780 /* detach a in-used buffer and replace with a reserved one */
1781 static void hns3_replace_buffer(struct hns3_enet_ring
*ring
, int i
,
1782 struct hns3_desc_cb
*res_cb
)
1784 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1785 ring
->desc_cb
[i
] = *res_cb
;
1786 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
1789 static void hns3_reuse_buffer(struct hns3_enet_ring
*ring
, int i
)
1791 ring
->desc_cb
[i
].reuse_flag
= 0;
1792 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
1793 + ring
->desc_cb
[i
].page_offset
);
1796 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring
*ring
, int *bytes
,
1799 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
1801 (*pkts
) += (desc_cb
->type
== DESC_TYPE_SKB
);
1802 (*bytes
) += desc_cb
->length
;
1803 /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
1804 hns3_free_buffer_detach(ring
, ring
->next_to_clean
);
1806 ring_ptr_move_fw(ring
, next_to_clean
);
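
/* is_valid_clean_head - check that the Tx head index reported by hardware
 * lies within the range of descriptors the driver has actually posted
 *
 * next_to_clean .. next_to_use is the window of in-flight descriptors; the
 * two cases of the final expression handle whether or not that window wraps
 * around the end of the ring.
 */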
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

1820 bool hns3_clean_tx_ring(struct hns3_enet_ring
*ring
, int budget
)
1822 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
1823 struct netdev_queue
*dev_queue
;
1827 head
= readl_relaxed(ring
->tqp
->io_base
+ HNS3_RING_TX_RING_HEAD_REG
);
1828 rmb(); /* Make sure head is ready before touch any data */
1830 if (is_ring_empty(ring
) || head
== ring
->next_to_clean
)
1831 return true; /* no data to poll */
1833 if (!is_valid_clean_head(ring
, head
)) {
1834 netdev_err(netdev
, "wrong head (%d, %d-%d)\n", head
,
1835 ring
->next_to_use
, ring
->next_to_clean
);
1837 u64_stats_update_begin(&ring
->syncp
);
1838 ring
->stats
.io_err_cnt
++;
1839 u64_stats_update_end(&ring
->syncp
);
1845 while (head
!= ring
->next_to_clean
&& budget
) {
1846 hns3_nic_reclaim_one_desc(ring
, &bytes
, &pkts
);
1847 /* Issue prefetch for next Tx descriptor */
1848 prefetch(&ring
->desc_cb
[ring
->next_to_clean
]);
1852 ring
->tqp_vector
->tx_group
.total_bytes
+= bytes
;
1853 ring
->tqp_vector
->tx_group
.total_packets
+= pkts
;
1855 u64_stats_update_begin(&ring
->syncp
);
1856 ring
->stats
.tx_bytes
+= bytes
;
1857 ring
->stats
.tx_pkts
+= pkts
;
1858 u64_stats_update_end(&ring
->syncp
);
1860 dev_queue
= netdev_get_tx_queue(netdev
, ring
->tqp
->tqp_index
);
1861 netdev_tx_completed_queue(dev_queue
, pkts
, bytes
);
1863 if (unlikely(pkts
&& netif_carrier_ok(netdev
) &&
1864 (ring_space(ring
) > HNS3_MAX_BD_PER_PKT
))) {
1865 /* Make sure that anybody stopping the queue after this
1866 * sees the new next_to_clean.
1869 if (netif_tx_queue_stopped(dev_queue
)) {
1870 netif_tx_wake_queue(dev_queue
);
1871 ring
->stats
.restart_queue
++;
1878 static int hns3_desc_unused(struct hns3_enet_ring
*ring
)
1880 int ntc
= ring
->next_to_clean
;
1881 int ntu
= ring
->next_to_use
;
1883 return ((ntc
>= ntu
) ? 0 : ring
->desc_num
) + ntc
- ntu
;
1887 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring
*ring
, int cleand_count
)
1889 struct hns3_desc_cb
*desc_cb
;
1890 struct hns3_desc_cb res_cbs
;
1893 for (i
= 0; i
< cleand_count
; i
++) {
1894 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
1895 if (desc_cb
->reuse_flag
) {
1896 u64_stats_update_begin(&ring
->syncp
);
1897 ring
->stats
.reuse_pg_cnt
++;
1898 u64_stats_update_end(&ring
->syncp
);
1900 hns3_reuse_buffer(ring
, ring
->next_to_use
);
1902 ret
= hns3_reserve_buffer_map(ring
, &res_cbs
);
1904 u64_stats_update_begin(&ring
->syncp
);
1905 ring
->stats
.sw_err_cnt
++;
1906 u64_stats_update_end(&ring
->syncp
);
1908 netdev_err(ring
->tqp
->handle
->kinfo
.netdev
,
1909 "hnae reserve buffer map failed.\n");
1912 hns3_replace_buffer(ring
, ring
->next_to_use
, &res_cbs
);
1915 ring_ptr_move_fw(ring
, next_to_use
);
1918 wmb(); /* Make all data has been write before submit */
1919 writel_relaxed(i
, ring
->tqp
->io_base
+ HNS3_RING_RX_RING_HEAD_REG
);
1922 /* hns3_nic_get_headlen - determine size of header for LRO/GRO
1923 * @data: pointer to the start of the headers
1924 * @max: total length of section to find headers in
1926 * This function is meant to determine the length of headers that will
1927 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1928 * motivation of doing this is to only perform one pull for IPv4 TCP
1929 * packets so that we can do basic things like calculating the gso_size
1930 * based on the average data per packet.
1932 static unsigned int hns3_nic_get_headlen(unsigned char *data
, u32 flag
,
1933 unsigned int max_size
)
1935 unsigned char *network
;
1938 /* This should never happen, but better safe than sorry */
1939 if (max_size
< ETH_HLEN
)
1942 /* Initialize network frame pointer */
1945 /* Set first protocol and move network header forward */
1946 network
+= ETH_HLEN
;
1948 /* Handle any vlan tag if present */
1949 if (hnae_get_field(flag
, HNS3_RXD_VLAN_M
, HNS3_RXD_VLAN_S
)
1950 == HNS3_RX_FLAG_VLAN_PRESENT
) {
1951 if ((typeof(max_size
))(network
- data
) > (max_size
- VLAN_HLEN
))
1954 network
+= VLAN_HLEN
;
1957 /* Handle L3 protocols */
1958 if (hnae_get_field(flag
, HNS3_RXD_L3ID_M
, HNS3_RXD_L3ID_S
)
1959 == HNS3_RX_FLAG_L3ID_IPV4
) {
1960 if ((typeof(max_size
))(network
- data
) >
1961 (max_size
- sizeof(struct iphdr
)))
1964 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1965 hlen
= (network
[0] & 0x0F) << 2;
1967 /* Verify hlen meets minimum size requirements */
1968 if (hlen
< sizeof(struct iphdr
))
1969 return network
- data
;
1971 /* Record next protocol if header is present */
1972 } else if (hnae_get_field(flag
, HNS3_RXD_L3ID_M
, HNS3_RXD_L3ID_S
)
1973 == HNS3_RX_FLAG_L3ID_IPV6
) {
1974 if ((typeof(max_size
))(network
- data
) >
1975 (max_size
- sizeof(struct ipv6hdr
)))
1978 /* Record next protocol */
1979 hlen
= sizeof(struct ipv6hdr
);
1981 return network
- data
;
1984 /* Relocate pointer to start of L4 header */
1987 /* Finally sort out TCP/UDP */
1988 if (hnae_get_field(flag
, HNS3_RXD_L4ID_M
, HNS3_RXD_L4ID_S
)
1989 == HNS3_RX_FLAG_L4ID_TCP
) {
1990 if ((typeof(max_size
))(network
- data
) >
1991 (max_size
- sizeof(struct tcphdr
)))
1994 /* Access doff as a u8 to avoid unaligned access on ia64 */
1995 hlen
= (network
[12] & 0xF0) >> 2;
1997 /* Verify hlen meets minimum size requirements */
1998 if (hlen
< sizeof(struct tcphdr
))
1999 return network
- data
;
2002 } else if (hnae_get_field(flag
, HNS3_RXD_L4ID_M
, HNS3_RXD_L4ID_S
)
2003 == HNS3_RX_FLAG_L4ID_UDP
) {
2004 if ((typeof(max_size
))(network
- data
) >
2005 (max_size
- sizeof(struct udphdr
)))
2008 network
+= sizeof(struct udphdr
);
2011 /* If everything has gone correctly network should be the
2012 * data section of the packet and will be the end of the header.
2013 * If not then it probably represents the end of the last recognized
2016 if ((typeof(max_size
))(network
- data
) < max_size
)
2017 return network
- data
;
2022 static void hns3_nic_reuse_page(struct sk_buff
*skb
, int i
,
2023 struct hns3_enet_ring
*ring
, int pull_len
,
2024 struct hns3_desc_cb
*desc_cb
)
2026 struct hns3_desc
*desc
;
2031 twobufs
= ((PAGE_SIZE
< 8192) &&
2032 hnae_buf_size(ring
) == HNS3_BUFFER_SIZE_2048
);
2034 desc
= &ring
->desc
[ring
->next_to_clean
];
2035 size
= le16_to_cpu(desc
->rx
.size
);
2038 truesize
= hnae_buf_size(ring
);
2040 truesize
= ALIGN(size
, L1_CACHE_BYTES
);
2041 last_offset
= hnae_page_size(ring
) - hnae_buf_size(ring
);
2044 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, desc_cb
->page_offset
+ pull_len
,
2045 size
- pull_len
, truesize
- pull_len
);
2047 /* Avoid re-using remote pages,flag default unreuse */
2048 if (unlikely(page_to_nid(desc_cb
->priv
) != numa_node_id()))
2052 /* If we are only owner of page we can reuse it */
2053 if (likely(page_count(desc_cb
->priv
) == 1)) {
2054 /* Flip page offset to other buffer */
2055 desc_cb
->page_offset
^= truesize
;
2057 desc_cb
->reuse_flag
= 1;
2058 /* bump ref count on page before it is given*/
2059 get_page(desc_cb
->priv
);
2064 /* Move offset up to the next cache line */
2065 desc_cb
->page_offset
+= truesize
;
2067 if (desc_cb
->page_offset
<= last_offset
) {
2068 desc_cb
->reuse_flag
= 1;
2069 /* Bump ref count on page before it is given*/
2070 get_page(desc_cb
->priv
);
2074 static void hns3_rx_checksum(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
,
2075 struct hns3_desc
*desc
)
2077 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2078 int l3_type
, l4_type
;
2083 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2084 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2086 skb
->ip_summed
= CHECKSUM_NONE
;
2088 skb_checksum_none_assert(skb
);
2090 if (!(netdev
->features
& NETIF_F_RXCSUM
))
2093 /* check if hardware has done checksum */
2094 if (!hnae_get_bit(bd_base_info
, HNS3_RXD_L3L4P_B
))
2097 if (unlikely(hnae_get_bit(l234info
, HNS3_RXD_L3E_B
) ||
2098 hnae_get_bit(l234info
, HNS3_RXD_L4E_B
) ||
2099 hnae_get_bit(l234info
, HNS3_RXD_OL3E_B
) ||
2100 hnae_get_bit(l234info
, HNS3_RXD_OL4E_B
))) {
2101 netdev_err(netdev
, "L3/L4 error pkt\n");
2102 u64_stats_update_begin(&ring
->syncp
);
2103 ring
->stats
.l3l4_csum_err
++;
2104 u64_stats_update_end(&ring
->syncp
);
2109 l3_type
= hnae_get_field(l234info
, HNS3_RXD_L3ID_M
,
2111 l4_type
= hnae_get_field(l234info
, HNS3_RXD_L4ID_M
,
2114 ol4_type
= hnae_get_field(l234info
, HNS3_RXD_OL4ID_M
, HNS3_RXD_OL4ID_S
);
2116 case HNS3_OL4_TYPE_MAC_IN_UDP
:
2117 case HNS3_OL4_TYPE_NVGRE
:
2118 skb
->csum_level
= 1;
2119 case HNS3_OL4_TYPE_NO_TUN
:
2120 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2121 if (l3_type
== HNS3_L3_TYPE_IPV4
||
2122 (l3_type
== HNS3_L3_TYPE_IPV6
&&
2123 (l4_type
== HNS3_L4_TYPE_UDP
||
2124 l4_type
== HNS3_L4_TYPE_TCP
||
2125 l4_type
== HNS3_L4_TYPE_SCTP
)))
2126 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2131 static void hns3_rx_skb(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
)
2133 napi_gro_receive(&ring
->tqp_vector
->napi
, skb
);
2136 static int hns3_handle_rx_bd(struct hns3_enet_ring
*ring
,
2137 struct sk_buff
**out_skb
, int *out_bnum
)
2139 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2140 struct hns3_desc_cb
*desc_cb
;
2141 struct hns3_desc
*desc
;
2142 struct sk_buff
*skb
;
2150 desc
= &ring
->desc
[ring
->next_to_clean
];
2151 desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
2155 length
= le16_to_cpu(desc
->rx
.pkt_len
);
2156 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2157 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2159 /* Check valid BD */
2160 if (!hnae_get_bit(bd_base_info
, HNS3_RXD_VLD_B
))
2163 va
= (unsigned char *)desc_cb
->buf
+ desc_cb
->page_offset
;
2165 /* Prefetch first cache line of first page
2166 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
2167 * line size is 64B so need to prefetch twice to make it 128B. But in
2168 * actual we can have greater size of caches with 128B Level 1 cache
2169 * lines. In such a case, single fetch would suffice to cache in the
2170 * relevant part of the header.
2173 #if L1_CACHE_BYTES < 128
2174 prefetch(va
+ L1_CACHE_BYTES
);
2177 skb
= *out_skb
= napi_alloc_skb(&ring
->tqp_vector
->napi
,
2179 if (unlikely(!skb
)) {
2180 netdev_err(netdev
, "alloc rx skb fail\n");
2182 u64_stats_update_begin(&ring
->syncp
);
2183 ring
->stats
.sw_err_cnt
++;
2184 u64_stats_update_end(&ring
->syncp
);
2189 prefetchw(skb
->data
);
2191 /* Based on hw strategy, the tag offloaded will be stored at
2192 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
2193 * in one layer tag case.
2195 if (netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
) {
2198 vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
2199 if (!(vlan_tag
& VLAN_VID_MASK
))
2200 vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
2201 if (vlan_tag
& VLAN_VID_MASK
)
2202 __vlan_hwaccel_put_tag(skb
,
2208 if (length
<= HNS3_RX_HEAD_SIZE
) {
2209 memcpy(__skb_put(skb
, length
), va
, ALIGN(length
, sizeof(long)));
2211 /* We can reuse buffer as-is, just make sure it is local */
2212 if (likely(page_to_nid(desc_cb
->priv
) == numa_node_id()))
2213 desc_cb
->reuse_flag
= 1;
2214 else /* This page cannot be reused so discard it */
2215 put_page(desc_cb
->priv
);
2217 ring_ptr_move_fw(ring
, next_to_clean
);
2219 u64_stats_update_begin(&ring
->syncp
);
2220 ring
->stats
.seg_pkt_cnt
++;
2221 u64_stats_update_end(&ring
->syncp
);
2223 pull_len
= hns3_nic_get_headlen(va
, l234info
,
2225 memcpy(__skb_put(skb
, pull_len
), va
,
2226 ALIGN(pull_len
, sizeof(long)));
2228 hns3_nic_reuse_page(skb
, 0, ring
, pull_len
, desc_cb
);
2229 ring_ptr_move_fw(ring
, next_to_clean
);
2231 while (!hnae_get_bit(bd_base_info
, HNS3_RXD_FE_B
)) {
2232 desc
= &ring
->desc
[ring
->next_to_clean
];
2233 desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
2234 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2235 hns3_nic_reuse_page(skb
, bnum
, ring
, 0, desc_cb
);
2236 ring_ptr_move_fw(ring
, next_to_clean
);
2243 if (unlikely(!hnae_get_bit(bd_base_info
, HNS3_RXD_VLD_B
))) {
2244 netdev_err(netdev
, "no valid bd,%016llx,%016llx\n",
2245 ((u64
*)desc
)[0], ((u64
*)desc
)[1]);
2246 u64_stats_update_begin(&ring
->syncp
);
2247 ring
->stats
.non_vld_descs
++;
2248 u64_stats_update_end(&ring
->syncp
);
2250 dev_kfree_skb_any(skb
);
2254 if (unlikely((!desc
->rx
.pkt_len
) ||
2255 hnae_get_bit(l234info
, HNS3_RXD_TRUNCAT_B
))) {
2256 netdev_err(netdev
, "truncated pkt\n");
2257 u64_stats_update_begin(&ring
->syncp
);
2258 ring
->stats
.err_pkt_len
++;
2259 u64_stats_update_end(&ring
->syncp
);
2261 dev_kfree_skb_any(skb
);
2265 if (unlikely(hnae_get_bit(l234info
, HNS3_RXD_L2E_B
))) {
2266 netdev_err(netdev
, "L2 error pkt\n");
2267 u64_stats_update_begin(&ring
->syncp
);
2268 ring
->stats
.l2_err
++;
2269 u64_stats_update_end(&ring
->syncp
);
2271 dev_kfree_skb_any(skb
);
2275 u64_stats_update_begin(&ring
->syncp
);
2276 ring
->stats
.rx_pkts
++;
2277 ring
->stats
.rx_bytes
+= skb
->len
;
2278 u64_stats_update_end(&ring
->syncp
);
2280 ring
->tqp_vector
->rx_group
.total_bytes
+= skb
->len
;
2282 hns3_rx_checksum(ring
, skb
, desc
);
int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring);
	struct sk_buff *skb = NULL;
	int num, bnum = 0;
	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num is read before any other descriptor data */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;
	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring);
		}

		err = hns3_handle_rx_bd(ring, &skb, &bnum);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* Skip this erroneous packet */
			recv_pkts++;
			continue;
		}

		/* Hand the packet up to the IP stack */
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);

		recv_pkts++;
	}

out:
	/* Make sure all data has been written before buffers are submitted */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
#define HNS3_RX_ULTRA_PACKET_RATE 40000
	enum hns3_flow_level_range new_flow_level;
	struct hns3_enet_tqp_vector *tqp_vector;
	int packets_per_secs;
	int bytes_per_usecs;
	u16 new_int_gl;
	int usecs;

	if (!ring_group->int_gl)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->int_gl = HNS3_INT_GL_50K;
		ring_group->flow_level = HNS3_FLOW_LOW;
		return false;
	}
	/* Simple throttle rate management:
	 *   0-10 MB/s    low    (50000 ints/s)
	 *   10-20 MB/s   middle (20000 ints/s)
	 *   20-1249 MB/s high   (18000 ints/s)
	 *   > 40000 pps  ultra  (8000 ints/s)
	 */
	new_flow_level = ring_group->flow_level;
	new_int_gl = ring_group->int_gl;
	tqp_vector = ring_group->ring->tqp_vector;
	usecs = (ring_group->int_gl << 1);
	bytes_per_usecs = ring_group->total_bytes / usecs;
	/* 1000000 microseconds per second */
	packets_per_secs = ring_group->total_packets * 1000000 / usecs;
	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_usecs > 10)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_usecs > 20)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_usecs <= 10)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_usecs <= 20)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

	if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
	    (&tqp_vector->rx_group == ring_group))
		new_flow_level = HNS3_FLOW_ULTRA;
	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->flow_level = new_flow_level;
	if (new_int_gl != ring_group->int_gl) {
		ring_group->int_gl = new_int_gl;
		return true;
	}

	return false;
}
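
/* hns3_update_new_int_gl - keep RX and TX of one vector at a common GL value
 * Recomputes the GL level for the RX and TX ring groups and programs the
 * vector with the larger of the two values, so the shared interrupt is not
 * throttled below what either direction needs.
 */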
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	u16 rx_int_gl, tx_int_gl;
	bool rx, tx;

	rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
	tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
	rx_int_gl = tqp_vector->rx_group.int_gl;
	tx_int_gl = tqp_vector->tx_group.int_gl;
	if (rx && tx) {
		if (rx_int_gl > tx_int_gl) {
			tqp_vector->tx_group.int_gl = rx_int_gl;
			tqp_vector->tx_group.flow_level =
				tqp_vector->rx_group.flow_level;
			hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
		} else {
			tqp_vector->rx_group.int_gl = tx_int_gl;
			tqp_vector->rx_group.flow_level =
				tqp_vector->tx_group.flow_level;
			hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
		}
	}
}
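
/* hns3_nic_common_poll - NAPI poll handler shared by all rings of a vector
 * TX rings are cleaned with the full budget since TX completion is cheap;
 * the budget is then split evenly (but never below 1) across the RX rings.
 * NAPI is only completed and the vector unmasked once both directions are
 * done.
 */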
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group) {
		if (!hns3_clean_tx_ring(ring, budget))
			clean_complete = false;
	}

	/* make sure the rx ring budget is not smaller than 1 */
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	napi_complete(napi);
	hns3_update_new_int_gl(tqp_vector);
	hns3_mask_vector_irq(tqp_vector, 1);

	return rx_pkt_total;
}
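
/* hns3_get_vector_ring_chain - describe the rings attached to a vector
 * Builds a chain of hnae3_ring_chain_node entries, one per TX and RX ring
 * hanging off this vector, which the AE layer consumes when mapping or
 * unmapping the rings to the vector's interrupt.
 */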
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				return -ENOMEM;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				     HNAE3_RING_TYPE_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			return -ENOMEM;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);
		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;
}
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}
static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;
}
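
/* hns3_nic_init_vector_data - bind TQPs to interrupt vectors
 * Requests up to min(num_online_cpus(), tqp_num) vectors from the AE layer,
 * spreads the TX and RX rings round-robin across them, builds each vector's
 * ring chain for the hardware mapping and registers its NAPI context.
 */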
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* The RSS size, number of online CPUs and vector_num should match */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector)
		return -ENOMEM;

	for (i = 0; i < tqp_num; i++) {
		u16 vector_i = i % vector_num;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		tqp_vector->idx = vector_i;
		tqp_vector->mask_addr = vector[vector_i].io_addr;
		tqp_vector->vector_irq = vector[vector_i].vector;
		tqp_vector->num_tqps++;

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
	}

	for (i = 0; i < vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		hns3_vector_gl_rl_init(tqp_vector);
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			goto out;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			goto out;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}
static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			return ret;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
			(void)irq_set_affinity_hint(
				priv->tqp_vector[i].vector_irq,
				NULL);
			free_irq(priv->tqp_vector[i].vector_irq,
				 &priv->tqp_vector[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;

		netif_napi_del(&priv->tqp_vector[i].napi);
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);

	return 0;
}
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = q->desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}
static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret)
		return ret;

	return 0;
}
static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
				       sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}
static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}
static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}
static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}
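
/* hns3_init_ring_hw - program one ring's base address, buffer size and length
 * The 64-bit descriptor DMA address is split across two 32-bit registers;
 * the high half is written as (dma >> 31) >> 1, which keeps the shift valid
 * even when dma_addr_t is only 32 bits wide.
 */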
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		hns3_init_ring_hw(priv->ring_data[i].ring);

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}
int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		if (h->ae_algo->ops->reset_queue)
			h->ae_algo->ops->reset_queue(h, i);

		hns3_fini_ring(priv->ring_data[i].ring);
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);

	return 0;
}
/* Set the MAC address if one is configured, otherwise leave it to the AE
 * driver.
 */
static void hns3_init_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];

	if (h->ae_algo->ops->get_mac_addr) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
}
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}
}
static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
				   handle->kinfo.num_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->last_reset_time = jiffies;
	priv->reset_level = HNAE3_FUNC_RESET;
	priv->tx_timeout_count = 0;

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	hns3_dcbnl_setup(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return ret;

out_reg_netdev_fail:
out_init_ring_data:
	(void)hns3_nic_uninit_vector_data(priv);
	priv->ring_data = NULL;
out_init_vector_data:
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}
static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret)
		netdev_err(netdev, "uninit vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	priv->ring_data = NULL;

	free_netdev(netdev);
}
static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}
static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	bool if_running;
	int ret;
	u8 i;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	if_running = netif_running(ndev);

	ret = netdev_set_num_tc(ndev, tc);
	if (ret)
		return ret;

	if (if_running)
		(void)hns3_nic_net_stop(ndev);

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
		kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
	if (ret)
		goto err_out;

	if (tc <= 1) {
		netdev_reset_tc(ndev);
		goto out;
	}

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];

		if (tc_info->enable)
			netdev_set_tc_queue(ndev,
					    tc_info->tc,
					    tc_info->tqp_count,
					    tc_info->tqp_offset);
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(ndev, i,
				       kinfo->prio_tc[i]);
	}

out:
	ret = hns3_nic_set_real_num_queue(ndev);

err_out:
	if (if_running)
		(void)hns3_nic_net_open(ndev);

	return ret;
}
static void hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	/* go through and sync uc_addr entries to the device */
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_sync(ndev, ha->addr);

	/* go through and sync mc_addr entries to the device */
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_mc_sync(ndev, ha->addr);
}
static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clean_tx_ring(ring, ring->desc_num);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
	}
}
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (!netif_running(ndev))
		return -EIO;

	return hns3_nic_net_stop(ndev);
}
static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_up(kinfo->netdev);
		if (ret) {
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}

		priv->last_reset_time = jiffies;
	}

	return ret;
}
static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	priv->reset_level = 1;
	hns3_init_mac_addr(netdev);
	hns3_nic_set_rx_mode(netdev);
	hns3_recover_hw_addr(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		return ret;

	ret = hns3_init_all_ring(priv);
	if (ret) {
		hns3_nic_uninit_vector_data(priv);
		priv->ring_data = NULL;
	}

	return ret;
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_clear_all_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		netdev_err(netdev, "uninit vector error\n");
		return ret;
	}

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	priv->ring_data = NULL;

	return ret;
}
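
/* hns3_reset_notify - dispatch a reset notification from the AE layer
 * Routes HNAE3_{DOWN,UP,INIT,UNINIT}_CLIENT events to the matching enet
 * handlers above.
 */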
static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}
static u16 hns3_get_max_available_channels(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u16 free_tqps, max_rss_size, max_tqps;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
	max_tqps = h->kinfo.num_tc * max_rss_size;

	return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
}
static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
	if (ret)
		return ret;

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_uninit_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_put_ring;

	return 0;

err_put_ring:
	hns3_put_ring_config(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
	return ret;
}
static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
	return (new_tqp_num / num_tc) * num_tc;
}
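
/* hns3_set_channels - handle an ethtool channel count change
 * Validates the requested combined queue count, rounds it down to a multiple
 * of the TC count, tears down the current rings and vectors, and applies the
 * new TQP number through the AE layer, reverting to the old count if that
 * fails.
 */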
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	bool if_running = netif_running(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EOPNOTSUPP;

	if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
	    new_tqp_num < kinfo->num_tc) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from %d to %d",
			kinfo->num_tc,
			hns3_get_max_available_channels(netdev));
		return -EINVAL;
	}

	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
	if (kinfo->num_tqps == new_tqp_num)
		return 0;

	if (if_running)
		(void)hns3_nic_net_stop(netdev);

	hns3_clear_all_ring(h);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		dev_err(&netdev->dev,
			"Unbind vector with tqp fail, nothing is changed");
		return ret;
	}

	hns3_uninit_all_ring(priv);

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_modify_tqp_num(netdev, new_tqp_num);
	if (ret) {
		ret = hns3_modify_tqp_num(netdev, org_tqp_num);
		if (ret) {
			/* If reverting to the old tqp num failed, a fatal
			 * error occurred
			 */
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, Revert to old tqp num");
	}

	if (if_running)
		(void)hns3_nic_net_open(netdev);

	return ret;
}
static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};
/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);
/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);
MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");