// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hns3_enet.h"
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}
static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}
static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
	 * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |=  HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}
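/* GL0 below is the RX gap-limiter register and GL1 the TX one; both are
 * addressed relative to the vector's mask_addr.
 */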
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	/* Default: disable RL */
	h->kinfo.int_rl_setting = 0;

	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}
static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int i, ret;

	if (kinfo->num_tc <= 1) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, kinfo->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!kinfo->tc_info[i].enable)
				continue;

			netdev_set_tc_queue(netdev,
					    kinfo->tc_info[i].tc,
					    kinfo->tc_info[i].tqp_count,
					    kinfo->tc_info[i].tqp_offset);
		}
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}
static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 free_tqps, max_rss_size, max_tqps;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
	max_tqps = h->kinfo.num_tc * max_rss_size;

	return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
}
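/* Bring-up order: reset the rings, request the per-vector IRQs, enable the
 * vectors, then start the hardware through the ae_dev ops; on failure the
 * already-enabled vectors and IRQs are rolled back.
 */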
static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	return 0;

out_start_err:
	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}
static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(netdev, i,
				       kinfo->prio_tc[i]);
	}

	priv->ae_handle->last_reset_time = jiffies;
	return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);

	hns3_clear_all_ring(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}
static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}
static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->set_promisc_mode) {
		if (netdev->flags & IFF_PROMISC)
			h->ae_algo->ops->set_promisc_mode(h, true, true);
		else if (netdev->flags & IFF_ALLMULTI)
			h->ae_algo->ops->set_promisc_mode(h, false, true);
		else
			h->ae_algo->ops->set_promisc_mode(h, false, false);
	}
	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
		netdev_err(netdev, "sync uc address fail\n");
	if (netdev->flags & IFF_MULTICAST) {
		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
			netdev_err(netdev, "sync mc address fail\n");

		if (h->ae_algo->ops->update_mta_status)
			h->ae_algo->ops->update_mta_status(h);
	}
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type &
		    SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type &
		    SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae3_set_bit(*type_cs_vlan_tso,
		      HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union l3_hdr_info l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}
static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
			HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet */
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae3_set_field(*ol_type_vlan_len_msec,
				HNS3_TXD_L2LEN_M,
				HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
				HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header */
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes */
			ol4_len = l2_hdr - l4.hdr;
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
					ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes */
			l2_len = l3.hdr - l2_hdr;
			hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
					HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
			 * txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
			HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware,
		 * txbd len field is not filled.
		 */
		return;
	}
}
/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
 * and it is udp packet, which has a dest port as the IANA assigned.
 * the hardware is expected to do the checksum offload, but the
 * hardware will not do the checksum offload when udp dest port is
 * 4789.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT	4789
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
		return false;

	skb_checksum_help(skb);

	return true;
}
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4) */
	if (skb->encapsulation) {
		/* define outer network header type */
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_CSUM);
			else
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4) */
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate csum
			 * when TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already,
			 * the driver calculates l4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb))
			break;

		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb tunnel packet if hardware doesn't support it,
		 * because hardware can't calculate csum when TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already,
		 * the driver calculates l4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
			HNS3_TXD_BDTYPE_S, 0);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
				struct hns3_enet_ring *tx_ring,
				u32 *inner_vlan_flag,
				u32 *out_vlan_flag,
				u16 *inner_vtag,
				u16 *out_vtag)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->tqp->handle->kinfo.netdev->features &
	    NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		u16 vlan_tag;

		vlan_tag = skb_vlan_tag_get(skb);
		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;

		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q)) {
			hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			*out_vtag = vlan_tag;
		} else {
			hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			*inner_vtag = vlan_tag;
		}
	} else if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vhdr;
		int rc;

		rc = skb_cow_head(skb, 0);
		if (rc < 0)
			return rc;
		vhdr = (struct vlan_ethhdr *)skb->data;
		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
					<< HNS3_TX_VLAN_PRIO_SHIFT);
	}

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  enum hns_desc_type type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	u32 ol_type_vlan_len_msec = 0;
	u16 bdtp_fe_sc_vld_ra_ri = 0;
	u32 type_cs_vlan_tso = 0;
	struct sk_buff *skb;
	u16 inner_vtag = 0;
	u16 out_vtag = 0;
	__be16 protocol;
	u32 paylen = 0;
	u16 mss = 0;
	u8 ol4_proto;
	u8 il4_proto;
	int ret;

	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	/* now, fill the descriptor */
	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);
	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;
		paylen = skb->len;

		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
					   &ol_type_vlan_len_msec,
					   &inner_vtag, &out_vtag);
		if (unlikely(ret))
			return ret;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;

			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
			if (ret)
				return ret;
			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
					    &type_cs_vlan_tso,
					    &ol_type_vlan_len_msec);
			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
						      &type_cs_vlan_tso,
						      &ol_type_vlan_len_msec);
			if (ret)
				return ret;

			ret = hns3_set_tso(skb, &paylen, &mss,
					   &type_cs_vlan_tso);
			if (ret)
				return ret;
		}

		/* Set txbd */
		desc->tx.ol_type_vlan_len_msec =
			cpu_to_le32(ol_type_vlan_len_msec);
		desc->tx.type_cs_vlan_tso_len =
			cpu_to_le32(type_cs_vlan_tso);
		desc->tx.paylen = cpu_to_le32(paylen);
		desc->tx.mss = cpu_to_le16(mss);
		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
	}

	/* move ring pointer to next */
	ring_ptr_move_fw(ring, next_to_use);

	return 0;
}
static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
			      int size, dma_addr_t dma, int frag_end,
			      enum hns_desc_type type)
{
	unsigned int frag_buf_num;
	unsigned int k;
	int sizeoflast;
	int ret;

	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		ret = hns3_fill_desc(ring, priv,
				     (k == frag_buf_num - 1) ?
				     sizeoflast : HNS3_MAX_BD_SIZE,
				     dma + HNS3_MAX_BD_SIZE * k,
				     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				     (type == DESC_TYPE_SKB && !k) ?
				     DESC_TYPE_SKB : DESC_TYPE_PAGE);
		if (ret)
			return ret;
	}

	return 0;
}
static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
				   struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct skb_frag_struct *frag;
	int bdnum_for_frag;
	int frag_num;
	int buf_num;
	int size;
	int i;

	size = skb_headlen(skb);
	buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		bdnum_for_frag =
			(size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
		if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
			return -ENOMEM;

		buf_num += bdnum_for_frag;
	}

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;
	return 0;
}
static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
				  struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	int buf_num;

	/* No. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}
static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					 ring->desc_cb[ring->next_to_use].length,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);
	}
}
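/* Transmit path: map the linear head and each fragment, fill one BD (or
 * several, via the TSO variant) per piece, then ring the doorbell once for
 * the whole burst.  On a mapping or descriptor error the already-mapped
 * pieces are unwound with hns_nic_dma_unmap() and the skb is dropped.
 */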
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_nic_ring_data *ring_data =
		&tx_ring_data(priv, skb->queue_mapping);
	struct hns3_enet_ring *ring = ring_data->ring;
	struct device *dev = priv->dev;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int next_to_use_head;
	int next_to_use_frag;
	dma_addr_t dma;
	int buf_num;
	int seg_num;
	int size;
	int ret;
	int i;

	/* Prefetch the data used later */
	prefetch(skb->data);

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_busy++;
		u64_stats_update_end(&ring->syncp);

		goto out_net_tx_busy;
	case -ENOMEM:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		netdev_err(netdev, "no memory to xmit!\n");

		goto out_err_tx_ok;
	default:
		break;
	}

	/* No. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	/* Fill the first part */
	size = skb_headlen(skb);

	next_to_use_head = ring->next_to_use;

	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(netdev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}

	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
				  DESC_TYPE_SKB);
	if (ret)
		goto head_dma_map_err;

	next_to_use_frag = ring->next_to_use;
	/* Fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto frag_dma_map_err;
		}
		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
					  seg_num - 1 == i ? 1 : 0,
					  DESC_TYPE_PAGE);

		if (ret)
			goto frag_dma_map_err;
	}

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* Commit all data before submit */

	hnae3_queue_xmit(ring->tqp, buf_num);

	return NETDEV_TX_OK;

frag_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_frag);

head_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(netdev, ring_data->queue_index);
	smp_mb(); /* Commit all data before submit */

	return NETDEV_TX_BUSY;
}
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    mac_addr->sa_data);
		return 0;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}
static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int ret;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = hns3_fill_desc_tso;
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
		} else {
			priv->ops.fill_desc = hns3_fill_desc;
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
		}
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    h->ae_algo->ops->enable_vlan_filter) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			h->ae_algo->ops->enable_vlan_filter(h, true);
		else
			h->ae_algo->ops->enable_vlan_filter(h, false);
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
		else
			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);

		if (ret)
			return ret;
	}

	netdev->features = features;
	return 0;
}
static void hns3_nic_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hnae3_handle *handle = priv->ae_handle;
	struct hns3_enet_ring *ring;
	unsigned int start;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	u64 tx_drop = 0;
	u64 rx_drop = 0;

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	handle->ae_algo->ops->update_stats(handle, &netdev->stats);

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = priv->ring_data[idx].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
			tx_drop += ring->stats.tx_busy;
			tx_drop += ring->stats.sw_err_cnt;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = priv->ring_data[idx + queue_num].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
			rx_drop += ring->stats.non_vld_descs;
			rx_drop += ring->stats.err_pkt_len;
			rx_drop += ring->stats.l2_err;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = netdev->stats.rx_errors;
	stats->multicast = netdev->stats.multicast;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = netdev->stats.tx_errors;
	stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
	stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
	u8 tc = mqprio_qopt->qopt.num_tc;
	u16 mode = mqprio_qopt->mode;
	u8 hw = mqprio_qopt->qopt.hw;
	bool if_running;
	int ret;

	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
		return -EOPNOTSUPP;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if_running = netif_running(netdev);
	if (if_running)
		hns3_nic_net_stop(netdev);

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
	if (ret)
		goto out;

	ret = hns3_nic_set_real_num_queue(netdev);

out:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	return hns3_setup_tc(dev, type_data);
}
static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	if (!ret)
		set_bit(vid, priv->active_vlans);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	if (!ret)
		clear_bit(vid, priv->active_vlans);

	return ret;
}
static void hns3_restore_vlan(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u16 vid;
	int ret;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
		if (ret)
			netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
				    vid, ret);
	}
}
static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	bool if_running = netif_running(netdev);
	int ret;

	if (!h->ae_algo->ops->set_mtu)
		return -EOPNOTSUPP;

	/* if this was called with netdev up then bring netdevice down */
	if (if_running)
		(void)hns3_nic_net_stop(netdev);

	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
	if (ret) {
		netdev_err(netdev, "failed to change MTU in hardware %d\n",
			   ret);
		return ret;
	}

	netdev->mtu = new_mtu;

	/* if the netdev was running earlier, bring it up again */
	if (if_running && hns3_nic_net_open(netdev))
		ret = -EINVAL;

	return ret;
}
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *tx_ring = NULL;
	int timeout_queue = 0;
	int hw_head, hw_tail;
	int i;

	/* Find the stopped queue the same way the stack does */
	for (i = 0; i < ndev->real_num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(ndev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + ndev->watchdog_timeo))) {
			timeout_queue = i;
			break;
		}
	}

	if (i == ndev->num_tx_queues) {
		netdev_info(ndev,
			    "no netdev TX timeout queue found, timeout count: %llu\n",
			    priv->tx_timeout_count);
		return false;
	}

	tx_ring = priv->ring_data[timeout_queue].ring;

	hw_head = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_HEAD_REG);
	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_TAIL_REG);
	netdev_info(ndev,
		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
		    priv->tx_timeout_count,
		    timeout_queue,
		    tx_ring->next_to_use,
		    tx_ring->next_to_clean,
		    hw_head,
		    hw_tail,
		    readl(tx_ring->tqp_vector->mask_addr));

	return true;
}
static void hns3_nic_net_timeout(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;

	if (!hns3_get_tx_timeo_queue_info(ndev))
		return;

	priv->tx_timeout_count++;

	if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
		return;

	/* request the reset */
	if (h->ae_algo->ops->reset_event)
		h->ae_algo->ops->reset_event(h);
}
static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
};
static bool hns3_is_phys_func(struct pci_dev *pdev)
{
	u32 dev_id = pdev->device;

	switch (dev_id) {
	case HNAE3_DEV_ID_GE:
	case HNAE3_DEV_ID_25GE:
	case HNAE3_DEV_ID_25GE_RDMA:
	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_50GE_RDMA:
	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
		return true;
	case HNAE3_DEV_ID_100G_VF:
	case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
		return false;
	default:
		dev_warn(&pdev->dev, "un-recognized pci device-id %d",
			 dev_id);
	}

	return false;
}
static void hns3_disable_sriov(struct pci_dev *pdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(pdev);
}
/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
			      GFP_KERNEL);
	if (!ae_dev) {
		ret = -ENOMEM;
		return ret;
	}

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	pci_set_drvdata(pdev, ae_dev);

	hnae3_register_ae_dev(ae_dev);

	return 0;
}
/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
		hns3_disable_sriov(pdev);

	hnae3_unregister_ae_dev(ae_dev);
}
/**
 * hns3_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
		dev_warn(&pdev->dev, "Can not config SRIOV\n");
		return -EINVAL;
	}

	if (num_vfs) {
		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret)
			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
		else
			return num_vfs;
	} else if (!pci_vfs_assigned(pdev)) {
		pci_disable_sriov(pdev);
	} else {
		dev_warn(&pdev->dev,
			 "Unable to free VFs because some are assigned to VMs.\n");
	}

	return 0;
}
static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
	.sriov_configure = hns3_pci_sriov_configure,
};
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	unsigned int order = hnae3_page_order(ring);
	struct page *p;

	p = dev_alloc_pages(order);
	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf  = page_address(p);
	cb->length = hnae3_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}
static void hns3_free_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (!HNAE3_IS_TX_RING(ring))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}
static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}
static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
			      struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}
static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc[i].addr = 0;
}
static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	struct hns3_desc_cb *cb = &ring->desc_cb[i];

	if (!ring->desc_cb[i].dma)
		return;

	hns3_buffer_detach(ring, i);
	hns3_free_buffer(ring, cb);
}
static void hns3_free_buffers(struct hns3_enet_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hns3_free_buffer_detach(ring, i);
}
/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	hns3_free_buffers(ring);

	if (ring->desc) {
		dma_free_coherent(ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}
static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
					 &ring->desc_dma_addr,
					 GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}
static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
				   struct hns3_desc_cb *cb)
{
	int ret;

	ret = hns3_alloc_buffer(ring, cb);
	if (ret)
		goto out;

	ret = hns3_map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	hns3_free_buffer(ring, cb);
out:
	return ret;
}
static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
{
	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}
/* Allocate memory for raw pkg, and map with dma */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hns3_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hns3_free_buffer_detach(ring, j);
	return ret;
}
/* detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
				struct hns3_desc_cb *res_cb)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
	ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
		+ ring->desc_cb[i].page_offset);
	ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
				      int *pkts)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach */
	hns3_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}
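/* The head reported by hardware is trusted only if it lies inside the
 * half-open interval (next_to_clean, next_to_use], taking ring wrap-around
 * into account.
 */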
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct netdev_queue *dev_queue;
	int bytes, pkts;
	int head;

	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
	rmb(); /* Make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean)
		return true; /* no data to poll */

	if (unlikely(!is_valid_clean_head(ring, head))) {
		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);

		u64_stats_update_begin(&ring->syncp);
		ring->stats.io_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return true;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean && budget) {
		hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* Issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
		budget--;
	}

	ring->tqp_vector->tx_group.total_bytes += bytes;
	ring->tqp_vector->tx_group.total_packets += pkts;

	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_bytes += bytes;
	ring->stats.tx_pkts += pkts;
	u64_stats_update_end(&ring->syncp);

	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(pkts && netif_carrier_ok(netdev) &&
		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}

	return !!budget;
}
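/* Number of RX descriptors already handed back to the stack and waiting to
 * be refilled: (next_to_clean - next_to_use) modulo desc_num.
 */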
static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
{
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb res_cbs;
	int i, ret;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.reuse_pg_cnt++;
			u64_stats_update_end(&ring->syncp);

			hns3_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);

				netdev_err(ring->tqp->handle->kinfo.netdev,
					   "hnae reserve buffer map failed.\n");
				break;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* Make sure all data has been written before submit */
	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
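/* RX page reuse: when a page holds two hardware buffers, flip page_offset to
 * the other half and keep the page; for larger pages advance page_offset
 * buffer by buffer until the last one in the page has been consumed.
 */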
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	truesize = hnae3_buf_size(ring);

	last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* Avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* If we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* Flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* Move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* Bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     struct hns3_desc *desc)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int l3_type, l4_type;
	u32 bd_base_info;
	int ol4_type;
	u32 l234info;

	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if hardware has done checksum */
	if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
		return;

	if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
		     hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
		     hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
		     hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
		netdev_err(netdev, "L3/L4 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l3l4_csum_err++;
		u64_stats_update_end(&ring->syncp);

		return;
	}

	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
				  HNS3_RXD_L3ID_S);
	l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
				  HNS3_RXD_L4ID_S);

	ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
				   HNS3_RXD_OL4ID_S);
	switch (ol4_type) {
	case HNS3_OL4_TYPE_MAC_IN_UDP:
	case HNS3_OL4_TYPE_NVGRE:
		skb->csum_level = 1;
		/* fall through */
	case HNS3_OL4_TYPE_NO_TUN:
		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
		if ((l3_type == HNS3_L3_TYPE_IPV4 ||
		     l3_type == HNS3_L3_TYPE_IPV6) &&
		    (l4_type == HNS3_L4_TYPE_UDP ||
		     l4_type == HNS3_L4_TYPE_TCP ||
		     l4_type == HNS3_L4_TYPE_SCTP))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}
}
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	napi_gro_receive(&ring->tqp_vector->napi, skb);
}
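/* HW revision 0x20 does not report which tag was stripped, so probe the
 * outer tag first and fall back to the inner one; later revisions encode the
 * stripped-tag position in the l234info field.
 */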
static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
			       struct hns3_desc *desc, u32 l234info)
{
	struct pci_dev *pdev = ring->tqp->handle->pdev;
	u16 vlan_tag;

	if (pdev->revision == 0x20) {
		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		if (!(vlan_tag & VLAN_VID_MASK))
			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);

		return vlan_tag;
	}

#define HNS3_STRP_OUTER_VLAN	0x1
#define HNS3_STRP_INNER_VLAN	0x2

	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
				HNS3_RXD_STRP_TAGP_S)) {
	case HNS3_STRP_OUTER_VLAN:
		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		break;
	case HNS3_STRP_INNER_VLAN:
		vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
		break;
	default:
		vlan_tag = 0;
		break;
	}

	return vlan_tag;
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb, int *out_bnum)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	struct sk_buff *skb;
	unsigned char *va;
	u32 bd_base_info;
	int pull_len;
	u32 l234info;
	int length;
	int bnum;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	length = le16_to_cpu(desc->rx.size);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

	/* Check valid BD */
	if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
		return -EFAULT;

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* Prefetch first cache line of first page
	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
	 * line size is 64B so need to prefetch twice to make it 128B. But in
	 * actual we can have greater size of caches with 128B Level 1 cache
	 * lines. In such a case, single fetch would suffice to cache in the
	 * relevant part of the header.
	 */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
					HNS3_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(netdev, "alloc rx skb fail\n");

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -ENOMEM;
	}

	prefetchw(skb->data);

	bnum = 1;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* We can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* This page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);
	} else {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.seg_pkt_cnt++;
		u64_stats_update_end(&ring->syncp);

		pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);

		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];
			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
			hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
			bnum++;
		}
	}

	*out_bnum = bnum;

	l234info = le32_to_cpu(desc->rx.l234_info);

	/* Based on hw strategy, the tag offloaded will be stored at
	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
	 * in one layer tag case.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
		if (vlan_tag & VLAN_VID_MASK)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);
	}

	if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
		netdev_err(netdev, "truncated pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
		netdev_err(netdev, "L2 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l2_err++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;
	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += skb->len;

	hns3_rx_checksum(ring, skb, desc);
	return 0;
}
int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring);
	struct sk_buff *skb = NULL;
	int num, bnum = 0;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring);
		}

		/* Poll one pkt */
		err = hns3_handle_rx_bd(ring, &skb, &bnum);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* Skip the err pkt */
			recv_pkts++;
			continue;
		}

		/* Do update ip stack process */
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);

		recv_pkts++;
	}

out:
	/* Make sure all data has been written before submit */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}

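/* hns3_get_new_int_gl - re-evaluate the interrupt gap (GL) value for a ring
 * group based on its byte and packet rate since the last adaptation pass.
 * Returns true when the caller should program the new GL value to hardware.
 */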
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
	struct hns3_enet_tqp_vector *tqp_vector =
				ring_group->ring->tqp_vector;
	enum hns3_flow_level_range new_flow_level;
	int packets_per_msecs;
	int bytes_per_msecs;
	u32 time_passed_ms;
	u16 new_int_gl;

	if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

	/* Simple throttle rate management
	 * 0-10MB/s   lower     (50000 ints/s)
	 * 10-20MB/s   middle    (20000 ints/s)
	 * 20-1249MB/s high      (18000 ints/s)
	 * > 40000pps  ultra     (8000 ints/s)
	 */
	new_flow_level = ring_group->coal.flow_level;
	new_int_gl = ring_group->coal.int_gl;
	time_passed_ms =
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);

	if (!time_passed_ms)
		return false;

	do_div(ring_group->total_packets, time_passed_ms);
	packets_per_msecs = ring_group->total_packets;

	do_div(ring_group->total_bytes, time_passed_ms);
	bytes_per_msecs = ring_group->total_bytes;

#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

#define HNS3_RX_ULTRA_PACKET_RATE 40

	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	    &tqp_vector->rx_group == ring_group)
		new_flow_level = HNS3_FLOW_ULTRA;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;
	if (new_int_gl != ring_group->coal.int_gl) {
		ring_group->coal.int_gl = new_int_gl;
		return true;
	}

	return false;
}

static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	bool rx_update, tx_update;

	if (tqp_vector->int_adapt_down > 0) {
		tqp_vector->int_adapt_down--;
		return;
	}

	if (rx_group->coal.gl_adapt_enable) {
		rx_update = hns3_get_new_int_gl(rx_group);
		if (rx_update)
			hns3_set_vector_coalesce_rx_gl(tqp_vector,
						       rx_group->coal.int_gl);
	}

	if (tx_group->coal.gl_adapt_enable) {
		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
		if (tx_update)
			hns3_set_vector_coalesce_tx_gl(tqp_vector,
						       tx_group->coal.int_gl);
	}

	tqp_vector->last_jiffies = jiffies;
	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
}

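/* hns3_nic_common_poll - NAPI poll routine shared by all TQP vectors.
 * TX rings are cleaned first with the full budget, then the budget is
 * split across the vector's RX rings. Once all work is done the adaptive
 * interrupt coalescing is updated and the vector interrupt is unmasked.
 */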
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group) {
		if (!hns3_clean_tx_ring(ring, budget))
			clean_complete = false;
	}

	/* make sure rx ring budget not smaller than 1 */
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	napi_complete(napi);
	hns3_update_new_int_gl(tqp_vector);
	hns3_mask_vector_irq(tqp_vector, 1);

	return rx_pkt_total;
}

static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_TX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				return -ENOMEM;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				      HNAE3_RING_TYPE_TX);
			hnae3_set_field(chain->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S,
					HNAE3_RING_GL_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			return -ENOMEM;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;
}

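/* hns3_nic_init_vector_data - bind TX/RX rings to TQP vectors.
 * Rings are distributed round-robin over the allocated vectors; each
 * vector's ring chain is then mapped to its interrupt through the AE ops
 * and a NAPI context is registered for it.
 */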
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret = 0;
	u16 i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (ret)
			return ret;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

	return ret;
}

static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_gl_rl_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}

static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}

static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			return ret;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
			(void)irq_set_affinity_hint(
				priv->tqp_vector[i].vector_irq,
				NULL);
			free_irq(priv->tqp_vector[i].vector_irq,
				 &priv->tqp_vector[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}

	return 0;
}

static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return ret;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
	return 0;
}

static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = q->desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}

static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret)
		return ret;

	return 0;
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
				       sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

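/* hns3_init_ring_hw - program a ring's base address, buffer size type and
 * BD number into the corresponding RX or TX queue registers.
 */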
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}

int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		if (h->ae_algo->ops->reset_queue)
			h->ae_algo->ops->reset_queue(h, i);

		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	return 0;
}

/* Set the MAC address if one is configured, or else leave it to the AE driver */
static void hns3_init_mac_addr(struct net_device *netdev, bool init)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];

	if (h->ae_algo->ops->get_mac_addr && init) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
}

static void hns3_uninit_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->rm_uc_addr)
		h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
}

static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}
}

static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
				   hns3_get_max_available_channels(handle));
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->ae_handle->reset_level = HNAE3_NONE_RESET;
	priv->ae_handle->last_reset_time = jiffies;
	priv->tx_timeout_count = 0;

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev, true);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	hns3_dcbnl_setup(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return ret;

out_reg_netdev_fail:
out_init_ring_data:
	(void)hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring_data = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}

static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	hns3_force_clear_all_rx_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret)
		netdev_err(netdev, "uninit vector error\n");

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	priv->ring_data = NULL;

	hns3_uninit_mac_addr(netdev);

	free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}

static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	bool if_running;
	int ret;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	if_running = netif_running(ndev);

	if (if_running)
		(void)hns3_nic_net_stop(ndev);

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
		kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
	if (ret)
		goto err_out;

	ret = hns3_nic_set_real_num_queue(ndev);

err_out:
	if (if_running)
		(void)hns3_nic_net_open(ndev);

	return ret;
}

static void hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	/* go through and sync uc_addr entries to the device */
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_sync(ndev, ha->addr);

	/* go through and sync mc_addr entries to the device */
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_mc_sync(ndev, ha->addr);
}

static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean);
		ring_ptr_move_fw(ring, next_to_clean);
	}
}

static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);
				/* if alloc new buffer fail, exit directly
				 * and reclear in up flow.
				 */
				netdev_warn(ring->tqp->handle->kinfo.netdev,
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use,
					    &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	return 0;
}

static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *ring;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_force_clear_rx_ring(ring);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		hns3_clear_rx_ring(ring);
	}
}

int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		h->ae_algo->ops->reset_queue(h, i);
		hns3_init_ring_hw(priv->ring_data[i].ring);

		/* We need to clear tx ring here because self test will
		 * use the ring and will not run down before up
		 */
		hns3_clear_tx_ring(priv->ring_data[i].ring);
		priv->ring_data[i].ring->next_to_clean = 0;
		priv->ring_data[i].ring->next_to_use = 0;

		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We can not know the hardware head and tail when this
		 * function is called in reset flow, so we reuse all desc.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	return 0;
}

static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	int ret = 0;

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_up(kinfo->netdev);
		if (ret) {
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}
		handle->last_reset_time = jiffies;
	}

	return ret;
}

static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_init_mac_addr(netdev, false);
	hns3_nic_set_rx_mode(netdev);
	hns3_recover_hw_addr(netdev);

	/* Hardware table is only cleared when the PF resets */
	if (!(handle->flags & HNAE3_SUPPORT_VF))
		hns3_restore_vlan(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		return ret;

	ret = hns3_init_all_ring(priv);
	if (ret) {
		hns3_nic_uninit_vector_data(priv);
		priv->ring_data = NULL;
	}

	return ret;
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_force_clear_all_rx_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		netdev_err(netdev, "uninit vector error\n");
		return ret;
	}

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_uninit_mac_addr(netdev);

	return ret;
}

static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}

static void hns3_restore_coal(struct hns3_nic_priv *priv,
			      struct hns3_enet_coalesce *tx,
			      struct hns3_enet_coalesce *rx)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
		       sizeof(struct hns3_enet_coalesce));
	}
}

static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
			       struct hns3_enet_coalesce *tx,
			       struct hns3_enet_coalesce *rx)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
	if (ret)
		return ret;

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_alloc_vector;

	hns3_restore_coal(priv, tx, rx);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_uninit_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_put_ring;

	return 0;

err_put_ring:
	hns3_put_ring_config(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_alloc_vector:
	hns3_nic_dealloc_vector_data(priv);
	return ret;
}

static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
	return (new_tqp_num / num_tc) * num_tc;
}

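/* hns3_set_channels - ethtool -L handler.
 * Only the combined channel count can be changed. The existing vectors and
 * rings are torn down, the new TQP number is applied through the AE layer,
 * and the rings and vectors are rebuilt; on failure the old TQP number is
 * restored. The vector 0 coalesce settings are saved and restored across
 * the change.
 */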
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hns3_enet_coalesce tx_coal, rx_coal;
	bool if_running = netif_running(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EOPNOTSUPP;

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < kinfo->num_tc) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from %d to %d",
			kinfo->num_tc,
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
	if (kinfo->num_tqps == new_tqp_num)
		return 0;

	if (if_running)
		hns3_nic_net_stop(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		dev_err(&netdev->dev,
			"Unbind vector with tqp fail, nothing is changed");
		return ret;
	}

	/* Changing the tqp num may also change the vector num,
	 * ethtool only support setting and querying one coal
	 * configuration for now, so save the vector 0 coal
	 * configuration here in order to restore it.
	 */
	memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);
	hns3_put_ring_config(priv);

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
	if (ret) {
		ret = hns3_modify_tqp_num(netdev, org_tqp_num,
					  &tx_coal, &rx_coal);
		if (ret) {
			/* If revert to old tqp failed, fatal error occurred */
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, Revert to old tqp num");
	}

	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}

static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
MODULE_VERSION(HNS3_MOD_VERSION);