/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
static irqreturn_t hns3_irq_handle(int irq, void *dev)
{
	struct hns3_enet_tqp_vector *tqp_vector = dev;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}
static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}
static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of
	 * interrupts-per-second. GL and RL (Rate Limiter) are two ways to
	 * achieve interrupt coalescing.
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}
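/* Note added for clarity: GL and RL are complementary coalescing knobs.
 * GL enforces a minimum gap between interrupts per direction (GL0 for Rx,
 * GL1 for Tx in the helpers below), while RL caps the overall interrupt
 * rate of the vector. RL is only armed above when self-adaptive GL is
 * disabled for both groups, so a fixed rate limit and adaptive GL do not
 * fight each other.
 */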
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	/* Default: disable RL */
	h->kinfo.int_rl_setting = 0;

	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}
static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int ret;

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}
static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 free_tqps, max_rss_size, max_tqps;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
	max_tqps = h->kinfo.num_tc * max_rss_size;

	return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
}
static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	return 0;

out_start_err:
	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}
static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	priv->ae_handle->last_reset_time = jiffies;
	return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);

	hns3_clear_all_ring(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}
static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}
static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}
static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}
static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}
static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->set_promisc_mode) {
		if (netdev->flags & IFF_PROMISC)
			h->ae_algo->ops->set_promisc_mode(h, true, true);
		else if (netdev->flags & IFF_ALLMULTI)
			h->ae_algo->ops->set_promisc_mode(h, false, true);
		else
			h->ae_algo->ops->set_promisc_mode(h, false, false);
	}
	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
		netdev_err(netdev, "sync uc address fail\n");
	if (netdev->flags & IFF_MULTICAST) {
		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
			netdev_err(netdev, "sync mc address fail\n");

		if (h->ae_algo->ops->update_mta_status)
			h->ae_algo->ops->update_mta_status(h);
	}
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type &
		    SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type &
		    SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae3_set_bit(*type_cs_vlan_tso,
		      HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}
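/* Clarifying note: the csum_replace_by_diff() call above is what the
 * "remove payload length from inner pseudo checksum" comment refers to.
 * For TSO the hardware rebuilds the pseudo-header per segment using each
 * segment's own length, so the template TCP checksum must not already
 * account for the full super-packet's L4 length (skb->len - l4_offset).
 */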
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}
static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
			HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet */
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae3_set_field(*ol_type_vlan_len_msec,
				HNS3_TXD_L2LEN_M,
				HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
				HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header */
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes */
			ol4_len = l2_hdr - l4.hdr;
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
					ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes */
			l2_len = l3.hdr - l2_hdr;
			hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
					HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
			 * the txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
			HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	if (l4_proto == IPPROTO_TCP)
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S, l4.tcp->doff);
	else if (l4_proto == IPPROTO_SCTP)
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct sctphdr) >> 2));
	else if (l4_proto == IPPROTO_UDP)
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct udphdr) >> 2));
	else {
		/* skb packet types not supported by hardware,
		 * the txbd len field is not filled.
		 */
		return;
	}
}
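/* Worked example (illustrative): the BD length fields above are in
 * hardware units, not bytes. A plain 14-byte Ethernet header is written
 * as l2_len >> 1 = 7 (2-byte units), a 20-byte IPv4 header as
 * l3_len >> 2 = 5 (4-byte units), and the TCP header length is reported
 * via doff, which is already expressed in 4-byte words.
 */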
/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
 * and the packet is UDP with the IANA-assigned VXLAN destination port,
 * the hardware is expected to do the checksum offload, but it will not
 * do so when the UDP destination port is 4789.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT	4789
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
		return false;

	skb_checksum_help(skb);

	return true;
}
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4) */
	if (skb->encapsulation) {
		/* define outer network header type */
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_CSUM);
			else
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4) */
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate the
			 * csum when doing TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already,
			 * the driver calculates the l4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);

		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
				HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb))
			break;

		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
				HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M, HNS3_TXD_L4T_S,
				HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb tunnel packet if hardware doesn't support it,
		 * because hardware can't calculate the csum when doing TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already,
		 * the driver calculates the l4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
			HNS3_TXD_BDTYPE_S, 0);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
				struct hns3_enet_ring *tx_ring,
				u32 *inner_vlan_flag,
				u32 *out_vlan_flag,
				u16 *inner_vtag,
				u16 *out_vtag)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->tqp->handle->kinfo.netdev->features &
	    NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		u16 vlan_tag;

		vlan_tag = skb_vlan_tag_get(skb);
		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;

		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q)) {
			hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			*out_vtag = vlan_tag;
		} else {
			hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			*inner_vtag = vlan_tag;
		}
	} else if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vhdr;
		int rc;

		rc = skb_cow_head(skb, 0);
		if (rc < 0)
			return rc;
		vhdr = (struct vlan_ethhdr *)skb->data;
		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
					<< HNS3_TX_VLAN_PRIO_SHIFT);
	}

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  enum hns_desc_type type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	u32 ol_type_vlan_len_msec = 0;
	u16 bdtp_fe_sc_vld_ra_ri = 0;
	u32 type_cs_vlan_tso = 0;
	struct sk_buff *skb;
	u16 inner_vtag = 0;
	u16 out_vtag = 0;
	__be16 protocol;
	u32 paylen = 0;
	u16 mss = 0;
	u8 ol4_proto;
	u8 il4_proto;
	int ret;

	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	/* now, fill the descriptor */
	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);
	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;
		paylen = skb->len;

		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
					   &ol_type_vlan_len_msec,
					   &inner_vtag, &out_vtag);
		if (ret)
			return ret;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;

			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
			if (ret)
				return ret;
			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
					    &type_cs_vlan_tso,
					    &ol_type_vlan_len_msec);
			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
						      &type_cs_vlan_tso,
						      &ol_type_vlan_len_msec);
			if (ret)
				return ret;

			ret = hns3_set_tso(skb, &paylen, &mss,
					   &type_cs_vlan_tso);
			if (ret)
				return ret;
		}

		desc->tx.ol_type_vlan_len_msec =
			cpu_to_le32(ol_type_vlan_len_msec);
		desc->tx.type_cs_vlan_tso_len =
			cpu_to_le32(type_cs_vlan_tso);
		desc->tx.paylen = cpu_to_le32(paylen);
		desc->tx.mss = cpu_to_le16(mss);
		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
	}

	/* move ring pointer to next */
	ring_ptr_move_fw(ring, next_to_use);

	return 0;
}
static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
			      int size, dma_addr_t dma, int frag_end,
			      enum hns_desc_type type)
{
	unsigned int frag_buf_num;
	unsigned int k;
	int sizeoflast;
	int ret;

	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		ret = hns3_fill_desc(ring, priv,
				     (k == frag_buf_num - 1) ?
				     sizeoflast : HNS3_MAX_BD_SIZE,
				     dma + HNS3_MAX_BD_SIZE * k,
				     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				     (type == DESC_TYPE_SKB && !k) ?
				     DESC_TYPE_SKB : DESC_TYPE_PAGE);
		if (ret)
			return ret;
	}

	return 0;
}
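/* Worked example (illustrative; the real limit is HNS3_MAX_BD_SIZE from
 * hns3_enet.h): with a per-BD limit of 8192 bytes, a 20000-byte buffer
 * gives frag_buf_num = 3 and sizeoflast = 3616, so it is emitted as BDs
 * of 8192, 8192 and 3616 bytes. Only the last BD of the last fragment
 * carries the frag_end flag, and only the first BD of the head buffer
 * keeps DESC_TYPE_SKB; the rest are DESC_TYPE_PAGE.
 */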
static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
				   struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct skb_frag_struct *frag;
	int bdnum_for_frag;
	int frag_num;
	int buf_num;
	int size;
	int i;

	size = skb_headlen(skb);
	buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		bdnum_for_frag =
			(size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
		if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
			return -ENOMEM;

		buf_num += bdnum_for_frag;
	}

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;
	return 0;
}
static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
				  struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	int buf_num;

	/* No. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}
static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					 ring->desc_cb[ring->next_to_use].length,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		/* rollback one descriptor */
		ring_ptr_move_bw(ring, next_to_use);
	}
}
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_nic_ring_data *ring_data =
		&tx_ring_data(priv, skb->queue_mapping);
	struct hns3_enet_ring *ring = ring_data->ring;
	struct device *dev = priv->dev;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int next_to_use_head;
	int next_to_use_frag;
	dma_addr_t dma;
	int buf_num;
	int seg_num;
	int size;
	int ret;
	int i;

	/* Prefetch the data used later */
	prefetch(skb->data);

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_busy++;
		u64_stats_update_end(&ring->syncp);

		goto out_net_tx_busy;
	case -ENOMEM:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		netdev_err(netdev, "no memory to xmit!\n");

		goto out_err_tx_ok;
	default:
		break;
	}

	/* No. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	/* Fill the first part */
	size = skb_headlen(skb);

	next_to_use_head = ring->next_to_use;

	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(netdev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}

	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
				  DESC_TYPE_SKB);
	if (ret)
		goto head_dma_map_err;

	next_to_use_frag = ring->next_to_use;
	/* Fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto frag_dma_map_err;
		}
		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
					  seg_num - 1 == i ? 1 : 0,
					  DESC_TYPE_PAGE);

		if (ret)
			goto frag_dma_map_err;
	}

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* Commit all data before submit */

	hnae3_queue_xmit(ring->tqp, buf_num);

	return NETDEV_TX_OK;

frag_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_frag);

head_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(netdev, ring_data->queue_index);
	smp_mb(); /* Commit all data before submit */

	return NETDEV_TX_BUSY;
}
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    mac_addr->sa_data);
		return 0;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}
1190 static int hns3_nic_set_features(struct net_device
*netdev
,
1191 netdev_features_t features
)
1193 netdev_features_t changed
= netdev
->features
^ features
;
1194 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1195 struct hnae3_handle
*h
= priv
->ae_handle
;
1198 if (changed
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1199 if (features
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1200 priv
->ops
.fill_desc
= hns3_fill_desc_tso
;
1201 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tso
;
1203 priv
->ops
.fill_desc
= hns3_fill_desc
;
1204 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tx
;
1208 if ((changed
& NETIF_F_HW_VLAN_CTAG_FILTER
) &&
1209 h
->ae_algo
->ops
->enable_vlan_filter
) {
1210 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1211 h
->ae_algo
->ops
->enable_vlan_filter(h
, true);
1213 h
->ae_algo
->ops
->enable_vlan_filter(h
, false);
1216 if ((changed
& NETIF_F_HW_VLAN_CTAG_RX
) &&
1217 h
->ae_algo
->ops
->enable_hw_strip_rxvtag
) {
1218 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1219 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, true);
1221 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, false);
1227 netdev
->features
= features
;
1231 static void hns3_nic_get_stats64(struct net_device
*netdev
,
1232 struct rtnl_link_stats64
*stats
)
1234 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1235 int queue_num
= priv
->ae_handle
->kinfo
.num_tqps
;
1236 struct hnae3_handle
*handle
= priv
->ae_handle
;
1237 struct hns3_enet_ring
*ring
;
1247 if (test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
1250 handle
->ae_algo
->ops
->update_stats(handle
, &netdev
->stats
);
1252 for (idx
= 0; idx
< queue_num
; idx
++) {
1253 /* fetch the tx stats */
1254 ring
= priv
->ring_data
[idx
].ring
;
1256 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1257 tx_bytes
+= ring
->stats
.tx_bytes
;
1258 tx_pkts
+= ring
->stats
.tx_pkts
;
1259 tx_drop
+= ring
->stats
.tx_busy
;
1260 tx_drop
+= ring
->stats
.sw_err_cnt
;
1261 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1263 /* fetch the rx stats */
1264 ring
= priv
->ring_data
[idx
+ queue_num
].ring
;
1266 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1267 rx_bytes
+= ring
->stats
.rx_bytes
;
1268 rx_pkts
+= ring
->stats
.rx_pkts
;
1269 rx_drop
+= ring
->stats
.non_vld_descs
;
1270 rx_drop
+= ring
->stats
.err_pkt_len
;
1271 rx_drop
+= ring
->stats
.l2_err
;
1272 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1275 stats
->tx_bytes
= tx_bytes
;
1276 stats
->tx_packets
= tx_pkts
;
1277 stats
->rx_bytes
= rx_bytes
;
1278 stats
->rx_packets
= rx_pkts
;
1280 stats
->rx_errors
= netdev
->stats
.rx_errors
;
1281 stats
->multicast
= netdev
->stats
.multicast
;
1282 stats
->rx_length_errors
= netdev
->stats
.rx_length_errors
;
1283 stats
->rx_crc_errors
= netdev
->stats
.rx_crc_errors
;
1284 stats
->rx_missed_errors
= netdev
->stats
.rx_missed_errors
;
1286 stats
->tx_errors
= netdev
->stats
.tx_errors
;
1287 stats
->rx_dropped
= rx_drop
+ netdev
->stats
.rx_dropped
;
1288 stats
->tx_dropped
= tx_drop
+ netdev
->stats
.tx_dropped
;
1289 stats
->collisions
= netdev
->stats
.collisions
;
1290 stats
->rx_over_errors
= netdev
->stats
.rx_over_errors
;
1291 stats
->rx_frame_errors
= netdev
->stats
.rx_frame_errors
;
1292 stats
->rx_fifo_errors
= netdev
->stats
.rx_fifo_errors
;
1293 stats
->tx_aborted_errors
= netdev
->stats
.tx_aborted_errors
;
1294 stats
->tx_carrier_errors
= netdev
->stats
.tx_carrier_errors
;
1295 stats
->tx_fifo_errors
= netdev
->stats
.tx_fifo_errors
;
1296 stats
->tx_heartbeat_errors
= netdev
->stats
.tx_heartbeat_errors
;
1297 stats
->tx_window_errors
= netdev
->stats
.tx_window_errors
;
1298 stats
->rx_compressed
= netdev
->stats
.rx_compressed
;
1299 stats
->tx_compressed
= netdev
->stats
.tx_compressed
;
1302 static int hns3_setup_tc(struct net_device
*netdev
, void *type_data
)
1304 struct tc_mqprio_qopt_offload
*mqprio_qopt
= type_data
;
1305 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1306 struct hnae3_knic_private_info
*kinfo
= &h
->kinfo
;
1307 u8
*prio_tc
= mqprio_qopt
->qopt
.prio_tc_map
;
1308 u8 tc
= mqprio_qopt
->qopt
.num_tc
;
1309 u16 mode
= mqprio_qopt
->mode
;
1310 u8 hw
= mqprio_qopt
->qopt
.hw
;
1315 if (!((hw
== TC_MQPRIO_HW_OFFLOAD_TCS
&&
1316 mode
== TC_MQPRIO_MODE_CHANNEL
) || (!hw
&& tc
== 0)))
1319 if (tc
> HNAE3_MAX_TC
)
1325 if_running
= netif_running(netdev
);
1327 hns3_nic_net_stop(netdev
);
1331 ret
= (kinfo
->dcb_ops
&& kinfo
->dcb_ops
->setup_tc
) ?
1332 kinfo
->dcb_ops
->setup_tc(h
, tc
, prio_tc
) : -EOPNOTSUPP
;
1337 netdev_reset_tc(netdev
);
1339 ret
= netdev_set_num_tc(netdev
, tc
);
1343 for (i
= 0; i
< HNAE3_MAX_TC
; i
++) {
1344 if (!kinfo
->tc_info
[i
].enable
)
1347 netdev_set_tc_queue(netdev
,
1348 kinfo
->tc_info
[i
].tc
,
1349 kinfo
->tc_info
[i
].tqp_count
,
1350 kinfo
->tc_info
[i
].tqp_offset
);
1354 ret
= hns3_nic_set_real_num_queue(netdev
);
1358 hns3_nic_net_open(netdev
);
1363 static int hns3_nic_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
1366 if (type
!= TC_SETUP_QDISC_MQPRIO
)
1369 return hns3_setup_tc(dev
, type_data
);
1372 static int hns3_vlan_rx_add_vid(struct net_device
*netdev
,
1373 __be16 proto
, u16 vid
)
1375 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1376 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1379 if (h
->ae_algo
->ops
->set_vlan_filter
)
1380 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, false);
1383 set_bit(vid
, priv
->active_vlans
);
1388 static int hns3_vlan_rx_kill_vid(struct net_device
*netdev
,
1389 __be16 proto
, u16 vid
)
1391 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1392 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1395 if (h
->ae_algo
->ops
->set_vlan_filter
)
1396 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, true);
1399 clear_bit(vid
, priv
->active_vlans
);
1404 static void hns3_restore_vlan(struct net_device
*netdev
)
1406 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1410 for_each_set_bit(vid
, priv
->active_vlans
, VLAN_N_VID
) {
1411 ret
= hns3_vlan_rx_add_vid(netdev
, htons(ETH_P_8021Q
), vid
);
1413 netdev_warn(netdev
, "Restore vlan: %d filter, ret:%d\n",
1418 static int hns3_ndo_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
1419 u8 qos
, __be16 vlan_proto
)
1421 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1424 if (h
->ae_algo
->ops
->set_vf_vlan_filter
)
1425 ret
= h
->ae_algo
->ops
->set_vf_vlan_filter(h
, vf
, vlan
,
1431 static int hns3_nic_change_mtu(struct net_device
*netdev
, int new_mtu
)
1433 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1434 bool if_running
= netif_running(netdev
);
1437 if (!h
->ae_algo
->ops
->set_mtu
)
1440 /* if this was called with netdev up then bring netdevice down */
1442 (void)hns3_nic_net_stop(netdev
);
1446 ret
= h
->ae_algo
->ops
->set_mtu(h
, new_mtu
);
1448 netdev_err(netdev
, "failed to change MTU in hardware %d\n",
1453 netdev
->mtu
= new_mtu
;
1455 /* if the netdev was running earlier, bring it up again */
1456 if (if_running
&& hns3_nic_net_open(netdev
))
1462 static bool hns3_get_tx_timeo_queue_info(struct net_device
*ndev
)
1464 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1465 struct hns3_enet_ring
*tx_ring
= NULL
;
1466 int timeout_queue
= 0;
1467 int hw_head
, hw_tail
;
1470 /* Find the stopped queue the same way the stack does */
1471 for (i
= 0; i
< ndev
->real_num_tx_queues
; i
++) {
1472 struct netdev_queue
*q
;
1473 unsigned long trans_start
;
1475 q
= netdev_get_tx_queue(ndev
, i
);
1476 trans_start
= q
->trans_start
;
1477 if (netif_xmit_stopped(q
) &&
1479 (trans_start
+ ndev
->watchdog_timeo
))) {
1485 if (i
== ndev
->num_tx_queues
) {
1487 "no netdev TX timeout queue found, timeout count: %llu\n",
1488 priv
->tx_timeout_count
);
1492 tx_ring
= priv
->ring_data
[timeout_queue
].ring
;
1494 hw_head
= readl_relaxed(tx_ring
->tqp
->io_base
+
1495 HNS3_RING_TX_RING_HEAD_REG
);
1496 hw_tail
= readl_relaxed(tx_ring
->tqp
->io_base
+
1497 HNS3_RING_TX_RING_TAIL_REG
);
1499 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1500 priv
->tx_timeout_count
,
1502 tx_ring
->next_to_use
,
1503 tx_ring
->next_to_clean
,
1506 readl(tx_ring
->tqp_vector
->mask_addr
));
1511 static void hns3_nic_net_timeout(struct net_device
*ndev
)
1513 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1514 struct hnae3_handle
*h
= priv
->ae_handle
;
1516 if (!hns3_get_tx_timeo_queue_info(ndev
))
1519 priv
->tx_timeout_count
++;
1521 if (time_before(jiffies
, (h
->last_reset_time
+ ndev
->watchdog_timeo
)))
1524 /* request the reset */
1525 if (h
->ae_algo
->ops
->reset_event
)
1526 h
->ae_algo
->ops
->reset_event(h
);
1529 static const struct net_device_ops hns3_nic_netdev_ops
= {
1530 .ndo_open
= hns3_nic_net_open
,
1531 .ndo_stop
= hns3_nic_net_stop
,
1532 .ndo_start_xmit
= hns3_nic_net_xmit
,
1533 .ndo_tx_timeout
= hns3_nic_net_timeout
,
1534 .ndo_set_mac_address
= hns3_nic_net_set_mac_address
,
1535 .ndo_change_mtu
= hns3_nic_change_mtu
,
1536 .ndo_set_features
= hns3_nic_set_features
,
1537 .ndo_get_stats64
= hns3_nic_get_stats64
,
1538 .ndo_setup_tc
= hns3_nic_setup_tc
,
1539 .ndo_set_rx_mode
= hns3_nic_set_rx_mode
,
1540 .ndo_vlan_rx_add_vid
= hns3_vlan_rx_add_vid
,
1541 .ndo_vlan_rx_kill_vid
= hns3_vlan_rx_kill_vid
,
1542 .ndo_set_vf_vlan
= hns3_ndo_set_vf_vlan
,
1545 static bool hns3_is_phys_func(struct pci_dev
*pdev
)
1547 u32 dev_id
= pdev
->device
;
1550 case HNAE3_DEV_ID_GE
:
1551 case HNAE3_DEV_ID_25GE
:
1552 case HNAE3_DEV_ID_25GE_RDMA
:
1553 case HNAE3_DEV_ID_25GE_RDMA_MACSEC
:
1554 case HNAE3_DEV_ID_50GE_RDMA
:
1555 case HNAE3_DEV_ID_50GE_RDMA_MACSEC
:
1556 case HNAE3_DEV_ID_100G_RDMA_MACSEC
:
1558 case HNAE3_DEV_ID_100G_VF
:
1559 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF
:
1562 dev_warn(&pdev
->dev
, "un-recognized pci device-id %d",
1569 static void hns3_disable_sriov(struct pci_dev
*pdev
)
1571 /* If our VFs are assigned we cannot shut down SR-IOV
1572 * without causing issues, so just leave the hardware
1573 * available but disabled
1575 if (pci_vfs_assigned(pdev
)) {
1576 dev_warn(&pdev
->dev
,
1577 "disabling driver while VFs are assigned\n");
1581 pci_disable_sriov(pdev
);
1584 /* hns3_probe - Device initialization routine
1585 * @pdev: PCI device information struct
1586 * @ent: entry in hns3_pci_tbl
1588 * hns3_probe initializes a PF identified by a pci_dev structure.
1589 * The OS initialization, configuring of the PF private structure,
1590 * and a hardware reset occur.
1592 * Returns 0 on success, negative on failure
1594 static int hns3_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1596 struct hnae3_ae_dev
*ae_dev
;
1599 ae_dev
= devm_kzalloc(&pdev
->dev
, sizeof(*ae_dev
),
1606 ae_dev
->pdev
= pdev
;
1607 ae_dev
->flag
= ent
->driver_data
;
1608 ae_dev
->dev_type
= HNAE3_DEV_KNIC
;
1609 pci_set_drvdata(pdev
, ae_dev
);
1611 hnae3_register_ae_dev(ae_dev
);
1616 /* hns3_remove - Device removal routine
1617 * @pdev: PCI device information struct
1619 static void hns3_remove(struct pci_dev
*pdev
)
1621 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1623 if (hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))
1624 hns3_disable_sriov(pdev
);
1626 hnae3_unregister_ae_dev(ae_dev
);
1630 * hns3_pci_sriov_configure
1631 * @pdev: pointer to a pci_dev structure
1632 * @num_vfs: number of VFs to allocate
1634 * Enable or change the number of VFs. Called when the user updates the number
1637 static int hns3_pci_sriov_configure(struct pci_dev
*pdev
, int num_vfs
)
1641 if (!(hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))) {
1642 dev_warn(&pdev
->dev
, "Can not config SRIOV\n");
1647 ret
= pci_enable_sriov(pdev
, num_vfs
);
1649 dev_err(&pdev
->dev
, "SRIOV enable failed %d\n", ret
);
1652 } else if (!pci_vfs_assigned(pdev
)) {
1653 pci_disable_sriov(pdev
);
1655 dev_warn(&pdev
->dev
,
1656 "Unable to free VFs because some are assigned to VMs.\n");
1662 static struct pci_driver hns3_driver
= {
1663 .name
= hns3_driver_name
,
1664 .id_table
= hns3_pci_tbl
,
1665 .probe
= hns3_probe
,
1666 .remove
= hns3_remove
,
1667 .sriov_configure
= hns3_pci_sriov_configure
,
1670 /* set default feature to hns3 */
1671 static void hns3_set_default_feature(struct net_device
*netdev
)
1673 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
1675 netdev
->hw_enc_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1676 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1677 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1678 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1679 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1681 netdev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
1683 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
1685 netdev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1686 NETIF_F_HW_VLAN_CTAG_FILTER
|
1687 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1688 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1689 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1690 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1691 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1693 netdev
->vlan_features
|=
1694 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM
|
1695 NETIF_F_SG
| NETIF_F_GSO
| NETIF_F_GRO
|
1696 NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1697 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1698 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1700 netdev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1701 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1702 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1703 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1704 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1705 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1708 static int hns3_alloc_buffer(struct hns3_enet_ring
*ring
,
1709 struct hns3_desc_cb
*cb
)
1711 unsigned int order
= hnae3_page_order(ring
);
1714 p
= dev_alloc_pages(order
);
1719 cb
->page_offset
= 0;
1721 cb
->buf
= page_address(p
);
1722 cb
->length
= hnae3_page_size(ring
);
1723 cb
->type
= DESC_TYPE_PAGE
;
1728 static void hns3_free_buffer(struct hns3_enet_ring
*ring
,
1729 struct hns3_desc_cb
*cb
)
1731 if (cb
->type
== DESC_TYPE_SKB
)
1732 dev_kfree_skb_any((struct sk_buff
*)cb
->priv
);
1733 else if (!HNAE3_IS_TX_RING(ring
))
1734 put_page((struct page
*)cb
->priv
);
1735 memset(cb
, 0, sizeof(*cb
));
1738 static int hns3_map_buffer(struct hns3_enet_ring
*ring
, struct hns3_desc_cb
*cb
)
1740 cb
->dma
= dma_map_page(ring_to_dev(ring
), cb
->priv
, 0,
1741 cb
->length
, ring_to_dma_dir(ring
));
1743 if (dma_mapping_error(ring_to_dev(ring
), cb
->dma
))
1749 static void hns3_unmap_buffer(struct hns3_enet_ring
*ring
,
1750 struct hns3_desc_cb
*cb
)
1752 if (cb
->type
== DESC_TYPE_SKB
)
1753 dma_unmap_single(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1754 ring_to_dma_dir(ring
));
1756 dma_unmap_page(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1757 ring_to_dma_dir(ring
));
1760 static void hns3_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1762 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1763 ring
->desc
[i
].addr
= 0;
1766 static void hns3_free_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1768 struct hns3_desc_cb
*cb
= &ring
->desc_cb
[i
];
1770 if (!ring
->desc_cb
[i
].dma
)
1773 hns3_buffer_detach(ring
, i
);
1774 hns3_free_buffer(ring
, cb
);
1777 static void hns3_free_buffers(struct hns3_enet_ring
*ring
)
1781 for (i
= 0; i
< ring
->desc_num
; i
++)
1782 hns3_free_buffer_detach(ring
, i
);
1785 /* free desc along with its attached buffer */
1786 static void hns3_free_desc(struct hns3_enet_ring
*ring
)
1788 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
1790 hns3_free_buffers(ring
);
1793 dma_free_coherent(ring_to_dev(ring
), size
,
1794 ring
->desc
, ring
->desc_dma_addr
);
1799 static int hns3_alloc_desc(struct hns3_enet_ring
*ring
)
1801 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
1803 ring
->desc
= dma_zalloc_coherent(ring_to_dev(ring
), size
,
1804 &ring
->desc_dma_addr
,
1812 static int hns3_reserve_buffer_map(struct hns3_enet_ring
*ring
,
1813 struct hns3_desc_cb
*cb
)
1817 ret
= hns3_alloc_buffer(ring
, cb
);
1821 ret
= hns3_map_buffer(ring
, cb
);
1828 hns3_free_buffer(ring
, cb
);
1833 static int hns3_alloc_buffer_attach(struct hns3_enet_ring
*ring
, int i
)
1835 int ret
= hns3_reserve_buffer_map(ring
, &ring
->desc_cb
[i
]);
1840 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
1845 /* Allocate memory for raw pkg, and map with dma */
1846 static int hns3_alloc_ring_buffers(struct hns3_enet_ring
*ring
)
1850 for (i
= 0; i
< ring
->desc_num
; i
++) {
1851 ret
= hns3_alloc_buffer_attach(ring
, i
);
1853 goto out_buffer_fail
;
1859 for (j
= i
- 1; j
>= 0; j
--)
1860 hns3_free_buffer_detach(ring
, j
);
1864 /* detach a in-used buffer and replace with a reserved one */
1865 static void hns3_replace_buffer(struct hns3_enet_ring
*ring
, int i
,
1866 struct hns3_desc_cb
*res_cb
)
1868 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1869 ring
->desc_cb
[i
] = *res_cb
;
1870 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
1871 ring
->desc
[i
].rx
.bd_base_info
= 0;
1874 static void hns3_reuse_buffer(struct hns3_enet_ring
*ring
, int i
)
1876 ring
->desc_cb
[i
].reuse_flag
= 0;
1877 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
1878 + ring
->desc_cb
[i
].page_offset
);
1879 ring
->desc
[i
].rx
.bd_base_info
= 0;
1882 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring
*ring
, int *bytes
,
1885 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
1887 (*pkts
) += (desc_cb
->type
== DESC_TYPE_SKB
);
1888 (*bytes
) += desc_cb
->length
;
1889 /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
1890 hns3_free_buffer_detach(ring
, ring
->next_to_clean
);
1892 ring_ptr_move_fw(ring
, next_to_clean
);
1895 static int is_valid_clean_head(struct hns3_enet_ring
*ring
, int h
)
1897 int u
= ring
->next_to_use
;
1898 int c
= ring
->next_to_clean
;
1900 if (unlikely(h
> ring
->desc_num
))
1903 return u
> c
? (h
> c
&& h
<= u
) : (h
> c
|| h
<= u
);
1906 bool hns3_clean_tx_ring(struct hns3_enet_ring
*ring
, int budget
)
1908 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
1909 struct netdev_queue
*dev_queue
;
1913 head
= readl_relaxed(ring
->tqp
->io_base
+ HNS3_RING_TX_RING_HEAD_REG
);
1914 rmb(); /* Make sure head is ready before touch any data */
1916 if (is_ring_empty(ring
) || head
== ring
->next_to_clean
)
1917 return true; /* no data to poll */
1919 if (unlikely(!is_valid_clean_head(ring
, head
))) {
1920 netdev_err(netdev
, "wrong head (%d, %d-%d)\n", head
,
1921 ring
->next_to_use
, ring
->next_to_clean
);
1923 u64_stats_update_begin(&ring
->syncp
);
1924 ring
->stats
.io_err_cnt
++;
1925 u64_stats_update_end(&ring
->syncp
);
1931 while (head
!= ring
->next_to_clean
&& budget
) {
1932 hns3_nic_reclaim_one_desc(ring
, &bytes
, &pkts
);
1933 /* Issue prefetch for next Tx descriptor */
1934 prefetch(&ring
->desc_cb
[ring
->next_to_clean
]);
1938 ring
->tqp_vector
->tx_group
.total_bytes
+= bytes
;
1939 ring
->tqp_vector
->tx_group
.total_packets
+= pkts
;
1941 u64_stats_update_begin(&ring
->syncp
);
1942 ring
->stats
.tx_bytes
+= bytes
;
1943 ring
->stats
.tx_pkts
+= pkts
;
1944 u64_stats_update_end(&ring
->syncp
);
1946 dev_queue
= netdev_get_tx_queue(netdev
, ring
->tqp
->tqp_index
);
1947 netdev_tx_completed_queue(dev_queue
, pkts
, bytes
);
1949 if (unlikely(pkts
&& netif_carrier_ok(netdev
) &&
1950 (ring_space(ring
) > HNS3_MAX_BD_PER_PKT
))) {
1951 /* Make sure that anybody stopping the queue after this
1952 * sees the new next_to_clean.
1955 if (netif_tx_queue_stopped(dev_queue
)) {
1956 netif_tx_wake_queue(dev_queue
);
1957 ring
->stats
.restart_queue
++;
1964 static int hns3_desc_unused(struct hns3_enet_ring
*ring
)
1966 int ntc
= ring
->next_to_clean
;
1967 int ntu
= ring
->next_to_use
;
1969 return ((ntc
>= ntu
) ? 0 : ring
->desc_num
) + ntc
- ntu
;
1973 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring
*ring
, int cleand_count
)
1975 struct hns3_desc_cb
*desc_cb
;
1976 struct hns3_desc_cb res_cbs
;
1979 for (i
= 0; i
< cleand_count
; i
++) {
1980 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
1981 if (desc_cb
->reuse_flag
) {
1982 u64_stats_update_begin(&ring
->syncp
);
1983 ring
->stats
.reuse_pg_cnt
++;
1984 u64_stats_update_end(&ring
->syncp
);
1986 hns3_reuse_buffer(ring
, ring
->next_to_use
);
1988 ret
= hns3_reserve_buffer_map(ring
, &res_cbs
);
1990 u64_stats_update_begin(&ring
->syncp
);
1991 ring
->stats
.sw_err_cnt
++;
1992 u64_stats_update_end(&ring
->syncp
);
1994 netdev_err(ring
->tqp
->handle
->kinfo
.netdev
,
1995 "hnae reserve buffer map failed.\n");
1998 hns3_replace_buffer(ring
, ring
->next_to_use
, &res_cbs
);
2001 ring_ptr_move_fw(ring
, next_to_use
);
2004 wmb(); /* Make all data has been write before submit */
2005 writel_relaxed(i
, ring
->tqp
->io_base
+ HNS3_RING_RX_RING_HEAD_REG
);
2008 static void hns3_nic_reuse_page(struct sk_buff
*skb
, int i
,
2009 struct hns3_enet_ring
*ring
, int pull_len
,
2010 struct hns3_desc_cb
*desc_cb
)
2012 struct hns3_desc
*desc
;
2017 twobufs
= ((PAGE_SIZE
< 8192) &&
2018 hnae3_buf_size(ring
) == HNS3_BUFFER_SIZE_2048
);
2020 desc
= &ring
->desc
[ring
->next_to_clean
];
2021 size
= le16_to_cpu(desc
->rx
.size
);
2023 truesize
= hnae3_buf_size(ring
);
2026 last_offset
= hnae3_page_size(ring
) - hnae3_buf_size(ring
);
2028 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, desc_cb
->page_offset
+ pull_len
,
2029 size
- pull_len
, truesize
);
2031 /* Avoid re-using remote pages,flag default unreuse */
2032 if (unlikely(page_to_nid(desc_cb
->priv
) != numa_node_id()))
2036 /* If we are only owner of page we can reuse it */
2037 if (likely(page_count(desc_cb
->priv
) == 1)) {
2038 /* Flip page offset to other buffer */
2039 desc_cb
->page_offset
^= truesize
;
2041 desc_cb
->reuse_flag
= 1;
2042 /* bump ref count on page before it is given*/
2043 get_page(desc_cb
->priv
);
2048 /* Move offset up to the next cache line */
2049 desc_cb
->page_offset
+= truesize
;
2051 if (desc_cb
->page_offset
<= last_offset
) {
2052 desc_cb
->reuse_flag
= 1;
2053 /* Bump ref count on page before it is given*/
2054 get_page(desc_cb
->priv
);
2058 static void hns3_rx_checksum(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
,
2059 struct hns3_desc
*desc
)
2061 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2062 int l3_type
, l4_type
;
2067 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2068 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2070 skb
->ip_summed
= CHECKSUM_NONE
;
2072 skb_checksum_none_assert(skb
);
2074 if (!(netdev
->features
& NETIF_F_RXCSUM
))
2077 /* check if hardware has done checksum */
2078 if (!hnae3_get_bit(bd_base_info
, HNS3_RXD_L3L4P_B
))
2081 if (unlikely(hnae3_get_bit(l234info
, HNS3_RXD_L3E_B
) ||
2082 hnae3_get_bit(l234info
, HNS3_RXD_L4E_B
) ||
2083 hnae3_get_bit(l234info
, HNS3_RXD_OL3E_B
) ||
2084 hnae3_get_bit(l234info
, HNS3_RXD_OL4E_B
))) {
2085 netdev_err(netdev
, "L3/L4 error pkt\n");
2086 u64_stats_update_begin(&ring
->syncp
);
2087 ring
->stats
.l3l4_csum_err
++;
2088 u64_stats_update_end(&ring
->syncp
);
2093 l3_type
= hnae3_get_field(l234info
, HNS3_RXD_L3ID_M
,
2095 l4_type
= hnae3_get_field(l234info
, HNS3_RXD_L4ID_M
,
2098 ol4_type
= hnae3_get_field(l234info
, HNS3_RXD_OL4ID_M
,
2101 case HNS3_OL4_TYPE_MAC_IN_UDP
:
2102 case HNS3_OL4_TYPE_NVGRE
:
2103 skb
->csum_level
= 1;
2104 case HNS3_OL4_TYPE_NO_TUN
:
2105 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2106 if ((l3_type
== HNS3_L3_TYPE_IPV4
||
2107 l3_type
== HNS3_L3_TYPE_IPV6
) &&
2108 (l4_type
== HNS3_L4_TYPE_UDP
||
2109 l4_type
== HNS3_L4_TYPE_TCP
||
2110 l4_type
== HNS3_L4_TYPE_SCTP
))
2111 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2116 static void hns3_rx_skb(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
)
2118 napi_gro_receive(&ring
->tqp_vector
->napi
, skb
);
2121 static u16
hns3_parse_vlan_tag(struct hns3_enet_ring
*ring
,
2122 struct hns3_desc
*desc
, u32 l234info
)
2124 struct pci_dev
*pdev
= ring
->tqp
->handle
->pdev
;
2127 if (pdev
->revision
== 0x20) {
2128 vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
2129 if (!(vlan_tag
& VLAN_VID_MASK
))
2130 vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
2135 #define HNS3_STRP_OUTER_VLAN 0x1
2136 #define HNS3_STRP_INNER_VLAN 0x2
2138 switch (hnae3_get_field(l234info
, HNS3_RXD_STRP_TAGP_M
,
2139 HNS3_RXD_STRP_TAGP_S
)) {
2140 case HNS3_STRP_OUTER_VLAN
:
2141 vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
2143 case HNS3_STRP_INNER_VLAN
:
2144 vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
2154 static int hns3_handle_rx_bd(struct hns3_enet_ring
*ring
,
2155 struct sk_buff
**out_skb
, int *out_bnum
)
2157 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2158 struct hns3_desc_cb
*desc_cb
;
2159 struct hns3_desc
*desc
;
2160 struct sk_buff
*skb
;
2168 desc
= &ring
->desc
[ring
->next_to_clean
];
2169 desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
2173 length
= le16_to_cpu(desc
->rx
.size
);
2174 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2176 /* Check valid BD */
2177 if (unlikely(!hnae3_get_bit(bd_base_info
, HNS3_RXD_VLD_B
)))
2180 va
= (unsigned char *)desc_cb
->buf
+ desc_cb
->page_offset
;
2182 /* Prefetch first cache line of first page
2183 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
2184 * line size is 64B so need to prefetch twice to make it 128B. But in
2185 * actual we can have greater size of caches with 128B Level 1 cache
2186 * lines. In such a case, single fetch would suffice to cache in the
2187 * relevant part of the header.
2190 #if L1_CACHE_BYTES < 128
2191 prefetch(va
+ L1_CACHE_BYTES
);
2194 skb
= *out_skb
= napi_alloc_skb(&ring
->tqp_vector
->napi
,
2196 if (unlikely(!skb
)) {
2197 netdev_err(netdev
, "alloc rx skb fail\n");
2199 u64_stats_update_begin(&ring
->syncp
);
2200 ring
->stats
.sw_err_cnt
++;
2201 u64_stats_update_end(&ring
->syncp
);
2206 prefetchw(skb
->data
);
2209 if (length
<= HNS3_RX_HEAD_SIZE
) {
2210 memcpy(__skb_put(skb
, length
), va
, ALIGN(length
, sizeof(long)));
2212 /* We can reuse buffer as-is, just make sure it is local */
2213 if (likely(page_to_nid(desc_cb
->priv
) == numa_node_id()))
2214 desc_cb
->reuse_flag
= 1;
2215 else /* This page cannot be reused so discard it */
2216 put_page(desc_cb
->priv
);
2218 ring_ptr_move_fw(ring
, next_to_clean
);
2220 u64_stats_update_begin(&ring
->syncp
);
2221 ring
->stats
.seg_pkt_cnt
++;
2222 u64_stats_update_end(&ring
->syncp
);
2224 pull_len
= eth_get_headlen(va
, HNS3_RX_HEAD_SIZE
);
2226 memcpy(__skb_put(skb
, pull_len
), va
,
2227 ALIGN(pull_len
, sizeof(long)));
2229 hns3_nic_reuse_page(skb
, 0, ring
, pull_len
, desc_cb
);
2230 ring_ptr_move_fw(ring
, next_to_clean
);
2232 while (!hnae3_get_bit(bd_base_info
, HNS3_RXD_FE_B
)) {
2233 desc
= &ring
->desc
[ring
->next_to_clean
];
2234 desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
2235 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2236 hns3_nic_reuse_page(skb
, bnum
, ring
, 0, desc_cb
);
2237 ring_ptr_move_fw(ring
, next_to_clean
);
2243 /* Based on hw strategy, the tag offloaded will be stored at
2244 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
2245 * in one layer tag case.
2247 if (netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
) {
2250 vlan_tag
= le16_to_cpu(desc
->rx
.ot_vlan_tag
);
2251 if (!(vlan_tag
& VLAN_VID_MASK
))
2252 vlan_tag
= le16_to_cpu(desc
->rx
.vlan_tag
);
2253 if (vlan_tag
& VLAN_VID_MASK
)
2254 __vlan_hwaccel_put_tag(skb
,
2259 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2261 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2263 /* Based on hw strategy, the tag offloaded will be stored at
2264 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
2265 * in one layer tag case.
2267 if (netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
) {
2270 vlan_tag
= hns3_parse_vlan_tag(ring
, desc
, l234info
);
2271 if (vlan_tag
& VLAN_VID_MASK
)
2272 __vlan_hwaccel_put_tag(skb
,
2277 if (unlikely(!hnae3_get_bit(bd_base_info
, HNS3_RXD_VLD_B
))) {
2278 netdev_err(netdev
, "no valid bd,%016llx,%016llx\n",
2279 ((u64
*)desc
)[0], ((u64
*)desc
)[1]);
2280 u64_stats_update_begin(&ring
->syncp
);
2281 ring
->stats
.non_vld_descs
++;
2282 u64_stats_update_end(&ring
->syncp
);
2284 dev_kfree_skb_any(skb
);
2288 if (unlikely((!desc
->rx
.pkt_len
) ||
2289 hnae3_get_bit(l234info
, HNS3_RXD_TRUNCAT_B
))) {
2290 netdev_err(netdev
, "truncated pkt\n");
2291 u64_stats_update_begin(&ring
->syncp
);
2292 ring
->stats
.err_pkt_len
++;
2293 u64_stats_update_end(&ring
->syncp
);
2295 dev_kfree_skb_any(skb
);
2299 if (unlikely(hnae3_get_bit(l234info
, HNS3_RXD_L2E_B
))) {
2300 netdev_err(netdev
, "L2 error pkt\n");
2301 u64_stats_update_begin(&ring
->syncp
);
2302 ring
->stats
.l2_err
++;
2303 u64_stats_update_end(&ring
->syncp
);
2305 dev_kfree_skb_any(skb
);
2309 u64_stats_update_begin(&ring
->syncp
);
2310 ring
->stats
.rx_pkts
++;
2311 ring
->stats
.rx_bytes
+= skb
->len
;
2312 u64_stats_update_end(&ring
->syncp
);
2314 ring
->tqp_vector
->rx_group
.total_bytes
+= skb
->len
;
2316 hns3_rx_checksum(ring
, skb
, desc
);
int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring);
	struct sk_buff *skb = NULL;
	int num, bnum = 0;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring);
		}

		/* Poll one pkt */
		err = hns3_handle_rx_bd(ring, &skb, &bnum);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* Do jump the err */
			recv_pkts++;
			continue;
		}

		/* Do update ip stack process */
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);

		recv_pkts++;
	}

out:
	/* Make all data has been write before submit */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}
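
/* hns3_get_new_int_gl - adapt the interrupt GL (gap limiting) value
 * Classifies the traffic seen on @ring_group since the last update into a
 * flow level (low/mid/high/ultra) based on bytes and packets per millisecond,
 * then picks the matching interrupt rate. Returns true when the GL value
 * actually changed and must be written to hardware.
 */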
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
	struct hns3_enet_tqp_vector *tqp_vector =
					ring_group->ring->tqp_vector;
	enum hns3_flow_level_range new_flow_level;
	int packets_per_msecs;
	int bytes_per_msecs;
	u32 time_passed_ms;
	u16 new_int_gl;

	if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

	/* Simple throttlerate management
	 * 0-10MB/s    lower   (50000 ints/s)
	 * 10-20MB/s   middle  (20000 ints/s)
	 * 20-1249MB/s high    (18000 ints/s)
	 * > 40000pps  ultra   (8000 ints/s)
	 */
	new_flow_level = ring_group->coal.flow_level;
	new_int_gl = ring_group->coal.int_gl;
	time_passed_ms =
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);

	if (!time_passed_ms)
		return false;

	do_div(ring_group->total_packets, time_passed_ms);
	packets_per_msecs = ring_group->total_packets;

	do_div(ring_group->total_bytes, time_passed_ms);
	bytes_per_msecs = ring_group->total_bytes;

#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

#define HNS3_RX_ULTRA_PACKET_RATE 40

	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	    &tqp_vector->rx_group == ring_group)
		new_flow_level = HNS3_FLOW_ULTRA;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;
	if (new_int_gl != ring_group->coal.int_gl) {
		ring_group->coal.int_gl = new_int_gl;
		return true;
	}

	return false;
}
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	bool rx_update, tx_update;

	if (tqp_vector->int_adapt_down > 0) {
		tqp_vector->int_adapt_down--;
		return;
	}

	if (rx_group->coal.gl_adapt_enable) {
		rx_update = hns3_get_new_int_gl(rx_group);
		if (rx_update)
			hns3_set_vector_coalesce_rx_gl(tqp_vector,
						       rx_group->coal.int_gl);
	}

	if (tx_group->coal.gl_adapt_enable) {
		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
		if (tx_update)
			hns3_set_vector_coalesce_tx_gl(tqp_vector,
						       tx_group->coal.int_gl);
	}

	tqp_vector->last_jiffies = jiffies;
	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
}
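
/* hns3_nic_common_poll - shared NAPI handler for a TQP vector
 * Cleans every tx ring bound to the vector with the full budget, then splits
 * the budget across the rx rings. Only when all rings are fully cleaned does
 * it complete NAPI, update the adaptive GL values and re-enable the vector
 * interrupt.
 */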
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group) {
		if (!hns3_clean_tx_ring(ring, budget))
			clean_complete = false;
	}

	/* make sure rx ring budget not smaller than 1 */
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	napi_complete(napi);
	hns3_update_new_int_gl(tqp_vector);
	hns3_mask_vector_irq(tqp_vector, 1);

	return rx_pkt_total;
}
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_TX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				return -ENOMEM;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				      HNAE3_RING_TYPE_TX);
			hnae3_set_field(chain->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S,
					HNAE3_RING_GL_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			return -ENOMEM;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;
}
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;
}
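
/* hns3_nic_init_vector_data - bind rings to interrupt vectors
 * Distributes the tx/rx ring pairs round-robin over the allocated TQP
 * vectors, programs the ring-to-vector mapping through the AE ops and
 * registers one NAPI context per vector.
 */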
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret = 0;
	u16 i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (ret)
			return ret;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

	return ret;
}
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_gl_rl_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}
static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}
static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			return ret;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
			(void)irq_set_affinity_hint(
				priv->tqp_vector[i].vector_irq,
				NULL);
			free_irq(priv->tqp_vector[i].vector_irq,
				 &priv->tqp_vector[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}

	return 0;
}
static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return ret;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
	return 0;
}
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = q->desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}
static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret)
		return ret;

	return 0;
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
				       sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		if (h->ae_algo->ops->reset_queue)
			h->ae_algo->ops->reset_queue(h, i);

		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	return 0;
}
/* Set mac addr if it is configured, or leave it to the AE driver */
static void hns3_init_mac_addr(struct net_device *netdev, bool init)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];

	if (h->ae_algo->ops->get_mac_addr && init) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
}

static void hns3_uninit_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->rm_uc_addr)
		h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
}
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}
}
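
/* hns3_client_init - bring up one enet client instance
 * Allocates the net_device, wires it to the hnae3 handle, sets up rings and
 * TQP vectors, and finally registers the netdev. The error path unwinds in
 * reverse order through the out_* labels.
 */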
static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
				   hns3_get_max_available_channels(handle));
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->ae_handle->reset_level = HNAE3_NONE_RESET;
	priv->ae_handle->last_reset_time = jiffies;
	priv->tx_timeout_count = 0;

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev, true);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		goto out_get_ring_cfg;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto out_alloc_vector_data;

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto out_init_vector_data;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto out_init_ring_data;

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	hns3_dcbnl_setup(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return ret;

out_reg_netdev_fail:
out_init_ring_data:
	(void)hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring_data = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}
static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	hns3_force_clear_all_rx_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret)
		netdev_err(netdev, "uninit vector error\n");

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	priv->ring_data = NULL;

	hns3_uninit_mac_addr(netdev);

	free_netdev(netdev);
}
static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}
static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	bool if_running;
	int ret;
	u8 i;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	if_running = netif_running(ndev);

	ret = netdev_set_num_tc(ndev, tc);
	if (ret)
		return ret;

	if (if_running)
		(void)hns3_nic_net_stop(ndev);

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
		kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
	if (ret)
		goto err_out;

	if (tc <= 1) {
		netdev_reset_tc(ndev);
		goto out;
	}

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];

		if (tc_info->enable)
			netdev_set_tc_queue(ndev,
					    tc_info->tc,
					    tc_info->tqp_count,
					    tc_info->tqp_offset);
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(ndev, i,
				       kinfo->prio_tc[i]);
	}

out:
	ret = hns3_nic_set_real_num_queue(ndev);

err_out:
	if (if_running)
		(void)hns3_nic_net_open(ndev);

	return ret;
}
static void hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	/* go through and sync uc_addr entries to the device */
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_sync(ndev, ha->addr);

	/* go through and sync mc_addr entries to the device */
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_mc_sync(ndev, ha->addr);
}
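
/* Ring clearing helpers: hns3_clear_tx_ring() releases every pending tx
 * buffer between next_to_clean and next_to_use, while hns3_clear_rx_ring()
 * re-maps any rx buffer that was handed to the stack so the ring is ready
 * for reuse after a reset or self-test.
 */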
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean);
		ring_ptr_move_fw(ring, next_to_clean);
	}
}

static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);
				/* if alloc new buffer fail, exit directly
				 * and reclear in up flow.
				 */
				netdev_warn(ring->tqp->handle->kinfo.netdev,
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use,
					    &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	return 0;
}
static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *ring;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_force_clear_rx_ring(ring);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		hns3_clear_rx_ring(ring);
	}
}
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		h->ae_algo->ops->reset_queue(h, i);
		hns3_init_ring_hw(priv->ring_data[i].ring);

		/* We need to clear tx ring here because self test will
		 * use the ring and will not run down before up
		 */
		hns3_clear_tx_ring(priv->ring_data[i].ring);
		priv->ring_data[i].ring->next_to_clean = 0;
		priv->ring_data[i].ring->next_to_use = 0;

		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We can not know the hardware head and tail when this
		 * function is called in reset flow, so we reuse all desc.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	return 0;
}
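
/* Reset notification handlers: the hnae3 layer calls hns3_reset_notify()
 * around a hardware reset. DOWN/UP stop and restart the netdev, while
 * INIT/UNINIT rebuild and tear down the rings and vector mappings of the
 * client instance.
 */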
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	int ret = 0;

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_up(kinfo->netdev);
		if (ret) {
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}
		handle->last_reset_time = jiffies;
	}

	return ret;
}

static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_init_mac_addr(netdev, false);
	hns3_nic_set_rx_mode(netdev);
	hns3_recover_hw_addr(netdev);

	/* Hardware table is only clear when pf resets */
	if (!(handle->flags & HNAE3_SUPPORT_VF))
		hns3_restore_vlan(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		return ret;

	ret = hns3_init_all_ring(priv);
	if (ret) {
		hns3_nic_uninit_vector_data(priv);
		priv->ring_data = NULL;
	}

	return ret;
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_force_clear_all_rx_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		netdev_err(netdev, "uninit vector error\n");
		return ret;
	}

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	priv->ring_data = NULL;

	hns3_uninit_mac_addr(netdev);

	return ret;
}

static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}
static void hns3_restore_coal(struct hns3_nic_priv *priv,
			      struct hns3_enet_coalesce *tx,
			      struct hns3_enet_coalesce *rx)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
		       sizeof(struct hns3_enet_coalesce));
	}
}

static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
			       struct hns3_enet_coalesce *tx,
			       struct hns3_enet_coalesce *rx)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
	if (ret)
		return ret;

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_alloc_vector;

	hns3_restore_coal(priv, tx, rx);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_uninit_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_put_ring;

	return 0;

err_put_ring:
	hns3_put_ring_config(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_alloc_vector:
	hns3_nic_dealloc_vector_data(priv);
	return ret;
}
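
/* hns3_set_channels - ethtool -L handler
 * Validates the requested combined channel count, tears the current rings
 * and vectors down (saving the vector 0 coalesce settings), and rebuilds the
 * instance with the new TQP number, reverting to the old count on failure.
 */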
static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
	return (new_tqp_num / num_tc) * num_tc;
}

int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hns3_enet_coalesce tx_coal, rx_coal;
	bool if_running = netif_running(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EOPNOTSUPP;

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < kinfo->num_tc) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from %d to %d",
			kinfo->num_tc,
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
	if (kinfo->num_tqps == new_tqp_num)
		return 0;

	if (if_running)
		hns3_nic_net_stop(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		dev_err(&netdev->dev,
			"Unbind vector with tqp fail, nothing is changed");
		goto open_netdev;
	}

	/* Changing the tqp num may also change the vector num,
	 * ethtool only support setting and querying one coal
	 * configuration for now, so save the vector 0's coal
	 * configuration here in order to restore it.
	 */
	memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);
	hns3_put_ring_config(priv);

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
	if (ret) {
		ret = hns3_modify_tqp_num(netdev, org_tqp_num,
					  &tx_coal, &rx_coal);
		if (ret) {
			/* If revert to old tqp failed, fatal error occurred */
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, Revert to old tqp num");
	}

open_netdev:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}
static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
MODULE_VERSION(HNS3_MOD_VERSION);