// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static void hns3_remove_hw_addr(struct net_device *netdev);

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}
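
/* The hard IRQ handler only schedules NAPI for this vector; all Tx/Rx
 * ring processing is deferred to the NAPI poll callback registered for
 * the tqp_vector.
 */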
/* This callback function is used to set affinity changes to the irq affinity
 * masks when the irq_set_affinity_notifier function is used.
 */
static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
					 const cpumask_t *mask)
{
	struct hns3_enet_tqp_vector *tqp_vectors =
		container_of(notify, struct hns3_enet_tqp_vector,
			     affinity_notify);

	tqp_vectors->affinity_mask = *mask;
}

static void hns3_nic_irq_affinity_release(struct kref *ref)
{
}
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity notifier and affinity mask */
		irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->affinity_notify.notify =
					hns3_nic_irq_affinity_notify;
		tqp_vectors->affinity_notify.release =
					hns3_nic_irq_affinity_release;
		irq_set_affinity_notifier(tqp_vectors->vector_irq,
					  &tqp_vectors->affinity_notify);
		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}
static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
	 * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |=  HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
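
/* The GL (gap) values passed to the two helpers above are expressed in
 * microseconds and converted to register units by hns3_gl_usec_to_reg();
 * GL0 holds the Rx gap and GL1 the Tx gap for this vector, while the RL
 * register caps the overall interrupt rate.
 */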
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}
static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int i, ret;

	if (kinfo->num_tc <= 1) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, kinfo->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!kinfo->tc_info[i].enable)
				continue;

			netdev_set_tc_queue(netdev,
					    kinfo->tc_info[i].tc,
					    kinfo->tc_info[i].tqp_count,
					    kinfo->tc_info[i].tqp_offset);
		}
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}
static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.num_tc;

	return min_t(u16, rss_size, max_rss_size);
}
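
/* Channels are limited to the TQPs available per TC and further capped
 * by the RSS table size reported by the ae_dev.
 */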
static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg |= BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}
static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* enable rcb */
	for (j = 0; j < h->kinfo.num_tqps; j++)
		hns3_tqp_enable(h->kinfo.tqp[j]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	return 0;

out_start_err:
	while (j--)
		hns3_tqp_disable(h->kinfo.tqp[j]);

	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}
static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(netdev, i,
				       kinfo->prio_tc[i]);
	}

	return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* disable rcb */
	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_disable(h->kinfo.tqp[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);

	hns3_clear_all_ring(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return 0;

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}
static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}
static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC) {
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
	} else {
		flags |= HNAE3_VLAN_FLTR;
		if (netdev->flags & IFF_ALLMULTI)
			flags |= HNAE3_USER_MPE;
	}

	return flags;
}
static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;
	int ret;

	new_flags = hns3_get_netdev_flags(netdev);

	ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	if (ret) {
		netdev_err(netdev, "sync uc address fail\n");
		if (ret == -ENOSPC)
			new_flags |= HNAE3_OVERFLOW_UPE;
	}

	if (netdev->flags & IFF_MULTICAST) {
		ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
				    hns3_nic_mc_unsync);
		if (ret) {
			netdev_err(netdev, "sync mc address fail\n");
			if (ret == -ENOSPC)
				new_flags |= HNAE3_OVERFLOW_MPE;
		}
	}

	hns3_update_promisc_mode(netdev, new_flags);
	/* User mode promisc enable means vlan filtering is disabled to
	 * let all packets in. MAC-VLAN Table overflow promisc enabled means
	 * vlan filtering stays enabled.
	 */
	hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
	h->netdev_flags = new_flags;
}
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->set_promisc_mode) {
		return h->ae_algo->ops->set_promisc_mode(h,
						promisc_flags & HNAE3_UPE,
						promisc_flags & HNAE3_MPE);
	}

	return 0;
}
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool last_state;

	if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
		last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
		if (enable != last_state) {
			netdev_info(netdev,
				    "%s vlan filter\n",
				    enable ? "enable" : "disable");
			h->ae_algo->ops->enable_vlan_filter(h, enable);
		}
	}
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet.*/
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type &
		    SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type &
		    SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet*/
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso*/
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae3_set_bit(*type_cs_vlan_tso,
		      HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}
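
/* For TSO, hns3_set_tso() removes the L4 payload length from the inner
 * TCP pseudo-header checksum via csum_replace_by_diff() so that hardware
 * can re-add the correct per-segment length when it checksums each
 * generated segment.
 */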
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}
static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
			HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet*/
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae3_set_field(*ol_type_vlan_len_msec,
				HNS3_TXD_L2LEN_M,
				HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
				HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558)*/
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header.*/
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes. */
			ol4_len = l2_hdr - l4.hdr;
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
					ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes. */
			l2_len = l3.hdr - l2_hdr;
			hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
					HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
			 * txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
			HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware,
		 * txbd len field is not filled.
		 */
		return;
	}
}
/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
 * and it is udp packet, which has a dest port as the IANA assigned.
 * the hardware is expected to do the checksum offload, but the
 * hardware will not do the checksum offload when udp dest port is
 * 4789.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT	4789
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
		return false;

	skb_checksum_help(skb);

	return true;
}
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4).*/
	if (skb->encapsulation) {
		/* define outer network header type.*/
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_CSUM);
			else
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4).*/
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate csum
			 * when TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already,
			 * driver calculates l4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb))
			break;

		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb tunnel packet if hardware doesn't support it,
		 * because hardware can't calculate csum when TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already,
		 * driver calculates l4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
			HNS3_TXD_BDTYPE_S, 0);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
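
/* Every BD is marked valid (VLD); only the last BD of a packet gets the
 * FE (frag end) bit, which tells hardware where the packet ends.
 */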
964 static int hns3_fill_desc_vtags(struct sk_buff
*skb
,
965 struct hns3_enet_ring
*tx_ring
,
966 u32
*inner_vlan_flag
,
971 #define HNS3_TX_VLAN_PRIO_SHIFT 13
973 if (skb
->protocol
== htons(ETH_P_8021Q
) &&
974 !(tx_ring
->tqp
->handle
->kinfo
.netdev
->features
&
975 NETIF_F_HW_VLAN_CTAG_TX
)) {
976 /* When HW VLAN acceleration is turned off, and the stack
977 * sets the protocol to 802.1q, the driver just need to
978 * set the protocol to the encapsulated ethertype.
980 skb
->protocol
= vlan_get_protocol(skb
);
984 if (skb_vlan_tag_present(skb
)) {
987 vlan_tag
= skb_vlan_tag_get(skb
);
988 vlan_tag
|= (skb
->priority
& 0x7) << HNS3_TX_VLAN_PRIO_SHIFT
;
990 /* Based on hw strategy, use out_vtag in two layer tag case,
991 * and use inner_vtag in one tag case.
993 if (skb
->protocol
== htons(ETH_P_8021Q
)) {
994 hnae3_set_bit(*out_vlan_flag
, HNS3_TXD_OVLAN_B
, 1);
995 *out_vtag
= vlan_tag
;
997 hnae3_set_bit(*inner_vlan_flag
, HNS3_TXD_VLAN_B
, 1);
998 *inner_vtag
= vlan_tag
;
1000 } else if (skb
->protocol
== htons(ETH_P_8021Q
)) {
1001 struct vlan_ethhdr
*vhdr
;
1004 rc
= skb_cow_head(skb
, 0);
1007 vhdr
= (struct vlan_ethhdr
*)skb
->data
;
1008 vhdr
->h_vlan_TCI
|= cpu_to_be16((skb
->priority
& 0x7)
1009 << HNS3_TX_VLAN_PRIO_SHIFT
);
1012 skb
->protocol
= vlan_get_protocol(skb
);
1016 static int hns3_fill_desc(struct hns3_enet_ring
*ring
, void *priv
,
1017 int size
, int frag_end
, enum hns_desc_type type
)
1019 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
1020 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_use
];
1021 struct device
*dev
= ring_to_dev(ring
);
1022 u32 ol_type_vlan_len_msec
= 0;
1023 u16 bdtp_fe_sc_vld_ra_ri
= 0;
1024 struct skb_frag_struct
*frag
;
1025 unsigned int frag_buf_num
;
1026 u32 type_cs_vlan_tso
= 0;
1027 struct sk_buff
*skb
;
1039 if (type
== DESC_TYPE_SKB
) {
1040 skb
= (struct sk_buff
*)priv
;
1043 ret
= hns3_fill_desc_vtags(skb
, ring
, &type_cs_vlan_tso
,
1044 &ol_type_vlan_len_msec
,
1045 &inner_vtag
, &out_vtag
);
1049 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1050 skb_reset_mac_len(skb
);
1052 ret
= hns3_get_l4_protocol(skb
, &ol4_proto
, &il4_proto
);
1055 hns3_set_l2l3l4_len(skb
, ol4_proto
, il4_proto
,
1057 &ol_type_vlan_len_msec
);
1058 ret
= hns3_set_l3l4_type_csum(skb
, ol4_proto
, il4_proto
,
1060 &ol_type_vlan_len_msec
);
1064 ret
= hns3_set_tso(skb
, &paylen
, &mss
,
1071 desc
->tx
.ol_type_vlan_len_msec
=
1072 cpu_to_le32(ol_type_vlan_len_msec
);
1073 desc
->tx
.type_cs_vlan_tso_len
=
1074 cpu_to_le32(type_cs_vlan_tso
);
1075 desc
->tx
.paylen
= cpu_to_le32(paylen
);
1076 desc
->tx
.mss
= cpu_to_le16(mss
);
1077 desc
->tx
.vlan_tag
= cpu_to_le16(inner_vtag
);
1078 desc
->tx
.outer_vlan_tag
= cpu_to_le16(out_vtag
);
1080 dma
= dma_map_single(dev
, skb
->data
, size
, DMA_TO_DEVICE
);
1082 frag
= (struct skb_frag_struct
*)priv
;
1083 dma
= skb_frag_dma_map(dev
, frag
, 0, size
, DMA_TO_DEVICE
);
1086 if (dma_mapping_error(ring
->dev
, dma
)) {
1087 ring
->stats
.sw_err_cnt
++;
1091 desc_cb
->length
= size
;
1093 frag_buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
1094 sizeoflast
= size
% HNS3_MAX_BD_SIZE
;
1095 sizeoflast
= sizeoflast
? sizeoflast
: HNS3_MAX_BD_SIZE
;
1097 /* When frag size is bigger than hardware limit, split this frag */
1098 for (k
= 0; k
< frag_buf_num
; k
++) {
1099 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
1100 desc_cb
->priv
= priv
;
1101 desc_cb
->dma
= dma
+ HNS3_MAX_BD_SIZE
* k
;
1102 desc_cb
->type
= (type
== DESC_TYPE_SKB
&& !k
) ?
1103 DESC_TYPE_SKB
: DESC_TYPE_PAGE
;
1105 /* now, fill the descriptor */
1106 desc
->addr
= cpu_to_le64(dma
+ HNS3_MAX_BD_SIZE
* k
);
1107 desc
->tx
.send_size
= cpu_to_le16((k
== frag_buf_num
- 1) ?
1108 (u16
)sizeoflast
: (u16
)HNS3_MAX_BD_SIZE
);
1109 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri
,
1110 frag_end
&& (k
== frag_buf_num
- 1) ?
1112 desc
->tx
.bdtp_fe_sc_vld_ra_ri
=
1113 cpu_to_le16(bdtp_fe_sc_vld_ra_ri
);
1115 /* move ring pointer to next.*/
1116 ring_ptr_move_fw(ring
, next_to_use
);
1118 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
1119 desc
= &ring
->desc
[ring
->next_to_use
];
1125 static int hns3_nic_maybe_stop_tso(struct sk_buff
**out_skb
, int *bnum
,
1126 struct hns3_enet_ring
*ring
)
1128 struct sk_buff
*skb
= *out_skb
;
1129 struct skb_frag_struct
*frag
;
1136 size
= skb_headlen(skb
);
1137 buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
1139 frag_num
= skb_shinfo(skb
)->nr_frags
;
1140 for (i
= 0; i
< frag_num
; i
++) {
1141 frag
= &skb_shinfo(skb
)->frags
[i
];
1142 size
= skb_frag_size(frag
);
1144 (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
1145 if (bdnum_for_frag
> HNS3_MAX_BD_PER_FRAG
)
1148 buf_num
+= bdnum_for_frag
;
1151 if (buf_num
> ring_space(ring
))
1158 static int hns3_nic_maybe_stop_tx(struct sk_buff
**out_skb
, int *bnum
,
1159 struct hns3_enet_ring
*ring
)
1161 struct sk_buff
*skb
= *out_skb
;
1164 /* No. of segments (plus a header) */
1165 buf_num
= skb_shinfo(skb
)->nr_frags
+ 1;
1167 if (unlikely(ring_space(ring
) < buf_num
))
1175 static void hns3_clear_desc(struct hns3_enet_ring
*ring
, int next_to_use_orig
)
1177 struct device
*dev
= ring_to_dev(ring
);
1180 for (i
= 0; i
< ring
->desc_num
; i
++) {
1181 /* check if this is where we started */
1182 if (ring
->next_to_use
== next_to_use_orig
)
1185 /* unmap the descriptor dma address */
1186 if (ring
->desc_cb
[ring
->next_to_use
].type
== DESC_TYPE_SKB
)
1187 dma_unmap_single(dev
,
1188 ring
->desc_cb
[ring
->next_to_use
].dma
,
1189 ring
->desc_cb
[ring
->next_to_use
].length
,
1191 else if (ring
->desc_cb
[ring
->next_to_use
].length
)
1193 ring
->desc_cb
[ring
->next_to_use
].dma
,
1194 ring
->desc_cb
[ring
->next_to_use
].length
,
1197 ring
->desc_cb
[ring
->next_to_use
].length
= 0;
1200 ring_ptr_move_bw(ring
, next_to_use
);
1204 netdev_tx_t
hns3_nic_net_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
1206 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1207 struct hns3_nic_ring_data
*ring_data
=
1208 &tx_ring_data(priv
, skb
->queue_mapping
);
1209 struct hns3_enet_ring
*ring
= ring_data
->ring
;
1210 struct netdev_queue
*dev_queue
;
1211 struct skb_frag_struct
*frag
;
1212 int next_to_use_head
;
1213 int next_to_use_frag
;
1220 /* Prefetch the data used later */
1221 prefetch(skb
->data
);
1223 switch (priv
->ops
.maybe_stop_tx(&skb
, &buf_num
, ring
)) {
1225 u64_stats_update_begin(&ring
->syncp
);
1226 ring
->stats
.tx_busy
++;
1227 u64_stats_update_end(&ring
->syncp
);
1229 goto out_net_tx_busy
;
1231 u64_stats_update_begin(&ring
->syncp
);
1232 ring
->stats
.sw_err_cnt
++;
1233 u64_stats_update_end(&ring
->syncp
);
1234 netdev_err(netdev
, "no memory to xmit!\n");
1241 /* No. of segments (plus a header) */
1242 seg_num
= skb_shinfo(skb
)->nr_frags
+ 1;
1243 /* Fill the first part */
1244 size
= skb_headlen(skb
);
1246 next_to_use_head
= ring
->next_to_use
;
1248 ret
= priv
->ops
.fill_desc(ring
, skb
, size
, seg_num
== 1 ? 1 : 0,
1253 next_to_use_frag
= ring
->next_to_use
;
1254 /* Fill the fragments */
1255 for (i
= 1; i
< seg_num
; i
++) {
1256 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1257 size
= skb_frag_size(frag
);
1259 ret
= priv
->ops
.fill_desc(ring
, frag
, size
,
1260 seg_num
- 1 == i
? 1 : 0,
1267 /* Complete translate all packets */
1268 dev_queue
= netdev_get_tx_queue(netdev
, ring_data
->queue_index
);
1269 netdev_tx_sent_queue(dev_queue
, skb
->len
);
1271 wmb(); /* Commit all data before submit */
1273 hnae3_queue_xmit(ring
->tqp
, buf_num
);
1275 return NETDEV_TX_OK
;
1278 hns3_clear_desc(ring
, next_to_use_frag
);
1281 hns3_clear_desc(ring
, next_to_use_head
);
1284 dev_kfree_skb_any(skb
);
1285 return NETDEV_TX_OK
;
1288 netif_stop_subqueue(netdev
, ring_data
->queue_index
);
1289 smp_mb(); /* Commit all data before submit */
1291 return NETDEV_TX_BUSY
;
1294 static int hns3_nic_net_set_mac_address(struct net_device
*netdev
, void *p
)
1296 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1297 struct sockaddr
*mac_addr
= p
;
1300 if (!mac_addr
|| !is_valid_ether_addr((const u8
*)mac_addr
->sa_data
))
1301 return -EADDRNOTAVAIL
;
1303 if (ether_addr_equal(netdev
->dev_addr
, mac_addr
->sa_data
)) {
1304 netdev_info(netdev
, "already using mac address %pM\n",
1309 ret
= h
->ae_algo
->ops
->set_mac_addr(h
, mac_addr
->sa_data
, false);
1311 netdev_err(netdev
, "set_mac_address fail, ret=%d!\n", ret
);
1315 ether_addr_copy(netdev
->dev_addr
, mac_addr
->sa_data
);
1320 static int hns3_nic_do_ioctl(struct net_device
*netdev
,
1321 struct ifreq
*ifr
, int cmd
)
1323 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1325 if (!netif_running(netdev
))
1328 if (!h
->ae_algo
->ops
->do_ioctl
)
1331 return h
->ae_algo
->ops
->do_ioctl(h
, ifr
, cmd
);
1334 static int hns3_nic_set_features(struct net_device
*netdev
,
1335 netdev_features_t features
)
1337 netdev_features_t changed
= netdev
->features
^ features
;
1338 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1339 struct hnae3_handle
*h
= priv
->ae_handle
;
1342 if (changed
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1343 if (features
& (NETIF_F_TSO
| NETIF_F_TSO6
))
1344 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tso
;
1346 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tx
;
1349 if ((changed
& NETIF_F_HW_VLAN_CTAG_FILTER
) &&
1350 h
->ae_algo
->ops
->enable_vlan_filter
) {
1351 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1352 h
->ae_algo
->ops
->enable_vlan_filter(h
, true);
1354 h
->ae_algo
->ops
->enable_vlan_filter(h
, false);
1357 if ((changed
& NETIF_F_HW_VLAN_CTAG_RX
) &&
1358 h
->ae_algo
->ops
->enable_hw_strip_rxvtag
) {
1359 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1360 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, true);
1362 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, false);
1368 if ((changed
& NETIF_F_NTUPLE
) && h
->ae_algo
->ops
->enable_fd
) {
1369 if (features
& NETIF_F_NTUPLE
)
1370 h
->ae_algo
->ops
->enable_fd(h
, true);
1372 h
->ae_algo
->ops
->enable_fd(h
, false);
1375 netdev
->features
= features
;
1379 static void hns3_nic_get_stats64(struct net_device
*netdev
,
1380 struct rtnl_link_stats64
*stats
)
1382 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1383 int queue_num
= priv
->ae_handle
->kinfo
.num_tqps
;
1384 struct hnae3_handle
*handle
= priv
->ae_handle
;
1385 struct hns3_enet_ring
*ring
;
1395 if (test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
1398 handle
->ae_algo
->ops
->update_stats(handle
, &netdev
->stats
);
1400 for (idx
= 0; idx
< queue_num
; idx
++) {
1401 /* fetch the tx stats */
1402 ring
= priv
->ring_data
[idx
].ring
;
1404 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1405 tx_bytes
+= ring
->stats
.tx_bytes
;
1406 tx_pkts
+= ring
->stats
.tx_pkts
;
1407 tx_drop
+= ring
->stats
.tx_busy
;
1408 tx_drop
+= ring
->stats
.sw_err_cnt
;
1409 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1411 /* fetch the rx stats */
1412 ring
= priv
->ring_data
[idx
+ queue_num
].ring
;
1414 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1415 rx_bytes
+= ring
->stats
.rx_bytes
;
1416 rx_pkts
+= ring
->stats
.rx_pkts
;
1417 rx_drop
+= ring
->stats
.non_vld_descs
;
1418 rx_drop
+= ring
->stats
.err_pkt_len
;
1419 rx_drop
+= ring
->stats
.l2_err
;
1420 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1423 stats
->tx_bytes
= tx_bytes
;
1424 stats
->tx_packets
= tx_pkts
;
1425 stats
->rx_bytes
= rx_bytes
;
1426 stats
->rx_packets
= rx_pkts
;
1428 stats
->rx_errors
= netdev
->stats
.rx_errors
;
1429 stats
->multicast
= netdev
->stats
.multicast
;
1430 stats
->rx_length_errors
= netdev
->stats
.rx_length_errors
;
1431 stats
->rx_crc_errors
= netdev
->stats
.rx_crc_errors
;
1432 stats
->rx_missed_errors
= netdev
->stats
.rx_missed_errors
;
1434 stats
->tx_errors
= netdev
->stats
.tx_errors
;
1435 stats
->rx_dropped
= rx_drop
+ netdev
->stats
.rx_dropped
;
1436 stats
->tx_dropped
= tx_drop
+ netdev
->stats
.tx_dropped
;
1437 stats
->collisions
= netdev
->stats
.collisions
;
1438 stats
->rx_over_errors
= netdev
->stats
.rx_over_errors
;
1439 stats
->rx_frame_errors
= netdev
->stats
.rx_frame_errors
;
1440 stats
->rx_fifo_errors
= netdev
->stats
.rx_fifo_errors
;
1441 stats
->tx_aborted_errors
= netdev
->stats
.tx_aborted_errors
;
1442 stats
->tx_carrier_errors
= netdev
->stats
.tx_carrier_errors
;
1443 stats
->tx_fifo_errors
= netdev
->stats
.tx_fifo_errors
;
1444 stats
->tx_heartbeat_errors
= netdev
->stats
.tx_heartbeat_errors
;
1445 stats
->tx_window_errors
= netdev
->stats
.tx_window_errors
;
1446 stats
->rx_compressed
= netdev
->stats
.rx_compressed
;
1447 stats
->tx_compressed
= netdev
->stats
.tx_compressed
;
1450 static int hns3_setup_tc(struct net_device
*netdev
, void *type_data
)
1452 struct tc_mqprio_qopt_offload
*mqprio_qopt
= type_data
;
1453 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1454 struct hnae3_knic_private_info
*kinfo
= &h
->kinfo
;
1455 u8
*prio_tc
= mqprio_qopt
->qopt
.prio_tc_map
;
1456 u8 tc
= mqprio_qopt
->qopt
.num_tc
;
1457 u16 mode
= mqprio_qopt
->mode
;
1458 u8 hw
= mqprio_qopt
->qopt
.hw
;
1462 if (!((hw
== TC_MQPRIO_HW_OFFLOAD_TCS
&&
1463 mode
== TC_MQPRIO_MODE_CHANNEL
) || (!hw
&& tc
== 0)))
1466 if (tc
> HNAE3_MAX_TC
)
1472 if_running
= netif_running(netdev
);
1474 hns3_nic_net_stop(netdev
);
1478 ret
= (kinfo
->dcb_ops
&& kinfo
->dcb_ops
->setup_tc
) ?
1479 kinfo
->dcb_ops
->setup_tc(h
, tc
, prio_tc
) : -EOPNOTSUPP
;
1483 ret
= hns3_nic_set_real_num_queue(netdev
);
1487 hns3_nic_net_open(netdev
);
1492 static int hns3_nic_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
1495 if (type
!= TC_SETUP_QDISC_MQPRIO
)
1498 return hns3_setup_tc(dev
, type_data
);
1501 static int hns3_vlan_rx_add_vid(struct net_device
*netdev
,
1502 __be16 proto
, u16 vid
)
1504 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1505 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1508 if (h
->ae_algo
->ops
->set_vlan_filter
)
1509 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, false);
1512 set_bit(vid
, priv
->active_vlans
);
1517 static int hns3_vlan_rx_kill_vid(struct net_device
*netdev
,
1518 __be16 proto
, u16 vid
)
1520 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1521 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1524 if (h
->ae_algo
->ops
->set_vlan_filter
)
1525 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, true);
1528 clear_bit(vid
, priv
->active_vlans
);
1533 static int hns3_restore_vlan(struct net_device
*netdev
)
1535 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1539 for_each_set_bit(vid
, priv
->active_vlans
, VLAN_N_VID
) {
1540 ret
= hns3_vlan_rx_add_vid(netdev
, htons(ETH_P_8021Q
), vid
);
1542 netdev_err(netdev
, "Restore vlan: %d filter, ret:%d\n",
1551 static int hns3_ndo_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
1552 u8 qos
, __be16 vlan_proto
)
1554 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1557 if (h
->ae_algo
->ops
->set_vf_vlan_filter
)
1558 ret
= h
->ae_algo
->ops
->set_vf_vlan_filter(h
, vf
, vlan
,
1564 static int hns3_nic_change_mtu(struct net_device
*netdev
, int new_mtu
)
1566 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1567 bool if_running
= netif_running(netdev
);
1570 if (!h
->ae_algo
->ops
->set_mtu
)
1573 /* if this was called with netdev up then bring netdevice down */
1575 (void)hns3_nic_net_stop(netdev
);
1579 ret
= h
->ae_algo
->ops
->set_mtu(h
, new_mtu
);
1581 netdev_err(netdev
, "failed to change MTU in hardware %d\n",
1584 netdev
->mtu
= new_mtu
;
1586 /* if the netdev was running earlier, bring it up again */
1587 if (if_running
&& hns3_nic_net_open(netdev
))
1593 static bool hns3_get_tx_timeo_queue_info(struct net_device
*ndev
)
1595 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1596 struct hns3_enet_ring
*tx_ring
= NULL
;
1597 int timeout_queue
= 0;
1598 int hw_head
, hw_tail
;
1601 /* Find the stopped queue the same way the stack does */
1602 for (i
= 0; i
< ndev
->real_num_tx_queues
; i
++) {
1603 struct netdev_queue
*q
;
1604 unsigned long trans_start
;
1606 q
= netdev_get_tx_queue(ndev
, i
);
1607 trans_start
= q
->trans_start
;
1608 if (netif_xmit_stopped(q
) &&
1610 (trans_start
+ ndev
->watchdog_timeo
))) {
1616 if (i
== ndev
->num_tx_queues
) {
1618 "no netdev TX timeout queue found, timeout count: %llu\n",
1619 priv
->tx_timeout_count
);
1623 tx_ring
= priv
->ring_data
[timeout_queue
].ring
;
1625 hw_head
= readl_relaxed(tx_ring
->tqp
->io_base
+
1626 HNS3_RING_TX_RING_HEAD_REG
);
1627 hw_tail
= readl_relaxed(tx_ring
->tqp
->io_base
+
1628 HNS3_RING_TX_RING_TAIL_REG
);
1630 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1631 priv
->tx_timeout_count
,
1633 tx_ring
->next_to_use
,
1634 tx_ring
->next_to_clean
,
1637 readl(tx_ring
->tqp_vector
->mask_addr
));
1642 static void hns3_nic_net_timeout(struct net_device
*ndev
)
1644 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1645 struct hnae3_handle
*h
= priv
->ae_handle
;
1647 if (!hns3_get_tx_timeo_queue_info(ndev
))
1650 priv
->tx_timeout_count
++;
1652 /* request the reset, and let the hclge to determine
1653 * which reset level should be done
1655 if (h
->ae_algo
->ops
->reset_event
)
1656 h
->ae_algo
->ops
->reset_event(h
->pdev
, h
);
1659 static const struct net_device_ops hns3_nic_netdev_ops
= {
1660 .ndo_open
= hns3_nic_net_open
,
1661 .ndo_stop
= hns3_nic_net_stop
,
1662 .ndo_start_xmit
= hns3_nic_net_xmit
,
1663 .ndo_tx_timeout
= hns3_nic_net_timeout
,
1664 .ndo_set_mac_address
= hns3_nic_net_set_mac_address
,
1665 .ndo_do_ioctl
= hns3_nic_do_ioctl
,
1666 .ndo_change_mtu
= hns3_nic_change_mtu
,
1667 .ndo_set_features
= hns3_nic_set_features
,
1668 .ndo_get_stats64
= hns3_nic_get_stats64
,
1669 .ndo_setup_tc
= hns3_nic_setup_tc
,
1670 .ndo_set_rx_mode
= hns3_nic_set_rx_mode
,
1671 .ndo_vlan_rx_add_vid
= hns3_vlan_rx_add_vid
,
1672 .ndo_vlan_rx_kill_vid
= hns3_vlan_rx_kill_vid
,
1673 .ndo_set_vf_vlan
= hns3_ndo_set_vf_vlan
,
1676 static bool hns3_is_phys_func(struct pci_dev
*pdev
)
1678 u32 dev_id
= pdev
->device
;
1681 case HNAE3_DEV_ID_GE
:
1682 case HNAE3_DEV_ID_25GE
:
1683 case HNAE3_DEV_ID_25GE_RDMA
:
1684 case HNAE3_DEV_ID_25GE_RDMA_MACSEC
:
1685 case HNAE3_DEV_ID_50GE_RDMA
:
1686 case HNAE3_DEV_ID_50GE_RDMA_MACSEC
:
1687 case HNAE3_DEV_ID_100G_RDMA_MACSEC
:
1689 case HNAE3_DEV_ID_100G_VF
:
1690 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF
:
1693 dev_warn(&pdev
->dev
, "un-recognized pci device-id %d",
1700 static void hns3_disable_sriov(struct pci_dev
*pdev
)
1702 /* If our VFs are assigned we cannot shut down SR-IOV
1703 * without causing issues, so just leave the hardware
1704 * available but disabled
1706 if (pci_vfs_assigned(pdev
)) {
1707 dev_warn(&pdev
->dev
,
1708 "disabling driver while VFs are assigned\n");
1712 pci_disable_sriov(pdev
);
1715 static void hns3_get_dev_capability(struct pci_dev
*pdev
,
1716 struct hnae3_ae_dev
*ae_dev
)
1718 if (pdev
->revision
>= 0x21) {
1719 hnae3_set_bit(ae_dev
->flag
, HNAE3_DEV_SUPPORT_FD_B
, 1);
1720 hnae3_set_bit(ae_dev
->flag
, HNAE3_DEV_SUPPORT_GRO_B
, 1);
1724 /* hns3_probe - Device initialization routine
1725 * @pdev: PCI device information struct
1726 * @ent: entry in hns3_pci_tbl
1728 * hns3_probe initializes a PF identified by a pci_dev structure.
1729 * The OS initialization, configuring of the PF private structure,
1730 * and a hardware reset occur.
1732 * Returns 0 on success, negative on failure
1734 static int hns3_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1736 struct hnae3_ae_dev
*ae_dev
;
1739 ae_dev
= devm_kzalloc(&pdev
->dev
, sizeof(*ae_dev
),
1746 ae_dev
->pdev
= pdev
;
1747 ae_dev
->flag
= ent
->driver_data
;
1748 ae_dev
->dev_type
= HNAE3_DEV_KNIC
;
1749 ae_dev
->reset_type
= HNAE3_NONE_RESET
;
1750 hns3_get_dev_capability(pdev
, ae_dev
);
1751 pci_set_drvdata(pdev
, ae_dev
);
1753 hnae3_register_ae_dev(ae_dev
);
1758 /* hns3_remove - Device removal routine
1759 * @pdev: PCI device information struct
1761 static void hns3_remove(struct pci_dev
*pdev
)
1763 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1765 if (hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))
1766 hns3_disable_sriov(pdev
);
1768 hnae3_unregister_ae_dev(ae_dev
);
1772 * hns3_pci_sriov_configure
1773 * @pdev: pointer to a pci_dev structure
1774 * @num_vfs: number of VFs to allocate
1776 * Enable or change the number of VFs. Called when the user updates the number
1779 static int hns3_pci_sriov_configure(struct pci_dev
*pdev
, int num_vfs
)
1783 if (!(hns3_is_phys_func(pdev
) && IS_ENABLED(CONFIG_PCI_IOV
))) {
1784 dev_warn(&pdev
->dev
, "Can not config SRIOV\n");
1789 ret
= pci_enable_sriov(pdev
, num_vfs
);
1791 dev_err(&pdev
->dev
, "SRIOV enable failed %d\n", ret
);
1794 } else if (!pci_vfs_assigned(pdev
)) {
1795 pci_disable_sriov(pdev
);
1797 dev_warn(&pdev
->dev
,
1798 "Unable to free VFs because some are assigned to VMs.\n");
1804 static void hns3_shutdown(struct pci_dev
*pdev
)
1806 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1808 hnae3_unregister_ae_dev(ae_dev
);
1809 devm_kfree(&pdev
->dev
, ae_dev
);
1810 pci_set_drvdata(pdev
, NULL
);
1812 if (system_state
== SYSTEM_POWER_OFF
)
1813 pci_set_power_state(pdev
, PCI_D3hot
);
1816 static pci_ers_result_t
hns3_error_detected(struct pci_dev
*pdev
,
1817 pci_channel_state_t state
)
1819 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1820 pci_ers_result_t ret
;
1822 dev_info(&pdev
->dev
, "PCI error detected, state(=%d)!!\n", state
);
1824 if (state
== pci_channel_io_perm_failure
)
1825 return PCI_ERS_RESULT_DISCONNECT
;
1829 "Can't recover - error happened during device init\n");
1830 return PCI_ERS_RESULT_NONE
;
1833 if (ae_dev
->ops
->process_hw_error
)
1834 ret
= ae_dev
->ops
->process_hw_error(ae_dev
);
1836 return PCI_ERS_RESULT_NONE
;
1841 static pci_ers_result_t
hns3_slot_reset(struct pci_dev
*pdev
)
1843 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1844 struct device
*dev
= &pdev
->dev
;
1846 dev_info(dev
, "requesting reset due to PCI error\n");
1848 /* request the reset */
1849 if (ae_dev
->ops
->reset_event
) {
1850 ae_dev
->ops
->reset_event(pdev
, NULL
);
1851 return PCI_ERS_RESULT_RECOVERED
;
1854 return PCI_ERS_RESULT_DISCONNECT
;
1857 static void hns3_reset_prepare(struct pci_dev
*pdev
)
1859 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1861 dev_info(&pdev
->dev
, "hns3 flr prepare\n");
1862 if (ae_dev
&& ae_dev
->ops
&& ae_dev
->ops
->flr_prepare
)
1863 ae_dev
->ops
->flr_prepare(ae_dev
);
1866 static void hns3_reset_done(struct pci_dev
*pdev
)
1868 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1870 dev_info(&pdev
->dev
, "hns3 flr done\n");
1871 if (ae_dev
&& ae_dev
->ops
&& ae_dev
->ops
->flr_done
)
1872 ae_dev
->ops
->flr_done(ae_dev
);
1875 static const struct pci_error_handlers hns3_err_handler
= {
1876 .error_detected
= hns3_error_detected
,
1877 .slot_reset
= hns3_slot_reset
,
1878 .reset_prepare
= hns3_reset_prepare
,
1879 .reset_done
= hns3_reset_done
,
1882 static struct pci_driver hns3_driver
= {
1883 .name
= hns3_driver_name
,
1884 .id_table
= hns3_pci_tbl
,
1885 .probe
= hns3_probe
,
1886 .remove
= hns3_remove
,
1887 .shutdown
= hns3_shutdown
,
1888 .sriov_configure
= hns3_pci_sriov_configure
,
1889 .err_handler
= &hns3_err_handler
,
1892 /* set default feature to hns3 */
1893 static void hns3_set_default_feature(struct net_device
*netdev
)
1895 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1896 struct pci_dev
*pdev
= h
->pdev
;
1898 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
1900 netdev
->hw_enc_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1901 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1902 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1903 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1904 NETIF_F_GSO_UDP_TUNNEL_CSUM
| NETIF_F_SCTP_CRC
;
1906 netdev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
1908 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
1910 netdev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1911 NETIF_F_HW_VLAN_CTAG_FILTER
|
1912 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1913 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1914 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1915 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1916 NETIF_F_GSO_UDP_TUNNEL_CSUM
| NETIF_F_SCTP_CRC
;
1918 netdev
->vlan_features
|=
1919 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM
|
1920 NETIF_F_SG
| NETIF_F_GSO
| NETIF_F_GRO
|
1921 NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1922 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1923 NETIF_F_GSO_UDP_TUNNEL_CSUM
| NETIF_F_SCTP_CRC
;
1925 netdev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1926 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1927 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1928 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1929 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1930 NETIF_F_GSO_UDP_TUNNEL_CSUM
| NETIF_F_SCTP_CRC
;
1932 if (pdev
->revision
>= 0x21) {
1933 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
1935 if (!(h
->flags
& HNAE3_SUPPORT_VF
)) {
1936 netdev
->hw_features
|= NETIF_F_NTUPLE
;
1937 netdev
->features
|= NETIF_F_NTUPLE
;
1942 static int hns3_alloc_buffer(struct hns3_enet_ring
*ring
,
1943 struct hns3_desc_cb
*cb
)
1945 unsigned int order
= hnae3_page_order(ring
);
1948 p
= dev_alloc_pages(order
);
1953 cb
->page_offset
= 0;
1955 cb
->buf
= page_address(p
);
1956 cb
->length
= hnae3_page_size(ring
);
1957 cb
->type
= DESC_TYPE_PAGE
;
1962 static void hns3_free_buffer(struct hns3_enet_ring
*ring
,
1963 struct hns3_desc_cb
*cb
)
1965 if (cb
->type
== DESC_TYPE_SKB
)
1966 dev_kfree_skb_any((struct sk_buff
*)cb
->priv
);
1967 else if (!HNAE3_IS_TX_RING(ring
))
1968 put_page((struct page
*)cb
->priv
);
1969 memset(cb
, 0, sizeof(*cb
));
1972 static int hns3_map_buffer(struct hns3_enet_ring
*ring
, struct hns3_desc_cb
*cb
)
1974 cb
->dma
= dma_map_page(ring_to_dev(ring
), cb
->priv
, 0,
1975 cb
->length
, ring_to_dma_dir(ring
));
1977 if (unlikely(dma_mapping_error(ring_to_dev(ring
), cb
->dma
)))
1983 static void hns3_unmap_buffer(struct hns3_enet_ring
*ring
,
1984 struct hns3_desc_cb
*cb
)
1986 if (cb
->type
== DESC_TYPE_SKB
)
1987 dma_unmap_single(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1988 ring_to_dma_dir(ring
));
1989 else if (cb
->length
)
1990 dma_unmap_page(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1991 ring_to_dma_dir(ring
));
1994 static void hns3_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1996 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1997 ring
->desc
[i
].addr
= 0;
2000 static void hns3_free_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
2002 struct hns3_desc_cb
*cb
= &ring
->desc_cb
[i
];
2004 if (!ring
->desc_cb
[i
].dma
)
2007 hns3_buffer_detach(ring
, i
);
2008 hns3_free_buffer(ring
, cb
);
2011 static void hns3_free_buffers(struct hns3_enet_ring
*ring
)
2015 for (i
= 0; i
< ring
->desc_num
; i
++)
2016 hns3_free_buffer_detach(ring
, i
);
2019 /* free desc along with its attached buffer */
2020 static void hns3_free_desc(struct hns3_enet_ring
*ring
)
2022 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
2024 hns3_free_buffers(ring
);
2027 dma_free_coherent(ring_to_dev(ring
), size
,
2028 ring
->desc
, ring
->desc_dma_addr
);
2033 static int hns3_alloc_desc(struct hns3_enet_ring
*ring
)
2035 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
2037 ring
->desc
= dma_zalloc_coherent(ring_to_dev(ring
), size
,
2038 &ring
->desc_dma_addr
,
2046 static int hns3_reserve_buffer_map(struct hns3_enet_ring
*ring
,
2047 struct hns3_desc_cb
*cb
)
2051 ret
= hns3_alloc_buffer(ring
, cb
);
2055 ret
= hns3_map_buffer(ring
, cb
);
2062 hns3_free_buffer(ring
, cb
);
2067 static int hns3_alloc_buffer_attach(struct hns3_enet_ring
*ring
, int i
)
2069 int ret
= hns3_reserve_buffer_map(ring
, &ring
->desc_cb
[i
]);
2074 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
2079 /* Allocate memory for raw pkg, and map with dma */
2080 static int hns3_alloc_ring_buffers(struct hns3_enet_ring
*ring
)
2084 for (i
= 0; i
< ring
->desc_num
; i
++) {
2085 ret
= hns3_alloc_buffer_attach(ring
, i
);
2087 goto out_buffer_fail
;
2093 for (j
= i
- 1; j
>= 0; j
--)
2094 hns3_free_buffer_detach(ring
, j
);
2098 /* detach a in-used buffer and replace with a reserved one */
2099 static void hns3_replace_buffer(struct hns3_enet_ring
*ring
, int i
,
2100 struct hns3_desc_cb
*res_cb
)
2102 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
2103 ring
->desc_cb
[i
] = *res_cb
;
2104 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
2105 ring
->desc
[i
].rx
.bd_base_info
= 0;
2108 static void hns3_reuse_buffer(struct hns3_enet_ring
*ring
, int i
)
2110 ring
->desc_cb
[i
].reuse_flag
= 0;
2111 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
2112 + ring
->desc_cb
[i
].page_offset
);
2113 ring
->desc
[i
].rx
.bd_base_info
= 0;
2116 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring
*ring
, int *bytes
,
2119 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
2121 (*pkts
) += (desc_cb
->type
== DESC_TYPE_SKB
);
2122 (*bytes
) += desc_cb
->length
;
2123 /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
2124 hns3_free_buffer_detach(ring
, ring
->next_to_clean
);
2126 ring_ptr_move_fw(ring
, next_to_clean
);
2129 static int is_valid_clean_head(struct hns3_enet_ring
*ring
, int h
)
2131 int u
= ring
->next_to_use
;
2132 int c
= ring
->next_to_clean
;
2134 if (unlikely(h
> ring
->desc_num
))
2137 return u
> c
? (h
> c
&& h
<= u
) : (h
> c
|| h
<= u
);
2140 void hns3_clean_tx_ring(struct hns3_enet_ring
*ring
)
2142 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2143 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
2144 struct netdev_queue
*dev_queue
;
2148 head
= readl_relaxed(ring
->tqp
->io_base
+ HNS3_RING_TX_RING_HEAD_REG
);
2149 rmb(); /* Make sure head is ready before touch any data */
2151 if (is_ring_empty(ring
) || head
== ring
->next_to_clean
)
2152 return; /* no data to poll */
2154 if (unlikely(!is_valid_clean_head(ring
, head
))) {
2155 netdev_err(netdev
, "wrong head (%d, %d-%d)\n", head
,
2156 ring
->next_to_use
, ring
->next_to_clean
);
2158 u64_stats_update_begin(&ring
->syncp
);
2159 ring
->stats
.io_err_cnt
++;
2160 u64_stats_update_end(&ring
->syncp
);
2166 while (head
!= ring
->next_to_clean
) {
2167 hns3_nic_reclaim_one_desc(ring
, &bytes
, &pkts
);
2168 /* Issue prefetch for next Tx descriptor */
2169 prefetch(&ring
->desc_cb
[ring
->next_to_clean
]);
2172 ring
->tqp_vector
->tx_group
.total_bytes
+= bytes
;
2173 ring
->tqp_vector
->tx_group
.total_packets
+= pkts
;
2175 u64_stats_update_begin(&ring
->syncp
);
2176 ring
->stats
.tx_bytes
+= bytes
;
2177 ring
->stats
.tx_pkts
+= pkts
;
2178 u64_stats_update_end(&ring
->syncp
);
2180 dev_queue
= netdev_get_tx_queue(netdev
, ring
->tqp
->tqp_index
);
2181 netdev_tx_completed_queue(dev_queue
, pkts
, bytes
);
2183 if (unlikely(pkts
&& netif_carrier_ok(netdev
) &&
2184 (ring_space(ring
) > HNS3_MAX_BD_PER_PKT
))) {
2185 /* Make sure that anybody stopping the queue after this
2186 * sees the new next_to_clean.
2189 if (netif_tx_queue_stopped(dev_queue
) &&
2190 !test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
)) {
2191 netif_tx_wake_queue(dev_queue
);
2192 ring
->stats
.restart_queue
++;
2197 static int hns3_desc_unused(struct hns3_enet_ring
*ring
)
2199 int ntc
= ring
->next_to_clean
;
2200 int ntu
= ring
->next_to_use
;
2202 return ((ntc
>= ntu
) ? 0 : ring
->desc_num
) + ntc
- ntu
;
2206 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring
*ring
, int cleand_count
)
2208 struct hns3_desc_cb
*desc_cb
;
2209 struct hns3_desc_cb res_cbs
;
2212 for (i
= 0; i
< cleand_count
; i
++) {
2213 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
2214 if (desc_cb
->reuse_flag
) {
2215 u64_stats_update_begin(&ring
->syncp
);
2216 ring
->stats
.reuse_pg_cnt
++;
2217 u64_stats_update_end(&ring
->syncp
);
2219 hns3_reuse_buffer(ring
, ring
->next_to_use
);
2221 ret
= hns3_reserve_buffer_map(ring
, &res_cbs
);
2223 u64_stats_update_begin(&ring
->syncp
);
2224 ring
->stats
.sw_err_cnt
++;
2225 u64_stats_update_end(&ring
->syncp
);
2227 netdev_err(ring
->tqp
->handle
->kinfo
.netdev
,
2228 "hnae reserve buffer map failed.\n");
2231 hns3_replace_buffer(ring
, ring
->next_to_use
, &res_cbs
);
2234 ring_ptr_move_fw(ring
, next_to_use
);
2237 wmb(); /* Make all data has been write before submit */
2238 writel_relaxed(i
, ring
->tqp
->io_base
+ HNS3_RING_RX_RING_HEAD_REG
);
2241 static void hns3_nic_reuse_page(struct sk_buff
*skb
, int i
,
2242 struct hns3_enet_ring
*ring
, int pull_len
,
2243 struct hns3_desc_cb
*desc_cb
)
2245 struct hns3_desc
*desc
;
2251 twobufs
= ((PAGE_SIZE
< 8192) &&
2252 hnae3_buf_size(ring
) == HNS3_BUFFER_SIZE_2048
);
2254 desc
= &ring
->desc
[ring
->next_to_clean
];
2255 size
= le16_to_cpu(desc
->rx
.size
);
2257 truesize
= hnae3_buf_size(ring
);
2260 last_offset
= hnae3_page_size(ring
) - hnae3_buf_size(ring
);
2262 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, desc_cb
->page_offset
+ pull_len
,
2263 size
- pull_len
, truesize
);
2265 /* Avoid re-using remote pages,flag default unreuse */
2266 if (unlikely(page_to_nid(desc_cb
->priv
) != numa_node_id()))
2270 /* If we are only owner of page we can reuse it */
2271 if (likely(page_count(desc_cb
->priv
) == 1)) {
2272 /* Flip page offset to other buffer */
2273 desc_cb
->page_offset
^= truesize
;
2275 desc_cb
->reuse_flag
= 1;
2276 /* bump ref count on page before it is given*/
2277 get_page(desc_cb
->priv
);
2282 /* Move offset up to the next cache line */
2283 desc_cb
->page_offset
+= truesize
;
2285 if (desc_cb
->page_offset
<= last_offset
) {
2286 desc_cb
->reuse_flag
= 1;
2287 /* Bump ref count on page before it is given*/
2288 get_page(desc_cb
->priv
);
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     struct hns3_desc *desc)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int l3_type, l4_type;
	u32 bd_base_info;
	int ol4_type;
	u32 l234info;

	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	/* We MUST enable hardware checksum before enabling hardware GRO */
	if (skb_shinfo(skb)->gso_size) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	/* check if hardware has done checksum */
	if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
		return;

	if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
		     hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
		     hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
		     hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l3l4_csum_err++;
		u64_stats_update_end(&ring->syncp);

		return;
	}

	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
				  HNS3_RXD_L3ID_S);
	l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
				  HNS3_RXD_L4ID_S);

	ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
				   HNS3_RXD_OL4ID_S);
	switch (ol4_type) {
	case HNS3_OL4_TYPE_MAC_IN_UDP:
	case HNS3_OL4_TYPE_NVGRE:
		skb->csum_level = 1;
		/* fall through */
	case HNS3_OL4_TYPE_NO_TUN:
		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
		if ((l3_type == HNS3_L3_TYPE_IPV4 ||
		     l3_type == HNS3_L3_TYPE_IPV6) &&
		    (l4_type == HNS3_L4_TYPE_UDP ||
		     l4_type == HNS3_L4_TYPE_TCP ||
		     l4_type == HNS3_L4_TYPE_SCTP))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
}
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	if (skb_has_frag_list(skb))
		napi_gro_flush(&ring->tqp_vector->napi, false);

	napi_gro_receive(&ring->tqp_vector->napi, skb);
}
static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
				struct hns3_desc *desc, u32 l234info,
				u16 *vlan_tag)
{
	struct pci_dev *pdev = ring->tqp->handle->pdev;

	if (pdev->revision == 0x20) {
		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		if (!(*vlan_tag & VLAN_VID_MASK))
			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);

		return (*vlan_tag != 0);
	}

#define HNS3_STRP_OUTER_VLAN	0x1
#define HNS3_STRP_INNER_VLAN	0x2

	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
				HNS3_RXD_STRP_TAGP_S)) {
	case HNS3_STRP_OUTER_VLAN:
		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		return true;
	case HNS3_STRP_INNER_VLAN:
		*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
		return true;
	default:
		return false;
	}
}
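/* Note on hns3_parse_vlan_tag(): HW revision 0x20 does not report where a
 * stripped tag was placed, so the driver probes ot_vlan_tag first and falls
 * back to vlan_tag; later revisions encode the position in the
 * RXD_STRP_TAGP field handled by the switch above.
 */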
static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
			  unsigned char *va)
{
#define HNS3_NEED_ADD_FRAG	1
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct sk_buff *skb;

	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
	skb = ring->skb;
	if (unlikely(!skb)) {
		netdev_err(netdev, "alloc rx skb fail\n");

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -ENOMEM;
	}

	prefetchw(skb->data);

	ring->pending_buf = 1;
	ring->frag_num = 0;
	ring->tail_skb = NULL;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* We can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* This page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);
		return 0;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->stats.seg_pkt_cnt++;
	u64_stats_update_end(&ring->syncp);

	ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
	__skb_put(skb, ring->pull_len);
	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
			    desc_cb);
	ring_ptr_move_fw(ring, next_to_clean);

	return HNS3_NEED_ADD_FRAG;
}
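/* hns3_alloc_skb() has two outcomes: frames no larger than
 * HNS3_RX_HEAD_SIZE are copied entirely into the linear part of the new skb
 * and the BD is released immediately (return 0), while larger frames keep
 * only the pulled header in the linear part and return HNS3_NEED_ADD_FRAG
 * so the caller appends the remaining buffers as page fragments.
 */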
2446 static int hns3_add_frag(struct hns3_enet_ring
*ring
, struct hns3_desc
*desc
,
2447 struct sk_buff
**out_skb
, bool pending
)
2449 struct sk_buff
*skb
= *out_skb
;
2450 struct sk_buff
*head_skb
= *out_skb
;
2451 struct sk_buff
*new_skb
;
2452 struct hns3_desc_cb
*desc_cb
;
2453 struct hns3_desc
*pre_desc
;
2457 /* if there is pending bd, the SW param next_to_clean has moved
2458 * to next and the next is NULL
2461 pre_bd
= (ring
->next_to_clean
- 1 + ring
->desc_num
) %
2463 pre_desc
= &ring
->desc
[pre_bd
];
2464 bd_base_info
= le32_to_cpu(pre_desc
->rx
.bd_base_info
);
2466 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2469 while (!hnae3_get_bit(bd_base_info
, HNS3_RXD_FE_B
)) {
2470 desc
= &ring
->desc
[ring
->next_to_clean
];
2471 desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
2472 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2473 if (!hnae3_get_bit(bd_base_info
, HNS3_RXD_VLD_B
))
2476 if (unlikely(ring
->frag_num
>= MAX_SKB_FRAGS
)) {
2477 new_skb
= napi_alloc_skb(&ring
->tqp_vector
->napi
,
2479 if (unlikely(!new_skb
)) {
2480 netdev_err(ring
->tqp
->handle
->kinfo
.netdev
,
2481 "alloc rx skb frag fail\n");
2486 if (ring
->tail_skb
) {
2487 ring
->tail_skb
->next
= new_skb
;
2488 ring
->tail_skb
= new_skb
;
2490 skb_shinfo(skb
)->frag_list
= new_skb
;
2491 ring
->tail_skb
= new_skb
;
2495 if (ring
->tail_skb
) {
2496 head_skb
->truesize
+= hnae3_buf_size(ring
);
2497 head_skb
->data_len
+= le16_to_cpu(desc
->rx
.size
);
2498 head_skb
->len
+= le16_to_cpu(desc
->rx
.size
);
2499 skb
= ring
->tail_skb
;
2502 hns3_nic_reuse_page(skb
, ring
->frag_num
++, ring
, 0, desc_cb
);
2503 ring_ptr_move_fw(ring
, next_to_clean
);
2504 ring
->pending_buf
++;
static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
			       u32 bd_base_info)
{
	u16 gro_count;
	u32 l3_type;

	gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
				    HNS3_RXD_GRO_COUNT_S);
	/* if there is no HW GRO, do not set gro params */
	if (!gro_count)
		return;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = gro_count;

	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
				  HNS3_RXD_L3ID_S);
	if (l3_type == HNS3_L3_TYPE_IPV4)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	else if (l3_type == HNS3_L3_TYPE_IPV6)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		return;

	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);
	if (skb_shinfo(skb)->gso_size)
		tcp_gro_complete(skb);
}
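/* For a hardware-coalesced packet, the BD carries the segment count and
 * MSS: the count is seeded through NAPI_GRO_CB(skb)->count and gso_size is
 * taken from the GRO_SIZE field before tcp_gro_complete() finalises the
 * aggregated skb for the stack.
 */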
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
				     struct sk_buff *skb)
{
	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
	struct hnae3_handle *handle = ring->tqp->handle;
	enum pkt_hash_types rss_type;

	if (le32_to_cpu(desc->rx.rss_hash))
		rss_type = handle->kinfo.rss_type;
	else
		rss_type = PKT_HASH_TYPE_NONE;

	skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
}
2558 static int hns3_handle_rx_bd(struct hns3_enet_ring
*ring
,
2559 struct sk_buff
**out_skb
)
2561 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2562 struct sk_buff
*skb
= ring
->skb
;
2563 struct hns3_desc_cb
*desc_cb
;
2564 struct hns3_desc
*desc
;
2570 desc
= &ring
->desc
[ring
->next_to_clean
];
2571 desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
2575 length
= le16_to_cpu(desc
->rx
.size
);
2576 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2578 /* Check valid BD */
2579 if (unlikely(!hnae3_get_bit(bd_base_info
, HNS3_RXD_VLD_B
)))
2583 ring
->va
= (unsigned char *)desc_cb
->buf
+ desc_cb
->page_offset
;
2585 /* Prefetch first cache line of first page
2586 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
2587 * line size is 64B so need to prefetch twice to make it 128B. But in
2588 * actual we can have greater size of caches with 128B Level 1 cache
2589 * lines. In such a case, single fetch would suffice to cache in the
2590 * relevant part of the header.
2593 #if L1_CACHE_BYTES < 128
2594 prefetch(ring
->va
+ L1_CACHE_BYTES
);
2598 ret
= hns3_alloc_skb(ring
, length
, ring
->va
);
2599 *out_skb
= skb
= ring
->skb
;
2601 if (ret
< 0) /* alloc buffer fail */
2603 if (ret
> 0) { /* need add frag */
2604 ret
= hns3_add_frag(ring
, desc
, &skb
, false);
2608 /* As the head data may be changed when GRO enable, copy
2609 * the head data in after other data rx completed
2611 memcpy(skb
->data
, ring
->va
,
2612 ALIGN(ring
->pull_len
, sizeof(long)));
2615 ret
= hns3_add_frag(ring
, desc
, &skb
, true);
2619 /* As the head data may be changed when GRO enable, copy
2620 * the head data in after other data rx completed
2622 memcpy(skb
->data
, ring
->va
,
2623 ALIGN(ring
->pull_len
, sizeof(long)));
2626 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2627 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2629 /* Based on hw strategy, the tag offloaded will be stored at
2630 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
2631 * in one layer tag case.
2633 if (netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
) {
2636 if (hns3_parse_vlan_tag(ring
, desc
, l234info
, &vlan_tag
))
2637 __vlan_hwaccel_put_tag(skb
,
2642 if (unlikely(!hnae3_get_bit(bd_base_info
, HNS3_RXD_VLD_B
))) {
2643 u64_stats_update_begin(&ring
->syncp
);
2644 ring
->stats
.non_vld_descs
++;
2645 u64_stats_update_end(&ring
->syncp
);
2647 dev_kfree_skb_any(skb
);
2651 if (unlikely((!desc
->rx
.pkt_len
) ||
2652 hnae3_get_bit(l234info
, HNS3_RXD_TRUNCAT_B
))) {
2653 u64_stats_update_begin(&ring
->syncp
);
2654 ring
->stats
.err_pkt_len
++;
2655 u64_stats_update_end(&ring
->syncp
);
2657 dev_kfree_skb_any(skb
);
2661 if (unlikely(hnae3_get_bit(l234info
, HNS3_RXD_L2E_B
))) {
2662 u64_stats_update_begin(&ring
->syncp
);
2663 ring
->stats
.l2_err
++;
2664 u64_stats_update_end(&ring
->syncp
);
2666 dev_kfree_skb_any(skb
);
2670 u64_stats_update_begin(&ring
->syncp
);
2671 ring
->stats
.rx_pkts
++;
2672 ring
->stats
.rx_bytes
+= skb
->len
;
2673 u64_stats_update_end(&ring
->syncp
);
2675 ring
->tqp_vector
->rx_group
.total_bytes
+= skb
->len
;
2677 /* This is needed in order to enable forwarding support */
2678 hns3_set_gro_param(skb
, l234info
, bd_base_info
);
2680 hns3_rx_checksum(ring
, skb
, desc
);
2682 hns3_set_rx_skb_rss_type(ring
, skb
);
int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
	struct sk_buff *skb = ring->skb;
	int num;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring) -
					ring->pending_buf;
		}

		/* Poll one pkt */
		err = hns3_handle_rx_bd(ring, &skb);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		if (err == -ENXIO) { /* Do not get FE for the packet */
			goto out;
		} else if (unlikely(err)) {  /* Do jump the err */
			recv_bds += ring->pending_buf;
			clean_count += ring->pending_buf;
			ring->skb = NULL;
			ring->pending_buf = 0;
			continue;
		}

		/* Do update ip stack process */
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);
		recv_bds += ring->pending_buf;
		clean_count += ring->pending_buf;
		ring->skb = NULL;
		ring->pending_buf = 0;

		recv_pkts++;
	}

out:
	/* Make sure all data has been written before submit */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}
2749 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group
*ring_group
)
2751 struct hns3_enet_tqp_vector
*tqp_vector
=
2752 ring_group
->ring
->tqp_vector
;
2753 enum hns3_flow_level_range new_flow_level
;
2754 int packets_per_msecs
;
2755 int bytes_per_msecs
;
2759 if (!ring_group
->coal
.int_gl
|| !tqp_vector
->last_jiffies
)
2762 if (ring_group
->total_packets
== 0) {
2763 ring_group
->coal
.int_gl
= HNS3_INT_GL_50K
;
2764 ring_group
->coal
.flow_level
= HNS3_FLOW_LOW
;
2768 /* Simple throttlerate management
2769 * 0-10MB/s lower (50000 ints/s)
2770 * 10-20MB/s middle (20000 ints/s)
2771 * 20-1249MB/s high (18000 ints/s)
2772 * > 40000pps ultra (8000 ints/s)
2774 new_flow_level
= ring_group
->coal
.flow_level
;
2775 new_int_gl
= ring_group
->coal
.int_gl
;
2777 jiffies_to_msecs(jiffies
- tqp_vector
->last_jiffies
);
2779 if (!time_passed_ms
)
2782 do_div(ring_group
->total_packets
, time_passed_ms
);
2783 packets_per_msecs
= ring_group
->total_packets
;
2785 do_div(ring_group
->total_bytes
, time_passed_ms
);
2786 bytes_per_msecs
= ring_group
->total_bytes
;
2788 #define HNS3_RX_LOW_BYTE_RATE 10000
2789 #define HNS3_RX_MID_BYTE_RATE 20000
2791 switch (new_flow_level
) {
2793 if (bytes_per_msecs
> HNS3_RX_LOW_BYTE_RATE
)
2794 new_flow_level
= HNS3_FLOW_MID
;
2797 if (bytes_per_msecs
> HNS3_RX_MID_BYTE_RATE
)
2798 new_flow_level
= HNS3_FLOW_HIGH
;
2799 else if (bytes_per_msecs
<= HNS3_RX_LOW_BYTE_RATE
)
2800 new_flow_level
= HNS3_FLOW_LOW
;
2802 case HNS3_FLOW_HIGH
:
2803 case HNS3_FLOW_ULTRA
:
2805 if (bytes_per_msecs
<= HNS3_RX_MID_BYTE_RATE
)
2806 new_flow_level
= HNS3_FLOW_MID
;
2810 #define HNS3_RX_ULTRA_PACKET_RATE 40
2812 if (packets_per_msecs
> HNS3_RX_ULTRA_PACKET_RATE
&&
2813 &tqp_vector
->rx_group
== ring_group
)
2814 new_flow_level
= HNS3_FLOW_ULTRA
;
2816 switch (new_flow_level
) {
2818 new_int_gl
= HNS3_INT_GL_50K
;
2821 new_int_gl
= HNS3_INT_GL_20K
;
2823 case HNS3_FLOW_HIGH
:
2824 new_int_gl
= HNS3_INT_GL_18K
;
2826 case HNS3_FLOW_ULTRA
:
2827 new_int_gl
= HNS3_INT_GL_8K
;
2833 ring_group
->total_bytes
= 0;
2834 ring_group
->total_packets
= 0;
2835 ring_group
->coal
.flow_level
= new_flow_level
;
2836 if (new_int_gl
!= ring_group
->coal
.int_gl
) {
2837 ring_group
->coal
.int_gl
= new_int_gl
;
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	bool rx_update, tx_update;

	if (tqp_vector->int_adapt_down > 0) {
		tqp_vector->int_adapt_down--;
		return;
	}

	if (rx_group->coal.gl_adapt_enable) {
		rx_update = hns3_get_new_int_gl(rx_group);
		if (rx_update)
			hns3_set_vector_coalesce_rx_gl(tqp_vector,
						       rx_group->coal.int_gl);
	}

	if (tx_group->coal.gl_adapt_enable) {
		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
		if (tx_update)
			hns3_set_vector_coalesce_tx_gl(tqp_vector,
						       tx_group->coal.int_gl);
	}

	tqp_vector->last_jiffies = jiffies;
	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
}
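/* Interrupt coalescing is re-tuned only once every
 * HNS3_INT_ADAPT_DOWN_START polls: int_adapt_down counts down on each NAPI
 * completion, and the GL values are rewritten to hardware only when
 * hns3_get_new_int_gl() reports a change for the RX or TX group.
 */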
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group)
		hns3_clean_tx_ring(ring);

	/* make sure rx ring budget not smaller than 1 */
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) &&
	    napi_complete(napi)) {
		hns3_update_new_int_gl(tqp_vector);
		hns3_mask_vector_irq(tqp_vector, 1);
	}

	return rx_pkt_total;
}
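/* The NAPI budget is shared across all RX rings attached to this vector:
 * each ring gets max(budget / num_tqps, 1), and the poll only completes and
 * re-enables the vector interrupt once every ring has cleaned less than its
 * share of the budget.
 */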
2921 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector
*tqp_vector
,
2922 struct hnae3_ring_chain_node
*head
)
2924 struct pci_dev
*pdev
= tqp_vector
->handle
->pdev
;
2925 struct hnae3_ring_chain_node
*cur_chain
= head
;
2926 struct hnae3_ring_chain_node
*chain
;
2927 struct hns3_enet_ring
*tx_ring
;
2928 struct hns3_enet_ring
*rx_ring
;
2930 tx_ring
= tqp_vector
->tx_group
.ring
;
2932 cur_chain
->tqp_index
= tx_ring
->tqp
->tqp_index
;
2933 hnae3_set_bit(cur_chain
->flag
, HNAE3_RING_TYPE_B
,
2934 HNAE3_RING_TYPE_TX
);
2935 hnae3_set_field(cur_chain
->int_gl_idx
, HNAE3_RING_GL_IDX_M
,
2936 HNAE3_RING_GL_IDX_S
, HNAE3_RING_GL_TX
);
2938 cur_chain
->next
= NULL
;
2940 while (tx_ring
->next
) {
2941 tx_ring
= tx_ring
->next
;
2943 chain
= devm_kzalloc(&pdev
->dev
, sizeof(*chain
),
2946 goto err_free_chain
;
2948 cur_chain
->next
= chain
;
2949 chain
->tqp_index
= tx_ring
->tqp
->tqp_index
;
2950 hnae3_set_bit(chain
->flag
, HNAE3_RING_TYPE_B
,
2951 HNAE3_RING_TYPE_TX
);
2952 hnae3_set_field(chain
->int_gl_idx
,
2953 HNAE3_RING_GL_IDX_M
,
2954 HNAE3_RING_GL_IDX_S
,
2961 rx_ring
= tqp_vector
->rx_group
.ring
;
2962 if (!tx_ring
&& rx_ring
) {
2963 cur_chain
->next
= NULL
;
2964 cur_chain
->tqp_index
= rx_ring
->tqp
->tqp_index
;
2965 hnae3_set_bit(cur_chain
->flag
, HNAE3_RING_TYPE_B
,
2966 HNAE3_RING_TYPE_RX
);
2967 hnae3_set_field(cur_chain
->int_gl_idx
, HNAE3_RING_GL_IDX_M
,
2968 HNAE3_RING_GL_IDX_S
, HNAE3_RING_GL_RX
);
2970 rx_ring
= rx_ring
->next
;
2974 chain
= devm_kzalloc(&pdev
->dev
, sizeof(*chain
), GFP_KERNEL
);
2976 goto err_free_chain
;
2978 cur_chain
->next
= chain
;
2979 chain
->tqp_index
= rx_ring
->tqp
->tqp_index
;
2980 hnae3_set_bit(chain
->flag
, HNAE3_RING_TYPE_B
,
2981 HNAE3_RING_TYPE_RX
);
2982 hnae3_set_field(chain
->int_gl_idx
, HNAE3_RING_GL_IDX_M
,
2983 HNAE3_RING_GL_IDX_S
, HNAE3_RING_GL_RX
);
2987 rx_ring
= rx_ring
->next
;
2993 cur_chain
= head
->next
;
2995 chain
= cur_chain
->next
;
2996 devm_kfree(&pdev
->dev
, chain
);
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}
static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;
}
static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
{
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_tqp_vector *tqp_vector;
	int num_vectors = priv->vector_num;
	int numa_node;
	int vector_i;

	/* setting the affinity mask */
	numa_node = dev_to_node(&pdev->dev);

	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
		tqp_vector = &priv->tqp_vector[vector_i];
		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
				&tqp_vector->affinity_mask);
	}
}
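/* The affinity masks prepared here spread the vectors over CPUs close to
 * the device: cpumask_local_spread() picks the vector_i-th online CPU,
 * preferring CPUs on the PCI device's NUMA node.
 */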
3044 static int hns3_nic_init_vector_data(struct hns3_nic_priv
*priv
)
3046 struct hnae3_ring_chain_node vector_ring_chain
;
3047 struct hnae3_handle
*h
= priv
->ae_handle
;
3048 struct hns3_enet_tqp_vector
*tqp_vector
;
3052 hns3_nic_set_cpumask(priv
);
3054 for (i
= 0; i
< priv
->vector_num
; i
++) {
3055 tqp_vector
= &priv
->tqp_vector
[i
];
3056 hns3_vector_gl_rl_init_hw(tqp_vector
, priv
);
3057 tqp_vector
->num_tqps
= 0;
3060 for (i
= 0; i
< h
->kinfo
.num_tqps
; i
++) {
3061 u16 vector_i
= i
% priv
->vector_num
;
3062 u16 tqp_num
= h
->kinfo
.num_tqps
;
3064 tqp_vector
= &priv
->tqp_vector
[vector_i
];
3066 hns3_add_ring_to_group(&tqp_vector
->tx_group
,
3067 priv
->ring_data
[i
].ring
);
3069 hns3_add_ring_to_group(&tqp_vector
->rx_group
,
3070 priv
->ring_data
[i
+ tqp_num
].ring
);
3072 priv
->ring_data
[i
].ring
->tqp_vector
= tqp_vector
;
3073 priv
->ring_data
[i
+ tqp_num
].ring
->tqp_vector
= tqp_vector
;
3074 tqp_vector
->num_tqps
++;
3077 for (i
= 0; i
< priv
->vector_num
; i
++) {
3078 tqp_vector
= &priv
->tqp_vector
[i
];
3080 tqp_vector
->rx_group
.total_bytes
= 0;
3081 tqp_vector
->rx_group
.total_packets
= 0;
3082 tqp_vector
->tx_group
.total_bytes
= 0;
3083 tqp_vector
->tx_group
.total_packets
= 0;
3084 tqp_vector
->handle
= h
;
3086 ret
= hns3_get_vector_ring_chain(tqp_vector
,
3087 &vector_ring_chain
);
3091 ret
= h
->ae_algo
->ops
->map_ring_to_vector(h
,
3092 tqp_vector
->vector_irq
, &vector_ring_chain
);
3094 hns3_free_vector_ring_chain(tqp_vector
, &vector_ring_chain
);
3099 netif_napi_add(priv
->netdev
, &tqp_vector
->napi
,
3100 hns3_nic_common_poll
, NAPI_POLL_WEIGHT
);
3107 netif_napi_del(&priv
->tqp_vector
[i
].napi
);
3112 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv
*priv
)
3114 struct hnae3_handle
*h
= priv
->ae_handle
;
3115 struct hns3_enet_tqp_vector
*tqp_vector
;
3116 struct hnae3_vector_info
*vector
;
3117 struct pci_dev
*pdev
= h
->pdev
;
3118 u16 tqp_num
= h
->kinfo
.num_tqps
;
3123 /* RSS size, cpu online and vector_num should be the same */
3124 /* Should consider 2p/4p later */
3125 vector_num
= min_t(u16
, num_online_cpus(), tqp_num
);
3126 vector
= devm_kcalloc(&pdev
->dev
, vector_num
, sizeof(*vector
),
3131 vector_num
= h
->ae_algo
->ops
->get_vector(h
, vector_num
, vector
);
3133 priv
->vector_num
= vector_num
;
3134 priv
->tqp_vector
= (struct hns3_enet_tqp_vector
*)
3135 devm_kcalloc(&pdev
->dev
, vector_num
, sizeof(*priv
->tqp_vector
),
3137 if (!priv
->tqp_vector
) {
3142 for (i
= 0; i
< priv
->vector_num
; i
++) {
3143 tqp_vector
= &priv
->tqp_vector
[i
];
3144 tqp_vector
->idx
= i
;
3145 tqp_vector
->mask_addr
= vector
[i
].io_addr
;
3146 tqp_vector
->vector_irq
= vector
[i
].vector
;
3147 hns3_vector_gl_rl_init(tqp_vector
, priv
);
3151 devm_kfree(&pdev
->dev
, vector
);
static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}
3161 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv
*priv
)
3163 struct hnae3_ring_chain_node vector_ring_chain
;
3164 struct hnae3_handle
*h
= priv
->ae_handle
;
3165 struct hns3_enet_tqp_vector
*tqp_vector
;
3168 for (i
= 0; i
< priv
->vector_num
; i
++) {
3169 tqp_vector
= &priv
->tqp_vector
[i
];
3171 ret
= hns3_get_vector_ring_chain(tqp_vector
,
3172 &vector_ring_chain
);
3176 ret
= h
->ae_algo
->ops
->unmap_ring_from_vector(h
,
3177 tqp_vector
->vector_irq
, &vector_ring_chain
);
3181 hns3_free_vector_ring_chain(tqp_vector
, &vector_ring_chain
);
3183 if (priv
->tqp_vector
[i
].irq_init_flag
== HNS3_VECTOR_INITED
) {
3184 (void)irq_set_affinity_hint(
3185 priv
->tqp_vector
[i
].vector_irq
,
3187 free_irq(priv
->tqp_vector
[i
].vector_irq
,
3188 &priv
->tqp_vector
[i
]);
3191 priv
->ring_data
[i
].ring
->irq_init_flag
= HNS3_VECTOR_NOT_INITED
;
3192 hns3_clear_ring_group(&tqp_vector
->rx_group
);
3193 hns3_clear_ring_group(&tqp_vector
->tx_group
);
3194 netif_napi_del(&priv
->tqp_vector
[i
].napi
);
3200 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv
*priv
)
3202 struct hnae3_handle
*h
= priv
->ae_handle
;
3203 struct pci_dev
*pdev
= h
->pdev
;
3206 for (i
= 0; i
< priv
->vector_num
; i
++) {
3207 struct hns3_enet_tqp_vector
*tqp_vector
;
3209 tqp_vector
= &priv
->tqp_vector
[i
];
3210 ret
= h
->ae_algo
->ops
->put_vector(h
, tqp_vector
->vector_irq
);
3215 devm_kfree(&pdev
->dev
, priv
->tqp_vector
);
3219 static int hns3_ring_get_cfg(struct hnae3_queue
*q
, struct hns3_nic_priv
*priv
,
3222 struct hns3_nic_ring_data
*ring_data
= priv
->ring_data
;
3223 int queue_num
= priv
->ae_handle
->kinfo
.num_tqps
;
3224 struct pci_dev
*pdev
= priv
->ae_handle
->pdev
;
3225 struct hns3_enet_ring
*ring
;
3227 ring
= devm_kzalloc(&pdev
->dev
, sizeof(*ring
), GFP_KERNEL
);
3231 if (ring_type
== HNAE3_RING_TYPE_TX
) {
3232 ring_data
[q
->tqp_index
].ring
= ring
;
3233 ring_data
[q
->tqp_index
].queue_index
= q
->tqp_index
;
3234 ring
->io_base
= (u8 __iomem
*)q
->io_base
+ HNS3_TX_REG_OFFSET
;
3236 ring_data
[q
->tqp_index
+ queue_num
].ring
= ring
;
3237 ring_data
[q
->tqp_index
+ queue_num
].queue_index
= q
->tqp_index
;
3238 ring
->io_base
= q
->io_base
;
3241 hnae3_set_bit(ring
->flag
, HNAE3_RING_TYPE_B
, ring_type
);
3245 ring
->desc_cb
= NULL
;
3246 ring
->dev
= priv
->dev
;
3247 ring
->desc_dma_addr
= 0;
3248 ring
->buf_size
= q
->buf_size
;
3249 ring
->desc_num
= q
->desc_num
;
3250 ring
->next_to_use
= 0;
3251 ring
->next_to_clean
= 0;
static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret) {
		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
		return ret;
	}

	return 0;
}
static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
				       sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	while (i--) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}
static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}
static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}
static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);

	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}
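/* The 64-bit descriptor base address is programmed as two 32-bit writes;
 * the high half uses (dma >> 31) >> 1 instead of a single shift by 32 so
 * the expression stays well defined when dma_addr_t is only 32 bits wide.
 */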
static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	int i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
		int j;

		if (!tc_info->enable)
			continue;

		for (j = 0; j < tc_info->tqp_count; j++) {
			struct hnae3_queue *q;

			q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
				       tc_info->tc);
		}
	}
}
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}
int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	return 0;
}
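/* ring_data is laid out with the num_tqps TX rings first and the
 * corresponding RX rings at index i + num_tqps, which is why both the init
 * and uninit loops above touch ring_data[i] and
 * ring_data[i + h->kinfo.num_tqps].
 */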
/* Set the MAC address if it is configured, or leave it to the AE driver */
static int hns3_init_mac_addr(struct net_device *netdev, bool init)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];
	int ret = 0;

	if (h->ae_algo->ops->get_mac_addr && init) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);

	return ret;
}
static int hns3_restore_fd_rules(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->restore_fd_rules)
		ret = h->ae_algo->ops->restore_fd_rules(h);

	return ret;
}

static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->del_all_fd_entries)
		h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	priv->ops.fill_desc = hns3_fill_desc;
	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6))
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	else
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
static int hns3_client_start(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_start)
		return 0;

	return handle->ae_algo->ops->client_start(handle);
}

static void hns3_client_stop(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_stop)
		return;

	handle->ae_algo->ops->client_stop(handle);
}
3546 static int hns3_client_init(struct hnae3_handle
*handle
)
3548 struct pci_dev
*pdev
= handle
->pdev
;
3549 u16 alloc_tqps
, max_rss_size
;
3550 struct hns3_nic_priv
*priv
;
3551 struct net_device
*netdev
;
3554 handle
->ae_algo
->ops
->get_tqps_and_rss_info(handle
, &alloc_tqps
,
3556 netdev
= alloc_etherdev_mq(sizeof(struct hns3_nic_priv
), alloc_tqps
);
3560 priv
= netdev_priv(netdev
);
3561 priv
->dev
= &pdev
->dev
;
3562 priv
->netdev
= netdev
;
3563 priv
->ae_handle
= handle
;
3564 priv
->tx_timeout_count
= 0;
3566 handle
->kinfo
.netdev
= netdev
;
3567 handle
->priv
= (void *)priv
;
3569 hns3_init_mac_addr(netdev
, true);
3571 hns3_set_default_feature(netdev
);
3573 netdev
->watchdog_timeo
= HNS3_TX_TIMEOUT
;
3574 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
3575 netdev
->netdev_ops
= &hns3_nic_netdev_ops
;
3576 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
3577 hns3_ethtool_set_ops(netdev
);
3578 hns3_nic_set_priv_ops(netdev
);
3580 /* Carrier off reporting is important to ethtool even BEFORE open */
3581 netif_carrier_off(netdev
);
3583 ret
= hns3_get_ring_config(priv
);
3586 goto out_get_ring_cfg
;
3589 ret
= hns3_nic_alloc_vector_data(priv
);
3592 goto out_alloc_vector_data
;
3595 ret
= hns3_nic_init_vector_data(priv
);
3598 goto out_init_vector_data
;
3601 ret
= hns3_init_all_ring(priv
);
3604 goto out_init_ring_data
;
3607 ret
= register_netdev(netdev
);
3609 dev_err(priv
->dev
, "probe register netdev fail!\n");
3610 goto out_reg_netdev_fail
;
3613 ret
= hns3_client_start(handle
);
3615 dev_err(priv
->dev
, "hns3_client_start fail! ret=%d\n", ret
);
3616 goto out_reg_netdev_fail
;
3619 hns3_dcbnl_setup(handle
);
3621 /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
3622 netdev
->max_mtu
= HNS3_MAX_MTU
;
3624 set_bit(HNS3_NIC_STATE_INITED
, &priv
->state
);
3628 out_reg_netdev_fail
:
3630 (void)hns3_nic_uninit_vector_data(priv
);
3631 out_init_vector_data
:
3632 hns3_nic_dealloc_vector_data(priv
);
3633 out_alloc_vector_data
:
3634 priv
->ring_data
= NULL
;
3636 priv
->ae_handle
= NULL
;
3637 free_netdev(netdev
);
3641 static void hns3_client_uninit(struct hnae3_handle
*handle
, bool reset
)
3643 struct net_device
*netdev
= handle
->kinfo
.netdev
;
3644 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
3647 hns3_client_stop(handle
);
3649 hns3_remove_hw_addr(netdev
);
3651 if (netdev
->reg_state
!= NETREG_UNINITIALIZED
)
3652 unregister_netdev(netdev
);
3654 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED
, &priv
->state
)) {
3655 netdev_warn(netdev
, "already uninitialized\n");
3656 goto out_netdev_free
;
3659 hns3_del_all_fd_rules(netdev
, true);
3661 hns3_force_clear_all_rx_ring(handle
);
3663 ret
= hns3_nic_uninit_vector_data(priv
);
3665 netdev_err(netdev
, "uninit vector error\n");
3667 ret
= hns3_nic_dealloc_vector_data(priv
);
3669 netdev_err(netdev
, "dealloc vector error\n");
3671 ret
= hns3_uninit_all_ring(priv
);
3673 netdev_err(netdev
, "uninit ring error\n");
3675 hns3_put_ring_config(priv
);
3677 priv
->ring_data
= NULL
;
3680 free_netdev(netdev
);
static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}
3701 static int hns3_client_setup_tc(struct hnae3_handle
*handle
, u8 tc
)
3703 struct hnae3_knic_private_info
*kinfo
= &handle
->kinfo
;
3704 struct net_device
*ndev
= kinfo
->netdev
;
3708 if (tc
> HNAE3_MAX_TC
)
3714 if_running
= netif_running(ndev
);
3717 (void)hns3_nic_net_stop(ndev
);
3721 ret
= (kinfo
->dcb_ops
&& kinfo
->dcb_ops
->map_update
) ?
3722 kinfo
->dcb_ops
->map_update(handle
) : -EOPNOTSUPP
;
3726 ret
= hns3_nic_set_real_num_queue(ndev
);
3730 (void)hns3_nic_net_open(ndev
);
static int hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;
	int ret = 0;

	/* go through and sync uc_addr entries to the device */
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		ret = hns3_nic_uc_sync(ndev, ha->addr);
		if (ret)
			return ret;
	}

	/* go through and sync mc_addr entries to the device */
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		ret = hns3_nic_mc_sync(ndev, ha->addr);
		if (ret)
			return ret;
	}

	return ret;
}

static void hns3_remove_hw_addr(struct net_device *netdev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	hns3_nic_uc_unsync(netdev, netdev->dev_addr);

	/* go through and unsync uc_addr entries from the device */
	list = &netdev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_unsync(netdev, ha->addr);

	/* go through and unsync mc_addr entries from the device */
	list = &netdev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		if (ha->refcount > 1)
			hns3_nic_mc_unsync(netdev, ha->addr);
}
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean);
		ring_ptr_move_fw(ring, next_to_clean);
	}
}
static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);
				/* if alloc new buffer fail, exit directly
				 * and reclear in up flow.
				 */
				netdev_warn(ring->tqp->handle->kinfo.netdev,
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use,
					    &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	return 0;
}
static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *ring;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_force_clear_rx_ring(ring);
	}
}
static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		hns3_clear_rx_ring(ring);
	}
}
3875 int hns3_nic_reset_all_ring(struct hnae3_handle
*h
)
3877 struct net_device
*ndev
= h
->kinfo
.netdev
;
3878 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
3879 struct hns3_enet_ring
*rx_ring
;
3883 for (i
= 0; i
< h
->kinfo
.num_tqps
; i
++) {
3884 ret
= h
->ae_algo
->ops
->reset_queue(h
, i
);
3888 hns3_init_ring_hw(priv
->ring_data
[i
].ring
);
3890 /* We need to clear tx ring here because self test will
3891 * use the ring and will not run down before up
3893 hns3_clear_tx_ring(priv
->ring_data
[i
].ring
);
3894 priv
->ring_data
[i
].ring
->next_to_clean
= 0;
3895 priv
->ring_data
[i
].ring
->next_to_use
= 0;
3897 rx_ring
= priv
->ring_data
[i
+ h
->kinfo
.num_tqps
].ring
;
3898 hns3_init_ring_hw(rx_ring
);
3899 ret
= hns3_clear_rx_ring(rx_ring
);
3903 /* We can not know the hardware head and tail when this
3904 * function is called in reset flow, so we reuse all desc.
3906 for (j
= 0; j
< rx_ring
->desc_num
; j
++)
3907 hns3_reuse_buffer(rx_ring
, j
);
3909 rx_ring
->next_to_clean
= 0;
3910 rx_ring
->next_to_use
= 0;
3913 hns3_init_tx_ring_tc(priv
);
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
	/* ethtool only supports setting and querying one coalesce
	 * configuration for now, so save vector 0's coalesce
	 * configuration here in order to restore it.
	 */
	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
}

static void hns3_restore_coal(struct hns3_nic_priv *priv)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
		       sizeof(struct hns3_enet_coalesce));
	}
}
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
		return 0;

	/* it is cumbersome for hardware to pick-and-choose entries for deletion
	 * from table space. Hence, for function reset software intervention is
	 * required to delete the entries
	 */
	if (hns3_dev_ongoing_func_reset(ae_dev)) {
		hns3_remove_hw_addr(ndev);
		hns3_del_all_fd_rules(ndev, false);
	}

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_up(kinfo->netdev);
		if (ret) {
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}
	}

	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);

	return ret;
}
3988 static int hns3_reset_notify_init_enet(struct hnae3_handle
*handle
)
3990 struct net_device
*netdev
= handle
->kinfo
.netdev
;
3991 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
3992 bool vlan_filter_enable
;
3995 ret
= hns3_init_mac_addr(netdev
, false);
3999 ret
= hns3_recover_hw_addr(netdev
);
4003 ret
= hns3_update_promisc_mode(netdev
, handle
->netdev_flags
);
4007 vlan_filter_enable
= netdev
->flags
& IFF_PROMISC
? false : true;
4008 hns3_enable_vlan_filter(netdev
, vlan_filter_enable
);
4010 /* Hardware table is only clear when pf resets */
4011 if (!(handle
->flags
& HNAE3_SUPPORT_VF
)) {
4012 ret
= hns3_restore_vlan(netdev
);
4017 ret
= hns3_restore_fd_rules(netdev
);
4021 /* Carrier off reporting is important to ethtool even BEFORE open */
4022 netif_carrier_off(netdev
);
4024 ret
= hns3_nic_alloc_vector_data(priv
);
4028 hns3_restore_coal(priv
);
4030 ret
= hns3_nic_init_vector_data(priv
);
4032 goto err_dealloc_vector
;
4034 ret
= hns3_init_all_ring(priv
);
4036 goto err_uninit_vector
;
4038 set_bit(HNS3_NIC_STATE_INITED
, &priv
->state
);
4043 hns3_nic_uninit_vector_data(priv
);
4044 priv
->ring_data
= NULL
;
4046 hns3_nic_dealloc_vector_data(priv
);
4051 static int hns3_reset_notify_uninit_enet(struct hnae3_handle
*handle
)
4053 struct net_device
*netdev
= handle
->kinfo
.netdev
;
4054 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
4057 if (!test_bit(HNS3_NIC_STATE_INITED
, &priv
->state
)) {
4058 netdev_warn(netdev
, "already uninitialized\n");
4062 hns3_force_clear_all_rx_ring(handle
);
4064 ret
= hns3_nic_uninit_vector_data(priv
);
4066 netdev_err(netdev
, "uninit vector error\n");
4070 hns3_store_coal(priv
);
4072 ret
= hns3_nic_dealloc_vector_data(priv
);
4074 netdev_err(netdev
, "dealloc vector error\n");
4076 ret
= hns3_uninit_all_ring(priv
);
4078 netdev_err(netdev
, "uninit ring error\n");
4080 clear_bit(HNS3_NIC_STATE_INITED
, &priv
->state
);
4085 static int hns3_reset_notify(struct hnae3_handle
*handle
,
4086 enum hnae3_reset_notify_type type
)
4091 case HNAE3_UP_CLIENT
:
4092 ret
= hns3_reset_notify_up_enet(handle
);
4094 case HNAE3_DOWN_CLIENT
:
4095 ret
= hns3_reset_notify_down_enet(handle
);
4097 case HNAE3_INIT_CLIENT
:
4098 ret
= hns3_reset_notify_init_enet(handle
);
4100 case HNAE3_UNINIT_CLIENT
:
4101 ret
= hns3_reset_notify_uninit_enet(handle
);
4110 static int hns3_modify_tqp_num(struct net_device
*netdev
, u16 new_tqp_num
)
4112 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
4113 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
4116 ret
= h
->ae_algo
->ops
->set_channels(h
, new_tqp_num
);
4120 ret
= hns3_get_ring_config(priv
);
4124 ret
= hns3_nic_alloc_vector_data(priv
);
4126 goto err_alloc_vector
;
4128 hns3_restore_coal(priv
);
4130 ret
= hns3_nic_init_vector_data(priv
);
4132 goto err_uninit_vector
;
4134 ret
= hns3_init_all_ring(priv
);
4141 hns3_put_ring_config(priv
);
4143 hns3_nic_uninit_vector_data(priv
);
4145 hns3_nic_dealloc_vector_data(priv
);
static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
	return (new_tqp_num / num_tc) * num_tc;
}
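/* Example: with num_tc = 4, a request for 10 queues is rounded down to
 * (10 / 4) * 4 = 8 so that every traffic class keeps the same number of
 * queue pairs.
 */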
4154 int hns3_set_channels(struct net_device
*netdev
,
4155 struct ethtool_channels
*ch
)
4157 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
4158 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
4159 struct hnae3_knic_private_info
*kinfo
= &h
->kinfo
;
4160 bool if_running
= netif_running(netdev
);
4161 u32 new_tqp_num
= ch
->combined_count
;
4165 if (ch
->rx_count
|| ch
->tx_count
)
4168 if (new_tqp_num
> hns3_get_max_available_channels(h
) ||
4169 new_tqp_num
< kinfo
->num_tc
) {
4170 dev_err(&netdev
->dev
,
4171 "Change tqps fail, the tqp range is from %d to %d",
4173 hns3_get_max_available_channels(h
));
4177 new_tqp_num
= hns3_adjust_tqps_num(kinfo
->num_tc
, new_tqp_num
);
4178 if (kinfo
->num_tqps
== new_tqp_num
)
4182 hns3_nic_net_stop(netdev
);
4184 ret
= hns3_nic_uninit_vector_data(priv
);
4186 dev_err(&netdev
->dev
,
4187 "Unbind vector with tqp fail, nothing is changed");
4191 hns3_store_coal(priv
);
4193 hns3_nic_dealloc_vector_data(priv
);
4195 hns3_uninit_all_ring(priv
);
4196 hns3_put_ring_config(priv
);
4198 org_tqp_num
= h
->kinfo
.num_tqps
;
4199 ret
= hns3_modify_tqp_num(netdev
, new_tqp_num
);
4201 ret
= hns3_modify_tqp_num(netdev
, org_tqp_num
);
4203 /* If revert to old tqp failed, fatal error occurred */
4204 dev_err(&netdev
->dev
,
4205 "Revert to old tqp num fail, ret=%d", ret
);
4208 dev_info(&netdev
->dev
,
4209 "Change tqp num fail, Revert to old tqp num");
4214 hns3_nic_net_open(netdev
);
static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
MODULE_VERSION(HNS3_MOD_VERSION);