// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hns3_enet.h"
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static void hns3_remove_hw_addr(struct net_device *netdev);

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
                        "Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
        /* required last entry */
        {0, },
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
        struct hns3_enet_tqp_vector *tqp_vector = vector;

        napi_schedule(&tqp_vector->napi);

        return IRQ_HANDLED;
}
/* This callback function is used to set affinity changes to the irq affinity
 * masks when the irq_set_affinity_notifier function is used.
 */
static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
                                         const cpumask_t *mask)
{
        struct hns3_enet_tqp_vector *tqp_vectors =
                container_of(notify, struct hns3_enet_tqp_vector,
                             affinity_notify);

        tqp_vectors->affinity_mask = *mask;
}

static void hns3_nic_irq_affinity_release(struct kref *ref)
{
}
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
        struct hns3_enet_tqp_vector *tqp_vectors;
        unsigned int i;

        for (i = 0; i < priv->vector_num; i++) {
                tqp_vectors = &priv->tqp_vector[i];

                if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
                        continue;

                /* clear the affinity notifier and affinity mask */
                irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
                irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

                /* release the irq resource */
                free_irq(tqp_vectors->vector_irq, tqp_vectors);
                tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
        }
}
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
        struct hns3_enet_tqp_vector *tqp_vectors;
        int txrx_int_idx = 0;
        int rx_int_idx = 0;
        int tx_int_idx = 0;
        unsigned int i;
        int ret;

        for (i = 0; i < priv->vector_num; i++) {
                tqp_vectors = &priv->tqp_vector[i];

                if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
                        continue;

                if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
                                 "%s-%s-%d", priv->netdev->name, "TxRx",
                                 txrx_int_idx++);
                } else if (tqp_vectors->rx_group.ring) {
                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
                                 "%s-%s-%d", priv->netdev->name, "Rx",
                                 rx_int_idx++);
                } else if (tqp_vectors->tx_group.ring) {
                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
                                 "%s-%s-%d", priv->netdev->name, "Tx",
                                 tx_int_idx++);
                } else {
                        /* Skip this unused q_vector */
                        continue;
                }

                tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

                ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
                                  tqp_vectors->name, tqp_vectors);
                if (ret) {
                        netdev_err(priv->netdev, "request irq(%d) fail\n",
                                   tqp_vectors->vector_irq);
                        return ret;
                }

                tqp_vectors->affinity_notify.notify =
                                        hns3_nic_irq_affinity_notify;
                tqp_vectors->affinity_notify.release =
                                        hns3_nic_irq_affinity_release;
                irq_set_affinity_notifier(tqp_vectors->vector_irq,
                                          &tqp_vectors->affinity_notify);
                irq_set_affinity_hint(tqp_vectors->vector_irq,
                                      &tqp_vectors->affinity_mask);

                tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
        }

        return 0;
}
static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
                                 u32 mask_en)
{
        writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
        napi_enable(&tqp_vector->napi);

        /* enable vector */
        hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
        /* disable vector */
        hns3_mask_vector_irq(tqp_vector, 0);

        disable_irq(tqp_vector->vector_irq);
        napi_disable(&tqp_vector->napi);
}
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
                                 u32 rl_value)
{
        u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

        /* this defines the configuration for RL (Interrupt Rate Limiter).
         * Rl defines rate of interrupts i.e. number of interrupts-per-second
         * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
         */
        if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
            !tqp_vector->rx_group.coal.gl_adapt_enable)
                /* According to the hardware, the range of rl_reg is
                 * 0-59 and the unit is 4.
                 */
                rl_reg |= HNS3_INT_RL_ENABLE_MASK;

        writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}
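/* Illustrative usage sketch (not part of the driver): a caller that
 * wants a fixed interrupt rate limit would first turn off adaptive GL
 * on both groups, because the RL enable bit above is only set when
 * neither group is self-adaptive:
 *
 *      tqp_vector->tx_group.coal.gl_adapt_enable = 0;
 *      tqp_vector->rx_group.coal.gl_adapt_enable = 0;
 *      hns3_set_vector_coalesce_rl(tqp_vector, rl_usecs);
 */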
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
                                    u32 gl_value)
{
        u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

        writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
                                    u32 gl_value)
{
        u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

        writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}
static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
                                   struct hns3_nic_priv *priv)
{
        /* initialize the configuration for interrupt coalescing.
         * 1. GL (Interrupt Gap Limiter)
         * 2. RL (Interrupt Rate Limiter)
         */

        /* Default: enable interrupt coalescing self-adaptive and GL */
        tqp_vector->tx_group.coal.gl_adapt_enable = 1;
        tqp_vector->rx_group.coal.gl_adapt_enable = 1;

        tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
        tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

        tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
        tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}
static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
                                      struct hns3_nic_priv *priv)
{
        struct hnae3_handle *h = priv->ae_handle;

        hns3_set_vector_coalesce_tx_gl(tqp_vector,
                                       tqp_vector->tx_group.coal.int_gl);
        hns3_set_vector_coalesce_rx_gl(tqp_vector,
                                       tqp_vector->rx_group.coal.int_gl);
        hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct hnae3_knic_private_info *kinfo = &h->kinfo;
        unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
        int i, ret;

        if (kinfo->num_tc <= 1) {
                netdev_reset_tc(netdev);
        } else {
                ret = netdev_set_num_tc(netdev, kinfo->num_tc);
                if (ret) {
                        netdev_err(netdev,
                                   "netdev_set_num_tc fail, ret=%d!\n", ret);
                        return ret;
                }

                for (i = 0; i < HNAE3_MAX_TC; i++) {
                        if (!kinfo->tc_info[i].enable)
                                continue;

                        netdev_set_tc_queue(netdev,
                                            kinfo->tc_info[i].tc,
                                            kinfo->tc_info[i].tqp_count,
                                            kinfo->tc_info[i].tqp_offset);
                }
        }

        ret = netif_set_real_num_tx_queues(netdev, queue_size);
        if (ret) {
                netdev_err(netdev,
                           "netif_set_real_num_tx_queues fail, ret=%d!\n",
                           ret);
                return ret;
        }

        ret = netif_set_real_num_rx_queues(netdev, queue_size);
        if (ret) {
                netdev_err(netdev,
                           "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
                return ret;
        }

        return 0;
}
static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
        u16 alloc_tqps, max_rss_size, rss_size;

        h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
        rss_size = alloc_tqps / h->kinfo.num_tc;

        return min_t(u16, rss_size, max_rss_size);
}
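/* Worked example: with alloc_tqps = 16 queue pairs and num_tc = 4
 * traffic classes, rss_size = 16 / 4 = 4, which is then clamped by the
 * max_rss_size reported by the hardware.
 */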
static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
        u32 rcb_reg;

        rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
        rcb_reg |= BIT(HNS3_RING_EN_B);
        hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
        u32 rcb_reg;

        rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
        rcb_reg &= ~BIT(HNS3_RING_EN_B);
        hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}
static int hns3_nic_net_up(struct net_device *netdev)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = priv->ae_handle;
        int i, j;
        int ret;

        ret = hns3_nic_reset_all_ring(h);
        if (ret)
                return ret;

        /* get irq resource for all vectors */
        ret = hns3_nic_init_irq(priv);
        if (ret) {
                netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
                return ret;
        }

        /* enable the vectors */
        for (i = 0; i < priv->vector_num; i++)
                hns3_vector_enable(&priv->tqp_vector[i]);

        /* enable rcb */
        for (j = 0; j < h->kinfo.num_tqps; j++)
                hns3_tqp_enable(h->kinfo.tqp[j]);

        /* start the ae_dev */
        ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
        if (ret)
                goto out_start_err;

        clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

        return 0;

out_start_err:
        while (j--)
                hns3_tqp_disable(h->kinfo.tqp[j]);

        for (j = i - 1; j >= 0; j--)
                hns3_vector_disable(&priv->tqp_vector[j]);

        hns3_nic_uninit_irq(priv);

        return ret;
}
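/* Note on ordering in hns3_nic_net_up(): rings are reset first, then
 * IRQs are requested, vectors (NAPI plus interrupt) are enabled, the
 * RCB queues are enabled, and only then is the ae_dev started; the
 * error path unwinds in exactly the reverse order.
 */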
static int hns3_nic_net_open(struct net_device *netdev)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct hnae3_knic_private_info *kinfo;
        int i, ret;

        if (hns3_nic_resetting(netdev))
                return -EBUSY;

        netif_carrier_off(netdev);

        ret = hns3_nic_set_real_num_queue(netdev);
        if (ret)
                return ret;

        ret = hns3_nic_net_up(netdev);
        if (ret) {
                netdev_err(netdev,
                           "hns net up fail, ret=%d!\n", ret);
                return ret;
        }

        kinfo = &h->kinfo;
        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                netdev_set_prio_tc_map(netdev, i,
                                       kinfo->prio_tc[i]);
        }

        if (h->ae_algo->ops->set_timer_task)
                h->ae_algo->ops->set_timer_task(priv->ae_handle, true);

        return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = hns3_get_handle(netdev);
        const struct hnae3_ae_ops *ops;
        int i;

        /* disable vectors */
        for (i = 0; i < priv->vector_num; i++)
                hns3_vector_disable(&priv->tqp_vector[i]);

        /* disable rcb */
        for (i = 0; i < h->kinfo.num_tqps; i++)
                hns3_tqp_disable(h->kinfo.tqp[i]);

        /* stop ae_dev */
        ops = priv->ae_handle->ae_algo->ops;
        if (ops->stop)
                ops->stop(priv->ae_handle);

        /* free irq resources */
        hns3_nic_uninit_irq(priv);

        hns3_clear_all_ring(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = hns3_get_handle(netdev);

        if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
                return 0;

        if (h->ae_algo->ops->set_timer_task)
                h->ae_algo->ops->set_timer_task(priv->ae_handle, false);

        netif_tx_stop_all_queues(netdev);
        netif_carrier_off(netdev);

        hns3_nic_net_down(netdev);

        return 0;
}
static int hns3_nic_uc_sync(struct net_device *netdev,
                            const unsigned char *addr)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);

        if (h->ae_algo->ops->add_uc_addr)
                return h->ae_algo->ops->add_uc_addr(h, addr);

        return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
                              const unsigned char *addr)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);

        if (h->ae_algo->ops->rm_uc_addr)
                return h->ae_algo->ops->rm_uc_addr(h, addr);

        return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
                            const unsigned char *addr)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);

        if (h->ae_algo->ops->add_mc_addr)
                return h->ae_algo->ops->add_mc_addr(h, addr);

        return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
                              const unsigned char *addr)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);

        if (h->ae_algo->ops->rm_mc_addr)
                return h->ae_algo->ops->rm_mc_addr(h, addr);

        return 0;
}
static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
        u8 flags = 0;

        if (netdev->flags & IFF_PROMISC) {
                flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
        } else {
                flags |= HNAE3_VLAN_FLTR;
                if (netdev->flags & IFF_ALLMULTI)
                        flags |= HNAE3_USER_MPE;
        }

        return flags;
}
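/* For example, an interface with IFF_PROMISC set yields
 * HNAE3_USER_UPE | HNAE3_USER_MPE and no HNAE3_VLAN_FLTR (so VLAN
 * filtering is switched off), while a non-promiscuous interface with
 * IFF_ALLMULTI yields HNAE3_VLAN_FLTR | HNAE3_USER_MPE.
 */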
static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);
        u8 new_flags;
        int ret;

        new_flags = hns3_get_netdev_flags(netdev);

        ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
        if (ret) {
                netdev_err(netdev, "sync uc address fail\n");
                if (ret == -ENOSPC)
                        new_flags |= HNAE3_OVERFLOW_UPE;
        }

        if (netdev->flags & IFF_MULTICAST) {
                ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
                                    hns3_nic_mc_unsync);
                if (ret) {
                        netdev_err(netdev, "sync mc address fail\n");
                        if (ret == -ENOSPC)
                                new_flags |= HNAE3_OVERFLOW_MPE;
                }
        }

        hns3_update_promisc_mode(netdev, new_flags);
        /* User-mode promisc enable disables VLAN filtering to let all
         * packets in. MAC-VLAN table overflow promisc keeps VLAN
         * filtering enabled.
         */
        hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
        h->netdev_flags = new_flags;
}
int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = priv->ae_handle;

        if (h->ae_algo->ops->set_promisc_mode) {
                return h->ae_algo->ops->set_promisc_mode(h,
                                                promisc_flags & HNAE3_UPE,
                                                promisc_flags & HNAE3_MPE);
        }

        return 0;
}
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = priv->ae_handle;
        bool last_state;

        if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
                last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
                if (enable != last_state) {
                        netdev_info(netdev, "%s vlan filter\n",
                                    enable ? "enable" : "disable");
                        h->ae_algo->ops->enable_vlan_filter(h, enable);
                }
        }
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
                        u16 *mss, u32 *type_cs_vlan_tso)
{
        u32 l4_offset, hdr_len;
        union l3_hdr_info l3;
        union l4_hdr_info l4;
        u32 l4_paylen;
        int ret;

        if (!skb_is_gso(skb))
                return 0;

        ret = skb_cow_head(skb, 0);
        if (ret)
                return ret;

        l3.hdr = skb_network_header(skb);
        l4.hdr = skb_transport_header(skb);

        /* Software should clear the IPv4's checksum field when tso is
         * needed.
         */
        if (l3.v4->version == 4)
                l3.v4->check = 0;

        /* tunnel packet.*/
        if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
                                         SKB_GSO_GRE_CSUM |
                                         SKB_GSO_UDP_TUNNEL |
                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
                if ((!(skb_shinfo(skb)->gso_type &
                    SKB_GSO_PARTIAL)) &&
                    (skb_shinfo(skb)->gso_type &
                    SKB_GSO_UDP_TUNNEL_CSUM)) {
                        /* Software should clear the udp's checksum
                         * field when tso is needed.
                         */
                        l4.udp->check = 0;
                }
                /* reset l3&l4 pointers from outer to inner headers */
                l3.hdr = skb_inner_network_header(skb);
                l4.hdr = skb_inner_transport_header(skb);

                /* Software should clear the IPv4's checksum field when
                 * tso is needed.
                 */
                if (l3.v4->version == 4)
                        l3.v4->check = 0;
        }

        /* normal or tunnel packet*/
        l4_offset = l4.hdr - skb->data;
        hdr_len = (l4.tcp->doff * 4) + l4_offset;

        /* remove payload length from inner pseudo checksum when tso*/
        l4_paylen = skb->len - l4_offset;
        csum_replace_by_diff(&l4.tcp->check,
                             (__force __wsum)htonl(l4_paylen));

        /* find the txbd field values */
        *paylen = skb->len - hdr_len;
        hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

        /* get MSS for TSO */
        *mss = skb_shinfo(skb)->gso_size;

        return 0;
}
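/* Worked example for the csum_replace_by_diff() call above: the
 * hardware wants the TCP pseudo-header checksum without the payload
 * length, so for an skb with skb->len = 1514 and l4_offset = 34 the
 * code folds htonl(1480) back out of l4.tcp->check; *paylen and *mss
 * then come from skb->len - hdr_len and shinfo->gso_size.
 */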
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
                                u8 *il4_proto)
{
        union l3_hdr_info l3;
        unsigned char *l4_hdr;
        unsigned char *exthdr;
        u8 l4_proto_tmp;
        __be16 frag_off;

        /* find outer header point */
        l3.hdr = skb_network_header(skb);
        l4_hdr = skb_transport_header(skb);

        if (skb->protocol == htons(ETH_P_IPV6)) {
                exthdr = l3.hdr + sizeof(*l3.v6);
                l4_proto_tmp = l3.v6->nexthdr;
                if (l4_hdr != exthdr)
                        ipv6_skip_exthdr(skb, exthdr - skb->data,
                                         &l4_proto_tmp, &frag_off);
        } else if (skb->protocol == htons(ETH_P_IP)) {
                l4_proto_tmp = l3.v4->protocol;
        } else {
                return -EINVAL;
        }

        *ol4_proto = l4_proto_tmp;

        /* tunnel packet */
        if (!skb->encapsulation) {
                *il4_proto = 0;
                return 0;
        }

        /* find inner header point */
        l3.hdr = skb_inner_network_header(skb);
        l4_hdr = skb_inner_transport_header(skb);

        if (l3.v6->version == 6) {
                exthdr = l3.hdr + sizeof(*l3.v6);
                l4_proto_tmp = l3.v6->nexthdr;
                if (l4_hdr != exthdr)
                        ipv6_skip_exthdr(skb, exthdr - skb->data,
                                         &l4_proto_tmp, &frag_off);
        } else if (l3.v4->version == 4) {
                l4_proto_tmp = l3.v4->protocol;
        } else {
                return -EINVAL;
        }

        *il4_proto = l4_proto_tmp;

        return 0;
}
static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
                                u8 il4_proto, u32 *type_cs_vlan_tso,
                                u32 *ol_type_vlan_len_msec)
{
        union {
                struct iphdr *v4;
                struct ipv6hdr *v6;
                unsigned char *hdr;
        } l3;
        union {
                struct tcphdr *tcp;
                struct udphdr *udp;
                struct gre_base_hdr *gre;
                unsigned char *hdr;
        } l4;
        unsigned char *l2_hdr;
        u8 l4_proto = ol4_proto;
        u32 l2_len;
        u32 l4_len;
        u32 l3_len;
        u32 ol2_len;
        u32 ol3_len;
        u32 ol4_len;

        l3.hdr = skb_network_header(skb);
        l4.hdr = skb_transport_header(skb);

        /* compute L2 header size for normal packet, defined in 2 Bytes */
        l2_len = l3.hdr - skb->data;
        hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
                        HNS3_TXD_L2LEN_S, l2_len >> 1);

        /* tunnel packet*/
        if (skb->encapsulation) {
                /* compute OL2 header size, defined in 2 Bytes */
                ol2_len = l2_len;
                hnae3_set_field(*ol_type_vlan_len_msec,
                                HNS3_TXD_L2LEN_M,
                                HNS3_TXD_L2LEN_S, ol2_len >> 1);

                /* compute OL3 header size, defined in 4 Bytes */
                ol3_len = l4.hdr - l3.hdr;
                hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
                                HNS3_TXD_L3LEN_S, ol3_len >> 2);

                /* MAC in UDP, MAC in GRE (0x6558)*/
                if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
                        /* switch MAC header ptr from outer to inner header.*/
                        l2_hdr = skb_inner_mac_header(skb);

                        /* compute OL4 header size, defined in 4 Bytes. */
                        ol4_len = l2_hdr - l4.hdr;
                        hnae3_set_field(*ol_type_vlan_len_msec,
                                        HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
                                        ol4_len >> 2);

                        /* switch IP header ptr from outer to inner header */
                        l3.hdr = skb_inner_network_header(skb);

                        /* compute inner l2 header size, defined in 2 Bytes. */
                        l2_len = l3.hdr - l2_hdr;
                        hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
                                        HNS3_TXD_L2LEN_S, l2_len >> 1);
                } else {
                        /* skb packet types not supported by hardware,
                         * the txbd len field is not filled.
                         */
                        return;
                }

                /* switch L4 header pointer from outer to inner */
                l4.hdr = skb_inner_transport_header(skb);

                l4_proto = il4_proto;
        }

        /* compute inner(/normal) L3 header size, defined in 4 Bytes */
        l3_len = l4.hdr - l3.hdr;
        hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
                        HNS3_TXD_L3LEN_S, l3_len >> 2);

        /* compute inner(/normal) L4 header size, defined in 4 Bytes */
        switch (l4_proto) {
        case IPPROTO_TCP:
                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
                                HNS3_TXD_L4LEN_S, l4.tcp->doff);
                break;
        case IPPROTO_SCTP:
                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
                                HNS3_TXD_L4LEN_S,
                                (sizeof(struct sctphdr) >> 2));
                break;
        case IPPROTO_UDP:
                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
                                HNS3_TXD_L4LEN_S,
                                (sizeof(struct udphdr) >> 2));
                break;
        default:
                /* skb packet types not supported by hardware,
                 * the txbd len field is not filled.
                 */
                return;
        }
}
/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
 * and it is udp packet, which has a dest port as the IANA assigned.
 * the hardware is expected to do the checksum offload, but the
 * hardware will not do the checksum offload when udp dest port is
 * 4789.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT 4789
        union {
                struct tcphdr *tcp;
                struct udphdr *udp;
                struct gre_base_hdr *gre;
                unsigned char *hdr;
        } l4;

        l4.hdr = skb_transport_header(skb);

        if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
                return false;

        skb_checksum_help(skb);

        return true;
}
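/* In other words: a non-encapsulated UDP packet whose destination port
 * is 4789 (the IANA-assigned VXLAN port) gets its checksum computed in
 * software via skb_checksum_help(), because the hardware skips the
 * offload for that port.
 */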
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
                                   u8 il4_proto, u32 *type_cs_vlan_tso,
                                   u32 *ol_type_vlan_len_msec)
{
        union {
                struct iphdr *v4;
                struct ipv6hdr *v6;
                unsigned char *hdr;
        } l3;
        u32 l4_proto = ol4_proto;

        l3.hdr = skb_network_header(skb);

        /* define OL3 type and tunnel type(OL4).*/
        if (skb->encapsulation) {
                /* define outer network header type.*/
                if (skb->protocol == htons(ETH_P_IP)) {
                        if (skb_is_gso(skb))
                                hnae3_set_field(*ol_type_vlan_len_msec,
                                                HNS3_TXD_OL3T_M,
                                                HNS3_TXD_OL3T_S,
                                                HNS3_OL3T_IPV4_CSUM);
                        else
                                hnae3_set_field(*ol_type_vlan_len_msec,
                                                HNS3_TXD_OL3T_M,
                                                HNS3_TXD_OL3T_S,
                                                HNS3_OL3T_IPV4_NO_CSUM);

                } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
                                        HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
                }

                /* define tunnel type(OL4).*/
                switch (l4_proto) {
                case IPPROTO_UDP:
                        hnae3_set_field(*ol_type_vlan_len_msec,
                                        HNS3_TXD_TUNTYPE_M,
                                        HNS3_TXD_TUNTYPE_S,
                                        HNS3_TUN_MAC_IN_UDP);
                        break;
                case IPPROTO_GRE:
                        hnae3_set_field(*ol_type_vlan_len_msec,
                                        HNS3_TXD_TUNTYPE_M,
                                        HNS3_TXD_TUNTYPE_S,
                                        HNS3_TUN_NVGRE);
                        break;
                default:
                        /* drop the skb tunnel packet if the hardware
                         * doesn't support it, because the hardware can't
                         * calculate the csum when doing TSO.
                         */
                        if (skb_is_gso(skb))
                                return -EDOM;

                        /* the stack computes the IP header already,
                         * the driver calculates the L4 checksum when not
                         * doing TSO.
                         */
                        skb_checksum_help(skb);
                        return 0;
                }

                l3.hdr = skb_inner_network_header(skb);
                l4_proto = il4_proto;
        }

        if (l3.v4->version == 4) {
                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
                                HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
                if (skb_is_gso(skb))
                        hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
        } else if (l3.v6->version == 6) {
                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
                                HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
        }

        switch (l4_proto) {
        case IPPROTO_TCP:
                hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
                hnae3_set_field(*type_cs_vlan_tso,
                                HNS3_TXD_L4T_M,
                                HNS3_TXD_L4T_S,
                                HNS3_L4T_TCP);
                break;
        case IPPROTO_UDP:
                if (hns3_tunnel_csum_bug(skb))
                        break;

                hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
                hnae3_set_field(*type_cs_vlan_tso,
                                HNS3_TXD_L4T_M,
                                HNS3_TXD_L4T_S,
                                HNS3_L4T_UDP);
                break;
        case IPPROTO_SCTP:
                hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
                hnae3_set_field(*type_cs_vlan_tso,
                                HNS3_TXD_L4T_M,
                                HNS3_TXD_L4T_S,
                                HNS3_L4T_SCTP);
                break;
        default:
                /* drop the skb tunnel packet if the hardware doesn't
                 * support it, because the hardware can't calculate the
                 * csum when doing TSO.
                 */
                if (skb_is_gso(skb))
                        return -EDOM;

                /* the stack computes the IP header already,
                 * the driver calculates the L4 checksum when not doing
                 * TSO.
                 */
                skb_checksum_help(skb);
                return 0;
        }

        return 0;
}
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
        /* Config bd buffer end */
        hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
                        HNS3_TXD_BDTYPE_S, 0);
        hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
        hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
        hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
                                struct hns3_enet_ring *tx_ring,
                                u32 *inner_vlan_flag,
                                u32 *out_vlan_flag,
                                u16 *inner_vtag,
                                u16 *out_vtag)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13

        if (skb->protocol == htons(ETH_P_8021Q) &&
            !(tx_ring->tqp->handle->kinfo.netdev->features &
            NETIF_F_HW_VLAN_CTAG_TX)) {
                /* When HW VLAN acceleration is turned off, and the stack
                 * sets the protocol to 802.1q, the driver just needs to
                 * set the protocol to the encapsulated ethertype.
                 */
                skb->protocol = vlan_get_protocol(skb);
                return 0;
        }

        if (skb_vlan_tag_present(skb)) {
                u16 vlan_tag;

                vlan_tag = skb_vlan_tag_get(skb);
                vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;

                /* Based on hw strategy, use out_vtag in two layer tag case,
                 * and use inner_vtag in one tag case.
                 */
                if (skb->protocol == htons(ETH_P_8021Q)) {
                        hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
                        *out_vtag = vlan_tag;
                } else {
                        hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
                        *inner_vtag = vlan_tag;
                }
        } else if (skb->protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *vhdr;
                int rc;

                rc = skb_cow_head(skb, 0);
                if (rc < 0)
                        return rc;
                vhdr = (struct vlan_ethhdr *)skb->data;
                vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
                                        << HNS3_TX_VLAN_PRIO_SHIFT);
        }

        skb->protocol = vlan_get_protocol(skb);
        return 0;
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                          int size, int frag_end, enum hns_desc_type type)
{
        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
        struct hns3_desc *desc = &ring->desc[ring->next_to_use];
        struct device *dev = ring_to_dev(ring);
        u32 ol_type_vlan_len_msec = 0;
        u16 bdtp_fe_sc_vld_ra_ri = 0;
        struct skb_frag_struct *frag;
        unsigned int frag_buf_num;
        u32 type_cs_vlan_tso = 0;
        struct sk_buff *skb;
        u16 inner_vtag = 0;
        u16 out_vtag = 0;
        unsigned int k;
        int sizeoflast;
        u32 paylen = 0;
        dma_addr_t dma;
        u16 mss = 0;
        u8 ol4_proto;
        u8 il4_proto;
        int ret;

        if (type == DESC_TYPE_SKB) {
                skb = (struct sk_buff *)priv;
                paylen = skb->len;

                ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
                                           &ol_type_vlan_len_msec,
                                           &inner_vtag, &out_vtag);
                if (ret)
                        return ret;

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        skb_reset_mac_len(skb);

                        ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
                        if (ret)
                                return ret;
                        hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
                                            &type_cs_vlan_tso,
                                            &ol_type_vlan_len_msec);
                        ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
                                                      &type_cs_vlan_tso,
                                                      &ol_type_vlan_len_msec);
                        if (ret)
                                return ret;

                        ret = hns3_set_tso(skb, &paylen, &mss,
                                           &type_cs_vlan_tso);
                        if (ret)
                                return ret;
                }

                /* Set txbd */
                desc->tx.ol_type_vlan_len_msec =
                        cpu_to_le32(ol_type_vlan_len_msec);
                desc->tx.type_cs_vlan_tso_len =
                        cpu_to_le32(type_cs_vlan_tso);
                desc->tx.paylen = cpu_to_le32(paylen);
                desc->tx.mss = cpu_to_le16(mss);
                desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
                desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);

                dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
        } else {
                frag = (struct skb_frag_struct *)priv;
                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
        }

        if (dma_mapping_error(ring->dev, dma)) {
                ring->stats.sw_err_cnt++;
                return -ENOMEM;
        }

        desc_cb->length = size;

        frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
        sizeoflast = size % HNS3_MAX_BD_SIZE;
        sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

        /* When frag size is bigger than hardware limit, split this frag */
        for (k = 0; k < frag_buf_num; k++) {
                /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
                desc_cb->priv = priv;
                desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
                desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
                                DESC_TYPE_SKB : DESC_TYPE_PAGE;

                /* now, fill the descriptor */
                desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
                desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
                                (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
                hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
                                       frag_end && (k == frag_buf_num - 1) ?
                                                1 : 0);
                desc->tx.bdtp_fe_sc_vld_ra_ri =
                                cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

                /* move ring pointer to next.*/
                ring_ptr_move_fw(ring, next_to_use);

                desc_cb = &ring->desc_cb[ring->next_to_use];
                desc = &ring->desc[ring->next_to_use];
        }

        return 0;
}
static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
                                   struct hns3_enet_ring *ring)
{
        struct sk_buff *skb = *out_skb;
        struct skb_frag_struct *frag;
        int bdnum_for_frag;
        int frag_num;
        int buf_num;
        int size;
        int i;

        size = skb_headlen(skb);
        buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;

        frag_num = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < frag_num; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                size = skb_frag_size(frag);
                bdnum_for_frag =
                        (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
                if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
                        return -ENOMEM;

                buf_num += bdnum_for_frag;
        }

        if (buf_num > ring_space(ring))
                return -EBUSY;

        *bnum = buf_num;
        return 0;
}
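/* Example: a TSO skb with a 200-byte linear head and three 4 KB page
 * fragments needs 1 + 3 = 4 BDs here (each piece fits in one BD); if
 * ring_space() cannot hold all of them, -EBUSY is returned and the
 * caller stops the queue.
 */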
static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
                                  struct hns3_enet_ring *ring)
{
        struct sk_buff *skb = *out_skb;
        int buf_num;

        /* No. of segments (plus a header) */
        buf_num = skb_shinfo(skb)->nr_frags + 1;

        if (unlikely(ring_space(ring) < buf_num))
                return -EBUSY;

        *bnum = buf_num;

        return 0;
}
static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
        struct device *dev = ring_to_dev(ring);
        unsigned int i;

        for (i = 0; i < ring->desc_num; i++) {
                /* check if this is where we started */
                if (ring->next_to_use == next_to_use_orig)
                        break;

                /* unmap the descriptor dma address */
                if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
                        dma_unmap_single(dev,
                                         ring->desc_cb[ring->next_to_use].dma,
                                         ring->desc_cb[ring->next_to_use].length,
                                         DMA_TO_DEVICE);
                else if (ring->desc_cb[ring->next_to_use].length)
                        dma_unmap_page(dev,
                                       ring->desc_cb[ring->next_to_use].dma,
                                       ring->desc_cb[ring->next_to_use].length,
                                       DMA_TO_DEVICE);

                ring->desc_cb[ring->next_to_use].length = 0;

                /* rollback one */
                ring_ptr_move_bw(ring, next_to_use);
        }
}
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hns3_nic_ring_data *ring_data =
                &tx_ring_data(priv, skb->queue_mapping);
        struct hns3_enet_ring *ring = ring_data->ring;
        struct netdev_queue *dev_queue;
        struct skb_frag_struct *frag;
        int next_to_use_head;
        int next_to_use_frag;
        int buf_num;
        int seg_num;
        int size;
        int ret;
        int i;

        /* Prefetch the data used later */
        prefetch(skb->data);

        switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
        case -EBUSY:
                u64_stats_update_begin(&ring->syncp);
                ring->stats.tx_busy++;
                u64_stats_update_end(&ring->syncp);

                goto out_net_tx_busy;
        case -ENOMEM:
                u64_stats_update_begin(&ring->syncp);
                ring->stats.sw_err_cnt++;
                u64_stats_update_end(&ring->syncp);
                netdev_err(netdev, "no memory to xmit!\n");

                goto out_err_tx_ok;
        default:
                break;
        }

        /* No. of segments (plus a header) */
        seg_num = skb_shinfo(skb)->nr_frags + 1;
        /* Fill the first part */
        size = skb_headlen(skb);

        next_to_use_head = ring->next_to_use;

        ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
                                  DESC_TYPE_SKB);
        if (ret)
                goto head_fill_err;

        next_to_use_frag = ring->next_to_use;
        /* Fill the fragments */
        for (i = 1; i < seg_num; i++) {
                frag = &skb_shinfo(skb)->frags[i - 1];
                size = skb_frag_size(frag);

                ret = priv->ops.fill_desc(ring, frag, size,
                                          seg_num - 1 == i ? 1 : 0,
                                          DESC_TYPE_PAGE);
                if (ret)
                        goto frag_fill_err;
        }

        /* Complete translate all packets */
        dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
        netdev_tx_sent_queue(dev_queue, skb->len);

        wmb(); /* Commit all data before submit */

        hnae3_queue_xmit(ring->tqp, buf_num);

        return NETDEV_TX_OK;

frag_fill_err:
        hns3_clear_desc(ring, next_to_use_frag);

head_fill_err:
        hns3_clear_desc(ring, next_to_use_head);

out_err_tx_ok:
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;

out_net_tx_busy:
        netif_stop_subqueue(netdev, ring_data->queue_index);
        smp_mb(); /* Commit all data before submit */

        return NETDEV_TX_BUSY;
}
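/* The wmb() before hnae3_queue_xmit() above orders the descriptor
 * writes ahead of the doorbell, and the smp_mb() in the busy path is
 * intended to pair with the barrier in hns3_clean_tx_ring() so that a
 * concurrent completion cannot miss the stopped queue.
 */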
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct sockaddr *mac_addr = p;
        int ret;

        if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
                netdev_info(netdev, "already using mac address %pM\n",
                            mac_addr->sa_data);
                return 0;
        }

        ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
        if (ret) {
                netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
                return ret;
        }

        ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

        return 0;
}

static int hns3_nic_do_ioctl(struct net_device *netdev,
                             struct ifreq *ifr, int cmd)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);

        if (!netif_running(netdev))
                return -EINVAL;

        if (!h->ae_algo->ops->do_ioctl)
                return -EOPNOTSUPP;

        return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}
static int hns3_nic_set_features(struct net_device *netdev,
                                 netdev_features_t features)
{
        netdev_features_t changed = netdev->features ^ features;
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = priv->ae_handle;
        int ret;

        if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
                if (features & (NETIF_F_TSO | NETIF_F_TSO6))
                        priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
                else
                        priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
        }

        if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
            h->ae_algo->ops->enable_vlan_filter) {
                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        h->ae_algo->ops->enable_vlan_filter(h, true);
                else
                        h->ae_algo->ops->enable_vlan_filter(h, false);
        }

        if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
            h->ae_algo->ops->enable_hw_strip_rxvtag) {
                if (features & NETIF_F_HW_VLAN_CTAG_RX)
                        ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
                else
                        ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);

                if (ret)
                        return ret;
        }

        if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
                if (features & NETIF_F_NTUPLE)
                        h->ae_algo->ops->enable_fd(h, true);
                else
                        h->ae_algo->ops->enable_fd(h, false);
        }

        netdev->features = features;
        return 0;
}
static void hns3_nic_get_stats64(struct net_device *netdev,
                                 struct rtnl_link_stats64 *stats)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int queue_num = priv->ae_handle->kinfo.num_tqps;
        struct hnae3_handle *handle = priv->ae_handle;
        struct hns3_enet_ring *ring;
        unsigned int start;
        unsigned int idx;
        u64 tx_bytes = 0;
        u64 rx_bytes = 0;
        u64 tx_pkts = 0;
        u64 rx_pkts = 0;
        u64 tx_drop = 0;
        u64 rx_drop = 0;

        if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
                return;

        handle->ae_algo->ops->update_stats(handle, &netdev->stats);

        for (idx = 0; idx < queue_num; idx++) {
                /* fetch the tx stats */
                ring = priv->ring_data[idx].ring;
                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        tx_bytes += ring->stats.tx_bytes;
                        tx_pkts += ring->stats.tx_pkts;
                        tx_drop += ring->stats.tx_busy;
                        tx_drop += ring->stats.sw_err_cnt;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));

                /* fetch the rx stats */
                ring = priv->ring_data[idx + queue_num].ring;
                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        rx_bytes += ring->stats.rx_bytes;
                        rx_pkts += ring->stats.rx_pkts;
                        rx_drop += ring->stats.non_vld_descs;
                        rx_drop += ring->stats.err_pkt_len;
                        rx_drop += ring->stats.l2_err;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
        }

        stats->tx_bytes = tx_bytes;
        stats->tx_packets = tx_pkts;
        stats->rx_bytes = rx_bytes;
        stats->rx_packets = rx_pkts;

        stats->rx_errors = netdev->stats.rx_errors;
        stats->multicast = netdev->stats.multicast;
        stats->rx_length_errors = netdev->stats.rx_length_errors;
        stats->rx_crc_errors = netdev->stats.rx_crc_errors;
        stats->rx_missed_errors = netdev->stats.rx_missed_errors;

        stats->tx_errors = netdev->stats.tx_errors;
        stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
        stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
        stats->collisions = netdev->stats.collisions;
        stats->rx_over_errors = netdev->stats.rx_over_errors;
        stats->rx_frame_errors = netdev->stats.rx_frame_errors;
        stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
        stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
        stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
        stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
        stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
        stats->tx_window_errors = netdev->stats.tx_window_errors;
        stats->rx_compressed = netdev->stats.rx_compressed;
        stats->tx_compressed = netdev->stats.tx_compressed;
}
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
        struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct hnae3_knic_private_info *kinfo = &h->kinfo;
        u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
        u8 tc = mqprio_qopt->qopt.num_tc;
        u16 mode = mqprio_qopt->mode;
        u8 hw = mqprio_qopt->qopt.hw;
        bool if_running;
        int ret;

        if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
               mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
                return -EOPNOTSUPP;

        if (tc > HNAE3_MAX_TC)
                return -EINVAL;

        if (!netdev)
                return -EINVAL;

        if_running = netif_running(netdev);
        if (if_running) {
                hns3_nic_net_stop(netdev);
                msleep(100);
        }

        ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
                kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
        if (ret)
                goto out;

        ret = hns3_nic_set_real_num_queue(netdev);

out:
        if (if_running)
                hns3_nic_net_open(netdev);

        return ret;
}

static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
                             void *type_data)
{
        if (type != TC_SETUP_QDISC_MQPRIO)
                return -EOPNOTSUPP;

        return hns3_setup_tc(dev, type_data);
}
static int hns3_vlan_rx_add_vid(struct net_device *netdev,
                                __be16 proto, u16 vid)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret = -EIO;

        if (h->ae_algo->ops->set_vlan_filter)
                ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

        if (!ret)
                set_bit(vid, priv->active_vlans);

        return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
                                 __be16 proto, u16 vid)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret = -EIO;

        if (h->ae_algo->ops->set_vlan_filter)
                ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

        if (!ret)
                clear_bit(vid, priv->active_vlans);

        return ret;
}

static int hns3_restore_vlan(struct net_device *netdev)
{
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        int ret = 0;
        u16 vid;

        for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
                ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
                if (ret) {
                        netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
                                   vid, ret);
                        return ret;
                }
        }

        return ret;
}
static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
                                u8 qos, __be16 vlan_proto)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);
        int ret = -EIO;

        if (h->ae_algo->ops->set_vf_vlan_filter)
                ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
                                                          qos, vlan_proto);

        return ret;
}

static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);
        int ret;

        if (!h->ae_algo->ops->set_mtu)
                return -EOPNOTSUPP;

        ret = h->ae_algo->ops->set_mtu(h, new_mtu);
        if (ret)
                netdev_err(netdev, "failed to change MTU in hardware %d\n",
                           ret);
        else
                netdev->mtu = new_mtu;

        return ret;
}
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
        struct hns3_nic_priv *priv = netdev_priv(ndev);
        struct hns3_enet_ring *tx_ring = NULL;
        int timeout_queue = 0;
        int hw_head, hw_tail;
        int i;

        /* Find the stopped queue the same way the stack does */
        for (i = 0; i < ndev->num_tx_queues; i++) {
                struct netdev_queue *q;
                unsigned long trans_start;

                q = netdev_get_tx_queue(ndev, i);
                trans_start = q->trans_start;
                if (netif_xmit_stopped(q) &&
                    time_after(jiffies,
                               (trans_start + ndev->watchdog_timeo))) {
                        timeout_queue = i;
                        netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
                                    q->state,
                                    jiffies_to_msecs(jiffies - trans_start));
                        break;
                }
        }

        if (i == ndev->num_tx_queues) {
                netdev_info(ndev,
                            "no netdev TX timeout queue found, timeout count: %llu\n",
                            priv->tx_timeout_count);
                return false;
        }

        tx_ring = priv->ring_data[timeout_queue].ring;

        hw_head = readl_relaxed(tx_ring->tqp->io_base +
                                HNS3_RING_TX_RING_HEAD_REG);
        hw_tail = readl_relaxed(tx_ring->tqp->io_base +
                                HNS3_RING_TX_RING_TAIL_REG);
        netdev_info(ndev,
                    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
                    priv->tx_timeout_count,
                    timeout_queue,
                    tx_ring->next_to_use,
                    tx_ring->next_to_clean,
                    hw_head,
                    hw_tail,
                    readl(tx_ring->tqp_vector->mask_addr));

        return true;
}
static void hns3_nic_net_timeout(struct net_device *ndev)
{
        struct hns3_nic_priv *priv = netdev_priv(ndev);
        struct hnae3_handle *h = priv->ae_handle;

        if (!hns3_get_tx_timeo_queue_info(ndev))
                return;

        priv->tx_timeout_count++;

        /* request the reset, and let the hclge to determine
         * which reset level should be done
         */
        if (h->ae_algo->ops->reset_event)
                h->ae_algo->ops->reset_event(h->pdev, h);
}
static const struct net_device_ops hns3_nic_netdev_ops = {
        .ndo_open               = hns3_nic_net_open,
        .ndo_stop               = hns3_nic_net_stop,
        .ndo_start_xmit         = hns3_nic_net_xmit,
        .ndo_tx_timeout         = hns3_nic_net_timeout,
        .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
        .ndo_do_ioctl           = hns3_nic_do_ioctl,
        .ndo_change_mtu         = hns3_nic_change_mtu,
        .ndo_set_features       = hns3_nic_set_features,
        .ndo_get_stats64        = hns3_nic_get_stats64,
        .ndo_setup_tc           = hns3_nic_setup_tc,
        .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
        .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
        .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
};
static bool hns3_is_phys_func(struct pci_dev *pdev)
{
        u32 dev_id = pdev->device;

        switch (dev_id) {
        case HNAE3_DEV_ID_GE:
        case HNAE3_DEV_ID_25GE:
        case HNAE3_DEV_ID_25GE_RDMA:
        case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
        case HNAE3_DEV_ID_50GE_RDMA:
        case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
        case HNAE3_DEV_ID_100G_RDMA_MACSEC:
                return true;
        case HNAE3_DEV_ID_100G_VF:
        case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
                return false;
        default:
                dev_warn(&pdev->dev, "un-recognized pci device-id %d",
                         dev_id);
        }

        return false;
}

static void hns3_disable_sriov(struct pci_dev *pdev)
{
        /* If our VFs are assigned we cannot shut down SR-IOV
         * without causing issues, so just leave the hardware
         * available but disabled
         */
        if (pci_vfs_assigned(pdev)) {
                dev_warn(&pdev->dev,
                         "disabling driver while VFs are assigned\n");
                return;
        }

        pci_disable_sriov(pdev);
}

static void hns3_get_dev_capability(struct pci_dev *pdev,
                                    struct hnae3_ae_dev *ae_dev)
{
        if (pdev->revision >= 0x21) {
                hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
                hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
        }
}
/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct hnae3_ae_dev *ae_dev;
        int ret;

        ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
                              GFP_KERNEL);
        if (!ae_dev) {
                ret = -ENOMEM;
                return ret;
        }

        ae_dev->pdev = pdev;
        ae_dev->flag = ent->driver_data;
        ae_dev->dev_type = HNAE3_DEV_KNIC;
        ae_dev->reset_type = HNAE3_NONE_RESET;
        hns3_get_dev_capability(pdev, ae_dev);
        pci_set_drvdata(pdev, ae_dev);

        ret = hnae3_register_ae_dev(ae_dev);
        if (ret) {
                devm_kfree(&pdev->dev, ae_dev);
                pci_set_drvdata(pdev, NULL);
        }

        return ret;
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

        if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
                hns3_disable_sriov(pdev);

        hnae3_unregister_ae_dev(ae_dev);
        pci_set_drvdata(pdev, NULL);
}
/**
 * hns3_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        int ret;

        if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
                dev_warn(&pdev->dev, "Can not config SRIOV\n");
                return -EINVAL;
        }

        if (num_vfs) {
                ret = pci_enable_sriov(pdev, num_vfs);
                if (ret)
                        dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
                else
                        return num_vfs;
        } else if (!pci_vfs_assigned(pdev)) {
                pci_disable_sriov(pdev);
        } else {
                dev_warn(&pdev->dev,
                         "Unable to free VFs because some are assigned to VMs.\n");
        }

        return 0;
}

static void hns3_shutdown(struct pci_dev *pdev)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

        hnae3_unregister_ae_dev(ae_dev);
        devm_kfree(&pdev->dev, ae_dev);
        pci_set_drvdata(pdev, NULL);

        if (system_state == SYSTEM_POWER_OFF)
                pci_set_power_state(pdev, PCI_D3hot);
}
static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
        pci_ers_result_t ret;

        dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        if (!ae_dev) {
                dev_err(&pdev->dev,
                        "Can't recover - error happened during device init\n");
                return PCI_ERS_RESULT_NONE;
        }

        if (ae_dev->ops->handle_hw_ras_error)
                ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
        else
                return PCI_ERS_RESULT_NONE;

        return ret;
}

static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
        struct device *dev = &pdev->dev;

        dev_info(dev, "requesting reset due to PCI error\n");

        /* request the reset */
        if (ae_dev->ops->reset_event) {
                ae_dev->ops->reset_event(pdev, NULL);
                return PCI_ERS_RESULT_RECOVERED;
        }

        return PCI_ERS_RESULT_DISCONNECT;
}
static void hns3_reset_prepare(struct pci_dev *pdev)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

        dev_info(&pdev->dev, "hns3 flr prepare\n");
        if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
                ae_dev->ops->flr_prepare(ae_dev);
}

static void hns3_reset_done(struct pci_dev *pdev)
{
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

        dev_info(&pdev->dev, "hns3 flr done\n");
        if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
                ae_dev->ops->flr_done(ae_dev);
}

static const struct pci_error_handlers hns3_err_handler = {
        .error_detected = hns3_error_detected,
        .slot_reset     = hns3_slot_reset,
        .reset_prepare  = hns3_reset_prepare,
        .reset_done     = hns3_reset_done,
};

static struct pci_driver hns3_driver = {
        .name     = hns3_driver_name,
        .id_table = hns3_pci_tbl,
        .probe    = hns3_probe,
        .remove   = hns3_remove,
        .shutdown = hns3_shutdown,
        .sriov_configure = hns3_pci_sriov_configure,
        .err_handler    = &hns3_err_handler,
};
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
        struct hnae3_handle *h = hns3_get_handle(netdev);
        struct pci_dev *pdev = h->pdev;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
                NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

        netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

        netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

        netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_HW_VLAN_CTAG_FILTER |
                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
                NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

        netdev->vlan_features |=
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
                NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

        netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
                NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

        if (pdev->revision >= 0x21) {
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

                if (!(h->flags & HNAE3_SUPPORT_VF)) {
                        netdev->hw_features |= NETIF_F_NTUPLE;
                        netdev->features |= NETIF_F_NTUPLE;
                }
        }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
                             struct hns3_desc_cb *cb)
{
        unsigned int order = hnae3_page_order(ring);
        struct page *p;

        p = dev_alloc_pages(order);
        if (!p)
                return -ENOMEM;

        cb->priv = p;
        cb->page_offset = 0;
        cb->reuse_flag = 0;
        cb->buf  = page_address(p);
        cb->length = hnae3_page_size(ring);
        cb->type = DESC_TYPE_PAGE;

        return 0;
}
static void hns3_free_buffer(struct hns3_enet_ring *ring,
                             struct hns3_desc_cb *cb)
{
        if (cb->type == DESC_TYPE_SKB)
                dev_kfree_skb_any((struct sk_buff *)cb->priv);
        else if (!HNAE3_IS_TX_RING(ring))
                put_page((struct page *)cb->priv);
        memset(cb, 0, sizeof(*cb));
}

static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
{
        cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
                               cb->length, ring_to_dma_dir(ring));

        if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
                return -EIO;

        return 0;
}

static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
                              struct hns3_desc_cb *cb)
{
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
        else if (cb->length)
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
}
static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
        ring->desc[i].addr = 0;
}

static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
{
        struct hns3_desc_cb *cb = &ring->desc_cb[i];

        if (!ring->desc_cb[i].dma)
                return;

        hns3_buffer_detach(ring, i);
        hns3_free_buffer(ring, cb);
}

static void hns3_free_buffers(struct hns3_enet_ring *ring)
{
        int i;

        for (i = 0; i < ring->desc_num; i++)
                hns3_free_buffer_detach(ring, i);
}
/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
        int size = ring->desc_num * sizeof(ring->desc[0]);

        hns3_free_buffers(ring);

        if (ring->desc) {
                dma_free_coherent(ring_to_dev(ring), size,
                                  ring->desc, ring->desc_dma_addr);
                ring->desc = NULL;
        }
}

static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
        int size = ring->desc_num * sizeof(ring->desc[0]);

        ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
                                         &ring->desc_dma_addr,
                                         GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        return 0;
}
static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
                                   struct hns3_desc_cb *cb)
{
        int ret;

        ret = hns3_alloc_buffer(ring, cb);
        if (ret)
                goto out;

        ret = hns3_map_buffer(ring, cb);
        if (ret)
                goto out_with_buf;

        return 0;

out_with_buf:
        hns3_free_buffer(ring, cb);
out:
        return ret;
}

static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
{
        int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);

        if (ret)
                return ret;

        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

        return 0;
}

/* Allocate memory for raw pkg, and map with dma */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
        int i, j, ret;

        for (i = 0; i < ring->desc_num; i++) {
                ret = hns3_alloc_buffer_attach(ring, i);
                if (ret)
                        goto out_buffer_fail;
        }

        return 0;

out_buffer_fail:
        for (j = i - 1; j >= 0; j--)
                hns3_free_buffer_detach(ring, j);
        return ret;
}
/* detach an in-use buffer and replace with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
                                struct hns3_desc_cb *res_cb)
{
        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
        ring->desc_cb[i] = *res_cb;
        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
        ring->desc[i].rx.bd_base_info = 0;
}

static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
        ring->desc_cb[i].reuse_flag = 0;
        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
                + ring->desc_cb[i].page_offset);
        ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
                                  int *bytes, int *pkts)
{
        int ntc = ring->next_to_clean;
        struct hns3_desc_cb *desc_cb;

        while (head != ntc) {
                desc_cb = &ring->desc_cb[ntc];
                (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
                (*bytes) += desc_cb->length;
                /* desc_cb will be cleaned, after hnae3_free_buffer_detach */
                hns3_free_buffer_detach(ring, ntc);

                if (++ntc == ring->desc_num)
                        ntc = 0;

                /* Issue prefetch for next Tx descriptor */
                prefetch(&ring->desc_cb[ntc]);
        }

        /* This smp_store_release() pairs with smp_load_acquire() in
         * ring_space called by hns3_nic_net_xmit.
         */
        smp_store_release(&ring->next_to_clean, ntc);
}
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
{
        int u = ring->next_to_use;
        int c = ring->next_to_clean;

        if (unlikely(h > ring->desc_num))
                return 0;

        return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
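/* Example: with desc_num = 8, next_to_use = 2 and next_to_clean = 6
 * (a wrapped ring), heads 7, 0, 1 and 2 are accepted while 3-6 are
 * rejected, which is exactly what the wrapped branch
 * (h > c || h <= u) encodes.
 */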
void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
{
        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct netdev_queue *dev_queue;
        int bytes, pkts;
        int head;

        head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
        rmb(); /* Make sure head is ready before touch any data */

        if (is_ring_empty(ring) || head == ring->next_to_clean)
                return; /* no data to poll */

        if (unlikely(!is_valid_clean_head(ring, head))) {
                netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
                           ring->next_to_use, ring->next_to_clean);

                u64_stats_update_begin(&ring->syncp);
                ring->stats.io_err_cnt++;
                u64_stats_update_end(&ring->syncp);
                return;
        }

        bytes = 0;
        pkts = 0;
        hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);

        ring->tqp_vector->tx_group.total_bytes += bytes;
        ring->tqp_vector->tx_group.total_packets += pkts;

        u64_stats_update_begin(&ring->syncp);
        ring->stats.tx_bytes += bytes;
        ring->stats.tx_pkts += pkts;
        u64_stats_update_end(&ring->syncp);

        dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
        netdev_tx_completed_queue(dev_queue, pkts, bytes);

        if (unlikely(pkts && netif_carrier_ok(netdev) &&
                     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (netif_tx_queue_stopped(dev_queue) &&
                    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
                        netif_tx_wake_queue(dev_queue);
                        ring->stats.restart_queue++;
                }
        }
}
static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
        int ntc = ring->next_to_clean;
        int ntu = ring->next_to_use;

        return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
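/* Example: with desc_num = 512, next_to_clean = 10 and
 * next_to_use = 500, the expression yields 512 + 10 - 500 = 22 unused
 * descriptors eligible for refill.
 */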
static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
{
        struct hns3_desc_cb *desc_cb;
        struct hns3_desc_cb res_cbs;
        int i, ret;

        for (i = 0; i < cleand_count; i++) {
                desc_cb = &ring->desc_cb[ring->next_to_use];
                if (desc_cb->reuse_flag) {
                        u64_stats_update_begin(&ring->syncp);
                        ring->stats.reuse_pg_cnt++;
                        u64_stats_update_end(&ring->syncp);

                        hns3_reuse_buffer(ring, ring->next_to_use);
                } else {
                        ret = hns3_reserve_buffer_map(ring, &res_cbs);
                        if (ret) {
                                u64_stats_update_begin(&ring->syncp);
                                ring->stats.sw_err_cnt++;
                                u64_stats_update_end(&ring->syncp);

                                netdev_err(ring->tqp->handle->kinfo.netdev,
                                           "hnae reserve buffer map failed.\n");
                                break;
                        }
                        hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
                }

                ring_ptr_move_fw(ring, next_to_use);
        }

        wmb(); /* Make sure all data has been written before submit */
        writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
                                struct hns3_enet_ring *ring, int pull_len,
                                struct hns3_desc_cb *desc_cb)
{
        struct hns3_desc *desc;
        u32 truesize;
        int last_offset;
        bool twobufs;
        int size;

        twobufs = ((PAGE_SIZE < 8192) &&
                hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);

        desc = &ring->desc[ring->next_to_clean];
        size = le16_to_cpu(desc->rx.size);

        truesize = hnae3_buf_size(ring);

        last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);

        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
                        size - pull_len, truesize);

        /* Avoid re-using remote pages, flag default unreuse */
        if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
                return;

        if (twobufs) {
                /* If we are only owner of page we can reuse it */
                if (likely(page_count(desc_cb->priv) == 1)) {
                        /* Flip page offset to other buffer */
                        desc_cb->page_offset ^= truesize;

                        desc_cb->reuse_flag = 1;
                        /* bump ref count on page before it is given*/
                        get_page(desc_cb->priv);
                }
                return;
        }

        /* Move offset up to the next cache line */
        desc_cb->page_offset += truesize;

        if (desc_cb->page_offset <= last_offset) {
                desc_cb->reuse_flag = 1;
                /* Bump ref count on page before it is given*/
                get_page(desc_cb->priv);
        }
}
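/* Illustrative: in the two-buffer case (a 4 KB page split into two
 * 2 KB buffers), page_offset ^= truesize flips between offsets 0 and
 * 2048 so one page services alternating descriptors; in the
 * multi-buffer case the offset simply advances by truesize until the
 * page is exhausted.
 */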
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
                             struct hns3_desc *desc)
{
        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
        int l3_type, l4_type;
        u32 bd_base_info;
        int ol4_type;
        u32 l234info;

        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
        l234info = le32_to_cpu(desc->rx.l234_info);

        skb->ip_summed = CHECKSUM_NONE;

        skb_checksum_none_assert(skb);

        if (!(netdev->features & NETIF_F_RXCSUM))
                return;

        /* We MUST enable hardware checksum before enabling hardware GRO */
        if (skb_shinfo(skb)->gso_size) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                return;
        }

        /* check if hardware has done checksum */
        if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
                return;

        if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
                     hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
                     hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
                     hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
                u64_stats_update_begin(&ring->syncp);
                ring->stats.l3l4_csum_err++;
                u64_stats_update_end(&ring->syncp);

                return;
        }

        l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
                                  HNS3_RXD_L3ID_S);
        l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
                                  HNS3_RXD_L4ID_S);

        ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
                                   HNS3_RXD_OL4ID_S);
        switch (ol4_type) {
        case HNS3_OL4_TYPE_MAC_IN_UDP:
        case HNS3_OL4_TYPE_NVGRE:
                skb->csum_level = 1;
                /* fall through */
        case HNS3_OL4_TYPE_NO_TUN:
                /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
                if ((l3_type == HNS3_L3_TYPE_IPV4 ||
                     l3_type == HNS3_L3_TYPE_IPV6) &&
                    (l4_type == HNS3_L4_TYPE_UDP ||
                     l4_type == HNS3_L4_TYPE_TCP ||
                     l4_type == HNS3_L4_TYPE_SCTP))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                break;
        default:
                break;
        }
}
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	if (skb_has_frag_list(skb))
		napi_gro_flush(&ring->tqp_vector->napi, false);

	napi_gro_receive(&ring->tqp_vector->napi, skb);
}
static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
				struct hns3_desc *desc, u32 l234info,
				u16 *vlan_tag)
{
	struct pci_dev *pdev = ring->tqp->handle->pdev;

	if (pdev->revision == 0x20) {
		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		if (!(*vlan_tag & VLAN_VID_MASK))
			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);

		return (*vlan_tag != 0);
	}

#define HNS3_STRP_OUTER_VLAN	0x1
#define HNS3_STRP_INNER_VLAN	0x2

	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
				HNS3_RXD_STRP_TAGP_S)) {
	case HNS3_STRP_OUTER_VLAN:
		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		return true;
	case HNS3_STRP_INNER_VLAN:
		*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
		return true;
	default:
		return false;
	}
}
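/* Start a new RX skb. Short packets (up to HNS3_RX_HEAD_SIZE) are copied
 * whole so the buffer can be recycled immediately; longer packets pull
 * only the header and return HNS3_NEED_ADD_FRAG so the caller collects
 * the remaining buffer descriptors as fragments.
 */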
static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
			  unsigned char *va)
{
#define HNS3_NEED_ADD_FRAG	1
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct sk_buff *skb;

	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
	skb = ring->skb;
	if (unlikely(!skb)) {
		netdev_err(netdev, "alloc rx skb fail\n");

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -ENOMEM;
	}

	prefetchw(skb->data);

	ring->pending_buf = 1;
	ring->frag_num = 0;
	ring->tail_skb = NULL;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* We can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* This page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);
		return 0;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->stats.seg_pkt_cnt++;
	u64_stats_update_end(&ring->syncp);

	ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
	__skb_put(skb, ring->pull_len);
	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
			    desc_cb);
	ring_ptr_move_fw(ring, next_to_clean);

	return HNS3_NEED_ADD_FRAG;
}
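/* Gather the remaining BDs of a multi-BD packet until the FE (frame end)
 * bit is seen. Once MAX_SKB_FRAGS fragments are used, additional skbs are
 * chained onto the frag_list of the head skb and filling continues there.
 */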
static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
			 struct sk_buff **out_skb, bool pending)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *head_skb = *out_skb;
	struct sk_buff *new_skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *pre_desc;
	u32 bd_base_info;
	int pre_bd;

	/* if there is pending bd, the SW param next_to_clean has moved
	 * to next and the next is NULL
	 */
	if (pending) {
		pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
			ring->desc_num;
		pre_desc = &ring->desc[pre_bd];
		bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
	} else {
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	}

	while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
		desc = &ring->desc[ring->next_to_clean];
		desc_cb = &ring->desc_cb[ring->next_to_clean];
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
		/* make sure HW write desc complete */
		dma_rmb();
		if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
			return -ENXIO;

		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
			new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
						 HNS3_RX_HEAD_SIZE);
			if (unlikely(!new_skb)) {
				netdev_err(ring->tqp->handle->kinfo.netdev,
					   "alloc rx skb frag fail\n");
				return -ENXIO;
			}
			ring->frag_num = 0;

			if (ring->tail_skb) {
				ring->tail_skb->next = new_skb;
				ring->tail_skb = new_skb;
			} else {
				skb_shinfo(skb)->frag_list = new_skb;
				ring->tail_skb = new_skb;
			}
		}

		if (ring->tail_skb) {
			head_skb->truesize += hnae3_buf_size(ring);
			head_skb->data_len += le16_to_cpu(desc->rx.size);
			head_skb->len += le16_to_cpu(desc->rx.size);
			skb = ring->tail_skb;
		}

		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);
		ring->pending_buf++;
	}

	return 0;
}
static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
			       u32 bd_base_info)
{
	u16 gro_count;
	u32 l3_type;

	gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
				    HNS3_RXD_GRO_COUNT_S);
	/* if there is no HW GRO, do not set gro params */
	if (!gro_count)
		return;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = gro_count;

	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
				  HNS3_RXD_L3ID_S);
	if (l3_type == HNS3_L3_TYPE_IPV4)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	else if (l3_type == HNS3_L3_TYPE_IPV6)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		return;

	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);
	if (skb_shinfo(skb)->gso_size)
		tcp_gro_complete(skb);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
				     struct sk_buff *skb)
{
	struct hnae3_handle *handle = ring->tqp->handle;
	enum pkt_hash_types rss_type;
	struct hns3_desc *desc;
	int last_bd;

	/* When driver handles the rss type, ring->next_to_clean indicates the
	 * first descriptor of the next packet, so -1 is needed here.
	 */
	last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
	desc = &ring->desc[last_bd];

	if (le32_to_cpu(desc->rx.rss_hash))
		rss_type = handle->kinfo.rss_type;
	else
		rss_type = PKT_HASH_TYPE_NONE;

	skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
}
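/* Process one packet from the RX ring. Returns 0 on success, -ENXIO when
 * the descriptor is not yet valid or the frame end has not arrived, and
 * other negative errors when the packet must be dropped; in the drop
 * cases the skb has already been freed here.
 */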
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct sk_buff *skb = ring->skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	u32 bd_base_info;
	u32 l234info;
	int length;
	int ret;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	length = le16_to_cpu(desc->rx.size);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

	/* Check valid BD */
	if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
		return -ENXIO;

	if (!skb)
		ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* Prefetch first cache line of first page
	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
	 * line size is 64B so need to prefetch twice to make it 128B. But in
	 * actual we can have greater size of caches with 128B Level 1 cache
	 * lines. In such a case, single fetch would suffice to cache in the
	 * relevant part of the header.
	 */
	prefetch(ring->va);
#if L1_CACHE_BYTES < 128
	prefetch(ring->va + L1_CACHE_BYTES);
#endif

	if (!skb) {
		ret = hns3_alloc_skb(ring, length, ring->va);
		*out_skb = skb = ring->skb;

		if (ret < 0) /* alloc buffer fail */
			return ret;
		if (ret > 0) { /* need add frag */
			ret = hns3_add_frag(ring, desc, &skb, false);
			if (ret)
				return ret;

			/* As the head data may be changed when GRO enable, copy
			 * the head data in after other data rx completed
			 */
			memcpy(skb->data, ring->va,
			       ALIGN(ring->pull_len, sizeof(long)));
		}
	} else {
		ret = hns3_add_frag(ring, desc, &skb, true);
		if (ret)
			return ret;

		/* As the head data may be changed when GRO enable, copy
		 * the head data in after other data rx completed
		 */
		memcpy(skb->data, ring->va,
		       ALIGN(ring->pull_len, sizeof(long)));
	}

	l234info = le32_to_cpu(desc->rx.l234_info);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

	/* Based on hw strategy, the tag offloaded will be stored at
	 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
	 * in one layer tag case.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);
	}

	if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l2_err++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;
	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += skb->len;

	/* This is needed in order to enable forwarding support */
	hns3_set_gro_param(skb, l234info, bd_base_info);

	hns3_rx_checksum(ring, skb, desc);
	*out_skb = skb;
	hns3_set_rx_skb_rss_type(ring, skb);

	return 0;
}
int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
	struct sk_buff *skb = ring->skb;
	int num;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring) -
					ring->pending_buf;
		}

		/* Poll one packet */
		err = hns3_handle_rx_bd(ring, &skb);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		if (err == -ENXIO) { /* No FE seen for this packet yet */
			goto out;
		} else if (unlikely(err)) { /* Skip this error packet */
			recv_bds += ring->pending_buf;
			clean_count += ring->pending_buf;
			ring->skb = NULL;
			ring->pending_buf = 0;
			continue;
		}

		/* Hand the packet up to the IP stack */
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);
		recv_bds += ring->pending_buf;
		clean_count += ring->pending_buf;
		ring->skb = NULL;
		ring->pending_buf = 0;

		recv_pkts++;
	}

out:
	/* Make sure all buffers are written back before the doorbell */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}
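/* Adaptive interrupt coalescing: classify the traffic observed since the
 * last update into a flow level based on byte/packet rates, then map that
 * level to a GL (gap limiting) value. Returns true when the GL value
 * changed and must be written to hardware.
 */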
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
	struct hns3_enet_tqp_vector *tqp_vector =
					ring_group->ring->tqp_vector;
	enum hns3_flow_level_range new_flow_level;
	int packets_per_msecs;
	int bytes_per_msecs;
	u32 time_passed_ms;
	u16 new_int_gl;

	if (!tqp_vector->last_jiffies)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

	/* Simple throttle rate management
	 * 0-10MB/s    lower    (50000 ints/s)
	 * 10-20MB/s   middle   (20000 ints/s)
	 * 20-1249MB/s high     (18000 ints/s)
	 * > 40000pps  ultra    (8000 ints/s)
	 */
	new_flow_level = ring_group->coal.flow_level;
	new_int_gl = ring_group->coal.int_gl;
	time_passed_ms =
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);

	if (!time_passed_ms)
		return false;

	do_div(ring_group->total_packets, time_passed_ms);
	packets_per_msecs = ring_group->total_packets;

	do_div(ring_group->total_bytes, time_passed_ms);
	bytes_per_msecs = ring_group->total_bytes;

#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

#define HNS3_RX_ULTRA_PACKET_RATE 40

	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	    &tqp_vector->rx_group == ring_group)
		new_flow_level = HNS3_FLOW_ULTRA;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;
	if (new_int_gl != ring_group->coal.int_gl) {
		ring_group->coal.int_gl = new_int_gl;
		return true;
	}

	return false;
}
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	bool rx_update, tx_update;

	/* update param every 1000ms */
	if (time_before(jiffies,
			tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
		return;

	if (rx_group->coal.gl_adapt_enable) {
		rx_update = hns3_get_new_int_gl(rx_group);
		if (rx_update)
			hns3_set_vector_coalesce_rx_gl(tqp_vector,
						       rx_group->coal.int_gl);
	}

	if (tx_group->coal.gl_adapt_enable) {
		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
		if (tx_update)
			hns3_set_vector_coalesce_tx_gl(tqp_vector,
						       tx_group->coal.int_gl);
	}

	tqp_vector->last_jiffies = jiffies;
}
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group)
		hns3_clean_tx_ring(ring);

	/* make sure rx ring budget not smaller than 1 */
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	if (napi_complete(napi) &&
	    likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		hns3_update_new_int_gl(tqp_vector);
		hns3_mask_vector_irq(tqp_vector, 1);
	}

	return rx_pkt_total;
}
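/* Build the TX-then-RX ring chain that describes every ring serviced by
 * this vector; the chain is handed to the AE layer to program the
 * ring-to-vector mapping, and any nodes allocated here are freed again
 * on failure.
 */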
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_TX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				goto err_free_chain;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				      HNAE3_RING_TYPE_TX);
			hnae3_set_field(chain->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S,
					HNAE3_RING_GL_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			goto err_free_chain;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;

err_free_chain:
	cur_chain = head->next;
	while (cur_chain) {
		chain = cur_chain->next;
		devm_kfree(&pdev->dev, cur_chain);
		cur_chain = chain;
	}
	head->next = NULL;

	return -ENOMEM;
}
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}
static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;
}
static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
{
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_tqp_vector *tqp_vector;
	int num_vectors = priv->vector_num;
	int numa_node;
	int vector_i;

	numa_node = dev_to_node(&pdev->dev);

	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
		tqp_vector = &priv->tqp_vector[vector_i];
		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
				&tqp_vector->affinity_mask);
	}
}
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret = 0;
	int i;

	hns3_nic_set_cpumask(priv);

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			goto map_ring_fail;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (ret)
			goto map_ring_fail;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

	return 0;

map_ring_fail:
	while (i--)
		netif_napi_del(&priv->tqp_vector[i].napi);

	return ret;
}
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
#define HNS3_VECTOR_PF_MAX_NUM		64

	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);

	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_gl_rl_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}
static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}
static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			return ret;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
			irq_set_affinity_notifier(tqp_vector->vector_irq,
						  NULL);
			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
			free_irq(tqp_vector->vector_irq, tqp_vector);
			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
		}

		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}

	return 0;
}
static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return ret;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
	return 0;
}
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = q->desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}
static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret) {
		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
		return ret;
	}

	return 0;
}
static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
				       sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	while (i--) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}
static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}
static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}
static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}
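/* Program the ring base address and length into hardware. The high bits
 * of the 64-bit DMA address are written as (dma >> 31) >> 1, a two-step
 * shift that also compiles cleanly when dma_addr_t is only 32 bits wide.
 */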
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);

	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}
static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	int i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
		int j;

		if (!tc_info->enable)
			continue;

		for (j = 0; j < tc_info->tqp_count; j++) {
			struct hnae3_queue *q;

			q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
				       tc_info->tc);
		}
	}
}
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}
int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	return 0;
}
/* Set mac addr if it is configured, or leave it to the AE driver */
static int hns3_init_mac_addr(struct net_device *netdev, bool init)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];
	int ret = 0;

	if (h->ae_algo->ops->get_mac_addr && init) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);

	return ret;
}
static int hns3_restore_fd_rules(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->restore_fd_rules)
		ret = h->ae_algo->ops->restore_fd_rules(h);

	return ret;
}
static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->del_all_fd_entries)
		h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	priv->ops.fill_desc = hns3_fill_desc;
	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6))
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	else
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
static int hns3_client_start(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_start)
		return 0;

	return handle->ae_algo->ops->client_start(handle);
}

static void hns3_client_stop(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_stop)
		return;

	handle->ae_algo->ops->client_stop(handle);
}
static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	u16 alloc_tqps, max_rss_size;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
						    &max_rss_size);
	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev, true);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto out_reg_netdev_fail;
	}

	hns3_dcbnl_setup(handle);

	hns3_dbg_init(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
	netdev->max_mtu = HNS3_MAX_MTU;

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

out_reg_netdev_fail:
	hns3_uninit_all_ring(priv);
out_init_ring_data:
	(void)hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring_data = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}
static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_client_stop(handle);

	hns3_remove_hw_addr(netdev);

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		goto out_netdev_free;
	}

	hns3_del_all_fd_rules(netdev, true);

	hns3_force_clear_all_rx_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret)
		netdev_err(netdev, "uninit vector error\n");

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	hns3_dbg_uninit(handle);

	priv->ring_data = NULL;

out_netdev_free:
	free_netdev(netdev);
}
static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}
static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	bool if_running;
	int ret;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	if_running = netif_running(ndev);

	if (if_running)
		(void)hns3_nic_net_stop(ndev);

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
		kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
	if (ret)
		goto err_out;

	ret = hns3_nic_set_real_num_queue(ndev);

err_out:
	if (if_running)
		(void)hns3_nic_net_open(ndev);

	return ret;
}
static int hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;
	int ret = 0;

	/* go through and sync uc_addr entries to the device */
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		ret = hns3_nic_uc_sync(ndev, ha->addr);
		if (ret)
			return ret;
	}

	/* go through and sync mc_addr entries to the device */
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		ret = hns3_nic_mc_sync(ndev, ha->addr);
		if (ret)
			return ret;
	}

	return ret;
}
static void hns3_remove_hw_addr(struct net_device *netdev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	hns3_nic_uc_unsync(netdev, netdev->dev_addr);

	/* go through and unsync uc_addr entries to the device */
	list = &netdev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_unsync(netdev, ha->addr);

	/* go through and unsync mc_addr entries to the device */
	list = &netdev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		if (ha->refcount > 1)
			hns3_nic_mc_unsync(netdev, ha->addr);
}
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean);
		ring_ptr_move_fw(ring, next_to_clean);
	}
}
static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so we need to replace the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);
				/* if alloc new buffer fail, exit directly
				 * and reclear in up flow.
				 */
				netdev_warn(ring->tqp->handle->kinfo.netdev,
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use,
					    &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	return 0;
}
static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has been
		 * freed in hns3_handle_rx_bd or will be freed by
		 * stack, so only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *ring;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_force_clear_rx_ring(ring);
	}
}
static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */
		hns3_clear_rx_ring(ring);
	}
}
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = h->ae_algo->ops->reset_queue(h, i);
		if (ret)
			return ret;

		hns3_init_ring_hw(priv->ring_data[i].ring);

		/* We need to clear tx ring here because self test will
		 * use the ring and will not run down before up
		 */
		hns3_clear_tx_ring(priv->ring_data[i].ring);
		priv->ring_data[i].ring->next_to_clean = 0;
		priv->ring_data[i].ring->next_to_use = 0;

		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

		/* We can not know the hardware head and tail when this
		 * function is called in reset flow, so we reuse all desc.
		 */
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	hns3_init_tx_ring_tc(priv);

	return 0;
}
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
	/* ethtool only supports setting and querying one coalesce
	 * configuration for now, so save vector 0's coalesce
	 * configuration here in order to restore it.
	 */
	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
}
static void hns3_restore_coal(struct hns3_nic_priv *priv)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
		       sizeof(struct hns3_enet_coalesce));
	}
}
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
		return 0;

	/* it is cumbersome for hardware to pick-and-choose entries for
	 * deletion from table space. Hence, for function reset, software
	 * intervention is required to delete the entries.
	 */
	if (hns3_dev_ongoing_func_reset(ae_dev)) {
		hns3_remove_hw_addr(ndev);
		hns3_del_all_fd_rules(ndev, false);
	}

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}
static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_open(kinfo->netdev);
		if (ret) {
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}
	}

	return ret;
}
static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	bool vlan_filter_enable;
	int ret;

	ret = hns3_init_mac_addr(netdev, false);
	if (ret)
		return ret;

	ret = hns3_recover_hw_addr(netdev);
	if (ret)
		return ret;

	ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
	if (ret)
		return ret;

	vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
	hns3_enable_vlan_filter(netdev, vlan_filter_enable);

	/* Hardware table is only clear when pf resets */
	if (!(handle->flags & HNAE3_SUPPORT_VF)) {
		ret = hns3_restore_vlan(netdev);
		if (ret)
			return ret;
	}

	ret = hns3_restore_fd_rules(netdev);
	if (ret)
		return ret;

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		return ret;

	hns3_restore_coal(priv);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_dealloc_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_uninit_vector;

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
	priv->ring_data = NULL;
err_dealloc_vector:
	hns3_nic_dealloc_vector_data(priv);

	return ret;
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		return 0;
	}

	hns3_force_clear_all_rx_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		netdev_err(netdev, "uninit vector error\n");
		return ret;
	}

	hns3_store_coal(priv);

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	clear_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;
}
static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}
static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
	if (ret)
		return ret;

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_alloc_vector;

	hns3_restore_coal(priv);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_uninit_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_put_ring;

	return 0;

err_put_ring:
	hns3_put_ring_config(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_alloc_vector:
	hns3_nic_dealloc_vector_data(priv);
	return ret;
}
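/* Round the requested queue count down to a multiple of num_tc so every
 * traffic class gets the same number of queues, e.g. num_tc = 4 and
 * new_tqp_num = 10 yields 8.
 */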
static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
	return (new_tqp_num / num_tc) * num_tc;
}
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	bool if_running = netif_running(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EOPNOTSUPP;

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < kinfo->num_tc) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from %d to %d",
			kinfo->num_tc,
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
	if (kinfo->num_tqps == new_tqp_num)
		return 0;

	if (if_running)
		hns3_nic_net_stop(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		dev_err(&netdev->dev,
			"Unbind vector with tqp fail, nothing is changed");
		goto open_netdev;
	}

	hns3_store_coal(priv);

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);
	hns3_put_ring_config(priv);

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_modify_tqp_num(netdev, new_tqp_num);
	if (ret) {
		ret = hns3_modify_tqp_num(netdev, org_tqp_num);
		if (ret) {
			/* If reverting to the old tqp num failed, a fatal
			 * error occurred.
			 */
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, reverted to old tqp num");
	}

open_netdev:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}
static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};
/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	hns3_dbg_register_debugfs(hns3_driver_name);

	ret = hnae3_register_client(&client);
	if (ret)
		goto err_reg_client;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		goto err_reg_driver;

	return ret;

err_reg_driver:
	hnae3_unregister_client(&client);
err_reg_client:
	hns3_dbg_unregister_debugfs();
	return ret;
}
module_init(hns3_init_module);
/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
	hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
MODULE_VERSION(HNS3_MOD_VERSION);