/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, },
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

static irqreturn_t hns3_irq_handle(int irq, void *dev)
{
	struct hns3_enet_tqp_vector *tqp_vector = dev;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}

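/* The hard IRQ handler above does nothing but schedule NAPI; all TX
 * completion and RX processing for the queue pair is deferred to the
 * vector's napi poll routine, which keeps the time spent in interrupt
 * context minimal.
 */
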
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
	 * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

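/* A sketch of the intent, assuming hns3_rl_usec_to_reg() simply scales
 * microseconds into the 4 us register unit noted above: rl_value = 100
 * would program rl_reg = 25, and the enable bit is only set while both
 * TX and RX self-adaptive GL are off, since a fixed rate limit and
 * adaptive GL are not meant to be active at the same time.
 */
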
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	/* Default: disable RL */
	h->kinfo.int_rl_setting = 0;

	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}

static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}

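/* Runtime tuning normally goes through ethtool's coalescing interface
 * rather than these helpers directly, e.g. (illustrative only):
 *   ethtool -C eth0 adaptive-rx off rx-usecs 50
 * which would end up re-programming the GL/RL registers written above.
 */
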
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int ret;

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 free_tqps, max_rss_size, max_tqps;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
	max_tqps = h->kinfo.num_tc * max_rss_size;

	return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
}

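/* Example: with num_tc = 4 and max_rss_size = 16 the TC-based limit is
 * 64 queues; if the device only has free_tqps + num_tqps = 32 TQPs in
 * total, the helper above reports 32 instead.
 */
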
static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	return 0;

out_start_err:
	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	priv->ae_handle->last_reset_time = jiffies;
	return 0;
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

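/* Open and stop mirror each other: open sets the real queue count,
 * requests IRQs, enables the vectors and starts the ae_dev before
 * clearing HNS3_NIC_STATE_DOWN, while stop quiesces the TX queues and
 * carrier first so no new transmits race against hns3_nic_net_down().
 */
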
static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->set_promisc_mode) {
		if (netdev->flags & IFF_PROMISC)
			h->ae_algo->ops->set_promisc_mode(h, 1);
		else
			h->ae_algo->ops->set_promisc_mode(h, 0);
	}
	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
		netdev_err(netdev, "sync uc address fail\n");
	if (netdev->flags & IFF_MULTICAST)
		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
			netdev_err(netdev, "sync mc address fail\n");
}

420 static int hns3_set_tso(struct sk_buff
*skb
, u32
*paylen
,
421 u16
*mss
, u32
*type_cs_vlan_tso
)
423 u32 l4_offset
, hdr_len
;
424 union l3_hdr_info l3
;
425 union l4_hdr_info l4
;
429 if (!skb_is_gso(skb
))
432 ret
= skb_cow_head(skb
, 0);
436 l3
.hdr
= skb_network_header(skb
);
437 l4
.hdr
= skb_transport_header(skb
);
439 /* Software should clear the IPv4's checksum field when tso is
442 if (l3
.v4
->version
== 4)
446 if (skb_shinfo(skb
)->gso_type
& (SKB_GSO_GRE
|
449 SKB_GSO_UDP_TUNNEL_CSUM
)) {
450 if ((!(skb_shinfo(skb
)->gso_type
&
452 (skb_shinfo(skb
)->gso_type
&
453 SKB_GSO_UDP_TUNNEL_CSUM
)) {
454 /* Software should clear the udp's checksum
455 * field when tso is needed.
459 /* reset l3&l4 pointers from outer to inner headers */
460 l3
.hdr
= skb_inner_network_header(skb
);
461 l4
.hdr
= skb_inner_transport_header(skb
);
463 /* Software should clear the IPv4's checksum field when
466 if (l3
.v4
->version
== 4)
470 /* normal or tunnel packet*/
471 l4_offset
= l4
.hdr
- skb
->data
;
472 hdr_len
= (l4
.tcp
->doff
* 4) + l4_offset
;
474 /* remove payload length from inner pseudo checksum when tso*/
475 l4_paylen
= skb
->len
- l4_offset
;
476 csum_replace_by_diff(&l4
.tcp
->check
,
477 (__force __wsum
)htonl(l4_paylen
));
479 /* find the txbd field values */
480 *paylen
= skb
->len
- hdr_len
;
481 hnae_set_bit(*type_cs_vlan_tso
,
484 /* get MSS for TSO */
485 *mss
= skb_shinfo(skb
)->gso_size
;
490 static int hns3_get_l4_protocol(struct sk_buff
*skb
, u8
*ol4_proto
,
498 unsigned char *l4_hdr
;
499 unsigned char *exthdr
;
503 /* find outer header point */
504 l3
.hdr
= skb_network_header(skb
);
505 l4_hdr
= skb_transport_header(skb
);
507 if (skb
->protocol
== htons(ETH_P_IPV6
)) {
508 exthdr
= l3
.hdr
+ sizeof(*l3
.v6
);
509 l4_proto_tmp
= l3
.v6
->nexthdr
;
510 if (l4_hdr
!= exthdr
)
511 ipv6_skip_exthdr(skb
, exthdr
- skb
->data
,
512 &l4_proto_tmp
, &frag_off
);
513 } else if (skb
->protocol
== htons(ETH_P_IP
)) {
514 l4_proto_tmp
= l3
.v4
->protocol
;
519 *ol4_proto
= l4_proto_tmp
;
522 if (!skb
->encapsulation
) {
527 /* find inner header point */
528 l3
.hdr
= skb_inner_network_header(skb
);
529 l4_hdr
= skb_inner_transport_header(skb
);
531 if (l3
.v6
->version
== 6) {
532 exthdr
= l3
.hdr
+ sizeof(*l3
.v6
);
533 l4_proto_tmp
= l3
.v6
->nexthdr
;
534 if (l4_hdr
!= exthdr
)
535 ipv6_skip_exthdr(skb
, exthdr
- skb
->data
,
536 &l4_proto_tmp
, &frag_off
);
537 } else if (l3
.v4
->version
== 4) {
538 l4_proto_tmp
= l3
.v4
->protocol
;
541 *il4_proto
= l4_proto_tmp
;
546 static void hns3_set_l2l3l4_len(struct sk_buff
*skb
, u8 ol4_proto
,
547 u8 il4_proto
, u32
*type_cs_vlan_tso
,
548 u32
*ol_type_vlan_len_msec
)
558 struct gre_base_hdr
*gre
;
561 unsigned char *l2_hdr
;
562 u8 l4_proto
= ol4_proto
;
569 l3
.hdr
= skb_network_header(skb
);
570 l4
.hdr
= skb_transport_header(skb
);
572 /* compute L2 header size for normal packet, defined in 2 Bytes */
573 l2_len
= l3
.hdr
- skb
->data
;
574 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L2LEN_M
,
575 HNS3_TXD_L2LEN_S
, l2_len
>> 1);
578 if (skb
->encapsulation
) {
579 /* compute OL2 header size, defined in 2 Bytes */
581 hnae_set_field(*ol_type_vlan_len_msec
,
583 HNS3_TXD_L2LEN_S
, ol2_len
>> 1);
585 /* compute OL3 header size, defined in 4 Bytes */
586 ol3_len
= l4
.hdr
- l3
.hdr
;
587 hnae_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_L3LEN_M
,
588 HNS3_TXD_L3LEN_S
, ol3_len
>> 2);
590 /* MAC in UDP, MAC in GRE (0x6558)*/
591 if ((ol4_proto
== IPPROTO_UDP
) || (ol4_proto
== IPPROTO_GRE
)) {
592 /* switch MAC header ptr from outer to inner header.*/
593 l2_hdr
= skb_inner_mac_header(skb
);
595 /* compute OL4 header size, defined in 4 Bytes. */
596 ol4_len
= l2_hdr
- l4
.hdr
;
597 hnae_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_L4LEN_M
,
598 HNS3_TXD_L4LEN_S
, ol4_len
>> 2);
600 /* switch IP header ptr from outer to inner header */
601 l3
.hdr
= skb_inner_network_header(skb
);
603 /* compute inner l2 header size, defined in 2 Bytes. */
604 l2_len
= l3
.hdr
- l2_hdr
;
605 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L2LEN_M
,
606 HNS3_TXD_L2LEN_S
, l2_len
>> 1);
608 /* skb packet types not supported by hardware,
609 * txbd len fild doesn't be filled.
614 /* switch L4 header pointer from outer to inner */
615 l4
.hdr
= skb_inner_transport_header(skb
);
617 l4_proto
= il4_proto
;
620 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
621 l3_len
= l4
.hdr
- l3
.hdr
;
622 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3LEN_M
,
623 HNS3_TXD_L3LEN_S
, l3_len
>> 2);
625 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
628 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_M
,
629 HNS3_TXD_L4LEN_S
, l4
.tcp
->doff
);
632 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_M
,
633 HNS3_TXD_L4LEN_S
, (sizeof(struct sctphdr
) >> 2));
636 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_M
,
637 HNS3_TXD_L4LEN_S
, (sizeof(struct udphdr
) >> 2));
640 /* skb packet types not supported by hardware,
641 * txbd len fild doesn't be filled.
647 static int hns3_set_l3l4_type_csum(struct sk_buff
*skb
, u8 ol4_proto
,
648 u8 il4_proto
, u32
*type_cs_vlan_tso
,
649 u32
*ol_type_vlan_len_msec
)
656 u32 l4_proto
= ol4_proto
;
658 l3
.hdr
= skb_network_header(skb
);
660 /* define OL3 type and tunnel type(OL4).*/
661 if (skb
->encapsulation
) {
662 /* define outer network header type.*/
663 if (skb
->protocol
== htons(ETH_P_IP
)) {
665 hnae_set_field(*ol_type_vlan_len_msec
,
666 HNS3_TXD_OL3T_M
, HNS3_TXD_OL3T_S
,
667 HNS3_OL3T_IPV4_CSUM
);
669 hnae_set_field(*ol_type_vlan_len_msec
,
670 HNS3_TXD_OL3T_M
, HNS3_TXD_OL3T_S
,
671 HNS3_OL3T_IPV4_NO_CSUM
);
673 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
674 hnae_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_OL3T_M
,
675 HNS3_TXD_OL3T_S
, HNS3_OL3T_IPV6
);
678 /* define tunnel type(OL4).*/
681 hnae_set_field(*ol_type_vlan_len_msec
,
684 HNS3_TUN_MAC_IN_UDP
);
687 hnae_set_field(*ol_type_vlan_len_msec
,
693 /* drop the skb tunnel packet if hardware don't support,
694 * because hardware can't calculate csum when TSO.
699 /* the stack computes the IP header already,
700 * driver calculate l4 checksum when not TSO.
702 skb_checksum_help(skb
);
706 l3
.hdr
= skb_inner_network_header(skb
);
707 l4_proto
= il4_proto
;
710 if (l3
.v4
->version
== 4) {
711 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3T_M
,
712 HNS3_TXD_L3T_S
, HNS3_L3T_IPV4
);
714 /* the stack computes the IP header already, the only time we
715 * need the hardware to recompute it is in the case of TSO.
718 hnae_set_bit(*type_cs_vlan_tso
, HNS3_TXD_L3CS_B
, 1);
720 hnae_set_bit(*type_cs_vlan_tso
, HNS3_TXD_L4CS_B
, 1);
721 } else if (l3
.v6
->version
== 6) {
722 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3T_M
,
723 HNS3_TXD_L3T_S
, HNS3_L3T_IPV6
);
724 hnae_set_bit(*type_cs_vlan_tso
, HNS3_TXD_L4CS_B
, 1);
729 hnae_set_field(*type_cs_vlan_tso
,
735 hnae_set_field(*type_cs_vlan_tso
,
741 hnae_set_field(*type_cs_vlan_tso
,
747 /* drop the skb tunnel packet if hardware don't support,
748 * because hardware can't calculate csum when TSO.
753 /* the stack computes the IP header already,
754 * driver calculate l4 checksum when not TSO.
756 skb_checksum_help(skb
);
static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
		       HNS3_TXD_BDTYPE_S, 0);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}

773 static int hns3_fill_desc_vtags(struct sk_buff
*skb
,
774 struct hns3_enet_ring
*tx_ring
,
775 u32
*inner_vlan_flag
,
780 #define HNS3_TX_VLAN_PRIO_SHIFT 13
782 if (skb
->protocol
== htons(ETH_P_8021Q
) &&
783 !(tx_ring
->tqp
->handle
->kinfo
.netdev
->features
&
784 NETIF_F_HW_VLAN_CTAG_TX
)) {
785 /* When HW VLAN acceleration is turned off, and the stack
786 * sets the protocol to 802.1q, the driver just need to
787 * set the protocol to the encapsulated ethertype.
789 skb
->protocol
= vlan_get_protocol(skb
);
793 if (skb_vlan_tag_present(skb
)) {
796 vlan_tag
= skb_vlan_tag_get(skb
);
797 vlan_tag
|= (skb
->priority
& 0x7) << HNS3_TX_VLAN_PRIO_SHIFT
;
799 /* Based on hw strategy, use out_vtag in two layer tag case,
800 * and use inner_vtag in one tag case.
802 if (skb
->protocol
== htons(ETH_P_8021Q
)) {
803 hnae_set_bit(*out_vlan_flag
, HNS3_TXD_OVLAN_B
, 1);
804 *out_vtag
= vlan_tag
;
806 hnae_set_bit(*inner_vlan_flag
, HNS3_TXD_VLAN_B
, 1);
807 *inner_vtag
= vlan_tag
;
809 } else if (skb
->protocol
== htons(ETH_P_8021Q
)) {
810 struct vlan_ethhdr
*vhdr
;
813 rc
= skb_cow_head(skb
, 0);
816 vhdr
= (struct vlan_ethhdr
*)skb
->data
;
817 vhdr
->h_vlan_TCI
|= cpu_to_be16((skb
->priority
& 0x7)
818 << HNS3_TX_VLAN_PRIO_SHIFT
);
821 skb
->protocol
= vlan_get_protocol(skb
);
825 static int hns3_fill_desc(struct hns3_enet_ring
*ring
, void *priv
,
826 int size
, dma_addr_t dma
, int frag_end
,
827 enum hns_desc_type type
)
829 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
830 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_use
];
831 u32 ol_type_vlan_len_msec
= 0;
832 u16 bdtp_fe_sc_vld_ra_ri
= 0;
833 u32 type_cs_vlan_tso
= 0;
844 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
845 desc_cb
->priv
= priv
;
846 desc_cb
->length
= size
;
848 desc_cb
->type
= type
;
850 /* now, fill the descriptor */
851 desc
->addr
= cpu_to_le64(dma
);
852 desc
->tx
.send_size
= cpu_to_le16((u16
)size
);
853 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri
, frag_end
);
854 desc
->tx
.bdtp_fe_sc_vld_ra_ri
= cpu_to_le16(bdtp_fe_sc_vld_ra_ri
);
856 if (type
== DESC_TYPE_SKB
) {
857 skb
= (struct sk_buff
*)priv
;
860 ret
= hns3_fill_desc_vtags(skb
, ring
, &type_cs_vlan_tso
,
861 &ol_type_vlan_len_msec
,
862 &inner_vtag
, &out_vtag
);
866 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
867 skb_reset_mac_len(skb
);
868 protocol
= skb
->protocol
;
870 ret
= hns3_get_l4_protocol(skb
, &ol4_proto
, &il4_proto
);
873 hns3_set_l2l3l4_len(skb
, ol4_proto
, il4_proto
,
875 &ol_type_vlan_len_msec
);
876 ret
= hns3_set_l3l4_type_csum(skb
, ol4_proto
, il4_proto
,
878 &ol_type_vlan_len_msec
);
882 ret
= hns3_set_tso(skb
, &paylen
, &mss
,
889 desc
->tx
.ol_type_vlan_len_msec
=
890 cpu_to_le32(ol_type_vlan_len_msec
);
891 desc
->tx
.type_cs_vlan_tso_len
=
892 cpu_to_le32(type_cs_vlan_tso
);
893 desc
->tx
.paylen
= cpu_to_le32(paylen
);
894 desc
->tx
.mss
= cpu_to_le16(mss
);
895 desc
->tx
.vlan_tag
= cpu_to_le16(inner_vtag
);
896 desc
->tx
.outer_vlan_tag
= cpu_to_le16(out_vtag
);
899 /* move ring pointer to next.*/
900 ring_ptr_move_fw(ring
, next_to_use
);
905 static int hns3_fill_desc_tso(struct hns3_enet_ring
*ring
, void *priv
,
906 int size
, dma_addr_t dma
, int frag_end
,
907 enum hns_desc_type type
)
909 unsigned int frag_buf_num
;
914 frag_buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
915 sizeoflast
= size
% HNS3_MAX_BD_SIZE
;
916 sizeoflast
= sizeoflast
? sizeoflast
: HNS3_MAX_BD_SIZE
;
918 /* When the frag size is bigger than hardware, split this frag */
919 for (k
= 0; k
< frag_buf_num
; k
++) {
920 ret
= hns3_fill_desc(ring
, priv
,
921 (k
== frag_buf_num
- 1) ?
922 sizeoflast
: HNS3_MAX_BD_SIZE
,
923 dma
+ HNS3_MAX_BD_SIZE
* k
,
924 frag_end
&& (k
== frag_buf_num
- 1) ? 1 : 0,
925 (type
== DESC_TYPE_SKB
&& !k
) ?
926 DESC_TYPE_SKB
: DESC_TYPE_PAGE
);
934 static int hns3_nic_maybe_stop_tso(struct sk_buff
**out_skb
, int *bnum
,
935 struct hns3_enet_ring
*ring
)
937 struct sk_buff
*skb
= *out_skb
;
938 struct skb_frag_struct
*frag
;
945 size
= skb_headlen(skb
);
946 buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
948 frag_num
= skb_shinfo(skb
)->nr_frags
;
949 for (i
= 0; i
< frag_num
; i
++) {
950 frag
= &skb_shinfo(skb
)->frags
[i
];
951 size
= skb_frag_size(frag
);
953 (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
954 if (bdnum_for_frag
> HNS3_MAX_BD_PER_FRAG
)
957 buf_num
+= bdnum_for_frag
;
960 if (buf_num
> ring_space(ring
))
967 static int hns3_nic_maybe_stop_tx(struct sk_buff
**out_skb
, int *bnum
,
968 struct hns3_enet_ring
*ring
)
970 struct sk_buff
*skb
= *out_skb
;
973 /* No. of segments (plus a header) */
974 buf_num
= skb_shinfo(skb
)->nr_frags
+ 1;
976 if (buf_num
> ring_space(ring
))
984 static void hns_nic_dma_unmap(struct hns3_enet_ring
*ring
, int next_to_use_orig
)
986 struct device
*dev
= ring_to_dev(ring
);
989 for (i
= 0; i
< ring
->desc_num
; i
++) {
990 /* check if this is where we started */
991 if (ring
->next_to_use
== next_to_use_orig
)
994 /* unmap the descriptor dma address */
995 if (ring
->desc_cb
[ring
->next_to_use
].type
== DESC_TYPE_SKB
)
996 dma_unmap_single(dev
,
997 ring
->desc_cb
[ring
->next_to_use
].dma
,
998 ring
->desc_cb
[ring
->next_to_use
].length
,
1002 ring
->desc_cb
[ring
->next_to_use
].dma
,
1003 ring
->desc_cb
[ring
->next_to_use
].length
,
1007 ring_ptr_move_bw(ring
, next_to_use
);
1011 netdev_tx_t
hns3_nic_net_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
1013 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1014 struct hns3_nic_ring_data
*ring_data
=
1015 &tx_ring_data(priv
, skb
->queue_mapping
);
1016 struct hns3_enet_ring
*ring
= ring_data
->ring
;
1017 struct device
*dev
= priv
->dev
;
1018 struct netdev_queue
*dev_queue
;
1019 struct skb_frag_struct
*frag
;
1020 int next_to_use_head
;
1021 int next_to_use_frag
;
1029 /* Prefetch the data used later */
1030 prefetch(skb
->data
);
1032 switch (priv
->ops
.maybe_stop_tx(&skb
, &buf_num
, ring
)) {
1034 u64_stats_update_begin(&ring
->syncp
);
1035 ring
->stats
.tx_busy
++;
1036 u64_stats_update_end(&ring
->syncp
);
1038 goto out_net_tx_busy
;
1040 u64_stats_update_begin(&ring
->syncp
);
1041 ring
->stats
.sw_err_cnt
++;
1042 u64_stats_update_end(&ring
->syncp
);
1043 netdev_err(netdev
, "no memory to xmit!\n");
1050 /* No. of segments (plus a header) */
1051 seg_num
= skb_shinfo(skb
)->nr_frags
+ 1;
1052 /* Fill the first part */
1053 size
= skb_headlen(skb
);
1055 next_to_use_head
= ring
->next_to_use
;
1057 dma
= dma_map_single(dev
, skb
->data
, size
, DMA_TO_DEVICE
);
1058 if (dma_mapping_error(dev
, dma
)) {
1059 netdev_err(netdev
, "TX head DMA map failed\n");
1060 ring
->stats
.sw_err_cnt
++;
1064 ret
= priv
->ops
.fill_desc(ring
, skb
, size
, dma
, seg_num
== 1 ? 1 : 0,
1067 goto head_dma_map_err
;
1069 next_to_use_frag
= ring
->next_to_use
;
1070 /* Fill the fragments */
1071 for (i
= 1; i
< seg_num
; i
++) {
1072 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1073 size
= skb_frag_size(frag
);
1074 dma
= skb_frag_dma_map(dev
, frag
, 0, size
, DMA_TO_DEVICE
);
1075 if (dma_mapping_error(dev
, dma
)) {
1076 netdev_err(netdev
, "TX frag(%d) DMA map failed\n", i
);
1077 ring
->stats
.sw_err_cnt
++;
1078 goto frag_dma_map_err
;
1080 ret
= priv
->ops
.fill_desc(ring
, skb_frag_page(frag
), size
, dma
,
1081 seg_num
- 1 == i
? 1 : 0,
1085 goto frag_dma_map_err
;
1088 /* Complete translate all packets */
1089 dev_queue
= netdev_get_tx_queue(netdev
, ring_data
->queue_index
);
1090 netdev_tx_sent_queue(dev_queue
, skb
->len
);
1092 wmb(); /* Commit all data before submit */
1094 hnae_queue_xmit(ring
->tqp
, buf_num
);
1096 return NETDEV_TX_OK
;
1099 hns_nic_dma_unmap(ring
, next_to_use_frag
);
1102 hns_nic_dma_unmap(ring
, next_to_use_head
);
1105 dev_kfree_skb_any(skb
);
1106 return NETDEV_TX_OK
;
1109 netif_stop_subqueue(netdev
, ring_data
->queue_index
);
1110 smp_mb(); /* Commit all data before submit */
1112 return NETDEV_TX_BUSY
;
1115 static int hns3_nic_net_set_mac_address(struct net_device
*netdev
, void *p
)
1117 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1118 struct sockaddr
*mac_addr
= p
;
1121 if (!mac_addr
|| !is_valid_ether_addr((const u8
*)mac_addr
->sa_data
))
1122 return -EADDRNOTAVAIL
;
1124 ret
= h
->ae_algo
->ops
->set_mac_addr(h
, mac_addr
->sa_data
, false);
1126 netdev_err(netdev
, "set_mac_address fail, ret=%d!\n", ret
);
1130 ether_addr_copy(netdev
->dev_addr
, mac_addr
->sa_data
);
1135 static int hns3_nic_set_features(struct net_device
*netdev
,
1136 netdev_features_t features
)
1138 netdev_features_t changed
= netdev
->features
^ features
;
1139 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1140 struct hnae3_handle
*h
= priv
->ae_handle
;
1143 if (changed
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1144 if (features
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1145 priv
->ops
.fill_desc
= hns3_fill_desc_tso
;
1146 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tso
;
1148 priv
->ops
.fill_desc
= hns3_fill_desc
;
1149 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tx
;
1153 if ((changed
& NETIF_F_HW_VLAN_CTAG_FILTER
) &&
1154 h
->ae_algo
->ops
->enable_vlan_filter
) {
1155 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1156 h
->ae_algo
->ops
->enable_vlan_filter(h
, true);
1158 h
->ae_algo
->ops
->enable_vlan_filter(h
, false);
1161 if ((changed
& NETIF_F_HW_VLAN_CTAG_RX
) &&
1162 h
->ae_algo
->ops
->enable_hw_strip_rxvtag
) {
1163 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1164 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, true);
1166 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, false);
1172 netdev
->features
= features
;
1176 static void hns3_nic_get_stats64(struct net_device
*netdev
,
1177 struct rtnl_link_stats64
*stats
)
1179 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1180 int queue_num
= priv
->ae_handle
->kinfo
.num_tqps
;
1181 struct hnae3_handle
*handle
= priv
->ae_handle
;
1182 struct hns3_enet_ring
*ring
;
1192 if (test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
1195 handle
->ae_algo
->ops
->update_stats(handle
, &netdev
->stats
);
1197 for (idx
= 0; idx
< queue_num
; idx
++) {
1198 /* fetch the tx stats */
1199 ring
= priv
->ring_data
[idx
].ring
;
1201 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1202 tx_bytes
+= ring
->stats
.tx_bytes
;
1203 tx_pkts
+= ring
->stats
.tx_pkts
;
1204 tx_drop
+= ring
->stats
.tx_busy
;
1205 tx_drop
+= ring
->stats
.sw_err_cnt
;
1206 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1208 /* fetch the rx stats */
1209 ring
= priv
->ring_data
[idx
+ queue_num
].ring
;
1211 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1212 rx_bytes
+= ring
->stats
.rx_bytes
;
1213 rx_pkts
+= ring
->stats
.rx_pkts
;
1214 rx_drop
+= ring
->stats
.non_vld_descs
;
1215 rx_drop
+= ring
->stats
.err_pkt_len
;
1216 rx_drop
+= ring
->stats
.l2_err
;
1217 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1220 stats
->tx_bytes
= tx_bytes
;
1221 stats
->tx_packets
= tx_pkts
;
1222 stats
->rx_bytes
= rx_bytes
;
1223 stats
->rx_packets
= rx_pkts
;
1225 stats
->rx_errors
= netdev
->stats
.rx_errors
;
1226 stats
->multicast
= netdev
->stats
.multicast
;
1227 stats
->rx_length_errors
= netdev
->stats
.rx_length_errors
;
1228 stats
->rx_crc_errors
= netdev
->stats
.rx_crc_errors
;
1229 stats
->rx_missed_errors
= netdev
->stats
.rx_missed_errors
;
1231 stats
->tx_errors
= netdev
->stats
.tx_errors
;
1232 stats
->rx_dropped
= rx_drop
+ netdev
->stats
.rx_dropped
;
1233 stats
->tx_dropped
= tx_drop
+ netdev
->stats
.tx_dropped
;
1234 stats
->collisions
= netdev
->stats
.collisions
;
1235 stats
->rx_over_errors
= netdev
->stats
.rx_over_errors
;
1236 stats
->rx_frame_errors
= netdev
->stats
.rx_frame_errors
;
1237 stats
->rx_fifo_errors
= netdev
->stats
.rx_fifo_errors
;
1238 stats
->tx_aborted_errors
= netdev
->stats
.tx_aborted_errors
;
1239 stats
->tx_carrier_errors
= netdev
->stats
.tx_carrier_errors
;
1240 stats
->tx_fifo_errors
= netdev
->stats
.tx_fifo_errors
;
1241 stats
->tx_heartbeat_errors
= netdev
->stats
.tx_heartbeat_errors
;
1242 stats
->tx_window_errors
= netdev
->stats
.tx_window_errors
;
1243 stats
->rx_compressed
= netdev
->stats
.rx_compressed
;
1244 stats
->tx_compressed
= netdev
->stats
.tx_compressed
;
1247 static void hns3_add_tunnel_port(struct net_device
*netdev
, u16 port
,
1248 enum hns3_udp_tnl_type type
)
1250 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1251 struct hns3_udp_tunnel
*udp_tnl
= &priv
->udp_tnl
[type
];
1252 struct hnae3_handle
*h
= priv
->ae_handle
;
1254 if (udp_tnl
->used
&& udp_tnl
->dst_port
== port
) {
1259 if (udp_tnl
->used
) {
1261 "UDP tunnel [%d], port [%d] offload\n", type
, port
);
1265 udp_tnl
->dst_port
= port
;
1267 /* TBD send command to hardware to add port */
1268 if (h
->ae_algo
->ops
->add_tunnel_udp
)
1269 h
->ae_algo
->ops
->add_tunnel_udp(h
, port
);
1272 static void hns3_del_tunnel_port(struct net_device
*netdev
, u16 port
,
1273 enum hns3_udp_tnl_type type
)
1275 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1276 struct hns3_udp_tunnel
*udp_tnl
= &priv
->udp_tnl
[type
];
1277 struct hnae3_handle
*h
= priv
->ae_handle
;
1279 if (!udp_tnl
->used
|| udp_tnl
->dst_port
!= port
) {
1281 "Invalid UDP tunnel port %d\n", port
);
1289 udp_tnl
->dst_port
= 0;
1290 /* TBD send command to hardware to del port */
1291 if (h
->ae_algo
->ops
->del_tunnel_udp
)
1292 h
->ae_algo
->ops
->del_tunnel_udp(h
, port
);
1295 /* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports
1296 * @netdev: This physical ports's netdev
1297 * @ti: Tunnel information
1299 static void hns3_nic_udp_tunnel_add(struct net_device
*netdev
,
1300 struct udp_tunnel_info
*ti
)
1302 u16 port_n
= ntohs(ti
->port
);
1305 case UDP_TUNNEL_TYPE_VXLAN
:
1306 hns3_add_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_VXLAN
);
1308 case UDP_TUNNEL_TYPE_GENEVE
:
1309 hns3_add_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_GENEVE
);
1312 netdev_err(netdev
, "unsupported tunnel type %d\n", ti
->type
);
1317 static void hns3_nic_udp_tunnel_del(struct net_device
*netdev
,
1318 struct udp_tunnel_info
*ti
)
1320 u16 port_n
= ntohs(ti
->port
);
1323 case UDP_TUNNEL_TYPE_VXLAN
:
1324 hns3_del_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_VXLAN
);
1326 case UDP_TUNNEL_TYPE_GENEVE
:
1327 hns3_del_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_GENEVE
);
1334 static int hns3_setup_tc(struct net_device
*netdev
, void *type_data
)
1336 struct tc_mqprio_qopt_offload
*mqprio_qopt
= type_data
;
1337 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1338 struct hnae3_knic_private_info
*kinfo
= &h
->kinfo
;
1339 u8
*prio_tc
= mqprio_qopt
->qopt
.prio_tc_map
;
1340 u8 tc
= mqprio_qopt
->qopt
.num_tc
;
1341 u16 mode
= mqprio_qopt
->mode
;
1342 u8 hw
= mqprio_qopt
->qopt
.hw
;
1347 if (!((hw
== TC_MQPRIO_HW_OFFLOAD_TCS
&&
1348 mode
== TC_MQPRIO_MODE_CHANNEL
) || (!hw
&& tc
== 0)))
1351 if (tc
> HNAE3_MAX_TC
)
1357 if_running
= netif_running(netdev
);
1359 hns3_nic_net_stop(netdev
);
1363 ret
= (kinfo
->dcb_ops
&& kinfo
->dcb_ops
->setup_tc
) ?
1364 kinfo
->dcb_ops
->setup_tc(h
, tc
, prio_tc
) : -EOPNOTSUPP
;
1369 netdev_reset_tc(netdev
);
1371 ret
= netdev_set_num_tc(netdev
, tc
);
1375 for (i
= 0; i
< HNAE3_MAX_TC
; i
++) {
1376 if (!kinfo
->tc_info
[i
].enable
)
1379 netdev_set_tc_queue(netdev
,
1380 kinfo
->tc_info
[i
].tc
,
1381 kinfo
->tc_info
[i
].tqp_count
,
1382 kinfo
->tc_info
[i
].tqp_offset
);
1386 ret
= hns3_nic_set_real_num_queue(netdev
);
1390 hns3_nic_net_open(netdev
);
1395 static int hns3_nic_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
1398 if (type
!= TC_SETUP_QDISC_MQPRIO
)
1401 return hns3_setup_tc(dev
, type_data
);
1404 static int hns3_vlan_rx_add_vid(struct net_device
*netdev
,
1405 __be16 proto
, u16 vid
)
1407 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1408 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1411 if (h
->ae_algo
->ops
->set_vlan_filter
)
1412 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, false);
1415 set_bit(vid
, priv
->active_vlans
);
1420 static int hns3_vlan_rx_kill_vid(struct net_device
*netdev
,
1421 __be16 proto
, u16 vid
)
1423 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1424 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1427 if (h
->ae_algo
->ops
->set_vlan_filter
)
1428 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, true);
1431 clear_bit(vid
, priv
->active_vlans
);
1436 static void hns3_restore_vlan(struct net_device
*netdev
)
1438 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1442 for_each_set_bit(vid
, priv
->active_vlans
, VLAN_N_VID
) {
1443 ret
= hns3_vlan_rx_add_vid(netdev
, htons(ETH_P_8021Q
), vid
);
1445 netdev_warn(netdev
, "Restore vlan: %d filter, ret:%d\n",
1450 static int hns3_ndo_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
1451 u8 qos
, __be16 vlan_proto
)
1453 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1456 if (h
->ae_algo
->ops
->set_vf_vlan_filter
)
1457 ret
= h
->ae_algo
->ops
->set_vf_vlan_filter(h
, vf
, vlan
,
1463 static int hns3_nic_change_mtu(struct net_device
*netdev
, int new_mtu
)
1465 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1466 bool if_running
= netif_running(netdev
);
1469 if (!h
->ae_algo
->ops
->set_mtu
)
1472 /* if this was called with netdev up then bring netdevice down */
1474 (void)hns3_nic_net_stop(netdev
);
1478 ret
= h
->ae_algo
->ops
->set_mtu(h
, new_mtu
);
1480 netdev_err(netdev
, "failed to change MTU in hardware %d\n",
1485 netdev
->mtu
= new_mtu
;
1487 /* if the netdev was running earlier, bring it up again */
1488 if (if_running
&& hns3_nic_net_open(netdev
))
1494 static bool hns3_get_tx_timeo_queue_info(struct net_device
*ndev
)
1496 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1497 struct hns3_enet_ring
*tx_ring
= NULL
;
1498 int timeout_queue
= 0;
1499 int hw_head
, hw_tail
;
1502 /* Find the stopped queue the same way the stack does */
1503 for (i
= 0; i
< ndev
->real_num_tx_queues
; i
++) {
1504 struct netdev_queue
*q
;
1505 unsigned long trans_start
;
1507 q
= netdev_get_tx_queue(ndev
, i
);
1508 trans_start
= q
->trans_start
;
1509 if (netif_xmit_stopped(q
) &&
1511 (trans_start
+ ndev
->watchdog_timeo
))) {
1517 if (i
== ndev
->num_tx_queues
) {
1519 "no netdev TX timeout queue found, timeout count: %llu\n",
1520 priv
->tx_timeout_count
);
1524 tx_ring
= priv
->ring_data
[timeout_queue
].ring
;
1526 hw_head
= readl_relaxed(tx_ring
->tqp
->io_base
+
1527 HNS3_RING_TX_RING_HEAD_REG
);
1528 hw_tail
= readl_relaxed(tx_ring
->tqp
->io_base
+
1529 HNS3_RING_TX_RING_TAIL_REG
);
1531 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1532 priv
->tx_timeout_count
,
1534 tx_ring
->next_to_use
,
1535 tx_ring
->next_to_clean
,
1538 readl(tx_ring
->tqp_vector
->mask_addr
));
1543 static void hns3_nic_net_timeout(struct net_device
*ndev
)
1545 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1546 struct hnae3_handle
*h
= priv
->ae_handle
;
1548 if (!hns3_get_tx_timeo_queue_info(ndev
))
1551 priv
->tx_timeout_count
++;
1553 if (time_before(jiffies
, (h
->last_reset_time
+ ndev
->watchdog_timeo
)))
1556 /* request the reset */
1557 if (h
->ae_algo
->ops
->reset_event
)
1558 h
->ae_algo
->ops
->reset_event(h
);
static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_udp_tunnel_add	= hns3_nic_udp_tunnel_add,
	.ndo_udp_tunnel_del	= hns3_nic_udp_tunnel_del,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
};

/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
	if (!ae_dev)
		return -ENOMEM;

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	pci_set_drvdata(pdev, ae_dev);

	return hnae3_register_ae_dev(ae_dev);
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	hnae3_unregister_ae_dev(ae_dev);
}

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
};

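/* Note that probe only allocates the hnae3_ae_dev wrapper and registers
 * it with the hnae3 framework; creating the netdev and initialising the
 * hardware happen later, when the hnae3 core matches this ae_dev with a
 * registered algorithm and client (the hclge backend and the hns3 client).
 */
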
1626 /* set default feature to hns3 */
1627 static void hns3_set_default_feature(struct net_device
*netdev
)
1629 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
1631 netdev
->hw_enc_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1632 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1633 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1634 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1635 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1637 netdev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
1639 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
1641 netdev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1642 NETIF_F_HW_VLAN_CTAG_FILTER
|
1643 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1644 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1645 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1646 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1647 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1649 netdev
->vlan_features
|=
1650 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM
|
1651 NETIF_F_SG
| NETIF_F_GSO
| NETIF_F_GRO
|
1652 NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1653 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1654 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1656 netdev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1657 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1658 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1659 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1660 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1661 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1664 static int hns3_alloc_buffer(struct hns3_enet_ring
*ring
,
1665 struct hns3_desc_cb
*cb
)
1667 unsigned int order
= hnae_page_order(ring
);
1670 p
= dev_alloc_pages(order
);
1675 cb
->page_offset
= 0;
1677 cb
->buf
= page_address(p
);
1678 cb
->length
= hnae_page_size(ring
);
1679 cb
->type
= DESC_TYPE_PAGE
;
1684 static void hns3_free_buffer(struct hns3_enet_ring
*ring
,
1685 struct hns3_desc_cb
*cb
)
1687 if (cb
->type
== DESC_TYPE_SKB
)
1688 dev_kfree_skb_any((struct sk_buff
*)cb
->priv
);
1689 else if (!HNAE3_IS_TX_RING(ring
))
1690 put_page((struct page
*)cb
->priv
);
1691 memset(cb
, 0, sizeof(*cb
));
1694 static int hns3_map_buffer(struct hns3_enet_ring
*ring
, struct hns3_desc_cb
*cb
)
1696 cb
->dma
= dma_map_page(ring_to_dev(ring
), cb
->priv
, 0,
1697 cb
->length
, ring_to_dma_dir(ring
));
1699 if (dma_mapping_error(ring_to_dev(ring
), cb
->dma
))
1705 static void hns3_unmap_buffer(struct hns3_enet_ring
*ring
,
1706 struct hns3_desc_cb
*cb
)
1708 if (cb
->type
== DESC_TYPE_SKB
)
1709 dma_unmap_single(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1710 ring_to_dma_dir(ring
));
1712 dma_unmap_page(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1713 ring_to_dma_dir(ring
));
1716 static void hns3_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1718 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1719 ring
->desc
[i
].addr
= 0;
1722 static void hns3_free_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1724 struct hns3_desc_cb
*cb
= &ring
->desc_cb
[i
];
1726 if (!ring
->desc_cb
[i
].dma
)
1729 hns3_buffer_detach(ring
, i
);
1730 hns3_free_buffer(ring
, cb
);
1733 static void hns3_free_buffers(struct hns3_enet_ring
*ring
)
1737 for (i
= 0; i
< ring
->desc_num
; i
++)
1738 hns3_free_buffer_detach(ring
, i
);
1741 /* free desc along with its attached buffer */
1742 static void hns3_free_desc(struct hns3_enet_ring
*ring
)
1744 hns3_free_buffers(ring
);
1746 dma_unmap_single(ring_to_dev(ring
), ring
->desc_dma_addr
,
1747 ring
->desc_num
* sizeof(ring
->desc
[0]),
1749 ring
->desc_dma_addr
= 0;
1754 static int hns3_alloc_desc(struct hns3_enet_ring
*ring
)
1756 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
1758 ring
->desc
= kzalloc(size
, GFP_KERNEL
);
1762 ring
->desc_dma_addr
= dma_map_single(ring_to_dev(ring
), ring
->desc
,
1763 size
, DMA_BIDIRECTIONAL
);
1764 if (dma_mapping_error(ring_to_dev(ring
), ring
->desc_dma_addr
)) {
1765 ring
->desc_dma_addr
= 0;
1774 static int hns3_reserve_buffer_map(struct hns3_enet_ring
*ring
,
1775 struct hns3_desc_cb
*cb
)
1779 ret
= hns3_alloc_buffer(ring
, cb
);
1783 ret
= hns3_map_buffer(ring
, cb
);
1790 hns3_free_buffer(ring
, cb
);
1795 static int hns3_alloc_buffer_attach(struct hns3_enet_ring
*ring
, int i
)
1797 int ret
= hns3_reserve_buffer_map(ring
, &ring
->desc_cb
[i
]);
1802 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
1807 /* Allocate memory for raw pkg, and map with dma */
1808 static int hns3_alloc_ring_buffers(struct hns3_enet_ring
*ring
)
1812 for (i
= 0; i
< ring
->desc_num
; i
++) {
1813 ret
= hns3_alloc_buffer_attach(ring
, i
);
1815 goto out_buffer_fail
;
1821 for (j
= i
- 1; j
>= 0; j
--)
1822 hns3_free_buffer_detach(ring
, j
);
1826 /* detach a in-used buffer and replace with a reserved one */
1827 static void hns3_replace_buffer(struct hns3_enet_ring
*ring
, int i
,
1828 struct hns3_desc_cb
*res_cb
)
1830 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1831 ring
->desc_cb
[i
] = *res_cb
;
1832 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
1835 static void hns3_reuse_buffer(struct hns3_enet_ring
*ring
, int i
)
1837 ring
->desc_cb
[i
].reuse_flag
= 0;
1838 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
1839 + ring
->desc_cb
[i
].page_offset
);
1842 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring
*ring
, int *bytes
,
1845 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
1847 (*pkts
) += (desc_cb
->type
== DESC_TYPE_SKB
);
1848 (*bytes
) += desc_cb
->length
;
1849 /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
1850 hns3_free_buffer_detach(ring
, ring
->next_to_clean
);
1852 ring_ptr_move_fw(ring
, next_to_clean
);
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

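/* Example: with next_to_use = 100 and next_to_clean = 50, a hardware
 * head of 70 is valid (50 < 70 <= 100). If the ring has wrapped so that
 * next_to_use = 5 and next_to_clean = 1000, any head greater than 1000
 * or less than or equal to 5 is still accepted.
 */
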
1866 bool hns3_clean_tx_ring(struct hns3_enet_ring
*ring
, int budget
)
1868 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
1869 struct netdev_queue
*dev_queue
;
1873 head
= readl_relaxed(ring
->tqp
->io_base
+ HNS3_RING_TX_RING_HEAD_REG
);
1874 rmb(); /* Make sure head is ready before touch any data */
1876 if (is_ring_empty(ring
) || head
== ring
->next_to_clean
)
1877 return true; /* no data to poll */
1879 if (!is_valid_clean_head(ring
, head
)) {
1880 netdev_err(netdev
, "wrong head (%d, %d-%d)\n", head
,
1881 ring
->next_to_use
, ring
->next_to_clean
);
1883 u64_stats_update_begin(&ring
->syncp
);
1884 ring
->stats
.io_err_cnt
++;
1885 u64_stats_update_end(&ring
->syncp
);
1891 while (head
!= ring
->next_to_clean
&& budget
) {
1892 hns3_nic_reclaim_one_desc(ring
, &bytes
, &pkts
);
1893 /* Issue prefetch for next Tx descriptor */
1894 prefetch(&ring
->desc_cb
[ring
->next_to_clean
]);
1898 ring
->tqp_vector
->tx_group
.total_bytes
+= bytes
;
1899 ring
->tqp_vector
->tx_group
.total_packets
+= pkts
;
1901 u64_stats_update_begin(&ring
->syncp
);
1902 ring
->stats
.tx_bytes
+= bytes
;
1903 ring
->stats
.tx_pkts
+= pkts
;
1904 u64_stats_update_end(&ring
->syncp
);
1906 dev_queue
= netdev_get_tx_queue(netdev
, ring
->tqp
->tqp_index
);
1907 netdev_tx_completed_queue(dev_queue
, pkts
, bytes
);
1909 if (unlikely(pkts
&& netif_carrier_ok(netdev
) &&
1910 (ring_space(ring
) > HNS3_MAX_BD_PER_PKT
))) {
1911 /* Make sure that anybody stopping the queue after this
1912 * sees the new next_to_clean.
1915 if (netif_tx_queue_stopped(dev_queue
)) {
1916 netif_tx_wake_queue(dev_queue
);
1917 ring
->stats
.restart_queue
++;
static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

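/* Worked example: with desc_num = 1024, next_to_clean = 10 and
 * next_to_use = 500 the ring has wrapped, so the unused count is
 * 1024 + 10 - 500 = 534; when next_to_clean >= next_to_use it is
 * simply next_to_clean - next_to_use.
 */
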
1933 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring
*ring
, int cleand_count
)
1935 struct hns3_desc_cb
*desc_cb
;
1936 struct hns3_desc_cb res_cbs
;
1939 for (i
= 0; i
< cleand_count
; i
++) {
1940 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
1941 if (desc_cb
->reuse_flag
) {
1942 u64_stats_update_begin(&ring
->syncp
);
1943 ring
->stats
.reuse_pg_cnt
++;
1944 u64_stats_update_end(&ring
->syncp
);
1946 hns3_reuse_buffer(ring
, ring
->next_to_use
);
1948 ret
= hns3_reserve_buffer_map(ring
, &res_cbs
);
1950 u64_stats_update_begin(&ring
->syncp
);
1951 ring
->stats
.sw_err_cnt
++;
1952 u64_stats_update_end(&ring
->syncp
);
1954 netdev_err(ring
->tqp
->handle
->kinfo
.netdev
,
1955 "hnae reserve buffer map failed.\n");
1958 hns3_replace_buffer(ring
, ring
->next_to_use
, &res_cbs
);
1961 ring_ptr_move_fw(ring
, next_to_use
);
1964 wmb(); /* Make all data has been write before submit */
1965 writel_relaxed(i
, ring
->tqp
->io_base
+ HNS3_RING_RX_RING_HEAD_REG
);
1968 /* hns3_nic_get_headlen - determine size of header for LRO/GRO
1969 * @data: pointer to the start of the headers
1970 * @max: total length of section to find headers in
1972 * This function is meant to determine the length of headers that will
1973 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1974 * motivation of doing this is to only perform one pull for IPv4 TCP
1975 * packets so that we can do basic things like calculating the gso_size
1976 * based on the average data per packet.
1978 static unsigned int hns3_nic_get_headlen(unsigned char *data
, u32 flag
,
1979 unsigned int max_size
)
1981 unsigned char *network
;
1984 /* This should never happen, but better safe than sorry */
1985 if (max_size
< ETH_HLEN
)
1988 /* Initialize network frame pointer */
1991 /* Set first protocol and move network header forward */
1992 network
+= ETH_HLEN
;
1994 /* Handle any vlan tag if present */
1995 if (hnae_get_field(flag
, HNS3_RXD_VLAN_M
, HNS3_RXD_VLAN_S
)
1996 == HNS3_RX_FLAG_VLAN_PRESENT
) {
1997 if ((typeof(max_size
))(network
- data
) > (max_size
- VLAN_HLEN
))
2000 network
+= VLAN_HLEN
;
2003 /* Handle L3 protocols */
2004 if (hnae_get_field(flag
, HNS3_RXD_L3ID_M
, HNS3_RXD_L3ID_S
)
2005 == HNS3_RX_FLAG_L3ID_IPV4
) {
2006 if ((typeof(max_size
))(network
- data
) >
2007 (max_size
- sizeof(struct iphdr
)))
2010 /* Access ihl as a u8 to avoid unaligned access on ia64 */
2011 hlen
= (network
[0] & 0x0F) << 2;
2013 /* Verify hlen meets minimum size requirements */
2014 if (hlen
< sizeof(struct iphdr
))
2015 return network
- data
;
2017 /* Record next protocol if header is present */
2018 } else if (hnae_get_field(flag
, HNS3_RXD_L3ID_M
, HNS3_RXD_L3ID_S
)
2019 == HNS3_RX_FLAG_L3ID_IPV6
) {
2020 if ((typeof(max_size
))(network
- data
) >
2021 (max_size
- sizeof(struct ipv6hdr
)))
2024 /* Record next protocol */
2025 hlen
= sizeof(struct ipv6hdr
);
2027 return network
- data
;
2030 /* Relocate pointer to start of L4 header */
2033 /* Finally sort out TCP/UDP */
2034 if (hnae_get_field(flag
, HNS3_RXD_L4ID_M
, HNS3_RXD_L4ID_S
)
2035 == HNS3_RX_FLAG_L4ID_TCP
) {
2036 if ((typeof(max_size
))(network
- data
) >
2037 (max_size
- sizeof(struct tcphdr
)))
2040 /* Access doff as a u8 to avoid unaligned access on ia64 */
2041 hlen
= (network
[12] & 0xF0) >> 2;
2043 /* Verify hlen meets minimum size requirements */
2044 if (hlen
< sizeof(struct tcphdr
))
2045 return network
- data
;
2048 } else if (hnae_get_field(flag
, HNS3_RXD_L4ID_M
, HNS3_RXD_L4ID_S
)
2049 == HNS3_RX_FLAG_L4ID_UDP
) {
2050 if ((typeof(max_size
))(network
- data
) >
2051 (max_size
- sizeof(struct udphdr
)))
2054 network
+= sizeof(struct udphdr
);
2057 /* If everything has gone correctly network should be the
2058 * data section of the packet and will be the end of the header.
2059 * If not then it probably represents the end of the last recognized
2062 if ((typeof(max_size
))(network
- data
) < max_size
)
2063 return network
- data
;
2068 static void hns3_nic_reuse_page(struct sk_buff
*skb
, int i
,
2069 struct hns3_enet_ring
*ring
, int pull_len
,
2070 struct hns3_desc_cb
*desc_cb
)
2072 struct hns3_desc
*desc
;
2077 twobufs
= ((PAGE_SIZE
< 8192) &&
2078 hnae_buf_size(ring
) == HNS3_BUFFER_SIZE_2048
);
2080 desc
= &ring
->desc
[ring
->next_to_clean
];
2081 size
= le16_to_cpu(desc
->rx
.size
);
2083 truesize
= hnae_buf_size(ring
);
2086 last_offset
= hnae_page_size(ring
) - hnae_buf_size(ring
);
2088 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, desc_cb
->page_offset
+ pull_len
,
2089 size
- pull_len
, truesize
);
2091 /* Avoid re-using remote pages,flag default unreuse */
2092 if (unlikely(page_to_nid(desc_cb
->priv
) != numa_node_id()))
2096 /* If we are only owner of page we can reuse it */
2097 if (likely(page_count(desc_cb
->priv
) == 1)) {
2098 /* Flip page offset to other buffer */
2099 desc_cb
->page_offset
^= truesize
;
2101 desc_cb
->reuse_flag
= 1;
2102 /* bump ref count on page before it is given*/
2103 get_page(desc_cb
->priv
);
2108 /* Move offset up to the next cache line */
2109 desc_cb
->page_offset
+= truesize
;
2111 if (desc_cb
->page_offset
<= last_offset
) {
2112 desc_cb
->reuse_flag
= 1;
2113 /* Bump ref count on page before it is given*/
2114 get_page(desc_cb
->priv
);
2118 static void hns3_rx_checksum(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
,
2119 struct hns3_desc
*desc
)
2121 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2122 int l3_type
, l4_type
;
2127 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2128 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2130 skb
->ip_summed
= CHECKSUM_NONE
;
2132 skb_checksum_none_assert(skb
);
2134 if (!(netdev
->features
& NETIF_F_RXCSUM
))
2137 /* check if hardware has done checksum */
2138 if (!hnae_get_bit(bd_base_info
, HNS3_RXD_L3L4P_B
))
2141 if (unlikely(hnae_get_bit(l234info
, HNS3_RXD_L3E_B
) ||
2142 hnae_get_bit(l234info
, HNS3_RXD_L4E_B
) ||
2143 hnae_get_bit(l234info
, HNS3_RXD_OL3E_B
) ||
2144 hnae_get_bit(l234info
, HNS3_RXD_OL4E_B
))) {
2145 netdev_err(netdev
, "L3/L4 error pkt\n");
2146 u64_stats_update_begin(&ring
->syncp
);
2147 ring
->stats
.l3l4_csum_err
++;
2148 u64_stats_update_end(&ring
->syncp
);
2153 l3_type
= hnae_get_field(l234info
, HNS3_RXD_L3ID_M
,
2155 l4_type
= hnae_get_field(l234info
, HNS3_RXD_L4ID_M
,
2158 ol4_type
= hnae_get_field(l234info
, HNS3_RXD_OL4ID_M
, HNS3_RXD_OL4ID_S
);
2160 case HNS3_OL4_TYPE_MAC_IN_UDP
:
2161 case HNS3_OL4_TYPE_NVGRE
:
2162 skb
->csum_level
= 1;
2163 case HNS3_OL4_TYPE_NO_TUN
:
2164 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2165 if (l3_type
== HNS3_L3_TYPE_IPV4
||
2166 (l3_type
== HNS3_L3_TYPE_IPV6
&&
2167 (l4_type
== HNS3_L4_TYPE_UDP
||
2168 l4_type
== HNS3_L4_TYPE_TCP
||
2169 l4_type
== HNS3_L4_TYPE_SCTP
)))
2170 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	napi_gro_receive(&ring->tqp_vector->napi, skb);
}

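/* napi_gro_receive() gives the stack a chance to aggregate the skb via
 * GRO before it travels further up; this function matches the rx_fn
 * callback signature that hns3_clean_rx_ring() takes further below.
 */
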
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb, int *out_bnum)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	struct sk_buff *skb;
	unsigned char *va;
	u32 bd_base_info;
	int pull_len;
	u32 l234info;
	int length;
	int bnum;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	length = le16_to_cpu(desc->rx.pkt_len);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	/* Check valid BD */
	if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
		return -EFAULT;

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* Prefetch first cache line of first page.
	 * The idea is to cache a few bytes of the packet header. The L1
	 * cache line size is 64B, so we need to prefetch twice to bring in
	 * 128B. Caches with 128B L1 cache lines need only a single fetch to
	 * cache the relevant part of the header.
	 */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
					HNS3_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(netdev, "alloc rx skb fail\n");

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -ENOMEM;
	}

	prefetchw(skb->data);

	/* Based on hw strategy, the tag offloaded will be stored at
	 * ot_vlan_tag in the two layer tag case, and at vlan_tag in the
	 * one layer tag case.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		if (!(vlan_tag & VLAN_VID_MASK))
			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
		if (vlan_tag & VLAN_VID_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vlan_tag);
	}

	bnum = 1;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* We can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* This page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);
	} else {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.seg_pkt_cnt++;
		u64_stats_update_end(&ring->syncp);

		pull_len = hns3_nic_get_headlen(va, l234info,
						HNS3_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];
			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
			hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
			bnum++;
		}
	}

	*out_bnum = bnum;

	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
		netdev_err(netdev, "truncated pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
		netdev_err(netdev, "L2 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l2_err++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;
	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += skb->len;

	hns3_rx_checksum(ring, skb, desc);

	return 0;
}
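/* hns3_clean_rx_ring - reap up to @budget packets from an RX ring. The
 * amount of pending work is taken from the ring's FBDNUM register at
 * entry, consumed buffers are re-allocated in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE, and each successfully built skb is handed
 * to @rx_fn (hns3_rx_skb for normal receive, hns3_drop_skb_data when a
 * ring is only being flushed).
 */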
int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring);
	struct sk_buff *skb = NULL;
	int num, bnum = 0;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring);
		}

		/* Poll one packet */
		err = hns3_handle_rx_bd(ring, &skb, &bnum);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* Skip this erroneous packet */
			recv_pkts++;
			continue;
		}

		/* Hand the packet up to the IP stack */
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);

		recv_pkts++;
	}

out:
	/* Make sure all buffers have been written back before submitting */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}
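/* hns3_get_new_int_gl - choose a new interrupt gap-limit (GL) value for a
 * ring group from the byte and packet rates observed since the last
 * adjustment. Traffic is classified into low/mid/high/ultra flow levels,
 * which map to roughly 50K/20K/18K/8K interrupts per second. Returns true
 * only when the GL value changed and must be written to hardware.
 */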
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
	struct hns3_enet_tqp_vector *tqp_vector =
					ring_group->ring->tqp_vector;
	enum hns3_flow_level_range new_flow_level;
	int packets_per_msecs;
	int bytes_per_msecs;
	u32 time_passed_ms;
	u16 new_int_gl;

	if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

	/* Simple throttle rate management
	 * 0-10MB/s    lower   (50000 ints/s)
	 * 10-20MB/s   middle  (20000 ints/s)
	 * 20-1249MB/s high    (18000 ints/s)
	 * > 40000pps  ultra   (8000 ints/s)
	 */
	new_flow_level = ring_group->coal.flow_level;
	new_int_gl = ring_group->coal.int_gl;
	time_passed_ms =
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);

	if (!time_passed_ms)
		return false;

	do_div(ring_group->total_packets, time_passed_ms);
	packets_per_msecs = ring_group->total_packets;

	do_div(ring_group->total_bytes, time_passed_ms);
	bytes_per_msecs = ring_group->total_bytes;

#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

#define HNS3_RX_ULTRA_PACKET_RATE 40

	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	    &tqp_vector->rx_group == ring_group)
		new_flow_level = HNS3_FLOW_ULTRA;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;
	if (new_int_gl != ring_group->coal.int_gl) {
		ring_group->coal.int_gl = new_int_gl;
		return true;
	}

	return false;
}
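/* hns3_update_new_int_gl - run the adaptive coalescing algorithm for the
 * RX and TX ring groups of a vector once the hold-off counter
 * (int_adapt_down) has expired, and program any new GL values into the
 * hardware.
 */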
static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	bool rx_update, tx_update;

	if (tqp_vector->int_adapt_down > 0) {
		tqp_vector->int_adapt_down--;
		return;
	}

	if (rx_group->coal.gl_adapt_enable) {
		rx_update = hns3_get_new_int_gl(rx_group);
		if (rx_update)
			hns3_set_vector_coalesce_rx_gl(tqp_vector,
						       rx_group->coal.int_gl);
	}

	if (tx_group->coal.gl_adapt_enable) {
		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
		if (tx_update)
			hns3_set_vector_coalesce_tx_gl(tqp_vector,
						       tx_group->coal.int_gl);
	}

	tqp_vector->last_jiffies = jiffies;
	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
}
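/* hns3_nic_common_poll - NAPI poll callback shared by all TQP vectors.
 * TX rings are cleaned with the full budget, RX rings with an even share
 * of it (at least 1). Only when every ring finished its work is NAPI
 * completed, the adaptive coalescing updated and the vector interrupt
 * unmasked again.
 */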
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group) {
		if (!hns3_clean_tx_ring(ring, budget))
			clean_complete = false;
	}

	/* make sure rx ring budget not smaller than 1 */
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	napi_complete(napi);
	hns3_update_new_int_gl(tqp_vector);
	hns3_mask_vector_irq(tqp_vector, 1);

	return rx_pkt_total;
}
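/* hns3_get_vector_ring_chain - build the chain of ring nodes describing
 * which TX and RX rings are serviced by this vector, for handing to the
 * AE algorithm. The first node is the caller-provided head; additional
 * nodes are devm-allocated and later released by
 * hns3_free_vector_ring_chain().
 */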
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_TX);
		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				return -ENOMEM;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				     HNAE3_RING_TYPE_TX);
			hnae_set_field(chain->int_gl_idx,
				       HNAE3_RING_GL_IDX_M,
				       HNAE3_RING_GL_IDX_S,
				       HNAE3_RING_GL_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);
		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			return -ENOMEM;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);
		hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;
}
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}
static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;
}
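/* hns3_nic_init_vector_data - distribute the TX/RX rings round-robin over
 * the allocated TQP vectors, program the per-vector GL/RL settings, map
 * each ring chain to its vector through the AE ops and register the NAPI
 * poll handler for every vector.
 */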
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret = 0;
	u16 i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (ret)
			return ret;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}
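/* hns3_nic_alloc_vector_data - request interrupt vectors from the AE
 * algorithm (no more than min(online CPUs, TQP number)) and allocate the
 * matching array of hns3_enet_tqp_vector structures with their initial
 * coalesce settings.
 */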
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_gl_rl_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}
static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}
static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return ret;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
			(void)irq_set_affinity_hint(
				priv->tqp_vector[i].vector_irq,
				NULL);
			free_irq(priv->tqp_vector[i].vector_irq,
				 &priv->tqp_vector[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}

	return 0;
}
static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return ret;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
	return 0;
}
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = q->desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}
static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret)
		return ret;

	return 0;
}
static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
					sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}
static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}
static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}
static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}
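/* hns3_init_ring_hw - program the descriptor base address (split into low
 * and high halves), the buffer size type and the BD number into the RX or
 * TX queue registers of the hardware.
 */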
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		hns3_init_ring_hw(priv->ring_data[i].ring);

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}
int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		if (h->ae_algo->ops->reset_queue)
			h->ae_algo->ops->reset_queue(h, i);

		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	return 0;
}
/* Set the MAC address if one is configured, or else leave it to the AE driver */
static void hns3_init_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];

	if (h->ae_algo->ops->get_mac_addr) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
}
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}
}
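/* hns3_client_init - create and register the net_device for a newly
 * initialised HNAE3 handle: allocate a multi-queue etherdev, set up the
 * MAC address, features, netdev/ethtool ops, ring and vector data, then
 * register the netdev and the DCB hooks.
 */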
static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
				   hns3_get_max_available_channels(handle));
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->ae_handle->reset_level = HNAE3_NONE_RESET;
	priv->ae_handle->last_reset_time = jiffies;
	priv->tx_timeout_count = 0;

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	hns3_dcbnl_setup(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return ret;

out_reg_netdev_fail:
out_init_ring_data:
	(void)hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring_data = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}
static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret)
		netdev_err(netdev, "uninit vector error\n");

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	priv->ring_data = NULL;

	free_netdev(netdev);
}
static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}
static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	bool if_running;
	int ret;
	u8 i;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	if_running = netif_running(ndev);

	ret = netdev_set_num_tc(ndev, tc);
	if (ret)
		return ret;

	if (if_running)
		(void)hns3_nic_net_stop(ndev);

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
		kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
	if (ret)
		goto err_out;

	if (tc <= 1) {
		netdev_reset_tc(ndev);
		goto out;
	}

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];

		if (tc_info->enable)
			netdev_set_tc_queue(ndev,
					    tc_info->tc,
					    tc_info->tqp_count,
					    tc_info->tqp_offset);
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(ndev, i,
				       kinfo->prio_tc[i]);
	}

out:
	ret = hns3_nic_set_real_num_queue(ndev);

err_out:
	if (if_running)
		(void)hns3_nic_net_open(ndev);

	return ret;
}
static void hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	/* go through and sync uc_addr entries to the device */
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_sync(ndev, ha->addr);

	/* go through and sync mc_addr entries to the device */
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_mc_sync(ndev, ha->addr);
}
static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clean_tx_ring(ring, ring->desc_num);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
	}
}
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (!netif_running(ndev))
		return -EIO;

	return hns3_nic_net_stop(ndev);
}
static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	int ret = 0;

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_up(kinfo->netdev);
		if (ret) {
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}
		handle->last_reset_time = jiffies;
	}

	return ret;
}
static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_init_mac_addr(netdev);
	hns3_nic_set_rx_mode(netdev);
	hns3_recover_hw_addr(netdev);

	/* The hardware table is cleared only when the PF resets */
	if (!(handle->flags & HNAE3_SUPPORT_VF))
		hns3_restore_vlan(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		return ret;

	ret = hns3_init_all_ring(priv);
	if (ret) {
		hns3_nic_uninit_vector_data(priv);
		priv->ring_data = NULL;
	}

	return ret;
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_clear_all_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		netdev_err(netdev, "uninit vector error\n");
		return ret;
	}

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	priv->ring_data = NULL;

	return ret;
}
static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}
static void hns3_restore_coal(struct hns3_nic_priv *priv,
			      struct hns3_enet_coalesce *tx,
			      struct hns3_enet_coalesce *rx)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
		       sizeof(struct hns3_enet_coalesce));
	}
}
static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
			       struct hns3_enet_coalesce *tx,
			       struct hns3_enet_coalesce *rx)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
	if (ret)
		return ret;

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_alloc_vector;

	hns3_restore_coal(priv, tx, rx);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_uninit_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_put_ring;

	return 0;

err_put_ring:
	hns3_put_ring_config(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_alloc_vector:
	hns3_nic_dealloc_vector_data(priv);
	return ret;
}
static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
	return (new_tqp_num / num_tc) * num_tc;
}
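/* hns3_set_channels - "ethtool -L" handler. Only the combined channel
 * count may be changed; the requested value is rounded down to a multiple
 * of the TC number, the vector 0 coalesce settings are preserved across
 * the re-initialisation, and on failure the driver attempts to fall back
 * to the previous queue number.
 */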
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hns3_enet_coalesce tx_coal, rx_coal;
	bool if_running = netif_running(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EOPNOTSUPP;

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < kinfo->num_tc) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from %d to %d",
			kinfo->num_tc,
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
	if (kinfo->num_tqps == new_tqp_num)
		return 0;

	if (if_running)
		hns3_nic_net_stop(netdev);

	hns3_clear_all_ring(h);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		dev_err(&netdev->dev,
			"Unbind vector with tqp fail, nothing is changed");
		goto open_netdev;
	}

	/* Changing the tqp num may also change the vector num,
	 * ethtool only supports setting and querying one coalesce
	 * configuration for now, so save vector 0's coalesce
	 * configuration here in order to restore it.
	 */
	memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);
	hns3_put_ring_config(priv);

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
	if (ret) {
		ret = hns3_modify_tqp_num(netdev, org_tqp_num,
					  &tx_coal, &rx_coal);
		if (ret) {
			/* If revert to old tqp failed, fatal error occurred */
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, Revert to old tqp num");
	}

open_netdev:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}
static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};
/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);
/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");