drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
76ad4f0e
S
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/if_vlan.h>
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/skbuff.h>
19#include <linux/sctp.h>
20#include <linux/vermagic.h>
21#include <net/gre.h>
30d240df 22#include <net/pkt_cls.h>
76ad4f0e
S
23#include <net/vxlan.h>
24
25#include "hnae3.h"
26#include "hns3_enet.h"
27
1db9b1bf 28static const char hns3_driver_name[] = "hns3";
76ad4f0e
S
29const char hns3_driver_version[] = VERMAGIC_STRING;
30static const char hns3_driver_string[] =
31 "Hisilicon Ethernet Network Driver for Hip08 Family";
32static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
33static struct hnae3_client client;
34
35/* hns3_pci_tbl - PCI Device ID Table
36 *
37 * Last entry must be all 0s
38 *
39 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
40 * Class, Class Mask, private data (not used) }
41 */
42static const struct pci_device_id hns3_pci_tbl[] = {
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
e92a0843 45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
2daf4a65 46 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
e92a0843 47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
2daf4a65 48 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
e92a0843 49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
2daf4a65 50 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
e92a0843 51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
2daf4a65 52 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
e92a0843 53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
2daf4a65 54 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
a9c89a3f
SM
55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
76ad4f0e
S
57 /* required last entry */
58 {0, }
59};
60MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
61
62static irqreturn_t hns3_irq_handle(int irq, void *dev)
63{
64 struct hns3_enet_tqp_vector *tqp_vector = dev;
65
66 napi_schedule(&tqp_vector->napi);
67
68 return IRQ_HANDLED;
69}
70
71static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
72{
73 struct hns3_enet_tqp_vector *tqp_vectors;
74 unsigned int i;
75
76 for (i = 0; i < priv->vector_num; i++) {
77 tqp_vectors = &priv->tqp_vector[i];
78
79 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
80 continue;
81
82 /* release the irq resource */
83 free_irq(tqp_vectors->vector_irq, tqp_vectors);
84 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
85 }
86}
87
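/* Request one IRQ per initialized TQP vector. The IRQ name encodes the
 * netdev name, the vector type ("TxRx", "Rx" or "Tx") and a per-type
 * index so the vectors are easy to tell apart in /proc/interrupts.
 */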
88static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
89{
90 struct hns3_enet_tqp_vector *tqp_vectors;
91 int txrx_int_idx = 0;
92 int rx_int_idx = 0;
93 int tx_int_idx = 0;
94 unsigned int i;
95 int ret;
96
97 for (i = 0; i < priv->vector_num; i++) {
98 tqp_vectors = &priv->tqp_vector[i];
99
100 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
101 continue;
102
103 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
104 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
105 "%s-%s-%d", priv->netdev->name, "TxRx",
106 txrx_int_idx++);
107 txrx_int_idx++;
108 } else if (tqp_vectors->rx_group.ring) {
109 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
110 "%s-%s-%d", priv->netdev->name, "Rx",
111 rx_int_idx++);
112 } else if (tqp_vectors->tx_group.ring) {
113 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
114 "%s-%s-%d", priv->netdev->name, "Tx",
115 tx_int_idx++);
116 } else {
117 /* Skip this unused q_vector */
118 continue;
119 }
120
121 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
122
123 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
124 tqp_vectors->name,
125 tqp_vectors);
126 if (ret) {
127 netdev_err(priv->netdev, "request irq(%d) fail\n",
128 tqp_vectors->vector_irq);
129 return ret;
130 }
131
132 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
133 }
134
135 return 0;
136}
137
138static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
139 u32 mask_en)
140{
141 writel(mask_en, tqp_vector->mask_addr);
142}
143
144static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
145{
146 napi_enable(&tqp_vector->napi);
147
148 /* enable vector */
149 hns3_mask_vector_irq(tqp_vector, 1);
150}
151
152static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
153{
154 /* disable vector */
155 hns3_mask_vector_irq(tqp_vector, 0);
156
157 disable_irq(tqp_vector->vector_irq);
158 napi_disable(&tqp_vector->napi);
159}
160
161static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
162 u32 gl_value)
163{
164 /* this defines the configuration for GL (Interrupt Gap Limiter)
 165 * GL defines the inter-interrupt gap.
 166 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
167 */
168 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
169 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
170 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
171}
172
173static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
174 u32 rl_value)
175{
176 /* this defines the configuration for RL (Interrupt Rate Limiter).
 177 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
 178 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
179 */
180 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
181}
182
183static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
184{
185 /* initialize the configuration for interrupt coalescing.
186 * 1. GL (Interrupt Gap Limiter)
187 * 2. RL (Interrupt Rate Limiter)
188 */
189
 190 /* Default: enable interrupt coalescing */
191 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
192 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
193 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
194 /* for now we are disabling Interrupt RL - we
195 * will re-enable later
196 */
197 hns3_set_vector_coalesc_rl(tqp_vector, 0);
198 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
199 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
200}
201
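/* Tell the stack how many TX/RX queues are really usable:
 * rss_size queues for each enabled TC.
 */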
9df8f79a
YL
202static int hns3_nic_set_real_num_queue(struct net_device *netdev)
203{
9780cb97 204 struct hnae3_handle *h = hns3_get_handle(netdev);
9df8f79a
YL
205 struct hnae3_knic_private_info *kinfo = &h->kinfo;
206 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
207 int ret;
208
209 ret = netif_set_real_num_tx_queues(netdev, queue_size);
210 if (ret) {
211 netdev_err(netdev,
212 "netif_set_real_num_tx_queues fail, ret=%d!\n",
213 ret);
214 return ret;
215 }
216
217 ret = netif_set_real_num_rx_queues(netdev, queue_size);
218 if (ret) {
219 netdev_err(netdev,
220 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
221 return ret;
222 }
223
224 return 0;
225}
226
76ad4f0e
S
227static int hns3_nic_net_up(struct net_device *netdev)
228{
229 struct hns3_nic_priv *priv = netdev_priv(netdev);
230 struct hnae3_handle *h = priv->ae_handle;
231 int i, j;
232 int ret;
233
234 /* get irq resource for all vectors */
235 ret = hns3_nic_init_irq(priv);
236 if (ret) {
237 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
238 return ret;
239 }
240
241 /* enable the vectors */
242 for (i = 0; i < priv->vector_num; i++)
243 hns3_vector_enable(&priv->tqp_vector[i]);
244
245 /* start the ae_dev */
246 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
247 if (ret)
248 goto out_start_err;
249
a3083abb
JS
250 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
251
76ad4f0e
S
252 return 0;
253
254out_start_err:
255 for (j = i - 1; j >= 0; j--)
256 hns3_vector_disable(&priv->tqp_vector[j]);
257
258 hns3_nic_uninit_irq(priv);
259
260 return ret;
261}
262
263static int hns3_nic_net_open(struct net_device *netdev)
264{
f8fa222c 265 struct hns3_nic_priv *priv = netdev_priv(netdev);
76ad4f0e
S
266 int ret;
267
268 netif_carrier_off(netdev);
269
9df8f79a
YL
270 ret = hns3_nic_set_real_num_queue(netdev);
271 if (ret)
76ad4f0e 272 return ret;
76ad4f0e
S
273
274 ret = hns3_nic_net_up(netdev);
275 if (ret) {
276 netdev_err(netdev,
277 "hns net up fail, ret=%d!\n", ret);
278 return ret;
279 }
280
f8fa222c 281 priv->last_reset_time = jiffies;
76ad4f0e
S
282 return 0;
283}
284
285static void hns3_nic_net_down(struct net_device *netdev)
286{
287 struct hns3_nic_priv *priv = netdev_priv(netdev);
288 const struct hnae3_ae_ops *ops;
289 int i;
290
a3083abb
JS
291 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
292 return;
293
76ad4f0e
S
294 /* stop ae_dev */
295 ops = priv->ae_handle->ae_algo->ops;
296 if (ops->stop)
297 ops->stop(priv->ae_handle);
298
299 /* disable vectors */
300 for (i = 0; i < priv->vector_num; i++)
301 hns3_vector_disable(&priv->tqp_vector[i]);
302
303 /* free irq resources */
304 hns3_nic_uninit_irq(priv);
305}
306
307static int hns3_nic_net_stop(struct net_device *netdev)
308{
309 netif_tx_stop_all_queues(netdev);
310 netif_carrier_off(netdev);
311
312 hns3_nic_net_down(netdev);
313
314 return 0;
315}
316
76ad4f0e
S
317static int hns3_nic_uc_sync(struct net_device *netdev,
318 const unsigned char *addr)
319{
9780cb97 320 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
321
322 if (h->ae_algo->ops->add_uc_addr)
323 return h->ae_algo->ops->add_uc_addr(h, addr);
324
325 return 0;
326}
327
328static int hns3_nic_uc_unsync(struct net_device *netdev,
329 const unsigned char *addr)
330{
9780cb97 331 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
332
333 if (h->ae_algo->ops->rm_uc_addr)
334 return h->ae_algo->ops->rm_uc_addr(h, addr);
335
336 return 0;
337}
338
339static int hns3_nic_mc_sync(struct net_device *netdev,
340 const unsigned char *addr)
341{
9780cb97 342 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e 343
720a8478 344 if (h->ae_algo->ops->add_mc_addr)
76ad4f0e
S
345 return h->ae_algo->ops->add_mc_addr(h, addr);
346
347 return 0;
348}
349
350static int hns3_nic_mc_unsync(struct net_device *netdev,
351 const unsigned char *addr)
352{
9780cb97 353 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e 354
720a8478 355 if (h->ae_algo->ops->rm_mc_addr)
76ad4f0e
S
356 return h->ae_algo->ops->rm_mc_addr(h, addr);
357
358 return 0;
359}
360
1db9b1bf 361static void hns3_nic_set_rx_mode(struct net_device *netdev)
76ad4f0e 362{
9780cb97 363 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
364
365 if (h->ae_algo->ops->set_promisc_mode) {
366 if (netdev->flags & IFF_PROMISC)
367 h->ae_algo->ops->set_promisc_mode(h, 1);
368 else
369 h->ae_algo->ops->set_promisc_mode(h, 0);
370 }
371 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
372 netdev_err(netdev, "sync uc address fail\n");
373 if (netdev->flags & IFF_MULTICAST)
374 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
375 netdev_err(netdev, "sync mc address fail\n");
376}
377
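/* Prepare a GSO skb for hardware TSO: clear the IP/UDP checksum fields
 * that hardware recomputes, remove the payload length from the TCP
 * pseudo checksum, and return the payload length and MSS for the TX BD.
 */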
378static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
379 u16 *mss, u32 *type_cs_vlan_tso)
380{
381 u32 l4_offset, hdr_len;
382 union l3_hdr_info l3;
383 union l4_hdr_info l4;
384 u32 l4_paylen;
385 int ret;
386
387 if (!skb_is_gso(skb))
388 return 0;
389
390 ret = skb_cow_head(skb, 0);
391 if (ret)
392 return ret;
393
394 l3.hdr = skb_network_header(skb);
395 l4.hdr = skb_transport_header(skb);
396
 397 /* Software should clear the IPv4 checksum field when TSO
398 * needed.
399 */
400 if (l3.v4->version == 4)
401 l3.v4->check = 0;
402
403 /* tunnel packet.*/
404 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
405 SKB_GSO_GRE_CSUM |
406 SKB_GSO_UDP_TUNNEL |
407 SKB_GSO_UDP_TUNNEL_CSUM)) {
408 if ((!(skb_shinfo(skb)->gso_type &
409 SKB_GSO_PARTIAL)) &&
410 (skb_shinfo(skb)->gso_type &
411 SKB_GSO_UDP_TUNNEL_CSUM)) {
 412 /* Software should clear the UDP checksum
 413 * field when TSO is needed.
414 */
415 l4.udp->check = 0;
416 }
417 /* reset l3&l4 pointers from outer to inner headers */
418 l3.hdr = skb_inner_network_header(skb);
419 l4.hdr = skb_inner_transport_header(skb);
420
 421 /* Software should clear the IPv4 checksum field when
 422 * TSO is needed.
423 */
424 if (l3.v4->version == 4)
425 l3.v4->check = 0;
426 }
427
428 /* normal or tunnel packet*/
429 l4_offset = l4.hdr - skb->data;
430 hdr_len = (l4.tcp->doff * 4) + l4_offset;
431
432 /* remove payload length from inner pseudo checksum when tso*/
433 l4_paylen = skb->len - l4_offset;
434 csum_replace_by_diff(&l4.tcp->check,
435 (__force __wsum)htonl(l4_paylen));
436
437 /* find the txbd field values */
438 *paylen = skb->len - hdr_len;
439 hnae_set_bit(*type_cs_vlan_tso,
440 HNS3_TXD_TSO_B, 1);
441
442 /* get MSS for TSO */
443 *mss = skb_shinfo(skb)->gso_size;
444
445 return 0;
446}
447
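/* Work out the outer and (for encapsulated skbs) inner L4 protocol
 * numbers, skipping IPv6 extension headers where present.
 */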
1898d4e4
S
448static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
449 u8 *il4_proto)
76ad4f0e
S
450{
451 union {
452 struct iphdr *v4;
453 struct ipv6hdr *v6;
454 unsigned char *hdr;
455 } l3;
456 unsigned char *l4_hdr;
457 unsigned char *exthdr;
458 u8 l4_proto_tmp;
459 __be16 frag_off;
460
461 /* find outer header point */
462 l3.hdr = skb_network_header(skb);
463 l4_hdr = skb_inner_transport_header(skb);
464
465 if (skb->protocol == htons(ETH_P_IPV6)) {
466 exthdr = l3.hdr + sizeof(*l3.v6);
467 l4_proto_tmp = l3.v6->nexthdr;
468 if (l4_hdr != exthdr)
469 ipv6_skip_exthdr(skb, exthdr - skb->data,
470 &l4_proto_tmp, &frag_off);
471 } else if (skb->protocol == htons(ETH_P_IP)) {
472 l4_proto_tmp = l3.v4->protocol;
1898d4e4
S
473 } else {
474 return -EINVAL;
76ad4f0e
S
475 }
476
477 *ol4_proto = l4_proto_tmp;
478
479 /* tunnel packet */
480 if (!skb->encapsulation) {
481 *il4_proto = 0;
1898d4e4 482 return 0;
76ad4f0e
S
483 }
484
485 /* find inner header point */
486 l3.hdr = skb_inner_network_header(skb);
487 l4_hdr = skb_inner_transport_header(skb);
488
489 if (l3.v6->version == 6) {
490 exthdr = l3.hdr + sizeof(*l3.v6);
491 l4_proto_tmp = l3.v6->nexthdr;
492 if (l4_hdr != exthdr)
493 ipv6_skip_exthdr(skb, exthdr - skb->data,
494 &l4_proto_tmp, &frag_off);
495 } else if (l3.v4->version == 4) {
496 l4_proto_tmp = l3.v4->protocol;
497 }
498
499 *il4_proto = l4_proto_tmp;
1898d4e4
S
500
501 return 0;
76ad4f0e
S
502}
503
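/* Fill the header-length fields of the TX BD. L2 lengths are written in
 * units of 2 bytes and L3/L4 lengths in units of 4 bytes; for
 * encapsulated packets the outer OL2/OL3/OL4 lengths are filled too.
 */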
504static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
505 u8 il4_proto, u32 *type_cs_vlan_tso,
506 u32 *ol_type_vlan_len_msec)
507{
508 union {
509 struct iphdr *v4;
510 struct ipv6hdr *v6;
511 unsigned char *hdr;
512 } l3;
513 union {
514 struct tcphdr *tcp;
515 struct udphdr *udp;
516 struct gre_base_hdr *gre;
517 unsigned char *hdr;
518 } l4;
519 unsigned char *l2_hdr;
520 u8 l4_proto = ol4_proto;
521 u32 ol2_len;
522 u32 ol3_len;
523 u32 ol4_len;
524 u32 l2_len;
525 u32 l3_len;
526
527 l3.hdr = skb_network_header(skb);
528 l4.hdr = skb_transport_header(skb);
529
530 /* compute L2 header size for normal packet, defined in 2 Bytes */
531 l2_len = l3.hdr - skb->data;
532 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
533 HNS3_TXD_L2LEN_S, l2_len >> 1);
534
535 /* tunnel packet*/
536 if (skb->encapsulation) {
537 /* compute OL2 header size, defined in 2 Bytes */
538 ol2_len = l2_len;
539 hnae_set_field(*ol_type_vlan_len_msec,
540 HNS3_TXD_L2LEN_M,
541 HNS3_TXD_L2LEN_S, ol2_len >> 1);
542
543 /* compute OL3 header size, defined in 4 Bytes */
544 ol3_len = l4.hdr - l3.hdr;
545 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
546 HNS3_TXD_L3LEN_S, ol3_len >> 2);
547
548 /* MAC in UDP, MAC in GRE (0x6558)*/
549 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
550 /* switch MAC header ptr from outer to inner header.*/
551 l2_hdr = skb_inner_mac_header(skb);
552
553 /* compute OL4 header size, defined in 4 Bytes. */
554 ol4_len = l2_hdr - l4.hdr;
555 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
556 HNS3_TXD_L4LEN_S, ol4_len >> 2);
557
558 /* switch IP header ptr from outer to inner header */
559 l3.hdr = skb_inner_network_header(skb);
560
561 /* compute inner l2 header size, defined in 2 Bytes. */
562 l2_len = l3.hdr - l2_hdr;
563 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
564 HNS3_TXD_L2LEN_S, l2_len >> 1);
565 } else {
 566 /* skb packet types not supported by hardware;
 567 * the txbd len field is not filled.
568 */
569 return;
570 }
571
572 /* switch L4 header pointer from outer to inner */
573 l4.hdr = skb_inner_transport_header(skb);
574
575 l4_proto = il4_proto;
576 }
577
578 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
579 l3_len = l4.hdr - l3.hdr;
580 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
581 HNS3_TXD_L3LEN_S, l3_len >> 2);
582
583 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
584 switch (l4_proto) {
585 case IPPROTO_TCP:
586 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
587 HNS3_TXD_L4LEN_S, l4.tcp->doff);
588 break;
589 case IPPROTO_SCTP:
590 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
591 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
592 break;
593 case IPPROTO_UDP:
594 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
595 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
596 break;
597 default:
 598 /* skb packet types not supported by hardware;
 599 * the txbd len field is not filled.
600 */
601 return;
602 }
603}
604
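/* Fill the L3/L4 type (and tunnel type) fields of the TX BD and request
 * hardware checksum offload. Unsupported tunnel/L4 types fall back to
 * software checksumming, or are rejected when TSO is requested.
 */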
605static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
606 u8 il4_proto, u32 *type_cs_vlan_tso,
607 u32 *ol_type_vlan_len_msec)
608{
609 union {
610 struct iphdr *v4;
611 struct ipv6hdr *v6;
612 unsigned char *hdr;
613 } l3;
614 u32 l4_proto = ol4_proto;
615
616 l3.hdr = skb_network_header(skb);
617
618 /* define OL3 type and tunnel type(OL4).*/
619 if (skb->encapsulation) {
620 /* define outer network header type.*/
621 if (skb->protocol == htons(ETH_P_IP)) {
622 if (skb_is_gso(skb))
623 hnae_set_field(*ol_type_vlan_len_msec,
624 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
625 HNS3_OL3T_IPV4_CSUM);
626 else
627 hnae_set_field(*ol_type_vlan_len_msec,
628 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
629 HNS3_OL3T_IPV4_NO_CSUM);
630
631 } else if (skb->protocol == htons(ETH_P_IPV6)) {
632 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
633 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
634 }
635
636 /* define tunnel type(OL4).*/
637 switch (l4_proto) {
638 case IPPROTO_UDP:
639 hnae_set_field(*ol_type_vlan_len_msec,
640 HNS3_TXD_TUNTYPE_M,
641 HNS3_TXD_TUNTYPE_S,
642 HNS3_TUN_MAC_IN_UDP);
643 break;
644 case IPPROTO_GRE:
645 hnae_set_field(*ol_type_vlan_len_msec,
646 HNS3_TXD_TUNTYPE_M,
647 HNS3_TXD_TUNTYPE_S,
648 HNS3_TUN_NVGRE);
649 break;
650 default:
 651 /* drop the skb tunnel packet if hardware doesn't support it,
 652 * because hardware can't calculate the csum when doing TSO.
653 */
654 if (skb_is_gso(skb))
655 return -EDOM;
656
 657 /* the stack has already computed the IP header;
 658 * the driver calculates the l4 checksum when not doing TSO.
659 */
660 skb_checksum_help(skb);
661 return 0;
662 }
663
664 l3.hdr = skb_inner_network_header(skb);
665 l4_proto = il4_proto;
666 }
667
668 if (l3.v4->version == 4) {
669 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
670 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
671
672 /* the stack computes the IP header already, the only time we
673 * need the hardware to recompute it is in the case of TSO.
674 */
675 if (skb_is_gso(skb))
676 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
677
678 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
679 } else if (l3.v6->version == 6) {
680 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
681 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
682 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
683 }
684
685 switch (l4_proto) {
686 case IPPROTO_TCP:
687 hnae_set_field(*type_cs_vlan_tso,
688 HNS3_TXD_L4T_M,
689 HNS3_TXD_L4T_S,
690 HNS3_L4T_TCP);
691 break;
692 case IPPROTO_UDP:
693 hnae_set_field(*type_cs_vlan_tso,
694 HNS3_TXD_L4T_M,
695 HNS3_TXD_L4T_S,
696 HNS3_L4T_UDP);
697 break;
698 case IPPROTO_SCTP:
699 hnae_set_field(*type_cs_vlan_tso,
700 HNS3_TXD_L4T_M,
701 HNS3_TXD_L4T_S,
702 HNS3_L4T_SCTP);
703 break;
704 default:
 705 /* drop the skb tunnel packet if hardware doesn't support it,
 706 * because hardware can't calculate the csum when doing TSO.
707 */
708 if (skb_is_gso(skb))
709 return -EDOM;
710
 711 /* the stack has already computed the IP header;
 712 * the driver calculates the l4 checksum when not doing TSO.
713 */
714 skb_checksum_help(skb);
715 return 0;
716 }
717
718 return 0;
719}
720
721static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
722{
723 /* Config bd buffer end */
724 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
725 HNS3_TXD_BDTYPE_M, 0);
726 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
727 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
7036d26f 728 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
76ad4f0e
S
729}
730
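/* Select the VLAN tag to be inserted by hardware: the outer tag when
 * the packet already carries an in-band 802.1Q header, the inner tag
 * otherwise, with skb->priority folded into the PCP bits.
 */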
1fdd8dc5
PL
731static int hns3_fill_desc_vtags(struct sk_buff *skb,
732 struct hns3_enet_ring *tx_ring,
733 u32 *inner_vlan_flag,
734 u32 *out_vlan_flag,
735 u16 *inner_vtag,
736 u16 *out_vtag)
737{
738#define HNS3_TX_VLAN_PRIO_SHIFT 13
739
740 if (skb->protocol == htons(ETH_P_8021Q) &&
741 !(tx_ring->tqp->handle->kinfo.netdev->features &
742 NETIF_F_HW_VLAN_CTAG_TX)) {
743 /* When HW VLAN acceleration is turned off, and the stack
 744 * sets the protocol to 802.1q, the driver just needs to
745 * set the protocol to the encapsulated ethertype.
746 */
747 skb->protocol = vlan_get_protocol(skb);
748 return 0;
749 }
750
751 if (skb_vlan_tag_present(skb)) {
752 u16 vlan_tag;
753
754 vlan_tag = skb_vlan_tag_get(skb);
755 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
756
757 /* Based on hw strategy, use out_vtag in two layer tag case,
758 * and use inner_vtag in one tag case.
759 */
760 if (skb->protocol == htons(ETH_P_8021Q)) {
761 hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
762 *out_vtag = vlan_tag;
763 } else {
764 hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
765 *inner_vtag = vlan_tag;
766 }
767 } else if (skb->protocol == htons(ETH_P_8021Q)) {
768 struct vlan_ethhdr *vhdr;
769 int rc;
770
771 rc = skb_cow_head(skb, 0);
772 if (rc < 0)
773 return rc;
774 vhdr = (struct vlan_ethhdr *)skb->data;
775 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
776 << HNS3_TX_VLAN_PRIO_SHIFT);
777 }
778
779 skb->protocol = vlan_get_protocol(skb);
780 return 0;
781}
782
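/* Fill one TX buffer descriptor. For the skb (head) descriptor this
 * also resolves the VLAN tag, checksum offload fields and TSO
 * parameters; page (fragment) descriptors only carry address and size.
 */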
76ad4f0e
S
783static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
784 int size, dma_addr_t dma, int frag_end,
785 enum hns_desc_type type)
786{
787 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
788 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
789 u32 ol_type_vlan_len_msec = 0;
790 u16 bdtp_fe_sc_vld_ra_ri = 0;
791 u32 type_cs_vlan_tso = 0;
792 struct sk_buff *skb;
1fdd8dc5
PL
793 u16 inner_vtag = 0;
794 u16 out_vtag = 0;
76ad4f0e
S
795 u32 paylen = 0;
796 u16 mss = 0;
797 __be16 protocol;
798 u8 ol4_proto;
799 u8 il4_proto;
800 int ret;
801
802 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
803 desc_cb->priv = priv;
804 desc_cb->length = size;
805 desc_cb->dma = dma;
806 desc_cb->type = type;
807
808 /* now, fill the descriptor */
809 desc->addr = cpu_to_le64(dma);
810 desc->tx.send_size = cpu_to_le16((u16)size);
811 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
812 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
813
814 if (type == DESC_TYPE_SKB) {
815 skb = (struct sk_buff *)priv;
a90bb9a5 816 paylen = skb->len;
76ad4f0e 817
1fdd8dc5
PL
818 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
819 &ol_type_vlan_len_msec,
820 &inner_vtag, &out_vtag);
821 if (unlikely(ret))
822 return ret;
823
76ad4f0e
S
824 if (skb->ip_summed == CHECKSUM_PARTIAL) {
825 skb_reset_mac_len(skb);
826 protocol = skb->protocol;
827
1898d4e4
S
828 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
829 if (ret)
830 return ret;
76ad4f0e
S
831 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
832 &type_cs_vlan_tso,
833 &ol_type_vlan_len_msec);
834 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
835 &type_cs_vlan_tso,
836 &ol_type_vlan_len_msec);
837 if (ret)
838 return ret;
839
840 ret = hns3_set_tso(skb, &paylen, &mss,
841 &type_cs_vlan_tso);
842 if (ret)
843 return ret;
844 }
845
846 /* Set txbd */
847 desc->tx.ol_type_vlan_len_msec =
848 cpu_to_le32(ol_type_vlan_len_msec);
849 desc->tx.type_cs_vlan_tso_len =
850 cpu_to_le32(type_cs_vlan_tso);
a90bb9a5 851 desc->tx.paylen = cpu_to_le32(paylen);
76ad4f0e 852 desc->tx.mss = cpu_to_le16(mss);
1fdd8dc5
PL
853 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
854 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
76ad4f0e
S
855 }
856
857 /* move ring pointer to next.*/
858 ring_ptr_move_fw(ring, next_to_use);
859
860 return 0;
861}
862
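/* TSO variant of fill_desc: a buffer larger than HNS3_MAX_BD_SIZE is
 * split across several descriptors, each covering at most one
 * BD-sized chunk of the mapped region.
 */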
863static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
864 int size, dma_addr_t dma, int frag_end,
865 enum hns_desc_type type)
866{
867 unsigned int frag_buf_num;
868 unsigned int k;
869 int sizeoflast;
870 int ret;
871
872 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
873 sizeoflast = size % HNS3_MAX_BD_SIZE;
874 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
875
876 /* When the frag size is bigger than hardware, split this frag */
877 for (k = 0; k < frag_buf_num; k++) {
878 ret = hns3_fill_desc(ring, priv,
879 (k == frag_buf_num - 1) ?
880 sizeoflast : HNS3_MAX_BD_SIZE,
881 dma + HNS3_MAX_BD_SIZE * k,
882 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
883 (type == DESC_TYPE_SKB && !k) ?
884 DESC_TYPE_SKB : DESC_TYPE_PAGE);
885 if (ret)
886 return ret;
887 }
888
889 return 0;
890}
891
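/* Count how many BDs the skb head and each fragment will need and check
 * that the ring has room. A single fragment needing more than
 * HNS3_MAX_BD_PER_FRAG BDs is rejected with -ENOMEM.
 */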
892static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
893 struct hns3_enet_ring *ring)
894{
895 struct sk_buff *skb = *out_skb;
896 struct skb_frag_struct *frag;
897 int bdnum_for_frag;
898 int frag_num;
899 int buf_num;
900 int size;
901 int i;
902
903 size = skb_headlen(skb);
904 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
905
906 frag_num = skb_shinfo(skb)->nr_frags;
907 for (i = 0; i < frag_num; i++) {
908 frag = &skb_shinfo(skb)->frags[i];
909 size = skb_frag_size(frag);
910 bdnum_for_frag =
911 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
912 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
913 return -ENOMEM;
914
915 buf_num += bdnum_for_frag;
916 }
917
918 if (buf_num > ring_space(ring))
919 return -EBUSY;
920
921 *bnum = buf_num;
922 return 0;
923}
924
925static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
926 struct hns3_enet_ring *ring)
927{
928 struct sk_buff *skb = *out_skb;
929 int buf_num;
930
931 /* No. of segments (plus a header) */
932 buf_num = skb_shinfo(skb)->nr_frags + 1;
933
934 if (buf_num > ring_space(ring))
935 return -EBUSY;
936
937 *bnum = buf_num;
938
939 return 0;
940}
941
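/* Unwind the DMA mappings made for a partially filled packet, moving
 * next_to_use backwards until it reaches the recorded starting point.
 */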
942static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
943{
944 struct device *dev = ring_to_dev(ring);
945 unsigned int i;
946
947 for (i = 0; i < ring->desc_num; i++) {
948 /* check if this is where we started */
949 if (ring->next_to_use == next_to_use_orig)
950 break;
951
952 /* unmap the descriptor dma address */
953 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
954 dma_unmap_single(dev,
955 ring->desc_cb[ring->next_to_use].dma,
956 ring->desc_cb[ring->next_to_use].length,
957 DMA_TO_DEVICE);
958 else
959 dma_unmap_page(dev,
960 ring->desc_cb[ring->next_to_use].dma,
961 ring->desc_cb[ring->next_to_use].length,
962 DMA_TO_DEVICE);
963
964 /* rollback one */
965 ring_ptr_move_bw(ring, next_to_use);
966 }
967}
968
d43e5aca 969netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
76ad4f0e
S
970{
971 struct hns3_nic_priv *priv = netdev_priv(netdev);
972 struct hns3_nic_ring_data *ring_data =
973 &tx_ring_data(priv, skb->queue_mapping);
974 struct hns3_enet_ring *ring = ring_data->ring;
975 struct device *dev = priv->dev;
976 struct netdev_queue *dev_queue;
977 struct skb_frag_struct *frag;
978 int next_to_use_head;
979 int next_to_use_frag;
980 dma_addr_t dma;
981 int buf_num;
982 int seg_num;
983 int size;
984 int ret;
985 int i;
986
987 /* Prefetch the data used later */
988 prefetch(skb->data);
989
990 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
991 case -EBUSY:
992 u64_stats_update_begin(&ring->syncp);
993 ring->stats.tx_busy++;
994 u64_stats_update_end(&ring->syncp);
995
996 goto out_net_tx_busy;
997 case -ENOMEM:
998 u64_stats_update_begin(&ring->syncp);
999 ring->stats.sw_err_cnt++;
1000 u64_stats_update_end(&ring->syncp);
1001 netdev_err(netdev, "no memory to xmit!\n");
1002
1003 goto out_err_tx_ok;
1004 default:
1005 break;
1006 }
1007
1008 /* No. of segments (plus a header) */
1009 seg_num = skb_shinfo(skb)->nr_frags + 1;
1010 /* Fill the first part */
1011 size = skb_headlen(skb);
1012
1013 next_to_use_head = ring->next_to_use;
1014
1015 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1016 if (dma_mapping_error(dev, dma)) {
1017 netdev_err(netdev, "TX head DMA map failed\n");
1018 ring->stats.sw_err_cnt++;
1019 goto out_err_tx_ok;
1020 }
1021
1022 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1023 DESC_TYPE_SKB);
1024 if (ret)
1025 goto head_dma_map_err;
1026
1027 next_to_use_frag = ring->next_to_use;
1028 /* Fill the fragments */
1029 for (i = 1; i < seg_num; i++) {
1030 frag = &skb_shinfo(skb)->frags[i - 1];
1031 size = skb_frag_size(frag);
1032 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1033 if (dma_mapping_error(dev, dma)) {
1034 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
1035 ring->stats.sw_err_cnt++;
1036 goto frag_dma_map_err;
1037 }
1038 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1039 seg_num - 1 == i ? 1 : 0,
1040 DESC_TYPE_PAGE);
1041
1042 if (ret)
1043 goto frag_dma_map_err;
1044 }
1045
 1046 /* All packet data has been translated into descriptors */
1047 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1048 netdev_tx_sent_queue(dev_queue, skb->len);
1049
1050 wmb(); /* Commit all data before submit */
1051
1052 hnae_queue_xmit(ring->tqp, buf_num);
1053
1054 return NETDEV_TX_OK;
1055
1056frag_dma_map_err:
1057 hns_nic_dma_unmap(ring, next_to_use_frag);
1058
1059head_dma_map_err:
1060 hns_nic_dma_unmap(ring, next_to_use_head);
1061
1062out_err_tx_ok:
1063 dev_kfree_skb_any(skb);
1064 return NETDEV_TX_OK;
1065
1066out_net_tx_busy:
1067 netif_stop_subqueue(netdev, ring_data->queue_index);
1068 smp_mb(); /* Commit all data before submit */
1069
1070 return NETDEV_TX_BUSY;
1071}
1072
1073static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1074{
9780cb97 1075 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
1076 struct sockaddr *mac_addr = p;
1077 int ret;
1078
1079 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1080 return -EADDRNOTAVAIL;
1081
1082 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1083 if (ret) {
1084 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1085 return ret;
1086 }
1087
1088 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1089
1090 return 0;
1091}
1092
1093static int hns3_nic_set_features(struct net_device *netdev,
1094 netdev_features_t features)
1095{
1096 struct hns3_nic_priv *priv = netdev_priv(netdev);
5f9a7732
PL
1097 struct hnae3_handle *h = priv->ae_handle;
1098 netdev_features_t changed;
1099 int ret;
76ad4f0e
S
1100
1101 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1102 priv->ops.fill_desc = hns3_fill_desc_tso;
1103 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1104 } else {
1105 priv->ops.fill_desc = hns3_fill_desc;
1106 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1107 }
1108
d818396d
JS
1109 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1110 h->ae_algo->ops->enable_vlan_filter(h, true);
1111 else
1112 h->ae_algo->ops->enable_vlan_filter(h, false);
1113
5f9a7732
PL
1114 changed = netdev->features ^ features;
1115 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1116 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1117 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1118 else
1119 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1120
1121 if (ret)
1122 return ret;
1123 }
1124
76ad4f0e
S
1125 netdev->features = features;
1126 return 0;
1127}
1128
9596f6f0
PL
1129static void hns3_nic_get_stats64(struct net_device *netdev,
1130 struct rtnl_link_stats64 *stats)
76ad4f0e
S
1131{
1132 struct hns3_nic_priv *priv = netdev_priv(netdev);
1133 int queue_num = priv->ae_handle->kinfo.num_tqps;
7a5d2a39 1134 struct hnae3_handle *handle = priv->ae_handle;
76ad4f0e
S
1135 struct hns3_enet_ring *ring;
1136 unsigned int start;
1137 unsigned int idx;
1138 u64 tx_bytes = 0;
1139 u64 rx_bytes = 0;
1140 u64 tx_pkts = 0;
1141 u64 rx_pkts = 0;
0a83231f
JS
1142 u64 tx_drop = 0;
1143 u64 rx_drop = 0;
76ad4f0e 1144
a3083abb
JS
1145 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1146 return;
1147
7a5d2a39
JS
1148 handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1149
76ad4f0e
S
1150 for (idx = 0; idx < queue_num; idx++) {
1151 /* fetch the tx stats */
1152 ring = priv->ring_data[idx].ring;
1153 do {
d36d36ce 1154 start = u64_stats_fetch_begin_irq(&ring->syncp);
76ad4f0e
S
1155 tx_bytes += ring->stats.tx_bytes;
1156 tx_pkts += ring->stats.tx_pkts;
0a83231f
JS
1157 tx_drop += ring->stats.tx_busy;
1158 tx_drop += ring->stats.sw_err_cnt;
76ad4f0e
S
1159 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1160
1161 /* fetch the rx stats */
1162 ring = priv->ring_data[idx + queue_num].ring;
1163 do {
d36d36ce 1164 start = u64_stats_fetch_begin_irq(&ring->syncp);
76ad4f0e
S
1165 rx_bytes += ring->stats.rx_bytes;
1166 rx_pkts += ring->stats.rx_pkts;
0a83231f
JS
1167 rx_drop += ring->stats.non_vld_descs;
1168 rx_drop += ring->stats.err_pkt_len;
1169 rx_drop += ring->stats.l2_err;
76ad4f0e
S
1170 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1171 }
1172
1173 stats->tx_bytes = tx_bytes;
1174 stats->tx_packets = tx_pkts;
1175 stats->rx_bytes = rx_bytes;
1176 stats->rx_packets = rx_pkts;
1177
1178 stats->rx_errors = netdev->stats.rx_errors;
1179 stats->multicast = netdev->stats.multicast;
1180 stats->rx_length_errors = netdev->stats.rx_length_errors;
1181 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1182 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1183
1184 stats->tx_errors = netdev->stats.tx_errors;
0a83231f
JS
1185 stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1186 stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
76ad4f0e
S
1187 stats->collisions = netdev->stats.collisions;
1188 stats->rx_over_errors = netdev->stats.rx_over_errors;
1189 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1190 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1191 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1192 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1193 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1194 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1195 stats->tx_window_errors = netdev->stats.tx_window_errors;
1196 stats->rx_compressed = netdev->stats.rx_compressed;
1197 stats->tx_compressed = netdev->stats.tx_compressed;
1198}
1199
1200static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1201 enum hns3_udp_tnl_type type)
1202{
1203 struct hns3_nic_priv *priv = netdev_priv(netdev);
1204 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1205 struct hnae3_handle *h = priv->ae_handle;
1206
1207 if (udp_tnl->used && udp_tnl->dst_port == port) {
1208 udp_tnl->used++;
1209 return;
1210 }
1211
1212 if (udp_tnl->used) {
1213 netdev_warn(netdev,
1214 "UDP tunnel [%d], port [%d] offload\n", type, port);
1215 return;
1216 }
1217
1218 udp_tnl->dst_port = port;
1219 udp_tnl->used = 1;
1220 /* TBD send command to hardware to add port */
1221 if (h->ae_algo->ops->add_tunnel_udp)
1222 h->ae_algo->ops->add_tunnel_udp(h, port);
1223}
1224
1225static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1226 enum hns3_udp_tnl_type type)
1227{
1228 struct hns3_nic_priv *priv = netdev_priv(netdev);
1229 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1230 struct hnae3_handle *h = priv->ae_handle;
1231
1232 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1233 netdev_warn(netdev,
1234 "Invalid UDP tunnel port %d\n", port);
1235 return;
1236 }
1237
1238 udp_tnl->used--;
1239 if (udp_tnl->used)
1240 return;
1241
1242 udp_tnl->dst_port = 0;
1243 /* TBD send command to hardware to del port */
1244 if (h->ae_algo->ops->del_tunnel_udp)
9537e7cb 1245 h->ae_algo->ops->del_tunnel_udp(h, port);
76ad4f0e
S
1246}
1247
 1248/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
 1249 * @netdev: This physical port's netdev
1250 * @ti: Tunnel information
1251 */
1252static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1253 struct udp_tunnel_info *ti)
1254{
1255 u16 port_n = ntohs(ti->port);
1256
1257 switch (ti->type) {
1258 case UDP_TUNNEL_TYPE_VXLAN:
1259 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1260 break;
1261 case UDP_TUNNEL_TYPE_GENEVE:
1262 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1263 break;
1264 default:
1265 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1266 break;
1267 }
1268}
1269
1270static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1271 struct udp_tunnel_info *ti)
1272{
1273 u16 port_n = ntohs(ti->port);
1274
1275 switch (ti->type) {
1276 case UDP_TUNNEL_TYPE_VXLAN:
1277 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1278 break;
1279 case UDP_TUNNEL_TYPE_GENEVE:
1280 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1281 break;
1282 default:
1283 break;
1284 }
1285}
1286
30d240df 1287static int hns3_setup_tc(struct net_device *netdev, void *type_data)
76ad4f0e 1288{
30d240df 1289 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
9780cb97 1290 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e 1291 struct hnae3_knic_private_info *kinfo = &h->kinfo;
30d240df
YL
1292 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1293 u8 tc = mqprio_qopt->qopt.num_tc;
1294 u16 mode = mqprio_qopt->mode;
1295 u8 hw = mqprio_qopt->qopt.hw;
1296 bool if_running;
76ad4f0e
S
1297 unsigned int i;
1298 int ret;
1299
30d240df
YL
1300 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1301 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1302 return -EOPNOTSUPP;
1303
76ad4f0e
S
1304 if (tc > HNAE3_MAX_TC)
1305 return -EINVAL;
1306
76ad4f0e
S
1307 if (!netdev)
1308 return -EINVAL;
1309
30d240df
YL
1310 if_running = netif_running(netdev);
1311 if (if_running) {
1312 hns3_nic_net_stop(netdev);
1313 msleep(100);
76ad4f0e
S
1314 }
1315
30d240df
YL
1316 ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1317 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
76ad4f0e 1318 if (ret)
30d240df
YL
1319 goto out;
1320
1321 if (tc <= 1) {
1322 netdev_reset_tc(netdev);
1323 } else {
1324 ret = netdev_set_num_tc(netdev, tc);
1325 if (ret)
1326 goto out;
1327
1328 for (i = 0; i < HNAE3_MAX_TC; i++) {
1329 if (!kinfo->tc_info[i].enable)
1330 continue;
76ad4f0e 1331
76ad4f0e
S
1332 netdev_set_tc_queue(netdev,
1333 kinfo->tc_info[i].tc,
1334 kinfo->tc_info[i].tqp_count,
1335 kinfo->tc_info[i].tqp_offset);
30d240df 1336 }
76ad4f0e
S
1337 }
1338
30d240df
YL
1339 ret = hns3_nic_set_real_num_queue(netdev);
1340
1341out:
1342 if (if_running)
1343 hns3_nic_net_open(netdev);
1344
1345 return ret;
76ad4f0e
S
1346}
1347
2572ac53 1348static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
de4784ca 1349 void *type_data)
76ad4f0e 1350{
575ed7d3 1351 if (type != TC_SETUP_QDISC_MQPRIO)
38cf0426 1352 return -EOPNOTSUPP;
76ad4f0e 1353
30d240df 1354 return hns3_setup_tc(dev, type_data);
76ad4f0e
S
1355}
1356
1357static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1358 __be16 proto, u16 vid)
1359{
9780cb97 1360 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
1361 int ret = -EIO;
1362
1363 if (h->ae_algo->ops->set_vlan_filter)
1364 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1365
1366 return ret;
1367}
1368
1369static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1370 __be16 proto, u16 vid)
1371{
9780cb97 1372 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
1373 int ret = -EIO;
1374
1375 if (h->ae_algo->ops->set_vlan_filter)
1376 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1377
1378 return ret;
1379}
1380
1381static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1382 u8 qos, __be16 vlan_proto)
1383{
9780cb97 1384 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
1385 int ret = -EIO;
1386
1387 if (h->ae_algo->ops->set_vf_vlan_filter)
1388 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1389 qos, vlan_proto);
1390
1391 return ret;
1392}
1393
a8e8b7ff
S
1394static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1395{
9780cb97 1396 struct hnae3_handle *h = hns3_get_handle(netdev);
a8e8b7ff
S
1397 bool if_running = netif_running(netdev);
1398 int ret;
1399
1400 if (!h->ae_algo->ops->set_mtu)
1401 return -EOPNOTSUPP;
1402
1403 /* if this was called with netdev up then bring netdevice down */
1404 if (if_running) {
1405 (void)hns3_nic_net_stop(netdev);
1406 msleep(100);
1407 }
1408
1409 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1410 if (ret) {
1411 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1412 ret);
1413 return ret;
1414 }
1415
fe6362f9
FL
1416 netdev->mtu = new_mtu;
1417
a8e8b7ff
S
1418 /* if the netdev was running earlier, bring it up again */
1419 if (if_running && hns3_nic_net_open(netdev))
1420 ret = -EINVAL;
1421
1422 return ret;
1423}
1424
f8fa222c
L
1425static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1426{
1427 struct hns3_nic_priv *priv = netdev_priv(ndev);
1428 struct hns3_enet_ring *tx_ring = NULL;
1429 int timeout_queue = 0;
1430 int hw_head, hw_tail;
1431 int i;
1432
1433 /* Find the stopped queue the same way the stack does */
1434 for (i = 0; i < ndev->real_num_tx_queues; i++) {
1435 struct netdev_queue *q;
1436 unsigned long trans_start;
1437
1438 q = netdev_get_tx_queue(ndev, i);
1439 trans_start = q->trans_start;
1440 if (netif_xmit_stopped(q) &&
1441 time_after(jiffies,
1442 (trans_start + ndev->watchdog_timeo))) {
1443 timeout_queue = i;
1444 break;
1445 }
1446 }
1447
1448 if (i == ndev->num_tx_queues) {
1449 netdev_info(ndev,
1450 "no netdev TX timeout queue found, timeout count: %llu\n",
1451 priv->tx_timeout_count);
1452 return false;
1453 }
1454
1455 tx_ring = priv->ring_data[timeout_queue].ring;
1456
1457 hw_head = readl_relaxed(tx_ring->tqp->io_base +
1458 HNS3_RING_TX_RING_HEAD_REG);
1459 hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1460 HNS3_RING_TX_RING_TAIL_REG);
1461 netdev_info(ndev,
1462 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1463 priv->tx_timeout_count,
1464 timeout_queue,
1465 tx_ring->next_to_use,
1466 tx_ring->next_to_clean,
1467 hw_head,
1468 hw_tail,
1469 readl(tx_ring->tqp_vector->mask_addr));
1470
1471 return true;
1472}
1473
1474static void hns3_nic_net_timeout(struct net_device *ndev)
1475{
1476 struct hns3_nic_priv *priv = netdev_priv(ndev);
1477 unsigned long last_reset_time = priv->last_reset_time;
1478 struct hnae3_handle *h = priv->ae_handle;
1479
1480 if (!hns3_get_tx_timeo_queue_info(ndev))
1481 return;
1482
1483 priv->tx_timeout_count++;
1484
 1485 /* This timeout is far enough away from the last timeout;
 1486 * if it times out again, set the reset type to PF reset
1487 */
1488 if (time_after(jiffies, (last_reset_time + 20 * HZ)))
1489 priv->reset_level = HNAE3_FUNC_RESET;
1490
1491 /* Don't do any new action before the next timeout */
1492 else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo)))
1493 return;
1494
1495 priv->last_reset_time = jiffies;
1496
1497 if (h->ae_algo->ops->reset_event)
1498 h->ae_algo->ops->reset_event(h, priv->reset_level);
1499
1500 priv->reset_level++;
1501 if (priv->reset_level > HNAE3_GLOBAL_RESET)
1502 priv->reset_level = HNAE3_GLOBAL_RESET;
1503}
1504
76ad4f0e
S
1505static const struct net_device_ops hns3_nic_netdev_ops = {
1506 .ndo_open = hns3_nic_net_open,
1507 .ndo_stop = hns3_nic_net_stop,
1508 .ndo_start_xmit = hns3_nic_net_xmit,
f8fa222c 1509 .ndo_tx_timeout = hns3_nic_net_timeout,
76ad4f0e 1510 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
a8e8b7ff 1511 .ndo_change_mtu = hns3_nic_change_mtu,
76ad4f0e
S
1512 .ndo_set_features = hns3_nic_set_features,
1513 .ndo_get_stats64 = hns3_nic_get_stats64,
1514 .ndo_setup_tc = hns3_nic_setup_tc,
1515 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1516 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1517 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1518 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1519 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1520 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1521};
1522
1523/* hns3_probe - Device initialization routine
1524 * @pdev: PCI device information struct
1525 * @ent: entry in hns3_pci_tbl
1526 *
1527 * hns3_probe initializes a PF identified by a pci_dev structure.
1528 * The OS initialization, configuring of the PF private structure,
1529 * and a hardware reset occur.
1530 *
1531 * Returns 0 on success, negative on failure
1532 */
1533static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1534{
1535 struct hnae3_ae_dev *ae_dev;
1536 int ret;
1537
1538 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1539 GFP_KERNEL);
1540 if (!ae_dev) {
1541 ret = -ENOMEM;
1542 return ret;
1543 }
1544
1545 ae_dev->pdev = pdev;
e92a0843 1546 ae_dev->flag = ent->driver_data;
76ad4f0e
S
1547 ae_dev->dev_type = HNAE3_DEV_KNIC;
1548 pci_set_drvdata(pdev, ae_dev);
1549
1550 return hnae3_register_ae_dev(ae_dev);
1551}
1552
1553/* hns3_remove - Device removal routine
1554 * @pdev: PCI device information struct
1555 */
1556static void hns3_remove(struct pci_dev *pdev)
1557{
1558 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1559
1560 hnae3_unregister_ae_dev(ae_dev);
1561
1562 devm_kfree(&pdev->dev, ae_dev);
1563
1564 pci_set_drvdata(pdev, NULL);
1565}
1566
1567static struct pci_driver hns3_driver = {
1568 .name = hns3_driver_name,
1569 .id_table = hns3_pci_tbl,
1570 .probe = hns3_probe,
1571 .remove = hns3_remove,
1572};
1573
1574/* set default feature to hns3 */
1575static void hns3_set_default_feature(struct net_device *netdev)
1576{
d818396d
JS
1577 struct hnae3_handle *h = hns3_get_handle(netdev);
1578
76ad4f0e
S
1579 netdev->priv_flags |= IFF_UNICAST_FLT;
1580
1581 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1582 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1583 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1584 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1585 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1586
1587 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1588
1589 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1590
1591 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1592 NETIF_F_HW_VLAN_CTAG_FILTER |
5f9a7732 1593 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
76ad4f0e
S
1594 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1595 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1596 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1597 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1598
1599 netdev->vlan_features |=
1600 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1601 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1602 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1603 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1604 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1605
1606 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
0ba96f84 1607 NETIF_F_HW_VLAN_CTAG_TX |
76ad4f0e
S
1608 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1609 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1610 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1611 NETIF_F_GSO_UDP_TUNNEL_CSUM;
d818396d
JS
1612
1613 if (!(h->flags & HNAE3_SUPPORT_VF))
0ba96f84
JS
1614 netdev->hw_features |=
1615 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
76ad4f0e
S
1616}
1617
1618static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1619 struct hns3_desc_cb *cb)
1620{
1621 unsigned int order = hnae_page_order(ring);
1622 struct page *p;
1623
1624 p = dev_alloc_pages(order);
1625 if (!p)
1626 return -ENOMEM;
1627
1628 cb->priv = p;
1629 cb->page_offset = 0;
1630 cb->reuse_flag = 0;
1631 cb->buf = page_address(p);
1632 cb->length = hnae_page_size(ring);
1633 cb->type = DESC_TYPE_PAGE;
1634
76ad4f0e
S
1635 return 0;
1636}
1637
1638static void hns3_free_buffer(struct hns3_enet_ring *ring,
1639 struct hns3_desc_cb *cb)
1640{
1641 if (cb->type == DESC_TYPE_SKB)
1642 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1643 else if (!HNAE3_IS_TX_RING(ring))
1644 put_page((struct page *)cb->priv);
1645 memset(cb, 0, sizeof(*cb));
1646}
1647
1648static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1649{
1650 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1651 cb->length, ring_to_dma_dir(ring));
1652
1653 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1654 return -EIO;
1655
1656 return 0;
1657}
1658
1659static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1660 struct hns3_desc_cb *cb)
1661{
1662 if (cb->type == DESC_TYPE_SKB)
1663 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1664 ring_to_dma_dir(ring));
1665 else
1666 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1667 ring_to_dma_dir(ring));
1668}
1669
1670static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1671{
1672 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1673 ring->desc[i].addr = 0;
1674}
1675
1676static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1677{
1678 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1679
1680 if (!ring->desc_cb[i].dma)
1681 return;
1682
1683 hns3_buffer_detach(ring, i);
1684 hns3_free_buffer(ring, cb);
1685}
1686
1687static void hns3_free_buffers(struct hns3_enet_ring *ring)
1688{
1689 int i;
1690
1691 for (i = 0; i < ring->desc_num; i++)
1692 hns3_free_buffer_detach(ring, i);
1693}
1694
1695/* free desc along with its attached buffer */
1696static void hns3_free_desc(struct hns3_enet_ring *ring)
1697{
1698 hns3_free_buffers(ring);
1699
1700 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1701 ring->desc_num * sizeof(ring->desc[0]),
1702 DMA_BIDIRECTIONAL);
1703 ring->desc_dma_addr = 0;
1704 kfree(ring->desc);
1705 ring->desc = NULL;
1706}
1707
1708static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1709{
1710 int size = ring->desc_num * sizeof(ring->desc[0]);
1711
1712 ring->desc = kzalloc(size, GFP_KERNEL);
1713 if (!ring->desc)
1714 return -ENOMEM;
1715
1716 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1717 size, DMA_BIDIRECTIONAL);
1718 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1719 ring->desc_dma_addr = 0;
1720 kfree(ring->desc);
1721 ring->desc = NULL;
1722 return -ENOMEM;
1723 }
1724
1725 return 0;
1726}
1727
1728static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1729 struct hns3_desc_cb *cb)
1730{
1731 int ret;
1732
1733 ret = hns3_alloc_buffer(ring, cb);
1734 if (ret)
1735 goto out;
1736
1737 ret = hns3_map_buffer(ring, cb);
1738 if (ret)
1739 goto out_with_buf;
1740
1741 return 0;
1742
1743out_with_buf:
564883bb 1744 hns3_free_buffer(ring, cb);
76ad4f0e
S
1745out:
1746 return ret;
1747}
1748
1749static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1750{
1751 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1752
1753 if (ret)
1754 return ret;
1755
1756 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1757
1758 return 0;
1759}
1760
 1761/* Allocate memory for a raw packet and map it for DMA */
1762static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1763{
1764 int i, j, ret;
1765
1766 for (i = 0; i < ring->desc_num; i++) {
1767 ret = hns3_alloc_buffer_attach(ring, i);
1768 if (ret)
1769 goto out_buffer_fail;
1770 }
1771
1772 return 0;
1773
1774out_buffer_fail:
1775 for (j = i - 1; j >= 0; j--)
1776 hns3_free_buffer_detach(ring, j);
1777 return ret;
1778}
1779
 1780/* detach an in-use buffer and replace it with a reserved one */
1781static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1782 struct hns3_desc_cb *res_cb)
1783{
b9077428 1784 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
76ad4f0e
S
1785 ring->desc_cb[i] = *res_cb;
1786 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1787}
1788
1789static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1790{
1791 ring->desc_cb[i].reuse_flag = 0;
1792 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1793 + ring->desc_cb[i].page_offset);
1794}
1795
1796static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1797 int *pkts)
1798{
1799 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1800
1801 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1802 (*bytes) += desc_cb->length;
 1803 /* desc_cb will be cleaned after hnae_free_buffer_detach */
1804 hns3_free_buffer_detach(ring, ring->next_to_clean);
1805
1806 ring_ptr_move_fw(ring, next_to_clean);
1807}
1808
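/* A hardware head value is only trusted when it lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */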
1809static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1810{
1811 int u = ring->next_to_use;
1812 int c = ring->next_to_clean;
1813
1814 if (unlikely(h > ring->desc_num))
1815 return 0;
1816
1817 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1818}
1819
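/* TX completion: reclaim descriptors up to the hardware head pointer,
 * update byte/packet stats and wake the queue if it was stopped and
 * enough ring space has been freed.
 */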
24e750c4 1820bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
76ad4f0e
S
1821{
1822 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1823 struct netdev_queue *dev_queue;
1824 int bytes, pkts;
1825 int head;
1826
1827 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1828 rmb(); /* Make sure head is ready before touch any data */
1829
1830 if (is_ring_empty(ring) || head == ring->next_to_clean)
24e750c4 1831 return true; /* no data to poll */
76ad4f0e
S
1832
1833 if (!is_valid_clean_head(ring, head)) {
1834 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1835 ring->next_to_use, ring->next_to_clean);
1836
1837 u64_stats_update_begin(&ring->syncp);
1838 ring->stats.io_err_cnt++;
1839 u64_stats_update_end(&ring->syncp);
24e750c4 1840 return true;
76ad4f0e
S
1841 }
1842
1843 bytes = 0;
1844 pkts = 0;
1845 while (head != ring->next_to_clean && budget) {
1846 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1847 /* Issue prefetch for next Tx descriptor */
1848 prefetch(&ring->desc_cb[ring->next_to_clean]);
1849 budget--;
1850 }
1851
1852 ring->tqp_vector->tx_group.total_bytes += bytes;
1853 ring->tqp_vector->tx_group.total_packets += pkts;
1854
1855 u64_stats_update_begin(&ring->syncp);
1856 ring->stats.tx_bytes += bytes;
1857 ring->stats.tx_pkts += pkts;
1858 u64_stats_update_end(&ring->syncp);
1859
1860 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1861 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1862
1863 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1864 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1865 /* Make sure that anybody stopping the queue after this
1866 * sees the new next_to_clean.
1867 */
1868 smp_mb();
1869 if (netif_tx_queue_stopped(dev_queue)) {
1870 netif_tx_wake_queue(dev_queue);
1871 ring->stats.restart_queue++;
1872 }
1873 }
1874
1875 return !!budget;
1876}
1877
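/* Number of descriptors that have been cleaned but not yet refilled. */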
1878static int hns3_desc_unused(struct hns3_enet_ring *ring)
1879{
1880 int ntc = ring->next_to_clean;
1881 int ntu = ring->next_to_use;
1882
1883 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1884}
1885
1886static void
1887hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1888{
1889 struct hns3_desc_cb *desc_cb;
1890 struct hns3_desc_cb res_cbs;
1891 int i, ret;
1892
1893 for (i = 0; i < cleand_count; i++) {
1894 desc_cb = &ring->desc_cb[ring->next_to_use];
1895 if (desc_cb->reuse_flag) {
1896 u64_stats_update_begin(&ring->syncp);
1897 ring->stats.reuse_pg_cnt++;
1898 u64_stats_update_end(&ring->syncp);
1899
1900 hns3_reuse_buffer(ring, ring->next_to_use);
1901 } else {
1902 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1903 if (ret) {
1904 u64_stats_update_begin(&ring->syncp);
1905 ring->stats.sw_err_cnt++;
1906 u64_stats_update_end(&ring->syncp);
1907
1908 netdev_err(ring->tqp->handle->kinfo.netdev,
1909 "hnae reserve buffer map failed.\n");
1910 break;
1911 }
1912 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1913 }
1914
1915 ring_ptr_move_fw(ring, next_to_use);
1916 }
1917
 1918 wmb(); /* Make sure all data has been written before submit */
1919 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1920}
1921
1922/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1923 * @data: pointer to the start of the headers
 * @flag: RX descriptor l234info word used to identify the packet type
1924 * @max_size: total length of the section to find headers in
1925 *
1926 * This function is meant to determine the length of headers that will
1927 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1928 * motivation for doing this is to only perform one pull for IPv4 TCP
1929 * packets so that we can do basic things like calculating the gso_size
1930 * based on the average data per packet.
1931 */
1932static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1933 unsigned int max_size)
1934{
1935 unsigned char *network;
1936 u8 hlen;
1937
1938 /* This should never happen, but better safe than sorry */
1939 if (max_size < ETH_HLEN)
1940 return max_size;
1941
1942 /* Initialize network frame pointer */
1943 network = data;
1944
1945 /* Set first protocol and move network header forward */
1946 network += ETH_HLEN;
1947
1948 /* Handle any vlan tag if present */
1949 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1950 == HNS3_RX_FLAG_VLAN_PRESENT) {
1951 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1952 return max_size;
1953
1954 network += VLAN_HLEN;
1955 }
1956
1957 /* Handle L3 protocols */
1958 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1959 == HNS3_RX_FLAG_L3ID_IPV4) {
1960 if ((typeof(max_size))(network - data) >
1961 (max_size - sizeof(struct iphdr)))
1962 return max_size;
1963
1964 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1965 hlen = (network[0] & 0x0F) << 2;
1966
1967 /* Verify hlen meets minimum size requirements */
1968 if (hlen < sizeof(struct iphdr))
1969 return network - data;
1970
1971 /* Record next protocol if header is present */
1972 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1973 == HNS3_RX_FLAG_L3ID_IPV6) {
1974 if ((typeof(max_size))(network - data) >
1975 (max_size - sizeof(struct ipv6hdr)))
1976 return max_size;
1977
1978 /* Record next protocol */
1979 hlen = sizeof(struct ipv6hdr);
1980 } else {
1981 return network - data;
1982 }
1983
1984 /* Relocate pointer to start of L4 header */
1985 network += hlen;
1986
1987 /* Finally sort out TCP/UDP */
1988 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1989 == HNS3_RX_FLAG_L4ID_TCP) {
1990 if ((typeof(max_size))(network - data) >
1991 (max_size - sizeof(struct tcphdr)))
1992 return max_size;
1993
1994 /* Access doff as a u8 to avoid unaligned access on ia64 */
1995 hlen = (network[12] & 0xF0) >> 2;
1996
1997 /* Verify hlen meets minimum size requirements */
1998 if (hlen < sizeof(struct tcphdr))
1999 return network - data;
2000
2001 network += hlen;
2002 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
2003 == HNS3_RX_FLAG_L4ID_UDP) {
2004 if ((typeof(max_size))(network - data) >
2005 (max_size - sizeof(struct udphdr)))
2006 return max_size;
2007
2008 network += sizeof(struct udphdr);
2009 }
2010
2011 /* If everything has gone correctly, network should now point to the
2012 * start of the packet data, which is also the end of the headers.
2013 * If not, it probably points to the end of the last recognized
2014 * header.
2015 */
2016 if ((typeof(max_size))(network - data) < max_size)
2017 return network - data;
2018 else
2019 return max_size;
2020}
2021
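/* Attach the received buffer to the skb as a page fragment and try to
 * recycle the page: with 2048-byte buffers on small pages the page is
 * split in two halves and the offset is flipped, otherwise the offset
 * advances by truesize until the page is exhausted. Pages from a remote
 * NUMA node are never reused.
 */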
2022static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2023 struct hns3_enet_ring *ring, int pull_len,
2024 struct hns3_desc_cb *desc_cb)
2025{
2026 struct hns3_desc *desc;
2027 int truesize, size;
2028 int last_offset;
2029 bool twobufs;
2030
2031 twobufs = ((PAGE_SIZE < 8192) &&
2032 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2033
2034 desc = &ring->desc[ring->next_to_clean];
2035 size = le16_to_cpu(desc->rx.size);
2036
2037 if (twobufs) {
2038 truesize = hnae_buf_size(ring);
2039 } else {
2040 truesize = ALIGN(size, L1_CACHE_BYTES);
2041 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
2042 }
2043
2044 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2045 size - pull_len, truesize - pull_len);
2046
2047 /* Avoid re-using remote pages; the reuse flag defaults to no reuse */
2048 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2049 return;
2050
2051 if (twobufs) {
2052 /* If we are only owner of page we can reuse it */
2053 if (likely(page_count(desc_cb->priv) == 1)) {
2054 /* Flip page offset to other buffer */
2055 desc_cb->page_offset ^= truesize;
2056
2057 desc_cb->reuse_flag = 1;
2058 /* bump ref count on page before it is given */
2059 get_page(desc_cb->priv);
2060 }
2061 return;
2062 }
2063
2064 /* Move offset up to the next cache line */
2065 desc_cb->page_offset += truesize;
2066
2067 if (desc_cb->page_offset <= last_offset) {
2068 desc_cb->reuse_flag = 1;
2069 /* Bump ref count on page before it is given */
2070 get_page(desc_cb->priv);
2071 }
2072}
2073
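/* Set CHECKSUM_UNNECESSARY only when hardware has checked L3/L4 and the
 * packet is of a type it can verify; count checksum errors otherwise.
 * For tunnelled packets (e.g. MAC-in-UDP or NVGRE) csum_level is raised
 * so the inner checksum is covered as well.
 */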
2074static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2075 struct hns3_desc *desc)
2076{
2077 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2078 int l3_type, l4_type;
2079 u32 bd_base_info;
2080 int ol4_type;
2081 u32 l234info;
2082
2083 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2084 l234info = le32_to_cpu(desc->rx.l234_info);
2085
2086 skb->ip_summed = CHECKSUM_NONE;
2087
2088 skb_checksum_none_assert(skb);
2089
2090 if (!(netdev->features & NETIF_F_RXCSUM))
2091 return;
2092
2093 /* check if hardware has done checksum */
2094 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2095 return;
2096
2097 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
2098 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
2099 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2100 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2101 netdev_err(netdev, "L3/L4 error pkt\n");
2102 u64_stats_update_begin(&ring->syncp);
2103 ring->stats.l3l4_csum_err++;
2104 u64_stats_update_end(&ring->syncp);
2105
2106 return;
2107 }
2108
2109 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
2110 HNS3_RXD_L3ID_S);
2111 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
2112 HNS3_RXD_L4ID_S);
2113
2114 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
2115 switch (ol4_type) {
2116 case HNS3_OL4_TYPE_MAC_IN_UDP:
2117 case HNS3_OL4_TYPE_NVGRE:
2118 skb->csum_level = 1;
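		/* fall through */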
2119 case HNS3_OL4_TYPE_NO_TUN:
2120 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2121 if (l3_type == HNS3_L3_TYPE_IPV4 ||
2122 (l3_type == HNS3_L3_TYPE_IPV6 &&
2123 (l4_type == HNS3_L4_TYPE_UDP ||
2124 l4_type == HNS3_L4_TYPE_TCP ||
2125 l4_type == HNS3_L4_TYPE_SCTP)))
2126 skb->ip_summed = CHECKSUM_UNNECESSARY;
2127 break;
2128 }
2129}
2130
2131static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2132{
2133 napi_gro_receive(&ring->tqp_vector->napi, skb);
2134}
2135
2136static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2137 struct sk_buff **out_skb, int *out_bnum)
2138{
2139 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2140 struct hns3_desc_cb *desc_cb;
2141 struct hns3_desc *desc;
2142 struct sk_buff *skb;
2143 unsigned char *va;
2144 u32 bd_base_info;
2145 int pull_len;
2146 u32 l234info;
2147 int length;
2148 int bnum;
2149
2150 desc = &ring->desc[ring->next_to_clean];
2151 desc_cb = &ring->desc_cb[ring->next_to_clean];
2152
2153 prefetch(desc);
2154
2155 length = le16_to_cpu(desc->rx.pkt_len);
2156 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2157 l234info = le32_to_cpu(desc->rx.l234_info);
2158
2159 /* Check valid BD */
2160 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
2161 return -EFAULT;
2162
2163 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2164
2165 /* Prefetch first cache line of first page.
2166 * The idea is to cache a few bytes of the packet header. Our L1 cache
2167 * line size is 64B, so we need to prefetch twice to cover 128B. On
2168 * systems that have larger L1 caches with 128B cache
2169 * lines, a single prefetch would suffice to cache the
2170 * relevant part of the header.
2171 */
2172 prefetch(va);
2173#if L1_CACHE_BYTES < 128
2174 prefetch(va + L1_CACHE_BYTES);
2175#endif
2176
2177 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2178 HNS3_RX_HEAD_SIZE);
2179 if (unlikely(!skb)) {
2180 netdev_err(netdev, "alloc rx skb fail\n");
2181
2182 u64_stats_update_begin(&ring->syncp);
2183 ring->stats.sw_err_cnt++;
2184 u64_stats_update_end(&ring->syncp);
2185
2186 return -ENOMEM;
2187 }
2188
2189 prefetchw(skb->data);
2190
2191 /* Based on hw strategy, the offloaded VLAN tag is stored in
2192 * ot_vlan_tag for the double-tagged case, and in vlan_tag for
2193 * the single-tagged case.
2194 */
2195 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2196 u16 vlan_tag;
2197
2198 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2199 if (!(vlan_tag & VLAN_VID_MASK))
2200 vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2201 if (vlan_tag & VLAN_VID_MASK)
2202 __vlan_hwaccel_put_tag(skb,
2203 htons(ETH_P_8021Q),
2204 vlan_tag);
2205 }
2206
2207 bnum = 1;
2208 if (length <= HNS3_RX_HEAD_SIZE) {
2209 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2210
2211 /* We can reuse buffer as-is, just make sure it is local */
2212 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2213 desc_cb->reuse_flag = 1;
2214 else /* This page cannot be reused so discard it */
2215 put_page(desc_cb->priv);
2216
2217 ring_ptr_move_fw(ring, next_to_clean);
2218 } else {
2219 u64_stats_update_begin(&ring->syncp);
2220 ring->stats.seg_pkt_cnt++;
2221 u64_stats_update_end(&ring->syncp);
2222
2223 pull_len = hns3_nic_get_headlen(va, l234info,
2224 HNS3_RX_HEAD_SIZE);
2225 memcpy(__skb_put(skb, pull_len), va,
2226 ALIGN(pull_len, sizeof(long)));
2227
2228 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2229 ring_ptr_move_fw(ring, next_to_clean);
2230
2231 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2232 desc = &ring->desc[ring->next_to_clean];
2233 desc_cb = &ring->desc_cb[ring->next_to_clean];
2234 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2235 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2236 ring_ptr_move_fw(ring, next_to_clean);
2237 bnum++;
2238 }
2239 }
2240
2241 *out_bnum = bnum;
2242
2243 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2244 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2245 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2246 u64_stats_update_begin(&ring->syncp);
2247 ring->stats.non_vld_descs++;
2248 u64_stats_update_end(&ring->syncp);
2249
2250 dev_kfree_skb_any(skb);
2251 return -EINVAL;
2252 }
2253
2254 if (unlikely((!desc->rx.pkt_len) ||
2255 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2256 netdev_err(netdev, "truncated pkt\n");
2257 u64_stats_update_begin(&ring->syncp);
2258 ring->stats.err_pkt_len++;
2259 u64_stats_update_end(&ring->syncp);
2260
2261 dev_kfree_skb_any(skb);
2262 return -EFAULT;
2263 }
2264
2265 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2266 netdev_err(netdev, "L2 error pkt\n");
2267 u64_stats_update_begin(&ring->syncp);
2268 ring->stats.l2_err++;
2269 u64_stats_update_end(&ring->syncp);
2270
2271 dev_kfree_skb_any(skb);
2272 return -EFAULT;
2273 }
2274
2275 u64_stats_update_begin(&ring->syncp);
2276 ring->stats.rx_pkts++;
2277 ring->stats.rx_bytes += skb->len;
2278 u64_stats_update_end(&ring->syncp);
2279
2280 ring->tqp_vector->rx_group.total_bytes += skb->len;
2281
2282 hns3_rx_checksum(ring, skb, desc);
2283 return 0;
2284}
2285
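/* Poll up to @budget packets from the RX ring, refilling buffers in
 * batches of RCB_NOF_ALLOC_RX_BUFF_ONCE, and hand each completed skb to
 * @rx_fn (napi_gro_receive in the normal datapath, or a drop handler
 * when a ring is being drained).
 */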
2286int hns3_clean_rx_ring(
2287 struct hns3_enet_ring *ring, int budget,
2288 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2289{
2290#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2291 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2292 int recv_pkts, recv_bds, clean_count, err;
2293 int unused_count = hns3_desc_unused(ring);
2294 struct sk_buff *skb = NULL;
2295 int num, bnum = 0;
2296
2297 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2298 rmb(); /* Make sure num has taken effect before other data is touched */
2299
2300 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2301 num -= unused_count;
2302
2303 while (recv_pkts < budget && recv_bds < num) {
2304 /* Reuse or realloc buffers */
2305 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2306 hns3_nic_alloc_rx_buffers(ring,
2307 clean_count + unused_count);
2308 clean_count = 0;
2309 unused_count = hns3_desc_unused(ring);
2310 }
2311
2312 /* Poll one pkt */
2313 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2314 if (unlikely(!skb)) /* This fault cannot be repaired */
2315 goto out;
2316
2317 recv_bds += bnum;
2318 clean_count += bnum;
2319 if (unlikely(err)) { /* Skip erroneous packets */
2320 recv_pkts++;
2321 continue;
2322 }
2323
2324 /* Pass the skb up to the network stack */
2325 skb->protocol = eth_type_trans(skb, netdev);
2326 rx_fn(ring, skb);
2327
2328 recv_pkts++;
2329 }
2330
2331out:
2332 /* Make sure all data has been written before submission */
2333 if (clean_count + unused_count > 0)
2334 hns3_nic_alloc_rx_buffers(ring,
2335 clean_count + unused_count);
2336
2337 return recv_pkts;
2338}
2339
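/* Re-evaluate the interrupt gap-limit (GL) for a ring group based on the
 * byte and packet rates observed since the last poll, moving between the
 * LOW/MID/HIGH/ULTRA flow levels. Returns true when the GL value changed.
 */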
2340static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2341{
2342#define HNS3_RX_ULTRA_PACKET_RATE 40000
2343 enum hns3_flow_level_range new_flow_level;
2344 struct hns3_enet_tqp_vector *tqp_vector;
2345 int packets_per_secs;
2346 int bytes_per_usecs;
2347 u16 new_int_gl;
2348 int usecs;
2349
2350 if (!ring_group->int_gl)
2351 return false;
2352
2353 if (ring_group->total_packets == 0) {
2354 ring_group->int_gl = HNS3_INT_GL_50K;
2355 ring_group->flow_level = HNS3_FLOW_LOW;
2356 return true;
2357 }
2358
2359 /* Simple throttle rate management
2360 * 0-10MB/s lower (50000 ints/s)
2361 * 10-20MB/s middle (20000 ints/s)
2362 * 20-1249MB/s high (18000 ints/s)
2363 * > 40000pps ultra (8000 ints/s)
2364 */
2365 new_flow_level = ring_group->flow_level;
2366 new_int_gl = ring_group->int_gl;
2367 tqp_vector = ring_group->ring->tqp_vector;
2368 usecs = (ring_group->int_gl << 1);
2369 bytes_per_usecs = ring_group->total_bytes / usecs;
2370 /* 1000000 microseconds in one second: convert to packets per second */
2371 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2372
2373 switch (new_flow_level) {
2374 case HNS3_FLOW_LOW:
2375 if (bytes_per_usecs > 10)
2376 new_flow_level = HNS3_FLOW_MID;
2377 break;
2378 case HNS3_FLOW_MID:
2379 if (bytes_per_usecs > 20)
2380 new_flow_level = HNS3_FLOW_HIGH;
2381 else if (bytes_per_usecs <= 10)
2382 new_flow_level = HNS3_FLOW_LOW;
2383 break;
2384 case HNS3_FLOW_HIGH:
2385 case HNS3_FLOW_ULTRA:
2386 default:
2387 if (bytes_per_usecs <= 20)
2388 new_flow_level = HNS3_FLOW_MID;
2389 break;
2390 }
2391
2392 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2393 (&tqp_vector->rx_group == ring_group))
2394 new_flow_level = HNS3_FLOW_ULTRA;
2395
2396 switch (new_flow_level) {
2397 case HNS3_FLOW_LOW:
2398 new_int_gl = HNS3_INT_GL_50K;
2399 break;
2400 case HNS3_FLOW_MID:
2401 new_int_gl = HNS3_INT_GL_20K;
2402 break;
2403 case HNS3_FLOW_HIGH:
2404 new_int_gl = HNS3_INT_GL_18K;
2405 break;
2406 case HNS3_FLOW_ULTRA:
2407 new_int_gl = HNS3_INT_GL_8K;
2408 break;
2409 default:
2410 break;
2411 }
2412
2413 ring_group->total_bytes = 0;
2414 ring_group->total_packets = 0;
2415 ring_group->flow_level = new_flow_level;
2416 if (new_int_gl != ring_group->int_gl) {
2417 ring_group->int_gl = new_int_gl;
2418 return true;
2419 }
2420 return false;
2421}
2422
2423static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2424{
2425 u16 rx_int_gl, tx_int_gl;
2426 bool rx, tx;
2427
2428 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2429 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2430 rx_int_gl = tqp_vector->rx_group.int_gl;
2431 tx_int_gl = tqp_vector->tx_group.int_gl;
2432 if (rx && tx) {
2433 if (rx_int_gl > tx_int_gl) {
2434 tqp_vector->tx_group.int_gl = rx_int_gl;
2435 tqp_vector->tx_group.flow_level =
2436 tqp_vector->rx_group.flow_level;
2437 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2438 } else {
2439 tqp_vector->rx_group.int_gl = tx_int_gl;
2440 tqp_vector->rx_group.flow_level =
2441 tqp_vector->tx_group.flow_level;
2442 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2443 }
2444 }
2445}
2446
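/* NAPI poll handler: clean every TX ring with the full budget, split the
 * budget across the vector's RX rings, and only re-enable the vector
 * interrupt (after re-tuning the GL values) once all work completed
 * within budget.
 */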
2447static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2448{
2449 struct hns3_enet_ring *ring;
2450 int rx_pkt_total = 0;
2451
2452 struct hns3_enet_tqp_vector *tqp_vector =
2453 container_of(napi, struct hns3_enet_tqp_vector, napi);
2454 bool clean_complete = true;
2455 int rx_budget;
2456
2457 /* Since the actual Tx work is minimal, we can give the Tx a larger
2458 * budget and be more aggressive about cleaning up the Tx descriptors.
2459 */
2460 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2461 if (!hns3_clean_tx_ring(ring, budget))
2462 clean_complete = false;
2463 }
2464
2465 /* make sure rx ring budget not smaller than 1 */
2466 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2467
2468 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2469 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2470 hns3_rx_skb);
2471
2472 if (rx_cleaned >= rx_budget)
2473 clean_complete = false;
2474
2475 rx_pkt_total += rx_cleaned;
2476 }
2477
2478 tqp_vector->rx_group.total_packets += rx_pkt_total;
2479
2480 if (!clean_complete)
2481 return budget;
2482
2483 napi_complete(napi);
2484 hns3_update_new_int_gl(tqp_vector);
2485 hns3_mask_vector_irq(tqp_vector, 1);
2486
2487 return rx_pkt_total;
2488}
2489
2490static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2491 struct hnae3_ring_chain_node *head)
2492{
2493 struct pci_dev *pdev = tqp_vector->handle->pdev;
2494 struct hnae3_ring_chain_node *cur_chain = head;
2495 struct hnae3_ring_chain_node *chain;
2496 struct hns3_enet_ring *tx_ring;
2497 struct hns3_enet_ring *rx_ring;
2498
2499 tx_ring = tqp_vector->tx_group.ring;
2500 if (tx_ring) {
2501 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2502 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2503 HNAE3_RING_TYPE_TX);
2504
2505 cur_chain->next = NULL;
2506
2507 while (tx_ring->next) {
2508 tx_ring = tx_ring->next;
2509
2510 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2511 GFP_KERNEL);
2512 if (!chain)
2513 return -ENOMEM;
2514
2515 cur_chain->next = chain;
2516 chain->tqp_index = tx_ring->tqp->tqp_index;
2517 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2518 HNAE3_RING_TYPE_TX);
2519
2520 cur_chain = chain;
2521 }
2522 }
2523
2524 rx_ring = tqp_vector->rx_group.ring;
2525 if (!tx_ring && rx_ring) {
2526 cur_chain->next = NULL;
2527 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2528 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2529 HNAE3_RING_TYPE_RX);
2530
2531 rx_ring = rx_ring->next;
2532 }
2533
2534 while (rx_ring) {
2535 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2536 if (!chain)
2537 return -ENOMEM;
2538
2539 cur_chain->next = chain;
2540 chain->tqp_index = rx_ring->tqp->tqp_index;
2541 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2542 HNAE3_RING_TYPE_RX);
2543 cur_chain = chain;
2544
2545 rx_ring = rx_ring->next;
2546 }
2547
2548 return 0;
2549}
2550
2551static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2552 struct hnae3_ring_chain_node *head)
2553{
2554 struct pci_dev *pdev = tqp_vector->handle->pdev;
2555 struct hnae3_ring_chain_node *chain_tmp, *chain;
2556
2557 chain = head->next;
2558
2559 while (chain) {
2560 chain_tmp = chain->next;
2561 devm_kfree(&pdev->dev, chain);
2562 chain = chain_tmp;
2563 }
2564}
2565
2566static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2567 struct hns3_enet_ring *ring)
2568{
2569 ring->next = group->ring;
2570 group->ring = ring;
2571
2572 group->count++;
2573}
2574
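/* Allocate up to min(online CPUs, TQP count) interrupt vectors, assign
 * each queue pair's TX and RX rings to a vector round-robin, map the
 * resulting ring chains to the vectors through the AE ops and register
 * one NAPI instance per vector.
 */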
2575static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2576{
2577 struct hnae3_ring_chain_node vector_ring_chain;
2578 struct hnae3_handle *h = priv->ae_handle;
2579 struct hns3_enet_tqp_vector *tqp_vector;
2580 struct hnae3_vector_info *vector;
2581 struct pci_dev *pdev = h->pdev;
2582 u16 tqp_num = h->kinfo.num_tqps;
2583 u16 vector_num;
2584 int ret = 0;
2585 u16 i;
2586
2587 /* RSS size, cpu online and vector_num should be the same */
2588 /* Should consider 2p/4p later */
2589 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2590 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2591 GFP_KERNEL);
2592 if (!vector)
2593 return -ENOMEM;
2594
2595 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2596
2597 priv->vector_num = vector_num;
2598 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2599 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2600 GFP_KERNEL);
2601 if (!priv->tqp_vector)
2602 return -ENOMEM;
2603
2604 for (i = 0; i < tqp_num; i++) {
2605 u16 vector_i = i % vector_num;
2606
2607 tqp_vector = &priv->tqp_vector[vector_i];
2608
2609 hns3_add_ring_to_group(&tqp_vector->tx_group,
2610 priv->ring_data[i].ring);
2611
2612 hns3_add_ring_to_group(&tqp_vector->rx_group,
2613 priv->ring_data[i + tqp_num].ring);
2614
2615 tqp_vector->idx = vector_i;
2616 tqp_vector->mask_addr = vector[vector_i].io_addr;
2617 tqp_vector->vector_irq = vector[vector_i].vector;
2618 tqp_vector->num_tqps++;
2619
2620 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2621 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2622 }
2623
2624 for (i = 0; i < vector_num; i++) {
2625 tqp_vector = &priv->tqp_vector[i];
2626
2627 tqp_vector->rx_group.total_bytes = 0;
2628 tqp_vector->rx_group.total_packets = 0;
2629 tqp_vector->tx_group.total_bytes = 0;
2630 tqp_vector->tx_group.total_packets = 0;
2631 hns3_vector_gl_rl_init(tqp_vector);
2632 tqp_vector->handle = h;
2633
2634 ret = hns3_get_vector_ring_chain(tqp_vector,
2635 &vector_ring_chain);
2636 if (ret)
2637 goto out;
2638
2639 ret = h->ae_algo->ops->map_ring_to_vector(h,
2640 tqp_vector->vector_irq, &vector_ring_chain);
2641 if (ret)
2642 goto out;
2643
2644 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2645
2646 netif_napi_add(priv->netdev, &tqp_vector->napi,
2647 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2648 }
2649
2650out:
2651 devm_kfree(&pdev->dev, vector);
2652 return ret;
2653}
2654
2655static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2656{
2657 struct hnae3_ring_chain_node vector_ring_chain;
2658 struct hnae3_handle *h = priv->ae_handle;
2659 struct hns3_enet_tqp_vector *tqp_vector;
2660 struct pci_dev *pdev = h->pdev;
2661 int i, ret;
2662
2663 for (i = 0; i < priv->vector_num; i++) {
2664 tqp_vector = &priv->tqp_vector[i];
2665
2666 ret = hns3_get_vector_ring_chain(tqp_vector,
2667 &vector_ring_chain);
2668 if (ret)
2669 return ret;
2670
2671 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2672 tqp_vector->vector_irq, &vector_ring_chain);
2673 if (ret)
2674 return ret;
2675
2676 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2677
2678 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2679 (void)irq_set_affinity_hint(
2680 priv->tqp_vector[i].vector_irq,
2681 NULL);
2682 free_irq(priv->tqp_vector[i].vector_irq,
2683 &priv->tqp_vector[i]);
2684 }
2685
2686 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2687
2688 netif_napi_del(&priv->tqp_vector[i].napi);
2689 }
2690
2691 devm_kfree(&pdev->dev, priv->tqp_vector);
2692
2693 return 0;
2694}
2695
2696static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2697 int ring_type)
2698{
2699 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2700 int queue_num = priv->ae_handle->kinfo.num_tqps;
2701 struct pci_dev *pdev = priv->ae_handle->pdev;
2702 struct hns3_enet_ring *ring;
2703
2704 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2705 if (!ring)
2706 return -ENOMEM;
2707
2708 if (ring_type == HNAE3_RING_TYPE_TX) {
2709 ring_data[q->tqp_index].ring = ring;
2710 ring_data[q->tqp_index].queue_index = q->tqp_index;
2711 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2712 } else {
2713 ring_data[q->tqp_index + queue_num].ring = ring;
2714 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2715 ring->io_base = q->io_base;
2716 }
2717
2718 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2719
2720 ring->tqp = q;
2721 ring->desc = NULL;
2722 ring->desc_cb = NULL;
2723 ring->dev = priv->dev;
2724 ring->desc_dma_addr = 0;
2725 ring->buf_size = q->buf_size;
2726 ring->desc_num = q->desc_num;
2727 ring->next_to_use = 0;
2728 ring->next_to_clean = 0;
2729
2730 return 0;
2731}
2732
2733static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2734 struct hns3_nic_priv *priv)
2735{
2736 int ret;
2737
2738 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2739 if (ret)
2740 return ret;
2741
2742 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2743 if (ret)
2744 return ret;
2745
2746 return 0;
2747}
2748
2749static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2750{
2751 struct hnae3_handle *h = priv->ae_handle;
2752 struct pci_dev *pdev = h->pdev;
2753 int i, ret;
2754
2755 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2756 sizeof(*priv->ring_data) * 2,
2757 GFP_KERNEL);
2758 if (!priv->ring_data)
2759 return -ENOMEM;
2760
2761 for (i = 0; i < h->kinfo.num_tqps; i++) {
2762 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2763 if (ret)
2764 goto err;
2765 }
2766
2767 return 0;
2768err:
2769 devm_kfree(&pdev->dev, priv->ring_data);
2770 return ret;
2771}
2772
2773static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2774{
2775 struct hnae3_handle *h = priv->ae_handle;
2776 int i;
2777
2778 for (i = 0; i < h->kinfo.num_tqps; i++) {
2779 devm_kfree(priv->dev, priv->ring_data[i].ring);
2780 devm_kfree(priv->dev,
2781 priv->ring_data[i + h->kinfo.num_tqps].ring);
2782 }
2783 devm_kfree(priv->dev, priv->ring_data);
2784}
2785
2786static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2787{
2788 int ret;
2789
2790 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2791 return -EINVAL;
2792
2793 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2794 GFP_KERNEL);
2795 if (!ring->desc_cb) {
2796 ret = -ENOMEM;
2797 goto out;
2798 }
2799
2800 ret = hns3_alloc_desc(ring);
2801 if (ret)
2802 goto out_with_desc_cb;
2803
2804 if (!HNAE3_IS_TX_RING(ring)) {
2805 ret = hns3_alloc_ring_buffers(ring);
2806 if (ret)
2807 goto out_with_desc;
2808 }
2809
2810 return 0;
2811
2812out_with_desc:
2813 hns3_free_desc(ring);
2814out_with_desc_cb:
2815 kfree(ring->desc_cb);
2816 ring->desc_cb = NULL;
2817out:
2818 return ret;
2819}
2820
2821static void hns3_fini_ring(struct hns3_enet_ring *ring)
2822{
2823 hns3_free_desc(ring);
2824 kfree(ring->desc_cb);
2825 ring->desc_cb = NULL;
2826 ring->next_to_clean = 0;
2827 ring->next_to_use = 0;
2828}
2829
2830static int hns3_buf_size2type(u32 buf_size)
2831{
2832 int bd_size_type;
2833
2834 switch (buf_size) {
2835 case 512:
2836 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2837 break;
2838 case 1024:
2839 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2840 break;
2841 case 2048:
2842 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2843 break;
2844 case 4096:
2845 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2846 break;
2847 default:
2848 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2849 }
2850
2851 return bd_size_type;
2852}
2853
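/* Program the descriptor ring base address (the high register takes the
 * upper 32 bits; "(dma >> 31) >> 1" avoids an undefined 32-bit shift when
 * dma_addr_t is 32 bits wide), the buffer size type and the BD number.
 */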
2854static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2855{
2856 dma_addr_t dma = ring->desc_dma_addr;
2857 struct hnae3_queue *q = ring->tqp;
2858
2859 if (!HNAE3_IS_TX_RING(ring)) {
2860 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2861 (u32)dma);
2862 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2863 (u32)((dma >> 31) >> 1));
2864
2865 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2866 hns3_buf_size2type(ring->buf_size));
2867 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2868 ring->desc_num / 8 - 1);
2869
2870 } else {
2871 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2872 (u32)dma);
2873 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2874 (u32)((dma >> 31) >> 1));
2875
2876 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2877 hns3_buf_size2type(ring->buf_size));
2878 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2879 ring->desc_num / 8 - 1);
2880 }
2881}
2882
2883int hns3_init_all_ring(struct hns3_nic_priv *priv)
2884{
2885 struct hnae3_handle *h = priv->ae_handle;
2886 int ring_num = h->kinfo.num_tqps * 2;
2887 int i, j;
2888 int ret;
2889
2890 for (i = 0; i < ring_num; i++) {
2891 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2892 if (ret) {
2893 dev_err(priv->dev,
2894 "Alloc ring memory fail! ret=%d\n", ret);
2895 goto out_when_alloc_ring_memory;
2896 }
2897
2898 hns3_init_ring_hw(priv->ring_data[i].ring);
2899
2900 u64_stats_init(&priv->ring_data[i].ring->syncp);
2901 }
2902
2903 return 0;
2904
2905out_when_alloc_ring_memory:
2906 for (j = i - 1; j >= 0; j--)
2907 hns3_fini_ring(priv->ring_data[j].ring);
2908
2909 return -ENOMEM;
2910}
2911
2912int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2913{
2914 struct hnae3_handle *h = priv->ae_handle;
2915 int i;
2916
2917 for (i = 0; i < h->kinfo.num_tqps; i++) {
2918 if (h->ae_algo->ops->reset_queue)
2919 h->ae_algo->ops->reset_queue(h, i);
2920
2921 hns3_fini_ring(priv->ring_data[i].ring);
2922 devm_kfree(priv->dev, priv->ring_data[i].ring);
2923 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2924 devm_kfree(priv->dev,
2925 priv->ring_data[i + h->kinfo.num_tqps].ring);
2926 }
2927 devm_kfree(priv->dev, priv->ring_data);
2928
2929 return 0;
2930}
2931
2932/* Set MAC address if it is configured, otherwise leave it to the AE driver */
2933static void hns3_init_mac_addr(struct net_device *netdev)
2934{
2935 struct hns3_nic_priv *priv = netdev_priv(netdev);
2936 struct hnae3_handle *h = priv->ae_handle;
2937 u8 mac_addr_temp[ETH_ALEN];
2938
2939 if (h->ae_algo->ops->get_mac_addr) {
2940 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2941 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2942 }
2943
2944 /* Check if the MAC address is valid, if not get a random one */
2945 if (!is_valid_ether_addr(netdev->dev_addr)) {
2946 eth_hw_addr_random(netdev);
2947 dev_warn(priv->dev, "using random MAC address %pM\n",
2948 netdev->dev_addr);
2949 }
2950
2951 if (h->ae_algo->ops->set_mac_addr)
2952 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2953
2954}
2955
2956static void hns3_nic_set_priv_ops(struct net_device *netdev)
2957{
2958 struct hns3_nic_priv *priv = netdev_priv(netdev);
2959
2960 if ((netdev->features & NETIF_F_TSO) ||
2961 (netdev->features & NETIF_F_TSO6)) {
2962 priv->ops.fill_desc = hns3_fill_desc_tso;
2963 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2964 } else {
2965 priv->ops.fill_desc = hns3_fill_desc;
2966 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2967 }
2968}
2969
2970static int hns3_client_init(struct hnae3_handle *handle)
2971{
2972 struct pci_dev *pdev = handle->pdev;
2973 struct hns3_nic_priv *priv;
2974 struct net_device *netdev;
2975 int ret;
2976
2977 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2978 handle->kinfo.num_tqps);
2979 if (!netdev)
2980 return -ENOMEM;
2981
2982 priv = netdev_priv(netdev);
2983 priv->dev = &pdev->dev;
2984 priv->netdev = netdev;
2985 priv->ae_handle = handle;
2986 priv->last_reset_time = jiffies;
2987 priv->reset_level = HNAE3_FUNC_RESET;
2988 priv->tx_timeout_count = 0;
2989
2990 handle->kinfo.netdev = netdev;
2991 handle->priv = (void *)priv;
2992
2993 hns3_init_mac_addr(netdev);
2994
2995 hns3_set_default_feature(netdev);
2996
2997 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2998 netdev->priv_flags |= IFF_UNICAST_FLT;
2999 netdev->netdev_ops = &hns3_nic_netdev_ops;
3000 SET_NETDEV_DEV(netdev, &pdev->dev);
3001 hns3_ethtool_set_ops(netdev);
3002 hns3_nic_set_priv_ops(netdev);
3003
3004 /* Carrier off reporting is important to ethtool even BEFORE open */
3005 netif_carrier_off(netdev);
3006
3007 ret = hns3_get_ring_config(priv);
3008 if (ret) {
3009 ret = -ENOMEM;
3010 goto out_get_ring_cfg;
3011 }
3012
3013 ret = hns3_nic_init_vector_data(priv);
3014 if (ret) {
3015 ret = -ENOMEM;
3016 goto out_init_vector_data;
3017 }
3018
3019 ret = hns3_init_all_ring(priv);
3020 if (ret) {
3021 ret = -ENOMEM;
3022 goto out_init_ring_data;
3023 }
3024
3025 ret = register_netdev(netdev);
3026 if (ret) {
3027 dev_err(priv->dev, "probe register netdev fail!\n");
3028 goto out_reg_netdev_fail;
3029 }
3030
3031 hns3_dcbnl_setup(handle);
3032
3033 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3034 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3035
3036 return ret;
3037
3038out_reg_netdev_fail:
3039out_init_ring_data:
3040 (void)hns3_nic_uninit_vector_data(priv);
3041 priv->ring_data = NULL;
3042out_init_vector_data:
3043out_get_ring_cfg:
3044 priv->ae_handle = NULL;
3045 free_netdev(netdev);
3046 return ret;
3047}
3048
3049static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3050{
3051 struct net_device *netdev = handle->kinfo.netdev;
3052 struct hns3_nic_priv *priv = netdev_priv(netdev);
3053 int ret;
3054
3055 if (netdev->reg_state != NETREG_UNINITIALIZED)
3056 unregister_netdev(netdev);
3057
3058 ret = hns3_nic_uninit_vector_data(priv);
3059 if (ret)
3060 netdev_err(netdev, "uninit vector error\n");
3061
3062 ret = hns3_uninit_all_ring(priv);
3063 if (ret)
3064 netdev_err(netdev, "uninit ring error\n");
3065
3066 priv->ring_data = NULL;
3067
3068 free_netdev(netdev);
3069}
3070
3071static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3072{
3073 struct net_device *netdev = handle->kinfo.netdev;
3074
3075 if (!netdev)
3076 return;
3077
3078 if (linkup) {
3079 netif_carrier_on(netdev);
3080 netif_tx_wake_all_queues(netdev);
3081 netdev_info(netdev, "link up\n");
3082 } else {
3083 netif_carrier_off(netdev);
3084 netif_tx_stop_all_queues(netdev);
3085 netdev_info(netdev, "link down\n");
3086 }
3087}
3088
3089static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3090{
3091 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3092 struct net_device *ndev = kinfo->netdev;
3093 bool if_running;
3094 int ret;
3095 u8 i;
3096
3097 if (tc > HNAE3_MAX_TC)
3098 return -EINVAL;
3099
3100 if (!ndev)
3101 return -ENODEV;
3102
3103 if_running = netif_running(ndev);
3104
3105 ret = netdev_set_num_tc(ndev, tc);
3106 if (ret)
3107 return ret;
3108
3109 if (if_running) {
3110 (void)hns3_nic_net_stop(ndev);
3111 msleep(100);
3112 }
3113
3114 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3115 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3116 if (ret)
3117 goto err_out;
3118
3119 if (tc <= 1) {
3120 netdev_reset_tc(ndev);
3121 goto out;
3122 }
3123
3124 for (i = 0; i < HNAE3_MAX_TC; i++) {
3125 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3126
3127 if (tc_info->enable)
3128 netdev_set_tc_queue(ndev,
3129 tc_info->tc,
3130 tc_info->tqp_count,
3131 tc_info->tqp_offset);
3132 }
3133
3134 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
3135 netdev_set_prio_tc_map(ndev, i,
3136 kinfo->prio_tc[i]);
3137 }
3138
3139out:
3140 ret = hns3_nic_set_real_num_queue(ndev);
3141
3142err_out:
3143 if (if_running)
3144 (void)hns3_nic_net_open(ndev);
3145
3146 return ret;
3147}
3148
3149static void hns3_recover_hw_addr(struct net_device *ndev)
3150{
3151 struct netdev_hw_addr_list *list;
3152 struct netdev_hw_addr *ha, *tmp;
3153
3154 /* go through and sync uc_addr entries to the device */
3155 list = &ndev->uc;
3156 list_for_each_entry_safe(ha, tmp, &list->list, list)
3157 hns3_nic_uc_sync(ndev, ha->addr);
3158
3159 /* go through and sync mc_addr entries to the device */
3160 list = &ndev->mc;
3161 list_for_each_entry_safe(ha, tmp, &list->list, list)
3162 hns3_nic_mc_sync(ndev, ha->addr);
3163}
3164
3165static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
3166{
3167 dev_kfree_skb_any(skb);
3168}
3169
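/* Drain every TX and RX ring and reset the BQL state so that a reset or
 * a channel change starts from clean rings.
 */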
3170static void hns3_clear_all_ring(struct hnae3_handle *h)
3171{
3172 struct net_device *ndev = h->kinfo.netdev;
3173 struct hns3_nic_priv *priv = netdev_priv(ndev);
3174 u32 i;
3175
3176 for (i = 0; i < h->kinfo.num_tqps; i++) {
3177 struct netdev_queue *dev_queue;
3178 struct hns3_enet_ring *ring;
3179
3180 ring = priv->ring_data[i].ring;
3181 hns3_clean_tx_ring(ring, ring->desc_num);
3182 dev_queue = netdev_get_tx_queue(ndev,
3183 priv->ring_data[i].queue_index);
3184 netdev_tx_reset_queue(dev_queue);
3185
3186 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3187 hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
3188 }
3189}
3190
3191static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3192{
3193 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3194 struct net_device *ndev = kinfo->netdev;
3195
3196 if (!netif_running(ndev))
3197 return -EIO;
3198
3199 return hns3_nic_net_stop(ndev);
3200}
3201
3202static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3203{
3204 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3205 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
3206 int ret = 0;
3207
3208 if (netif_running(kinfo->netdev)) {
3209 ret = hns3_nic_net_up(kinfo->netdev);
3210 if (ret) {
3211 netdev_err(kinfo->netdev,
3212 "hns net up fail, ret=%d!\n", ret);
3213 return ret;
3214 }
3215
3216 priv->last_reset_time = jiffies;
3217 }
3218
3219 return ret;
3220}
3221
3222static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3223{
3224 struct net_device *netdev = handle->kinfo.netdev;
3225 struct hns3_nic_priv *priv = netdev_priv(netdev);
3226 int ret;
3227
3228 priv->reset_level = 1;
3229 hns3_init_mac_addr(netdev);
3230 hns3_nic_set_rx_mode(netdev);
3231 hns3_recover_hw_addr(netdev);
3232
3233 /* Carrier off reporting is important to ethtool even BEFORE open */
3234 netif_carrier_off(netdev);
3235
3236 ret = hns3_get_ring_config(priv);
3237 if (ret)
3238 return ret;
3239
3240 ret = hns3_nic_init_vector_data(priv);
3241 if (ret)
3242 return ret;
3243
3244 ret = hns3_init_all_ring(priv);
3245 if (ret) {
3246 hns3_nic_uninit_vector_data(priv);
3247 priv->ring_data = NULL;
3248 }
3249
3250 return ret;
3251}
3252
3253static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3254{
3255 struct net_device *netdev = handle->kinfo.netdev;
3256 struct hns3_nic_priv *priv = netdev_priv(netdev);
3257 int ret;
3258
3259 hns3_clear_all_ring(handle);
3260
3261 ret = hns3_nic_uninit_vector_data(priv);
3262 if (ret) {
3263 netdev_err(netdev, "uninit vector error\n");
3264 return ret;
3265 }
3266
3267 ret = hns3_uninit_all_ring(priv);
3268 if (ret)
3269 netdev_err(netdev, "uninit ring error\n");
3270
3271 priv->ring_data = NULL;
3272
3273 return ret;
3274}
3275
3276static int hns3_reset_notify(struct hnae3_handle *handle,
3277 enum hnae3_reset_notify_type type)
3278{
3279 int ret = 0;
3280
3281 switch (type) {
3282 case HNAE3_UP_CLIENT:
3283 ret = hns3_reset_notify_up_enet(handle);
3284 break;
3285 case HNAE3_DOWN_CLIENT:
3286 ret = hns3_reset_notify_down_enet(handle);
3287 break;
3288 case HNAE3_INIT_CLIENT:
3289 ret = hns3_reset_notify_init_enet(handle);
3290 break;
3291 case HNAE3_UNINIT_CLIENT:
3292 ret = hns3_reset_notify_uninit_enet(handle);
3293 break;
3294 default:
3295 break;
3296 }
3297
3298 return ret;
3299}
3300
3301static u16 hns3_get_max_available_channels(struct net_device *netdev)
3302{
3303 struct hnae3_handle *h = hns3_get_handle(netdev);
3304 u16 free_tqps, max_rss_size, max_tqps;
3305
3306 h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
3307 max_tqps = h->kinfo.num_tc * max_rss_size;
3308
3309 return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
3310}
3311
3312static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
3313{
3314 struct hns3_nic_priv *priv = netdev_priv(netdev);
3315 struct hnae3_handle *h = hns3_get_handle(netdev);
3316 int ret;
3317
3318 ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3319 if (ret)
3320 return ret;
3321
3322 ret = hns3_get_ring_config(priv);
3323 if (ret)
3324 return ret;
3325
3326 ret = hns3_nic_init_vector_data(priv);
3327 if (ret)
3328 goto err_uninit_vector;
3329
3330 ret = hns3_init_all_ring(priv);
3331 if (ret)
3332 goto err_put_ring;
3333
3334 return 0;
3335
3336err_put_ring:
3337 hns3_put_ring_config(priv);
3338err_uninit_vector:
3339 hns3_nic_uninit_vector_data(priv);
3340 return ret;
3341}
3342
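/* Round the requested queue count down to a multiple of the TC count so
 * every TC gets the same number of queues.
 */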
3343static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3344{
3345 return (new_tqp_num / num_tc) * num_tc;
3346}
3347
3348int hns3_set_channels(struct net_device *netdev,
3349 struct ethtool_channels *ch)
3350{
3351 struct hns3_nic_priv *priv = netdev_priv(netdev);
3352 struct hnae3_handle *h = hns3_get_handle(netdev);
3353 struct hnae3_knic_private_info *kinfo = &h->kinfo;
3354 bool if_running = netif_running(netdev);
3355 u32 new_tqp_num = ch->combined_count;
3356 u16 org_tqp_num;
3357 int ret;
3358
3359 if (ch->rx_count || ch->tx_count)
3360 return -EINVAL;
3361
3362 if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
3363 new_tqp_num < kinfo->num_tc) {
3364 dev_err(&netdev->dev,
3365 "Change tqps fail, the tqp range is from %d to %d",
3366 kinfo->num_tc,
3367 hns3_get_max_available_channels(netdev));
3368 return -EINVAL;
3369 }
3370
3371 new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3372 if (kinfo->num_tqps == new_tqp_num)
3373 return 0;
3374
3375 if (if_running)
3376 dev_close(netdev);
3377
3378 hns3_clear_all_ring(h);
3379
3380 ret = hns3_nic_uninit_vector_data(priv);
3381 if (ret) {
3382 dev_err(&netdev->dev,
3383 "Unbind vector with tqp fail, nothing is changed");
3384 goto open_netdev;
3385 }
3386
3387 hns3_uninit_all_ring(priv);
3388
3389 org_tqp_num = h->kinfo.num_tqps;
3390 ret = hns3_modify_tqp_num(netdev, new_tqp_num);
3391 if (ret) {
3392 ret = hns3_modify_tqp_num(netdev, org_tqp_num);
3393 if (ret) {
3394 /* If revert to old tqp failed, fatal error occurred */
3395 dev_err(&netdev->dev,
3396 "Revert to old tqp num fail, ret=%d", ret);
3397 return ret;
3398 }
3399 dev_info(&netdev->dev,
3400 "Change tqp num fail, Revert to old tqp num");
3401 }
3402
3403open_netdev:
3404 if (if_running)
3405 dev_open(netdev);
3406
3407 return ret;
3408}
3409
3410static const struct hnae3_client_ops client_ops = {
3411 .init_instance = hns3_client_init,
3412 .uninit_instance = hns3_client_uninit,
3413 .link_status_change = hns3_link_status_change,
3414 .setup_tc = hns3_client_setup_tc,
3415 .reset_notify = hns3_reset_notify,
3416};
3417
3418/* hns3_init_module - Driver registration routine
3419 * hns3_init_module is the first routine called when the driver is
3420 * loaded. It registers the hnae3 client and the PCI driver.
3421 */
3422static int __init hns3_init_module(void)
3423{
3424 int ret;
3425
3426 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3427 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3428
3429 client.type = HNAE3_CLIENT_KNIC;
3430 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3431 hns3_driver_name);
3432
3433 client.ops = &client_ops;
3434
3435 ret = hnae3_register_client(&client);
3436 if (ret)
3437 return ret;
3438
3439 ret = pci_register_driver(&hns3_driver);
3440 if (ret)
3441 hnae3_unregister_client(&client);
3442
3443 return ret;
3444}
3445module_init(hns3_init_module);
3446
3447/* hns3_exit_module - Driver exit cleanup routine
3448 * hns3_exit_module is called just before the driver is removed
3449 * from memory.
3450 */
3451static void __exit hns3_exit_module(void)
3452{
3453 pci_unregister_driver(&hns3_driver);
3454 hnae3_unregister_client(&client);
3455}
3456module_exit(hns3_exit_module);
3457
3458MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3459MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3460MODULE_LICENSE("GPL");
3461MODULE_ALIAS("pci:hns-nic");