1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/if_vlan.h>
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/skbuff.h>
19#include <linux/sctp.h>
20#include <linux/vermagic.h>
21#include <net/gre.h>
22#include <net/vxlan.h>
23
24#include "hnae3.h"
25#include "hns3_enet.h"
26
27const char hns3_driver_name[] = "hns3";
28const char hns3_driver_version[] = VERMAGIC_STRING;
29static const char hns3_driver_string[] =
30 "Hisilicon Ethernet Network Driver for Hip08 Family";
31static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
32static struct hnae3_client client;
33
34/* hns3_pci_tbl - PCI Device ID Table
35 *
36 * Last entry must be all 0s
37 *
38 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
39 * Class, Class Mask, private data (not used) }
40 */
41static const struct pci_device_id hns3_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
 44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
 45 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
 47 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
 49 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
 51 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
 53 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
54 /* required last entry */
55 {0, }
56};
57MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
58
59static irqreturn_t hns3_irq_handle(int irq, void *dev)
60{
61 struct hns3_enet_tqp_vector *tqp_vector = dev;
62
63 napi_schedule(&tqp_vector->napi);
64
65 return IRQ_HANDLED;
66}
67
68static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
69{
70 struct hns3_enet_tqp_vector *tqp_vectors;
71 unsigned int i;
72
73 for (i = 0; i < priv->vector_num; i++) {
74 tqp_vectors = &priv->tqp_vector[i];
75
76 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
77 continue;
78
79 /* release the irq resource */
80 free_irq(tqp_vectors->vector_irq, tqp_vectors);
81 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
82 }
83}
84
85static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
86{
87 struct hns3_enet_tqp_vector *tqp_vectors;
88 int txrx_int_idx = 0;
89 int rx_int_idx = 0;
90 int tx_int_idx = 0;
91 unsigned int i;
92 int ret;
93
94 for (i = 0; i < priv->vector_num; i++) {
95 tqp_vectors = &priv->tqp_vector[i];
96
97 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
98 continue;
99
100 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
101 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
102 "%s-%s-%d", priv->netdev->name, "TxRx",
103 txrx_int_idx++);
104 txrx_int_idx++;
105 } else if (tqp_vectors->rx_group.ring) {
106 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
107 "%s-%s-%d", priv->netdev->name, "Rx",
108 rx_int_idx++);
109 } else if (tqp_vectors->tx_group.ring) {
110 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
111 "%s-%s-%d", priv->netdev->name, "Tx",
112 tx_int_idx++);
113 } else {
114 /* Skip this unused q_vector */
115 continue;
116 }
117
118 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
119
120 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
121 tqp_vectors->name,
122 tqp_vectors);
123 if (ret) {
124 netdev_err(priv->netdev, "request irq(%d) fail\n",
125 tqp_vectors->vector_irq);
126 return ret;
127 }
128
129 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
130 }
131
132 return 0;
133}
134
135static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
136 u32 mask_en)
137{
138 writel(mask_en, tqp_vector->mask_addr);
139}
140
141static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
142{
143 napi_enable(&tqp_vector->napi);
144
145 /* enable vector */
146 hns3_mask_vector_irq(tqp_vector, 1);
147}
148
149static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
150{
151 /* disable vector */
152 hns3_mask_vector_irq(tqp_vector, 0);
153
154 disable_irq(tqp_vector->vector_irq);
155 napi_disable(&tqp_vector->napi);
156}
157
158static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
159 u32 gl_value)
160{
 161 /* this defines the configuration for GL (Interrupt Gap Limiter)
 162 * GL defines the inter-interrupt gap.
 163 * GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing
 164 */
165 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
166 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
167 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
168}
169
170static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
171 u32 rl_value)
172{
 173 /* this defines the configuration for RL (Interrupt Rate Limiter).
 174 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
 175 * GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing
 176 */
177 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
178}
179
180static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
181{
182 /* initialize the configuration for interrupt coalescing.
183 * 1. GL (Interrupt Gap Limiter)
184 * 2. RL (Interrupt Rate Limiter)
185 */
186
 187 /* Default: enable interrupt coalescing */
188 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
189 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
190 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
191 /* for now we are disabling Interrupt RL - we
192 * will re-enable later
193 */
194 hns3_set_vector_coalesc_rl(tqp_vector, 0);
195 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
196 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
197}
198
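/* Tell the stack how many Tx/Rx queues are really usable:
 * rss_size queues for each enabled TC.
 */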
199static int hns3_nic_set_real_num_queue(struct net_device *netdev)
200{
201 struct hns3_nic_priv *priv = netdev_priv(netdev);
202 struct hnae3_handle *h = priv->ae_handle;
203 struct hnae3_knic_private_info *kinfo = &h->kinfo;
204 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
205 int ret;
206
207 ret = netif_set_real_num_tx_queues(netdev, queue_size);
208 if (ret) {
209 netdev_err(netdev,
210 "netif_set_real_num_tx_queues fail, ret=%d!\n",
211 ret);
212 return ret;
213 }
214
215 ret = netif_set_real_num_rx_queues(netdev, queue_size);
216 if (ret) {
217 netdev_err(netdev,
218 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
219 return ret;
220 }
221
222 return 0;
223}
224
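/* Bring the data path up: request the vector IRQs, enable the NAPI
 * vectors, then start the ae_dev through the hardware ops; on failure
 * everything is rolled back in reverse order.
 */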
225static int hns3_nic_net_up(struct net_device *netdev)
226{
227 struct hns3_nic_priv *priv = netdev_priv(netdev);
228 struct hnae3_handle *h = priv->ae_handle;
229 int i, j;
230 int ret;
231
232 /* get irq resource for all vectors */
233 ret = hns3_nic_init_irq(priv);
234 if (ret) {
235 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
236 return ret;
237 }
238
239 /* enable the vectors */
240 for (i = 0; i < priv->vector_num; i++)
241 hns3_vector_enable(&priv->tqp_vector[i]);
242
243 /* start the ae_dev */
244 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
245 if (ret)
246 goto out_start_err;
247
248 return 0;
249
250out_start_err:
251 for (j = i - 1; j >= 0; j--)
252 hns3_vector_disable(&priv->tqp_vector[j]);
253
254 hns3_nic_uninit_irq(priv);
255
256 return ret;
257}
258
259static int hns3_nic_net_open(struct net_device *netdev)
260{
261 int ret;
262
263 netif_carrier_off(netdev);
264
265 ret = hns3_nic_set_real_num_queue(netdev);
266 if (ret)
 267 return ret;
268
269 ret = hns3_nic_net_up(netdev);
270 if (ret) {
271 netdev_err(netdev,
272 "hns net up fail, ret=%d!\n", ret);
273 return ret;
274 }
275
276 return 0;
277}
278
279static void hns3_nic_net_down(struct net_device *netdev)
280{
281 struct hns3_nic_priv *priv = netdev_priv(netdev);
282 const struct hnae3_ae_ops *ops;
283 int i;
284
285 /* stop ae_dev */
286 ops = priv->ae_handle->ae_algo->ops;
287 if (ops->stop)
288 ops->stop(priv->ae_handle);
289
290 /* disable vectors */
291 for (i = 0; i < priv->vector_num; i++)
292 hns3_vector_disable(&priv->tqp_vector[i]);
293
294 /* free irq resources */
295 hns3_nic_uninit_irq(priv);
296}
297
298static int hns3_nic_net_stop(struct net_device *netdev)
299{
300 netif_tx_stop_all_queues(netdev);
301 netif_carrier_off(netdev);
302
303 hns3_nic_net_down(netdev);
304
305 return 0;
306}
307
308void hns3_set_multicast_list(struct net_device *netdev)
309{
310 struct hns3_nic_priv *priv = netdev_priv(netdev);
311 struct hnae3_handle *h = priv->ae_handle;
312 struct netdev_hw_addr *ha = NULL;
313
314 if (h->ae_algo->ops->set_mc_addr) {
315 netdev_for_each_mc_addr(ha, netdev)
316 if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
317 netdev_err(netdev, "set multicast fail\n");
318 }
319}
320
321static int hns3_nic_uc_sync(struct net_device *netdev,
322 const unsigned char *addr)
323{
324 struct hns3_nic_priv *priv = netdev_priv(netdev);
325 struct hnae3_handle *h = priv->ae_handle;
326
327 if (h->ae_algo->ops->add_uc_addr)
328 return h->ae_algo->ops->add_uc_addr(h, addr);
329
330 return 0;
331}
332
333static int hns3_nic_uc_unsync(struct net_device *netdev,
334 const unsigned char *addr)
335{
336 struct hns3_nic_priv *priv = netdev_priv(netdev);
337 struct hnae3_handle *h = priv->ae_handle;
338
339 if (h->ae_algo->ops->rm_uc_addr)
340 return h->ae_algo->ops->rm_uc_addr(h, addr);
341
342 return 0;
343}
344
345static int hns3_nic_mc_sync(struct net_device *netdev,
346 const unsigned char *addr)
347{
348 struct hns3_nic_priv *priv = netdev_priv(netdev);
349 struct hnae3_handle *h = priv->ae_handle;
350
 351 if (h->ae_algo->ops->add_mc_addr)
352 return h->ae_algo->ops->add_mc_addr(h, addr);
353
354 return 0;
355}
356
357static int hns3_nic_mc_unsync(struct net_device *netdev,
358 const unsigned char *addr)
359{
360 struct hns3_nic_priv *priv = netdev_priv(netdev);
361 struct hnae3_handle *h = priv->ae_handle;
362
 363 if (h->ae_algo->ops->rm_mc_addr)
364 return h->ae_algo->ops->rm_mc_addr(h, addr);
365
366 return 0;
367}
368
369void hns3_nic_set_rx_mode(struct net_device *netdev)
370{
371 struct hns3_nic_priv *priv = netdev_priv(netdev);
372 struct hnae3_handle *h = priv->ae_handle;
373
374 if (h->ae_algo->ops->set_promisc_mode) {
375 if (netdev->flags & IFF_PROMISC)
376 h->ae_algo->ops->set_promisc_mode(h, 1);
377 else
378 h->ae_algo->ops->set_promisc_mode(h, 0);
379 }
380 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
381 netdev_err(netdev, "sync uc address fail\n");
382 if (netdev->flags & IFF_MULTICAST)
383 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
384 netdev_err(netdev, "sync mc address fail\n");
385}
386
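/* Prepare a GSO skb for hardware TSO: clear the checksum fields the
 * hardware will recompute, remove the payload length from the TCP
 * pseudo checksum, and fill paylen/mss plus the TSO bit for the Tx BD.
 */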
387static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
388 u16 *mss, u32 *type_cs_vlan_tso)
389{
390 u32 l4_offset, hdr_len;
391 union l3_hdr_info l3;
392 union l4_hdr_info l4;
393 u32 l4_paylen;
394 int ret;
395
396 if (!skb_is_gso(skb))
397 return 0;
398
399 ret = skb_cow_head(skb, 0);
400 if (ret)
401 return ret;
402
403 l3.hdr = skb_network_header(skb);
404 l4.hdr = skb_transport_header(skb);
405
406 /* Software should clear the IPv4's checksum field when tso is
407 * needed.
408 */
409 if (l3.v4->version == 4)
410 l3.v4->check = 0;
411
412 /* tunnel packet.*/
413 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
414 SKB_GSO_GRE_CSUM |
415 SKB_GSO_UDP_TUNNEL |
416 SKB_GSO_UDP_TUNNEL_CSUM)) {
417 if ((!(skb_shinfo(skb)->gso_type &
418 SKB_GSO_PARTIAL)) &&
419 (skb_shinfo(skb)->gso_type &
420 SKB_GSO_UDP_TUNNEL_CSUM)) {
421 /* Software should clear the udp's checksum
422 * field when tso is needed.
423 */
424 l4.udp->check = 0;
425 }
426 /* reset l3&l4 pointers from outer to inner headers */
427 l3.hdr = skb_inner_network_header(skb);
428 l4.hdr = skb_inner_transport_header(skb);
429
430 /* Software should clear the IPv4's checksum field when
431 * tso is needed.
432 */
433 if (l3.v4->version == 4)
434 l3.v4->check = 0;
435 }
436
437 /* normal or tunnel packet*/
438 l4_offset = l4.hdr - skb->data;
439 hdr_len = (l4.tcp->doff * 4) + l4_offset;
440
441 /* remove payload length from inner pseudo checksum when tso*/
442 l4_paylen = skb->len - l4_offset;
443 csum_replace_by_diff(&l4.tcp->check,
444 (__force __wsum)htonl(l4_paylen));
445
446 /* find the txbd field values */
447 *paylen = skb->len - hdr_len;
448 hnae_set_bit(*type_cs_vlan_tso,
449 HNS3_TXD_TSO_B, 1);
450
451 /* get MSS for TSO */
452 *mss = skb_shinfo(skb)->gso_size;
453
454 return 0;
455}
456
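/* Look up the outer L4 protocol and, for encapsulated skbs, the inner
 * L4 protocol; returns -EINVAL for non-IP outer packets.
 */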
457static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
458 u8 *il4_proto)
459{
460 union {
461 struct iphdr *v4;
462 struct ipv6hdr *v6;
463 unsigned char *hdr;
464 } l3;
465 unsigned char *l4_hdr;
466 unsigned char *exthdr;
467 u8 l4_proto_tmp;
468 __be16 frag_off;
469
 470 /* find the outer header pointer */
471 l3.hdr = skb_network_header(skb);
472 l4_hdr = skb_inner_transport_header(skb);
473
474 if (skb->protocol == htons(ETH_P_IPV6)) {
475 exthdr = l3.hdr + sizeof(*l3.v6);
476 l4_proto_tmp = l3.v6->nexthdr;
477 if (l4_hdr != exthdr)
478 ipv6_skip_exthdr(skb, exthdr - skb->data,
479 &l4_proto_tmp, &frag_off);
480 } else if (skb->protocol == htons(ETH_P_IP)) {
481 l4_proto_tmp = l3.v4->protocol;
482 } else {
483 return -EINVAL;
484 }
485
486 *ol4_proto = l4_proto_tmp;
487
488 /* tunnel packet */
489 if (!skb->encapsulation) {
490 *il4_proto = 0;
 491 return 0;
492 }
493
 494 /* find the inner header pointer */
495 l3.hdr = skb_inner_network_header(skb);
496 l4_hdr = skb_inner_transport_header(skb);
497
498 if (l3.v6->version == 6) {
499 exthdr = l3.hdr + sizeof(*l3.v6);
500 l4_proto_tmp = l3.v6->nexthdr;
501 if (l4_hdr != exthdr)
502 ipv6_skip_exthdr(skb, exthdr - skb->data,
503 &l4_proto_tmp, &frag_off);
504 } else if (l3.v4->version == 4) {
505 l4_proto_tmp = l3.v4->protocol;
506 }
507
508 *il4_proto = l4_proto_tmp;
509
510 return 0;
511}
512
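/* Fill the L2/L3/L4 header-length fields of the Tx descriptor. For
 * tunnel packets the outer lengths go into ol_type_vlan_len_msec and
 * the header pointers are switched to the inner headers before the
 * inner lengths are computed.
 */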
513static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
514 u8 il4_proto, u32 *type_cs_vlan_tso,
515 u32 *ol_type_vlan_len_msec)
516{
517 union {
518 struct iphdr *v4;
519 struct ipv6hdr *v6;
520 unsigned char *hdr;
521 } l3;
522 union {
523 struct tcphdr *tcp;
524 struct udphdr *udp;
525 struct gre_base_hdr *gre;
526 unsigned char *hdr;
527 } l4;
528 unsigned char *l2_hdr;
529 u8 l4_proto = ol4_proto;
530 u32 ol2_len;
531 u32 ol3_len;
532 u32 ol4_len;
533 u32 l2_len;
534 u32 l3_len;
535
536 l3.hdr = skb_network_header(skb);
537 l4.hdr = skb_transport_header(skb);
538
539 /* compute L2 header size for normal packet, defined in 2 Bytes */
540 l2_len = l3.hdr - skb->data;
541 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
542 HNS3_TXD_L2LEN_S, l2_len >> 1);
543
544 /* tunnel packet*/
545 if (skb->encapsulation) {
546 /* compute OL2 header size, defined in 2 Bytes */
547 ol2_len = l2_len;
548 hnae_set_field(*ol_type_vlan_len_msec,
549 HNS3_TXD_L2LEN_M,
550 HNS3_TXD_L2LEN_S, ol2_len >> 1);
551
552 /* compute OL3 header size, defined in 4 Bytes */
553 ol3_len = l4.hdr - l3.hdr;
554 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
555 HNS3_TXD_L3LEN_S, ol3_len >> 2);
556
557 /* MAC in UDP, MAC in GRE (0x6558)*/
558 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
559 /* switch MAC header ptr from outer to inner header.*/
560 l2_hdr = skb_inner_mac_header(skb);
561
562 /* compute OL4 header size, defined in 4 Bytes. */
563 ol4_len = l2_hdr - l4.hdr;
564 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
565 HNS3_TXD_L4LEN_S, ol4_len >> 2);
566
567 /* switch IP header ptr from outer to inner header */
568 l3.hdr = skb_inner_network_header(skb);
569
570 /* compute inner l2 header size, defined in 2 Bytes. */
571 l2_len = l3.hdr - l2_hdr;
572 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
573 HNS3_TXD_L2LEN_S, l2_len >> 1);
574 } else {
 575 /* skb packet type not supported by hardware,
 576 * so the txbd len field is not filled.
 577 */
578 return;
579 }
580
581 /* switch L4 header pointer from outer to inner */
582 l4.hdr = skb_inner_transport_header(skb);
583
584 l4_proto = il4_proto;
585 }
586
587 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
588 l3_len = l4.hdr - l3.hdr;
589 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
590 HNS3_TXD_L3LEN_S, l3_len >> 2);
591
592 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
593 switch (l4_proto) {
594 case IPPROTO_TCP:
595 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
596 HNS3_TXD_L4LEN_S, l4.tcp->doff);
597 break;
598 case IPPROTO_SCTP:
599 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
600 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
601 break;
602 case IPPROTO_UDP:
603 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
604 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
605 break;
606 default:
 607 /* skb packet type not supported by hardware,
 608 * so the txbd len field is not filled.
 609 */
610 return;
611 }
612}
613
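/* Set the L3/L4 type and checksum-offload bits in the Tx descriptor.
 * Returns -EDOM when a GSO skb uses a tunnel or L4 type the hardware
 * cannot checksum; non-GSO skbs fall back to skb_checksum_help().
 */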
614static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
615 u8 il4_proto, u32 *type_cs_vlan_tso,
616 u32 *ol_type_vlan_len_msec)
617{
618 union {
619 struct iphdr *v4;
620 struct ipv6hdr *v6;
621 unsigned char *hdr;
622 } l3;
623 u32 l4_proto = ol4_proto;
624
625 l3.hdr = skb_network_header(skb);
626
627 /* define OL3 type and tunnel type(OL4).*/
628 if (skb->encapsulation) {
629 /* define outer network header type.*/
630 if (skb->protocol == htons(ETH_P_IP)) {
631 if (skb_is_gso(skb))
632 hnae_set_field(*ol_type_vlan_len_msec,
633 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
634 HNS3_OL3T_IPV4_CSUM);
635 else
636 hnae_set_field(*ol_type_vlan_len_msec,
637 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
638 HNS3_OL3T_IPV4_NO_CSUM);
639
640 } else if (skb->protocol == htons(ETH_P_IPV6)) {
641 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
642 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
643 }
644
645 /* define tunnel type(OL4).*/
646 switch (l4_proto) {
647 case IPPROTO_UDP:
648 hnae_set_field(*ol_type_vlan_len_msec,
649 HNS3_TXD_TUNTYPE_M,
650 HNS3_TXD_TUNTYPE_S,
651 HNS3_TUN_MAC_IN_UDP);
652 break;
653 case IPPROTO_GRE:
654 hnae_set_field(*ol_type_vlan_len_msec,
655 HNS3_TXD_TUNTYPE_M,
656 HNS3_TXD_TUNTYPE_S,
657 HNS3_TUN_NVGRE);
658 break;
659 default:
 660 /* drop the skb if hardware doesn't support this tunnel type,
 661 * because hardware can't calculate the csum when doing TSO.
 662 */
663 if (skb_is_gso(skb))
664 return -EDOM;
665
 666 /* the stack computes the IP header already,
 667 * the driver calculates the l4 checksum when not doing TSO.
 668 */
669 skb_checksum_help(skb);
670 return 0;
671 }
672
673 l3.hdr = skb_inner_network_header(skb);
674 l4_proto = il4_proto;
675 }
676
677 if (l3.v4->version == 4) {
678 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
679 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
680
681 /* the stack computes the IP header already, the only time we
682 * need the hardware to recompute it is in the case of TSO.
683 */
684 if (skb_is_gso(skb))
685 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
686
687 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
688 } else if (l3.v6->version == 6) {
689 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
690 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
691 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
692 }
693
694 switch (l4_proto) {
695 case IPPROTO_TCP:
696 hnae_set_field(*type_cs_vlan_tso,
697 HNS3_TXD_L4T_M,
698 HNS3_TXD_L4T_S,
699 HNS3_L4T_TCP);
700 break;
701 case IPPROTO_UDP:
702 hnae_set_field(*type_cs_vlan_tso,
703 HNS3_TXD_L4T_M,
704 HNS3_TXD_L4T_S,
705 HNS3_L4T_UDP);
706 break;
707 case IPPROTO_SCTP:
708 hnae_set_field(*type_cs_vlan_tso,
709 HNS3_TXD_L4T_M,
710 HNS3_TXD_L4T_S,
711 HNS3_L4T_SCTP);
712 break;
713 default:
 714 /* drop the skb if hardware doesn't support this packet type,
 715 * because hardware can't calculate the csum when doing TSO.
 716 */
717 if (skb_is_gso(skb))
718 return -EDOM;
719
 720 /* the stack computes the IP header already,
 721 * the driver calculates the l4 checksum when not doing TSO.
 722 */
723 skb_checksum_help(skb);
724 return 0;
725 }
726
727 return 0;
728}
729
730static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
731{
732 /* Config bd buffer end */
733 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
734 HNS3_TXD_BDTYPE_M, 0);
735 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
736 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
737 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
738}
739
740static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
741 int size, dma_addr_t dma, int frag_end,
742 enum hns_desc_type type)
743{
744 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
745 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
746 u32 ol_type_vlan_len_msec = 0;
747 u16 bdtp_fe_sc_vld_ra_ri = 0;
748 u32 type_cs_vlan_tso = 0;
749 struct sk_buff *skb;
750 u32 paylen = 0;
751 u16 mss = 0;
752 __be16 protocol;
753 u8 ol4_proto;
754 u8 il4_proto;
755 int ret;
756
757 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
758 desc_cb->priv = priv;
759 desc_cb->length = size;
760 desc_cb->dma = dma;
761 desc_cb->type = type;
762
763 /* now, fill the descriptor */
764 desc->addr = cpu_to_le64(dma);
765 desc->tx.send_size = cpu_to_le16((u16)size);
766 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
767 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
768
769 if (type == DESC_TYPE_SKB) {
770 skb = (struct sk_buff *)priv;
771 paylen = cpu_to_le16(skb->len);
772
773 if (skb->ip_summed == CHECKSUM_PARTIAL) {
774 skb_reset_mac_len(skb);
775 protocol = skb->protocol;
776
777 /* vlan packet*/
778 if (protocol == htons(ETH_P_8021Q)) {
779 protocol = vlan_get_protocol(skb);
780 skb->protocol = protocol;
781 }
782 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
783 if (ret)
784 return ret;
785 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
786 &type_cs_vlan_tso,
787 &ol_type_vlan_len_msec);
788 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
789 &type_cs_vlan_tso,
790 &ol_type_vlan_len_msec);
791 if (ret)
792 return ret;
793
794 ret = hns3_set_tso(skb, &paylen, &mss,
795 &type_cs_vlan_tso);
796 if (ret)
797 return ret;
798 }
799
800 /* Set txbd */
801 desc->tx.ol_type_vlan_len_msec =
802 cpu_to_le32(ol_type_vlan_len_msec);
803 desc->tx.type_cs_vlan_tso_len =
804 cpu_to_le32(type_cs_vlan_tso);
805 desc->tx.paylen = cpu_to_le16(paylen);
806 desc->tx.mss = cpu_to_le16(mss);
807 }
808
809 /* move ring pointer to next.*/
810 ring_ptr_move_fw(ring, next_to_use);
811
812 return 0;
813}
814
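/* TSO variant of fill_desc: a buffer larger than HNS3_MAX_BD_SIZE is
 * split across several BDs; only the first BD of an skb keeps
 * DESC_TYPE_SKB, the rest are filled as DESC_TYPE_PAGE.
 */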
815static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
816 int size, dma_addr_t dma, int frag_end,
817 enum hns_desc_type type)
818{
819 unsigned int frag_buf_num;
820 unsigned int k;
821 int sizeoflast;
822 int ret;
823
824 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
825 sizeoflast = size % HNS3_MAX_BD_SIZE;
826 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
827
828 /* When the frag size is bigger than hardware, split this frag */
829 for (k = 0; k < frag_buf_num; k++) {
830 ret = hns3_fill_desc(ring, priv,
831 (k == frag_buf_num - 1) ?
832 sizeoflast : HNS3_MAX_BD_SIZE,
833 dma + HNS3_MAX_BD_SIZE * k,
834 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
835 (type == DESC_TYPE_SKB && !k) ?
836 DESC_TYPE_SKB : DESC_TYPE_PAGE);
837 if (ret)
838 return ret;
839 }
840
841 return 0;
842}
843
844static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
845 struct hns3_enet_ring *ring)
846{
847 struct sk_buff *skb = *out_skb;
848 struct skb_frag_struct *frag;
849 int bdnum_for_frag;
850 int frag_num;
851 int buf_num;
852 int size;
853 int i;
854
855 size = skb_headlen(skb);
856 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
857
858 frag_num = skb_shinfo(skb)->nr_frags;
859 for (i = 0; i < frag_num; i++) {
860 frag = &skb_shinfo(skb)->frags[i];
861 size = skb_frag_size(frag);
862 bdnum_for_frag =
863 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
864 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
865 return -ENOMEM;
866
867 buf_num += bdnum_for_frag;
868 }
869
870 if (buf_num > ring_space(ring))
871 return -EBUSY;
872
873 *bnum = buf_num;
874 return 0;
875}
876
877static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
878 struct hns3_enet_ring *ring)
879{
880 struct sk_buff *skb = *out_skb;
881 int buf_num;
882
883 /* No. of segments (plus a header) */
884 buf_num = skb_shinfo(skb)->nr_frags + 1;
885
886 if (buf_num > ring_space(ring))
887 return -EBUSY;
888
889 *bnum = buf_num;
890
891 return 0;
892}
893
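/* Undo a partially built Tx request: unmap the DMA mappings set up
 * since next_to_use_orig and roll next_to_use back to that position.
 */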
894static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
895{
896 struct device *dev = ring_to_dev(ring);
897 unsigned int i;
898
899 for (i = 0; i < ring->desc_num; i++) {
900 /* check if this is where we started */
901 if (ring->next_to_use == next_to_use_orig)
902 break;
903
904 /* unmap the descriptor dma address */
905 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
906 dma_unmap_single(dev,
907 ring->desc_cb[ring->next_to_use].dma,
908 ring->desc_cb[ring->next_to_use].length,
909 DMA_TO_DEVICE);
910 else
911 dma_unmap_page(dev,
912 ring->desc_cb[ring->next_to_use].dma,
913 ring->desc_cb[ring->next_to_use].length,
914 DMA_TO_DEVICE);
915
916 /* rollback one */
917 ring_ptr_move_bw(ring, next_to_use);
918 }
919}
920
921static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
922 struct net_device *netdev)
923{
924 struct hns3_nic_priv *priv = netdev_priv(netdev);
925 struct hns3_nic_ring_data *ring_data =
926 &tx_ring_data(priv, skb->queue_mapping);
927 struct hns3_enet_ring *ring = ring_data->ring;
928 struct device *dev = priv->dev;
929 struct netdev_queue *dev_queue;
930 struct skb_frag_struct *frag;
931 int next_to_use_head;
932 int next_to_use_frag;
933 dma_addr_t dma;
934 int buf_num;
935 int seg_num;
936 int size;
937 int ret;
938 int i;
939
940 /* Prefetch the data used later */
941 prefetch(skb->data);
942
943 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
944 case -EBUSY:
945 u64_stats_update_begin(&ring->syncp);
946 ring->stats.tx_busy++;
947 u64_stats_update_end(&ring->syncp);
948
949 goto out_net_tx_busy;
950 case -ENOMEM:
951 u64_stats_update_begin(&ring->syncp);
952 ring->stats.sw_err_cnt++;
953 u64_stats_update_end(&ring->syncp);
954 netdev_err(netdev, "no memory to xmit!\n");
955
956 goto out_err_tx_ok;
957 default:
958 break;
959 }
960
961 /* No. of segments (plus a header) */
962 seg_num = skb_shinfo(skb)->nr_frags + 1;
963 /* Fill the first part */
964 size = skb_headlen(skb);
965
966 next_to_use_head = ring->next_to_use;
967
968 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
969 if (dma_mapping_error(dev, dma)) {
970 netdev_err(netdev, "TX head DMA map failed\n");
971 ring->stats.sw_err_cnt++;
972 goto out_err_tx_ok;
973 }
974
975 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
976 DESC_TYPE_SKB);
977 if (ret)
978 goto head_dma_map_err;
979
980 next_to_use_frag = ring->next_to_use;
981 /* Fill the fragments */
982 for (i = 1; i < seg_num; i++) {
983 frag = &skb_shinfo(skb)->frags[i - 1];
984 size = skb_frag_size(frag);
985 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
986 if (dma_mapping_error(dev, dma)) {
987 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
988 ring->stats.sw_err_cnt++;
989 goto frag_dma_map_err;
990 }
991 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
992 seg_num - 1 == i ? 1 : 0,
993 DESC_TYPE_PAGE);
994
995 if (ret)
996 goto frag_dma_map_err;
997 }
998
999 /* Complete translate all packets */
1000 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1001 netdev_tx_sent_queue(dev_queue, skb->len);
1002
1003 wmb(); /* Commit all data before submit */
1004
1005 hnae_queue_xmit(ring->tqp, buf_num);
1006
1007 return NETDEV_TX_OK;
1008
1009frag_dma_map_err:
1010 hns_nic_dma_unmap(ring, next_to_use_frag);
1011
1012head_dma_map_err:
1013 hns_nic_dma_unmap(ring, next_to_use_head);
1014
1015out_err_tx_ok:
1016 dev_kfree_skb_any(skb);
1017 return NETDEV_TX_OK;
1018
1019out_net_tx_busy:
1020 netif_stop_subqueue(netdev, ring_data->queue_index);
1021 smp_mb(); /* Commit all data before submit */
1022
1023 return NETDEV_TX_BUSY;
1024}
1025
1026static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1027{
1028 struct hns3_nic_priv *priv = netdev_priv(netdev);
1029 struct hnae3_handle *h = priv->ae_handle;
1030 struct sockaddr *mac_addr = p;
1031 int ret;
1032
1033 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1034 return -EADDRNOTAVAIL;
1035
1036 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1037 if (ret) {
1038 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1039 return ret;
1040 }
1041
1042 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1043
1044 return 0;
1045}
1046
1047static int hns3_nic_set_features(struct net_device *netdev,
1048 netdev_features_t features)
1049{
1050 struct hns3_nic_priv *priv = netdev_priv(netdev);
1051
1052 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1053 priv->ops.fill_desc = hns3_fill_desc_tso;
1054 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1055 } else {
1056 priv->ops.fill_desc = hns3_fill_desc;
1057 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1058 }
1059
1060 netdev->features = features;
1061 return 0;
1062}
1063
1064static void
1065hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1066{
1067 struct hns3_nic_priv *priv = netdev_priv(netdev);
1068 int queue_num = priv->ae_handle->kinfo.num_tqps;
1069 struct hns3_enet_ring *ring;
1070 unsigned int start;
1071 unsigned int idx;
1072 u64 tx_bytes = 0;
1073 u64 rx_bytes = 0;
1074 u64 tx_pkts = 0;
1075 u64 rx_pkts = 0;
1076
1077 for (idx = 0; idx < queue_num; idx++) {
1078 /* fetch the tx stats */
1079 ring = priv->ring_data[idx].ring;
1080 do {
 1081 start = u64_stats_fetch_begin_irq(&ring->syncp);
1082 tx_bytes += ring->stats.tx_bytes;
1083 tx_pkts += ring->stats.tx_pkts;
1084 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1085
1086 /* fetch the rx stats */
1087 ring = priv->ring_data[idx + queue_num].ring;
1088 do {
 1089 start = u64_stats_fetch_begin_irq(&ring->syncp);
1090 rx_bytes += ring->stats.rx_bytes;
1091 rx_pkts += ring->stats.rx_pkts;
1092 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1093 }
1094
1095 stats->tx_bytes = tx_bytes;
1096 stats->tx_packets = tx_pkts;
1097 stats->rx_bytes = rx_bytes;
1098 stats->rx_packets = rx_pkts;
1099
1100 stats->rx_errors = netdev->stats.rx_errors;
1101 stats->multicast = netdev->stats.multicast;
1102 stats->rx_length_errors = netdev->stats.rx_length_errors;
1103 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1104 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1105
1106 stats->tx_errors = netdev->stats.tx_errors;
1107 stats->rx_dropped = netdev->stats.rx_dropped;
1108 stats->tx_dropped = netdev->stats.tx_dropped;
1109 stats->collisions = netdev->stats.collisions;
1110 stats->rx_over_errors = netdev->stats.rx_over_errors;
1111 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1112 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1113 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1114 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1115 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1116 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1117 stats->tx_window_errors = netdev->stats.tx_window_errors;
1118 stats->rx_compressed = netdev->stats.rx_compressed;
1119 stats->tx_compressed = netdev->stats.tx_compressed;
1120}
1121
1122static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1123 enum hns3_udp_tnl_type type)
1124{
1125 struct hns3_nic_priv *priv = netdev_priv(netdev);
1126 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1127 struct hnae3_handle *h = priv->ae_handle;
1128
1129 if (udp_tnl->used && udp_tnl->dst_port == port) {
1130 udp_tnl->used++;
1131 return;
1132 }
1133
1134 if (udp_tnl->used) {
1135 netdev_warn(netdev,
1136 "UDP tunnel [%d], port [%d] offload\n", type, port);
1137 return;
1138 }
1139
1140 udp_tnl->dst_port = port;
1141 udp_tnl->used = 1;
1142 /* TBD send command to hardware to add port */
1143 if (h->ae_algo->ops->add_tunnel_udp)
1144 h->ae_algo->ops->add_tunnel_udp(h, port);
1145}
1146
1147static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1148 enum hns3_udp_tnl_type type)
1149{
1150 struct hns3_nic_priv *priv = netdev_priv(netdev);
1151 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1152 struct hnae3_handle *h = priv->ae_handle;
1153
1154 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1155 netdev_warn(netdev,
1156 "Invalid UDP tunnel port %d\n", port);
1157 return;
1158 }
1159
1160 udp_tnl->used--;
1161 if (udp_tnl->used)
1162 return;
1163
1164 udp_tnl->dst_port = 0;
1165 /* TBD send command to hardware to del port */
1166 if (h->ae_algo->ops->del_tunnel_udp)
 1167 h->ae_algo->ops->del_tunnel_udp(h, port);
1168}
1169
 1170/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
 1171 * @netdev: This physical port's netdev
1172 * @ti: Tunnel information
1173 */
1174static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1175 struct udp_tunnel_info *ti)
1176{
1177 u16 port_n = ntohs(ti->port);
1178
1179 switch (ti->type) {
1180 case UDP_TUNNEL_TYPE_VXLAN:
1181 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1182 break;
1183 case UDP_TUNNEL_TYPE_GENEVE:
1184 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1185 break;
1186 default:
1187 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1188 break;
1189 }
1190}
1191
1192static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1193 struct udp_tunnel_info *ti)
1194{
1195 u16 port_n = ntohs(ti->port);
1196
1197 switch (ti->type) {
1198 case UDP_TUNNEL_TYPE_VXLAN:
1199 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1200 break;
1201 case UDP_TUNNEL_TYPE_GENEVE:
1202 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1203 break;
1204 default:
1205 break;
1206 }
1207}
1208
1209static int hns3_setup_tc(struct net_device *netdev, u8 tc)
1210{
1211 struct hns3_nic_priv *priv = netdev_priv(netdev);
1212 struct hnae3_handle *h = priv->ae_handle;
1213 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1214 unsigned int i;
1215 int ret;
1216
1217 if (tc > HNAE3_MAX_TC)
1218 return -EINVAL;
1219
1220 if (kinfo->num_tc == tc)
1221 return 0;
1222
1223 if (!netdev)
1224 return -EINVAL;
1225
1226 if (!tc) {
1227 netdev_reset_tc(netdev);
1228 return 0;
1229 }
1230
1231 /* Set num_tc for netdev */
1232 ret = netdev_set_num_tc(netdev, tc);
1233 if (ret)
1234 return ret;
1235
1236 /* Set per TC queues for the VSI */
1237 for (i = 0; i < HNAE3_MAX_TC; i++) {
1238 if (kinfo->tc_info[i].enable)
1239 netdev_set_tc_queue(netdev,
1240 kinfo->tc_info[i].tc,
1241 kinfo->tc_info[i].tqp_count,
1242 kinfo->tc_info[i].tqp_offset);
1243 }
1244
1245 return 0;
1246}
1247
2572ac53 1248static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
de4784ca 1249 void *type_data)
76ad4f0e 1250{
1251 struct tc_mqprio_qopt *mqprio = type_data;
1252
 1253 if (type != TC_SETUP_MQPRIO)
 1254 return -EOPNOTSUPP;
 1255
 1256 return hns3_setup_tc(dev, mqprio->num_tc);
1257}
1258
1259static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1260 __be16 proto, u16 vid)
1261{
1262 struct hns3_nic_priv *priv = netdev_priv(netdev);
1263 struct hnae3_handle *h = priv->ae_handle;
1264 int ret = -EIO;
1265
1266 if (h->ae_algo->ops->set_vlan_filter)
1267 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1268
1269 return ret;
1270}
1271
1272static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1273 __be16 proto, u16 vid)
1274{
1275 struct hns3_nic_priv *priv = netdev_priv(netdev);
1276 struct hnae3_handle *h = priv->ae_handle;
1277 int ret = -EIO;
1278
1279 if (h->ae_algo->ops->set_vlan_filter)
1280 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1281
1282 return ret;
1283}
1284
1285static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1286 u8 qos, __be16 vlan_proto)
1287{
1288 struct hns3_nic_priv *priv = netdev_priv(netdev);
1289 struct hnae3_handle *h = priv->ae_handle;
1290 int ret = -EIO;
1291
1292 if (h->ae_algo->ops->set_vf_vlan_filter)
1293 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1294 qos, vlan_proto);
1295
1296 return ret;
1297}
1298
1299static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1300{
1301 struct hns3_nic_priv *priv = netdev_priv(netdev);
1302 struct hnae3_handle *h = priv->ae_handle;
1303 bool if_running = netif_running(netdev);
1304 int ret;
1305
1306 if (!h->ae_algo->ops->set_mtu)
1307 return -EOPNOTSUPP;
1308
1309 /* if this was called with netdev up then bring netdevice down */
1310 if (if_running) {
1311 (void)hns3_nic_net_stop(netdev);
1312 msleep(100);
1313 }
1314
1315 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1316 if (ret) {
1317 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1318 ret);
1319 return ret;
1320 }
1321
1322 /* if the netdev was running earlier, bring it up again */
1323 if (if_running && hns3_nic_net_open(netdev))
1324 ret = -EINVAL;
1325
1326 return ret;
1327}
1328
1329static const struct net_device_ops hns3_nic_netdev_ops = {
1330 .ndo_open = hns3_nic_net_open,
1331 .ndo_stop = hns3_nic_net_stop,
1332 .ndo_start_xmit = hns3_nic_net_xmit,
1333 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
 1334 .ndo_change_mtu = hns3_nic_change_mtu,
1335 .ndo_set_features = hns3_nic_set_features,
1336 .ndo_get_stats64 = hns3_nic_get_stats64,
1337 .ndo_setup_tc = hns3_nic_setup_tc,
1338 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1339 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1340 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1341 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1342 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1343 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1344};
1345
1346/* hns3_probe - Device initialization routine
1347 * @pdev: PCI device information struct
1348 * @ent: entry in hns3_pci_tbl
1349 *
1350 * hns3_probe initializes a PF identified by a pci_dev structure.
1351 * The OS initialization, configuring of the PF private structure,
1352 * and a hardware reset occur.
1353 *
1354 * Returns 0 on success, negative on failure
1355 */
1356static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1357{
1358 struct hnae3_ae_dev *ae_dev;
1359 int ret;
1360
1361 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1362 GFP_KERNEL);
1363 if (!ae_dev) {
1364 ret = -ENOMEM;
1365 return ret;
1366 }
1367
1368 ae_dev->pdev = pdev;
 1369 ae_dev->flag = ent->driver_data;
1370 ae_dev->dev_type = HNAE3_DEV_KNIC;
1371 pci_set_drvdata(pdev, ae_dev);
1372
1373 return hnae3_register_ae_dev(ae_dev);
1374}
1375
1376/* hns3_remove - Device removal routine
1377 * @pdev: PCI device information struct
1378 */
1379static void hns3_remove(struct pci_dev *pdev)
1380{
1381 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1382
1383 hnae3_unregister_ae_dev(ae_dev);
1384
1385 devm_kfree(&pdev->dev, ae_dev);
1386
1387 pci_set_drvdata(pdev, NULL);
1388}
1389
1390static struct pci_driver hns3_driver = {
1391 .name = hns3_driver_name,
1392 .id_table = hns3_pci_tbl,
1393 .probe = hns3_probe,
1394 .remove = hns3_remove,
1395};
1396
1397/* set default feature to hns3 */
1398static void hns3_set_default_feature(struct net_device *netdev)
1399{
1400 netdev->priv_flags |= IFF_UNICAST_FLT;
1401
1402 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1403 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1404 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1405 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1406 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1407
1408 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1409
1410 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1411
1412 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1413 NETIF_F_HW_VLAN_CTAG_FILTER |
1414 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1415 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1416 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1417 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1418
1419 netdev->vlan_features |=
1420 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1421 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1422 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1423 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1424 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1425
1426 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1427 NETIF_F_HW_VLAN_CTAG_FILTER |
1428 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1429 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1430 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1431 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1432}
1433
1434static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1435 struct hns3_desc_cb *cb)
1436{
1437 unsigned int order = hnae_page_order(ring);
1438 struct page *p;
1439
1440 p = dev_alloc_pages(order);
1441 if (!p)
1442 return -ENOMEM;
1443
1444 cb->priv = p;
1445 cb->page_offset = 0;
1446 cb->reuse_flag = 0;
1447 cb->buf = page_address(p);
1448 cb->length = hnae_page_size(ring);
1449 cb->type = DESC_TYPE_PAGE;
1450
1451 memset(cb->buf, 0, cb->length);
1452
1453 return 0;
1454}
1455
1456static void hns3_free_buffer(struct hns3_enet_ring *ring,
1457 struct hns3_desc_cb *cb)
1458{
1459 if (cb->type == DESC_TYPE_SKB)
1460 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1461 else if (!HNAE3_IS_TX_RING(ring))
1462 put_page((struct page *)cb->priv);
1463 memset(cb, 0, sizeof(*cb));
1464}
1465
1466static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1467{
1468 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1469 cb->length, ring_to_dma_dir(ring));
1470
1471 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1472 return -EIO;
1473
1474 return 0;
1475}
1476
1477static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1478 struct hns3_desc_cb *cb)
1479{
1480 if (cb->type == DESC_TYPE_SKB)
1481 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1482 ring_to_dma_dir(ring));
1483 else
1484 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1485 ring_to_dma_dir(ring));
1486}
1487
1488static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1489{
1490 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1491 ring->desc[i].addr = 0;
1492}
1493
1494static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1495{
1496 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1497
1498 if (!ring->desc_cb[i].dma)
1499 return;
1500
1501 hns3_buffer_detach(ring, i);
1502 hns3_free_buffer(ring, cb);
1503}
1504
1505static void hns3_free_buffers(struct hns3_enet_ring *ring)
1506{
1507 int i;
1508
1509 for (i = 0; i < ring->desc_num; i++)
1510 hns3_free_buffer_detach(ring, i);
1511}
1512
1513/* free desc along with its attached buffer */
1514static void hns3_free_desc(struct hns3_enet_ring *ring)
1515{
1516 hns3_free_buffers(ring);
1517
1518 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1519 ring->desc_num * sizeof(ring->desc[0]),
1520 DMA_BIDIRECTIONAL);
1521 ring->desc_dma_addr = 0;
1522 kfree(ring->desc);
1523 ring->desc = NULL;
1524}
1525
1526static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1527{
1528 int size = ring->desc_num * sizeof(ring->desc[0]);
1529
1530 ring->desc = kzalloc(size, GFP_KERNEL);
1531 if (!ring->desc)
1532 return -ENOMEM;
1533
1534 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1535 size, DMA_BIDIRECTIONAL);
1536 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1537 ring->desc_dma_addr = 0;
1538 kfree(ring->desc);
1539 ring->desc = NULL;
1540 return -ENOMEM;
1541 }
1542
1543 return 0;
1544}
1545
1546static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1547 struct hns3_desc_cb *cb)
1548{
1549 int ret;
1550
1551 ret = hns3_alloc_buffer(ring, cb);
1552 if (ret)
1553 goto out;
1554
1555 ret = hns3_map_buffer(ring, cb);
1556 if (ret)
1557 goto out_with_buf;
1558
1559 return 0;
1560
1561out_with_buf:
1562 hns3_free_buffers(ring);
1563out:
1564 return ret;
1565}
1566
1567static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1568{
1569 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1570
1571 if (ret)
1572 return ret;
1573
1574 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1575
1576 return 0;
1577}
1578
1579/* Allocate memory for raw pkg, and map with dma */
1580static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1581{
1582 int i, j, ret;
1583
1584 for (i = 0; i < ring->desc_num; i++) {
1585 ret = hns3_alloc_buffer_attach(ring, i);
1586 if (ret)
1587 goto out_buffer_fail;
1588 }
1589
1590 return 0;
1591
1592out_buffer_fail:
1593 for (j = i - 1; j >= 0; j--)
1594 hns3_free_buffer_detach(ring, j);
1595 return ret;
1596}
1597
 1598/* detach an in-use buffer and replace it with a reserved one */
1599static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1600 struct hns3_desc_cb *res_cb)
1601{
1602 hns3_map_buffer(ring, &ring->desc_cb[i]);
1603 ring->desc_cb[i] = *res_cb;
1604 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1605}
1606
1607static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1608{
1609 ring->desc_cb[i].reuse_flag = 0;
1610 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1611 + ring->desc_cb[i].page_offset);
1612}
1613
1614static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1615 int *pkts)
1616{
1617 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1618
1619 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1620 (*bytes) += desc_cb->length;
 1621 /* desc_cb will be cleaned after hnae_free_buffer_detach */
1622 hns3_free_buffer_detach(ring, ring->next_to_clean);
1623
1624 ring_ptr_move_fw(ring, next_to_clean);
1625}
1626
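/* A head index reported by hardware is only valid if it lies inside
 * the (next_to_clean, next_to_use] window, taking wrap-around of the
 * ring into account.
 */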
1627static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1628{
1629 int u = ring->next_to_use;
1630 int c = ring->next_to_clean;
1631
1632 if (unlikely(h > ring->desc_num))
1633 return 0;
1634
1635 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1636}
1637
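/* Reclaim Tx descriptors completed up to the hardware head pointer,
 * account the freed bytes/packets for BQL and stats, and wake the
 * queue if it was stopped and enough space is available again.
 */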
1638int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1639{
1640 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1641 struct netdev_queue *dev_queue;
1642 int bytes, pkts;
1643 int head;
1644
1645 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1646 rmb(); /* Make sure head is ready before touch any data */
1647
1648 if (is_ring_empty(ring) || head == ring->next_to_clean)
1649 return 0; /* no data to poll */
1650
1651 if (!is_valid_clean_head(ring, head)) {
1652 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1653 ring->next_to_use, ring->next_to_clean);
1654
1655 u64_stats_update_begin(&ring->syncp);
1656 ring->stats.io_err_cnt++;
1657 u64_stats_update_end(&ring->syncp);
1658 return -EIO;
1659 }
1660
1661 bytes = 0;
1662 pkts = 0;
1663 while (head != ring->next_to_clean && budget) {
1664 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1665 /* Issue prefetch for next Tx descriptor */
1666 prefetch(&ring->desc_cb[ring->next_to_clean]);
1667 budget--;
1668 }
1669
1670 ring->tqp_vector->tx_group.total_bytes += bytes;
1671 ring->tqp_vector->tx_group.total_packets += pkts;
1672
1673 u64_stats_update_begin(&ring->syncp);
1674 ring->stats.tx_bytes += bytes;
1675 ring->stats.tx_pkts += pkts;
1676 u64_stats_update_end(&ring->syncp);
1677
1678 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1679 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1680
1681 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1682 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1683 /* Make sure that anybody stopping the queue after this
1684 * sees the new next_to_clean.
1685 */
1686 smp_mb();
1687 if (netif_tx_queue_stopped(dev_queue)) {
1688 netif_tx_wake_queue(dev_queue);
1689 ring->stats.restart_queue++;
1690 }
1691 }
1692
1693 return !!budget;
1694}
1695
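/* Number of Rx descriptors that can currently be refilled, i.e. the
 * distance from next_to_use back around the ring to next_to_clean.
 */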
1696static int hns3_desc_unused(struct hns3_enet_ring *ring)
1697{
1698 int ntc = ring->next_to_clean;
1699 int ntu = ring->next_to_use;
1700
1701 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1702}
1703
1704static void
1705hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1706{
1707 struct hns3_desc_cb *desc_cb;
1708 struct hns3_desc_cb res_cbs;
1709 int i, ret;
1710
1711 for (i = 0; i < cleand_count; i++) {
1712 desc_cb = &ring->desc_cb[ring->next_to_use];
1713 if (desc_cb->reuse_flag) {
1714 u64_stats_update_begin(&ring->syncp);
1715 ring->stats.reuse_pg_cnt++;
1716 u64_stats_update_end(&ring->syncp);
1717
1718 hns3_reuse_buffer(ring, ring->next_to_use);
1719 } else {
1720 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1721 if (ret) {
1722 u64_stats_update_begin(&ring->syncp);
1723 ring->stats.sw_err_cnt++;
1724 u64_stats_update_end(&ring->syncp);
1725
1726 netdev_err(ring->tqp->handle->kinfo.netdev,
1727 "hnae reserve buffer map failed.\n");
1728 break;
1729 }
1730 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1731 }
1732
1733 ring_ptr_move_fw(ring, next_to_use);
1734 }
1735
 1736 wmb(); /* Make sure all data has been written before submit */
1737 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1738}
1739
1740/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1741 * @data: pointer to the start of the headers
 1742 * @max_size: total length of section to find headers in
1743 *
1744 * This function is meant to determine the length of headers that will
1745 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1746 * motivation of doing this is to only perform one pull for IPv4 TCP
1747 * packets so that we can do basic things like calculating the gso_size
1748 * based on the average data per packet.
1749 */
1750static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1751 unsigned int max_size)
1752{
1753 unsigned char *network;
1754 u8 hlen;
1755
1756 /* This should never happen, but better safe than sorry */
1757 if (max_size < ETH_HLEN)
1758 return max_size;
1759
1760 /* Initialize network frame pointer */
1761 network = data;
1762
1763 /* Set first protocol and move network header forward */
1764 network += ETH_HLEN;
1765
1766 /* Handle any vlan tag if present */
1767 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1768 == HNS3_RX_FLAG_VLAN_PRESENT) {
1769 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1770 return max_size;
1771
1772 network += VLAN_HLEN;
1773 }
1774
1775 /* Handle L3 protocols */
1776 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1777 == HNS3_RX_FLAG_L3ID_IPV4) {
1778 if ((typeof(max_size))(network - data) >
1779 (max_size - sizeof(struct iphdr)))
1780 return max_size;
1781
1782 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1783 hlen = (network[0] & 0x0F) << 2;
1784
1785 /* Verify hlen meets minimum size requirements */
1786 if (hlen < sizeof(struct iphdr))
1787 return network - data;
1788
1789 /* Record next protocol if header is present */
1790 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1791 == HNS3_RX_FLAG_L3ID_IPV6) {
1792 if ((typeof(max_size))(network - data) >
1793 (max_size - sizeof(struct ipv6hdr)))
1794 return max_size;
1795
1796 /* Record next protocol */
1797 hlen = sizeof(struct ipv6hdr);
1798 } else {
1799 return network - data;
1800 }
1801
1802 /* Relocate pointer to start of L4 header */
1803 network += hlen;
1804
1805 /* Finally sort out TCP/UDP */
1806 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1807 == HNS3_RX_FLAG_L4ID_TCP) {
1808 if ((typeof(max_size))(network - data) >
1809 (max_size - sizeof(struct tcphdr)))
1810 return max_size;
1811
1812 /* Access doff as a u8 to avoid unaligned access on ia64 */
1813 hlen = (network[12] & 0xF0) >> 2;
1814
1815 /* Verify hlen meets minimum size requirements */
1816 if (hlen < sizeof(struct tcphdr))
1817 return network - data;
1818
1819 network += hlen;
1820 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1821 == HNS3_RX_FLAG_L4ID_UDP) {
1822 if ((typeof(max_size))(network - data) >
1823 (max_size - sizeof(struct udphdr)))
1824 return max_size;
1825
1826 network += sizeof(struct udphdr);
1827 }
1828
1829 /* If everything has gone correctly network should be the
1830 * data section of the packet and will be the end of the header.
1831 * If not then it probably represents the end of the last recognized
1832 * header.
1833 */
1834 if ((typeof(max_size))(network - data) < max_size)
1835 return network - data;
1836 else
1837 return max_size;
1838}
1839
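/* Attach the Rx buffer to the skb as a page fragment and decide
 * whether the page can be reused: in the two-buffers-per-page case the
 * page offset is flipped, otherwise it is advanced by truesize, and
 * reuse is only allowed for pages local to this NUMA node.
 */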
1840static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1841 struct hns3_enet_ring *ring, int pull_len,
1842 struct hns3_desc_cb *desc_cb)
1843{
1844 struct hns3_desc *desc;
1845 int truesize, size;
1846 int last_offset;
1847 bool twobufs;
1848
1849 twobufs = ((PAGE_SIZE < 8192) &&
1850 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
1851
1852 desc = &ring->desc[ring->next_to_clean];
1853 size = le16_to_cpu(desc->rx.size);
1854
1855 if (twobufs) {
1856 truesize = hnae_buf_size(ring);
1857 } else {
1858 truesize = ALIGN(size, L1_CACHE_BYTES);
1859 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1860 }
1861
1862 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1863 size - pull_len, truesize - pull_len);
1864
 1865 /* Avoid re-using remote pages; flag as not reusable by default */
1866 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1867 return;
1868
1869 if (twobufs) {
1870 /* If we are only owner of page we can reuse it */
1871 if (likely(page_count(desc_cb->priv) == 1)) {
1872 /* Flip page offset to other buffer */
1873 desc_cb->page_offset ^= truesize;
1874
1875 desc_cb->reuse_flag = 1;
1876 /* bump ref count on page before it is given*/
1877 get_page(desc_cb->priv);
1878 }
1879 return;
1880 }
1881
1882 /* Move offset up to the next cache line */
1883 desc_cb->page_offset += truesize;
1884
1885 if (desc_cb->page_offset <= last_offset) {
1886 desc_cb->reuse_flag = 1;
1887 /* Bump ref count on page before it is given*/
1888 get_page(desc_cb->priv);
1889 }
1890}
1891
1892static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1893 struct hns3_desc *desc)
1894{
1895 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1896 int l3_type, l4_type;
1897 u32 bd_base_info;
1898 int ol4_type;
1899 u32 l234info;
1900
1901 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1902 l234info = le32_to_cpu(desc->rx.l234_info);
1903
1904 skb->ip_summed = CHECKSUM_NONE;
1905
1906 skb_checksum_none_assert(skb);
1907
1908 if (!(netdev->features & NETIF_F_RXCSUM))
1909 return;
1910
1911 /* check if hardware has done checksum */
1912 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1913 return;
1914
1915 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1916 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1917 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1918 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1919 netdev_err(netdev, "L3/L4 error pkt\n");
1920 u64_stats_update_begin(&ring->syncp);
1921 ring->stats.l3l4_csum_err++;
1922 u64_stats_update_end(&ring->syncp);
1923
1924 return;
1925 }
1926
1927 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1928 HNS3_RXD_L3ID_S);
1929 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1930 HNS3_RXD_L4ID_S);
1931
1932 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1933 switch (ol4_type) {
1934 case HNS3_OL4_TYPE_MAC_IN_UDP:
1935 case HNS3_OL4_TYPE_NVGRE:
1936 skb->csum_level = 1;
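 /* fall through */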
1937 case HNS3_OL4_TYPE_NO_TUN:
1938 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1939 if (l3_type == HNS3_L3_TYPE_IPV4 ||
1940 (l3_type == HNS3_L3_TYPE_IPV6 &&
1941 (l4_type == HNS3_L4_TYPE_UDP ||
1942 l4_type == HNS3_L4_TYPE_TCP ||
1943 l4_type == HNS3_L4_TYPE_SCTP)))
1944 skb->ip_summed = CHECKSUM_UNNECESSARY;
1945 break;
1946 }
1947}
1948
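/* Assemble one received packet: copy the head into a newly allocated
 * skb, attach any remaining BDs as page frags, and validate the
 * descriptor flags; *out_bnum returns the number of BDs consumed.
 */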
1949static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1950 struct sk_buff **out_skb, int *out_bnum)
1951{
1952 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1953 struct hns3_desc_cb *desc_cb;
1954 struct hns3_desc *desc;
1955 struct sk_buff *skb;
1956 unsigned char *va;
1957 u32 bd_base_info;
1958 int pull_len;
1959 u32 l234info;
1960 int length;
1961 int bnum;
1962
1963 desc = &ring->desc[ring->next_to_clean];
1964 desc_cb = &ring->desc_cb[ring->next_to_clean];
1965
1966 prefetch(desc);
1967
1968 length = le16_to_cpu(desc->rx.pkt_len);
1969 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1970 l234info = le32_to_cpu(desc->rx.l234_info);
1971
1972 /* Check valid BD */
1973 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1974 return -EFAULT;
1975
1976 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1977
1978	/* Prefetch first cache line of first page.
1979	 * The idea is to cache the first few bytes of the packet header. Our
1980	 * L1 cache line size is 64B, so we need to prefetch twice to cover
1981	 * 128B. Some CPUs have larger, 128B level 1 cache lines; on those a
1982	 * single prefetch would already cover the relevant part of the
1983	 * header.
1984	 */
1985 prefetch(va);
1986#if L1_CACHE_BYTES < 128
1987 prefetch(va + L1_CACHE_BYTES);
1988#endif
1989
1990 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1991 HNS3_RX_HEAD_SIZE);
1992 if (unlikely(!skb)) {
1993 netdev_err(netdev, "alloc rx skb fail\n");
1994
1995 u64_stats_update_begin(&ring->syncp);
1996 ring->stats.sw_err_cnt++;
1997 u64_stats_update_end(&ring->syncp);
1998
1999 return -ENOMEM;
2000 }
2001
2002 prefetchw(skb->data);
2003
2004 bnum = 1;
2005 if (length <= HNS3_RX_HEAD_SIZE) {
2006 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2007
2008 /* We can reuse buffer as-is, just make sure it is local */
2009 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2010 desc_cb->reuse_flag = 1;
2011 else /* This page cannot be reused so discard it */
2012 put_page(desc_cb->priv);
2013
2014 ring_ptr_move_fw(ring, next_to_clean);
2015 } else {
2016 u64_stats_update_begin(&ring->syncp);
2017 ring->stats.seg_pkt_cnt++;
2018 u64_stats_update_end(&ring->syncp);
2019
2020 pull_len = hns3_nic_get_headlen(va, l234info,
2021 HNS3_RX_HEAD_SIZE);
2022 memcpy(__skb_put(skb, pull_len), va,
2023 ALIGN(pull_len, sizeof(long)));
2024
2025 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2026 ring_ptr_move_fw(ring, next_to_clean);
2027
2028 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2029 desc = &ring->desc[ring->next_to_clean];
2030 desc_cb = &ring->desc_cb[ring->next_to_clean];
2031 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2032 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2033 ring_ptr_move_fw(ring, next_to_clean);
2034 bnum++;
2035 }
2036 }
2037
2038 *out_bnum = bnum;
2039
2040 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2041 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2042 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2043 u64_stats_update_begin(&ring->syncp);
2044 ring->stats.non_vld_descs++;
2045 u64_stats_update_end(&ring->syncp);
2046
2047 dev_kfree_skb_any(skb);
2048 return -EINVAL;
2049 }
2050
2051 if (unlikely((!desc->rx.pkt_len) ||
2052 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2053 netdev_err(netdev, "truncated pkt\n");
2054 u64_stats_update_begin(&ring->syncp);
2055 ring->stats.err_pkt_len++;
2056 u64_stats_update_end(&ring->syncp);
2057
2058 dev_kfree_skb_any(skb);
2059 return -EFAULT;
2060 }
2061
2062 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2063 netdev_err(netdev, "L2 error pkt\n");
2064 u64_stats_update_begin(&ring->syncp);
2065 ring->stats.l2_err++;
2066 u64_stats_update_end(&ring->syncp);
2067
2068 dev_kfree_skb_any(skb);
2069 return -EFAULT;
2070 }
2071
2072 u64_stats_update_begin(&ring->syncp);
2073 ring->stats.rx_pkts++;
2074 ring->stats.rx_bytes += skb->len;
2075 u64_stats_update_end(&ring->syncp);
2076
2077 ring->tqp_vector->rx_group.total_bytes += skb->len;
2078
2079 hns3_rx_checksum(ring, skb, desc);
2080 return 0;
2081}
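
/* Editor's note: a schematic sketch (not part of the driver) of the multi-BD
 * reassembly above: the first descriptor's header bytes are pulled into the
 * skb linear area, and every following descriptor up to (and including) the
 * one with the frame-end (FE) bit set contributes its buffer as a page
 * fragment.  The structure and helper below are hypothetical stand-ins.
 */
#if 0	/* illustrative example only */
struct ex_rx_bd {
	int frame_end;	/* mirrors the HNS3_RXD_FE_B bit */
};

static int ex_count_frame_bds(const struct ex_rx_bd *bd, int ring_size,
			      int first)
{
	int i = first, bnum = 1;

	while (!bd[i].frame_end) {	/* walk until FE is seen */
		i = (i + 1) % ring_size;
		bnum++;
	}
	return bnum;
}
#endif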
2082
2083static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
2084{
2085#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2086 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2087 int recv_pkts, recv_bds, clean_count, err;
2088 int unused_count = hns3_desc_unused(ring);
2089 struct sk_buff *skb = NULL;
2090 int num, bnum = 0;
2091
2092 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2093	rmb(); /* Make sure num is read before any descriptor data is touched */
2094
2095 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2096 num -= unused_count;
2097
2098 while (recv_pkts < budget && recv_bds < num) {
2099 /* Reuse or realloc buffers */
2100 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2101 hns3_nic_alloc_rx_buffers(ring,
2102 clean_count + unused_count);
2103 clean_count = 0;
2104 unused_count = hns3_desc_unused(ring);
2105 }
2106
2107 /* Poll one pkt */
2108 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2109		if (unlikely(!skb)) /* Unrecoverable failure, stop polling */
2110 goto out;
2111
2112 recv_bds += bnum;
2113 clean_count += bnum;
2114		if (unlikely(err)) { /* Skip over this erroneous packet */
2115 recv_pkts++;
2116 continue;
2117 }
2118
2119		/* Hand the packet up to the network stack */
2120 skb->protocol = eth_type_trans(skb, netdev);
2121 (void)napi_gro_receive(&ring->tqp_vector->napi, skb);
2122
2123 recv_pkts++;
2124 }
2125
2126out:
2127	/* Give any remaining cleaned buffers back to hardware before returning */
2128 if (clean_count + unused_count > 0)
2129 hns3_nic_alloc_rx_buffers(ring,
2130 clean_count + unused_count);
2131
2132 return recv_pkts;
2133}
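
/* Editor's note: an illustrative sketch (not part of the driver) of the
 * batched RX refill policy above: consumed descriptors are only handed back
 * to hardware once at least RCB_NOF_ALLOC_RX_BUFF_ONCE (16) of them, plus
 * any already-unused ones, have accumulated, which amortises the cost of
 * re-arming the ring.  The refill callback is a hypothetical stand-in for
 * hns3_nic_alloc_rx_buffers(); the real code also re-reads the unused count
 * afterwards.
 */
#if 0	/* illustrative example only */
#define EX_REFILL_BATCH	16

static void ex_maybe_refill(int *clean_count, int unused_count,
			    void (*refill)(int num))
{
	if (*clean_count + unused_count >= EX_REFILL_BATCH) {
		refill(*clean_count + unused_count);
		*clean_count = 0;
	}
}
#endif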
2134
2135static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2136{
2137#define HNS3_RX_ULTRA_PACKET_RATE 40000
2138 enum hns3_flow_level_range new_flow_level;
2139 struct hns3_enet_tqp_vector *tqp_vector;
2140 int packets_per_secs;
2141 int bytes_per_usecs;
2142 u16 new_int_gl;
2143 int usecs;
2144
2145 if (!ring_group->int_gl)
2146 return false;
2147
2148 if (ring_group->total_packets == 0) {
2149 ring_group->int_gl = HNS3_INT_GL_50K;
2150 ring_group->flow_level = HNS3_FLOW_LOW;
2151 return true;
2152 }
2153
2154	/* Simple throttle rate management
2155 * 0-10MB/s lower (50000 ints/s)
2156 * 10-20MB/s middle (20000 ints/s)
2157 * 20-1249MB/s high (18000 ints/s)
2158 * > 40000pps ultra (8000 ints/s)
2159 */
2160 new_flow_level = ring_group->flow_level;
2161 new_int_gl = ring_group->int_gl;
2162 tqp_vector = ring_group->ring->tqp_vector;
2163 usecs = (ring_group->int_gl << 1);
2164 bytes_per_usecs = ring_group->total_bytes / usecs;
2165	/* 1000000 microseconds per second: scale to packets per second */
2166 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2167
2168 switch (new_flow_level) {
2169 case HNS3_FLOW_LOW:
2170 if (bytes_per_usecs > 10)
2171 new_flow_level = HNS3_FLOW_MID;
2172 break;
2173 case HNS3_FLOW_MID:
2174 if (bytes_per_usecs > 20)
2175 new_flow_level = HNS3_FLOW_HIGH;
2176 else if (bytes_per_usecs <= 10)
2177 new_flow_level = HNS3_FLOW_LOW;
2178 break;
2179 case HNS3_FLOW_HIGH:
2180 case HNS3_FLOW_ULTRA:
2181 default:
2182 if (bytes_per_usecs <= 20)
2183 new_flow_level = HNS3_FLOW_MID;
2184 break;
2185 }
2186
2187 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2188 (&tqp_vector->rx_group == ring_group))
2189 new_flow_level = HNS3_FLOW_ULTRA;
2190
2191 switch (new_flow_level) {
2192 case HNS3_FLOW_LOW:
2193 new_int_gl = HNS3_INT_GL_50K;
2194 break;
2195 case HNS3_FLOW_MID:
2196 new_int_gl = HNS3_INT_GL_20K;
2197 break;
2198 case HNS3_FLOW_HIGH:
2199 new_int_gl = HNS3_INT_GL_18K;
2200 break;
2201 case HNS3_FLOW_ULTRA:
2202 new_int_gl = HNS3_INT_GL_8K;
2203 break;
2204 default:
2205 break;
2206 }
2207
2208 ring_group->total_bytes = 0;
2209 ring_group->total_packets = 0;
2210 ring_group->flow_level = new_flow_level;
2211 if (new_int_gl != ring_group->int_gl) {
2212 ring_group->int_gl = new_int_gl;
2213 return true;
2214 }
2215 return false;
2216}
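
/* Editor's note: a worked example (not part of the driver) of the adaptive
 * coalescing arithmetic above, assuming, as the code appears to, that one
 * int_gl unit corresponds to 2 microseconds (hence the "<< 1").  All
 * numbers below are hypothetical.
 */
#if 0	/* illustrative example only */
static void ex_flow_rate_example(void)
{
	unsigned int int_gl = 10;			/* GL register value */
	unsigned int usecs = int_gl << 1;		/* 20 us             */
	unsigned int total_bytes = 4000;
	unsigned int total_packets = 30;
	unsigned int bytes_per_usecs = total_bytes / usecs;	/* 200     */
	unsigned int pps = total_packets * 1000000 / usecs;	/* 1500000 */

	/* bytes_per_usecs > 20        -> HNS3_FLOW_HIGH  (18K ints/s)
	 * pps > 40000 on an RX group  -> HNS3_FLOW_ULTRA (8K ints/s)
	 */
	(void)bytes_per_usecs;
	(void)pps;
}
#endif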
2217
2218static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2219{
2220 u16 rx_int_gl, tx_int_gl;
2221 bool rx, tx;
2222
2223 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2224 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2225 rx_int_gl = tqp_vector->rx_group.int_gl;
2226 tx_int_gl = tqp_vector->tx_group.int_gl;
2227 if (rx && tx) {
2228 if (rx_int_gl > tx_int_gl) {
2229 tqp_vector->tx_group.int_gl = rx_int_gl;
2230 tqp_vector->tx_group.flow_level =
2231 tqp_vector->rx_group.flow_level;
2232 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2233 } else {
2234 tqp_vector->rx_group.int_gl = tx_int_gl;
2235 tqp_vector->rx_group.flow_level =
2236 tqp_vector->tx_group.flow_level;
2237 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2238 }
2239 }
2240}
2241
2242static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2243{
2244 struct hns3_enet_ring *ring;
2245 int rx_pkt_total = 0;
2246
2247 struct hns3_enet_tqp_vector *tqp_vector =
2248 container_of(napi, struct hns3_enet_tqp_vector, napi);
2249 bool clean_complete = true;
2250 int rx_budget;
2251
2252 /* Since the actual Tx work is minimal, we can give the Tx a larger
2253 * budget and be more aggressive about cleaning up the Tx descriptors.
2254 */
2255 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2256 if (!hns3_clean_tx_ring(ring, budget))
2257 clean_complete = false;
2258 }
2259
2260	/* make sure the rx ring budget is not smaller than 1 */
2261 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2262
2263 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2264 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
2265
2266 if (rx_cleaned >= rx_budget)
2267 clean_complete = false;
2268
2269 rx_pkt_total += rx_cleaned;
2270 }
2271
2272 tqp_vector->rx_group.total_packets += rx_pkt_total;
2273
2274 if (!clean_complete)
2275 return budget;
2276
2277 napi_complete(napi);
2278 hns3_update_new_int_gl(tqp_vector);
2279 hns3_mask_vector_irq(tqp_vector, 1);
2280
2281 return rx_pkt_total;
2282}
2283
2284static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2285 struct hnae3_ring_chain_node *head)
2286{
2287 struct pci_dev *pdev = tqp_vector->handle->pdev;
2288 struct hnae3_ring_chain_node *cur_chain = head;
2289 struct hnae3_ring_chain_node *chain;
2290 struct hns3_enet_ring *tx_ring;
2291 struct hns3_enet_ring *rx_ring;
2292
2293 tx_ring = tqp_vector->tx_group.ring;
2294 if (tx_ring) {
2295 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2296 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2297 HNAE3_RING_TYPE_TX);
2298
2299 cur_chain->next = NULL;
2300
2301 while (tx_ring->next) {
2302 tx_ring = tx_ring->next;
2303
2304 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2305 GFP_KERNEL);
2306 if (!chain)
2307 return -ENOMEM;
2308
2309 cur_chain->next = chain;
2310 chain->tqp_index = tx_ring->tqp->tqp_index;
2311 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2312 HNAE3_RING_TYPE_TX);
2313
2314 cur_chain = chain;
2315 }
2316 }
2317
2318 rx_ring = tqp_vector->rx_group.ring;
2319 if (!tx_ring && rx_ring) {
2320 cur_chain->next = NULL;
2321 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2322 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2323 HNAE3_RING_TYPE_RX);
2324
2325 rx_ring = rx_ring->next;
2326 }
2327
2328 while (rx_ring) {
2329 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2330 if (!chain)
2331 return -ENOMEM;
2332
2333 cur_chain->next = chain;
2334 chain->tqp_index = rx_ring->tqp->tqp_index;
2335 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2336 HNAE3_RING_TYPE_RX);
2337 cur_chain = chain;
2338
2339 rx_ring = rx_ring->next;
2340 }
2341
2342 return 0;
2343}
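
/* Editor's note: the chain built above uses a caller-provided head node and
 * devm-allocates one node per additional ring, so for a vector serving two
 * TX and two RX queues the list handed to the AE layer looks roughly like:
 *
 *   head(TX q0) -> node(TX q1) -> node(RX q0) -> node(RX q1) -> NULL
 *
 * Each node carries the TQP index plus a TX/RX type bit.
 */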
2344
2345static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2346 struct hnae3_ring_chain_node *head)
2347{
2348 struct pci_dev *pdev = tqp_vector->handle->pdev;
2349 struct hnae3_ring_chain_node *chain_tmp, *chain;
2350
2351 chain = head->next;
2352
2353 while (chain) {
2354 chain_tmp = chain->next;
2355 devm_kfree(&pdev->dev, chain);
2356 chain = chain_tmp;
2357 }
2358}
2359
2360static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2361 struct hns3_enet_ring *ring)
2362{
2363 ring->next = group->ring;
2364 group->ring = ring;
2365
2366 group->count++;
2367}
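
/* Editor's note: hns3_add_ring_to_group() above is a plain push-front onto a
 * singly linked list threaded through ring->next.  A minimal stand-alone
 * sketch (not part of the driver) of the same pattern, with hypothetical
 * types:
 */
#if 0	/* illustrative example only */
struct ex_ring {
	struct ex_ring *next;
};

struct ex_group {
	struct ex_ring *head;
	int count;
};

static void ex_push_front(struct ex_group *group, struct ex_ring *ring)
{
	ring->next = group->head;	/* new node points at the old head  */
	group->head = ring;		/* group now starts at the new node */
	group->count++;
}
#endif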
2368
2369static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2370{
2371 struct hnae3_ring_chain_node vector_ring_chain;
2372 struct hnae3_handle *h = priv->ae_handle;
2373 struct hns3_enet_tqp_vector *tqp_vector;
2374 struct hnae3_vector_info *vector;
2375 struct pci_dev *pdev = h->pdev;
2376 u16 tqp_num = h->kinfo.num_tqps;
2377 u16 vector_num;
2378 int ret = 0;
2379 u16 i;
2380
2381	/* The RSS size, the number of online CPUs and vector_num should match */
2382	/* Should consider 2p/4p (multi-socket) systems later */
2383 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2384 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2385 GFP_KERNEL);
2386 if (!vector)
2387 return -ENOMEM;
2388
2389 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2390
2391 priv->vector_num = vector_num;
2392 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2393 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2394 GFP_KERNEL);
2395 if (!priv->tqp_vector)
2396 return -ENOMEM;
2397
2398 for (i = 0; i < tqp_num; i++) {
2399 u16 vector_i = i % vector_num;
2400
2401 tqp_vector = &priv->tqp_vector[vector_i];
2402
2403 hns3_add_ring_to_group(&tqp_vector->tx_group,
2404 priv->ring_data[i].ring);
2405
2406 hns3_add_ring_to_group(&tqp_vector->rx_group,
2407 priv->ring_data[i + tqp_num].ring);
2408
2409 tqp_vector->idx = vector_i;
2410 tqp_vector->mask_addr = vector[vector_i].io_addr;
2411 tqp_vector->vector_irq = vector[vector_i].vector;
2412 tqp_vector->num_tqps++;
2413
2414 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2415 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2416 }
2417
2418 for (i = 0; i < vector_num; i++) {
2419 tqp_vector = &priv->tqp_vector[i];
2420
2421 tqp_vector->rx_group.total_bytes = 0;
2422 tqp_vector->rx_group.total_packets = 0;
2423 tqp_vector->tx_group.total_bytes = 0;
2424 tqp_vector->tx_group.total_packets = 0;
2425 hns3_vector_gl_rl_init(tqp_vector);
2426 tqp_vector->handle = h;
2427
2428 ret = hns3_get_vector_ring_chain(tqp_vector,
2429 &vector_ring_chain);
2430 if (ret)
2431 goto out;
2432
2433 ret = h->ae_algo->ops->map_ring_to_vector(h,
2434 tqp_vector->vector_irq, &vector_ring_chain);
2435 if (ret)
2436 goto out;
2437
2438 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2439
2440 netif_napi_add(priv->netdev, &tqp_vector->napi,
2441 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2442 }
2443
2444out:
2445 devm_kfree(&pdev->dev, vector);
2446 return ret;
2447}
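
/* Editor's note: an illustrative sketch (not part of the driver) of the
 * "i % vector_num" round-robin used above to spread TQPs over interrupt
 * vectors.  With, say, 8 TQPs and 3 vectors the mapping works out to:
 *   tqp 0,3,6 -> vector 0;  tqp 1,4,7 -> vector 1;  tqp 2,5 -> vector 2
 */
#if 0	/* illustrative example only */
static unsigned int ex_tqp_to_vector(unsigned int tqp_index,
				     unsigned int vector_num)
{
	return tqp_index % vector_num;
}
#endif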
2448
2449static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2450{
2451 struct hnae3_ring_chain_node vector_ring_chain;
2452 struct hnae3_handle *h = priv->ae_handle;
2453 struct hns3_enet_tqp_vector *tqp_vector;
2454 struct pci_dev *pdev = h->pdev;
2455 int i, ret;
2456
2457 for (i = 0; i < priv->vector_num; i++) {
2458 tqp_vector = &priv->tqp_vector[i];
2459
2460 ret = hns3_get_vector_ring_chain(tqp_vector,
2461 &vector_ring_chain);
2462 if (ret)
2463 return ret;
2464
2465 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2466 tqp_vector->vector_irq, &vector_ring_chain);
2467 if (ret)
2468 return ret;
2469
2470 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2471
2472 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2473 (void)irq_set_affinity_hint(
2474 priv->tqp_vector[i].vector_irq,
2475 NULL);
2476 devm_free_irq(&pdev->dev,
2477 priv->tqp_vector[i].vector_irq,
2478 &priv->tqp_vector[i]);
2479 }
2480
2481 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2482
2483 netif_napi_del(&priv->tqp_vector[i].napi);
2484 }
2485
2486 devm_kfree(&pdev->dev, priv->tqp_vector);
2487
2488 return 0;
2489}
2490
2491static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2492 int ring_type)
2493{
2494 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2495 int queue_num = priv->ae_handle->kinfo.num_tqps;
2496 struct pci_dev *pdev = priv->ae_handle->pdev;
2497 struct hns3_enet_ring *ring;
2498
2499 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2500 if (!ring)
2501 return -ENOMEM;
2502
2503 if (ring_type == HNAE3_RING_TYPE_TX) {
2504 ring_data[q->tqp_index].ring = ring;
2505 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2506 } else {
2507 ring_data[q->tqp_index + queue_num].ring = ring;
2508 ring->io_base = q->io_base;
2509 }
2510
2511 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2512
2513 ring_data[q->tqp_index].queue_index = q->tqp_index;
2514
2515 ring->tqp = q;
2516 ring->desc = NULL;
2517 ring->desc_cb = NULL;
2518 ring->dev = priv->dev;
2519 ring->desc_dma_addr = 0;
2520 ring->buf_size = q->buf_size;
2521 ring->desc_num = q->desc_num;
2522 ring->next_to_use = 0;
2523 ring->next_to_clean = 0;
2524
2525 return 0;
2526}
2527
2528static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2529 struct hns3_nic_priv *priv)
2530{
2531 int ret;
2532
2533 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2534 if (ret)
2535 return ret;
2536
2537 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2538 if (ret)
2539 return ret;
2540
2541 return 0;
2542}
2543
2544static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2545{
2546 struct hnae3_handle *h = priv->ae_handle;
2547 struct pci_dev *pdev = h->pdev;
2548 int i, ret;
2549
2550 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2551 sizeof(*priv->ring_data) * 2,
2552 GFP_KERNEL);
2553 if (!priv->ring_data)
2554 return -ENOMEM;
2555
2556 for (i = 0; i < h->kinfo.num_tqps; i++) {
2557 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2558 if (ret)
2559 goto err;
2560 }
2561
2562 return 0;
2563err:
2564 devm_kfree(&pdev->dev, priv->ring_data);
2565 return ret;
2566}
2567
2568static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2569{
2570 int ret;
2571
2572 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2573 return -EINVAL;
2574
2575 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2576 GFP_KERNEL);
2577 if (!ring->desc_cb) {
2578 ret = -ENOMEM;
2579 goto out;
2580 }
2581
2582 ret = hns3_alloc_desc(ring);
2583 if (ret)
2584 goto out_with_desc_cb;
2585
2586 if (!HNAE3_IS_TX_RING(ring)) {
2587 ret = hns3_alloc_ring_buffers(ring);
2588 if (ret)
2589 goto out_with_desc;
2590 }
2591
2592 return 0;
2593
2594out_with_desc:
2595 hns3_free_desc(ring);
2596out_with_desc_cb:
2597 kfree(ring->desc_cb);
2598 ring->desc_cb = NULL;
2599out:
2600 return ret;
2601}
2602
2603static void hns3_fini_ring(struct hns3_enet_ring *ring)
2604{
2605 hns3_free_desc(ring);
2606 kfree(ring->desc_cb);
2607 ring->desc_cb = NULL;
2608 ring->next_to_clean = 0;
2609 ring->next_to_use = 0;
2610}
2611
2612int hns3_buf_size2type(u32 buf_size)
2613{
2614 int bd_size_type;
2615
2616 switch (buf_size) {
2617 case 512:
2618 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2619 break;
2620 case 1024:
2621 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2622 break;
2623 case 2048:
2624 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2625 break;
2626 case 4096:
2627 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2628 break;
2629 default:
2630 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2631 }
2632
2633 return bd_size_type;
2634}
2635
2636static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2637{
2638 dma_addr_t dma = ring->desc_dma_addr;
2639 struct hnae3_queue *q = ring->tqp;
2640
2641 if (!HNAE3_IS_TX_RING(ring)) {
2642 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2643 (u32)dma);
2644 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2645 (u32)((dma >> 31) >> 1));
2646
2647 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2648 hns3_buf_size2type(ring->buf_size));
2649 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2650 ring->desc_num / 8 - 1);
2651
2652 } else {
2653 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2654 (u32)dma);
2655 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2656 (u32)((dma >> 31) >> 1));
2657
2658 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2659 hns3_buf_size2type(ring->buf_size));
2660 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2661 ring->desc_num / 8 - 1);
2662 }
2663}
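
/* Editor's note: the "(u32)((dma >> 31) >> 1)" above writes the upper half
 * of a DMA address into the high base-address register while staying well
 * defined even when dma_addr_t is only 32 bits wide (a direct ">> 32" on a
 * 32-bit type would be undefined behaviour).  A stand-alone sketch of the
 * same idiom (not part of the driver):
 */
#if 0	/* illustrative example only */
static void ex_split_dma_addr(unsigned long long dma,
			      unsigned int *lo, unsigned int *hi)
{
	*lo = (unsigned int)dma;		/* bits 31..0  */
	*hi = (unsigned int)((dma >> 31) >> 1);	/* bits 63..32 */
}
#endif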
2664
2665static int hns3_init_all_ring(struct hns3_nic_priv *priv)
2666{
2667 struct hnae3_handle *h = priv->ae_handle;
2668 int ring_num = h->kinfo.num_tqps * 2;
2669 int i, j;
2670 int ret;
2671
2672 for (i = 0; i < ring_num; i++) {
2673 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2674 if (ret) {
2675 dev_err(priv->dev,
2676 "Alloc ring memory fail! ret=%d\n", ret);
2677 goto out_when_alloc_ring_memory;
2678 }
2679
2680 hns3_init_ring_hw(priv->ring_data[i].ring);
2681
2682 u64_stats_init(&priv->ring_data[i].ring->syncp);
2683 }
2684
2685 return 0;
2686
2687out_when_alloc_ring_memory:
2688	for (j = i - 1; j >= 0; j--)
2689		hns3_fini_ring(priv->ring_data[j].ring);
2690
2691 return -ENOMEM;
2692}
2693
2694static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2695{
2696 struct hnae3_handle *h = priv->ae_handle;
2697 int i;
2698
2699 for (i = 0; i < h->kinfo.num_tqps; i++) {
2700 if (h->ae_algo->ops->reset_queue)
2701 h->ae_algo->ops->reset_queue(h, i);
2702
2703 hns3_fini_ring(priv->ring_data[i].ring);
2704 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2705 }
2706
2707 return 0;
2708}
2709
2710/* Set the MAC address if it is configured, or leave it to the AE driver */
2711static void hns3_init_mac_addr(struct net_device *netdev)
2712{
2713 struct hns3_nic_priv *priv = netdev_priv(netdev);
2714 struct hnae3_handle *h = priv->ae_handle;
2715 u8 mac_addr_temp[ETH_ALEN];
2716
2717 if (h->ae_algo->ops->get_mac_addr) {
2718 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2719 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2720 }
2721
2722 /* Check if the MAC address is valid, if not get a random one */
2723 if (!is_valid_ether_addr(netdev->dev_addr)) {
2724 eth_hw_addr_random(netdev);
2725 dev_warn(priv->dev, "using random MAC address %pM\n",
2726 netdev->dev_addr);
2727	}
2728
2729 if (h->ae_algo->ops->set_mac_addr)
2730 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2731
2732}
2733
2734static void hns3_nic_set_priv_ops(struct net_device *netdev)
2735{
2736 struct hns3_nic_priv *priv = netdev_priv(netdev);
2737
2738 if ((netdev->features & NETIF_F_TSO) ||
2739 (netdev->features & NETIF_F_TSO6)) {
2740 priv->ops.fill_desc = hns3_fill_desc_tso;
2741 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2742 } else {
2743 priv->ops.fill_desc = hns3_fill_desc;
2744 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2745 }
2746}
2747
2748static int hns3_client_init(struct hnae3_handle *handle)
2749{
2750 struct pci_dev *pdev = handle->pdev;
2751 struct hns3_nic_priv *priv;
2752 struct net_device *netdev;
2753 int ret;
2754
2755 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2756 handle->kinfo.num_tqps);
2757 if (!netdev)
2758 return -ENOMEM;
2759
2760 priv = netdev_priv(netdev);
2761 priv->dev = &pdev->dev;
2762 priv->netdev = netdev;
2763 priv->ae_handle = handle;
2764
2765 handle->kinfo.netdev = netdev;
2766 handle->priv = (void *)priv;
2767
2768 hns3_init_mac_addr(netdev);
2769
2770 hns3_set_default_feature(netdev);
2771
2772 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2773 netdev->priv_flags |= IFF_UNICAST_FLT;
2774 netdev->netdev_ops = &hns3_nic_netdev_ops;
2775 SET_NETDEV_DEV(netdev, &pdev->dev);
2776 hns3_ethtool_set_ops(netdev);
2777 hns3_nic_set_priv_ops(netdev);
2778
2779 /* Carrier off reporting is important to ethtool even BEFORE open */
2780 netif_carrier_off(netdev);
2781
2782 ret = hns3_get_ring_config(priv);
2783 if (ret) {
2784 ret = -ENOMEM;
2785 goto out_get_ring_cfg;
2786 }
2787
2788 ret = hns3_nic_init_vector_data(priv);
2789 if (ret) {
2790 ret = -ENOMEM;
2791 goto out_init_vector_data;
2792 }
2793
2794 ret = hns3_init_all_ring(priv);
2795 if (ret) {
2796 ret = -ENOMEM;
2797 goto out_init_ring_data;
2798 }
2799
2800 ret = register_netdev(netdev);
2801 if (ret) {
2802 dev_err(priv->dev, "probe register netdev fail!\n");
2803 goto out_reg_netdev_fail;
2804 }
2805
2806 hns3_dcbnl_setup(handle);
2807
2808 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
2809 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2810
2811 return ret;
2812
2813out_reg_netdev_fail:
2814out_init_ring_data:
2815 (void)hns3_nic_uninit_vector_data(priv);
2816 priv->ring_data = NULL;
2817out_init_vector_data:
2818out_get_ring_cfg:
2819 priv->ae_handle = NULL;
2820 free_netdev(netdev);
2821 return ret;
2822}
2823
2824static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2825{
2826 struct net_device *netdev = handle->kinfo.netdev;
2827 struct hns3_nic_priv *priv = netdev_priv(netdev);
2828 int ret;
2829
2830 if (netdev->reg_state != NETREG_UNINITIALIZED)
2831 unregister_netdev(netdev);
2832
2833 ret = hns3_nic_uninit_vector_data(priv);
2834 if (ret)
2835 netdev_err(netdev, "uninit vector error\n");
2836
2837 ret = hns3_uninit_all_ring(priv);
2838 if (ret)
2839 netdev_err(netdev, "uninit ring error\n");
2840
2841 priv->ring_data = NULL;
2842
2843 free_netdev(netdev);
2844}
2845
2846static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2847{
2848 struct net_device *netdev = handle->kinfo.netdev;
2849
2850 if (!netdev)
2851 return;
2852
2853 if (linkup) {
2854 netif_carrier_on(netdev);
2855 netif_tx_wake_all_queues(netdev);
2856 netdev_info(netdev, "link up\n");
2857 } else {
2858 netif_carrier_off(netdev);
2859 netif_tx_stop_all_queues(netdev);
2860 netdev_info(netdev, "link down\n");
2861 }
2862}
2863
2864static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
2865{
2866 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
2867 struct net_device *ndev = kinfo->netdev;
2868	bool if_running;
2869	int ret;
2870	u8 i;
2871
2872	if (!ndev)
2873		return -ENODEV;
2874	if (tc > HNAE3_MAX_TC)
2875		return -EINVAL;
2876	if_running = netif_running(ndev);
2877
2878 ret = netdev_set_num_tc(ndev, tc);
2879 if (ret)
2880 return ret;
2881
2882 if (if_running) {
2883 (void)hns3_nic_net_stop(ndev);
2884 msleep(100);
2885 }
2886
2887 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
2888 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
2889 if (ret)
2890 goto err_out;
2891
2892 if (tc <= 1) {
2893 netdev_reset_tc(ndev);
2894 goto out;
2895 }
2896
2897 for (i = 0; i < HNAE3_MAX_TC; i++) {
2898 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
2899
2900 if (tc_info->enable)
2901 netdev_set_tc_queue(ndev,
2902 tc_info->tc,
2903 tc_info->tqp_count,
2904 tc_info->tqp_offset);
2905 }
2906
2907 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
2908 netdev_set_prio_tc_map(ndev, i,
2909 kinfo->prio_tc[i]);
2910 }
2911
2912out:
2913 ret = hns3_nic_set_real_num_queue(ndev);
2914
2915err_out:
2916 if (if_running)
2917 (void)hns3_nic_net_open(ndev);
2918
2919 return ret;
2920}
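
/* Editor's note: a schematic example (not part of the driver) of the TC to
 * queue layout that the netdev_set_num_tc()/netdev_set_tc_queue() and
 * netdev_set_prio_tc_map() calls above describe to the stack.  With 2 TCs
 * and 4 TQPs per TC it amounts to TC0 -> queues [0,4) and TC1 -> queues
 * [4,8), with each 802.1p priority mapped to one of the TCs.  The type and
 * helper below are hypothetical.
 */
#if 0	/* illustrative example only */
struct ex_tc_info {
	unsigned int tqp_count;		/* queues owned by this TC */
	unsigned int tqp_offset;	/* first queue of this TC  */
};

static void ex_fill_tc_layout(struct ex_tc_info *tc, unsigned int num_tc,
			      unsigned int tqps_per_tc)
{
	unsigned int i;

	for (i = 0; i < num_tc; i++) {
		tc[i].tqp_count = tqps_per_tc;
		tc[i].tqp_offset = i * tqps_per_tc;
	}
}
#endif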
2921
2922const struct hnae3_client_ops client_ops = {
2923 .init_instance = hns3_client_init,
2924 .uninit_instance = hns3_client_uninit,
2925 .link_status_change = hns3_link_status_change,
2926	.setup_tc = hns3_client_setup_tc,
2927};
2928
2929/* hns3_init_module - Driver registration routine
2930 * hns3_init_module is the first routine called when the driver is
2931 * loaded. All it does is register with the PCI subsystem.
2932 */
2933static int __init hns3_init_module(void)
2934{
2935 int ret;
2936
2937 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
2938 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2939
2940 client.type = HNAE3_CLIENT_KNIC;
2941 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2942 hns3_driver_name);
2943
2944 client.ops = &client_ops;
2945
2946 ret = hnae3_register_client(&client);
2947 if (ret)
2948 return ret;
2949
2950 ret = pci_register_driver(&hns3_driver);
2951 if (ret)
2952 hnae3_unregister_client(&client);
2953
2954 return ret;
2955}
2956module_init(hns3_init_module);
2957
2958/* hns3_exit_module - Driver exit cleanup routine
2959 * hns3_exit_module is called just before the driver is removed
2960 * from memory.
2961 */
2962static void __exit hns3_exit_module(void)
2963{
2964 pci_unregister_driver(&hns3_driver);
2965 hnae3_unregister_client(&client);
2966}
2967module_exit(hns3_exit_module);
2968
2969MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2970MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2971MODULE_LICENSE("GPL");
2972MODULE_ALIAS("pci:hns-nic");