drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/if_vlan.h>
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/skbuff.h>
19#include <linux/sctp.h>
20#include <linux/vermagic.h>
21#include <net/gre.h>
22#include <net/vxlan.h>
23
24#include "hnae3.h"
25#include "hns3_enet.h"
26
27static const char hns3_driver_name[] = "hns3";
28const char hns3_driver_version[] = VERMAGIC_STRING;
29static const char hns3_driver_string[] =
30 "Hisilicon Ethernet Network Driver for Hip08 Family";
31static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
32static struct hnae3_client client;
33
34/* hns3_pci_tbl - PCI Device ID Table
35 *
36 * Last entry must be all 0s
37 *
38 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
39 * Class, Class Mask, private data (not used) }
40 */
41static const struct pci_device_id hns3_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
45 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
47 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
49 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
51 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
53 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
54 /* required last entry */
55 {0, }
56};
57MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
58
59static irqreturn_t hns3_irq_handle(int irq, void *dev)
60{
61 struct hns3_enet_tqp_vector *tqp_vector = dev;
62
63 napi_schedule(&tqp_vector->napi);
64
65 return IRQ_HANDLED;
66}
67
68static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
69{
70 struct hns3_enet_tqp_vector *tqp_vectors;
71 unsigned int i;
72
73 for (i = 0; i < priv->vector_num; i++) {
74 tqp_vectors = &priv->tqp_vector[i];
75
76 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
77 continue;
78
79 /* release the irq resource */
80 free_irq(tqp_vectors->vector_irq, tqp_vectors);
81 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
82 }
83}
84
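/* hns3_nic_init_irq - request a named IRQ for every TQP vector that is not
 * yet initialized. Vectors serving both a TX and an RX ring are named
 * "<netdev>-TxRx-<n>", single-purpose vectors get "Tx"/"Rx" names, and
 * vectors with no rings are skipped. Returns 0 on success or the
 * request_irq() error code.
 */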
85static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
86{
87 struct hns3_enet_tqp_vector *tqp_vectors;
88 int txrx_int_idx = 0;
89 int rx_int_idx = 0;
90 int tx_int_idx = 0;
91 unsigned int i;
92 int ret;
93
94 for (i = 0; i < priv->vector_num; i++) {
95 tqp_vectors = &priv->tqp_vector[i];
96
97 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
98 continue;
99
100 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
101 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
102 "%s-%s-%d", priv->netdev->name, "TxRx",
103 txrx_int_idx++);
104 txrx_int_idx++;
105 } else if (tqp_vectors->rx_group.ring) {
106 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
107 "%s-%s-%d", priv->netdev->name, "Rx",
108 rx_int_idx++);
109 } else if (tqp_vectors->tx_group.ring) {
110 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
111 "%s-%s-%d", priv->netdev->name, "Tx",
112 tx_int_idx++);
113 } else {
114 /* Skip this unused q_vector */
115 continue;
116 }
117
118 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
119
120 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
121 tqp_vectors->name,
122 tqp_vectors);
123 if (ret) {
124 netdev_err(priv->netdev, "request irq(%d) fail\n",
125 tqp_vectors->vector_irq);
126 return ret;
127 }
128
129 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
130 }
131
132 return 0;
133}
134
135static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
136 u32 mask_en)
137{
138 writel(mask_en, tqp_vector->mask_addr);
139}
140
141static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
142{
143 napi_enable(&tqp_vector->napi);
144
145 /* enable vector */
146 hns3_mask_vector_irq(tqp_vector, 1);
147}
148
149static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
150{
151 /* disable vector */
152 hns3_mask_vector_irq(tqp_vector, 0);
153
154 disable_irq(tqp_vector->vector_irq);
155 napi_disable(&tqp_vector->napi);
156}
157
158static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
159 u32 gl_value)
160{
161 /* this defines the configuration for GL (Interrupt Gap Limiter).
162 * GL defines the gap between interrupts.
163 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
164 */
165 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
166 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
167 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
168}
169
170static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
171 u32 rl_value)
172{
173 /* this defines the configuration for RL (Interrupt Rate Limiter).
174 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
175 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
176 */
177 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
178}
179
180static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
181{
182 /* initialize the configuration for interrupt coalescing.
183 * 1. GL (Interrupt Gap Limiter)
184 * 2. RL (Interrupt Rate Limiter)
185 */
186
187 /* Default: enable interrupt coalescing */
188 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
189 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
190 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
191 /* for now we are disabling Interrupt RL - we
192 * will re-enable later
193 */
194 hns3_set_vector_coalesc_rl(tqp_vector, 0);
195 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
196 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
197}
198
199static int hns3_nic_set_real_num_queue(struct net_device *netdev)
200{
201 struct hnae3_handle *h = hns3_get_handle(netdev);
202 struct hnae3_knic_private_info *kinfo = &h->kinfo;
203 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
204 int ret;
205
206 ret = netif_set_real_num_tx_queues(netdev, queue_size);
207 if (ret) {
208 netdev_err(netdev,
209 "netif_set_real_num_tx_queues fail, ret=%d!\n",
210 ret);
211 return ret;
212 }
213
214 ret = netif_set_real_num_rx_queues(netdev, queue_size);
215 if (ret) {
216 netdev_err(netdev,
217 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
218 return ret;
219 }
220
221 return 0;
222}
223
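/* hns3_nic_net_up - bring the data path up: request the vector IRQs, enable
 * each TQP vector (napi and the vector interrupt) and then ask the attached
 * AE handle to start the hardware. On failure the already enabled vectors
 * are disabled again and the IRQ resources are released.
 */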
224static int hns3_nic_net_up(struct net_device *netdev)
225{
226 struct hns3_nic_priv *priv = netdev_priv(netdev);
227 struct hnae3_handle *h = priv->ae_handle;
228 int i, j;
229 int ret;
230
231 /* get irq resource for all vectors */
232 ret = hns3_nic_init_irq(priv);
233 if (ret) {
234 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
235 return ret;
236 }
237
238 /* enable the vectors */
239 for (i = 0; i < priv->vector_num; i++)
240 hns3_vector_enable(&priv->tqp_vector[i]);
241
242 /* start the ae_dev */
243 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
244 if (ret)
245 goto out_start_err;
246
247 return 0;
248
249out_start_err:
250 for (j = i - 1; j >= 0; j--)
251 hns3_vector_disable(&priv->tqp_vector[j]);
252
253 hns3_nic_uninit_irq(priv);
254
255 return ret;
256}
257
258static int hns3_nic_net_open(struct net_device *netdev)
259{
260 int ret;
261
262 netif_carrier_off(netdev);
263
264 ret = hns3_nic_set_real_num_queue(netdev);
265 if (ret)
266 return ret;
267
268 ret = hns3_nic_net_up(netdev);
269 if (ret) {
270 netdev_err(netdev,
271 "hns net up fail, ret=%d!\n", ret);
272 return ret;
273 }
274
275 return 0;
276}
277
278static void hns3_nic_net_down(struct net_device *netdev)
279{
280 struct hns3_nic_priv *priv = netdev_priv(netdev);
281 const struct hnae3_ae_ops *ops;
282 int i;
283
284 /* stop ae_dev */
285 ops = priv->ae_handle->ae_algo->ops;
286 if (ops->stop)
287 ops->stop(priv->ae_handle);
288
289 /* disable vectors */
290 for (i = 0; i < priv->vector_num; i++)
291 hns3_vector_disable(&priv->tqp_vector[i]);
292
293 /* free irq resources */
294 hns3_nic_uninit_irq(priv);
295}
296
297static int hns3_nic_net_stop(struct net_device *netdev)
298{
299 netif_tx_stop_all_queues(netdev);
300 netif_carrier_off(netdev);
301
302 hns3_nic_net_down(netdev);
303
304 return 0;
305}
306
307static int hns3_nic_uc_sync(struct net_device *netdev,
308 const unsigned char *addr)
309{
310 struct hnae3_handle *h = hns3_get_handle(netdev);
311
312 if (h->ae_algo->ops->add_uc_addr)
313 return h->ae_algo->ops->add_uc_addr(h, addr);
314
315 return 0;
316}
317
318static int hns3_nic_uc_unsync(struct net_device *netdev,
319 const unsigned char *addr)
320{
321 struct hnae3_handle *h = hns3_get_handle(netdev);
322
323 if (h->ae_algo->ops->rm_uc_addr)
324 return h->ae_algo->ops->rm_uc_addr(h, addr);
325
326 return 0;
327}
328
329static int hns3_nic_mc_sync(struct net_device *netdev,
330 const unsigned char *addr)
331{
332 struct hnae3_handle *h = hns3_get_handle(netdev);
333
334 if (h->ae_algo->ops->add_mc_addr)
335 return h->ae_algo->ops->add_mc_addr(h, addr);
336
337 return 0;
338}
339
340static int hns3_nic_mc_unsync(struct net_device *netdev,
341 const unsigned char *addr)
342{
343 struct hnae3_handle *h = hns3_get_handle(netdev);
344
345 if (h->ae_algo->ops->rm_mc_addr)
346 return h->ae_algo->ops->rm_mc_addr(h, addr);
347
348 return 0;
349}
350
351static void hns3_nic_set_rx_mode(struct net_device *netdev)
352{
353 struct hnae3_handle *h = hns3_get_handle(netdev);
354
355 if (h->ae_algo->ops->set_promisc_mode) {
356 if (netdev->flags & IFF_PROMISC)
357 h->ae_algo->ops->set_promisc_mode(h, 1);
358 else
359 h->ae_algo->ops->set_promisc_mode(h, 0);
360 }
361 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
362 netdev_err(netdev, "sync uc address fail\n");
363 if (netdev->flags & IFF_MULTICAST)
364 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
365 netdev_err(netdev, "sync mc address fail\n");
366}
367
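/* hns3_set_tso - prepare a GSO skb for hardware TSO: clear the IPv4 (and,
 * for UDP tunnels without GSO_PARTIAL, the outer UDP) checksum fields,
 * remove the payload length from the TCP pseudo checksum, and report the
 * paylen, MSS and TSO bit for the TX descriptor. Non-GSO skbs are left
 * untouched.
 */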
368static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
369 u16 *mss, u32 *type_cs_vlan_tso)
370{
371 u32 l4_offset, hdr_len;
372 union l3_hdr_info l3;
373 union l4_hdr_info l4;
374 u32 l4_paylen;
375 int ret;
376
377 if (!skb_is_gso(skb))
378 return 0;
379
380 ret = skb_cow_head(skb, 0);
381 if (ret)
382 return ret;
383
384 l3.hdr = skb_network_header(skb);
385 l4.hdr = skb_transport_header(skb);
386
387 /* Software should clear the IPv4's checksum field when tso is
388 * needed.
389 */
390 if (l3.v4->version == 4)
391 l3.v4->check = 0;
392
393 /* tunnel packet.*/
394 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
395 SKB_GSO_GRE_CSUM |
396 SKB_GSO_UDP_TUNNEL |
397 SKB_GSO_UDP_TUNNEL_CSUM)) {
398 if ((!(skb_shinfo(skb)->gso_type &
399 SKB_GSO_PARTIAL)) &&
400 (skb_shinfo(skb)->gso_type &
401 SKB_GSO_UDP_TUNNEL_CSUM)) {
402 /* Software should clear the udp's checksum
403 * field when tso is needed.
404 */
405 l4.udp->check = 0;
406 }
407 /* reset l3&l4 pointers from outer to inner headers */
408 l3.hdr = skb_inner_network_header(skb);
409 l4.hdr = skb_inner_transport_header(skb);
410
411 /* Software should clear the IPv4's checksum field when
412 * tso is needed.
413 */
414 if (l3.v4->version == 4)
415 l3.v4->check = 0;
416 }
417
418 /* normal or tunnel packet*/
419 l4_offset = l4.hdr - skb->data;
420 hdr_len = (l4.tcp->doff * 4) + l4_offset;
421
422 /* remove payload length from inner pseudo checksum when tso*/
423 l4_paylen = skb->len - l4_offset;
424 csum_replace_by_diff(&l4.tcp->check,
425 (__force __wsum)htonl(l4_paylen));
426
427 /* find the txbd field values */
428 *paylen = skb->len - hdr_len;
429 hnae_set_bit(*type_cs_vlan_tso,
430 HNS3_TXD_TSO_B, 1);
431
432 /* get MSS for TSO */
433 *mss = skb_shinfo(skb)->gso_size;
434
435 return 0;
436}
437
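/* hns3_get_l4_protocol - extract the outer L4 protocol and, for encapsulated
 * skbs, the inner L4 protocol, skipping IPv6 extension headers where needed.
 * Returns -EINVAL when the outer protocol is neither IPv4 nor IPv6.
 */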
438static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
439 u8 *il4_proto)
440{
441 union {
442 struct iphdr *v4;
443 struct ipv6hdr *v6;
444 unsigned char *hdr;
445 } l3;
446 unsigned char *l4_hdr;
447 unsigned char *exthdr;
448 u8 l4_proto_tmp;
449 __be16 frag_off;
450
451 /* find outer header pointer */
452 l3.hdr = skb_network_header(skb);
453 l4_hdr = skb_inner_transport_header(skb);
454
455 if (skb->protocol == htons(ETH_P_IPV6)) {
456 exthdr = l3.hdr + sizeof(*l3.v6);
457 l4_proto_tmp = l3.v6->nexthdr;
458 if (l4_hdr != exthdr)
459 ipv6_skip_exthdr(skb, exthdr - skb->data,
460 &l4_proto_tmp, &frag_off);
461 } else if (skb->protocol == htons(ETH_P_IP)) {
462 l4_proto_tmp = l3.v4->protocol;
463 } else {
464 return -EINVAL;
465 }
466
467 *ol4_proto = l4_proto_tmp;
468
469 /* not a tunnel packet, so there is no inner L4 protocol */
470 if (!skb->encapsulation) {
471 *il4_proto = 0;
472 return 0;
473 }
474
475 /* find inner header pointer */
476 l3.hdr = skb_inner_network_header(skb);
477 l4_hdr = skb_inner_transport_header(skb);
478
479 if (l3.v6->version == 6) {
480 exthdr = l3.hdr + sizeof(*l3.v6);
481 l4_proto_tmp = l3.v6->nexthdr;
482 if (l4_hdr != exthdr)
483 ipv6_skip_exthdr(skb, exthdr - skb->data,
484 &l4_proto_tmp, &frag_off);
485 } else if (l3.v4->version == 4) {
486 l4_proto_tmp = l3.v4->protocol;
487 }
488
489 *il4_proto = l4_proto_tmp;
490
491 return 0;
492}
493
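/* hns3_set_l2l3l4_len - fill the L2/L3/L4 header length fields of the TX
 * descriptor, plus the outer (OL2/OL3/OL4) lengths for tunnel packets.
 * L2 lengths are written in units of 2 bytes and L3/L4 lengths in units of
 * 4 bytes, matching the hardware encoding of the TXBD.
 */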
494static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
495 u8 il4_proto, u32 *type_cs_vlan_tso,
496 u32 *ol_type_vlan_len_msec)
497{
498 union {
499 struct iphdr *v4;
500 struct ipv6hdr *v6;
501 unsigned char *hdr;
502 } l3;
503 union {
504 struct tcphdr *tcp;
505 struct udphdr *udp;
506 struct gre_base_hdr *gre;
507 unsigned char *hdr;
508 } l4;
509 unsigned char *l2_hdr;
510 u8 l4_proto = ol4_proto;
511 u32 ol2_len;
512 u32 ol3_len;
513 u32 ol4_len;
514 u32 l2_len;
515 u32 l3_len;
516
517 l3.hdr = skb_network_header(skb);
518 l4.hdr = skb_transport_header(skb);
519
520 /* compute L2 header size for normal packet, defined in 2 Bytes */
521 l2_len = l3.hdr - skb->data;
522 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
523 HNS3_TXD_L2LEN_S, l2_len >> 1);
524
525 /* tunnel packet*/
526 if (skb->encapsulation) {
527 /* compute OL2 header size, defined in 2 Bytes */
528 ol2_len = l2_len;
529 hnae_set_field(*ol_type_vlan_len_msec,
530 HNS3_TXD_L2LEN_M,
531 HNS3_TXD_L2LEN_S, ol2_len >> 1);
532
533 /* compute OL3 header size, defined in 4 Bytes */
534 ol3_len = l4.hdr - l3.hdr;
535 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
536 HNS3_TXD_L3LEN_S, ol3_len >> 2);
537
538 /* MAC in UDP, MAC in GRE (0x6558)*/
539 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
540 /* switch MAC header ptr from outer to inner header.*/
541 l2_hdr = skb_inner_mac_header(skb);
542
543 /* compute OL4 header size, defined in 4 Bytes. */
544 ol4_len = l2_hdr - l4.hdr;
545 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
546 HNS3_TXD_L4LEN_S, ol4_len >> 2);
547
548 /* switch IP header ptr from outer to inner header */
549 l3.hdr = skb_inner_network_header(skb);
550
551 /* compute inner l2 header size, defined in 2 Bytes. */
552 l2_len = l3.hdr - l2_hdr;
553 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
554 HNS3_TXD_L2LEN_S, l2_len >> 1);
555 } else {
556 /* skb packet types not supported by hardware,
557 * so the txbd len field is not filled.
558 */
559 return;
560 }
561
562 /* switch L4 header pointer from outer to inner */
563 l4.hdr = skb_inner_transport_header(skb);
564
565 l4_proto = il4_proto;
566 }
567
568 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
569 l3_len = l4.hdr - l3.hdr;
570 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
571 HNS3_TXD_L3LEN_S, l3_len >> 2);
572
573 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
574 switch (l4_proto) {
575 case IPPROTO_TCP:
576 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
577 HNS3_TXD_L4LEN_S, l4.tcp->doff);
578 break;
579 case IPPROTO_SCTP:
580 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
581 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
582 break;
583 case IPPROTO_UDP:
584 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
585 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
586 break;
587 default:
588 /* skb packet types not supported by hardware,
589 * so the txbd len field is not filled.
590 */
591 return;
592 }
593}
594
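/* hns3_set_l3l4_type_csum - set the L3/L4 type and checksum-enable bits of
 * the TX descriptor, including the outer L3 type and tunnel type for
 * encapsulated packets. Unsupported tunnel or L4 types fall back to
 * skb_checksum_help(), or return -EDOM when the skb is GSO.
 */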
595static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
596 u8 il4_proto, u32 *type_cs_vlan_tso,
597 u32 *ol_type_vlan_len_msec)
598{
599 union {
600 struct iphdr *v4;
601 struct ipv6hdr *v6;
602 unsigned char *hdr;
603 } l3;
604 u32 l4_proto = ol4_proto;
605
606 l3.hdr = skb_network_header(skb);
607
608 /* define OL3 type and tunnel type(OL4).*/
609 if (skb->encapsulation) {
610 /* define outer network header type.*/
611 if (skb->protocol == htons(ETH_P_IP)) {
612 if (skb_is_gso(skb))
613 hnae_set_field(*ol_type_vlan_len_msec,
614 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
615 HNS3_OL3T_IPV4_CSUM);
616 else
617 hnae_set_field(*ol_type_vlan_len_msec,
618 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
619 HNS3_OL3T_IPV4_NO_CSUM);
620
621 } else if (skb->protocol == htons(ETH_P_IPV6)) {
622 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
623 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
624 }
625
626 /* define tunnel type(OL4).*/
627 switch (l4_proto) {
628 case IPPROTO_UDP:
629 hnae_set_field(*ol_type_vlan_len_msec,
630 HNS3_TXD_TUNTYPE_M,
631 HNS3_TXD_TUNTYPE_S,
632 HNS3_TUN_MAC_IN_UDP);
633 break;
634 case IPPROTO_GRE:
635 hnae_set_field(*ol_type_vlan_len_msec,
636 HNS3_TXD_TUNTYPE_M,
637 HNS3_TXD_TUNTYPE_S,
638 HNS3_TUN_NVGRE);
639 break;
640 default:
641 /* drop the skb tunnel packet if hardware doesn't support it,
642 * because hardware can't calculate the csum when doing TSO.
643 */
644 if (skb_is_gso(skb))
645 return -EDOM;
646
647 /* the stack computes the IP header already,
648 * the driver calculates the l4 checksum when not doing TSO.
649 */
650 skb_checksum_help(skb);
651 return 0;
652 }
653
654 l3.hdr = skb_inner_network_header(skb);
655 l4_proto = il4_proto;
656 }
657
658 if (l3.v4->version == 4) {
659 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
660 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
661
662 /* the stack computes the IP header already, the only time we
663 * need the hardware to recompute it is in the case of TSO.
664 */
665 if (skb_is_gso(skb))
666 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
667
668 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
669 } else if (l3.v6->version == 6) {
670 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
671 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
672 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
673 }
674
675 switch (l4_proto) {
676 case IPPROTO_TCP:
677 hnae_set_field(*type_cs_vlan_tso,
678 HNS3_TXD_L4T_M,
679 HNS3_TXD_L4T_S,
680 HNS3_L4T_TCP);
681 break;
682 case IPPROTO_UDP:
683 hnae_set_field(*type_cs_vlan_tso,
684 HNS3_TXD_L4T_M,
685 HNS3_TXD_L4T_S,
686 HNS3_L4T_UDP);
687 break;
688 case IPPROTO_SCTP:
689 hnae_set_field(*type_cs_vlan_tso,
690 HNS3_TXD_L4T_M,
691 HNS3_TXD_L4T_S,
692 HNS3_L4T_SCTP);
693 break;
694 default:
695 /* drop the skb tunnel packet if hardware doesn't support it,
696 * because hardware can't calculate the csum when doing TSO.
697 */
698 if (skb_is_gso(skb))
699 return -EDOM;
700
701 /* the stack computes the IP header already,
702 * the driver calculates the l4 checksum when not doing TSO.
703 */
704 skb_checksum_help(skb);
705 return 0;
706 }
707
708 return 0;
709}
710
711static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
712{
713 /* Config bd buffer end */
714 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
715 HNS3_TXD_BDTYPE_S, 0);
716 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
717 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
718 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
719}
720
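/* hns3_fill_desc - fill one TX buffer descriptor for either the skb head or
 * a page fragment. For the DESC_TYPE_SKB descriptor the VLAN, checksum and
 * TSO fields are computed from the skb headers before the ring's
 * next_to_use pointer is advanced.
 */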
721static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
722 int size, dma_addr_t dma, int frag_end,
723 enum hns_desc_type type)
724{
725 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
726 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
727 u32 ol_type_vlan_len_msec = 0;
728 u16 bdtp_fe_sc_vld_ra_ri = 0;
729 u32 type_cs_vlan_tso = 0;
730 struct sk_buff *skb;
731 u32 paylen = 0;
732 u16 mss = 0;
733 __be16 protocol;
734 u8 ol4_proto;
735 u8 il4_proto;
736 int ret;
737
738 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
739 desc_cb->priv = priv;
740 desc_cb->length = size;
741 desc_cb->dma = dma;
742 desc_cb->type = type;
743
744 /* now, fill the descriptor */
745 desc->addr = cpu_to_le64(dma);
746 desc->tx.send_size = cpu_to_le16((u16)size);
747 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
748 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
749
750 if (type == DESC_TYPE_SKB) {
751 skb = (struct sk_buff *)priv;
752 paylen = skb->len;
753
754 if (skb->ip_summed == CHECKSUM_PARTIAL) {
755 skb_reset_mac_len(skb);
756 protocol = skb->protocol;
757
758 /* vlan packet*/
759 if (protocol == htons(ETH_P_8021Q)) {
760 protocol = vlan_get_protocol(skb);
761 skb->protocol = protocol;
762 }
763 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
764 if (ret)
765 return ret;
766 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
767 &type_cs_vlan_tso,
768 &ol_type_vlan_len_msec);
769 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
770 &type_cs_vlan_tso,
771 &ol_type_vlan_len_msec);
772 if (ret)
773 return ret;
774
775 ret = hns3_set_tso(skb, &paylen, &mss,
776 &type_cs_vlan_tso);
777 if (ret)
778 return ret;
779 }
780
781 /* Set txbd */
782 desc->tx.ol_type_vlan_len_msec =
783 cpu_to_le32(ol_type_vlan_len_msec);
784 desc->tx.type_cs_vlan_tso_len =
785 cpu_to_le32(type_cs_vlan_tso);
786 desc->tx.paylen = cpu_to_le32(paylen);
787 desc->tx.mss = cpu_to_le16(mss);
788 }
789
790 /* move ring pointer to next.*/
791 ring_ptr_move_fw(ring, next_to_use);
792
793 return 0;
794}
795
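/* hns3_fill_desc_tso - like hns3_fill_desc, but splits buffers larger than
 * HNS3_MAX_BD_SIZE across several descriptors. Only the first piece of an
 * skb head keeps DESC_TYPE_SKB; the remaining pieces are typed as pages,
 * and the frag-end flag is set only on the last piece.
 */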
796static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
797 int size, dma_addr_t dma, int frag_end,
798 enum hns_desc_type type)
799{
800 unsigned int frag_buf_num;
801 unsigned int k;
802 int sizeoflast;
803 int ret;
804
805 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
806 sizeoflast = size % HNS3_MAX_BD_SIZE;
807 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
808
809 /* When the frag size is bigger than hardware, split this frag */
810 for (k = 0; k < frag_buf_num; k++) {
811 ret = hns3_fill_desc(ring, priv,
812 (k == frag_buf_num - 1) ?
813 sizeoflast : HNS3_MAX_BD_SIZE,
814 dma + HNS3_MAX_BD_SIZE * k,
815 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
816 (type == DESC_TYPE_SKB && !k) ?
817 DESC_TYPE_SKB : DESC_TYPE_PAGE);
818 if (ret)
819 return ret;
820 }
821
822 return 0;
823}
824
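/* hns3_nic_maybe_stop_tso - count the descriptors a TSO skb needs (head and
 * each fragment, split at HNS3_MAX_BD_SIZE). Returns -ENOMEM when a single
 * fragment would need more than HNS3_MAX_BD_PER_FRAG descriptors and -EBUSY
 * when the ring does not have enough free descriptors.
 */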
825static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
826 struct hns3_enet_ring *ring)
827{
828 struct sk_buff *skb = *out_skb;
829 struct skb_frag_struct *frag;
830 int bdnum_for_frag;
831 int frag_num;
832 int buf_num;
833 int size;
834 int i;
835
836 size = skb_headlen(skb);
837 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
838
839 frag_num = skb_shinfo(skb)->nr_frags;
840 for (i = 0; i < frag_num; i++) {
841 frag = &skb_shinfo(skb)->frags[i];
842 size = skb_frag_size(frag);
843 bdnum_for_frag =
844 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
845 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
846 return -ENOMEM;
847
848 buf_num += bdnum_for_frag;
849 }
850
851 if (buf_num > ring_space(ring))
852 return -EBUSY;
853
854 *bnum = buf_num;
855 return 0;
856}
857
858static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
859 struct hns3_enet_ring *ring)
860{
861 struct sk_buff *skb = *out_skb;
862 int buf_num;
863
864 /* No. of segments (plus a header) */
865 buf_num = skb_shinfo(skb)->nr_frags + 1;
866
867 if (buf_num > ring_space(ring))
868 return -EBUSY;
869
870 *bnum = buf_num;
871
872 return 0;
873}
874
875static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
876{
877 struct device *dev = ring_to_dev(ring);
878 unsigned int i;
879
880 for (i = 0; i < ring->desc_num; i++) {
881 /* check if this is where we started */
882 if (ring->next_to_use == next_to_use_orig)
883 break;
884
885 /* unmap the descriptor dma address */
886 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
887 dma_unmap_single(dev,
888 ring->desc_cb[ring->next_to_use].dma,
889 ring->desc_cb[ring->next_to_use].length,
890 DMA_TO_DEVICE);
891 else
892 dma_unmap_page(dev,
893 ring->desc_cb[ring->next_to_use].dma,
894 ring->desc_cb[ring->next_to_use].length,
895 DMA_TO_DEVICE);
896
897 /* rollback one */
898 ring_ptr_move_bw(ring, next_to_use);
899 }
900}
901
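/* hns3_nic_net_xmit - main transmit path: check ring space via the
 * maybe_stop_tx op, DMA-map the skb head and every fragment, fill the TX
 * descriptors through the fill_desc op and kick the queue with
 * hnae_queue_xmit(). DMA mappings are unwound on any failure.
 */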
902static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
903 struct net_device *netdev)
904{
905 struct hns3_nic_priv *priv = netdev_priv(netdev);
906 struct hns3_nic_ring_data *ring_data =
907 &tx_ring_data(priv, skb->queue_mapping);
908 struct hns3_enet_ring *ring = ring_data->ring;
909 struct device *dev = priv->dev;
910 struct netdev_queue *dev_queue;
911 struct skb_frag_struct *frag;
912 int next_to_use_head;
913 int next_to_use_frag;
914 dma_addr_t dma;
915 int buf_num;
916 int seg_num;
917 int size;
918 int ret;
919 int i;
920
921 /* Prefetch the data used later */
922 prefetch(skb->data);
923
924 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
925 case -EBUSY:
926 u64_stats_update_begin(&ring->syncp);
927 ring->stats.tx_busy++;
928 u64_stats_update_end(&ring->syncp);
929
930 goto out_net_tx_busy;
931 case -ENOMEM:
932 u64_stats_update_begin(&ring->syncp);
933 ring->stats.sw_err_cnt++;
934 u64_stats_update_end(&ring->syncp);
935 netdev_err(netdev, "no memory to xmit!\n");
936
937 goto out_err_tx_ok;
938 default:
939 break;
940 }
941
942 /* No. of segments (plus a header) */
943 seg_num = skb_shinfo(skb)->nr_frags + 1;
944 /* Fill the first part */
945 size = skb_headlen(skb);
946
947 next_to_use_head = ring->next_to_use;
948
949 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
950 if (dma_mapping_error(dev, dma)) {
951 netdev_err(netdev, "TX head DMA map failed\n");
952 ring->stats.sw_err_cnt++;
953 goto out_err_tx_ok;
954 }
955
956 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
957 DESC_TYPE_SKB);
958 if (ret)
959 goto head_dma_map_err;
960
961 next_to_use_frag = ring->next_to_use;
962 /* Fill the fragments */
963 for (i = 1; i < seg_num; i++) {
964 frag = &skb_shinfo(skb)->frags[i - 1];
965 size = skb_frag_size(frag);
966 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
967 if (dma_mapping_error(dev, dma)) {
968 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
969 ring->stats.sw_err_cnt++;
970 goto frag_dma_map_err;
971 }
972 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
973 seg_num - 1 == i ? 1 : 0,
974 DESC_TYPE_PAGE);
975
976 if (ret)
977 goto frag_dma_map_err;
978 }
979
980 /* Complete translate all packets */
981 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
982 netdev_tx_sent_queue(dev_queue, skb->len);
983
984 wmb(); /* Commit all data before submit */
985
986 hnae_queue_xmit(ring->tqp, buf_num);
987
988 return NETDEV_TX_OK;
989
990frag_dma_map_err:
991 hns_nic_dma_unmap(ring, next_to_use_frag);
992
993head_dma_map_err:
994 hns_nic_dma_unmap(ring, next_to_use_head);
995
996out_err_tx_ok:
997 dev_kfree_skb_any(skb);
998 return NETDEV_TX_OK;
999
1000out_net_tx_busy:
1001 netif_stop_subqueue(netdev, ring_data->queue_index);
1002 smp_mb(); /* Commit all data before submit */
1003
1004 return NETDEV_TX_BUSY;
1005}
1006
1007static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1008{
1009 struct hnae3_handle *h = hns3_get_handle(netdev);
1010 struct sockaddr *mac_addr = p;
1011 int ret;
1012
1013 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1014 return -EADDRNOTAVAIL;
1015
1016 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1017 if (ret) {
1018 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1019 return ret;
1020 }
1021
1022 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1023
1024 return 0;
1025}
1026
1027static int hns3_nic_set_features(struct net_device *netdev,
1028 netdev_features_t features)
1029{
1030 struct hns3_nic_priv *priv = netdev_priv(netdev);
1031
1032 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1033 priv->ops.fill_desc = hns3_fill_desc_tso;
1034 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1035 } else {
1036 priv->ops.fill_desc = hns3_fill_desc;
1037 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1038 }
1039
1040 netdev->features = features;
1041 return 0;
1042}
1043
1044static void
1045hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1046{
1047 struct hns3_nic_priv *priv = netdev_priv(netdev);
1048 int queue_num = priv->ae_handle->kinfo.num_tqps;
1049 struct hns3_enet_ring *ring;
1050 unsigned int start;
1051 unsigned int idx;
1052 u64 tx_bytes = 0;
1053 u64 rx_bytes = 0;
1054 u64 tx_pkts = 0;
1055 u64 rx_pkts = 0;
1056
1057 for (idx = 0; idx < queue_num; idx++) {
1058 /* fetch the tx stats */
1059 ring = priv->ring_data[idx].ring;
1060 do {
1061 start = u64_stats_fetch_begin_irq(&ring->syncp);
1062 tx_bytes += ring->stats.tx_bytes;
1063 tx_pkts += ring->stats.tx_pkts;
1064 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1065
1066 /* fetch the rx stats */
1067 ring = priv->ring_data[idx + queue_num].ring;
1068 do {
1069 start = u64_stats_fetch_begin_irq(&ring->syncp);
1070 rx_bytes += ring->stats.rx_bytes;
1071 rx_pkts += ring->stats.rx_pkts;
1072 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1073 }
1074
1075 stats->tx_bytes = tx_bytes;
1076 stats->tx_packets = tx_pkts;
1077 stats->rx_bytes = rx_bytes;
1078 stats->rx_packets = rx_pkts;
1079
1080 stats->rx_errors = netdev->stats.rx_errors;
1081 stats->multicast = netdev->stats.multicast;
1082 stats->rx_length_errors = netdev->stats.rx_length_errors;
1083 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1084 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1085
1086 stats->tx_errors = netdev->stats.tx_errors;
1087 stats->rx_dropped = netdev->stats.rx_dropped;
1088 stats->tx_dropped = netdev->stats.tx_dropped;
1089 stats->collisions = netdev->stats.collisions;
1090 stats->rx_over_errors = netdev->stats.rx_over_errors;
1091 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1092 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1093 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1094 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1095 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1096 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1097 stats->tx_window_errors = netdev->stats.tx_window_errors;
1098 stats->rx_compressed = netdev->stats.rx_compressed;
1099 stats->tx_compressed = netdev->stats.tx_compressed;
1100}
1101
1102static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1103 enum hns3_udp_tnl_type type)
1104{
1105 struct hns3_nic_priv *priv = netdev_priv(netdev);
1106 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1107 struct hnae3_handle *h = priv->ae_handle;
1108
1109 if (udp_tnl->used && udp_tnl->dst_port == port) {
1110 udp_tnl->used++;
1111 return;
1112 }
1113
1114 if (udp_tnl->used) {
1115 netdev_warn(netdev,
1116 "UDP tunnel [%d], port [%d] offload\n", type, port);
1117 return;
1118 }
1119
1120 udp_tnl->dst_port = port;
1121 udp_tnl->used = 1;
1122 /* TBD send command to hardware to add port */
1123 if (h->ae_algo->ops->add_tunnel_udp)
1124 h->ae_algo->ops->add_tunnel_udp(h, port);
1125}
1126
1127static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1128 enum hns3_udp_tnl_type type)
1129{
1130 struct hns3_nic_priv *priv = netdev_priv(netdev);
1131 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1132 struct hnae3_handle *h = priv->ae_handle;
1133
1134 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1135 netdev_warn(netdev,
1136 "Invalid UDP tunnel port %d\n", port);
1137 return;
1138 }
1139
1140 udp_tnl->used--;
1141 if (udp_tnl->used)
1142 return;
1143
1144 udp_tnl->dst_port = 0;
1145 /* TBD send command to hardware to del port */
1146 if (h->ae_algo->ops->del_tunnel_udp)
1147 h->ae_algo->ops->del_tunnel_udp(h, port);
1148}
1149
1150/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
1151 * @netdev: This physical port's netdev
1152 * @ti: Tunnel information
1153 */
1154static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1155 struct udp_tunnel_info *ti)
1156{
1157 u16 port_n = ntohs(ti->port);
1158
1159 switch (ti->type) {
1160 case UDP_TUNNEL_TYPE_VXLAN:
1161 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1162 break;
1163 case UDP_TUNNEL_TYPE_GENEVE:
1164 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1165 break;
1166 default:
1167 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1168 break;
1169 }
1170}
1171
1172static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1173 struct udp_tunnel_info *ti)
1174{
1175 u16 port_n = ntohs(ti->port);
1176
1177 switch (ti->type) {
1178 case UDP_TUNNEL_TYPE_VXLAN:
1179 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1180 break;
1181 case UDP_TUNNEL_TYPE_GENEVE:
1182 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1183 break;
1184 default:
1185 break;
1186 }
1187}
1188
1189static int hns3_setup_tc(struct net_device *netdev, u8 tc)
1190{
1191 struct hnae3_handle *h = hns3_get_handle(netdev);
1192 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1193 unsigned int i;
1194 int ret;
1195
1196 if (tc > HNAE3_MAX_TC)
1197 return -EINVAL;
1198
1199 if (kinfo->num_tc == tc)
1200 return 0;
1201
1202 if (!netdev)
1203 return -EINVAL;
1204
1205 if (!tc) {
1206 netdev_reset_tc(netdev);
1207 return 0;
1208 }
1209
1210 /* Set num_tc for netdev */
1211 ret = netdev_set_num_tc(netdev, tc);
1212 if (ret)
1213 return ret;
1214
1215 /* Set per TC queues for the VSI */
1216 for (i = 0; i < HNAE3_MAX_TC; i++) {
1217 if (kinfo->tc_info[i].enable)
1218 netdev_set_tc_queue(netdev,
1219 kinfo->tc_info[i].tc,
1220 kinfo->tc_info[i].tqp_count,
1221 kinfo->tc_info[i].tqp_offset);
1222 }
1223
1224 return 0;
1225}
1226
1227static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1228 void *type_data)
1229{
1230 struct tc_mqprio_qopt *mqprio = type_data;
1231
1232 if (type != TC_SETUP_MQPRIO)
1233 return -EOPNOTSUPP;
1234
1235 return hns3_setup_tc(dev, mqprio->num_tc);
1236}
1237
1238static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1239 __be16 proto, u16 vid)
1240{
1241 struct hnae3_handle *h = hns3_get_handle(netdev);
1242 int ret = -EIO;
1243
1244 if (h->ae_algo->ops->set_vlan_filter)
1245 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1246
1247 return ret;
1248}
1249
1250static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1251 __be16 proto, u16 vid)
1252{
1253 struct hnae3_handle *h = hns3_get_handle(netdev);
1254 int ret = -EIO;
1255
1256 if (h->ae_algo->ops->set_vlan_filter)
1257 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1258
1259 return ret;
1260}
1261
1262static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1263 u8 qos, __be16 vlan_proto)
1264{
1265 struct hnae3_handle *h = hns3_get_handle(netdev);
1266 int ret = -EIO;
1267
1268 if (h->ae_algo->ops->set_vf_vlan_filter)
1269 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1270 qos, vlan_proto);
1271
1272 return ret;
1273}
1274
1275static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1276{
1277 struct hnae3_handle *h = hns3_get_handle(netdev);
1278 bool if_running = netif_running(netdev);
1279 int ret;
1280
1281 if (!h->ae_algo->ops->set_mtu)
1282 return -EOPNOTSUPP;
1283
1284 /* if this was called with netdev up then bring netdevice down */
1285 if (if_running) {
1286 (void)hns3_nic_net_stop(netdev);
1287 msleep(100);
1288 }
1289
1290 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1291 if (ret) {
1292 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1293 ret);
1294 return ret;
1295 }
1296
1297 /* if the netdev was running earlier, bring it up again */
1298 if (if_running && hns3_nic_net_open(netdev))
1299 ret = -EINVAL;
1300
1301 return ret;
1302}
1303
1304static const struct net_device_ops hns3_nic_netdev_ops = {
1305 .ndo_open = hns3_nic_net_open,
1306 .ndo_stop = hns3_nic_net_stop,
1307 .ndo_start_xmit = hns3_nic_net_xmit,
1308 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1309 .ndo_change_mtu = hns3_nic_change_mtu,
1310 .ndo_set_features = hns3_nic_set_features,
1311 .ndo_get_stats64 = hns3_nic_get_stats64,
1312 .ndo_setup_tc = hns3_nic_setup_tc,
1313 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1314 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1315 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1316 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1317 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1318 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1319};
1320
1321/* hns3_probe - Device initialization routine
1322 * @pdev: PCI device information struct
1323 * @ent: entry in hns3_pci_tbl
1324 *
1325 * hns3_probe initializes a PF identified by a pci_dev structure.
1326 * The OS initialization, configuring of the PF private structure,
1327 * and a hardware reset occur.
1328 *
1329 * Returns 0 on success, negative on failure
1330 */
1331static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1332{
1333 struct hnae3_ae_dev *ae_dev;
1334 int ret;
1335
1336 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1337 GFP_KERNEL);
1338 if (!ae_dev) {
1339 ret = -ENOMEM;
1340 return ret;
1341 }
1342
1343 ae_dev->pdev = pdev;
1344 ae_dev->flag = ent->driver_data;
1345 ae_dev->dev_type = HNAE3_DEV_KNIC;
1346 pci_set_drvdata(pdev, ae_dev);
1347
1348 return hnae3_register_ae_dev(ae_dev);
1349}
1350
1351/* hns3_remove - Device removal routine
1352 * @pdev: PCI device information struct
1353 */
1354static void hns3_remove(struct pci_dev *pdev)
1355{
1356 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1357
1358 hnae3_unregister_ae_dev(ae_dev);
1359
1360 devm_kfree(&pdev->dev, ae_dev);
1361
1362 pci_set_drvdata(pdev, NULL);
1363}
1364
1365static struct pci_driver hns3_driver = {
1366 .name = hns3_driver_name,
1367 .id_table = hns3_pci_tbl,
1368 .probe = hns3_probe,
1369 .remove = hns3_remove,
1370};
1371
1372/* set default feature to hns3 */
1373static void hns3_set_default_feature(struct net_device *netdev)
1374{
1375 netdev->priv_flags |= IFF_UNICAST_FLT;
1376
1377 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1378 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1379 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1380 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1381 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1382
1383 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1384
1385 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1386
1387 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1388 NETIF_F_HW_VLAN_CTAG_FILTER |
1389 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1390 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1391 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1392 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1393
1394 netdev->vlan_features |=
1395 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1396 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1397 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1398 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1399 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1400
1401 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1402 NETIF_F_HW_VLAN_CTAG_FILTER |
1403 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1404 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1405 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1406 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1407}
1408
1409static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1410 struct hns3_desc_cb *cb)
1411{
1412 unsigned int order = hnae_page_order(ring);
1413 struct page *p;
1414
1415 p = dev_alloc_pages(order);
1416 if (!p)
1417 return -ENOMEM;
1418
1419 cb->priv = p;
1420 cb->page_offset = 0;
1421 cb->reuse_flag = 0;
1422 cb->buf = page_address(p);
1423 cb->length = hnae_page_size(ring);
1424 cb->type = DESC_TYPE_PAGE;
1425
1426 memset(cb->buf, 0, cb->length);
1427
1428 return 0;
1429}
1430
1431static void hns3_free_buffer(struct hns3_enet_ring *ring,
1432 struct hns3_desc_cb *cb)
1433{
1434 if (cb->type == DESC_TYPE_SKB)
1435 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1436 else if (!HNAE3_IS_TX_RING(ring))
1437 put_page((struct page *)cb->priv);
1438 memset(cb, 0, sizeof(*cb));
1439}
1440
1441static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1442{
1443 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1444 cb->length, ring_to_dma_dir(ring));
1445
1446 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1447 return -EIO;
1448
1449 return 0;
1450}
1451
1452static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1453 struct hns3_desc_cb *cb)
1454{
1455 if (cb->type == DESC_TYPE_SKB)
1456 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1457 ring_to_dma_dir(ring));
1458 else
1459 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1460 ring_to_dma_dir(ring));
1461}
1462
1463static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1464{
1465 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1466 ring->desc[i].addr = 0;
1467}
1468
1469static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1470{
1471 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1472
1473 if (!ring->desc_cb[i].dma)
1474 return;
1475
1476 hns3_buffer_detach(ring, i);
1477 hns3_free_buffer(ring, cb);
1478}
1479
1480static void hns3_free_buffers(struct hns3_enet_ring *ring)
1481{
1482 int i;
1483
1484 for (i = 0; i < ring->desc_num; i++)
1485 hns3_free_buffer_detach(ring, i);
1486}
1487
1488/* free desc along with its attached buffer */
1489static void hns3_free_desc(struct hns3_enet_ring *ring)
1490{
1491 hns3_free_buffers(ring);
1492
1493 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1494 ring->desc_num * sizeof(ring->desc[0]),
1495 DMA_BIDIRECTIONAL);
1496 ring->desc_dma_addr = 0;
1497 kfree(ring->desc);
1498 ring->desc = NULL;
1499}
1500
1501static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1502{
1503 int size = ring->desc_num * sizeof(ring->desc[0]);
1504
1505 ring->desc = kzalloc(size, GFP_KERNEL);
1506 if (!ring->desc)
1507 return -ENOMEM;
1508
1509 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1510 size, DMA_BIDIRECTIONAL);
1511 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1512 ring->desc_dma_addr = 0;
1513 kfree(ring->desc);
1514 ring->desc = NULL;
1515 return -ENOMEM;
1516 }
1517
1518 return 0;
1519}
1520
1521static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1522 struct hns3_desc_cb *cb)
1523{
1524 int ret;
1525
1526 ret = hns3_alloc_buffer(ring, cb);
1527 if (ret)
1528 goto out;
1529
1530 ret = hns3_map_buffer(ring, cb);
1531 if (ret)
1532 goto out_with_buf;
1533
1534 return 0;
1535
1536out_with_buf:
1537 hns3_free_buffers(ring);
1538out:
1539 return ret;
1540}
1541
1542static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1543{
1544 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1545
1546 if (ret)
1547 return ret;
1548
1549 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1550
1551 return 0;
1552}
1553
1554/* Allocate memory for raw pkg, and map with dma */
1555static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1556{
1557 int i, j, ret;
1558
1559 for (i = 0; i < ring->desc_num; i++) {
1560 ret = hns3_alloc_buffer_attach(ring, i);
1561 if (ret)
1562 goto out_buffer_fail;
1563 }
1564
1565 return 0;
1566
1567out_buffer_fail:
1568 for (j = i - 1; j >= 0; j--)
1569 hns3_free_buffer_detach(ring, j);
1570 return ret;
1571}
1572
1573/* detach an in-use buffer and replace it with a reserved one */
1574static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1575 struct hns3_desc_cb *res_cb)
1576{
1577 hns3_map_buffer(ring, &ring->desc_cb[i]);
1578 ring->desc_cb[i] = *res_cb;
1579 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1580}
1581
1582static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1583{
1584 ring->desc_cb[i].reuse_flag = 0;
1585 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1586 + ring->desc_cb[i].page_offset);
1587}
1588
1589static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1590 int *pkts)
1591{
1592 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1593
1594 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1595 (*bytes) += desc_cb->length;
1596 /* desc_cb will be cleaned after hnae_free_buffer_detach */
1597 hns3_free_buffer_detach(ring, ring->next_to_clean);
1598
1599 ring_ptr_move_fw(ring, next_to_clean);
1600}
1601
1602static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1603{
1604 int u = ring->next_to_use;
1605 int c = ring->next_to_clean;
1606
1607 if (unlikely(h > ring->desc_num))
1608 return 0;
1609
1610 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1611}
1612
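/* hns3_clean_tx_ring - reclaim completed TX descriptors up to the hardware
 * head pointer, update byte/packet statistics and the BQL queue, and wake
 * the subqueue when enough ring space has been freed. Returns non-zero
 * when the whole budget was not needed.
 */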
1613int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1614{
1615 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1616 struct netdev_queue *dev_queue;
1617 int bytes, pkts;
1618 int head;
1619
1620 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1621 rmb(); /* Make sure head is ready before touch any data */
1622
1623 if (is_ring_empty(ring) || head == ring->next_to_clean)
1624 return 0; /* no data to poll */
1625
1626 if (!is_valid_clean_head(ring, head)) {
1627 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1628 ring->next_to_use, ring->next_to_clean);
1629
1630 u64_stats_update_begin(&ring->syncp);
1631 ring->stats.io_err_cnt++;
1632 u64_stats_update_end(&ring->syncp);
1633 return -EIO;
1634 }
1635
1636 bytes = 0;
1637 pkts = 0;
1638 while (head != ring->next_to_clean && budget) {
1639 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1640 /* Issue prefetch for next Tx descriptor */
1641 prefetch(&ring->desc_cb[ring->next_to_clean]);
1642 budget--;
1643 }
1644
1645 ring->tqp_vector->tx_group.total_bytes += bytes;
1646 ring->tqp_vector->tx_group.total_packets += pkts;
1647
1648 u64_stats_update_begin(&ring->syncp);
1649 ring->stats.tx_bytes += bytes;
1650 ring->stats.tx_pkts += pkts;
1651 u64_stats_update_end(&ring->syncp);
1652
1653 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1654 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1655
1656 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1657 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1658 /* Make sure that anybody stopping the queue after this
1659 * sees the new next_to_clean.
1660 */
1661 smp_mb();
1662 if (netif_tx_queue_stopped(dev_queue)) {
1663 netif_tx_wake_queue(dev_queue);
1664 ring->stats.restart_queue++;
1665 }
1666 }
1667
1668 return !!budget;
1669}
1670
1671static int hns3_desc_unused(struct hns3_enet_ring *ring)
1672{
1673 int ntc = ring->next_to_clean;
1674 int ntu = ring->next_to_use;
1675
1676 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1677}
1678
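/* hns3_nic_alloc_rx_buffers - refill up to cleand_count RX descriptors,
 * reusing pages flagged for reuse and allocating/mapping fresh pages
 * otherwise, then tell hardware about the newly filled descriptors.
 */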
1679static void
1680hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1681{
1682 struct hns3_desc_cb *desc_cb;
1683 struct hns3_desc_cb res_cbs;
1684 int i, ret;
1685
1686 for (i = 0; i < cleand_count; i++) {
1687 desc_cb = &ring->desc_cb[ring->next_to_use];
1688 if (desc_cb->reuse_flag) {
1689 u64_stats_update_begin(&ring->syncp);
1690 ring->stats.reuse_pg_cnt++;
1691 u64_stats_update_end(&ring->syncp);
1692
1693 hns3_reuse_buffer(ring, ring->next_to_use);
1694 } else {
1695 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1696 if (ret) {
1697 u64_stats_update_begin(&ring->syncp);
1698 ring->stats.sw_err_cnt++;
1699 u64_stats_update_end(&ring->syncp);
1700
1701 netdev_err(ring->tqp->handle->kinfo.netdev,
1702 "hnae reserve buffer map failed.\n");
1703 break;
1704 }
1705 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1706 }
1707
1708 ring_ptr_move_fw(ring, next_to_use);
1709 }
1710
1711 wmb(); /* Make sure all data has been written before submit */
1712 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1713}
1714
1715/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1716 * @data: pointer to the start of the headers
1717 * @max_size: total length of section to find headers in
1718 *
1719 * This function is meant to determine the length of headers that will
1720 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1721 * motivation of doing this is to only perform one pull for IPv4 TCP
1722 * packets so that we can do basic things like calculating the gso_size
1723 * based on the average data per packet.
1724 */
1725static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1726 unsigned int max_size)
1727{
1728 unsigned char *network;
1729 u8 hlen;
1730
1731 /* This should never happen, but better safe than sorry */
1732 if (max_size < ETH_HLEN)
1733 return max_size;
1734
1735 /* Initialize network frame pointer */
1736 network = data;
1737
1738 /* Set first protocol and move network header forward */
1739 network += ETH_HLEN;
1740
1741 /* Handle any vlan tag if present */
1742 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1743 == HNS3_RX_FLAG_VLAN_PRESENT) {
1744 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1745 return max_size;
1746
1747 network += VLAN_HLEN;
1748 }
1749
1750 /* Handle L3 protocols */
1751 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1752 == HNS3_RX_FLAG_L3ID_IPV4) {
1753 if ((typeof(max_size))(network - data) >
1754 (max_size - sizeof(struct iphdr)))
1755 return max_size;
1756
1757 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1758 hlen = (network[0] & 0x0F) << 2;
1759
1760 /* Verify hlen meets minimum size requirements */
1761 if (hlen < sizeof(struct iphdr))
1762 return network - data;
1763
1764 /* Record next protocol if header is present */
1765 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1766 == HNS3_RX_FLAG_L3ID_IPV6) {
1767 if ((typeof(max_size))(network - data) >
1768 (max_size - sizeof(struct ipv6hdr)))
1769 return max_size;
1770
1771 /* Record next protocol */
1772 hlen = sizeof(struct ipv6hdr);
1773 } else {
1774 return network - data;
1775 }
1776
1777 /* Relocate pointer to start of L4 header */
1778 network += hlen;
1779
1780 /* Finally sort out TCP/UDP */
1781 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1782 == HNS3_RX_FLAG_L4ID_TCP) {
1783 if ((typeof(max_size))(network - data) >
1784 (max_size - sizeof(struct tcphdr)))
1785 return max_size;
1786
1787 /* Access doff as a u8 to avoid unaligned access on ia64 */
1788 hlen = (network[12] & 0xF0) >> 2;
1789
1790 /* Verify hlen meets minimum size requirements */
1791 if (hlen < sizeof(struct tcphdr))
1792 return network - data;
1793
1794 network += hlen;
1795 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1796 == HNS3_RX_FLAG_L4ID_UDP) {
1797 if ((typeof(max_size))(network - data) >
1798 (max_size - sizeof(struct udphdr)))
1799 return max_size;
1800
1801 network += sizeof(struct udphdr);
1802 }
1803
1804 /* If everything has gone correctly network should be the
1805 * data section of the packet and will be the end of the header.
1806 * If not then it probably represents the end of the last recognized
1807 * header.
1808 */
1809 if ((typeof(max_size))(network - data) < max_size)
1810 return network - data;
1811 else
1812 return max_size;
1813}
1814
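/* hns3_nic_reuse_page - attach the received buffer to the skb as a page
 * fragment and decide whether the page can be recycled for a later RX
 * descriptor (local NUMA node and, for the two-buffer layout, sole owner
 * of the page).
 */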
1815static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1816 struct hns3_enet_ring *ring, int pull_len,
1817 struct hns3_desc_cb *desc_cb)
1818{
1819 struct hns3_desc *desc;
1820 int truesize, size;
1821 int last_offset;
1822 bool twobufs;
1823
1824 twobufs = ((PAGE_SIZE < 8192) &&
1825 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
1826
1827 desc = &ring->desc[ring->next_to_clean];
1828 size = le16_to_cpu(desc->rx.size);
1829
1830 if (twobufs) {
1831 truesize = hnae_buf_size(ring);
1832 } else {
1833 truesize = ALIGN(size, L1_CACHE_BYTES);
1834 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1835 }
1836
1837 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1838 size - pull_len, truesize - pull_len);
1839
1840 /* Avoid re-using remote pages; the flag defaults to no reuse */
1841 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1842 return;
1843
1844 if (twobufs) {
1845 /* If we are only owner of page we can reuse it */
1846 if (likely(page_count(desc_cb->priv) == 1)) {
1847 /* Flip page offset to other buffer */
1848 desc_cb->page_offset ^= truesize;
1849
1850 desc_cb->reuse_flag = 1;
1851 /* bump ref count on page before it is given*/
1852 get_page(desc_cb->priv);
1853 }
1854 return;
1855 }
1856
1857 /* Move offset up to the next cache line */
1858 desc_cb->page_offset += truesize;
1859
1860 if (desc_cb->page_offset <= last_offset) {
1861 desc_cb->reuse_flag = 1;
1862 /* Bump ref count on page before it is given*/
1863 get_page(desc_cb->priv);
1864 }
1865}
1866
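/* hns3_rx_checksum - translate the descriptor's checksum status into
 * skb->ip_summed: report CHECKSUM_UNNECESSARY only when hardware has
 * validated the L3/L4 checksums and the packet type is one it supports.
 */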
1867static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1868 struct hns3_desc *desc)
1869{
1870 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1871 int l3_type, l4_type;
1872 u32 bd_base_info;
1873 int ol4_type;
1874 u32 l234info;
1875
1876 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1877 l234info = le32_to_cpu(desc->rx.l234_info);
1878
1879 skb->ip_summed = CHECKSUM_NONE;
1880
1881 skb_checksum_none_assert(skb);
1882
1883 if (!(netdev->features & NETIF_F_RXCSUM))
1884 return;
1885
1886 /* check if hardware has done checksum */
1887 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1888 return;
1889
1890 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1891 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1892 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1893 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1894 netdev_err(netdev, "L3/L4 error pkt\n");
1895 u64_stats_update_begin(&ring->syncp);
1896 ring->stats.l3l4_csum_err++;
1897 u64_stats_update_end(&ring->syncp);
1898
1899 return;
1900 }
1901
1902 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1903 HNS3_RXD_L3ID_S);
1904 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1905 HNS3_RXD_L4ID_S);
1906
1907 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1908 switch (ol4_type) {
1909 case HNS3_OL4_TYPE_MAC_IN_UDP:
1910 case HNS3_OL4_TYPE_NVGRE:
1911 skb->csum_level = 1;
1912 case HNS3_OL4_TYPE_NO_TUN:
1913 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1914 if (l3_type == HNS3_L3_TYPE_IPV4 ||
1915 (l3_type == HNS3_L3_TYPE_IPV6 &&
1916 (l4_type == HNS3_L4_TYPE_UDP ||
1917 l4_type == HNS3_L4_TYPE_TCP ||
1918 l4_type == HNS3_L4_TYPE_SCTP)))
1919 skb->ip_summed = CHECKSUM_UNNECESSARY;
1920 break;
1921 }
1922}
1923
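/* hns3_handle_rx_bd - build an skb for one received packet. Short packets
 * are copied entirely into the skb head; longer ones keep only the pulled
 * header there and attach the remaining buffer descriptors as page
 * fragments. The number of consumed BDs is returned through out_bnum.
 */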
1924static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1925 struct sk_buff **out_skb, int *out_bnum)
1926{
1927 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1928 struct hns3_desc_cb *desc_cb;
1929 struct hns3_desc *desc;
1930 struct sk_buff *skb;
1931 unsigned char *va;
1932 u32 bd_base_info;
1933 int pull_len;
1934 u32 l234info;
1935 int length;
1936 int bnum;
1937
1938 desc = &ring->desc[ring->next_to_clean];
1939 desc_cb = &ring->desc_cb[ring->next_to_clean];
1940
1941 prefetch(desc);
1942
1943 length = le16_to_cpu(desc->rx.pkt_len);
1944 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1945 l234info = le32_to_cpu(desc->rx.l234_info);
1946
1947 /* Check valid BD */
1948 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1949 return -EFAULT;
1950
1951 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1952
 1953	/* Prefetch the first cache line of the first page.
 1954	 * The idea is to cache the first few bytes of the packet header.
 1955	 * With a 64B L1 cache line we need to prefetch twice to cover 128B.
 1956	 * Some CPUs, however, have 128B L1 cache lines, in which case a
 1957	 * single prefetch is enough to pull in the relevant part of the
 1958	 * header.
 1959	 */
1960 prefetch(va);
1961#if L1_CACHE_BYTES < 128
1962 prefetch(va + L1_CACHE_BYTES);
1963#endif
1964
1965 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1966 HNS3_RX_HEAD_SIZE);
1967 if (unlikely(!skb)) {
1968 netdev_err(netdev, "alloc rx skb fail\n");
1969
1970 u64_stats_update_begin(&ring->syncp);
1971 ring->stats.sw_err_cnt++;
1972 u64_stats_update_end(&ring->syncp);
1973
1974 return -ENOMEM;
1975 }
1976
1977 prefetchw(skb->data);
1978
1979 bnum = 1;
1980 if (length <= HNS3_RX_HEAD_SIZE) {
1981 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
1982
1983 /* We can reuse buffer as-is, just make sure it is local */
1984 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
1985 desc_cb->reuse_flag = 1;
1986 else /* This page cannot be reused so discard it */
1987 put_page(desc_cb->priv);
1988
1989 ring_ptr_move_fw(ring, next_to_clean);
1990 } else {
1991 u64_stats_update_begin(&ring->syncp);
1992 ring->stats.seg_pkt_cnt++;
1993 u64_stats_update_end(&ring->syncp);
1994
1995 pull_len = hns3_nic_get_headlen(va, l234info,
1996 HNS3_RX_HEAD_SIZE);
1997 memcpy(__skb_put(skb, pull_len), va,
1998 ALIGN(pull_len, sizeof(long)));
1999
2000 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2001 ring_ptr_move_fw(ring, next_to_clean);
2002
2003 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2004 desc = &ring->desc[ring->next_to_clean];
2005 desc_cb = &ring->desc_cb[ring->next_to_clean];
2006 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2007 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2008 ring_ptr_move_fw(ring, next_to_clean);
2009 bnum++;
2010 }
2011 }
2012
2013 *out_bnum = bnum;
2014
2015 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2016 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2017 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2018 u64_stats_update_begin(&ring->syncp);
2019 ring->stats.non_vld_descs++;
2020 u64_stats_update_end(&ring->syncp);
2021
2022 dev_kfree_skb_any(skb);
2023 return -EINVAL;
2024 }
2025
2026 if (unlikely((!desc->rx.pkt_len) ||
2027 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2028 netdev_err(netdev, "truncated pkt\n");
2029 u64_stats_update_begin(&ring->syncp);
2030 ring->stats.err_pkt_len++;
2031 u64_stats_update_end(&ring->syncp);
2032
2033 dev_kfree_skb_any(skb);
2034 return -EFAULT;
2035 }
2036
2037 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2038 netdev_err(netdev, "L2 error pkt\n");
2039 u64_stats_update_begin(&ring->syncp);
2040 ring->stats.l2_err++;
2041 u64_stats_update_end(&ring->syncp);
2042
2043 dev_kfree_skb_any(skb);
2044 return -EFAULT;
2045 }
2046
2047 u64_stats_update_begin(&ring->syncp);
2048 ring->stats.rx_pkts++;
2049 ring->stats.rx_bytes += skb->len;
2050 u64_stats_update_end(&ring->syncp);
2051
2052 ring->tqp_vector->rx_group.total_bytes += skb->len;
2053
2054 hns3_rx_checksum(ring, skb, desc);
2055 return 0;
2056}
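/* hns3_handle_rx_bd() copies short frames (up to HNS3_RX_HEAD_SIZE)
 * straight into the skb linear area and reuses or drops the buffer,
 * while longer frames get only the header pulled into the linear area
 * and the remainder attached as page fragments, consuming further BDs
 * until the frame-end (FE) bit is seen.
 */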
2057
2058static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
2059{
2060#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2061 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2062 int recv_pkts, recv_bds, clean_count, err;
2063 int unused_count = hns3_desc_unused(ring);
2064 struct sk_buff *skb = NULL;
2065 int num, bnum = 0;
2066
2067 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
 2068	rmb(); /* Make sure num is read before any other descriptor data is touched */
2069
2070 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2071 num -= unused_count;
2072
2073 while (recv_pkts < budget && recv_bds < num) {
2074 /* Reuse or realloc buffers */
2075 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2076 hns3_nic_alloc_rx_buffers(ring,
2077 clean_count + unused_count);
2078 clean_count = 0;
2079 unused_count = hns3_desc_unused(ring);
2080 }
2081
2082 /* Poll one pkt */
2083 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2084 if (unlikely(!skb)) /* This fault cannot be repaired */
2085 goto out;
2086
2087 recv_bds += bnum;
2088 clean_count += bnum;
 2089		if (unlikely(err)) { /* Skip this erroneous packet */
2090 recv_pkts++;
2091 continue;
2092 }
2093
 2094		/* Hand the packet up to the network stack */
2095 skb->protocol = eth_type_trans(skb, netdev);
2096 (void)napi_gro_receive(&ring->tqp_vector->napi, skb);
2097
2098 recv_pkts++;
2099 }
2100
2101out:
 2102	/* Give any remaining buffers back to hardware before returning */
2103 if (clean_count + unused_count > 0)
2104 hns3_nic_alloc_rx_buffers(ring,
2105 clean_count + unused_count);
2106
2107 return recv_pkts;
2108}
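/* Consumed buffers are handed back to hardware in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE descriptors, with one final refill on exit
 * so nothing cleaned in this poll is left unreplenished.
 */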
2109
2110static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2111{
2112#define HNS3_RX_ULTRA_PACKET_RATE 40000
2113 enum hns3_flow_level_range new_flow_level;
2114 struct hns3_enet_tqp_vector *tqp_vector;
2115 int packets_per_secs;
2116 int bytes_per_usecs;
2117 u16 new_int_gl;
2118 int usecs;
2119
2120 if (!ring_group->int_gl)
2121 return false;
2122
2123 if (ring_group->total_packets == 0) {
2124 ring_group->int_gl = HNS3_INT_GL_50K;
2125 ring_group->flow_level = HNS3_FLOW_LOW;
2126 return true;
2127 }
2128
 2129	/* Simple throttle rate management
2130 * 0-10MB/s lower (50000 ints/s)
2131 * 10-20MB/s middle (20000 ints/s)
2132 * 20-1249MB/s high (18000 ints/s)
2133 * > 40000pps ultra (8000 ints/s)
2134 */
2135 new_flow_level = ring_group->flow_level;
2136 new_int_gl = ring_group->int_gl;
2137 tqp_vector = ring_group->ring->tqp_vector;
2138 usecs = (ring_group->int_gl << 1);
2139 bytes_per_usecs = ring_group->total_bytes / usecs;
 2140	/* 1000000 microseconds per second */
2141 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2142
2143 switch (new_flow_level) {
2144 case HNS3_FLOW_LOW:
2145 if (bytes_per_usecs > 10)
2146 new_flow_level = HNS3_FLOW_MID;
2147 break;
2148 case HNS3_FLOW_MID:
2149 if (bytes_per_usecs > 20)
2150 new_flow_level = HNS3_FLOW_HIGH;
2151 else if (bytes_per_usecs <= 10)
2152 new_flow_level = HNS3_FLOW_LOW;
2153 break;
2154 case HNS3_FLOW_HIGH:
2155 case HNS3_FLOW_ULTRA:
2156 default:
2157 if (bytes_per_usecs <= 20)
2158 new_flow_level = HNS3_FLOW_MID;
2159 break;
2160 }
2161
2162 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2163 (&tqp_vector->rx_group == ring_group))
2164 new_flow_level = HNS3_FLOW_ULTRA;
2165
2166 switch (new_flow_level) {
2167 case HNS3_FLOW_LOW:
2168 new_int_gl = HNS3_INT_GL_50K;
2169 break;
2170 case HNS3_FLOW_MID:
2171 new_int_gl = HNS3_INT_GL_20K;
2172 break;
2173 case HNS3_FLOW_HIGH:
2174 new_int_gl = HNS3_INT_GL_18K;
2175 break;
2176 case HNS3_FLOW_ULTRA:
2177 new_int_gl = HNS3_INT_GL_8K;
2178 break;
2179 default:
2180 break;
2181 }
2182
2183 ring_group->total_bytes = 0;
2184 ring_group->total_packets = 0;
2185 ring_group->flow_level = new_flow_level;
2186 if (new_int_gl != ring_group->int_gl) {
2187 ring_group->int_gl = new_int_gl;
2188 return true;
2189 }
2190 return false;
2191}
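/* Rough arithmetic behind the estimate above (assuming the GL register
 * value is in 2 us units, which is what the << 1 conversion implies):
 * bytes per microsecond is numerically megabytes per second, so the 10
 * and 20 thresholds in the switch correspond to the 10 MB/s and 20 MB/s
 * boundaries listed in the table.
 */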
2192
2193static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2194{
2195 u16 rx_int_gl, tx_int_gl;
2196 bool rx, tx;
2197
2198 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2199 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2200 rx_int_gl = tqp_vector->rx_group.int_gl;
2201 tx_int_gl = tqp_vector->tx_group.int_gl;
2202 if (rx && tx) {
2203 if (rx_int_gl > tx_int_gl) {
2204 tqp_vector->tx_group.int_gl = rx_int_gl;
2205 tqp_vector->tx_group.flow_level =
2206 tqp_vector->rx_group.flow_level;
2207 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2208 } else {
2209 tqp_vector->rx_group.int_gl = tx_int_gl;
2210 tqp_vector->rx_group.flow_level =
2211 tqp_vector->tx_group.flow_level;
2212 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2213 }
2214 }
2215}
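/* A vector's TX and RX groups end up sharing one programmed GL value
 * here: when both sides request an update, the larger (less aggressive)
 * value is copied to the other group and written to hardware.
 */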
2216
2217static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2218{
2219 struct hns3_enet_ring *ring;
2220 int rx_pkt_total = 0;
2221
2222 struct hns3_enet_tqp_vector *tqp_vector =
2223 container_of(napi, struct hns3_enet_tqp_vector, napi);
2224 bool clean_complete = true;
2225 int rx_budget;
2226
2227 /* Since the actual Tx work is minimal, we can give the Tx a larger
2228 * budget and be more aggressive about cleaning up the Tx descriptors.
2229 */
2230 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2231 if (!hns3_clean_tx_ring(ring, budget))
2232 clean_complete = false;
2233 }
2234
 2235	/* Make sure the RX ring budget is at least 1 */
2236 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2237
2238 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2239 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
2240
2241 if (rx_cleaned >= rx_budget)
2242 clean_complete = false;
2243
2244 rx_pkt_total += rx_cleaned;
2245 }
2246
2247 tqp_vector->rx_group.total_packets += rx_pkt_total;
2248
2249 if (!clean_complete)
2250 return budget;
2251
2252 napi_complete(napi);
2253 hns3_update_new_int_gl(tqp_vector);
2254 hns3_mask_vector_irq(tqp_vector, 1);
2255
2256 return rx_pkt_total;
2257}
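/* Standard NAPI contract: returning the full budget keeps the vector in
 * polling mode, while a completed poll calls napi_complete(), refreshes
 * the coalescing settings and unmasks the vector interrupt.
 */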
2258
2259static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2260 struct hnae3_ring_chain_node *head)
2261{
2262 struct pci_dev *pdev = tqp_vector->handle->pdev;
2263 struct hnae3_ring_chain_node *cur_chain = head;
2264 struct hnae3_ring_chain_node *chain;
2265 struct hns3_enet_ring *tx_ring;
2266 struct hns3_enet_ring *rx_ring;
2267
2268 tx_ring = tqp_vector->tx_group.ring;
2269 if (tx_ring) {
2270 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2271 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2272 HNAE3_RING_TYPE_TX);
2273
2274 cur_chain->next = NULL;
2275
2276 while (tx_ring->next) {
2277 tx_ring = tx_ring->next;
2278
2279 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2280 GFP_KERNEL);
2281 if (!chain)
2282 return -ENOMEM;
2283
2284 cur_chain->next = chain;
2285 chain->tqp_index = tx_ring->tqp->tqp_index;
2286 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2287 HNAE3_RING_TYPE_TX);
2288
2289 cur_chain = chain;
2290 }
2291 }
2292
2293 rx_ring = tqp_vector->rx_group.ring;
2294 if (!tx_ring && rx_ring) {
2295 cur_chain->next = NULL;
2296 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2297 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2298 HNAE3_RING_TYPE_RX);
2299
2300 rx_ring = rx_ring->next;
2301 }
2302
2303 while (rx_ring) {
2304 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2305 if (!chain)
2306 return -ENOMEM;
2307
2308 cur_chain->next = chain;
2309 chain->tqp_index = rx_ring->tqp->tqp_index;
2310 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2311 HNAE3_RING_TYPE_RX);
2312 cur_chain = chain;
2313
2314 rx_ring = rx_ring->next;
2315 }
2316
2317 return 0;
2318}
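/* The chain starts in the caller-provided head node; each additional
 * ring gets a devm-allocated node, TX rings first and then RX rings, so
 * the list can be passed to the AE layer's map/unmap operations.
 */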
2319
2320static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2321 struct hnae3_ring_chain_node *head)
2322{
2323 struct pci_dev *pdev = tqp_vector->handle->pdev;
2324 struct hnae3_ring_chain_node *chain_tmp, *chain;
2325
2326 chain = head->next;
2327
2328 while (chain) {
2329 chain_tmp = chain->next;
2330 devm_kfree(&pdev->dev, chain);
2331 chain = chain_tmp;
2332 }
2333}
2334
2335static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2336 struct hns3_enet_ring *ring)
2337{
2338 ring->next = group->ring;
2339 group->ring = ring;
2340
2341 group->count++;
2342}
2343
2344static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2345{
2346 struct hnae3_ring_chain_node vector_ring_chain;
2347 struct hnae3_handle *h = priv->ae_handle;
2348 struct hns3_enet_tqp_vector *tqp_vector;
2349 struct hnae3_vector_info *vector;
2350 struct pci_dev *pdev = h->pdev;
2351 u16 tqp_num = h->kinfo.num_tqps;
2352 u16 vector_num;
2353 int ret = 0;
2354 u16 i;
2355
 2356	/* The vector count should match the RSS size and the number of online CPUs */
 2357	/* Should consider 2P/4P systems later */
2358 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2359 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2360 GFP_KERNEL);
2361 if (!vector)
2362 return -ENOMEM;
2363
2364 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2365
2366 priv->vector_num = vector_num;
2367 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2368 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2369 GFP_KERNEL);
2370 if (!priv->tqp_vector)
2371 return -ENOMEM;
2372
2373 for (i = 0; i < tqp_num; i++) {
2374 u16 vector_i = i % vector_num;
2375
2376 tqp_vector = &priv->tqp_vector[vector_i];
2377
2378 hns3_add_ring_to_group(&tqp_vector->tx_group,
2379 priv->ring_data[i].ring);
2380
2381 hns3_add_ring_to_group(&tqp_vector->rx_group,
2382 priv->ring_data[i + tqp_num].ring);
2383
2384 tqp_vector->idx = vector_i;
2385 tqp_vector->mask_addr = vector[vector_i].io_addr;
2386 tqp_vector->vector_irq = vector[vector_i].vector;
2387 tqp_vector->num_tqps++;
2388
2389 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2390 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2391 }
2392
2393 for (i = 0; i < vector_num; i++) {
2394 tqp_vector = &priv->tqp_vector[i];
2395
2396 tqp_vector->rx_group.total_bytes = 0;
2397 tqp_vector->rx_group.total_packets = 0;
2398 tqp_vector->tx_group.total_bytes = 0;
2399 tqp_vector->tx_group.total_packets = 0;
2400 hns3_vector_gl_rl_init(tqp_vector);
2401 tqp_vector->handle = h;
2402
2403 ret = hns3_get_vector_ring_chain(tqp_vector,
2404 &vector_ring_chain);
2405 if (ret)
2406 goto out;
2407
2408 ret = h->ae_algo->ops->map_ring_to_vector(h,
2409 tqp_vector->vector_irq, &vector_ring_chain);
2410 if (ret)
2411 goto out;
2412
2413 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2414
2415 netif_napi_add(priv->netdev, &tqp_vector->napi,
2416 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2417 }
2418
2419out:
2420 devm_kfree(&pdev->dev, vector);
2421 return ret;
2422}
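/* Ring layout assumed above: TQP i's TX ring sits in ring_data[i] and
 * its RX ring in ring_data[i + tqp_num], and both attach to vector
 * (i % vector_num). As a hypothetical example, with 8 TQPs and 4
 * vectors, vector 0 services TQPs 0 and 4.
 */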
2423
2424static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2425{
2426 struct hnae3_ring_chain_node vector_ring_chain;
2427 struct hnae3_handle *h = priv->ae_handle;
2428 struct hns3_enet_tqp_vector *tqp_vector;
2429 struct pci_dev *pdev = h->pdev;
2430 int i, ret;
2431
2432 for (i = 0; i < priv->vector_num; i++) {
2433 tqp_vector = &priv->tqp_vector[i];
2434
2435 ret = hns3_get_vector_ring_chain(tqp_vector,
2436 &vector_ring_chain);
2437 if (ret)
2438 return ret;
2439
2440 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2441 tqp_vector->vector_irq, &vector_ring_chain);
2442 if (ret)
2443 return ret;
2444
2445 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2446
2447 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2448 (void)irq_set_affinity_hint(
2449 priv->tqp_vector[i].vector_irq,
2450 NULL);
2451 devm_free_irq(&pdev->dev,
2452 priv->tqp_vector[i].vector_irq,
2453 &priv->tqp_vector[i]);
2454 }
2455
2456 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2457
2458 netif_napi_del(&priv->tqp_vector[i].napi);
2459 }
2460
2461 devm_kfree(&pdev->dev, priv->tqp_vector);
2462
2463 return 0;
2464}
2465
2466static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2467 int ring_type)
2468{
2469 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2470 int queue_num = priv->ae_handle->kinfo.num_tqps;
2471 struct pci_dev *pdev = priv->ae_handle->pdev;
2472 struct hns3_enet_ring *ring;
2473
2474 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2475 if (!ring)
2476 return -ENOMEM;
2477
2478 if (ring_type == HNAE3_RING_TYPE_TX) {
2479 ring_data[q->tqp_index].ring = ring;
2480 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2481 } else {
2482 ring_data[q->tqp_index + queue_num].ring = ring;
2483 ring->io_base = q->io_base;
2484 }
2485
2486 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2487
2488 ring_data[q->tqp_index].queue_index = q->tqp_index;
2489
2490 ring->tqp = q;
2491 ring->desc = NULL;
2492 ring->desc_cb = NULL;
2493 ring->dev = priv->dev;
2494 ring->desc_dma_addr = 0;
2495 ring->buf_size = q->buf_size;
2496 ring->desc_num = q->desc_num;
2497 ring->next_to_use = 0;
2498 ring->next_to_clean = 0;
2499
2500 return 0;
2501}
2502
2503static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2504 struct hns3_nic_priv *priv)
2505{
2506 int ret;
2507
2508 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2509 if (ret)
2510 return ret;
2511
2512 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2513 if (ret)
2514 return ret;
2515
2516 return 0;
2517}
2518
2519static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2520{
2521 struct hnae3_handle *h = priv->ae_handle;
2522 struct pci_dev *pdev = h->pdev;
2523 int i, ret;
2524
2525 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2526 sizeof(*priv->ring_data) * 2,
2527 GFP_KERNEL);
2528 if (!priv->ring_data)
2529 return -ENOMEM;
2530
2531 for (i = 0; i < h->kinfo.num_tqps; i++) {
2532 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2533 if (ret)
2534 goto err;
2535 }
2536
2537 return 0;
2538err:
2539 devm_kfree(&pdev->dev, priv->ring_data);
2540 return ret;
2541}
2542
2543static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2544{
2545 int ret;
2546
2547 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2548 return -EINVAL;
2549
2550 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2551 GFP_KERNEL);
2552 if (!ring->desc_cb) {
2553 ret = -ENOMEM;
2554 goto out;
2555 }
2556
2557 ret = hns3_alloc_desc(ring);
2558 if (ret)
2559 goto out_with_desc_cb;
2560
2561 if (!HNAE3_IS_TX_RING(ring)) {
2562 ret = hns3_alloc_ring_buffers(ring);
2563 if (ret)
2564 goto out_with_desc;
2565 }
2566
2567 return 0;
2568
2569out_with_desc:
2570 hns3_free_desc(ring);
2571out_with_desc_cb:
2572 kfree(ring->desc_cb);
2573 ring->desc_cb = NULL;
2574out:
2575 return ret;
2576}
2577
2578static void hns3_fini_ring(struct hns3_enet_ring *ring)
2579{
2580 hns3_free_desc(ring);
2581 kfree(ring->desc_cb);
2582 ring->desc_cb = NULL;
2583 ring->next_to_clean = 0;
2584 ring->next_to_use = 0;
2585}
2586
1db9b1bf 2587static int hns3_buf_size2type(u32 buf_size)
2588{
2589 int bd_size_type;
2590
2591 switch (buf_size) {
2592 case 512:
2593 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2594 break;
2595 case 1024:
2596 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2597 break;
2598 case 2048:
2599 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2600 break;
2601 case 4096:
2602 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2603 break;
2604 default:
2605 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2606 }
2607
2608 return bd_size_type;
2609}
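/* Only 512, 1024, 2048 and 4096 byte buffers have dedicated BD size
 * types; any other configured size falls back to the 2048 type.
 */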
2610
2611static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2612{
2613 dma_addr_t dma = ring->desc_dma_addr;
2614 struct hnae3_queue *q = ring->tqp;
2615
2616 if (!HNAE3_IS_TX_RING(ring)) {
2617 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2618 (u32)dma);
2619 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2620 (u32)((dma >> 31) >> 1));
2621
2622 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2623 hns3_buf_size2type(ring->buf_size));
2624 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2625 ring->desc_num / 8 - 1);
2626
2627 } else {
2628 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2629 (u32)dma);
2630 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2631 (u32)((dma >> 31) >> 1));
2632
2633 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2634 hns3_buf_size2type(ring->buf_size));
2635 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2636 ring->desc_num / 8 - 1);
2637 }
2638}
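/* The 64-bit descriptor DMA address is split across the low/high 32-bit
 * BASEADDR registers; the (dma >> 31) >> 1 form avoids an undefined
 * 32-bit shift when dma_addr_t is 32 bits wide. The desc_num / 8 - 1
 * value suggests the BD_NUM register counts descriptors in groups of
 * eight.
 */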
2639
2640static int hns3_init_all_ring(struct hns3_nic_priv *priv)
2641{
2642 struct hnae3_handle *h = priv->ae_handle;
2643 int ring_num = h->kinfo.num_tqps * 2;
2644 int i, j;
2645 int ret;
2646
2647 for (i = 0; i < ring_num; i++) {
2648 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2649 if (ret) {
2650 dev_err(priv->dev,
2651 "Alloc ring memory fail! ret=%d\n", ret);
2652 goto out_when_alloc_ring_memory;
2653 }
2654
2655 hns3_init_ring_hw(priv->ring_data[i].ring);
2656
2657 u64_stats_init(&priv->ring_data[i].ring->syncp);
2658 }
2659
2660 return 0;
2661
2662out_when_alloc_ring_memory:
2663 for (j = i - 1; j >= 0; j--)
 2664		hns3_fini_ring(priv->ring_data[j].ring);
2665
2666 return -ENOMEM;
2667}
2668
2669static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2670{
2671 struct hnae3_handle *h = priv->ae_handle;
2672 int i;
2673
2674 for (i = 0; i < h->kinfo.num_tqps; i++) {
2675 if (h->ae_algo->ops->reset_queue)
2676 h->ae_algo->ops->reset_queue(h, i);
2677
2678 hns3_fini_ring(priv->ring_data[i].ring);
2679 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2680 }
2681
2682 return 0;
2683}
2684
 2685/* Set the MAC address if it is configured, or leave it to the AE driver */
2686static void hns3_init_mac_addr(struct net_device *netdev)
2687{
2688 struct hns3_nic_priv *priv = netdev_priv(netdev);
2689 struct hnae3_handle *h = priv->ae_handle;
2690 u8 mac_addr_temp[ETH_ALEN];
2691
2692 if (h->ae_algo->ops->get_mac_addr) {
2693 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2694 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2695 }
2696
2697 /* Check if the MAC address is valid, if not get a random one */
2698 if (!is_valid_ether_addr(netdev->dev_addr)) {
2699 eth_hw_addr_random(netdev);
2700 dev_warn(priv->dev, "using random MAC address %pM\n",
2701 netdev->dev_addr);
76ad4f0e 2702 }
2703
2704 if (h->ae_algo->ops->set_mac_addr)
2705 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2706
2707}
2708
2709static void hns3_nic_set_priv_ops(struct net_device *netdev)
2710{
2711 struct hns3_nic_priv *priv = netdev_priv(netdev);
2712
2713 if ((netdev->features & NETIF_F_TSO) ||
2714 (netdev->features & NETIF_F_TSO6)) {
2715 priv->ops.fill_desc = hns3_fill_desc_tso;
2716 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2717 } else {
2718 priv->ops.fill_desc = hns3_fill_desc;
2719 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2720 }
2721}
2722
2723static int hns3_client_init(struct hnae3_handle *handle)
2724{
2725 struct pci_dev *pdev = handle->pdev;
2726 struct hns3_nic_priv *priv;
2727 struct net_device *netdev;
2728 int ret;
2729
2730 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2731 handle->kinfo.num_tqps);
2732 if (!netdev)
2733 return -ENOMEM;
2734
2735 priv = netdev_priv(netdev);
2736 priv->dev = &pdev->dev;
2737 priv->netdev = netdev;
2738 priv->ae_handle = handle;
2739
2740 handle->kinfo.netdev = netdev;
2741 handle->priv = (void *)priv;
2742
2743 hns3_init_mac_addr(netdev);
2744
2745 hns3_set_default_feature(netdev);
2746
2747 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2748 netdev->priv_flags |= IFF_UNICAST_FLT;
2749 netdev->netdev_ops = &hns3_nic_netdev_ops;
2750 SET_NETDEV_DEV(netdev, &pdev->dev);
2751 hns3_ethtool_set_ops(netdev);
2752 hns3_nic_set_priv_ops(netdev);
2753
2754 /* Carrier off reporting is important to ethtool even BEFORE open */
2755 netif_carrier_off(netdev);
2756
2757 ret = hns3_get_ring_config(priv);
2758 if (ret) {
2759 ret = -ENOMEM;
2760 goto out_get_ring_cfg;
2761 }
2762
2763 ret = hns3_nic_init_vector_data(priv);
2764 if (ret) {
2765 ret = -ENOMEM;
2766 goto out_init_vector_data;
2767 }
2768
2769 ret = hns3_init_all_ring(priv);
2770 if (ret) {
2771 ret = -ENOMEM;
2772 goto out_init_ring_data;
2773 }
2774
2775 ret = register_netdev(netdev);
2776 if (ret) {
2777 dev_err(priv->dev, "probe register netdev fail!\n");
2778 goto out_reg_netdev_fail;
2779 }
2780
2781 hns3_dcbnl_setup(handle);
2782
2783 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
2784 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2785
2786 return ret;
2787
2788out_reg_netdev_fail:
2789out_init_ring_data:
2790 (void)hns3_nic_uninit_vector_data(priv);
2791 priv->ring_data = NULL;
2792out_init_vector_data:
2793out_get_ring_cfg:
2794 priv->ae_handle = NULL;
2795 free_netdev(netdev);
2796 return ret;
2797}
2798
2799static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2800{
2801 struct net_device *netdev = handle->kinfo.netdev;
2802 struct hns3_nic_priv *priv = netdev_priv(netdev);
2803 int ret;
2804
2805 if (netdev->reg_state != NETREG_UNINITIALIZED)
2806 unregister_netdev(netdev);
2807
2808 ret = hns3_nic_uninit_vector_data(priv);
2809 if (ret)
2810 netdev_err(netdev, "uninit vector error\n");
2811
2812 ret = hns3_uninit_all_ring(priv);
2813 if (ret)
2814 netdev_err(netdev, "uninit ring error\n");
2815
2816 priv->ring_data = NULL;
2817
2818 free_netdev(netdev);
2819}
2820
2821static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2822{
2823 struct net_device *netdev = handle->kinfo.netdev;
2824
2825 if (!netdev)
2826 return;
2827
2828 if (linkup) {
2829 netif_carrier_on(netdev);
2830 netif_tx_wake_all_queues(netdev);
2831 netdev_info(netdev, "link up\n");
2832 } else {
2833 netif_carrier_off(netdev);
2834 netif_tx_stop_all_queues(netdev);
2835 netdev_info(netdev, "link down\n");
2836 }
2837}
2838
2839static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
2840{
2841 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
2842 struct net_device *ndev = kinfo->netdev;
075cfdd6 2843 bool if_running;
2844 int ret;
2845 u8 i;
2846
2847 if (tc > HNAE3_MAX_TC)
2848 return -EINVAL;
2849
2850 if (!ndev)
2851 return -ENODEV;
2852
2853 if_running = netif_running(ndev);
2854
2855 ret = netdev_set_num_tc(ndev, tc);
2856 if (ret)
2857 return ret;
2858
2859 if (if_running) {
2860 (void)hns3_nic_net_stop(ndev);
2861 msleep(100);
2862 }
2863
2864 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
2865 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
2866 if (ret)
2867 goto err_out;
2868
2869 if (tc <= 1) {
2870 netdev_reset_tc(ndev);
2871 goto out;
2872 }
2873
2874 for (i = 0; i < HNAE3_MAX_TC; i++) {
2875 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
2876
2877 if (tc_info->enable)
2878 netdev_set_tc_queue(ndev,
2879 tc_info->tc,
2880 tc_info->tqp_count,
2881 tc_info->tqp_offset);
2882 }
2883
2884 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
2885 netdev_set_prio_tc_map(ndev, i,
2886 kinfo->prio_tc[i]);
2887 }
2888
2889out:
2890 ret = hns3_nic_set_real_num_queue(ndev);
2891
2892err_out:
2893 if (if_running)
2894 (void)hns3_nic_net_open(ndev);
2895
2896 return ret;
2897}
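/* TC reconfiguration sequence used above: pause the interface if it is
 * running, let the DCB map_update callback reprogram the hardware,
 * rebuild the netdev TC/queue and priority mappings, refresh the real
 * queue count and finally reopen the interface.
 */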
2898
1db9b1bf 2899static const struct hnae3_client_ops client_ops = {
2900 .init_instance = hns3_client_init,
2901 .uninit_instance = hns3_client_uninit,
2902 .link_status_change = hns3_link_status_change,
9df8f79a 2903 .setup_tc = hns3_client_setup_tc,
2904};
2905
2906/* hns3_init_module - Driver registration routine
2907 * hns3_init_module is the first routine called when the driver is
2908 * loaded. All it does is register with the PCI subsystem.
2909 */
2910static int __init hns3_init_module(void)
2911{
2912 int ret;
2913
2914 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
2915 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2916
2917 client.type = HNAE3_CLIENT_KNIC;
2918 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2919 hns3_driver_name);
2920
2921 client.ops = &client_ops;
2922
2923 ret = hnae3_register_client(&client);
2924 if (ret)
2925 return ret;
2926
2927 ret = pci_register_driver(&hns3_driver);
2928 if (ret)
2929 hnae3_unregister_client(&client);
2930
2931 return ret;
2932}
2933module_init(hns3_init_module);
2934
2935/* hns3_exit_module - Driver exit cleanup routine
2936 * hns3_exit_module is called just before the driver is removed
2937 * from memory.
2938 */
2939static void __exit hns3_exit_module(void)
2940{
2941 pci_unregister_driver(&hns3_driver);
2942 hnae3_unregister_client(&client);
2943}
2944module_exit(hns3_exit_module);
2945
2946MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2947MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2948MODULE_LICENSE("GPL");
2949MODULE_ALIAS("pci:hns-nic");