1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/if_vlan.h>
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/skbuff.h>
19#include <linux/sctp.h>
20#include <linux/vermagic.h>
21#include <net/gre.h>
22#include <net/pkt_cls.h>
23#include <net/vxlan.h>
24
25#include "hnae3.h"
26#include "hns3_enet.h"
27
28static const char hns3_driver_name[] = "hns3";
29const char hns3_driver_version[] = VERMAGIC_STRING;
30static const char hns3_driver_string[] =
31 "Hisilicon Ethernet Network Driver for Hip08 Family";
32static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
33static struct hnae3_client client;
34
35/* hns3_pci_tbl - PCI Device ID Table
36 *
37 * Last entry must be all 0s
38 *
39 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
40 * Class, Class Mask, private data (not used) }
41 */
42static const struct pci_device_id hns3_pci_tbl[] = {
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
46 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
48 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
50 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
52 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
54 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
55 /* required last entry */
56 {0, }
57};
58MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
59
60static irqreturn_t hns3_irq_handle(int irq, void *dev)
61{
62 struct hns3_enet_tqp_vector *tqp_vector = dev;
63
64 napi_schedule(&tqp_vector->napi);
65
66 return IRQ_HANDLED;
67}
68
69static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
70{
71 struct hns3_enet_tqp_vector *tqp_vectors;
72 unsigned int i;
73
74 for (i = 0; i < priv->vector_num; i++) {
75 tqp_vectors = &priv->tqp_vector[i];
76
77 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
78 continue;
79
80 /* release the irq resource */
81 free_irq(tqp_vectors->vector_irq, tqp_vectors);
82 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
83 }
84}
85
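/* Request one IRQ per enet TQP vector that is not yet initialized, naming it
 * "<netdev>-TxRx|Rx|Tx-<index>" according to the rings attached to the vector.
 */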
86static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
87{
88 struct hns3_enet_tqp_vector *tqp_vectors;
89 int txrx_int_idx = 0;
90 int rx_int_idx = 0;
91 int tx_int_idx = 0;
92 unsigned int i;
93 int ret;
94
95 for (i = 0; i < priv->vector_num; i++) {
96 tqp_vectors = &priv->tqp_vector[i];
97
98 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
99 continue;
100
101 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
102 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
103 "%s-%s-%d", priv->netdev->name, "TxRx",
104 txrx_int_idx++);
105 txrx_int_idx++;
106 } else if (tqp_vectors->rx_group.ring) {
107 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
108 "%s-%s-%d", priv->netdev->name, "Rx",
109 rx_int_idx++);
110 } else if (tqp_vectors->tx_group.ring) {
111 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
112 "%s-%s-%d", priv->netdev->name, "Tx",
113 tx_int_idx++);
114 } else {
115 /* Skip this unused q_vector */
116 continue;
117 }
118
119 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
120
121 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
122 tqp_vectors->name,
123 tqp_vectors);
124 if (ret) {
125 netdev_err(priv->netdev, "request irq(%d) fail\n",
126 tqp_vectors->vector_irq);
127 return ret;
128 }
129
130 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
131 }
132
133 return 0;
134}
135
136static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
137 u32 mask_en)
138{
139 writel(mask_en, tqp_vector->mask_addr);
140}
141
142static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
143{
144 napi_enable(&tqp_vector->napi);
145
146 /* enable vector */
147 hns3_mask_vector_irq(tqp_vector, 1);
148}
149
150static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
151{
152 /* disable vector */
153 hns3_mask_vector_irq(tqp_vector, 0);
154
155 disable_irq(tqp_vector->vector_irq);
156 napi_disable(&tqp_vector->napi);
157}
158
159static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
160 u32 gl_value)
161{
162 /* this defines the configuration for GL (Interrupt Gap Limiter)
 163 * GL defines the inter-interrupt gap.
 164 * GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing
165 */
166 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
167 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
168 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
169}
170
171static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
172 u32 rl_value)
173{
174 /* this defines the configuration for RL (Interrupt Rate Limiter).
 175 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
 176 * GL and RL (Rate Limiter) are 2 ways to achieve interrupt coalescing
177 */
178 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
179}
180
181static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
182{
183 /* initialize the configuration for interrupt coalescing.
184 * 1. GL (Interrupt Gap Limiter)
185 * 2. RL (Interrupt Rate Limiter)
186 */
187
 188 /* Default: enable interrupt coalescing */
189 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
190 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
191 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
192 /* for now we are disabling Interrupt RL - we
193 * will re-enable later
194 */
195 hns3_set_vector_coalesc_rl(tqp_vector, 0);
196 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
197 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
198}
199
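/* Report the real number of Tx/Rx queues in use (rss_size * num_tc) to the stack */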
200static int hns3_nic_set_real_num_queue(struct net_device *netdev)
201{
202 struct hnae3_handle *h = hns3_get_handle(netdev);
203 struct hnae3_knic_private_info *kinfo = &h->kinfo;
204 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
205 int ret;
206
207 ret = netif_set_real_num_tx_queues(netdev, queue_size);
208 if (ret) {
209 netdev_err(netdev,
210 "netif_set_real_num_tx_queues fail, ret=%d!\n",
211 ret);
212 return ret;
213 }
214
215 ret = netif_set_real_num_rx_queues(netdev, queue_size);
216 if (ret) {
217 netdev_err(netdev,
218 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
219 return ret;
220 }
221
222 return 0;
223}
224
225static int hns3_nic_net_up(struct net_device *netdev)
226{
227 struct hns3_nic_priv *priv = netdev_priv(netdev);
228 struct hnae3_handle *h = priv->ae_handle;
229 int i, j;
230 int ret;
231
232 /* get irq resource for all vectors */
233 ret = hns3_nic_init_irq(priv);
234 if (ret) {
235 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
236 return ret;
237 }
238
239 /* enable the vectors */
240 for (i = 0; i < priv->vector_num; i++)
241 hns3_vector_enable(&priv->tqp_vector[i]);
242
243 /* start the ae_dev */
244 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
245 if (ret)
246 goto out_start_err;
247
248 return 0;
249
250out_start_err:
251 for (j = i - 1; j >= 0; j--)
252 hns3_vector_disable(&priv->tqp_vector[j]);
253
254 hns3_nic_uninit_irq(priv);
255
256 return ret;
257}
258
259static int hns3_nic_net_open(struct net_device *netdev)
260{
261 int ret;
262
263 netif_carrier_off(netdev);
264
265 ret = hns3_nic_set_real_num_queue(netdev);
266 if (ret)
267 return ret;
268
269 ret = hns3_nic_net_up(netdev);
270 if (ret) {
271 netdev_err(netdev,
272 "hns net up fail, ret=%d!\n", ret);
273 return ret;
274 }
275
276 return 0;
277}
278
279static void hns3_nic_net_down(struct net_device *netdev)
280{
281 struct hns3_nic_priv *priv = netdev_priv(netdev);
282 const struct hnae3_ae_ops *ops;
283 int i;
284
285 /* stop ae_dev */
286 ops = priv->ae_handle->ae_algo->ops;
287 if (ops->stop)
288 ops->stop(priv->ae_handle);
289
290 /* disable vectors */
291 for (i = 0; i < priv->vector_num; i++)
292 hns3_vector_disable(&priv->tqp_vector[i]);
293
294 /* free irq resources */
295 hns3_nic_uninit_irq(priv);
296}
297
298static int hns3_nic_net_stop(struct net_device *netdev)
299{
300 netif_tx_stop_all_queues(netdev);
301 netif_carrier_off(netdev);
302
303 hns3_nic_net_down(netdev);
304
305 return 0;
306}
307
308static int hns3_nic_uc_sync(struct net_device *netdev,
309 const unsigned char *addr)
310{
311 struct hnae3_handle *h = hns3_get_handle(netdev);
312
313 if (h->ae_algo->ops->add_uc_addr)
314 return h->ae_algo->ops->add_uc_addr(h, addr);
315
316 return 0;
317}
318
319static int hns3_nic_uc_unsync(struct net_device *netdev,
320 const unsigned char *addr)
321{
322 struct hnae3_handle *h = hns3_get_handle(netdev);
323
324 if (h->ae_algo->ops->rm_uc_addr)
325 return h->ae_algo->ops->rm_uc_addr(h, addr);
326
327 return 0;
328}
329
330static int hns3_nic_mc_sync(struct net_device *netdev,
331 const unsigned char *addr)
332{
333 struct hnae3_handle *h = hns3_get_handle(netdev);
334
335 if (h->ae_algo->ops->add_mc_addr)
336 return h->ae_algo->ops->add_mc_addr(h, addr);
337
338 return 0;
339}
340
341static int hns3_nic_mc_unsync(struct net_device *netdev,
342 const unsigned char *addr)
343{
344 struct hnae3_handle *h = hns3_get_handle(netdev);
345
346 if (h->ae_algo->ops->rm_mc_addr)
347 return h->ae_algo->ops->rm_mc_addr(h, addr);
348
349 return 0;
350}
351
352static void hns3_nic_set_rx_mode(struct net_device *netdev)
353{
354 struct hnae3_handle *h = hns3_get_handle(netdev);
355
356 if (h->ae_algo->ops->set_promisc_mode) {
357 if (netdev->flags & IFF_PROMISC)
358 h->ae_algo->ops->set_promisc_mode(h, 1);
359 else
360 h->ae_algo->ops->set_promisc_mode(h, 0);
361 }
362 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
363 netdev_err(netdev, "sync uc address fail\n");
364 if (netdev->flags & IFF_MULTICAST)
365 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
366 netdev_err(netdev, "sync mc address fail\n");
367}
368
369static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
370 u16 *mss, u32 *type_cs_vlan_tso)
371{
372 u32 l4_offset, hdr_len;
373 union l3_hdr_info l3;
374 union l4_hdr_info l4;
375 u32 l4_paylen;
376 int ret;
377
378 if (!skb_is_gso(skb))
379 return 0;
380
381 ret = skb_cow_head(skb, 0);
382 if (ret)
383 return ret;
384
385 l3.hdr = skb_network_header(skb);
386 l4.hdr = skb_transport_header(skb);
387
388 /* Software should clear the IPv4's checksum field when tso is
389 * needed.
390 */
391 if (l3.v4->version == 4)
392 l3.v4->check = 0;
393
394 /* tunnel packet.*/
395 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
396 SKB_GSO_GRE_CSUM |
397 SKB_GSO_UDP_TUNNEL |
398 SKB_GSO_UDP_TUNNEL_CSUM)) {
399 if ((!(skb_shinfo(skb)->gso_type &
400 SKB_GSO_PARTIAL)) &&
401 (skb_shinfo(skb)->gso_type &
402 SKB_GSO_UDP_TUNNEL_CSUM)) {
403 /* Software should clear the udp's checksum
404 * field when tso is needed.
405 */
406 l4.udp->check = 0;
407 }
408 /* reset l3&l4 pointers from outer to inner headers */
409 l3.hdr = skb_inner_network_header(skb);
410 l4.hdr = skb_inner_transport_header(skb);
411
412 /* Software should clear the IPv4's checksum field when
413 * tso is needed.
414 */
415 if (l3.v4->version == 4)
416 l3.v4->check = 0;
417 }
418
419 /* normal or tunnel packet*/
420 l4_offset = l4.hdr - skb->data;
421 hdr_len = (l4.tcp->doff * 4) + l4_offset;
422
423 /* remove payload length from inner pseudo checksum when tso*/
424 l4_paylen = skb->len - l4_offset;
425 csum_replace_by_diff(&l4.tcp->check,
426 (__force __wsum)htonl(l4_paylen));
427
428 /* find the txbd field values */
429 *paylen = skb->len - hdr_len;
430 hnae_set_bit(*type_cs_vlan_tso,
431 HNS3_TXD_TSO_B, 1);
432
433 /* get MSS for TSO */
434 *mss = skb_shinfo(skb)->gso_size;
435
436 return 0;
437}
438
439static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
440 u8 *il4_proto)
441{
442 union {
443 struct iphdr *v4;
444 struct ipv6hdr *v6;
445 unsigned char *hdr;
446 } l3;
447 unsigned char *l4_hdr;
448 unsigned char *exthdr;
449 u8 l4_proto_tmp;
450 __be16 frag_off;
451
452 /* find outer header point */
453 l3.hdr = skb_network_header(skb);
454 l4_hdr = skb_inner_transport_header(skb);
455
456 if (skb->protocol == htons(ETH_P_IPV6)) {
457 exthdr = l3.hdr + sizeof(*l3.v6);
458 l4_proto_tmp = l3.v6->nexthdr;
459 if (l4_hdr != exthdr)
460 ipv6_skip_exthdr(skb, exthdr - skb->data,
461 &l4_proto_tmp, &frag_off);
462 } else if (skb->protocol == htons(ETH_P_IP)) {
463 l4_proto_tmp = l3.v4->protocol;
464 } else {
465 return -EINVAL;
466 }
467
468 *ol4_proto = l4_proto_tmp;
469
470 /* tunnel packet */
471 if (!skb->encapsulation) {
472 *il4_proto = 0;
473 return 0;
474 }
475
476 /* find inner header point */
477 l3.hdr = skb_inner_network_header(skb);
478 l4_hdr = skb_inner_transport_header(skb);
479
480 if (l3.v6->version == 6) {
481 exthdr = l3.hdr + sizeof(*l3.v6);
482 l4_proto_tmp = l3.v6->nexthdr;
483 if (l4_hdr != exthdr)
484 ipv6_skip_exthdr(skb, exthdr - skb->data,
485 &l4_proto_tmp, &frag_off);
486 } else if (l3.v4->version == 4) {
487 l4_proto_tmp = l3.v4->protocol;
488 }
489
490 *il4_proto = l4_proto_tmp;
491
492 return 0;
493}
494
495static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
496 u8 il4_proto, u32 *type_cs_vlan_tso,
497 u32 *ol_type_vlan_len_msec)
498{
499 union {
500 struct iphdr *v4;
501 struct ipv6hdr *v6;
502 unsigned char *hdr;
503 } l3;
504 union {
505 struct tcphdr *tcp;
506 struct udphdr *udp;
507 struct gre_base_hdr *gre;
508 unsigned char *hdr;
509 } l4;
510 unsigned char *l2_hdr;
511 u8 l4_proto = ol4_proto;
512 u32 ol2_len;
513 u32 ol3_len;
514 u32 ol4_len;
515 u32 l2_len;
516 u32 l3_len;
517
518 l3.hdr = skb_network_header(skb);
519 l4.hdr = skb_transport_header(skb);
520
521 /* compute L2 header size for normal packet, defined in 2 Bytes */
522 l2_len = l3.hdr - skb->data;
523 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
524 HNS3_TXD_L2LEN_S, l2_len >> 1);
525
526 /* tunnel packet*/
527 if (skb->encapsulation) {
528 /* compute OL2 header size, defined in 2 Bytes */
529 ol2_len = l2_len;
530 hnae_set_field(*ol_type_vlan_len_msec,
531 HNS3_TXD_L2LEN_M,
532 HNS3_TXD_L2LEN_S, ol2_len >> 1);
533
534 /* compute OL3 header size, defined in 4 Bytes */
535 ol3_len = l4.hdr - l3.hdr;
536 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
537 HNS3_TXD_L3LEN_S, ol3_len >> 2);
538
539 /* MAC in UDP, MAC in GRE (0x6558)*/
540 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
541 /* switch MAC header ptr from outer to inner header.*/
542 l2_hdr = skb_inner_mac_header(skb);
543
544 /* compute OL4 header size, defined in 4 Bytes. */
545 ol4_len = l2_hdr - l4.hdr;
546 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
547 HNS3_TXD_L4LEN_S, ol4_len >> 2);
548
549 /* switch IP header ptr from outer to inner header */
550 l3.hdr = skb_inner_network_header(skb);
551
552 /* compute inner l2 header size, defined in 2 Bytes. */
553 l2_len = l3.hdr - l2_hdr;
554 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
555 HNS3_TXD_L2LEN_S, l2_len >> 1);
556 } else {
 557 /* for skb packet types not supported by hardware,
 558 * the txbd len field is not filled.
559 */
560 return;
561 }
562
563 /* switch L4 header pointer from outer to inner */
564 l4.hdr = skb_inner_transport_header(skb);
565
566 l4_proto = il4_proto;
567 }
568
569 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
570 l3_len = l4.hdr - l3.hdr;
571 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
572 HNS3_TXD_L3LEN_S, l3_len >> 2);
573
574 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
575 switch (l4_proto) {
576 case IPPROTO_TCP:
577 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
578 HNS3_TXD_L4LEN_S, l4.tcp->doff);
579 break;
580 case IPPROTO_SCTP:
581 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
582 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
583 break;
584 case IPPROTO_UDP:
585 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
586 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
587 break;
588 default:
 589 /* for skb packet types not supported by hardware,
 590 * the txbd len field is not filled.
591 */
592 return;
593 }
594}
595
596static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
597 u8 il4_proto, u32 *type_cs_vlan_tso,
598 u32 *ol_type_vlan_len_msec)
599{
600 union {
601 struct iphdr *v4;
602 struct ipv6hdr *v6;
603 unsigned char *hdr;
604 } l3;
605 u32 l4_proto = ol4_proto;
606
607 l3.hdr = skb_network_header(skb);
608
609 /* define OL3 type and tunnel type(OL4).*/
610 if (skb->encapsulation) {
611 /* define outer network header type.*/
612 if (skb->protocol == htons(ETH_P_IP)) {
613 if (skb_is_gso(skb))
614 hnae_set_field(*ol_type_vlan_len_msec,
615 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
616 HNS3_OL3T_IPV4_CSUM);
617 else
618 hnae_set_field(*ol_type_vlan_len_msec,
619 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
620 HNS3_OL3T_IPV4_NO_CSUM);
621
622 } else if (skb->protocol == htons(ETH_P_IPV6)) {
623 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
624 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
625 }
626
627 /* define tunnel type(OL4).*/
628 switch (l4_proto) {
629 case IPPROTO_UDP:
630 hnae_set_field(*ol_type_vlan_len_msec,
631 HNS3_TXD_TUNTYPE_M,
632 HNS3_TXD_TUNTYPE_S,
633 HNS3_TUN_MAC_IN_UDP);
634 break;
635 case IPPROTO_GRE:
636 hnae_set_field(*ol_type_vlan_len_msec,
637 HNS3_TXD_TUNTYPE_M,
638 HNS3_TXD_TUNTYPE_S,
639 HNS3_TUN_NVGRE);
640 break;
641 default:
 642 /* drop the skb tunnel packet if hardware doesn't support it,
 643 * because hardware can't calculate the csum when doing TSO.
644 */
645 if (skb_is_gso(skb))
646 return -EDOM;
647
648 /* the stack computes the IP header already,
 649 * the driver calculates the l4 checksum when not doing TSO.
650 */
651 skb_checksum_help(skb);
652 return 0;
653 }
654
655 l3.hdr = skb_inner_network_header(skb);
656 l4_proto = il4_proto;
657 }
658
659 if (l3.v4->version == 4) {
660 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
661 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
662
663 /* the stack computes the IP header already, the only time we
664 * need the hardware to recompute it is in the case of TSO.
665 */
666 if (skb_is_gso(skb))
667 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
668
669 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
670 } else if (l3.v6->version == 6) {
671 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
672 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
673 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
674 }
675
676 switch (l4_proto) {
677 case IPPROTO_TCP:
678 hnae_set_field(*type_cs_vlan_tso,
679 HNS3_TXD_L4T_M,
680 HNS3_TXD_L4T_S,
681 HNS3_L4T_TCP);
682 break;
683 case IPPROTO_UDP:
684 hnae_set_field(*type_cs_vlan_tso,
685 HNS3_TXD_L4T_M,
686 HNS3_TXD_L4T_S,
687 HNS3_L4T_UDP);
688 break;
689 case IPPROTO_SCTP:
690 hnae_set_field(*type_cs_vlan_tso,
691 HNS3_TXD_L4T_M,
692 HNS3_TXD_L4T_S,
693 HNS3_L4T_SCTP);
694 break;
695 default:
 696 /* drop the skb tunnel packet if hardware doesn't support it,
 697 * because hardware can't calculate the csum when doing TSO.
698 */
699 if (skb_is_gso(skb))
700 return -EDOM;
701
702 /* the stack computes the IP header already,
 703 * the driver calculates the l4 checksum when not doing TSO.
704 */
705 skb_checksum_help(skb);
706 return 0;
707 }
708
709 return 0;
710}
711
712static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
713{
714 /* Config bd buffer end */
715 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
716 HNS3_TXD_BDTYPE_M, 0);
717 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
718 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
719 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
720}
721
722static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
723 int size, dma_addr_t dma, int frag_end,
724 enum hns_desc_type type)
725{
726 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
727 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
728 u32 ol_type_vlan_len_msec = 0;
729 u16 bdtp_fe_sc_vld_ra_ri = 0;
730 u32 type_cs_vlan_tso = 0;
731 struct sk_buff *skb;
732 u32 paylen = 0;
733 u16 mss = 0;
734 __be16 protocol;
735 u8 ol4_proto;
736 u8 il4_proto;
737 int ret;
738
739 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
740 desc_cb->priv = priv;
741 desc_cb->length = size;
742 desc_cb->dma = dma;
743 desc_cb->type = type;
744
745 /* now, fill the descriptor */
746 desc->addr = cpu_to_le64(dma);
747 desc->tx.send_size = cpu_to_le16((u16)size);
748 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
749 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
750
751 if (type == DESC_TYPE_SKB) {
752 skb = (struct sk_buff *)priv;
753 paylen = skb->len;
754
755 if (skb->ip_summed == CHECKSUM_PARTIAL) {
756 skb_reset_mac_len(skb);
757 protocol = skb->protocol;
758
759 /* vlan packet*/
760 if (protocol == htons(ETH_P_8021Q)) {
761 protocol = vlan_get_protocol(skb);
762 skb->protocol = protocol;
763 }
764 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
765 if (ret)
766 return ret;
767 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
768 &type_cs_vlan_tso,
769 &ol_type_vlan_len_msec);
770 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
771 &type_cs_vlan_tso,
772 &ol_type_vlan_len_msec);
773 if (ret)
774 return ret;
775
776 ret = hns3_set_tso(skb, &paylen, &mss,
777 &type_cs_vlan_tso);
778 if (ret)
779 return ret;
780 }
781
782 /* Set txbd */
783 desc->tx.ol_type_vlan_len_msec =
784 cpu_to_le32(ol_type_vlan_len_msec);
785 desc->tx.type_cs_vlan_tso_len =
786 cpu_to_le32(type_cs_vlan_tso);
787 desc->tx.paylen = cpu_to_le32(paylen);
788 desc->tx.mss = cpu_to_le16(mss);
789 }
790
791 /* move ring pointer to next.*/
792 ring_ptr_move_fw(ring, next_to_use);
793
794 return 0;
795}
796
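/* Same as hns3_fill_desc(), but splits a buffer larger than HNS3_MAX_BD_SIZE
 * across multiple BDs; only the first BD of an SKB keeps DESC_TYPE_SKB.
 */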
797static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
798 int size, dma_addr_t dma, int frag_end,
799 enum hns_desc_type type)
800{
801 unsigned int frag_buf_num;
802 unsigned int k;
803 int sizeoflast;
804 int ret;
805
806 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
807 sizeoflast = size % HNS3_MAX_BD_SIZE;
808 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
809
 810 /* When the frag size is bigger than the hardware limit, split this frag */
811 for (k = 0; k < frag_buf_num; k++) {
812 ret = hns3_fill_desc(ring, priv,
813 (k == frag_buf_num - 1) ?
814 sizeoflast : HNS3_MAX_BD_SIZE,
815 dma + HNS3_MAX_BD_SIZE * k,
816 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
817 (type == DESC_TYPE_SKB && !k) ?
818 DESC_TYPE_SKB : DESC_TYPE_PAGE);
819 if (ret)
820 return ret;
821 }
822
823 return 0;
824}
825
826static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
827 struct hns3_enet_ring *ring)
828{
829 struct sk_buff *skb = *out_skb;
830 struct skb_frag_struct *frag;
831 int bdnum_for_frag;
832 int frag_num;
833 int buf_num;
834 int size;
835 int i;
836
837 size = skb_headlen(skb);
838 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
839
840 frag_num = skb_shinfo(skb)->nr_frags;
841 for (i = 0; i < frag_num; i++) {
842 frag = &skb_shinfo(skb)->frags[i];
843 size = skb_frag_size(frag);
844 bdnum_for_frag =
845 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
846 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
847 return -ENOMEM;
848
849 buf_num += bdnum_for_frag;
850 }
851
852 if (buf_num > ring_space(ring))
853 return -EBUSY;
854
855 *bnum = buf_num;
856 return 0;
857}
858
859static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
860 struct hns3_enet_ring *ring)
861{
862 struct sk_buff *skb = *out_skb;
863 int buf_num;
864
865 /* No. of segments (plus a header) */
866 buf_num = skb_shinfo(skb)->nr_frags + 1;
867
868 if (buf_num > ring_space(ring))
869 return -EBUSY;
870
871 *bnum = buf_num;
872
873 return 0;
874}
875
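/* Error unwind helper: walk next_to_use back to next_to_use_orig, unmapping
 * every descriptor buffer that was DMA mapped on the way.
 */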
876static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
877{
878 struct device *dev = ring_to_dev(ring);
879 unsigned int i;
880
881 for (i = 0; i < ring->desc_num; i++) {
882 /* check if this is where we started */
883 if (ring->next_to_use == next_to_use_orig)
884 break;
885
886 /* unmap the descriptor dma address */
887 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
888 dma_unmap_single(dev,
889 ring->desc_cb[ring->next_to_use].dma,
890 ring->desc_cb[ring->next_to_use].length,
891 DMA_TO_DEVICE);
892 else
893 dma_unmap_page(dev,
894 ring->desc_cb[ring->next_to_use].dma,
895 ring->desc_cb[ring->next_to_use].length,
896 DMA_TO_DEVICE);
897
898 /* rollback one */
899 ring_ptr_move_bw(ring, next_to_use);
900 }
901}
902
903netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
904{
905 struct hns3_nic_priv *priv = netdev_priv(netdev);
906 struct hns3_nic_ring_data *ring_data =
907 &tx_ring_data(priv, skb->queue_mapping);
908 struct hns3_enet_ring *ring = ring_data->ring;
909 struct device *dev = priv->dev;
910 struct netdev_queue *dev_queue;
911 struct skb_frag_struct *frag;
912 int next_to_use_head;
913 int next_to_use_frag;
914 dma_addr_t dma;
915 int buf_num;
916 int seg_num;
917 int size;
918 int ret;
919 int i;
920
921 /* Prefetch the data used later */
922 prefetch(skb->data);
923
924 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
925 case -EBUSY:
926 u64_stats_update_begin(&ring->syncp);
927 ring->stats.tx_busy++;
928 u64_stats_update_end(&ring->syncp);
929
930 goto out_net_tx_busy;
931 case -ENOMEM:
932 u64_stats_update_begin(&ring->syncp);
933 ring->stats.sw_err_cnt++;
934 u64_stats_update_end(&ring->syncp);
935 netdev_err(netdev, "no memory to xmit!\n");
936
937 goto out_err_tx_ok;
938 default:
939 break;
940 }
941
942 /* No. of segments (plus a header) */
943 seg_num = skb_shinfo(skb)->nr_frags + 1;
944 /* Fill the first part */
945 size = skb_headlen(skb);
946
947 next_to_use_head = ring->next_to_use;
948
949 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
950 if (dma_mapping_error(dev, dma)) {
951 netdev_err(netdev, "TX head DMA map failed\n");
952 ring->stats.sw_err_cnt++;
953 goto out_err_tx_ok;
954 }
955
956 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
957 DESC_TYPE_SKB);
958 if (ret)
959 goto head_dma_map_err;
960
961 next_to_use_frag = ring->next_to_use;
962 /* Fill the fragments */
963 for (i = 1; i < seg_num; i++) {
964 frag = &skb_shinfo(skb)->frags[i - 1];
965 size = skb_frag_size(frag);
966 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
967 if (dma_mapping_error(dev, dma)) {
968 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
969 ring->stats.sw_err_cnt++;
970 goto frag_dma_map_err;
971 }
972 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
973 seg_num - 1 == i ? 1 : 0,
974 DESC_TYPE_PAGE);
975
976 if (ret)
977 goto frag_dma_map_err;
978 }
979
 980 /* Complete translating all packets */
981 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
982 netdev_tx_sent_queue(dev_queue, skb->len);
983
984 wmb(); /* Commit all data before submit */
985
986 hnae_queue_xmit(ring->tqp, buf_num);
987
988 return NETDEV_TX_OK;
989
990frag_dma_map_err:
991 hns_nic_dma_unmap(ring, next_to_use_frag);
992
993head_dma_map_err:
994 hns_nic_dma_unmap(ring, next_to_use_head);
995
996out_err_tx_ok:
997 dev_kfree_skb_any(skb);
998 return NETDEV_TX_OK;
999
1000out_net_tx_busy:
1001 netif_stop_subqueue(netdev, ring_data->queue_index);
1002 smp_mb(); /* Commit all data before submit */
1003
1004 return NETDEV_TX_BUSY;
1005}
1006
1007static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1008{
1009 struct hnae3_handle *h = hns3_get_handle(netdev);
1010 struct sockaddr *mac_addr = p;
1011 int ret;
1012
1013 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1014 return -EADDRNOTAVAIL;
1015
1016 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1017 if (ret) {
1018 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1019 return ret;
1020 }
1021
1022 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1023
1024 return 0;
1025}
1026
1027static int hns3_nic_set_features(struct net_device *netdev,
1028 netdev_features_t features)
1029{
1030 struct hns3_nic_priv *priv = netdev_priv(netdev);
1031
1032 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1033 priv->ops.fill_desc = hns3_fill_desc_tso;
1034 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1035 } else {
1036 priv->ops.fill_desc = hns3_fill_desc;
1037 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1038 }
1039
1040 netdev->features = features;
1041 return 0;
1042}
1043
1044static void
1045hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1046{
1047 struct hns3_nic_priv *priv = netdev_priv(netdev);
1048 int queue_num = priv->ae_handle->kinfo.num_tqps;
1049 struct hns3_enet_ring *ring;
1050 unsigned int start;
1051 unsigned int idx;
1052 u64 tx_bytes = 0;
1053 u64 rx_bytes = 0;
1054 u64 tx_pkts = 0;
1055 u64 rx_pkts = 0;
1056
1057 for (idx = 0; idx < queue_num; idx++) {
1058 /* fetch the tx stats */
1059 ring = priv->ring_data[idx].ring;
1060 do {
1061 start = u64_stats_fetch_begin_irq(&ring->syncp);
1062 tx_bytes += ring->stats.tx_bytes;
1063 tx_pkts += ring->stats.tx_pkts;
1064 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1065
1066 /* fetch the rx stats */
1067 ring = priv->ring_data[idx + queue_num].ring;
1068 do {
1069 start = u64_stats_fetch_begin_irq(&ring->syncp);
1070 rx_bytes += ring->stats.rx_bytes;
1071 rx_pkts += ring->stats.rx_pkts;
1072 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1073 }
1074
1075 stats->tx_bytes = tx_bytes;
1076 stats->tx_packets = tx_pkts;
1077 stats->rx_bytes = rx_bytes;
1078 stats->rx_packets = rx_pkts;
1079
1080 stats->rx_errors = netdev->stats.rx_errors;
1081 stats->multicast = netdev->stats.multicast;
1082 stats->rx_length_errors = netdev->stats.rx_length_errors;
1083 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1084 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1085
1086 stats->tx_errors = netdev->stats.tx_errors;
1087 stats->rx_dropped = netdev->stats.rx_dropped;
1088 stats->tx_dropped = netdev->stats.tx_dropped;
1089 stats->collisions = netdev->stats.collisions;
1090 stats->rx_over_errors = netdev->stats.rx_over_errors;
1091 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1092 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1093 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1094 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1095 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1096 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1097 stats->tx_window_errors = netdev->stats.tx_window_errors;
1098 stats->rx_compressed = netdev->stats.rx_compressed;
1099 stats->tx_compressed = netdev->stats.tx_compressed;
1100}
1101
1102static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1103 enum hns3_udp_tnl_type type)
1104{
1105 struct hns3_nic_priv *priv = netdev_priv(netdev);
1106 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1107 struct hnae3_handle *h = priv->ae_handle;
1108
1109 if (udp_tnl->used && udp_tnl->dst_port == port) {
1110 udp_tnl->used++;
1111 return;
1112 }
1113
1114 if (udp_tnl->used) {
1115 netdev_warn(netdev,
1116 "UDP tunnel [%d], port [%d] offload\n", type, port);
1117 return;
1118 }
1119
1120 udp_tnl->dst_port = port;
1121 udp_tnl->used = 1;
1122 /* TBD send command to hardware to add port */
1123 if (h->ae_algo->ops->add_tunnel_udp)
1124 h->ae_algo->ops->add_tunnel_udp(h, port);
1125}
1126
1127static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1128 enum hns3_udp_tnl_type type)
1129{
1130 struct hns3_nic_priv *priv = netdev_priv(netdev);
1131 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1132 struct hnae3_handle *h = priv->ae_handle;
1133
1134 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1135 netdev_warn(netdev,
1136 "Invalid UDP tunnel port %d\n", port);
1137 return;
1138 }
1139
1140 udp_tnl->used--;
1141 if (udp_tnl->used)
1142 return;
1143
1144 udp_tnl->dst_port = 0;
1145 /* TBD send command to hardware to del port */
1146 if (h->ae_algo->ops->del_tunnel_udp)
1147 h->ae_algo->ops->del_tunnel_udp(h, port);
1148}
1149
 1150/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
 1151 * @netdev: This physical port's netdev
1152 * @ti: Tunnel information
1153 */
1154static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1155 struct udp_tunnel_info *ti)
1156{
1157 u16 port_n = ntohs(ti->port);
1158
1159 switch (ti->type) {
1160 case UDP_TUNNEL_TYPE_VXLAN:
1161 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1162 break;
1163 case UDP_TUNNEL_TYPE_GENEVE:
1164 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1165 break;
1166 default:
1167 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1168 break;
1169 }
1170}
1171
1172static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1173 struct udp_tunnel_info *ti)
1174{
1175 u16 port_n = ntohs(ti->port);
1176
1177 switch (ti->type) {
1178 case UDP_TUNNEL_TYPE_VXLAN:
1179 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1180 break;
1181 case UDP_TUNNEL_TYPE_GENEVE:
1182 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1183 break;
1184 default:
1185 break;
1186 }
1187}
1188
1189static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1190{
1191 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1192 struct hnae3_handle *h = hns3_get_handle(netdev);
1193 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1194 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1195 u8 tc = mqprio_qopt->qopt.num_tc;
1196 u16 mode = mqprio_qopt->mode;
1197 u8 hw = mqprio_qopt->qopt.hw;
1198 bool if_running;
1199 unsigned int i;
1200 int ret;
1201
1202 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1203 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1204 return -EOPNOTSUPP;
1205
1206 if (tc > HNAE3_MAX_TC)
1207 return -EINVAL;
1208
1209 if (!netdev)
1210 return -EINVAL;
1211
1212 if_running = netif_running(netdev);
1213 if (if_running) {
1214 hns3_nic_net_stop(netdev);
1215 msleep(100);
1216 }
1217
1218 ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1219 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1220 if (ret)
1221 goto out;
1222
1223 if (tc <= 1) {
1224 netdev_reset_tc(netdev);
1225 } else {
1226 ret = netdev_set_num_tc(netdev, tc);
1227 if (ret)
1228 goto out;
1229
1230 for (i = 0; i < HNAE3_MAX_TC; i++) {
1231 if (!kinfo->tc_info[i].enable)
1232 continue;
1233
1234 netdev_set_tc_queue(netdev,
1235 kinfo->tc_info[i].tc,
1236 kinfo->tc_info[i].tqp_count,
1237 kinfo->tc_info[i].tqp_offset);
1238 }
1239 }
1240
1241 ret = hns3_nic_set_real_num_queue(netdev);
1242
1243out:
1244 if (if_running)
1245 hns3_nic_net_open(netdev);
1246
1247 return ret;
1248}
1249
1250static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1251 void *type_data)
1252{
1253 if (type != TC_SETUP_MQPRIO)
1254 return -EOPNOTSUPP;
1255
1256 return hns3_setup_tc(dev, type_data);
1257}
1258
1259static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1260 __be16 proto, u16 vid)
1261{
1262 struct hnae3_handle *h = hns3_get_handle(netdev);
1263 int ret = -EIO;
1264
1265 if (h->ae_algo->ops->set_vlan_filter)
1266 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1267
1268 return ret;
1269}
1270
1271static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1272 __be16 proto, u16 vid)
1273{
1274 struct hnae3_handle *h = hns3_get_handle(netdev);
1275 int ret = -EIO;
1276
1277 if (h->ae_algo->ops->set_vlan_filter)
1278 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1279
1280 return ret;
1281}
1282
1283static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1284 u8 qos, __be16 vlan_proto)
1285{
1286 struct hnae3_handle *h = hns3_get_handle(netdev);
1287 int ret = -EIO;
1288
1289 if (h->ae_algo->ops->set_vf_vlan_filter)
1290 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1291 qos, vlan_proto);
1292
1293 return ret;
1294}
1295
1296static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1297{
1298 struct hnae3_handle *h = hns3_get_handle(netdev);
1299 bool if_running = netif_running(netdev);
1300 int ret;
1301
1302 if (!h->ae_algo->ops->set_mtu)
1303 return -EOPNOTSUPP;
1304
1305 /* if this was called with netdev up then bring netdevice down */
1306 if (if_running) {
1307 (void)hns3_nic_net_stop(netdev);
1308 msleep(100);
1309 }
1310
1311 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1312 if (ret) {
1313 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1314 ret);
1315 return ret;
1316 }
1317
1318 /* if the netdev was running earlier, bring it up again */
1319 if (if_running && hns3_nic_net_open(netdev))
1320 ret = -EINVAL;
1321
1322 return ret;
1323}
1324
1325static const struct net_device_ops hns3_nic_netdev_ops = {
1326 .ndo_open = hns3_nic_net_open,
1327 .ndo_stop = hns3_nic_net_stop,
1328 .ndo_start_xmit = hns3_nic_net_xmit,
1329 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1330 .ndo_change_mtu = hns3_nic_change_mtu,
1331 .ndo_set_features = hns3_nic_set_features,
1332 .ndo_get_stats64 = hns3_nic_get_stats64,
1333 .ndo_setup_tc = hns3_nic_setup_tc,
1334 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1335 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1336 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1337 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1338 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1339 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1340};
1341
1342/* hns3_probe - Device initialization routine
1343 * @pdev: PCI device information struct
1344 * @ent: entry in hns3_pci_tbl
1345 *
1346 * hns3_probe initializes a PF identified by a pci_dev structure.
1347 * The OS initialization, configuring of the PF private structure,
1348 * and a hardware reset occur.
1349 *
1350 * Returns 0 on success, negative on failure
1351 */
1352static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1353{
1354 struct hnae3_ae_dev *ae_dev;
1355 int ret;
1356
1357 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1358 GFP_KERNEL);
1359 if (!ae_dev) {
1360 ret = -ENOMEM;
1361 return ret;
1362 }
1363
1364 ae_dev->pdev = pdev;
1365 ae_dev->flag = ent->driver_data;
1366 ae_dev->dev_type = HNAE3_DEV_KNIC;
1367 pci_set_drvdata(pdev, ae_dev);
1368
1369 return hnae3_register_ae_dev(ae_dev);
1370}
1371
1372/* hns3_remove - Device removal routine
1373 * @pdev: PCI device information struct
1374 */
1375static void hns3_remove(struct pci_dev *pdev)
1376{
1377 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1378
1379 hnae3_unregister_ae_dev(ae_dev);
1380
1381 devm_kfree(&pdev->dev, ae_dev);
1382
1383 pci_set_drvdata(pdev, NULL);
1384}
1385
1386static struct pci_driver hns3_driver = {
1387 .name = hns3_driver_name,
1388 .id_table = hns3_pci_tbl,
1389 .probe = hns3_probe,
1390 .remove = hns3_remove,
1391};
1392
1393/* set default feature to hns3 */
1394static void hns3_set_default_feature(struct net_device *netdev)
1395{
1396 netdev->priv_flags |= IFF_UNICAST_FLT;
1397
1398 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1399 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1400 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1401 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1402 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1403
1404 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1405
1406 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1407
1408 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1409 NETIF_F_HW_VLAN_CTAG_FILTER |
1410 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1411 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1412 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1413 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1414
1415 netdev->vlan_features |=
1416 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1417 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1418 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1419 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1420 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1421
1422 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1423 NETIF_F_HW_VLAN_CTAG_FILTER |
1424 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1425 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1426 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1427 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1428}
1429
1430static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1431 struct hns3_desc_cb *cb)
1432{
1433 unsigned int order = hnae_page_order(ring);
1434 struct page *p;
1435
1436 p = dev_alloc_pages(order);
1437 if (!p)
1438 return -ENOMEM;
1439
1440 cb->priv = p;
1441 cb->page_offset = 0;
1442 cb->reuse_flag = 0;
1443 cb->buf = page_address(p);
1444 cb->length = hnae_page_size(ring);
1445 cb->type = DESC_TYPE_PAGE;
1446
1447 return 0;
1448}
1449
1450static void hns3_free_buffer(struct hns3_enet_ring *ring,
1451 struct hns3_desc_cb *cb)
1452{
1453 if (cb->type == DESC_TYPE_SKB)
1454 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1455 else if (!HNAE3_IS_TX_RING(ring))
1456 put_page((struct page *)cb->priv);
1457 memset(cb, 0, sizeof(*cb));
1458}
1459
1460static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1461{
1462 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1463 cb->length, ring_to_dma_dir(ring));
1464
1465 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1466 return -EIO;
1467
1468 return 0;
1469}
1470
1471static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1472 struct hns3_desc_cb *cb)
1473{
1474 if (cb->type == DESC_TYPE_SKB)
1475 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1476 ring_to_dma_dir(ring));
1477 else
1478 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1479 ring_to_dma_dir(ring));
1480}
1481
1482static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1483{
1484 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1485 ring->desc[i].addr = 0;
1486}
1487
1488static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1489{
1490 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1491
1492 if (!ring->desc_cb[i].dma)
1493 return;
1494
1495 hns3_buffer_detach(ring, i);
1496 hns3_free_buffer(ring, cb);
1497}
1498
1499static void hns3_free_buffers(struct hns3_enet_ring *ring)
1500{
1501 int i;
1502
1503 for (i = 0; i < ring->desc_num; i++)
1504 hns3_free_buffer_detach(ring, i);
1505}
1506
1507/* free desc along with its attached buffer */
1508static void hns3_free_desc(struct hns3_enet_ring *ring)
1509{
1510 hns3_free_buffers(ring);
1511
1512 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1513 ring->desc_num * sizeof(ring->desc[0]),
1514 DMA_BIDIRECTIONAL);
1515 ring->desc_dma_addr = 0;
1516 kfree(ring->desc);
1517 ring->desc = NULL;
1518}
1519
1520static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1521{
1522 int size = ring->desc_num * sizeof(ring->desc[0]);
1523
1524 ring->desc = kzalloc(size, GFP_KERNEL);
1525 if (!ring->desc)
1526 return -ENOMEM;
1527
1528 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1529 size, DMA_BIDIRECTIONAL);
1530 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1531 ring->desc_dma_addr = 0;
1532 kfree(ring->desc);
1533 ring->desc = NULL;
1534 return -ENOMEM;
1535 }
1536
1537 return 0;
1538}
1539
1540static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1541 struct hns3_desc_cb *cb)
1542{
1543 int ret;
1544
1545 ret = hns3_alloc_buffer(ring, cb);
1546 if (ret)
1547 goto out;
1548
1549 ret = hns3_map_buffer(ring, cb);
1550 if (ret)
1551 goto out_with_buf;
1552
1553 return 0;
1554
1555out_with_buf:
1556 hns3_free_buffer(ring, cb);
1557out:
1558 return ret;
1559}
1560
1561static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1562{
1563 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1564
1565 if (ret)
1566 return ret;
1567
1568 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1569
1570 return 0;
1571}
1572
 1573/* Allocate memory for raw packet buffers, and map them for DMA */
1574static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1575{
1576 int i, j, ret;
1577
1578 for (i = 0; i < ring->desc_num; i++) {
1579 ret = hns3_alloc_buffer_attach(ring, i);
1580 if (ret)
1581 goto out_buffer_fail;
1582 }
1583
1584 return 0;
1585
1586out_buffer_fail:
1587 for (j = i - 1; j >= 0; j--)
1588 hns3_free_buffer_detach(ring, j);
1589 return ret;
1590}
1591
 1592/* detach an in-use buffer and replace it with a reserved one */
1593static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1594 struct hns3_desc_cb *res_cb)
1595{
1596 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1597 ring->desc_cb[i] = *res_cb;
1598 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1599}
1600
1601static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1602{
1603 ring->desc_cb[i].reuse_flag = 0;
1604 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1605 + ring->desc_cb[i].page_offset);
1606}
1607
1608static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1609 int *pkts)
1610{
1611 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1612
1613 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1614 (*bytes) += desc_cb->length;
 1615 /* desc_cb will be cleaned after hns3_free_buffer_detach */
1616 hns3_free_buffer_detach(ring, ring->next_to_clean);
1617
1618 ring_ptr_move_fw(ring, next_to_clean);
1619}
1620
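/* A head index reported by hardware is valid only if it falls inside the
 * (next_to_clean, next_to_use] window of the ring.
 */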
1621static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1622{
1623 int u = ring->next_to_use;
1624 int c = ring->next_to_clean;
1625
1626 if (unlikely(h > ring->desc_num))
1627 return 0;
1628
1629 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1630}
1631
1632int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1633{
1634 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1635 struct netdev_queue *dev_queue;
1636 int bytes, pkts;
1637 int head;
1638
1639 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1640 rmb(); /* Make sure head is ready before touch any data */
1641
1642 if (is_ring_empty(ring) || head == ring->next_to_clean)
1643 return 0; /* no data to poll */
1644
1645 if (!is_valid_clean_head(ring, head)) {
1646 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1647 ring->next_to_use, ring->next_to_clean);
1648
1649 u64_stats_update_begin(&ring->syncp);
1650 ring->stats.io_err_cnt++;
1651 u64_stats_update_end(&ring->syncp);
1652 return -EIO;
1653 }
1654
1655 bytes = 0;
1656 pkts = 0;
1657 while (head != ring->next_to_clean && budget) {
1658 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1659 /* Issue prefetch for next Tx descriptor */
1660 prefetch(&ring->desc_cb[ring->next_to_clean]);
1661 budget--;
1662 }
1663
1664 ring->tqp_vector->tx_group.total_bytes += bytes;
1665 ring->tqp_vector->tx_group.total_packets += pkts;
1666
1667 u64_stats_update_begin(&ring->syncp);
1668 ring->stats.tx_bytes += bytes;
1669 ring->stats.tx_pkts += pkts;
1670 u64_stats_update_end(&ring->syncp);
1671
1672 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1673 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1674
1675 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1676 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1677 /* Make sure that anybody stopping the queue after this
1678 * sees the new next_to_clean.
1679 */
1680 smp_mb();
1681 if (netif_tx_queue_stopped(dev_queue)) {
1682 netif_tx_wake_queue(dev_queue);
1683 ring->stats.restart_queue++;
1684 }
1685 }
1686
1687 return !!budget;
1688}
1689
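/* Number of Rx descriptors that have been cleaned but not yet refilled */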
1690static int hns3_desc_unused(struct hns3_enet_ring *ring)
1691{
1692 int ntc = ring->next_to_clean;
1693 int ntu = ring->next_to_use;
1694
1695 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1696}
1697
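/* Refill 'cleand_count' Rx descriptors: reuse pages flagged for reuse,
 * otherwise allocate and map fresh ones, then update the ring head register.
 */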
1698static void
1699hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1700{
1701 struct hns3_desc_cb *desc_cb;
1702 struct hns3_desc_cb res_cbs;
1703 int i, ret;
1704
1705 for (i = 0; i < cleand_count; i++) {
1706 desc_cb = &ring->desc_cb[ring->next_to_use];
1707 if (desc_cb->reuse_flag) {
1708 u64_stats_update_begin(&ring->syncp);
1709 ring->stats.reuse_pg_cnt++;
1710 u64_stats_update_end(&ring->syncp);
1711
1712 hns3_reuse_buffer(ring, ring->next_to_use);
1713 } else {
1714 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1715 if (ret) {
1716 u64_stats_update_begin(&ring->syncp);
1717 ring->stats.sw_err_cnt++;
1718 u64_stats_update_end(&ring->syncp);
1719
1720 netdev_err(ring->tqp->handle->kinfo.netdev,
1721 "hnae reserve buffer map failed.\n");
1722 break;
1723 }
1724 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1725 }
1726
1727 ring_ptr_move_fw(ring, next_to_use);
1728 }
1729
 1730 wmb(); /* Make sure all data has been written before submit */
1731 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1732}
1733
1734/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1735 * @data: pointer to the start of the headers
 1736 * @max_size: total length of section to find headers in
1737 *
1738 * This function is meant to determine the length of headers that will
1739 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1740 * motivation of doing this is to only perform one pull for IPv4 TCP
1741 * packets so that we can do basic things like calculating the gso_size
1742 * based on the average data per packet.
1743 */
1744static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1745 unsigned int max_size)
1746{
1747 unsigned char *network;
1748 u8 hlen;
1749
1750 /* This should never happen, but better safe than sorry */
1751 if (max_size < ETH_HLEN)
1752 return max_size;
1753
1754 /* Initialize network frame pointer */
1755 network = data;
1756
1757 /* Set first protocol and move network header forward */
1758 network += ETH_HLEN;
1759
1760 /* Handle any vlan tag if present */
1761 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1762 == HNS3_RX_FLAG_VLAN_PRESENT) {
1763 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1764 return max_size;
1765
1766 network += VLAN_HLEN;
1767 }
1768
1769 /* Handle L3 protocols */
1770 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1771 == HNS3_RX_FLAG_L3ID_IPV4) {
1772 if ((typeof(max_size))(network - data) >
1773 (max_size - sizeof(struct iphdr)))
1774 return max_size;
1775
1776 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1777 hlen = (network[0] & 0x0F) << 2;
1778
1779 /* Verify hlen meets minimum size requirements */
1780 if (hlen < sizeof(struct iphdr))
1781 return network - data;
1782
1783 /* Record next protocol if header is present */
1784 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1785 == HNS3_RX_FLAG_L3ID_IPV6) {
1786 if ((typeof(max_size))(network - data) >
1787 (max_size - sizeof(struct ipv6hdr)))
1788 return max_size;
1789
1790 /* Record next protocol */
1791 hlen = sizeof(struct ipv6hdr);
1792 } else {
1793 return network - data;
1794 }
1795
1796 /* Relocate pointer to start of L4 header */
1797 network += hlen;
1798
1799 /* Finally sort out TCP/UDP */
1800 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1801 == HNS3_RX_FLAG_L4ID_TCP) {
1802 if ((typeof(max_size))(network - data) >
1803 (max_size - sizeof(struct tcphdr)))
1804 return max_size;
1805
1806 /* Access doff as a u8 to avoid unaligned access on ia64 */
1807 hlen = (network[12] & 0xF0) >> 2;
1808
1809 /* Verify hlen meets minimum size requirements */
1810 if (hlen < sizeof(struct tcphdr))
1811 return network - data;
1812
1813 network += hlen;
1814 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1815 == HNS3_RX_FLAG_L4ID_UDP) {
1816 if ((typeof(max_size))(network - data) >
1817 (max_size - sizeof(struct udphdr)))
1818 return max_size;
1819
1820 network += sizeof(struct udphdr);
1821 }
1822
1823 /* If everything has gone correctly network should be the
1824 * data section of the packet and will be the end of the header.
1825 * If not then it probably represents the end of the last recognized
1826 * header.
1827 */
1828 if ((typeof(max_size))(network - data) < max_size)
1829 return network - data;
1830 else
1831 return max_size;
1832}
1833
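/* Add the current Rx buffer to the skb as a page frag and decide whether the
 * page can be recycled: in two-buffer mode the page offset is simply flipped,
 * otherwise the offset advances until the page is used up. Pages from a
 * remote NUMA node are never reused.
 */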
1834static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1835 struct hns3_enet_ring *ring, int pull_len,
1836 struct hns3_desc_cb *desc_cb)
1837{
1838 struct hns3_desc *desc;
1839 int truesize, size;
1840 int last_offset;
1841 bool twobufs;
1842
1843 twobufs = ((PAGE_SIZE < 8192) &&
1844 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
1845
1846 desc = &ring->desc[ring->next_to_clean];
1847 size = le16_to_cpu(desc->rx.size);
1848
1849 if (twobufs) {
1850 truesize = hnae_buf_size(ring);
1851 } else {
1852 truesize = ALIGN(size, L1_CACHE_BYTES);
1853 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1854 }
1855
1856 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1857 size - pull_len, truesize - pull_len);
1858
 1859 /* Avoid re-using remote pages; flag as unreusable by default */
1860 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1861 return;
1862
1863 if (twobufs) {
1864 /* If we are only owner of page we can reuse it */
1865 if (likely(page_count(desc_cb->priv) == 1)) {
1866 /* Flip page offset to other buffer */
1867 desc_cb->page_offset ^= truesize;
1868
1869 desc_cb->reuse_flag = 1;
1870 /* bump ref count on page before it is given*/
1871 get_page(desc_cb->priv);
1872 }
1873 return;
1874 }
1875
1876 /* Move offset up to the next cache line */
1877 desc_cb->page_offset += truesize;
1878
1879 if (desc_cb->page_offset <= last_offset) {
1880 desc_cb->reuse_flag = 1;
1881 /* Bump ref count on page before it is given*/
1882 get_page(desc_cb->priv);
1883 }
1884}
1885
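/* Fill skb->ip_summed from the Rx BD: report CHECKSUM_UNNECESSARY only when
 * hardware has checked L3/L4, no checksum errors are flagged and the packet
 * type is one the hardware can validate.
 */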
1886static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1887 struct hns3_desc *desc)
1888{
1889 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1890 int l3_type, l4_type;
1891 u32 bd_base_info;
1892 int ol4_type;
1893 u32 l234info;
1894
1895 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1896 l234info = le32_to_cpu(desc->rx.l234_info);
1897
1898 skb->ip_summed = CHECKSUM_NONE;
1899
1900 skb_checksum_none_assert(skb);
1901
1902 if (!(netdev->features & NETIF_F_RXCSUM))
1903 return;
1904
1905 /* check if hardware has done checksum */
1906 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1907 return;
1908
1909 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1910 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1911 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1912 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1913 netdev_err(netdev, "L3/L4 error pkt\n");
1914 u64_stats_update_begin(&ring->syncp);
1915 ring->stats.l3l4_csum_err++;
1916 u64_stats_update_end(&ring->syncp);
1917
1918 return;
1919 }
1920
1921 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1922 HNS3_RXD_L3ID_S);
1923 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1924 HNS3_RXD_L4ID_S);
1925
1926 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1927 switch (ol4_type) {
1928 case HNS3_OL4_TYPE_MAC_IN_UDP:
1929 case HNS3_OL4_TYPE_NVGRE:
1930 skb->csum_level = 1;
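 /* fall through */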
1931 case HNS3_OL4_TYPE_NO_TUN:
1932 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1933 if (l3_type == HNS3_L3_TYPE_IPV4 ||
1934 (l3_type == HNS3_L3_TYPE_IPV6 &&
1935 (l4_type == HNS3_L4_TYPE_UDP ||
1936 l4_type == HNS3_L4_TYPE_TCP ||
1937 l4_type == HNS3_L4_TYPE_SCTP)))
1938 skb->ip_summed = CHECKSUM_UNNECESSARY;
1939 break;
1940 }
1941}
1942
1943static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
1944{
1945 napi_gro_receive(&ring->tqp_vector->napi, skb);
1946}
1947
1948static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1949 struct sk_buff **out_skb, int *out_bnum)
1950{
1951 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1952 struct hns3_desc_cb *desc_cb;
1953 struct hns3_desc *desc;
1954 struct sk_buff *skb;
1955 unsigned char *va;
1956 u32 bd_base_info;
1957 int pull_len;
1958 u32 l234info;
1959 int length;
1960 int bnum;
1961
1962 desc = &ring->desc[ring->next_to_clean];
1963 desc_cb = &ring->desc_cb[ring->next_to_clean];
1964
1965 prefetch(desc);
1966
1967 length = le16_to_cpu(desc->rx.pkt_len);
1968 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1969 l234info = le32_to_cpu(desc->rx.l234_info);
1970
1971 /* Check valid BD */
1972 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1973 return -EFAULT;
1974
1975 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1976
1977	/* Prefetch the first cache line of the first page.
1978	 * The idea is to cache a few bytes of the packet header. Our L1 cache
1979	 * line size is 64B, so we need to prefetch twice to cover 128B. Some
1980	 * CPUs have larger, 128B L1 cache lines; in that case a single
1981	 * prefetch would suffice to pull in the relevant part of the
1982	 * header.
1983	 */
1984 prefetch(va);
1985#if L1_CACHE_BYTES < 128
1986 prefetch(va + L1_CACHE_BYTES);
1987#endif
1988
1989 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1990 HNS3_RX_HEAD_SIZE);
1991 if (unlikely(!skb)) {
1992 netdev_err(netdev, "alloc rx skb fail\n");
1993
1994 u64_stats_update_begin(&ring->syncp);
1995 ring->stats.sw_err_cnt++;
1996 u64_stats_update_end(&ring->syncp);
1997
1998 return -ENOMEM;
1999 }
2000
2001 prefetchw(skb->data);
2002
2003 bnum = 1;
2004 if (length <= HNS3_RX_HEAD_SIZE) {
2005 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2006
2007 /* We can reuse buffer as-is, just make sure it is local */
2008 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2009 desc_cb->reuse_flag = 1;
2010 else /* This page cannot be reused so discard it */
2011 put_page(desc_cb->priv);
2012
2013 ring_ptr_move_fw(ring, next_to_clean);
2014 } else {
2015 u64_stats_update_begin(&ring->syncp);
2016 ring->stats.seg_pkt_cnt++;
2017 u64_stats_update_end(&ring->syncp);
2018
2019 pull_len = hns3_nic_get_headlen(va, l234info,
2020 HNS3_RX_HEAD_SIZE);
2021 memcpy(__skb_put(skb, pull_len), va,
2022 ALIGN(pull_len, sizeof(long)));
2023
2024 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2025 ring_ptr_move_fw(ring, next_to_clean);
2026
2027 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2028 desc = &ring->desc[ring->next_to_clean];
2029 desc_cb = &ring->desc_cb[ring->next_to_clean];
2030 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2031 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2032 ring_ptr_move_fw(ring, next_to_clean);
2033 bnum++;
2034 }
2035 }
2036
2037 *out_bnum = bnum;
2038
2039 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2040 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2041 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2042 u64_stats_update_begin(&ring->syncp);
2043 ring->stats.non_vld_descs++;
2044 u64_stats_update_end(&ring->syncp);
2045
2046 dev_kfree_skb_any(skb);
2047 return -EINVAL;
2048 }
2049
2050 if (unlikely((!desc->rx.pkt_len) ||
2051 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2052 netdev_err(netdev, "truncated pkt\n");
2053 u64_stats_update_begin(&ring->syncp);
2054 ring->stats.err_pkt_len++;
2055 u64_stats_update_end(&ring->syncp);
2056
2057 dev_kfree_skb_any(skb);
2058 return -EFAULT;
2059 }
2060
2061 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2062 netdev_err(netdev, "L2 error pkt\n");
2063 u64_stats_update_begin(&ring->syncp);
2064 ring->stats.l2_err++;
2065 u64_stats_update_end(&ring->syncp);
2066
2067 dev_kfree_skb_any(skb);
2068 return -EFAULT;
2069 }
2070
2071 u64_stats_update_begin(&ring->syncp);
2072 ring->stats.rx_pkts++;
2073 ring->stats.rx_bytes += skb->len;
2074 u64_stats_update_end(&ring->syncp);
2075
2076 ring->tqp_vector->rx_group.total_bytes += skb->len;
2077
2078 hns3_rx_checksum(ring, skb, desc);
2079 return 0;
2080}
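/* Illustrative note, not part of the driver: for frames larger than
 * HNS3_RX_HEAD_SIZE, pull_len bytes of header are copied into the skb's
 * linear area while the rest of the first buffer and every following BD
 * (until the frame-end FE bit is seen) are attached as page fragments via
 * hns3_nic_reuse_page(). bnum reports how many BDs the frame consumed so
 * the caller can advance its clean counters accordingly.
 */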
2081
2082int hns3_clean_rx_ring(
2083 struct hns3_enet_ring *ring, int budget,
2084 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2085{
2086#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2087 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2088 int recv_pkts, recv_bds, clean_count, err;
2089 int unused_count = hns3_desc_unused(ring);
2090 struct sk_buff *skb = NULL;
2091 int num, bnum = 0;
2092
2093 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2094	rmb(); /* Make sure num is read before any other descriptor data is touched */
2095
2096 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2097 num -= unused_count;
2098
2099 while (recv_pkts < budget && recv_bds < num) {
2100 /* Reuse or realloc buffers */
2101 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2102 hns3_nic_alloc_rx_buffers(ring,
2103 clean_count + unused_count);
2104 clean_count = 0;
2105 unused_count = hns3_desc_unused(ring);
2106 }
2107
2108 /* Poll one pkt */
2109 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2110 if (unlikely(!skb)) /* This fault cannot be repaired */
2111 goto out;
2112
2113 recv_bds += bnum;
2114 clean_count += bnum;
2115		if (unlikely(err)) { /* Skip the erroneous packet */
2116 recv_pkts++;
2117 continue;
2118 }
2119
2120		/* Hand the packet up to the network stack */
2121 skb->protocol = eth_type_trans(skb, netdev);
d43e5aca 2122 rx_fn(ring, skb);
2123
2124 recv_pkts++;
2125 }
2126
2127out:
2128	/* Make sure all data has been written before the buffers are submitted */
2129 if (clean_count + unused_count > 0)
2130 hns3_nic_alloc_rx_buffers(ring,
2131 clean_count + unused_count);
2132
2133 return recv_pkts;
2134}
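/* A minimal sketch, not part of the driver, of the batched buffer-refill
 * pattern used by hns3_clean_rx_ring() above. clean_ring_sketch(),
 * REFILL_BATCH and the refill() callback are hypothetical stand-ins for
 * the real ring state and hns3_nic_alloc_rx_buffers(): consumed BDs are
 * only handed back in batches, plus once more on exit, which amortises
 * the refill cost across many packets.
 */
#define REFILL_BATCH	16

static void clean_ring_sketch(int budget, int pending,
			      void (*refill)(int count))
{
	int cleaned = 0;

	while (budget-- > 0 && pending-- > 0) {
		if (cleaned >= REFILL_BATCH) {
			refill(cleaned);	/* re-post a batch of buffers */
			cleaned = 0;
		}
		cleaned++;			/* one BD consumed */
	}

	if (cleaned > 0)			/* return the remainder */
		refill(cleaned);
}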
2135
2136static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2137{
2138#define HNS3_RX_ULTRA_PACKET_RATE 40000
2139 enum hns3_flow_level_range new_flow_level;
2140 struct hns3_enet_tqp_vector *tqp_vector;
2141 int packets_per_secs;
2142 int bytes_per_usecs;
2143 u16 new_int_gl;
2144 int usecs;
2145
2146 if (!ring_group->int_gl)
2147 return false;
2148
2149 if (ring_group->total_packets == 0) {
2150 ring_group->int_gl = HNS3_INT_GL_50K;
2151 ring_group->flow_level = HNS3_FLOW_LOW;
2152 return true;
2153 }
2154
2155	/* Simple throttle rate management
2156 * 0-10MB/s lower (50000 ints/s)
2157 * 10-20MB/s middle (20000 ints/s)
2158 * 20-1249MB/s high (18000 ints/s)
2159 * > 40000pps ultra (8000 ints/s)
2160 */
2161 new_flow_level = ring_group->flow_level;
2162 new_int_gl = ring_group->int_gl;
2163 tqp_vector = ring_group->ring->tqp_vector;
2164 usecs = (ring_group->int_gl << 1);
2165 bytes_per_usecs = ring_group->total_bytes / usecs;
2166	/* scale to packets per second (1000000 microseconds per second) */
2167 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2168
2169 switch (new_flow_level) {
2170 case HNS3_FLOW_LOW:
2171 if (bytes_per_usecs > 10)
2172 new_flow_level = HNS3_FLOW_MID;
2173 break;
2174 case HNS3_FLOW_MID:
2175 if (bytes_per_usecs > 20)
2176 new_flow_level = HNS3_FLOW_HIGH;
2177 else if (bytes_per_usecs <= 10)
2178 new_flow_level = HNS3_FLOW_LOW;
2179 break;
2180 case HNS3_FLOW_HIGH:
2181 case HNS3_FLOW_ULTRA:
2182 default:
2183 if (bytes_per_usecs <= 20)
2184 new_flow_level = HNS3_FLOW_MID;
2185 break;
2186 }
2187
2188 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2189 (&tqp_vector->rx_group == ring_group))
2190 new_flow_level = HNS3_FLOW_ULTRA;
2191
2192 switch (new_flow_level) {
2193 case HNS3_FLOW_LOW:
2194 new_int_gl = HNS3_INT_GL_50K;
2195 break;
2196 case HNS3_FLOW_MID:
2197 new_int_gl = HNS3_INT_GL_20K;
2198 break;
2199 case HNS3_FLOW_HIGH:
2200 new_int_gl = HNS3_INT_GL_18K;
2201 break;
2202 case HNS3_FLOW_ULTRA:
2203 new_int_gl = HNS3_INT_GL_8K;
2204 break;
2205 default:
2206 break;
2207 }
2208
2209 ring_group->total_bytes = 0;
2210 ring_group->total_packets = 0;
2211 ring_group->flow_level = new_flow_level;
2212 if (new_int_gl != ring_group->int_gl) {
2213 ring_group->int_gl = new_int_gl;
2214 return true;
2215 }
2216 return false;
2217}
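/* Illustrative note, not part of the driver: the measured byte and packet
 * rates move flow_level between LOW, MID, HIGH and ULTRA, and each level
 * maps to a fixed GL (interrupt gap) setting of roughly 50K, 20K, 18K or
 * 8K interrupts per second respectively. The function returns true only
 * when the resulting int_gl differs from the current value, so the caller
 * knows whether the coalescing register needs to be rewritten.
 */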
2218
2219static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2220{
2221 u16 rx_int_gl, tx_int_gl;
2222 bool rx, tx;
2223
2224 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2225 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2226 rx_int_gl = tqp_vector->rx_group.int_gl;
2227 tx_int_gl = tqp_vector->tx_group.int_gl;
2228 if (rx && tx) {
2229 if (rx_int_gl > tx_int_gl) {
2230 tqp_vector->tx_group.int_gl = rx_int_gl;
2231 tqp_vector->tx_group.flow_level =
2232 tqp_vector->rx_group.flow_level;
2233 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2234 } else {
2235 tqp_vector->rx_group.int_gl = tx_int_gl;
2236 tqp_vector->rx_group.flow_level =
2237 tqp_vector->tx_group.flow_level;
2238 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2239 }
2240 }
2241}
2242
2243static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2244{
2245 struct hns3_enet_ring *ring;
2246 int rx_pkt_total = 0;
2247
2248 struct hns3_enet_tqp_vector *tqp_vector =
2249 container_of(napi, struct hns3_enet_tqp_vector, napi);
2250 bool clean_complete = true;
2251 int rx_budget;
2252
2253 /* Since the actual Tx work is minimal, we can give the Tx a larger
2254 * budget and be more aggressive about cleaning up the Tx descriptors.
2255 */
2256 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2257 if (!hns3_clean_tx_ring(ring, budget))
2258 clean_complete = false;
2259 }
2260
2261	/* make sure the rx ring budget is not smaller than 1 */
2262 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2263
2264 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2265 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2266 hns3_rx_skb);
2267
2268 if (rx_cleaned >= rx_budget)
2269 clean_complete = false;
2270
2271 rx_pkt_total += rx_cleaned;
2272 }
2273
2274 tqp_vector->rx_group.total_packets += rx_pkt_total;
2275
2276 if (!clean_complete)
2277 return budget;
2278
2279 napi_complete(napi);
2280 hns3_update_new_int_gl(tqp_vector);
2281 hns3_mask_vector_irq(tqp_vector, 1);
2282
2283 return rx_pkt_total;
2284}
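/* Illustrative note, not part of the driver: the NAPI budget is split
 * evenly across this vector's RX rings (never below 1), and only when both
 * TX and RX cleaning finish within budget does the handler complete NAPI,
 * refresh the coalescing setting and re-enable the vector interrupt;
 * otherwise the full budget is returned so NAPI polls again.
 */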
2285
2286static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2287 struct hnae3_ring_chain_node *head)
2288{
2289 struct pci_dev *pdev = tqp_vector->handle->pdev;
2290 struct hnae3_ring_chain_node *cur_chain = head;
2291 struct hnae3_ring_chain_node *chain;
2292 struct hns3_enet_ring *tx_ring;
2293 struct hns3_enet_ring *rx_ring;
2294
2295 tx_ring = tqp_vector->tx_group.ring;
2296 if (tx_ring) {
2297 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2298 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2299 HNAE3_RING_TYPE_TX);
2300
2301 cur_chain->next = NULL;
2302
2303 while (tx_ring->next) {
2304 tx_ring = tx_ring->next;
2305
2306 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2307 GFP_KERNEL);
2308 if (!chain)
2309 return -ENOMEM;
2310
2311 cur_chain->next = chain;
2312 chain->tqp_index = tx_ring->tqp->tqp_index;
2313 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2314 HNAE3_RING_TYPE_TX);
2315
2316 cur_chain = chain;
2317 }
2318 }
2319
2320 rx_ring = tqp_vector->rx_group.ring;
2321 if (!tx_ring && rx_ring) {
2322 cur_chain->next = NULL;
2323 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2324 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2325 HNAE3_RING_TYPE_RX);
2326
2327 rx_ring = rx_ring->next;
2328 }
2329
2330 while (rx_ring) {
2331 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2332 if (!chain)
2333 return -ENOMEM;
2334
2335 cur_chain->next = chain;
2336 chain->tqp_index = rx_ring->tqp->tqp_index;
2337 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2338 HNAE3_RING_TYPE_RX);
2339 cur_chain = chain;
2340
2341 rx_ring = rx_ring->next;
2342 }
2343
2344 return 0;
2345}
2346
2347static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2348 struct hnae3_ring_chain_node *head)
2349{
2350 struct pci_dev *pdev = tqp_vector->handle->pdev;
2351 struct hnae3_ring_chain_node *chain_tmp, *chain;
2352
2353 chain = head->next;
2354
2355 while (chain) {
2356 chain_tmp = chain->next;
2357 devm_kfree(&pdev->dev, chain);
2358 chain = chain_tmp;
2359 }
2360}
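/* Illustrative note, not part of the driver: the chain's head node lives on
 * the caller's stack (vector_ring_chain in hns3_nic_init_vector_data()), so
 * only the devm-allocated nodes reachable from head->next are freed here.
 */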
2361
2362static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2363 struct hns3_enet_ring *ring)
2364{
2365 ring->next = group->ring;
2366 group->ring = ring;
2367
2368 group->count++;
2369}
2370
2371static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2372{
2373 struct hnae3_ring_chain_node vector_ring_chain;
2374 struct hnae3_handle *h = priv->ae_handle;
2375 struct hns3_enet_tqp_vector *tqp_vector;
2376 struct hnae3_vector_info *vector;
2377 struct pci_dev *pdev = h->pdev;
2378 u16 tqp_num = h->kinfo.num_tqps;
2379 u16 vector_num;
2380 int ret = 0;
2381 u16 i;
2382
2383	/* RSS size, the number of online CPUs and vector_num should match */
2384 /* Should consider 2p/4p later */
2385 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2386 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2387 GFP_KERNEL);
2388 if (!vector)
2389 return -ENOMEM;
2390
2391 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2392
2393 priv->vector_num = vector_num;
2394 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2395 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2396 GFP_KERNEL);
2397 if (!priv->tqp_vector)
2398 return -ENOMEM;
2399
2400 for (i = 0; i < tqp_num; i++) {
2401 u16 vector_i = i % vector_num;
2402
2403 tqp_vector = &priv->tqp_vector[vector_i];
2404
2405 hns3_add_ring_to_group(&tqp_vector->tx_group,
2406 priv->ring_data[i].ring);
2407
2408 hns3_add_ring_to_group(&tqp_vector->rx_group,
2409 priv->ring_data[i + tqp_num].ring);
2410
2411 tqp_vector->idx = vector_i;
2412 tqp_vector->mask_addr = vector[vector_i].io_addr;
2413 tqp_vector->vector_irq = vector[vector_i].vector;
2414 tqp_vector->num_tqps++;
2415
2416 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2417 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2418 }
2419
2420 for (i = 0; i < vector_num; i++) {
2421 tqp_vector = &priv->tqp_vector[i];
2422
2423 tqp_vector->rx_group.total_bytes = 0;
2424 tqp_vector->rx_group.total_packets = 0;
2425 tqp_vector->tx_group.total_bytes = 0;
2426 tqp_vector->tx_group.total_packets = 0;
2427 hns3_vector_gl_rl_init(tqp_vector);
2428 tqp_vector->handle = h;
2429
2430 ret = hns3_get_vector_ring_chain(tqp_vector,
2431 &vector_ring_chain);
2432 if (ret)
2433 goto out;
2434
2435 ret = h->ae_algo->ops->map_ring_to_vector(h,
2436 tqp_vector->vector_irq, &vector_ring_chain);
2437 if (ret)
2438 goto out;
2439
2440 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2441
2442 netif_napi_add(priv->netdev, &tqp_vector->napi,
2443 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2444 }
2445
2446out:
2447 devm_kfree(&pdev->dev, vector);
2448 return ret;
2449}
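/* Illustrative note, not part of the driver: rings are assigned to vectors
 * round-robin via "i % vector_num", so with, say, 16 TQPs on an 8-CPU
 * system (vector_num capped at 8) each vector ends up polling two TX rings
 * and the two matching RX rings from a single NAPI context.
 */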
2450
2451static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2452{
2453 struct hnae3_ring_chain_node vector_ring_chain;
2454 struct hnae3_handle *h = priv->ae_handle;
2455 struct hns3_enet_tqp_vector *tqp_vector;
2456 struct pci_dev *pdev = h->pdev;
2457 int i, ret;
2458
2459 for (i = 0; i < priv->vector_num; i++) {
2460 tqp_vector = &priv->tqp_vector[i];
2461
2462 ret = hns3_get_vector_ring_chain(tqp_vector,
2463 &vector_ring_chain);
2464 if (ret)
2465 return ret;
2466
2467 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2468 tqp_vector->vector_irq, &vector_ring_chain);
2469 if (ret)
2470 return ret;
2471
2472 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2473
2474 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2475 (void)irq_set_affinity_hint(
2476 priv->tqp_vector[i].vector_irq,
2477 NULL);
2478 devm_free_irq(&pdev->dev,
2479 priv->tqp_vector[i].vector_irq,
2480 &priv->tqp_vector[i]);
2481 }
2482
2483 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2484
2485 netif_napi_del(&priv->tqp_vector[i].napi);
2486 }
2487
2488 devm_kfree(&pdev->dev, priv->tqp_vector);
2489
2490 return 0;
2491}
2492
2493static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2494 int ring_type)
2495{
2496 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2497 int queue_num = priv->ae_handle->kinfo.num_tqps;
2498 struct pci_dev *pdev = priv->ae_handle->pdev;
2499 struct hns3_enet_ring *ring;
2500
2501 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2502 if (!ring)
2503 return -ENOMEM;
2504
2505 if (ring_type == HNAE3_RING_TYPE_TX) {
2506 ring_data[q->tqp_index].ring = ring;
66b44730 2507 ring_data[q->tqp_index].queue_index = q->tqp_index;
2508 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2509 } else {
2510 ring_data[q->tqp_index + queue_num].ring = ring;
66b44730 2511 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2512 ring->io_base = q->io_base;
2513 }
2514
2515 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2516
2517 ring->tqp = q;
2518 ring->desc = NULL;
2519 ring->desc_cb = NULL;
2520 ring->dev = priv->dev;
2521 ring->desc_dma_addr = 0;
2522 ring->buf_size = q->buf_size;
2523 ring->desc_num = q->desc_num;
2524 ring->next_to_use = 0;
2525 ring->next_to_clean = 0;
2526
2527 return 0;
2528}
2529
2530static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2531 struct hns3_nic_priv *priv)
2532{
2533 int ret;
2534
2535 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2536 if (ret)
2537 return ret;
2538
2539 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2540 if (ret)
2541 return ret;
2542
2543 return 0;
2544}
2545
2546static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2547{
2548 struct hnae3_handle *h = priv->ae_handle;
2549 struct pci_dev *pdev = h->pdev;
2550 int i, ret;
2551
2552 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2553 sizeof(*priv->ring_data) * 2,
2554 GFP_KERNEL);
2555 if (!priv->ring_data)
2556 return -ENOMEM;
2557
2558 for (i = 0; i < h->kinfo.num_tqps; i++) {
2559 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2560 if (ret)
2561 goto err;
2562 }
2563
2564 return 0;
2565err:
2566 devm_kfree(&pdev->dev, priv->ring_data);
2567 return ret;
2568}
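/* Illustrative note, not part of the driver: priv->ring_data holds
 * 2 * num_tqps entries; index i is the TX ring of queue i and index
 * i + num_tqps is the RX ring of the same queue, which is why RX lookups
 * elsewhere in this file add the queue count to the index.
 */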
2569
2570static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2571{
2572 int ret;
2573
2574 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2575 return -EINVAL;
2576
2577 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2578 GFP_KERNEL);
2579 if (!ring->desc_cb) {
2580 ret = -ENOMEM;
2581 goto out;
2582 }
2583
2584 ret = hns3_alloc_desc(ring);
2585 if (ret)
2586 goto out_with_desc_cb;
2587
2588 if (!HNAE3_IS_TX_RING(ring)) {
2589 ret = hns3_alloc_ring_buffers(ring);
2590 if (ret)
2591 goto out_with_desc;
2592 }
2593
2594 return 0;
2595
2596out_with_desc:
2597 hns3_free_desc(ring);
2598out_with_desc_cb:
2599 kfree(ring->desc_cb);
2600 ring->desc_cb = NULL;
2601out:
2602 return ret;
2603}
2604
2605static void hns3_fini_ring(struct hns3_enet_ring *ring)
2606{
2607 hns3_free_desc(ring);
2608 kfree(ring->desc_cb);
2609 ring->desc_cb = NULL;
2610 ring->next_to_clean = 0;
2611 ring->next_to_use = 0;
2612}
2613
1db9b1bf 2614static int hns3_buf_size2type(u32 buf_size)
2615{
2616 int bd_size_type;
2617
2618 switch (buf_size) {
2619 case 512:
2620 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2621 break;
2622 case 1024:
2623 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2624 break;
2625 case 2048:
2626 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2627 break;
2628 case 4096:
2629 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2630 break;
2631 default:
2632 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2633 }
2634
2635 return bd_size_type;
2636}
2637
2638static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2639{
2640 dma_addr_t dma = ring->desc_dma_addr;
2641 struct hnae3_queue *q = ring->tqp;
2642
2643 if (!HNAE3_IS_TX_RING(ring)) {
2644 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2645 (u32)dma);
2646 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2647 (u32)((dma >> 31) >> 1));
2648
2649 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2650 hns3_buf_size2type(ring->buf_size));
2651 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2652 ring->desc_num / 8 - 1);
2653
2654 } else {
2655 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2656 (u32)dma);
2657 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2658 (u32)((dma >> 31) >> 1));
2659
2660 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2661 hns3_buf_size2type(ring->buf_size));
2662 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2663 ring->desc_num / 8 - 1);
2664 }
2665}
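/* Illustrative note, not part of the driver: the upper half of the DMA
 * address is written as (u32)((dma >> 31) >> 1) rather than dma >> 32 so
 * the expression stays well defined even when dma_addr_t is only 32 bits
 * wide, where a single shift by 32 would be undefined behaviour in C.
 */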
2666
5668abda 2667int hns3_init_all_ring(struct hns3_nic_priv *priv)
2668{
2669 struct hnae3_handle *h = priv->ae_handle;
2670 int ring_num = h->kinfo.num_tqps * 2;
2671 int i, j;
2672 int ret;
2673
2674 for (i = 0; i < ring_num; i++) {
2675 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2676 if (ret) {
2677 dev_err(priv->dev,
2678 "Alloc ring memory fail! ret=%d\n", ret);
2679 goto out_when_alloc_ring_memory;
2680 }
2681
2682 hns3_init_ring_hw(priv->ring_data[i].ring);
2683
2684 u64_stats_init(&priv->ring_data[i].ring->syncp);
2685 }
2686
2687 return 0;
2688
2689out_when_alloc_ring_memory:
2690 for (j = i - 1; j >= 0; j--)
ee83f776 2691 hns3_fini_ring(priv->ring_data[j].ring);
2692
2693 return -ENOMEM;
2694}
2695
5668abda 2696int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2697{
2698 struct hnae3_handle *h = priv->ae_handle;
2699 int i;
2700
2701 for (i = 0; i < h->kinfo.num_tqps; i++) {
2702 if (h->ae_algo->ops->reset_queue)
2703 h->ae_algo->ops->reset_queue(h, i);
2704
2705 hns3_fini_ring(priv->ring_data[i].ring);
2706 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2707 }
2708
2709 return 0;
2710}
2711
2712/* Set the MAC address if it is configured; otherwise leave it to the AE driver */
2713static void hns3_init_mac_addr(struct net_device *netdev)
2714{
2715 struct hns3_nic_priv *priv = netdev_priv(netdev);
2716 struct hnae3_handle *h = priv->ae_handle;
2717 u8 mac_addr_temp[ETH_ALEN];
2718
2719 if (h->ae_algo->ops->get_mac_addr) {
2720 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2721 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2722 }
2723
2724 /* Check if the MAC address is valid, if not get a random one */
2725 if (!is_valid_ether_addr(netdev->dev_addr)) {
2726 eth_hw_addr_random(netdev);
2727 dev_warn(priv->dev, "using random MAC address %pM\n",
2728 netdev->dev_addr);
76ad4f0e 2729 }
2730
2731 if (h->ae_algo->ops->set_mac_addr)
2732 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2733
2734}
2735
2736static void hns3_nic_set_priv_ops(struct net_device *netdev)
2737{
2738 struct hns3_nic_priv *priv = netdev_priv(netdev);
2739
2740 if ((netdev->features & NETIF_F_TSO) ||
2741 (netdev->features & NETIF_F_TSO6)) {
2742 priv->ops.fill_desc = hns3_fill_desc_tso;
2743 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2744 } else {
2745 priv->ops.fill_desc = hns3_fill_desc;
2746 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2747 }
2748}
2749
2750static int hns3_client_init(struct hnae3_handle *handle)
2751{
2752 struct pci_dev *pdev = handle->pdev;
2753 struct hns3_nic_priv *priv;
2754 struct net_device *netdev;
2755 int ret;
2756
2757 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2758 handle->kinfo.num_tqps);
2759 if (!netdev)
2760 return -ENOMEM;
2761
2762 priv = netdev_priv(netdev);
2763 priv->dev = &pdev->dev;
2764 priv->netdev = netdev;
2765 priv->ae_handle = handle;
2766
2767 handle->kinfo.netdev = netdev;
2768 handle->priv = (void *)priv;
2769
2770 hns3_init_mac_addr(netdev);
2771
2772 hns3_set_default_feature(netdev);
2773
2774 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2775 netdev->priv_flags |= IFF_UNICAST_FLT;
2776 netdev->netdev_ops = &hns3_nic_netdev_ops;
2777 SET_NETDEV_DEV(netdev, &pdev->dev);
2778 hns3_ethtool_set_ops(netdev);
2779 hns3_nic_set_priv_ops(netdev);
2780
2781 /* Carrier off reporting is important to ethtool even BEFORE open */
2782 netif_carrier_off(netdev);
2783
2784 ret = hns3_get_ring_config(priv);
2785 if (ret) {
2786 ret = -ENOMEM;
2787 goto out_get_ring_cfg;
2788 }
2789
2790 ret = hns3_nic_init_vector_data(priv);
2791 if (ret) {
2792 ret = -ENOMEM;
2793 goto out_init_vector_data;
2794 }
2795
2796 ret = hns3_init_all_ring(priv);
2797 if (ret) {
2798 ret = -ENOMEM;
2799 goto out_init_ring_data;
2800 }
2801
2802 ret = register_netdev(netdev);
2803 if (ret) {
2804 dev_err(priv->dev, "probe register netdev fail!\n");
2805 goto out_reg_netdev_fail;
2806 }
2807
2808 hns3_dcbnl_setup(handle);
2809
2810 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
2811 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2812
2813 return ret;
2814
2815out_reg_netdev_fail:
2816out_init_ring_data:
2817 (void)hns3_nic_uninit_vector_data(priv);
2818 priv->ring_data = NULL;
2819out_init_vector_data:
2820out_get_ring_cfg:
2821 priv->ae_handle = NULL;
2822 free_netdev(netdev);
2823 return ret;
2824}
2825
2826static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2827{
2828 struct net_device *netdev = handle->kinfo.netdev;
2829 struct hns3_nic_priv *priv = netdev_priv(netdev);
2830 int ret;
2831
2832 if (netdev->reg_state != NETREG_UNINITIALIZED)
2833 unregister_netdev(netdev);
2834
2835 ret = hns3_nic_uninit_vector_data(priv);
2836 if (ret)
2837 netdev_err(netdev, "uninit vector error\n");
2838
2839 ret = hns3_uninit_all_ring(priv);
2840 if (ret)
2841 netdev_err(netdev, "uninit ring error\n");
2842
2843 priv->ring_data = NULL;
2844
2845 free_netdev(netdev);
2846}
2847
2848static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2849{
2850 struct net_device *netdev = handle->kinfo.netdev;
2851
2852 if (!netdev)
2853 return;
2854
2855 if (linkup) {
2856 netif_carrier_on(netdev);
2857 netif_tx_wake_all_queues(netdev);
2858 netdev_info(netdev, "link up\n");
2859 } else {
2860 netif_carrier_off(netdev);
2861 netif_tx_stop_all_queues(netdev);
2862 netdev_info(netdev, "link down\n");
2863 }
2864}
2865
2866static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
2867{
2868 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
2869 struct net_device *ndev = kinfo->netdev;
075cfdd6 2870 bool if_running;
2871 int ret;
2872 u8 i;
2873
2874 if (tc > HNAE3_MAX_TC)
2875 return -EINVAL;
2876
2877 if (!ndev)
2878 return -ENODEV;
2879
2880 if_running = netif_running(ndev);
2881
2882 ret = netdev_set_num_tc(ndev, tc);
2883 if (ret)
2884 return ret;
2885
2886 if (if_running) {
2887 (void)hns3_nic_net_stop(ndev);
2888 msleep(100);
2889 }
2890
2891 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
2892 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
2893 if (ret)
2894 goto err_out;
2895
2896 if (tc <= 1) {
2897 netdev_reset_tc(ndev);
2898 goto out;
2899 }
2900
2901 for (i = 0; i < HNAE3_MAX_TC; i++) {
2902 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
2903
2904 if (tc_info->enable)
2905 netdev_set_tc_queue(ndev,
2906 tc_info->tc,
2907 tc_info->tqp_count,
2908 tc_info->tqp_offset);
2909 }
2910
2911 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
2912 netdev_set_prio_tc_map(ndev, i,
2913 kinfo->prio_tc[i]);
2914 }
2915
2916out:
2917 ret = hns3_nic_set_real_num_queue(ndev);
2918
2919err_out:
2920 if (if_running)
2921 (void)hns3_nic_net_open(ndev);
2922
2923 return ret;
2924}
2925
1db9b1bf 2926static const struct hnae3_client_ops client_ops = {
2927 .init_instance = hns3_client_init,
2928 .uninit_instance = hns3_client_uninit,
2929 .link_status_change = hns3_link_status_change,
9df8f79a 2930 .setup_tc = hns3_client_setup_tc,
2931};
2932
2933/* hns3_init_module - Driver registration routine
2934 * hns3_init_module is the first routine called when the driver is
2935 * loaded. All it does is register the hnae3 client and the PCI driver.
2936 */
2937static int __init hns3_init_module(void)
2938{
2939 int ret;
2940
2941 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
2942 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2943
2944 client.type = HNAE3_CLIENT_KNIC;
2945 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2946 hns3_driver_name);
2947
2948 client.ops = &client_ops;
2949
2950 ret = hnae3_register_client(&client);
2951 if (ret)
2952 return ret;
2953
2954 ret = pci_register_driver(&hns3_driver);
2955 if (ret)
2956 hnae3_unregister_client(&client);
2957
2958 return ret;
2959}
2960module_init(hns3_init_module);
2961
2962/* hns3_exit_module - Driver exit cleanup routine
2963 * hns3_exit_module is called just before the driver is removed
2964 * from memory.
2965 */
2966static void __exit hns3_exit_module(void)
2967{
2968 pci_unregister_driver(&hns3_driver);
2969 hnae3_unregister_client(&client);
2970}
2971module_exit(hns3_exit_module);
2972
2973MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2974MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2975MODULE_LICENSE("GPL");
2976MODULE_ALIAS("pci:hns-nic");