drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/if_vlan.h>
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/skbuff.h>
19#include <linux/sctp.h>
20#include <linux/vermagic.h>
21#include <net/gre.h>
 22#include <net/pkt_cls.h>
23#include <net/vxlan.h>
24
25#include "hnae3.h"
26#include "hns3_enet.h"
27
 28static const char hns3_driver_name[] = "hns3";
29const char hns3_driver_version[] = VERMAGIC_STRING;
30static const char hns3_driver_string[] =
31 "Hisilicon Ethernet Network Driver for Hip08 Family";
32static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
33static struct hnae3_client client;
34
35/* hns3_pci_tbl - PCI Device ID Table
36 *
37 * Last entry must be all 0s
38 *
39 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
40 * Class, Class Mask, private data (not used) }
41 */
42static const struct pci_device_id hns3_pci_tbl[] = {
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
 45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
 46 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
 48 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
 50 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
 52 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
 54 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
55 /* required last entry */
56 {0, }
57};
58MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
59
60static irqreturn_t hns3_irq_handle(int irq, void *dev)
61{
62 struct hns3_enet_tqp_vector *tqp_vector = dev;
63
64 napi_schedule(&tqp_vector->napi);
65
66 return IRQ_HANDLED;
67}
68
69static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
70{
71 struct hns3_enet_tqp_vector *tqp_vectors;
72 unsigned int i;
73
74 for (i = 0; i < priv->vector_num; i++) {
75 tqp_vectors = &priv->tqp_vector[i];
76
77 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
78 continue;
79
80 /* release the irq resource */
81 free_irq(tqp_vectors->vector_irq, tqp_vectors);
82 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
83 }
84}
85
86static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
87{
88 struct hns3_enet_tqp_vector *tqp_vectors;
89 int txrx_int_idx = 0;
90 int rx_int_idx = 0;
91 int tx_int_idx = 0;
92 unsigned int i;
93 int ret;
94
95 for (i = 0; i < priv->vector_num; i++) {
96 tqp_vectors = &priv->tqp_vector[i];
97
98 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
99 continue;
100
101 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
102 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
103 "%s-%s-%d", priv->netdev->name, "TxRx",
104 txrx_int_idx++);
105 txrx_int_idx++;
106 } else if (tqp_vectors->rx_group.ring) {
107 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
108 "%s-%s-%d", priv->netdev->name, "Rx",
109 rx_int_idx++);
110 } else if (tqp_vectors->tx_group.ring) {
111 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
112 "%s-%s-%d", priv->netdev->name, "Tx",
113 tx_int_idx++);
114 } else {
115 /* Skip this unused q_vector */
116 continue;
117 }
118
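		/* Force NUL-termination in case the name was truncated by the
		 * snprintf() calls above.
		 */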
119 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
120
121 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
122 tqp_vectors->name,
123 tqp_vectors);
124 if (ret) {
125 netdev_err(priv->netdev, "request irq(%d) fail\n",
126 tqp_vectors->vector_irq);
127 return ret;
128 }
129
130 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
131 }
132
133 return 0;
134}
135
136static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
137 u32 mask_en)
138{
139 writel(mask_en, tqp_vector->mask_addr);
140}
141
142static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
143{
144 napi_enable(&tqp_vector->napi);
145
146 /* enable vector */
147 hns3_mask_vector_irq(tqp_vector, 1);
148}
149
150static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
151{
152 /* disable vector */
153 hns3_mask_vector_irq(tqp_vector, 0);
154
155 disable_irq(tqp_vector->vector_irq);
156 napi_disable(&tqp_vector->napi);
157}
158
159static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
160 u32 gl_value)
161{
 162 /* this defines the configuration for GL (Interrupt Gap Limiter).
 163 * GL defines the gap between interrupts.
 164 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
 165 */
166 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
167 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
168 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
169}
170
171static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
172 u32 rl_value)
173{
 174 /* this defines the configuration for RL (Interrupt Rate Limiter).
 175 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
 176 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
 177 */
178 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
179}
180
181static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
182{
183 /* initialize the configuration for interrupt coalescing.
184 * 1. GL (Interrupt Gap Limiter)
185 * 2. RL (Interrupt Rate Limiter)
186 */
187
 188 /* Default: enable interrupt coalescing */
189 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
190 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
191 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
192 /* for now we are disabling Interrupt RL - we
193 * will re-enable later
194 */
195 hns3_set_vector_coalesc_rl(tqp_vector, 0);
196 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
197 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
198}
199
 200static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 201{
 202 struct hnae3_handle *h = hns3_get_handle(netdev);
203 struct hnae3_knic_private_info *kinfo = &h->kinfo;
204 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
205 int ret;
206
207 ret = netif_set_real_num_tx_queues(netdev, queue_size);
208 if (ret) {
209 netdev_err(netdev,
210 "netif_set_real_num_tx_queues fail, ret=%d!\n",
211 ret);
212 return ret;
213 }
214
215 ret = netif_set_real_num_rx_queues(netdev, queue_size);
216 if (ret) {
217 netdev_err(netdev,
218 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
219 return ret;
220 }
221
222 return 0;
223}
224
225static int hns3_nic_net_up(struct net_device *netdev)
226{
227 struct hns3_nic_priv *priv = netdev_priv(netdev);
228 struct hnae3_handle *h = priv->ae_handle;
229 int i, j;
230 int ret;
231
232 /* get irq resource for all vectors */
233 ret = hns3_nic_init_irq(priv);
234 if (ret) {
235 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
236 return ret;
237 }
238
239 /* enable the vectors */
240 for (i = 0; i < priv->vector_num; i++)
241 hns3_vector_enable(&priv->tqp_vector[i]);
242
243 /* start the ae_dev */
244 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
245 if (ret)
246 goto out_start_err;
247
248 return 0;
249
250out_start_err:
251 for (j = i - 1; j >= 0; j--)
252 hns3_vector_disable(&priv->tqp_vector[j]);
253
254 hns3_nic_uninit_irq(priv);
255
256 return ret;
257}
258
259static int hns3_nic_net_open(struct net_device *netdev)
260{
261 int ret;
262
263 netif_carrier_off(netdev);
264
265 ret = hns3_nic_set_real_num_queue(netdev);
266 if (ret)
 267 return ret;
268
269 ret = hns3_nic_net_up(netdev);
270 if (ret) {
271 netdev_err(netdev,
272 "hns net up fail, ret=%d!\n", ret);
273 return ret;
274 }
275
276 return 0;
277}
278
279static void hns3_nic_net_down(struct net_device *netdev)
280{
281 struct hns3_nic_priv *priv = netdev_priv(netdev);
282 const struct hnae3_ae_ops *ops;
283 int i;
284
285 /* stop ae_dev */
286 ops = priv->ae_handle->ae_algo->ops;
287 if (ops->stop)
288 ops->stop(priv->ae_handle);
289
290 /* disable vectors */
291 for (i = 0; i < priv->vector_num; i++)
292 hns3_vector_disable(&priv->tqp_vector[i]);
293
294 /* free irq resources */
295 hns3_nic_uninit_irq(priv);
296}
297
298static int hns3_nic_net_stop(struct net_device *netdev)
299{
300 netif_tx_stop_all_queues(netdev);
301 netif_carrier_off(netdev);
302
303 hns3_nic_net_down(netdev);
304
305 return 0;
306}
307
308static int hns3_nic_uc_sync(struct net_device *netdev,
309 const unsigned char *addr)
310{
 311 struct hnae3_handle *h = hns3_get_handle(netdev);
312
313 if (h->ae_algo->ops->add_uc_addr)
314 return h->ae_algo->ops->add_uc_addr(h, addr);
315
316 return 0;
317}
318
319static int hns3_nic_uc_unsync(struct net_device *netdev,
320 const unsigned char *addr)
321{
 322 struct hnae3_handle *h = hns3_get_handle(netdev);
323
324 if (h->ae_algo->ops->rm_uc_addr)
325 return h->ae_algo->ops->rm_uc_addr(h, addr);
326
327 return 0;
328}
329
330static int hns3_nic_mc_sync(struct net_device *netdev,
331 const unsigned char *addr)
332{
 333 struct hnae3_handle *h = hns3_get_handle(netdev);
 334
 335 if (h->ae_algo->ops->add_mc_addr)
336 return h->ae_algo->ops->add_mc_addr(h, addr);
337
338 return 0;
339}
340
341static int hns3_nic_mc_unsync(struct net_device *netdev,
342 const unsigned char *addr)
343{
 344 struct hnae3_handle *h = hns3_get_handle(netdev);
 345
 346 if (h->ae_algo->ops->rm_mc_addr)
347 return h->ae_algo->ops->rm_mc_addr(h, addr);
348
349 return 0;
350}
351
 352static void hns3_nic_set_rx_mode(struct net_device *netdev)
 353{
 354 struct hnae3_handle *h = hns3_get_handle(netdev);
355
356 if (h->ae_algo->ops->set_promisc_mode) {
357 if (netdev->flags & IFF_PROMISC)
358 h->ae_algo->ops->set_promisc_mode(h, 1);
359 else
360 h->ae_algo->ops->set_promisc_mode(h, 0);
361 }
362 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
363 netdev_err(netdev, "sync uc address fail\n");
364 if (netdev->flags & IFF_MULTICAST)
365 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
366 netdev_err(netdev, "sync mc address fail\n");
367}
368
369static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
370 u16 *mss, u32 *type_cs_vlan_tso)
371{
372 u32 l4_offset, hdr_len;
373 union l3_hdr_info l3;
374 union l4_hdr_info l4;
375 u32 l4_paylen;
376 int ret;
377
378 if (!skb_is_gso(skb))
379 return 0;
380
381 ret = skb_cow_head(skb, 0);
382 if (ret)
383 return ret;
384
385 l3.hdr = skb_network_header(skb);
386 l4.hdr = skb_transport_header(skb);
387
388 /* Software should clear the IPv4's checksum field when tso is
389 * needed.
390 */
391 if (l3.v4->version == 4)
392 l3.v4->check = 0;
393
394 /* tunnel packet.*/
395 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
396 SKB_GSO_GRE_CSUM |
397 SKB_GSO_UDP_TUNNEL |
398 SKB_GSO_UDP_TUNNEL_CSUM)) {
399 if ((!(skb_shinfo(skb)->gso_type &
400 SKB_GSO_PARTIAL)) &&
401 (skb_shinfo(skb)->gso_type &
402 SKB_GSO_UDP_TUNNEL_CSUM)) {
403 /* Software should clear the udp's checksum
404 * field when tso is needed.
405 */
406 l4.udp->check = 0;
407 }
408 /* reset l3&l4 pointers from outer to inner headers */
409 l3.hdr = skb_inner_network_header(skb);
410 l4.hdr = skb_inner_transport_header(skb);
411
412 /* Software should clear the IPv4's checksum field when
413 * tso is needed.
414 */
415 if (l3.v4->version == 4)
416 l3.v4->check = 0;
417 }
418
419 /* normal or tunnel packet*/
420 l4_offset = l4.hdr - skb->data;
421 hdr_len = (l4.tcp->doff * 4) + l4_offset;
422
423 /* remove payload length from inner pseudo checksum when tso*/
424 l4_paylen = skb->len - l4_offset;
425 csum_replace_by_diff(&l4.tcp->check,
426 (__force __wsum)htonl(l4_paylen));
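	/* For TSO the hardware recomputes the TCP checksum for every segment,
	 * so the full payload length must not be part of the pseudo-header
	 * checksum the stack seeded in the TCP header; it is subtracted here.
	 */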
427
428 /* find the txbd field values */
429 *paylen = skb->len - hdr_len;
430 hnae_set_bit(*type_cs_vlan_tso,
431 HNS3_TXD_TSO_B, 1);
432
433 /* get MSS for TSO */
434 *mss = skb_shinfo(skb)->gso_size;
435
436 return 0;
437}
438
439static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
440 u8 *il4_proto)
441{
442 union {
443 struct iphdr *v4;
444 struct ipv6hdr *v6;
445 unsigned char *hdr;
446 } l3;
447 unsigned char *l4_hdr;
448 unsigned char *exthdr;
449 u8 l4_proto_tmp;
450 __be16 frag_off;
451
 452 /* find outer header pointer */
453 l3.hdr = skb_network_header(skb);
454 l4_hdr = skb_inner_transport_header(skb);
455
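	/* For IPv6, any extension headers are skipped to reach the real L4
	 * protocol; for IPv4 the protocol field is read directly.
	 */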
456 if (skb->protocol == htons(ETH_P_IPV6)) {
457 exthdr = l3.hdr + sizeof(*l3.v6);
458 l4_proto_tmp = l3.v6->nexthdr;
459 if (l4_hdr != exthdr)
460 ipv6_skip_exthdr(skb, exthdr - skb->data,
461 &l4_proto_tmp, &frag_off);
462 } else if (skb->protocol == htons(ETH_P_IP)) {
463 l4_proto_tmp = l3.v4->protocol;
464 } else {
465 return -EINVAL;
466 }
467
468 *ol4_proto = l4_proto_tmp;
469
470 /* tunnel packet */
471 if (!skb->encapsulation) {
472 *il4_proto = 0;
 473 return 0;
474 }
475
 476 /* find inner header pointer */
477 l3.hdr = skb_inner_network_header(skb);
478 l4_hdr = skb_inner_transport_header(skb);
479
480 if (l3.v6->version == 6) {
481 exthdr = l3.hdr + sizeof(*l3.v6);
482 l4_proto_tmp = l3.v6->nexthdr;
483 if (l4_hdr != exthdr)
484 ipv6_skip_exthdr(skb, exthdr - skb->data,
485 &l4_proto_tmp, &frag_off);
486 } else if (l3.v4->version == 4) {
487 l4_proto_tmp = l3.v4->protocol;
488 }
489
490 *il4_proto = l4_proto_tmp;
491
492 return 0;
493}
494
495static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
496 u8 il4_proto, u32 *type_cs_vlan_tso,
497 u32 *ol_type_vlan_len_msec)
498{
499 union {
500 struct iphdr *v4;
501 struct ipv6hdr *v6;
502 unsigned char *hdr;
503 } l3;
504 union {
505 struct tcphdr *tcp;
506 struct udphdr *udp;
507 struct gre_base_hdr *gre;
508 unsigned char *hdr;
509 } l4;
510 unsigned char *l2_hdr;
511 u8 l4_proto = ol4_proto;
512 u32 ol2_len;
513 u32 ol3_len;
514 u32 ol4_len;
515 u32 l2_len;
516 u32 l3_len;
517
518 l3.hdr = skb_network_header(skb);
519 l4.hdr = skb_transport_header(skb);
520
521 /* compute L2 header size for normal packet, defined in 2 Bytes */
522 l2_len = l3.hdr - skb->data;
523 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
524 HNS3_TXD_L2LEN_S, l2_len >> 1);
525
526 /* tunnel packet*/
527 if (skb->encapsulation) {
528 /* compute OL2 header size, defined in 2 Bytes */
529 ol2_len = l2_len;
530 hnae_set_field(*ol_type_vlan_len_msec,
531 HNS3_TXD_L2LEN_M,
532 HNS3_TXD_L2LEN_S, ol2_len >> 1);
533
534 /* compute OL3 header size, defined in 4 Bytes */
535 ol3_len = l4.hdr - l3.hdr;
536 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
537 HNS3_TXD_L3LEN_S, ol3_len >> 2);
538
539 /* MAC in UDP, MAC in GRE (0x6558)*/
540 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
541 /* switch MAC header ptr from outer to inner header.*/
542 l2_hdr = skb_inner_mac_header(skb);
543
544 /* compute OL4 header size, defined in 4 Bytes. */
545 ol4_len = l2_hdr - l4.hdr;
546 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
547 HNS3_TXD_L4LEN_S, ol4_len >> 2);
548
549 /* switch IP header ptr from outer to inner header */
550 l3.hdr = skb_inner_network_header(skb);
551
552 /* compute inner l2 header size, defined in 2 Bytes. */
553 l2_len = l3.hdr - l2_hdr;
554 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
555 HNS3_TXD_L2LEN_S, l2_len >> 1);
556 } else {
 557 /* skb packet types not supported by hardware;
 558 * the txbd len fields are not filled.
 559 */
560 return;
561 }
562
563 /* switch L4 header pointer from outer to inner */
564 l4.hdr = skb_inner_transport_header(skb);
565
566 l4_proto = il4_proto;
567 }
568
569 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
570 l3_len = l4.hdr - l3.hdr;
571 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
572 HNS3_TXD_L3LEN_S, l3_len >> 2);
573
574 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
575 switch (l4_proto) {
576 case IPPROTO_TCP:
577 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
578 HNS3_TXD_L4LEN_S, l4.tcp->doff);
579 break;
580 case IPPROTO_SCTP:
581 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
582 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
583 break;
584 case IPPROTO_UDP:
585 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
586 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
587 break;
588 default:
 589 /* skb packet types not supported by hardware;
 590 * the txbd len fields are not filled.
 591 */
592 return;
593 }
594}
595
596static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
597 u8 il4_proto, u32 *type_cs_vlan_tso,
598 u32 *ol_type_vlan_len_msec)
599{
600 union {
601 struct iphdr *v4;
602 struct ipv6hdr *v6;
603 unsigned char *hdr;
604 } l3;
605 u32 l4_proto = ol4_proto;
606
607 l3.hdr = skb_network_header(skb);
608
609 /* define OL3 type and tunnel type(OL4).*/
610 if (skb->encapsulation) {
611 /* define outer network header type.*/
612 if (skb->protocol == htons(ETH_P_IP)) {
613 if (skb_is_gso(skb))
614 hnae_set_field(*ol_type_vlan_len_msec,
615 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
616 HNS3_OL3T_IPV4_CSUM);
617 else
618 hnae_set_field(*ol_type_vlan_len_msec,
619 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
620 HNS3_OL3T_IPV4_NO_CSUM);
621
622 } else if (skb->protocol == htons(ETH_P_IPV6)) {
623 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
624 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
625 }
626
627 /* define tunnel type(OL4).*/
628 switch (l4_proto) {
629 case IPPROTO_UDP:
630 hnae_set_field(*ol_type_vlan_len_msec,
631 HNS3_TXD_TUNTYPE_M,
632 HNS3_TXD_TUNTYPE_S,
633 HNS3_TUN_MAC_IN_UDP);
634 break;
635 case IPPROTO_GRE:
636 hnae_set_field(*ol_type_vlan_len_msec,
637 HNS3_TXD_TUNTYPE_M,
638 HNS3_TXD_TUNTYPE_S,
639 HNS3_TUN_NVGRE);
640 break;
641 default:
 642 /* drop the skb if hardware doesn't support the tunnel type,
 643 * because hardware can't calculate the checksum when doing TSO.
 644 */
645 if (skb_is_gso(skb))
646 return -EDOM;
647
 648 /* the stack has already computed the IP header checksum;
 649 * compute the L4 checksum in software since this is not TSO.
 650 */
651 skb_checksum_help(skb);
652 return 0;
653 }
654
655 l3.hdr = skb_inner_network_header(skb);
656 l4_proto = il4_proto;
657 }
658
659 if (l3.v4->version == 4) {
660 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
661 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
662
663 /* the stack computes the IP header already, the only time we
664 * need the hardware to recompute it is in the case of TSO.
665 */
666 if (skb_is_gso(skb))
667 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
668
669 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
670 } else if (l3.v6->version == 6) {
671 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
672 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
673 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
674 }
675
676 switch (l4_proto) {
677 case IPPROTO_TCP:
678 hnae_set_field(*type_cs_vlan_tso,
679 HNS3_TXD_L4T_M,
680 HNS3_TXD_L4T_S,
681 HNS3_L4T_TCP);
682 break;
683 case IPPROTO_UDP:
684 hnae_set_field(*type_cs_vlan_tso,
685 HNS3_TXD_L4T_M,
686 HNS3_TXD_L4T_S,
687 HNS3_L4T_UDP);
688 break;
689 case IPPROTO_SCTP:
690 hnae_set_field(*type_cs_vlan_tso,
691 HNS3_TXD_L4T_M,
692 HNS3_TXD_L4T_S,
693 HNS3_L4T_SCTP);
694 break;
695 default:
 696 /* drop the skb if hardware doesn't support the L4 protocol,
 697 * because hardware can't calculate the checksum when doing TSO.
 698 */
699 if (skb_is_gso(skb))
700 return -EDOM;
701
 702 /* the stack has already computed the IP header checksum;
 703 * compute the L4 checksum in software since this is not TSO.
 704 */
705 skb_checksum_help(skb);
706 return 0;
707 }
708
709 return 0;
710}
711
712static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
713{
714 /* Config bd buffer end */
 715 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
 716 HNS3_TXD_BDTYPE_S, 0);
717 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
718 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
719 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
720}
721
722static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
723 int size, dma_addr_t dma, int frag_end,
724 enum hns_desc_type type)
725{
726 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
727 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
728 u32 ol_type_vlan_len_msec = 0;
729 u16 bdtp_fe_sc_vld_ra_ri = 0;
730 u32 type_cs_vlan_tso = 0;
731 struct sk_buff *skb;
732 u32 paylen = 0;
733 u16 mss = 0;
734 __be16 protocol;
735 u8 ol4_proto;
736 u8 il4_proto;
737 int ret;
738
739 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
740 desc_cb->priv = priv;
741 desc_cb->length = size;
742 desc_cb->dma = dma;
743 desc_cb->type = type;
744
745 /* now, fill the descriptor */
746 desc->addr = cpu_to_le64(dma);
747 desc->tx.send_size = cpu_to_le16((u16)size);
748 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
749 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
750
751 if (type == DESC_TYPE_SKB) {
752 skb = (struct sk_buff *)priv;
 753 paylen = skb->len;
754
755 if (skb->ip_summed == CHECKSUM_PARTIAL) {
756 skb_reset_mac_len(skb);
757 protocol = skb->protocol;
758
759 /* vlan packet*/
760 if (protocol == htons(ETH_P_8021Q)) {
761 protocol = vlan_get_protocol(skb);
762 skb->protocol = protocol;
763 }
764 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
765 if (ret)
766 return ret;
767 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
768 &type_cs_vlan_tso,
769 &ol_type_vlan_len_msec);
770 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
771 &type_cs_vlan_tso,
772 &ol_type_vlan_len_msec);
773 if (ret)
774 return ret;
775
776 ret = hns3_set_tso(skb, &paylen, &mss,
777 &type_cs_vlan_tso);
778 if (ret)
779 return ret;
780 }
781
782 /* Set txbd */
783 desc->tx.ol_type_vlan_len_msec =
784 cpu_to_le32(ol_type_vlan_len_msec);
785 desc->tx.type_cs_vlan_tso_len =
786 cpu_to_le32(type_cs_vlan_tso);
 787 desc->tx.paylen = cpu_to_le32(paylen);
788 desc->tx.mss = cpu_to_le16(mss);
789 }
790
791 /* move ring pointer to next.*/
792 ring_ptr_move_fw(ring, next_to_use);
793
794 return 0;
795}
796
797static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
798 int size, dma_addr_t dma, int frag_end,
799 enum hns_desc_type type)
800{
801 unsigned int frag_buf_num;
802 unsigned int k;
803 int sizeoflast;
804 int ret;
805
806 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
807 sizeoflast = size % HNS3_MAX_BD_SIZE;
808 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
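	/* e.g. size == 2 * HNS3_MAX_BD_SIZE + 10 gives frag_buf_num == 3 and
	 * sizeoflast == 10: two full-sized BDs followed by one 10-byte BD.
	 */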
809
810 /* When the frag size is bigger than hardware, split this frag */
811 for (k = 0; k < frag_buf_num; k++) {
812 ret = hns3_fill_desc(ring, priv,
813 (k == frag_buf_num - 1) ?
814 sizeoflast : HNS3_MAX_BD_SIZE,
815 dma + HNS3_MAX_BD_SIZE * k,
816 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
817 (type == DESC_TYPE_SKB && !k) ?
818 DESC_TYPE_SKB : DESC_TYPE_PAGE);
819 if (ret)
820 return ret;
821 }
822
823 return 0;
824}
825
826static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
827 struct hns3_enet_ring *ring)
828{
829 struct sk_buff *skb = *out_skb;
830 struct skb_frag_struct *frag;
831 int bdnum_for_frag;
832 int frag_num;
833 int buf_num;
834 int size;
835 int i;
836
837 size = skb_headlen(skb);
838 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
839
840 frag_num = skb_shinfo(skb)->nr_frags;
841 for (i = 0; i < frag_num; i++) {
842 frag = &skb_shinfo(skb)->frags[i];
843 size = skb_frag_size(frag);
844 bdnum_for_frag =
845 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
846 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
847 return -ENOMEM;
848
849 buf_num += bdnum_for_frag;
850 }
851
852 if (buf_num > ring_space(ring))
853 return -EBUSY;
854
855 *bnum = buf_num;
856 return 0;
857}
858
859static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
860 struct hns3_enet_ring *ring)
861{
862 struct sk_buff *skb = *out_skb;
863 int buf_num;
864
865 /* No. of segments (plus a header) */
866 buf_num = skb_shinfo(skb)->nr_frags + 1;
867
868 if (buf_num > ring_space(ring))
869 return -EBUSY;
870
871 *bnum = buf_num;
872
873 return 0;
874}
875
876static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
877{
878 struct device *dev = ring_to_dev(ring);
879 unsigned int i;
880
881 for (i = 0; i < ring->desc_num; i++) {
882 /* check if this is where we started */
883 if (ring->next_to_use == next_to_use_orig)
884 break;
885
886 /* unmap the descriptor dma address */
887 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
888 dma_unmap_single(dev,
889 ring->desc_cb[ring->next_to_use].dma,
890 ring->desc_cb[ring->next_to_use].length,
891 DMA_TO_DEVICE);
892 else
893 dma_unmap_page(dev,
894 ring->desc_cb[ring->next_to_use].dma,
895 ring->desc_cb[ring->next_to_use].length,
896 DMA_TO_DEVICE);
897
898 /* rollback one */
899 ring_ptr_move_bw(ring, next_to_use);
900 }
901}
902
 903netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
904{
905 struct hns3_nic_priv *priv = netdev_priv(netdev);
906 struct hns3_nic_ring_data *ring_data =
907 &tx_ring_data(priv, skb->queue_mapping);
908 struct hns3_enet_ring *ring = ring_data->ring;
909 struct device *dev = priv->dev;
910 struct netdev_queue *dev_queue;
911 struct skb_frag_struct *frag;
912 int next_to_use_head;
913 int next_to_use_frag;
914 dma_addr_t dma;
915 int buf_num;
916 int seg_num;
917 int size;
918 int ret;
919 int i;
920
921 /* Prefetch the data used later */
922 prefetch(skb->data);
923
924 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
925 case -EBUSY:
926 u64_stats_update_begin(&ring->syncp);
927 ring->stats.tx_busy++;
928 u64_stats_update_end(&ring->syncp);
929
930 goto out_net_tx_busy;
931 case -ENOMEM:
932 u64_stats_update_begin(&ring->syncp);
933 ring->stats.sw_err_cnt++;
934 u64_stats_update_end(&ring->syncp);
935 netdev_err(netdev, "no memory to xmit!\n");
936
937 goto out_err_tx_ok;
938 default:
939 break;
940 }
941
942 /* No. of segments (plus a header) */
943 seg_num = skb_shinfo(skb)->nr_frags + 1;
944 /* Fill the first part */
945 size = skb_headlen(skb);
946
947 next_to_use_head = ring->next_to_use;
948
949 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
950 if (dma_mapping_error(dev, dma)) {
951 netdev_err(netdev, "TX head DMA map failed\n");
952 ring->stats.sw_err_cnt++;
953 goto out_err_tx_ok;
954 }
955
956 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
957 DESC_TYPE_SKB);
958 if (ret)
959 goto head_dma_map_err;
960
961 next_to_use_frag = ring->next_to_use;
962 /* Fill the fragments */
963 for (i = 1; i < seg_num; i++) {
964 frag = &skb_shinfo(skb)->frags[i - 1];
965 size = skb_frag_size(frag);
966 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
967 if (dma_mapping_error(dev, dma)) {
968 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
969 ring->stats.sw_err_cnt++;
970 goto frag_dma_map_err;
971 }
972 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
973 seg_num - 1 == i ? 1 : 0,
974 DESC_TYPE_PAGE);
975
976 if (ret)
977 goto frag_dma_map_err;
978 }
979
 980 /* All fragments are filled; notify the stack and ring the doorbell */
981 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
982 netdev_tx_sent_queue(dev_queue, skb->len);
983
984 wmb(); /* Commit all data before submit */
985
986 hnae_queue_xmit(ring->tqp, buf_num);
987
988 return NETDEV_TX_OK;
989
990frag_dma_map_err:
991 hns_nic_dma_unmap(ring, next_to_use_frag);
992
993head_dma_map_err:
994 hns_nic_dma_unmap(ring, next_to_use_head);
995
996out_err_tx_ok:
997 dev_kfree_skb_any(skb);
998 return NETDEV_TX_OK;
999
1000out_net_tx_busy:
1001 netif_stop_subqueue(netdev, ring_data->queue_index);
1002 smp_mb(); /* Commit all data before submit */
1003
1004 return NETDEV_TX_BUSY;
1005}
1006
1007static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1008{
 1009 struct hnae3_handle *h = hns3_get_handle(netdev);
1010 struct sockaddr *mac_addr = p;
1011 int ret;
1012
1013 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1014 return -EADDRNOTAVAIL;
1015
1016 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1017 if (ret) {
1018 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1019 return ret;
1020 }
1021
1022 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1023
1024 return 0;
1025}
1026
1027static int hns3_nic_set_features(struct net_device *netdev,
1028 netdev_features_t features)
1029{
1030 struct hns3_nic_priv *priv = netdev_priv(netdev);
1031
1032 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1033 priv->ops.fill_desc = hns3_fill_desc_tso;
1034 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1035 } else {
1036 priv->ops.fill_desc = hns3_fill_desc;
1037 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1038 }
1039
1040 netdev->features = features;
1041 return 0;
1042}
1043
1044static void
1045hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1046{
1047 struct hns3_nic_priv *priv = netdev_priv(netdev);
1048 int queue_num = priv->ae_handle->kinfo.num_tqps;
1049 struct hns3_enet_ring *ring;
1050 unsigned int start;
1051 unsigned int idx;
1052 u64 tx_bytes = 0;
1053 u64 rx_bytes = 0;
1054 u64 tx_pkts = 0;
1055 u64 rx_pkts = 0;
1056
1057 for (idx = 0; idx < queue_num; idx++) {
1058 /* fetch the tx stats */
1059 ring = priv->ring_data[idx].ring;
1060 do {
 1061 start = u64_stats_fetch_begin_irq(&ring->syncp);
1062 tx_bytes += ring->stats.tx_bytes;
1063 tx_pkts += ring->stats.tx_pkts;
1064 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1065
1066 /* fetch the rx stats */
1067 ring = priv->ring_data[idx + queue_num].ring;
1068 do {
 1069 start = u64_stats_fetch_begin_irq(&ring->syncp);
1070 rx_bytes += ring->stats.rx_bytes;
1071 rx_pkts += ring->stats.rx_pkts;
1072 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1073 }
1074
1075 stats->tx_bytes = tx_bytes;
1076 stats->tx_packets = tx_pkts;
1077 stats->rx_bytes = rx_bytes;
1078 stats->rx_packets = rx_pkts;
1079
1080 stats->rx_errors = netdev->stats.rx_errors;
1081 stats->multicast = netdev->stats.multicast;
1082 stats->rx_length_errors = netdev->stats.rx_length_errors;
1083 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1084 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1085
1086 stats->tx_errors = netdev->stats.tx_errors;
1087 stats->rx_dropped = netdev->stats.rx_dropped;
1088 stats->tx_dropped = netdev->stats.tx_dropped;
1089 stats->collisions = netdev->stats.collisions;
1090 stats->rx_over_errors = netdev->stats.rx_over_errors;
1091 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1092 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1093 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1094 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1095 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1096 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1097 stats->tx_window_errors = netdev->stats.tx_window_errors;
1098 stats->rx_compressed = netdev->stats.rx_compressed;
1099 stats->tx_compressed = netdev->stats.tx_compressed;
1100}
1101
1102static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1103 enum hns3_udp_tnl_type type)
1104{
1105 struct hns3_nic_priv *priv = netdev_priv(netdev);
1106 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1107 struct hnae3_handle *h = priv->ae_handle;
1108
1109 if (udp_tnl->used && udp_tnl->dst_port == port) {
1110 udp_tnl->used++;
1111 return;
1112 }
1113
1114 if (udp_tnl->used) {
1115 netdev_warn(netdev,
1116 "UDP tunnel [%d], port [%d] offload\n", type, port);
1117 return;
1118 }
1119
1120 udp_tnl->dst_port = port;
1121 udp_tnl->used = 1;
1122 /* TBD send command to hardware to add port */
1123 if (h->ae_algo->ops->add_tunnel_udp)
1124 h->ae_algo->ops->add_tunnel_udp(h, port);
1125}
1126
1127static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1128 enum hns3_udp_tnl_type type)
1129{
1130 struct hns3_nic_priv *priv = netdev_priv(netdev);
1131 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1132 struct hnae3_handle *h = priv->ae_handle;
1133
1134 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1135 netdev_warn(netdev,
1136 "Invalid UDP tunnel port %d\n", port);
1137 return;
1138 }
1139
1140 udp_tnl->used--;
1141 if (udp_tnl->used)
1142 return;
1143
1144 udp_tnl->dst_port = 0;
1145 /* TBD send command to hardware to del port */
1146 if (h->ae_algo->ops->del_tunnel_udp)
 1147 h->ae_algo->ops->del_tunnel_udp(h, port);
1148}
1149
 1150/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
 1151 * @netdev: This physical port's netdev
1152 * @ti: Tunnel information
1153 */
1154static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1155 struct udp_tunnel_info *ti)
1156{
1157 u16 port_n = ntohs(ti->port);
1158
1159 switch (ti->type) {
1160 case UDP_TUNNEL_TYPE_VXLAN:
1161 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1162 break;
1163 case UDP_TUNNEL_TYPE_GENEVE:
1164 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1165 break;
1166 default:
1167 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1168 break;
1169 }
1170}
1171
1172static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1173 struct udp_tunnel_info *ti)
1174{
1175 u16 port_n = ntohs(ti->port);
1176
1177 switch (ti->type) {
1178 case UDP_TUNNEL_TYPE_VXLAN:
1179 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1180 break;
1181 case UDP_TUNNEL_TYPE_GENEVE:
1182 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1183 break;
1184 default:
1185 break;
1186 }
1187}
1188
 1189static int hns3_setup_tc(struct net_device *netdev, void *type_data)
 1190{
 1191 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
 1192 struct hnae3_handle *h = hns3_get_handle(netdev);
 1193 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1194 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1195 u8 tc = mqprio_qopt->qopt.num_tc;
1196 u16 mode = mqprio_qopt->mode;
1197 u8 hw = mqprio_qopt->qopt.hw;
1198 bool if_running;
1199 unsigned int i;
1200 int ret;
1201
1202 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1203 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1204 return -EOPNOTSUPP;
1205
1206 if (tc > HNAE3_MAX_TC)
1207 return -EINVAL;
1208
1209 if (!netdev)
1210 return -EINVAL;
1211
1212 if_running = netif_running(netdev);
1213 if (if_running) {
1214 hns3_nic_net_stop(netdev);
1215 msleep(100);
1216 }
1217
1218 ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1219 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
 1220 if (ret)
1221 goto out;
1222
1223 if (tc <= 1) {
1224 netdev_reset_tc(netdev);
1225 } else {
1226 ret = netdev_set_num_tc(netdev, tc);
1227 if (ret)
1228 goto out;
1229
1230 for (i = 0; i < HNAE3_MAX_TC; i++) {
1231 if (!kinfo->tc_info[i].enable)
1232 continue;
 1233
1234 netdev_set_tc_queue(netdev,
1235 kinfo->tc_info[i].tc,
1236 kinfo->tc_info[i].tqp_count,
1237 kinfo->tc_info[i].tqp_offset);
 1238 }
1239 }
1240
1241 ret = hns3_nic_set_real_num_queue(netdev);
1242
1243out:
1244 if (if_running)
1245 hns3_nic_net_open(netdev);
1246
1247 return ret;
1248}
1249
 1250static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
 1251 void *type_data)
 1252{
 1253 if (type != TC_SETUP_MQPRIO)
 1254 return -EOPNOTSUPP;
 1255
 1256 return hns3_setup_tc(dev, type_data);
1257}
1258
1259static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1260 __be16 proto, u16 vid)
1261{
 1262 struct hnae3_handle *h = hns3_get_handle(netdev);
1263 int ret = -EIO;
1264
1265 if (h->ae_algo->ops->set_vlan_filter)
1266 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1267
1268 return ret;
1269}
1270
1271static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1272 __be16 proto, u16 vid)
1273{
 1274 struct hnae3_handle *h = hns3_get_handle(netdev);
1275 int ret = -EIO;
1276
1277 if (h->ae_algo->ops->set_vlan_filter)
1278 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1279
1280 return ret;
1281}
1282
1283static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1284 u8 qos, __be16 vlan_proto)
1285{
 1286 struct hnae3_handle *h = hns3_get_handle(netdev);
1287 int ret = -EIO;
1288
1289 if (h->ae_algo->ops->set_vf_vlan_filter)
1290 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1291 qos, vlan_proto);
1292
1293 return ret;
1294}
1295
1296static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1297{
 1298 struct hnae3_handle *h = hns3_get_handle(netdev);
1299 bool if_running = netif_running(netdev);
1300 int ret;
1301
1302 if (!h->ae_algo->ops->set_mtu)
1303 return -EOPNOTSUPP;
1304
1305 /* if this was called with netdev up then bring netdevice down */
1306 if (if_running) {
1307 (void)hns3_nic_net_stop(netdev);
1308 msleep(100);
1309 }
1310
1311 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1312 if (ret) {
1313 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1314 ret);
1315 return ret;
1316 }
1317
1318 /* if the netdev was running earlier, bring it up again */
1319 if (if_running && hns3_nic_net_open(netdev))
1320 ret = -EINVAL;
1321
1322 return ret;
1323}
1324
1325static const struct net_device_ops hns3_nic_netdev_ops = {
1326 .ndo_open = hns3_nic_net_open,
1327 .ndo_stop = hns3_nic_net_stop,
1328 .ndo_start_xmit = hns3_nic_net_xmit,
1329 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
 1330 .ndo_change_mtu = hns3_nic_change_mtu,
1331 .ndo_set_features = hns3_nic_set_features,
1332 .ndo_get_stats64 = hns3_nic_get_stats64,
1333 .ndo_setup_tc = hns3_nic_setup_tc,
1334 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1335 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1336 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1337 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1338 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1339 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1340};
1341
1342/* hns3_probe - Device initialization routine
1343 * @pdev: PCI device information struct
1344 * @ent: entry in hns3_pci_tbl
1345 *
1346 * hns3_probe initializes a PF identified by a pci_dev structure.
1347 * The OS initialization, configuring of the PF private structure,
1348 * and a hardware reset occur.
1349 *
1350 * Returns 0 on success, negative on failure
1351 */
1352static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1353{
1354 struct hnae3_ae_dev *ae_dev;
1355 int ret;
1356
1357 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1358 GFP_KERNEL);
1359 if (!ae_dev) {
1360 ret = -ENOMEM;
1361 return ret;
1362 }
1363
1364 ae_dev->pdev = pdev;
 1365 ae_dev->flag = ent->driver_data;
1366 ae_dev->dev_type = HNAE3_DEV_KNIC;
1367 pci_set_drvdata(pdev, ae_dev);
1368
1369 return hnae3_register_ae_dev(ae_dev);
1370}
1371
1372/* hns3_remove - Device removal routine
1373 * @pdev: PCI device information struct
1374 */
1375static void hns3_remove(struct pci_dev *pdev)
1376{
1377 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1378
1379 hnae3_unregister_ae_dev(ae_dev);
1380
1381 devm_kfree(&pdev->dev, ae_dev);
1382
1383 pci_set_drvdata(pdev, NULL);
1384}
1385
1386static struct pci_driver hns3_driver = {
1387 .name = hns3_driver_name,
1388 .id_table = hns3_pci_tbl,
1389 .probe = hns3_probe,
1390 .remove = hns3_remove,
1391};
1392
1393/* set default feature to hns3 */
1394static void hns3_set_default_feature(struct net_device *netdev)
1395{
1396 netdev->priv_flags |= IFF_UNICAST_FLT;
1397
1398 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1399 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1400 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1401 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1402 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1403
1404 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1405
1406 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1407
1408 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1409 NETIF_F_HW_VLAN_CTAG_FILTER |
1410 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1411 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1412 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1413 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1414
1415 netdev->vlan_features |=
1416 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1417 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1418 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1419 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1420 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1421
1422 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1423 NETIF_F_HW_VLAN_CTAG_FILTER |
1424 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1425 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1426 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1427 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1428}
1429
1430static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1431 struct hns3_desc_cb *cb)
1432{
1433 unsigned int order = hnae_page_order(ring);
1434 struct page *p;
1435
1436 p = dev_alloc_pages(order);
1437 if (!p)
1438 return -ENOMEM;
1439
1440 cb->priv = p;
1441 cb->page_offset = 0;
1442 cb->reuse_flag = 0;
1443 cb->buf = page_address(p);
1444 cb->length = hnae_page_size(ring);
1445 cb->type = DESC_TYPE_PAGE;
1446
1447 memset(cb->buf, 0, cb->length);
1448
1449 return 0;
1450}
1451
1452static void hns3_free_buffer(struct hns3_enet_ring *ring,
1453 struct hns3_desc_cb *cb)
1454{
1455 if (cb->type == DESC_TYPE_SKB)
1456 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1457 else if (!HNAE3_IS_TX_RING(ring))
1458 put_page((struct page *)cb->priv);
1459 memset(cb, 0, sizeof(*cb));
1460}
1461
1462static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1463{
1464 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1465 cb->length, ring_to_dma_dir(ring));
1466
1467 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1468 return -EIO;
1469
1470 return 0;
1471}
1472
1473static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1474 struct hns3_desc_cb *cb)
1475{
1476 if (cb->type == DESC_TYPE_SKB)
1477 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1478 ring_to_dma_dir(ring));
1479 else
1480 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1481 ring_to_dma_dir(ring));
1482}
1483
1484static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1485{
1486 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1487 ring->desc[i].addr = 0;
1488}
1489
1490static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1491{
1492 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1493
1494 if (!ring->desc_cb[i].dma)
1495 return;
1496
1497 hns3_buffer_detach(ring, i);
1498 hns3_free_buffer(ring, cb);
1499}
1500
1501static void hns3_free_buffers(struct hns3_enet_ring *ring)
1502{
1503 int i;
1504
1505 for (i = 0; i < ring->desc_num; i++)
1506 hns3_free_buffer_detach(ring, i);
1507}
1508
1509/* free desc along with its attached buffer */
1510static void hns3_free_desc(struct hns3_enet_ring *ring)
1511{
1512 hns3_free_buffers(ring);
1513
1514 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1515 ring->desc_num * sizeof(ring->desc[0]),
1516 DMA_BIDIRECTIONAL);
1517 ring->desc_dma_addr = 0;
1518 kfree(ring->desc);
1519 ring->desc = NULL;
1520}
1521
1522static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1523{
1524 int size = ring->desc_num * sizeof(ring->desc[0]);
1525
1526 ring->desc = kzalloc(size, GFP_KERNEL);
1527 if (!ring->desc)
1528 return -ENOMEM;
1529
1530 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1531 size, DMA_BIDIRECTIONAL);
1532 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1533 ring->desc_dma_addr = 0;
1534 kfree(ring->desc);
1535 ring->desc = NULL;
1536 return -ENOMEM;
1537 }
1538
1539 return 0;
1540}
1541
1542static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1543 struct hns3_desc_cb *cb)
1544{
1545 int ret;
1546
1547 ret = hns3_alloc_buffer(ring, cb);
1548 if (ret)
1549 goto out;
1550
1551 ret = hns3_map_buffer(ring, cb);
1552 if (ret)
1553 goto out_with_buf;
1554
1555 return 0;
1556
1557out_with_buf:
1558 hns3_free_buffers(ring);
1559out:
1560 return ret;
1561}
1562
1563static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1564{
1565 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1566
1567 if (ret)
1568 return ret;
1569
1570 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1571
1572 return 0;
1573}
1574
1575/* Allocate memory for raw pkg, and map with dma */
1576static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1577{
1578 int i, j, ret;
1579
1580 for (i = 0; i < ring->desc_num; i++) {
1581 ret = hns3_alloc_buffer_attach(ring, i);
1582 if (ret)
1583 goto out_buffer_fail;
1584 }
1585
1586 return 0;
1587
1588out_buffer_fail:
1589 for (j = i - 1; j >= 0; j--)
1590 hns3_free_buffer_detach(ring, j);
1591 return ret;
1592}
1593
 1594/* detach an in-use buffer and replace it with a reserved one */
1595static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1596 struct hns3_desc_cb *res_cb)
1597{
1598 hns3_map_buffer(ring, &ring->desc_cb[i]);
1599 ring->desc_cb[i] = *res_cb;
1600 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1601}
1602
1603static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1604{
1605 ring->desc_cb[i].reuse_flag = 0;
1606 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1607 + ring->desc_cb[i].page_offset);
1608}
1609
1610static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1611 int *pkts)
1612{
1613 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1614
1615 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1616 (*bytes) += desc_cb->length;
 1617 /* desc_cb will be cleaned after hns3_free_buffer_detach */
1618 hns3_free_buffer_detach(ring, ring->next_to_clean);
1619
1620 ring_ptr_move_fw(ring, next_to_clean);
1621}
1622
1623static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1624{
1625 int u = ring->next_to_use;
1626 int c = ring->next_to_clean;
1627
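	/* The ring is circular: while next_to_use has not wrapped past
	 * next_to_clean (u > c), a valid head lies in (c, u]; after a wrap
	 * (u <= c) it lies either above c or at/below u.
	 */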
1628 if (unlikely(h > ring->desc_num))
1629 return 0;
1630
1631 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1632}
1633
1634int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1635{
1636 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1637 struct netdev_queue *dev_queue;
1638 int bytes, pkts;
1639 int head;
1640
1641 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
 1642 rmb(); /* Make sure the head is read before touching any other data */
1643
1644 if (is_ring_empty(ring) || head == ring->next_to_clean)
1645 return 0; /* no data to poll */
1646
1647 if (!is_valid_clean_head(ring, head)) {
1648 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1649 ring->next_to_use, ring->next_to_clean);
1650
1651 u64_stats_update_begin(&ring->syncp);
1652 ring->stats.io_err_cnt++;
1653 u64_stats_update_end(&ring->syncp);
1654 return -EIO;
1655 }
1656
1657 bytes = 0;
1658 pkts = 0;
1659 while (head != ring->next_to_clean && budget) {
1660 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1661 /* Issue prefetch for next Tx descriptor */
1662 prefetch(&ring->desc_cb[ring->next_to_clean]);
1663 budget--;
1664 }
1665
1666 ring->tqp_vector->tx_group.total_bytes += bytes;
1667 ring->tqp_vector->tx_group.total_packets += pkts;
1668
1669 u64_stats_update_begin(&ring->syncp);
1670 ring->stats.tx_bytes += bytes;
1671 ring->stats.tx_pkts += pkts;
1672 u64_stats_update_end(&ring->syncp);
1673
1674 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1675 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1676
1677 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1678 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1679 /* Make sure that anybody stopping the queue after this
1680 * sees the new next_to_clean.
1681 */
1682 smp_mb();
1683 if (netif_tx_queue_stopped(dev_queue)) {
1684 netif_tx_wake_queue(dev_queue);
1685 ring->stats.restart_queue++;
1686 }
1687 }
1688
1689 return !!budget;
1690}
1691
1692static int hns3_desc_unused(struct hns3_enet_ring *ring)
1693{
1694 int ntc = ring->next_to_clean;
1695 int ntu = ring->next_to_use;
1696
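	/* Number of descriptors that can be refilled: the distance from
	 * next_to_use forward around the circular ring to next_to_clean.
	 */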
1697 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1698}
1699
1700static void
 1701hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
1702{
1703 struct hns3_desc_cb *desc_cb;
1704 struct hns3_desc_cb res_cbs;
1705 int i, ret;
1706
 1707 for (i = 0; i < cleaned_count; i++) {
1708 desc_cb = &ring->desc_cb[ring->next_to_use];
1709 if (desc_cb->reuse_flag) {
1710 u64_stats_update_begin(&ring->syncp);
1711 ring->stats.reuse_pg_cnt++;
1712 u64_stats_update_end(&ring->syncp);
1713
1714 hns3_reuse_buffer(ring, ring->next_to_use);
1715 } else {
1716 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1717 if (ret) {
1718 u64_stats_update_begin(&ring->syncp);
1719 ring->stats.sw_err_cnt++;
1720 u64_stats_update_end(&ring->syncp);
1721
1722 netdev_err(ring->tqp->handle->kinfo.netdev,
1723 "hnae reserve buffer map failed.\n");
1724 break;
1725 }
1726 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1727 }
1728
1729 ring_ptr_move_fw(ring, next_to_use);
1730 }
1731
 1732 wmb(); /* Make sure all data has been written before submit */
1733 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1734}
1735
1736/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1737 * @data: pointer to the start of the headers
 1738 * @max_size: total length of section to find headers in
1739 *
1740 * This function is meant to determine the length of headers that will
1741 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1742 * motivation of doing this is to only perform one pull for IPv4 TCP
1743 * packets so that we can do basic things like calculating the gso_size
1744 * based on the average data per packet.
1745 */
1746static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1747 unsigned int max_size)
1748{
1749 unsigned char *network;
1750 u8 hlen;
1751
1752 /* This should never happen, but better safe than sorry */
1753 if (max_size < ETH_HLEN)
1754 return max_size;
1755
1756 /* Initialize network frame pointer */
1757 network = data;
1758
1759 /* Set first protocol and move network header forward */
1760 network += ETH_HLEN;
1761
1762 /* Handle any vlan tag if present */
1763 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1764 == HNS3_RX_FLAG_VLAN_PRESENT) {
1765 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1766 return max_size;
1767
1768 network += VLAN_HLEN;
1769 }
1770
1771 /* Handle L3 protocols */
1772 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1773 == HNS3_RX_FLAG_L3ID_IPV4) {
1774 if ((typeof(max_size))(network - data) >
1775 (max_size - sizeof(struct iphdr)))
1776 return max_size;
1777
1778 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1779 hlen = (network[0] & 0x0F) << 2;
1780
1781 /* Verify hlen meets minimum size requirements */
1782 if (hlen < sizeof(struct iphdr))
1783 return network - data;
1784
1785 /* Record next protocol if header is present */
1786 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1787 == HNS3_RX_FLAG_L3ID_IPV6) {
1788 if ((typeof(max_size))(network - data) >
1789 (max_size - sizeof(struct ipv6hdr)))
1790 return max_size;
1791
1792 /* Record next protocol */
1793 hlen = sizeof(struct ipv6hdr);
1794 } else {
1795 return network - data;
1796 }
1797
1798 /* Relocate pointer to start of L4 header */
1799 network += hlen;
1800
1801 /* Finally sort out TCP/UDP */
1802 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1803 == HNS3_RX_FLAG_L4ID_TCP) {
1804 if ((typeof(max_size))(network - data) >
1805 (max_size - sizeof(struct tcphdr)))
1806 return max_size;
1807
1808 /* Access doff as a u8 to avoid unaligned access on ia64 */
1809 hlen = (network[12] & 0xF0) >> 2;
1810
1811 /* Verify hlen meets minimum size requirements */
1812 if (hlen < sizeof(struct tcphdr))
1813 return network - data;
1814
1815 network += hlen;
1816 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1817 == HNS3_RX_FLAG_L4ID_UDP) {
1818 if ((typeof(max_size))(network - data) >
1819 (max_size - sizeof(struct udphdr)))
1820 return max_size;
1821
1822 network += sizeof(struct udphdr);
1823 }
1824
1825 /* If everything has gone correctly network should be the
1826 * data section of the packet and will be the end of the header.
1827 * If not then it probably represents the end of the last recognized
1828 * header.
1829 */
1830 if ((typeof(max_size))(network - data) < max_size)
1831 return network - data;
1832 else
1833 return max_size;
1834}
1835
1836static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1837 struct hns3_enet_ring *ring, int pull_len,
1838 struct hns3_desc_cb *desc_cb)
1839{
1840 struct hns3_desc *desc;
1841 int truesize, size;
1842 int last_offset;
1843 bool twobufs;
1844
1845 twobufs = ((PAGE_SIZE < 8192) &&
1846 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
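	/* With pages below 8K and 2K RX buffers each page holds exactly two
	 * buffers, so reuse simply flips page_offset between the two halves.
	 */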
1847
1848 desc = &ring->desc[ring->next_to_clean];
1849 size = le16_to_cpu(desc->rx.size);
1850
1851 if (twobufs) {
1852 truesize = hnae_buf_size(ring);
1853 } else {
1854 truesize = ALIGN(size, L1_CACHE_BYTES);
1855 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1856 }
1857
1858 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1859 size - pull_len, truesize - pull_len);
1860
 1861 /* Avoid re-using pages from a remote NUMA node; reuse_flag defaults to unreused */
1862 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1863 return;
1864
1865 if (twobufs) {
1866 /* If we are only owner of page we can reuse it */
1867 if (likely(page_count(desc_cb->priv) == 1)) {
1868 /* Flip page offset to other buffer */
1869 desc_cb->page_offset ^= truesize;
1870
1871 desc_cb->reuse_flag = 1;
1872 /* bump ref count on page before it is given*/
1873 get_page(desc_cb->priv);
1874 }
1875 return;
1876 }
1877
1878 /* Move offset up to the next cache line */
1879 desc_cb->page_offset += truesize;
1880
1881 if (desc_cb->page_offset <= last_offset) {
1882 desc_cb->reuse_flag = 1;
1883 /* Bump ref count on page before it is given*/
1884 get_page(desc_cb->priv);
1885 }
1886}
1887
1888static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1889 struct hns3_desc *desc)
1890{
1891 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1892 int l3_type, l4_type;
1893 u32 bd_base_info;
1894 int ol4_type;
1895 u32 l234info;
1896
1897 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1898 l234info = le32_to_cpu(desc->rx.l234_info);
1899
1900 skb->ip_summed = CHECKSUM_NONE;
1901
1902 skb_checksum_none_assert(skb);
1903
1904 if (!(netdev->features & NETIF_F_RXCSUM))
1905 return;
1906
1907 /* check if hardware has done checksum */
1908 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1909 return;
1910
1911 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1912 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1913 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1914 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1915 netdev_err(netdev, "L3/L4 error pkt\n");
1916 u64_stats_update_begin(&ring->syncp);
1917 ring->stats.l3l4_csum_err++;
1918 u64_stats_update_end(&ring->syncp);
1919
1920 return;
1921 }
1922
1923 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1924 HNS3_RXD_L3ID_S);
1925 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1926 HNS3_RXD_L4ID_S);
1927
1928 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1929 switch (ol4_type) {
1930 case HNS3_OL4_TYPE_MAC_IN_UDP:
1931 case HNS3_OL4_TYPE_NVGRE:
1932 skb->csum_level = 1;
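		/* fall through */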
1933 case HNS3_OL4_TYPE_NO_TUN:
1934 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1935 if (l3_type == HNS3_L3_TYPE_IPV4 ||
1936 (l3_type == HNS3_L3_TYPE_IPV6 &&
1937 (l4_type == HNS3_L4_TYPE_UDP ||
1938 l4_type == HNS3_L4_TYPE_TCP ||
1939 l4_type == HNS3_L4_TYPE_SCTP)))
1940 skb->ip_summed = CHECKSUM_UNNECESSARY;
1941 break;
1942 }
1943}
1944
1945static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
1946{
1947 napi_gro_receive(&ring->tqp_vector->napi, skb);
1948}
1949
1950static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1951 struct sk_buff **out_skb, int *out_bnum)
1952{
1953 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1954 struct hns3_desc_cb *desc_cb;
1955 struct hns3_desc *desc;
1956 struct sk_buff *skb;
1957 unsigned char *va;
1958 u32 bd_base_info;
1959 int pull_len;
1960 u32 l234info;
1961 int length;
1962 int bnum;
1963
1964 desc = &ring->desc[ring->next_to_clean];
1965 desc_cb = &ring->desc_cb[ring->next_to_clean];
1966
1967 prefetch(desc);
1968
1969 length = le16_to_cpu(desc->rx.pkt_len);
1970 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1971 l234info = le32_to_cpu(desc->rx.l234_info);
1972
1973 /* Check valid BD */
1974 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1975 return -EFAULT;
1976
1977 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1978
1979	/* Prefetch the first cache line of the first page.
1980	 * The idea is to cache a few bytes of the packet header. Our L1
1981	 * cache line size is 64B, so we prefetch twice to cover 128B. In
1982	 * practice the CPU may have larger, 128B L1 cache lines; in that
1983	 * case a single prefetch already pulls in the relevant part of
1984	 * the header.
1985	 */
1986 prefetch(va);
1987#if L1_CACHE_BYTES < 128
1988 prefetch(va + L1_CACHE_BYTES);
1989#endif
1990
1991 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1992 HNS3_RX_HEAD_SIZE);
1993 if (unlikely(!skb)) {
1994 netdev_err(netdev, "alloc rx skb fail\n");
1995
1996 u64_stats_update_begin(&ring->syncp);
1997 ring->stats.sw_err_cnt++;
1998 u64_stats_update_end(&ring->syncp);
1999
2000 return -ENOMEM;
2001 }
2002
2003 prefetchw(skb->data);
2004
2005 bnum = 1;
2006 if (length <= HNS3_RX_HEAD_SIZE) {
2007 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2008
2009 /* We can reuse buffer as-is, just make sure it is local */
2010 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2011 desc_cb->reuse_flag = 1;
2012 else /* This page cannot be reused so discard it */
2013 put_page(desc_cb->priv);
2014
2015 ring_ptr_move_fw(ring, next_to_clean);
2016 } else {
2017 u64_stats_update_begin(&ring->syncp);
2018 ring->stats.seg_pkt_cnt++;
2019 u64_stats_update_end(&ring->syncp);
2020
2021 pull_len = hns3_nic_get_headlen(va, l234info,
2022 HNS3_RX_HEAD_SIZE);
2023 memcpy(__skb_put(skb, pull_len), va,
2024 ALIGN(pull_len, sizeof(long)));
2025
2026 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2027 ring_ptr_move_fw(ring, next_to_clean);
2028
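		/* Editor's note: gather the remaining buffer descriptors of this
		 * multi-BD packet until a descriptor with the FE (frame end) bit
		 * set has been consumed.
		 */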
2029 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2030 desc = &ring->desc[ring->next_to_clean];
2031 desc_cb = &ring->desc_cb[ring->next_to_clean];
2032 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2033 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2034 ring_ptr_move_fw(ring, next_to_clean);
2035 bnum++;
2036 }
2037 }
2038
2039 *out_bnum = bnum;
2040
2041 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2042 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2043 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2044 u64_stats_update_begin(&ring->syncp);
2045 ring->stats.non_vld_descs++;
2046 u64_stats_update_end(&ring->syncp);
2047
2048 dev_kfree_skb_any(skb);
2049 return -EINVAL;
2050 }
2051
2052 if (unlikely((!desc->rx.pkt_len) ||
2053 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2054 netdev_err(netdev, "truncated pkt\n");
2055 u64_stats_update_begin(&ring->syncp);
2056 ring->stats.err_pkt_len++;
2057 u64_stats_update_end(&ring->syncp);
2058
2059 dev_kfree_skb_any(skb);
2060 return -EFAULT;
2061 }
2062
2063 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2064 netdev_err(netdev, "L2 error pkt\n");
2065 u64_stats_update_begin(&ring->syncp);
2066 ring->stats.l2_err++;
2067 u64_stats_update_end(&ring->syncp);
2068
2069 dev_kfree_skb_any(skb);
2070 return -EFAULT;
2071 }
2072
2073 u64_stats_update_begin(&ring->syncp);
2074 ring->stats.rx_pkts++;
2075 ring->stats.rx_bytes += skb->len;
2076 u64_stats_update_end(&ring->syncp);
2077
2078 ring->tqp_vector->rx_group.total_bytes += skb->len;
2079
2080 hns3_rx_checksum(ring, skb, desc);
2081 return 0;
2082}
2083
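/* Editor's note: hns3_clean_rx_ring() processes up to 'budget' received
 * packets from one RX ring, replenishing RX buffers in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE descriptors and handing each completed skb to
 * rx_fn(). It returns the number of packets processed.
 */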
2084int hns3_clean_rx_ring(
2085 struct hns3_enet_ring *ring, int budget,
2086 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2087{
2088#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2089 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2090 int recv_pkts, recv_bds, clean_count, err;
2091 int unused_count = hns3_desc_unused(ring);
2092 struct sk_buff *skb = NULL;
2093 int num, bnum = 0;
2094
2095 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2096	rmb(); /* Make sure num is read before any other descriptor data is touched */
2097
2098 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2099 num -= unused_count;
2100
2101 while (recv_pkts < budget && recv_bds < num) {
2102 /* Reuse or realloc buffers */
2103 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2104 hns3_nic_alloc_rx_buffers(ring,
2105 clean_count + unused_count);
2106 clean_count = 0;
2107 unused_count = hns3_desc_unused(ring);
2108 }
2109
2110 /* Poll one pkt */
2111 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2112		if (unlikely(!skb)) /* Unrecoverable error; stop this poll */
2113 goto out;
2114
2115 recv_bds += bnum;
2116 clean_count += bnum;
2117		if (unlikely(err)) { /* Skip over the erroneous packet */
2118 recv_pkts++;
2119 continue;
2120 }
2121
2122		/* Hand the packet up to the network stack */
2123 skb->protocol = eth_type_trans(skb, netdev);
d43e5aca 2124 rx_fn(ring, skb);
2125
2126 recv_pkts++;
2127 }
2128
2129out:
2130	/* Make sure all data has been written before submitting */
2131 if (clean_count + unused_count > 0)
2132 hns3_nic_alloc_rx_buffers(ring,
2133 clean_count + unused_count);
2134
2135 return recv_pkts;
2136}
2137
2138static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2139{
2140#define HNS3_RX_ULTRA_PACKET_RATE 40000
2141 enum hns3_flow_level_range new_flow_level;
2142 struct hns3_enet_tqp_vector *tqp_vector;
2143 int packets_per_secs;
2144 int bytes_per_usecs;
2145 u16 new_int_gl;
2146 int usecs;
2147
2148 if (!ring_group->int_gl)
2149 return false;
2150
2151 if (ring_group->total_packets == 0) {
2152 ring_group->int_gl = HNS3_INT_GL_50K;
2153 ring_group->flow_level = HNS3_FLOW_LOW;
2154 return true;
2155 }
2156
2157	/* Simple throttle-rate management:
2158	 *    0-10 MB/s    low    (50000 ints/s)
2159	 *   10-20 MB/s    middle (20000 ints/s)
2160	 *   20-1249 MB/s  high   (18000 ints/s)
2161	 *   > 40000 pps   ultra  (8000 ints/s)
2162	 */
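	/* Editor's note: bytes_per_usecs is numerically the same as MB/s
	 * (1 byte/us == 1 MB/s), so the 10/20 thresholds below correspond to
	 * the 10 MB/s and 20 MB/s boundaries in the table above.
	 */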
2163 new_flow_level = ring_group->flow_level;
2164 new_int_gl = ring_group->int_gl;
2165 tqp_vector = ring_group->ring->tqp_vector;
2166 usecs = (ring_group->int_gl << 1);
2167 bytes_per_usecs = ring_group->total_bytes / usecs;
2168	/* total_packets was accumulated over 'usecs' microseconds; scale to per second */
2169 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2170
2171 switch (new_flow_level) {
2172 case HNS3_FLOW_LOW:
2173 if (bytes_per_usecs > 10)
2174 new_flow_level = HNS3_FLOW_MID;
2175 break;
2176 case HNS3_FLOW_MID:
2177 if (bytes_per_usecs > 20)
2178 new_flow_level = HNS3_FLOW_HIGH;
2179 else if (bytes_per_usecs <= 10)
2180 new_flow_level = HNS3_FLOW_LOW;
2181 break;
2182 case HNS3_FLOW_HIGH:
2183 case HNS3_FLOW_ULTRA:
2184 default:
2185 if (bytes_per_usecs <= 20)
2186 new_flow_level = HNS3_FLOW_MID;
2187 break;
2188 }
2189
2190 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2191 (&tqp_vector->rx_group == ring_group))
2192 new_flow_level = HNS3_FLOW_ULTRA;
2193
2194 switch (new_flow_level) {
2195 case HNS3_FLOW_LOW:
2196 new_int_gl = HNS3_INT_GL_50K;
2197 break;
2198 case HNS3_FLOW_MID:
2199 new_int_gl = HNS3_INT_GL_20K;
2200 break;
2201 case HNS3_FLOW_HIGH:
2202 new_int_gl = HNS3_INT_GL_18K;
2203 break;
2204 case HNS3_FLOW_ULTRA:
2205 new_int_gl = HNS3_INT_GL_8K;
2206 break;
2207 default:
2208 break;
2209 }
2210
2211 ring_group->total_bytes = 0;
2212 ring_group->total_packets = 0;
2213 ring_group->flow_level = new_flow_level;
2214 if (new_int_gl != ring_group->int_gl) {
2215 ring_group->int_gl = new_int_gl;
2216 return true;
2217 }
2218 return false;
2219}
2220
2221static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2222{
2223 u16 rx_int_gl, tx_int_gl;
2224 bool rx, tx;
2225
2226 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2227 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2228 rx_int_gl = tqp_vector->rx_group.int_gl;
2229 tx_int_gl = tqp_vector->tx_group.int_gl;
2230 if (rx && tx) {
2231 if (rx_int_gl > tx_int_gl) {
2232 tqp_vector->tx_group.int_gl = rx_int_gl;
2233 tqp_vector->tx_group.flow_level =
2234 tqp_vector->rx_group.flow_level;
2235 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2236 } else {
2237 tqp_vector->rx_group.int_gl = tx_int_gl;
2238 tqp_vector->rx_group.flow_level =
2239 tqp_vector->tx_group.flow_level;
2240 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2241 }
2242 }
2243}
2244
2245static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2246{
2247 struct hns3_enet_ring *ring;
2248 int rx_pkt_total = 0;
2249
2250 struct hns3_enet_tqp_vector *tqp_vector =
2251 container_of(napi, struct hns3_enet_tqp_vector, napi);
2252 bool clean_complete = true;
2253 int rx_budget;
2254
2255 /* Since the actual Tx work is minimal, we can give the Tx a larger
2256 * budget and be more aggressive about cleaning up the Tx descriptors.
2257 */
2258 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2259 if (!hns3_clean_tx_ring(ring, budget))
2260 clean_complete = false;
2261 }
2262
2263	/* Make sure the RX ring budget is at least 1 */
2264 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2265
2266 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2267 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2268 hns3_rx_skb);
2269
2270 if (rx_cleaned >= rx_budget)
2271 clean_complete = false;
2272
2273 rx_pkt_total += rx_cleaned;
2274 }
2275
2276 tqp_vector->rx_group.total_packets += rx_pkt_total;
2277
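	/* Editor's note: if either TX or RX work remains, stay in polling mode
	 * by returning the full budget; otherwise complete NAPI, refresh the
	 * interrupt coalescing (GL) settings and re-arm the vector interrupt.
	 */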
2278 if (!clean_complete)
2279 return budget;
2280
2281 napi_complete(napi);
2282 hns3_update_new_int_gl(tqp_vector);
2283 hns3_mask_vector_irq(tqp_vector, 1);
2284
2285 return rx_pkt_total;
2286}
2287
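/* Editor's note: build a linked list of hnae3_ring_chain_node entries that
 * describes first the TX rings and then the RX rings served by this vector,
 * so the AE layer can map those rings to the vector's interrupt.
 */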
2288static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2289 struct hnae3_ring_chain_node *head)
2290{
2291 struct pci_dev *pdev = tqp_vector->handle->pdev;
2292 struct hnae3_ring_chain_node *cur_chain = head;
2293 struct hnae3_ring_chain_node *chain;
2294 struct hns3_enet_ring *tx_ring;
2295 struct hns3_enet_ring *rx_ring;
2296
2297 tx_ring = tqp_vector->tx_group.ring;
2298 if (tx_ring) {
2299 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2300 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2301 HNAE3_RING_TYPE_TX);
2302
2303 cur_chain->next = NULL;
2304
2305 while (tx_ring->next) {
2306 tx_ring = tx_ring->next;
2307
2308 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2309 GFP_KERNEL);
2310 if (!chain)
2311 return -ENOMEM;
2312
2313 cur_chain->next = chain;
2314 chain->tqp_index = tx_ring->tqp->tqp_index;
2315 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2316 HNAE3_RING_TYPE_TX);
2317
2318 cur_chain = chain;
2319 }
2320 }
2321
2322 rx_ring = tqp_vector->rx_group.ring;
2323 if (!tx_ring && rx_ring) {
2324 cur_chain->next = NULL;
2325 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2326 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2327 HNAE3_RING_TYPE_RX);
2328
2329 rx_ring = rx_ring->next;
2330 }
2331
2332 while (rx_ring) {
2333 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2334 if (!chain)
2335 return -ENOMEM;
2336
2337 cur_chain->next = chain;
2338 chain->tqp_index = rx_ring->tqp->tqp_index;
2339 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2340 HNAE3_RING_TYPE_RX);
2341 cur_chain = chain;
2342
2343 rx_ring = rx_ring->next;
2344 }
2345
2346 return 0;
2347}
2348
2349static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2350 struct hnae3_ring_chain_node *head)
2351{
2352 struct pci_dev *pdev = tqp_vector->handle->pdev;
2353 struct hnae3_ring_chain_node *chain_tmp, *chain;
2354
2355 chain = head->next;
2356
2357 while (chain) {
2358 chain_tmp = chain->next;
2359 devm_kfree(&pdev->dev, chain);
2360 chain = chain_tmp;
2361 }
2362}
2363
2364static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2365 struct hns3_enet_ring *ring)
2366{
2367 ring->next = group->ring;
2368 group->ring = ring;
2369
2370 group->count++;
2371}
2372
2373static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2374{
2375 struct hnae3_ring_chain_node vector_ring_chain;
2376 struct hnae3_handle *h = priv->ae_handle;
2377 struct hns3_enet_tqp_vector *tqp_vector;
2378 struct hnae3_vector_info *vector;
2379 struct pci_dev *pdev = h->pdev;
2380 u16 tqp_num = h->kinfo.num_tqps;
2381 u16 vector_num;
2382 int ret = 0;
2383 u16 i;
2384
2385	/* Ideally the RSS size, the number of online CPUs and vector_num all match */
2386	/* Should consider 2P/4P systems later */
2387 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2388 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2389 GFP_KERNEL);
2390 if (!vector)
2391 return -ENOMEM;
2392
2393 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2394
2395 priv->vector_num = vector_num;
2396 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2397 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2398 GFP_KERNEL);
2399 if (!priv->tqp_vector)
2400 return -ENOMEM;
2401
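	/* Editor's note: ring_data[0 .. tqp_num - 1] hold the TX rings and
	 * ring_data[tqp_num .. 2 * tqp_num - 1] the matching RX rings (see
	 * hns3_ring_get_cfg()); TQP i's TX and RX rings share vector
	 * i % vector_num.
	 */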
2402 for (i = 0; i < tqp_num; i++) {
2403 u16 vector_i = i % vector_num;
2404
2405 tqp_vector = &priv->tqp_vector[vector_i];
2406
2407 hns3_add_ring_to_group(&tqp_vector->tx_group,
2408 priv->ring_data[i].ring);
2409
2410 hns3_add_ring_to_group(&tqp_vector->rx_group,
2411 priv->ring_data[i + tqp_num].ring);
2412
2413 tqp_vector->idx = vector_i;
2414 tqp_vector->mask_addr = vector[vector_i].io_addr;
2415 tqp_vector->vector_irq = vector[vector_i].vector;
2416 tqp_vector->num_tqps++;
2417
2418 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2419 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2420 }
2421
2422 for (i = 0; i < vector_num; i++) {
2423 tqp_vector = &priv->tqp_vector[i];
2424
2425 tqp_vector->rx_group.total_bytes = 0;
2426 tqp_vector->rx_group.total_packets = 0;
2427 tqp_vector->tx_group.total_bytes = 0;
2428 tqp_vector->tx_group.total_packets = 0;
2429 hns3_vector_gl_rl_init(tqp_vector);
2430 tqp_vector->handle = h;
2431
2432 ret = hns3_get_vector_ring_chain(tqp_vector,
2433 &vector_ring_chain);
2434 if (ret)
2435 goto out;
2436
2437 ret = h->ae_algo->ops->map_ring_to_vector(h,
2438 tqp_vector->vector_irq, &vector_ring_chain);
2439 if (ret)
2440 goto out;
2441
2442 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2443
2444 netif_napi_add(priv->netdev, &tqp_vector->napi,
2445 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2446 }
2447
2448out:
2449 devm_kfree(&pdev->dev, vector);
2450 return ret;
2451}
2452
2453static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2454{
2455 struct hnae3_ring_chain_node vector_ring_chain;
2456 struct hnae3_handle *h = priv->ae_handle;
2457 struct hns3_enet_tqp_vector *tqp_vector;
2458 struct pci_dev *pdev = h->pdev;
2459 int i, ret;
2460
2461 for (i = 0; i < priv->vector_num; i++) {
2462 tqp_vector = &priv->tqp_vector[i];
2463
2464 ret = hns3_get_vector_ring_chain(tqp_vector,
2465 &vector_ring_chain);
2466 if (ret)
2467 return ret;
2468
2469 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2470 tqp_vector->vector_irq, &vector_ring_chain);
2471 if (ret)
2472 return ret;
2473
2474 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2475
2476 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2477 (void)irq_set_affinity_hint(
2478 priv->tqp_vector[i].vector_irq,
2479 NULL);
2480 devm_free_irq(&pdev->dev,
2481 priv->tqp_vector[i].vector_irq,
2482 &priv->tqp_vector[i]);
2483 }
2484
2485 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2486
2487 netif_napi_del(&priv->tqp_vector[i].napi);
2488 }
2489
2490 devm_kfree(&pdev->dev, priv->tqp_vector);
2491
2492 return 0;
2493}
2494
2495static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2496 int ring_type)
2497{
2498 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2499 int queue_num = priv->ae_handle->kinfo.num_tqps;
2500 struct pci_dev *pdev = priv->ae_handle->pdev;
2501 struct hns3_enet_ring *ring;
2502
2503 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2504 if (!ring)
2505 return -ENOMEM;
2506
2507	if (ring_type == HNAE3_RING_TYPE_TX) {
2508		ring_data[q->tqp_index].ring = ring;
2509		ring_data[q->tqp_index].queue_index = q->tqp_index;
2510		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2511	} else {
2512		ring_data[q->tqp_index + queue_num].ring = ring;
2513		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2514		ring->io_base = q->io_base;
2515	}
2516
2517	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2518
2519 ring->tqp = q;
2520 ring->desc = NULL;
2521 ring->desc_cb = NULL;
2522 ring->dev = priv->dev;
2523 ring->desc_dma_addr = 0;
2524 ring->buf_size = q->buf_size;
2525 ring->desc_num = q->desc_num;
2526 ring->next_to_use = 0;
2527 ring->next_to_clean = 0;
2528
2529 return 0;
2530}
2531
2532static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2533 struct hns3_nic_priv *priv)
2534{
2535 int ret;
2536
2537 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2538 if (ret)
2539 return ret;
2540
2541 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2542 if (ret)
2543 return ret;
2544
2545 return 0;
2546}
2547
2548static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2549{
2550 struct hnae3_handle *h = priv->ae_handle;
2551 struct pci_dev *pdev = h->pdev;
2552 int i, ret;
2553
2554 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2555 sizeof(*priv->ring_data) * 2,
2556 GFP_KERNEL);
2557 if (!priv->ring_data)
2558 return -ENOMEM;
2559
2560 for (i = 0; i < h->kinfo.num_tqps; i++) {
2561 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2562 if (ret)
2563 goto err;
2564 }
2565
2566 return 0;
2567err:
2568 devm_kfree(&pdev->dev, priv->ring_data);
2569 return ret;
2570}
2571
2572static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2573{
2574 int ret;
2575
2576 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2577 return -EINVAL;
2578
2579 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2580 GFP_KERNEL);
2581 if (!ring->desc_cb) {
2582 ret = -ENOMEM;
2583 goto out;
2584 }
2585
2586 ret = hns3_alloc_desc(ring);
2587 if (ret)
2588 goto out_with_desc_cb;
2589
2590 if (!HNAE3_IS_TX_RING(ring)) {
2591 ret = hns3_alloc_ring_buffers(ring);
2592 if (ret)
2593 goto out_with_desc;
2594 }
2595
2596 return 0;
2597
2598out_with_desc:
2599 hns3_free_desc(ring);
2600out_with_desc_cb:
2601 kfree(ring->desc_cb);
2602 ring->desc_cb = NULL;
2603out:
2604 return ret;
2605}
2606
2607static void hns3_fini_ring(struct hns3_enet_ring *ring)
2608{
2609 hns3_free_desc(ring);
2610 kfree(ring->desc_cb);
2611 ring->desc_cb = NULL;
2612 ring->next_to_clean = 0;
2613 ring->next_to_use = 0;
2614}
2615
1db9b1bf 2616static int hns3_buf_size2type(u32 buf_size)
2617{
2618 int bd_size_type;
2619
2620 switch (buf_size) {
2621 case 512:
2622 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2623 break;
2624 case 1024:
2625 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2626 break;
2627 case 2048:
2628 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2629 break;
2630 case 4096:
2631 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2632 break;
2633 default:
2634 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2635 }
2636
2637 return bd_size_type;
2638}
2639
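/* Editor's note: in the register writes below the high half of the 64-bit
 * DMA address is programmed as (u32)((dma >> 31) >> 1) rather than dma >> 32,
 * presumably so the shift stays well defined when dma_addr_t is only 32 bits
 * wide.
 */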
2640static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2641{
2642 dma_addr_t dma = ring->desc_dma_addr;
2643 struct hnae3_queue *q = ring->tqp;
2644
2645 if (!HNAE3_IS_TX_RING(ring)) {
2646 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2647 (u32)dma);
2648 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2649 (u32)((dma >> 31) >> 1));
2650
2651 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2652 hns3_buf_size2type(ring->buf_size));
2653 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2654 ring->desc_num / 8 - 1);
2655
2656 } else {
2657 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2658 (u32)dma);
2659 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2660 (u32)((dma >> 31) >> 1));
2661
2662 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2663 hns3_buf_size2type(ring->buf_size));
2664 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2665 ring->desc_num / 8 - 1);
2666 }
2667}
2668
5668abda 2669int hns3_init_all_ring(struct hns3_nic_priv *priv)
2670{
2671 struct hnae3_handle *h = priv->ae_handle;
2672 int ring_num = h->kinfo.num_tqps * 2;
2673 int i, j;
2674 int ret;
2675
2676 for (i = 0; i < ring_num; i++) {
2677 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2678 if (ret) {
2679 dev_err(priv->dev,
2680 "Alloc ring memory fail! ret=%d\n", ret);
2681 goto out_when_alloc_ring_memory;
2682 }
2683
2684 hns3_init_ring_hw(priv->ring_data[i].ring);
2685
2686 u64_stats_init(&priv->ring_data[i].ring->syncp);
2687 }
2688
2689 return 0;
2690
2691out_when_alloc_ring_memory:
2692 for (j = i - 1; j >= 0; j--)
ee83f776 2693 hns3_fini_ring(priv->ring_data[j].ring);
2694
2695 return -ENOMEM;
2696}
2697
5668abda 2698int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2699{
2700 struct hnae3_handle *h = priv->ae_handle;
2701 int i;
2702
2703 for (i = 0; i < h->kinfo.num_tqps; i++) {
2704 if (h->ae_algo->ops->reset_queue)
2705 h->ae_algo->ops->reset_queue(h, i);
2706
2707 hns3_fini_ring(priv->ring_data[i].ring);
2708 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2709 }
2710
2711 return 0;
2712}
2713
2714/* Use the MAC address provided by the AE if it is valid; otherwise fall back to a random one */
2715static void hns3_init_mac_addr(struct net_device *netdev)
2716{
2717 struct hns3_nic_priv *priv = netdev_priv(netdev);
2718 struct hnae3_handle *h = priv->ae_handle;
2719 u8 mac_addr_temp[ETH_ALEN];
2720
2721 if (h->ae_algo->ops->get_mac_addr) {
2722 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2723 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2724 }
2725
2726 /* Check if the MAC address is valid, if not get a random one */
2727 if (!is_valid_ether_addr(netdev->dev_addr)) {
2728 eth_hw_addr_random(netdev);
2729 dev_warn(priv->dev, "using random MAC address %pM\n",
2730 netdev->dev_addr);
76ad4f0e 2731 }
2732
2733 if (h->ae_algo->ops->set_mac_addr)
2734 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2735
2736}
2737
2738static void hns3_nic_set_priv_ops(struct net_device *netdev)
2739{
2740 struct hns3_nic_priv *priv = netdev_priv(netdev);
2741
2742 if ((netdev->features & NETIF_F_TSO) ||
2743 (netdev->features & NETIF_F_TSO6)) {
2744 priv->ops.fill_desc = hns3_fill_desc_tso;
2745 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2746 } else {
2747 priv->ops.fill_desc = hns3_fill_desc;
2748 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2749 }
2750}
2751
2752static int hns3_client_init(struct hnae3_handle *handle)
2753{
2754 struct pci_dev *pdev = handle->pdev;
2755 struct hns3_nic_priv *priv;
2756 struct net_device *netdev;
2757 int ret;
2758
2759 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2760 handle->kinfo.num_tqps);
2761 if (!netdev)
2762 return -ENOMEM;
2763
2764 priv = netdev_priv(netdev);
2765 priv->dev = &pdev->dev;
2766 priv->netdev = netdev;
2767 priv->ae_handle = handle;
2768
2769 handle->kinfo.netdev = netdev;
2770 handle->priv = (void *)priv;
2771
2772 hns3_init_mac_addr(netdev);
2773
2774 hns3_set_default_feature(netdev);
2775
2776 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2777 netdev->priv_flags |= IFF_UNICAST_FLT;
2778 netdev->netdev_ops = &hns3_nic_netdev_ops;
2779 SET_NETDEV_DEV(netdev, &pdev->dev);
2780 hns3_ethtool_set_ops(netdev);
2781 hns3_nic_set_priv_ops(netdev);
2782
2783 /* Carrier off reporting is important to ethtool even BEFORE open */
2784 netif_carrier_off(netdev);
2785
2786 ret = hns3_get_ring_config(priv);
2787 if (ret) {
2788 ret = -ENOMEM;
2789 goto out_get_ring_cfg;
2790 }
2791
2792 ret = hns3_nic_init_vector_data(priv);
2793 if (ret) {
2794 ret = -ENOMEM;
2795 goto out_init_vector_data;
2796 }
2797
2798 ret = hns3_init_all_ring(priv);
2799 if (ret) {
2800 ret = -ENOMEM;
2801 goto out_init_ring_data;
2802 }
2803
2804 ret = register_netdev(netdev);
2805 if (ret) {
2806 dev_err(priv->dev, "probe register netdev fail!\n");
2807 goto out_reg_netdev_fail;
2808 }
2809
2810 hns3_dcbnl_setup(handle);
2811
2812 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
2813 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2814
2815 return ret;
2816
2817out_reg_netdev_fail:
2818out_init_ring_data:
2819 (void)hns3_nic_uninit_vector_data(priv);
2820 priv->ring_data = NULL;
2821out_init_vector_data:
2822out_get_ring_cfg:
2823 priv->ae_handle = NULL;
2824 free_netdev(netdev);
2825 return ret;
2826}
2827
2828static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2829{
2830 struct net_device *netdev = handle->kinfo.netdev;
2831 struct hns3_nic_priv *priv = netdev_priv(netdev);
2832 int ret;
2833
2834 if (netdev->reg_state != NETREG_UNINITIALIZED)
2835 unregister_netdev(netdev);
2836
2837 ret = hns3_nic_uninit_vector_data(priv);
2838 if (ret)
2839 netdev_err(netdev, "uninit vector error\n");
2840
2841 ret = hns3_uninit_all_ring(priv);
2842 if (ret)
2843 netdev_err(netdev, "uninit ring error\n");
2844
2845 priv->ring_data = NULL;
2846
2847 free_netdev(netdev);
2848}
2849
2850static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2851{
2852 struct net_device *netdev = handle->kinfo.netdev;
2853
2854 if (!netdev)
2855 return;
2856
2857 if (linkup) {
2858 netif_carrier_on(netdev);
2859 netif_tx_wake_all_queues(netdev);
2860 netdev_info(netdev, "link up\n");
2861 } else {
2862 netif_carrier_off(netdev);
2863 netif_tx_stop_all_queues(netdev);
2864 netdev_info(netdev, "link down\n");
2865 }
2866}
2867
2868static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
2869{
2870 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
2871 struct net_device *ndev = kinfo->netdev;
075cfdd6 2872 bool if_running;
2873 int ret;
2874 u8 i;
2875
2876 if (tc > HNAE3_MAX_TC)
2877 return -EINVAL;
2878
2879 if (!ndev)
2880 return -ENODEV;
2881
2882 if_running = netif_running(ndev);
2883
2884 ret = netdev_set_num_tc(ndev, tc);
2885 if (ret)
2886 return ret;
2887
2888 if (if_running) {
2889 (void)hns3_nic_net_stop(ndev);
2890 msleep(100);
2891 }
2892
2893 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
2894 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
2895 if (ret)
2896 goto err_out;
2897
2898 if (tc <= 1) {
2899 netdev_reset_tc(ndev);
2900 goto out;
2901 }
2902
2903 for (i = 0; i < HNAE3_MAX_TC; i++) {
2904 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
2905
2906 if (tc_info->enable)
2907 netdev_set_tc_queue(ndev,
2908 tc_info->tc,
2909 tc_info->tqp_count,
2910 tc_info->tqp_offset);
2911 }
2912
2913 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
2914 netdev_set_prio_tc_map(ndev, i,
2915 kinfo->prio_tc[i]);
2916 }
2917
2918out:
2919 ret = hns3_nic_set_real_num_queue(ndev);
2920
2921err_out:
2922 if (if_running)
2923 (void)hns3_nic_net_open(ndev);
2924
2925 return ret;
2926}
2927
1db9b1bf 2928static const struct hnae3_client_ops client_ops = {
2929 .init_instance = hns3_client_init,
2930 .uninit_instance = hns3_client_uninit,
2931 .link_status_change = hns3_link_status_change,
9df8f79a 2932 .setup_tc = hns3_client_setup_tc,
2933};
2934
2935/* hns3_init_module - Driver registration routine
2936 * hns3_init_module is the first routine called when the driver is
2937 * loaded. All it does is register the HNAE3 client and the PCI driver.
2938 */
2939static int __init hns3_init_module(void)
2940{
2941 int ret;
2942
2943 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
2944 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2945
2946 client.type = HNAE3_CLIENT_KNIC;
2947 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2948 hns3_driver_name);
2949
2950 client.ops = &client_ops;
2951
2952 ret = hnae3_register_client(&client);
2953 if (ret)
2954 return ret;
2955
2956 ret = pci_register_driver(&hns3_driver);
2957 if (ret)
2958 hnae3_unregister_client(&client);
2959
2960 return ret;
2961}
2962module_init(hns3_init_module);
2963
2964/* hns3_exit_module - Driver exit cleanup routine
2965 * hns3_exit_module is called just before the driver is removed
2966 * from memory.
2967 */
2968static void __exit hns3_exit_module(void)
2969{
2970 pci_unregister_driver(&hns3_driver);
2971 hnae3_unregister_client(&client);
2972}
2973module_exit(hns3_exit_module);
2974
2975MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2976MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2977MODULE_LICENSE("GPL");
2978MODULE_ALIAS("pci:hns-nic");