drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
1 /*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/if_vlan.h>
14 #include <linux/ip.h>
15 #include <linux/ipv6.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/skbuff.h>
19 #include <linux/sctp.h>
20 #include <linux/vermagic.h>
21 #include <net/gre.h>
22 #include <net/vxlan.h>
23
24 #include "hnae3.h"
25 #include "hns3_enet.h"
26
27 const char hns3_driver_name[] = "hns3";
28 const char hns3_driver_version[] = VERMAGIC_STRING;
29 static const char hns3_driver_string[] =
30 "Hisilicon Ethernet Network Driver for Hip08 Family";
31 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
32 static struct hnae3_client client;
33
34 /* hns3_pci_tbl - PCI Device ID Table
35 *
36 * Last entry must be all 0s
37 *
38 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
39 * Class, Class Mask, private data (not used) }
40 */
41 static const struct pci_device_id hns3_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
45 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
47 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
49 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
51 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
53 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
54 /* required last entry */
55 {0, }
56 };
57 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
58
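/* Per-vector interrupt handler: simply schedule the NAPI context of the
 * TQP vector that raised the interrupt; the real work is done in poll.
 */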
59 static irqreturn_t hns3_irq_handle(int irq, void *dev)
60 {
61 struct hns3_enet_tqp_vector *tqp_vector = dev;
62
63 napi_schedule(&tqp_vector->napi);
64
65 return IRQ_HANDLED;
66 }
67
68 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
69 {
70 struct hns3_enet_tqp_vector *tqp_vectors;
71 unsigned int i;
72
73 for (i = 0; i < priv->vector_num; i++) {
74 tqp_vectors = &priv->tqp_vector[i];
75
76 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
77 continue;
78
79 /* release the irq resource */
80 free_irq(tqp_vectors->vector_irq, tqp_vectors);
81 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
82 }
83 }
84
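/* Request an IRQ for every TQP vector that has not been initialized yet.
 * The IRQ name encodes the netdev name, the ring type served by the
 * vector (TxRx, Rx or Tx) and a per-type index.
 */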
85 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
86 {
87 struct hns3_enet_tqp_vector *tqp_vectors;
88 int txrx_int_idx = 0;
89 int rx_int_idx = 0;
90 int tx_int_idx = 0;
91 unsigned int i;
92 int ret;
93
94 for (i = 0; i < priv->vector_num; i++) {
95 tqp_vectors = &priv->tqp_vector[i];
96
97 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
98 continue;
99
100 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
101 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
102 "%s-%s-%d", priv->netdev->name, "TxRx",
103 txrx_int_idx++);
104 txrx_int_idx++;
105 } else if (tqp_vectors->rx_group.ring) {
106 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
107 "%s-%s-%d", priv->netdev->name, "Rx",
108 rx_int_idx++);
109 } else if (tqp_vectors->tx_group.ring) {
110 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
111 "%s-%s-%d", priv->netdev->name, "Tx",
112 tx_int_idx++);
113 } else {
114 /* Skip this unused q_vector */
115 continue;
116 }
117
118 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
119
120 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
121 tqp_vectors->name,
122 tqp_vectors);
123 if (ret) {
124 netdev_err(priv->netdev, "request irq(%d) fail\n",
125 tqp_vectors->vector_irq);
126 return ret;
127 }
128
129 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
130 }
131
132 return 0;
133 }
134
135 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
136 u32 mask_en)
137 {
138 writel(mask_en, tqp_vector->mask_addr);
139 }
140
141 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
142 {
143 napi_enable(&tqp_vector->napi);
144
145 /* enable vector */
146 hns3_mask_vector_irq(tqp_vector, 1);
147 }
148
149 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
150 {
151 /* disable vector */
152 hns3_mask_vector_irq(tqp_vector, 0);
153
154 disable_irq(tqp_vector->vector_irq);
155 napi_disable(&tqp_vector->napi);
156 }
157
158 static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
159 u32 gl_value)
160 {
161 /* this defines the configuration for GL (Interrupt Gap Limiter)
162 * GL defines inter interrupt gap.
163 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
164 */
165 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
166 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
167 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
168 }
169
170 static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
171 u32 rl_value)
172 {
173 /* this defines the configuration for RL (Interrupt Rate Limiter).
174 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
175 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
176 */
177 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
178 }
179
180 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
181 {
182 /* initialize the configuration for interrupt coalescing.
183 * 1. GL (Interrupt Gap Limiter)
184 * 2. RL (Interrupt Rate Limiter)
185 */
186
187 /* Default: enable interrupt coalescing */
188 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
189 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
190 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
191 /* for now we are disabling Interrupt RL - we
192 * will re-enable later
193 */
194 hns3_set_vector_coalesc_rl(tqp_vector, 0);
195 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
196 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
197 }
198
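/* Tell the stack how many Tx/Rx queues are really usable:
 * rss_size queues for each enabled TC.
 */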
199 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
200 {
201 struct hnae3_handle *h = hns3_get_handle(netdev);
202 struct hnae3_knic_private_info *kinfo = &h->kinfo;
203 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
204 int ret;
205
206 ret = netif_set_real_num_tx_queues(netdev, queue_size);
207 if (ret) {
208 netdev_err(netdev,
209 "netif_set_real_num_tx_queues fail, ret=%d!\n",
210 ret);
211 return ret;
212 }
213
214 ret = netif_set_real_num_rx_queues(netdev, queue_size);
215 if (ret) {
216 netdev_err(netdev,
217 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
218 return ret;
219 }
220
221 return 0;
222 }
223
224 static int hns3_nic_net_up(struct net_device *netdev)
225 {
226 struct hns3_nic_priv *priv = netdev_priv(netdev);
227 struct hnae3_handle *h = priv->ae_handle;
228 int i, j;
229 int ret;
230
231 /* get irq resource for all vectors */
232 ret = hns3_nic_init_irq(priv);
233 if (ret) {
234 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
235 return ret;
236 }
237
238 /* enable the vectors */
239 for (i = 0; i < priv->vector_num; i++)
240 hns3_vector_enable(&priv->tqp_vector[i]);
241
242 /* start the ae_dev */
243 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
244 if (ret)
245 goto out_start_err;
246
247 return 0;
248
249 out_start_err:
250 for (j = i - 1; j >= 0; j--)
251 hns3_vector_disable(&priv->tqp_vector[j]);
252
253 hns3_nic_uninit_irq(priv);
254
255 return ret;
256 }
257
258 static int hns3_nic_net_open(struct net_device *netdev)
259 {
260 int ret;
261
262 netif_carrier_off(netdev);
263
264 ret = hns3_nic_set_real_num_queue(netdev);
265 if (ret)
266 return ret;
267
268 ret = hns3_nic_net_up(netdev);
269 if (ret) {
270 netdev_err(netdev,
271 "hns net up fail, ret=%d!\n", ret);
272 return ret;
273 }
274
275 return 0;
276 }
277
278 static void hns3_nic_net_down(struct net_device *netdev)
279 {
280 struct hns3_nic_priv *priv = netdev_priv(netdev);
281 const struct hnae3_ae_ops *ops;
282 int i;
283
284 /* stop ae_dev */
285 ops = priv->ae_handle->ae_algo->ops;
286 if (ops->stop)
287 ops->stop(priv->ae_handle);
288
289 /* disable vectors */
290 for (i = 0; i < priv->vector_num; i++)
291 hns3_vector_disable(&priv->tqp_vector[i]);
292
293 /* free irq resources */
294 hns3_nic_uninit_irq(priv);
295 }
296
297 static int hns3_nic_net_stop(struct net_device *netdev)
298 {
299 netif_tx_stop_all_queues(netdev);
300 netif_carrier_off(netdev);
301
302 hns3_nic_net_down(netdev);
303
304 return 0;
305 }
306
307 void hns3_set_multicast_list(struct net_device *netdev)
308 {
309 struct hnae3_handle *h = hns3_get_handle(netdev);
310 struct netdev_hw_addr *ha = NULL;
311
312 if (h->ae_algo->ops->set_mc_addr) {
313 netdev_for_each_mc_addr(ha, netdev)
314 if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
315 netdev_err(netdev, "set multicast fail\n");
316 }
317 }
318
319 static int hns3_nic_uc_sync(struct net_device *netdev,
320 const unsigned char *addr)
321 {
322 struct hnae3_handle *h = hns3_get_handle(netdev);
323
324 if (h->ae_algo->ops->add_uc_addr)
325 return h->ae_algo->ops->add_uc_addr(h, addr);
326
327 return 0;
328 }
329
330 static int hns3_nic_uc_unsync(struct net_device *netdev,
331 const unsigned char *addr)
332 {
333 struct hnae3_handle *h = hns3_get_handle(netdev);
334
335 if (h->ae_algo->ops->rm_uc_addr)
336 return h->ae_algo->ops->rm_uc_addr(h, addr);
337
338 return 0;
339 }
340
341 static int hns3_nic_mc_sync(struct net_device *netdev,
342 const unsigned char *addr)
343 {
344 struct hnae3_handle *h = hns3_get_handle(netdev);
345
346 if (h->ae_algo->ops->add_mc_addr)
347 return h->ae_algo->ops->add_mc_addr(h, addr);
348
349 return 0;
350 }
351
352 static int hns3_nic_mc_unsync(struct net_device *netdev,
353 const unsigned char *addr)
354 {
355 struct hnae3_handle *h = hns3_get_handle(netdev);
356
357 if (h->ae_algo->ops->rm_mc_addr)
358 return h->ae_algo->ops->rm_mc_addr(h, addr);
359
360 return 0;
361 }
362
363 void hns3_nic_set_rx_mode(struct net_device *netdev)
364 {
365 struct hnae3_handle *h = hns3_get_handle(netdev);
366
367 if (h->ae_algo->ops->set_promisc_mode) {
368 if (netdev->flags & IFF_PROMISC)
369 h->ae_algo->ops->set_promisc_mode(h, 1);
370 else
371 h->ae_algo->ops->set_promisc_mode(h, 0);
372 }
373 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
374 netdev_err(netdev, "sync uc address fail\n");
375 if (netdev->flags & IFF_MULTICAST)
376 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
377 netdev_err(netdev, "sync mc address fail\n");
378 }
379
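/* Prepare the Tx BD fields needed for TSO: clear the checksum fields the
 * hardware will recompute, remove the payload length from the TCP pseudo
 * checksum, set the TSO bit and report paylen/mss back to the caller.
 * A no-op for non-GSO skbs.
 */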
380 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
381 u16 *mss, u32 *type_cs_vlan_tso)
382 {
383 u32 l4_offset, hdr_len;
384 union l3_hdr_info l3;
385 union l4_hdr_info l4;
386 u32 l4_paylen;
387 int ret;
388
389 if (!skb_is_gso(skb))
390 return 0;
391
392 ret = skb_cow_head(skb, 0);
393 if (ret)
394 return ret;
395
396 l3.hdr = skb_network_header(skb);
397 l4.hdr = skb_transport_header(skb);
398
399 /* Software should clear the IPv4 checksum field when TSO is
400 * needed.
401 */
402 if (l3.v4->version == 4)
403 l3.v4->check = 0;
404
405 /* tunnel packet.*/
406 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
407 SKB_GSO_GRE_CSUM |
408 SKB_GSO_UDP_TUNNEL |
409 SKB_GSO_UDP_TUNNEL_CSUM)) {
410 if ((!(skb_shinfo(skb)->gso_type &
411 SKB_GSO_PARTIAL)) &&
412 (skb_shinfo(skb)->gso_type &
413 SKB_GSO_UDP_TUNNEL_CSUM)) {
414 /* Software should clear the UDP checksum
415 * field when TSO is needed.
416 */
417 l4.udp->check = 0;
418 }
419 /* reset l3&l4 pointers from outer to inner headers */
420 l3.hdr = skb_inner_network_header(skb);
421 l4.hdr = skb_inner_transport_header(skb);
422
423 /* Software should clear the IPv4 checksum field when
424 * TSO is needed.
425 */
426 if (l3.v4->version == 4)
427 l3.v4->check = 0;
428 }
429
430 /* normal or tunnel packet*/
431 l4_offset = l4.hdr - skb->data;
432 hdr_len = (l4.tcp->doff * 4) + l4_offset;
433
434 /* remove payload length from inner pseudo checksum when tso*/
435 l4_paylen = skb->len - l4_offset;
436 csum_replace_by_diff(&l4.tcp->check,
437 (__force __wsum)htonl(l4_paylen));
438
439 /* find the txbd field values */
440 *paylen = skb->len - hdr_len;
441 hnae_set_bit(*type_cs_vlan_tso,
442 HNS3_TXD_TSO_B, 1);
443
444 /* get MSS for TSO */
445 *mss = skb_shinfo(skb)->gso_size;
446
447 return 0;
448 }
449
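/* Extract the outer L4 protocol and, for encapsulated packets, the inner
 * L4 protocol from the skb headers. For non-encapsulated packets the
 * inner protocol is reported as 0.
 */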
450 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
451 u8 *il4_proto)
452 {
453 union {
454 struct iphdr *v4;
455 struct ipv6hdr *v6;
456 unsigned char *hdr;
457 } l3;
458 unsigned char *l4_hdr;
459 unsigned char *exthdr;
460 u8 l4_proto_tmp;
461 __be16 frag_off;
462
463 /* find outer header point */
464 l3.hdr = skb_network_header(skb);
465 l4_hdr = skb_inner_transport_header(skb);
466
467 if (skb->protocol == htons(ETH_P_IPV6)) {
468 exthdr = l3.hdr + sizeof(*l3.v6);
469 l4_proto_tmp = l3.v6->nexthdr;
470 if (l4_hdr != exthdr)
471 ipv6_skip_exthdr(skb, exthdr - skb->data,
472 &l4_proto_tmp, &frag_off);
473 } else if (skb->protocol == htons(ETH_P_IP)) {
474 l4_proto_tmp = l3.v4->protocol;
475 } else {
476 return -EINVAL;
477 }
478
479 *ol4_proto = l4_proto_tmp;
480
481 /* tunnel packet */
482 if (!skb->encapsulation) {
483 *il4_proto = 0;
484 return 0;
485 }
486
487 /* find inner header point */
488 l3.hdr = skb_inner_network_header(skb);
489 l4_hdr = skb_inner_transport_header(skb);
490
491 if (l3.v6->version == 6) {
492 exthdr = l3.hdr + sizeof(*l3.v6);
493 l4_proto_tmp = l3.v6->nexthdr;
494 if (l4_hdr != exthdr)
495 ipv6_skip_exthdr(skb, exthdr - skb->data,
496 &l4_proto_tmp, &frag_off);
497 } else if (l3.v4->version == 4) {
498 l4_proto_tmp = l3.v4->protocol;
499 }
500
501 *il4_proto = l4_proto_tmp;
502
503 return 0;
504 }
505
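/* Fill the L2/L3/L4 header length fields of the Tx BD. For tunnel packets
 * the outer (OL2/OL3/OL4) lengths are set as well and the header pointers
 * are switched to the inner headers. Lengths are encoded in units of 2 or
 * 4 bytes as noted in the per-field comments below.
 */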
506 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
507 u8 il4_proto, u32 *type_cs_vlan_tso,
508 u32 *ol_type_vlan_len_msec)
509 {
510 union {
511 struct iphdr *v4;
512 struct ipv6hdr *v6;
513 unsigned char *hdr;
514 } l3;
515 union {
516 struct tcphdr *tcp;
517 struct udphdr *udp;
518 struct gre_base_hdr *gre;
519 unsigned char *hdr;
520 } l4;
521 unsigned char *l2_hdr;
522 u8 l4_proto = ol4_proto;
523 u32 ol2_len;
524 u32 ol3_len;
525 u32 ol4_len;
526 u32 l2_len;
527 u32 l3_len;
528
529 l3.hdr = skb_network_header(skb);
530 l4.hdr = skb_transport_header(skb);
531
532 /* compute L2 header size for normal packet, defined in 2 Bytes */
533 l2_len = l3.hdr - skb->data;
534 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
535 HNS3_TXD_L2LEN_S, l2_len >> 1);
536
537 /* tunnel packet*/
538 if (skb->encapsulation) {
539 /* compute OL2 header size, defined in 2 Bytes */
540 ol2_len = l2_len;
541 hnae_set_field(*ol_type_vlan_len_msec,
542 HNS3_TXD_L2LEN_M,
543 HNS3_TXD_L2LEN_S, ol2_len >> 1);
544
545 /* compute OL3 header size, defined in 4 Bytes */
546 ol3_len = l4.hdr - l3.hdr;
547 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
548 HNS3_TXD_L3LEN_S, ol3_len >> 2);
549
550 /* MAC in UDP, MAC in GRE (0x6558)*/
551 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
552 /* switch MAC header ptr from outer to inner header.*/
553 l2_hdr = skb_inner_mac_header(skb);
554
555 /* compute OL4 header size, defined in 4 Bytes. */
556 ol4_len = l2_hdr - l4.hdr;
557 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
558 HNS3_TXD_L4LEN_S, ol4_len >> 2);
559
560 /* switch IP header ptr from outer to inner header */
561 l3.hdr = skb_inner_network_header(skb);
562
563 /* compute inner l2 header size, defined in 2 Bytes. */
564 l2_len = l3.hdr - l2_hdr;
565 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
566 HNS3_TXD_L2LEN_S, l2_len >> 1);
567 } else {
568 /* skb packet types not supported by the hardware;
569 * the txbd len field is not filled.
570 */
571 return;
572 }
573
574 /* switch L4 header pointer from outer to inner */
575 l4.hdr = skb_inner_transport_header(skb);
576
577 l4_proto = il4_proto;
578 }
579
580 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
581 l3_len = l4.hdr - l3.hdr;
582 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
583 HNS3_TXD_L3LEN_S, l3_len >> 2);
584
585 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
586 switch (l4_proto) {
587 case IPPROTO_TCP:
588 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
589 HNS3_TXD_L4LEN_S, l4.tcp->doff);
590 break;
591 case IPPROTO_SCTP:
592 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
593 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
594 break;
595 case IPPROTO_UDP:
596 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
597 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
598 break;
599 default:
600 /* skb packet types not supported by the hardware;
601 * the txbd len field is not filled.
602 */
603 return;
604 }
605 }
606
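/* Set the L3/L4 type and checksum-offload bits of the Tx BD, plus the
 * OL3 type and tunnel type for encapsulated packets. Unsupported tunnel
 * or L4 types fall back to software checksumming, except under TSO where
 * the packet is rejected with -EDOM.
 */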
607 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
608 u8 il4_proto, u32 *type_cs_vlan_tso,
609 u32 *ol_type_vlan_len_msec)
610 {
611 union {
612 struct iphdr *v4;
613 struct ipv6hdr *v6;
614 unsigned char *hdr;
615 } l3;
616 u32 l4_proto = ol4_proto;
617
618 l3.hdr = skb_network_header(skb);
619
620 /* define OL3 type and tunnel type(OL4).*/
621 if (skb->encapsulation) {
622 /* define outer network header type.*/
623 if (skb->protocol == htons(ETH_P_IP)) {
624 if (skb_is_gso(skb))
625 hnae_set_field(*ol_type_vlan_len_msec,
626 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
627 HNS3_OL3T_IPV4_CSUM);
628 else
629 hnae_set_field(*ol_type_vlan_len_msec,
630 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
631 HNS3_OL3T_IPV4_NO_CSUM);
632
633 } else if (skb->protocol == htons(ETH_P_IPV6)) {
634 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
635 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
636 }
637
638 /* define tunnel type(OL4).*/
639 switch (l4_proto) {
640 case IPPROTO_UDP:
641 hnae_set_field(*ol_type_vlan_len_msec,
642 HNS3_TXD_TUNTYPE_M,
643 HNS3_TXD_TUNTYPE_S,
644 HNS3_TUN_MAC_IN_UDP);
645 break;
646 case IPPROTO_GRE:
647 hnae_set_field(*ol_type_vlan_len_msec,
648 HNS3_TXD_TUNTYPE_M,
649 HNS3_TXD_TUNTYPE_S,
650 HNS3_TUN_NVGRE);
651 break;
652 default:
653 /* drop the skb tunnel packet if the hardware doesn't support it,
654 * because the hardware can't calculate the checksum when doing TSO.
655 */
656 if (skb_is_gso(skb))
657 return -EDOM;
658
659 /* the stack computes the IP header checksum already;
660 * fall back to software for the L4 checksum when not doing TSO.
661 */
662 skb_checksum_help(skb);
663 return 0;
664 }
665
666 l3.hdr = skb_inner_network_header(skb);
667 l4_proto = il4_proto;
668 }
669
670 if (l3.v4->version == 4) {
671 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
672 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
673
674 /* the stack computes the IP header already, the only time we
675 * need the hardware to recompute it is in the case of TSO.
676 */
677 if (skb_is_gso(skb))
678 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
679
680 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
681 } else if (l3.v6->version == 6) {
682 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
683 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
684 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
685 }
686
687 switch (l4_proto) {
688 case IPPROTO_TCP:
689 hnae_set_field(*type_cs_vlan_tso,
690 HNS3_TXD_L4T_M,
691 HNS3_TXD_L4T_S,
692 HNS3_L4T_TCP);
693 break;
694 case IPPROTO_UDP:
695 hnae_set_field(*type_cs_vlan_tso,
696 HNS3_TXD_L4T_M,
697 HNS3_TXD_L4T_S,
698 HNS3_L4T_UDP);
699 break;
700 case IPPROTO_SCTP:
701 hnae_set_field(*type_cs_vlan_tso,
702 HNS3_TXD_L4T_M,
703 HNS3_TXD_L4T_S,
704 HNS3_L4T_SCTP);
705 break;
706 default:
707 /* drop the skb tunnel packet if the hardware doesn't support it,
708 * because the hardware can't calculate the checksum when doing TSO.
709 */
710 if (skb_is_gso(skb))
711 return -EDOM;
712
713 /* the stack computes the IP header checksum already;
714 * fall back to software for the L4 checksum when not doing TSO.
715 */
716 skb_checksum_help(skb);
717 return 0;
718 }
719
720 return 0;
721 }
722
723 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
724 {
725 /* Config bd buffer end */
726 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
727 HNS3_TXD_BDTYPE_S, 0);
728 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
729 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
730 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
731 }
732
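/* Fill one Tx descriptor with the given buffer. For the head (SKB type)
 * descriptor of a checksum-offloaded packet this also computes the vlan,
 * checksum and TSO related BD fields before advancing next_to_use.
 */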
733 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
734 int size, dma_addr_t dma, int frag_end,
735 enum hns_desc_type type)
736 {
737 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
738 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
739 u32 ol_type_vlan_len_msec = 0;
740 u16 bdtp_fe_sc_vld_ra_ri = 0;
741 u32 type_cs_vlan_tso = 0;
742 struct sk_buff *skb;
743 u32 paylen = 0;
744 u16 mss = 0;
745 __be16 protocol;
746 u8 ol4_proto;
747 u8 il4_proto;
748 int ret;
749
750 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
751 desc_cb->priv = priv;
752 desc_cb->length = size;
753 desc_cb->dma = dma;
754 desc_cb->type = type;
755
756 /* now, fill the descriptor */
757 desc->addr = cpu_to_le64(dma);
758 desc->tx.send_size = cpu_to_le16((u16)size);
759 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
760 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
761
762 if (type == DESC_TYPE_SKB) {
763 skb = (struct sk_buff *)priv;
764 paylen = skb->len;
765
766 if (skb->ip_summed == CHECKSUM_PARTIAL) {
767 skb_reset_mac_len(skb);
768 protocol = skb->protocol;
769
770 /* vlan packet*/
771 if (protocol == htons(ETH_P_8021Q)) {
772 protocol = vlan_get_protocol(skb);
773 skb->protocol = protocol;
774 }
775 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
776 if (ret)
777 return ret;
778 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
779 &type_cs_vlan_tso,
780 &ol_type_vlan_len_msec);
781 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
782 &type_cs_vlan_tso,
783 &ol_type_vlan_len_msec);
784 if (ret)
785 return ret;
786
787 ret = hns3_set_tso(skb, &paylen, &mss,
788 &type_cs_vlan_tso);
789 if (ret)
790 return ret;
791 }
792
793 /* Set txbd */
794 desc->tx.ol_type_vlan_len_msec =
795 cpu_to_le32(ol_type_vlan_len_msec);
796 desc->tx.type_cs_vlan_tso_len =
797 cpu_to_le32(type_cs_vlan_tso);
798 desc->tx.paylen = cpu_to_le32(paylen);
799 desc->tx.mss = cpu_to_le16(mss);
800 }
801
802 /* move ring pointer to next.*/
803 ring_ptr_move_fw(ring, next_to_use);
804
805 return 0;
806 }
807
808 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
809 int size, dma_addr_t dma, int frag_end,
810 enum hns_desc_type type)
811 {
812 unsigned int frag_buf_num;
813 unsigned int k;
814 int sizeoflast;
815 int ret;
816
817 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
818 sizeoflast = size % HNS3_MAX_BD_SIZE;
819 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
820
821 /* When the frag size is bigger than hardware, split this frag */
822 for (k = 0; k < frag_buf_num; k++) {
823 ret = hns3_fill_desc(ring, priv,
824 (k == frag_buf_num - 1) ?
825 sizeoflast : HNS3_MAX_BD_SIZE,
826 dma + HNS3_MAX_BD_SIZE * k,
827 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
828 (type == DESC_TYPE_SKB && !k) ?
829 DESC_TYPE_SKB : DESC_TYPE_PAGE);
830 if (ret)
831 return ret;
832 }
833
834 return 0;
835 }
836
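/* Count the Tx BDs needed for this skb (head plus fragments, each split
 * by HNS3_MAX_BD_SIZE). Returns -ENOMEM if a single fragment would need
 * more than HNS3_MAX_BD_PER_FRAG BDs and -EBUSY if the ring is too full.
 */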
837 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
838 struct hns3_enet_ring *ring)
839 {
840 struct sk_buff *skb = *out_skb;
841 struct skb_frag_struct *frag;
842 int bdnum_for_frag;
843 int frag_num;
844 int buf_num;
845 int size;
846 int i;
847
848 size = skb_headlen(skb);
849 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
850
851 frag_num = skb_shinfo(skb)->nr_frags;
852 for (i = 0; i < frag_num; i++) {
853 frag = &skb_shinfo(skb)->frags[i];
854 size = skb_frag_size(frag);
855 bdnum_for_frag =
856 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
857 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
858 return -ENOMEM;
859
860 buf_num += bdnum_for_frag;
861 }
862
863 if (buf_num > ring_space(ring))
864 return -EBUSY;
865
866 *bnum = buf_num;
867 return 0;
868 }
869
870 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
871 struct hns3_enet_ring *ring)
872 {
873 struct sk_buff *skb = *out_skb;
874 int buf_num;
875
876 /* No. of segments (plus a header) */
877 buf_num = skb_shinfo(skb)->nr_frags + 1;
878
879 if (buf_num > ring_space(ring))
880 return -EBUSY;
881
882 *bnum = buf_num;
883
884 return 0;
885 }
886
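/* Roll back the ring from the current next_to_use to next_to_use_orig,
 * unmapping every descriptor filled in between. Used on the xmit error
 * paths to undo partially mapped packets.
 */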
887 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
888 {
889 struct device *dev = ring_to_dev(ring);
890 unsigned int i;
891
892 for (i = 0; i < ring->desc_num; i++) {
893 /* check if this is where we started */
894 if (ring->next_to_use == next_to_use_orig)
895 break;
896
897 /* unmap the descriptor dma address */
898 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
899 dma_unmap_single(dev,
900 ring->desc_cb[ring->next_to_use].dma,
901 ring->desc_cb[ring->next_to_use].length,
902 DMA_TO_DEVICE);
903 else
904 dma_unmap_page(dev,
905 ring->desc_cb[ring->next_to_use].dma,
906 ring->desc_cb[ring->next_to_use].length,
907 DMA_TO_DEVICE);
908
909 /* rollback one */
910 ring_ptr_move_bw(ring, next_to_use);
911 }
912 }
913
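/* Main transmit path: check for ring space, DMA-map the linear part and
 * the page fragments, fill one or more Tx BDs for each, then ring the
 * doorbell with the total BD count.
 */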
914 static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
915 struct net_device *netdev)
916 {
917 struct hns3_nic_priv *priv = netdev_priv(netdev);
918 struct hns3_nic_ring_data *ring_data =
919 &tx_ring_data(priv, skb->queue_mapping);
920 struct hns3_enet_ring *ring = ring_data->ring;
921 struct device *dev = priv->dev;
922 struct netdev_queue *dev_queue;
923 struct skb_frag_struct *frag;
924 int next_to_use_head;
925 int next_to_use_frag;
926 dma_addr_t dma;
927 int buf_num;
928 int seg_num;
929 int size;
930 int ret;
931 int i;
932
933 /* Prefetch the data used later */
934 prefetch(skb->data);
935
936 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
937 case -EBUSY:
938 u64_stats_update_begin(&ring->syncp);
939 ring->stats.tx_busy++;
940 u64_stats_update_end(&ring->syncp);
941
942 goto out_net_tx_busy;
943 case -ENOMEM:
944 u64_stats_update_begin(&ring->syncp);
945 ring->stats.sw_err_cnt++;
946 u64_stats_update_end(&ring->syncp);
947 netdev_err(netdev, "no memory to xmit!\n");
948
949 goto out_err_tx_ok;
950 default:
951 break;
952 }
953
954 /* No. of segments (plus a header) */
955 seg_num = skb_shinfo(skb)->nr_frags + 1;
956 /* Fill the first part */
957 size = skb_headlen(skb);
958
959 next_to_use_head = ring->next_to_use;
960
961 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
962 if (dma_mapping_error(dev, dma)) {
963 netdev_err(netdev, "TX head DMA map failed\n");
964 ring->stats.sw_err_cnt++;
965 goto out_err_tx_ok;
966 }
967
968 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
969 DESC_TYPE_SKB);
970 if (ret)
971 goto head_dma_map_err;
972
973 next_to_use_frag = ring->next_to_use;
974 /* Fill the fragments */
975 for (i = 1; i < seg_num; i++) {
976 frag = &skb_shinfo(skb)->frags[i - 1];
977 size = skb_frag_size(frag);
978 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
979 if (dma_mapping_error(dev, dma)) {
980 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
981 ring->stats.sw_err_cnt++;
982 goto frag_dma_map_err;
983 }
984 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
985 seg_num - 1 == i ? 1 : 0,
986 DESC_TYPE_PAGE);
987
988 if (ret)
989 goto frag_dma_map_err;
990 }
991
992 /* Complete translate all packets */
993 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
994 netdev_tx_sent_queue(dev_queue, skb->len);
995
996 wmb(); /* Commit all data before submit */
997
998 hnae_queue_xmit(ring->tqp, buf_num);
999
1000 return NETDEV_TX_OK;
1001
1002 frag_dma_map_err:
1003 hns_nic_dma_unmap(ring, next_to_use_frag);
1004
1005 head_dma_map_err:
1006 hns_nic_dma_unmap(ring, next_to_use_head);
1007
1008 out_err_tx_ok:
1009 dev_kfree_skb_any(skb);
1010 return NETDEV_TX_OK;
1011
1012 out_net_tx_busy:
1013 netif_stop_subqueue(netdev, ring_data->queue_index);
1014 smp_mb(); /* Commit all data before submit */
1015
1016 return NETDEV_TX_BUSY;
1017 }
1018
1019 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1020 {
1021 struct hnae3_handle *h = hns3_get_handle(netdev);
1022 struct sockaddr *mac_addr = p;
1023 int ret;
1024
1025 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1026 return -EADDRNOTAVAIL;
1027
1028 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1029 if (ret) {
1030 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1031 return ret;
1032 }
1033
1034 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1035
1036 return 0;
1037 }
1038
1039 static int hns3_nic_set_features(struct net_device *netdev,
1040 netdev_features_t features)
1041 {
1042 struct hns3_nic_priv *priv = netdev_priv(netdev);
1043
1044 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1045 priv->ops.fill_desc = hns3_fill_desc_tso;
1046 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1047 } else {
1048 priv->ops.fill_desc = hns3_fill_desc;
1049 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1050 }
1051
1052 netdev->features = features;
1053 return 0;
1054 }
1055
1056 static void
1057 hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1058 {
1059 struct hns3_nic_priv *priv = netdev_priv(netdev);
1060 int queue_num = priv->ae_handle->kinfo.num_tqps;
1061 struct hns3_enet_ring *ring;
1062 unsigned int start;
1063 unsigned int idx;
1064 u64 tx_bytes = 0;
1065 u64 rx_bytes = 0;
1066 u64 tx_pkts = 0;
1067 u64 rx_pkts = 0;
1068
1069 for (idx = 0; idx < queue_num; idx++) {
1070 /* fetch the tx stats */
1071 ring = priv->ring_data[idx].ring;
1072 do {
1073 start = u64_stats_fetch_begin_irq(&ring->syncp);
1074 tx_bytes += ring->stats.tx_bytes;
1075 tx_pkts += ring->stats.tx_pkts;
1076 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1077
1078 /* fetch the rx stats */
1079 ring = priv->ring_data[idx + queue_num].ring;
1080 do {
1081 start = u64_stats_fetch_begin_irq(&ring->syncp);
1082 rx_bytes += ring->stats.rx_bytes;
1083 rx_pkts += ring->stats.rx_pkts;
1084 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1085 }
1086
1087 stats->tx_bytes = tx_bytes;
1088 stats->tx_packets = tx_pkts;
1089 stats->rx_bytes = rx_bytes;
1090 stats->rx_packets = rx_pkts;
1091
1092 stats->rx_errors = netdev->stats.rx_errors;
1093 stats->multicast = netdev->stats.multicast;
1094 stats->rx_length_errors = netdev->stats.rx_length_errors;
1095 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1096 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1097
1098 stats->tx_errors = netdev->stats.tx_errors;
1099 stats->rx_dropped = netdev->stats.rx_dropped;
1100 stats->tx_dropped = netdev->stats.tx_dropped;
1101 stats->collisions = netdev->stats.collisions;
1102 stats->rx_over_errors = netdev->stats.rx_over_errors;
1103 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1104 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1105 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1106 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1107 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1108 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1109 stats->tx_window_errors = netdev->stats.tx_window_errors;
1110 stats->rx_compressed = netdev->stats.rx_compressed;
1111 stats->tx_compressed = netdev->stats.tx_compressed;
1112 }
1113
1114 static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1115 enum hns3_udp_tnl_type type)
1116 {
1117 struct hns3_nic_priv *priv = netdev_priv(netdev);
1118 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1119 struct hnae3_handle *h = priv->ae_handle;
1120
1121 if (udp_tnl->used && udp_tnl->dst_port == port) {
1122 udp_tnl->used++;
1123 return;
1124 }
1125
1126 if (udp_tnl->used) {
1127 netdev_warn(netdev,
1128 "UDP tunnel [%d], port [%d] offload\n", type, port);
1129 return;
1130 }
1131
1132 udp_tnl->dst_port = port;
1133 udp_tnl->used = 1;
1134 /* TBD send command to hardware to add port */
1135 if (h->ae_algo->ops->add_tunnel_udp)
1136 h->ae_algo->ops->add_tunnel_udp(h, port);
1137 }
1138
1139 static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1140 enum hns3_udp_tnl_type type)
1141 {
1142 struct hns3_nic_priv *priv = netdev_priv(netdev);
1143 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1144 struct hnae3_handle *h = priv->ae_handle;
1145
1146 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1147 netdev_warn(netdev,
1148 "Invalid UDP tunnel port %d\n", port);
1149 return;
1150 }
1151
1152 udp_tnl->used--;
1153 if (udp_tnl->used)
1154 return;
1155
1156 udp_tnl->dst_port = 0;
1157 /* TBD send command to hardware to del port */
1158 if (h->ae_algo->ops->del_tunnel_udp)
1159 h->ae_algo->ops->del_tunnel_udp(h, port);
1160 }
1161
1162 /* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
1163 * @netdev: This physical port's netdev
1164 * @ti: Tunnel information
1165 */
1166 static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1167 struct udp_tunnel_info *ti)
1168 {
1169 u16 port_n = ntohs(ti->port);
1170
1171 switch (ti->type) {
1172 case UDP_TUNNEL_TYPE_VXLAN:
1173 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1174 break;
1175 case UDP_TUNNEL_TYPE_GENEVE:
1176 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1177 break;
1178 default:
1179 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1180 break;
1181 }
1182 }
1183
1184 static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1185 struct udp_tunnel_info *ti)
1186 {
1187 u16 port_n = ntohs(ti->port);
1188
1189 switch (ti->type) {
1190 case UDP_TUNNEL_TYPE_VXLAN:
1191 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1192 break;
1193 case UDP_TUNNEL_TYPE_GENEVE:
1194 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1195 break;
1196 default:
1197 break;
1198 }
1199 }
1200
1201 static int hns3_setup_tc(struct net_device *netdev, u8 tc)
1202 {
1203 struct hnae3_handle *h = hns3_get_handle(netdev);
1204 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1205 unsigned int i;
1206 int ret;
1207
1208 if (tc > HNAE3_MAX_TC)
1209 return -EINVAL;
1210
1211 if (kinfo->num_tc == tc)
1212 return 0;
1213
1214 if (!netdev)
1215 return -EINVAL;
1216
1217 if (!tc) {
1218 netdev_reset_tc(netdev);
1219 return 0;
1220 }
1221
1222 /* Set num_tc for netdev */
1223 ret = netdev_set_num_tc(netdev, tc);
1224 if (ret)
1225 return ret;
1226
1227 /* Set per TC queues for the VSI */
1228 for (i = 0; i < HNAE3_MAX_TC; i++) {
1229 if (kinfo->tc_info[i].enable)
1230 netdev_set_tc_queue(netdev,
1231 kinfo->tc_info[i].tc,
1232 kinfo->tc_info[i].tqp_count,
1233 kinfo->tc_info[i].tqp_offset);
1234 }
1235
1236 return 0;
1237 }
1238
1239 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1240 void *type_data)
1241 {
1242 struct tc_mqprio_qopt *mqprio = type_data;
1243
1244 if (type != TC_SETUP_MQPRIO)
1245 return -EOPNOTSUPP;
1246
1247 return hns3_setup_tc(dev, mqprio->num_tc);
1248 }
1249
1250 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1251 __be16 proto, u16 vid)
1252 {
1253 struct hnae3_handle *h = hns3_get_handle(netdev);
1254 int ret = -EIO;
1255
1256 if (h->ae_algo->ops->set_vlan_filter)
1257 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1258
1259 return ret;
1260 }
1261
1262 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1263 __be16 proto, u16 vid)
1264 {
1265 struct hnae3_handle *h = hns3_get_handle(netdev);
1266 int ret = -EIO;
1267
1268 if (h->ae_algo->ops->set_vlan_filter)
1269 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1270
1271 return ret;
1272 }
1273
1274 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1275 u8 qos, __be16 vlan_proto)
1276 {
1277 struct hnae3_handle *h = hns3_get_handle(netdev);
1278 int ret = -EIO;
1279
1280 if (h->ae_algo->ops->set_vf_vlan_filter)
1281 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1282 qos, vlan_proto);
1283
1284 return ret;
1285 }
1286
1287 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1288 {
1289 struct hnae3_handle *h = hns3_get_handle(netdev);
1290 bool if_running = netif_running(netdev);
1291 int ret;
1292
1293 if (!h->ae_algo->ops->set_mtu)
1294 return -EOPNOTSUPP;
1295
1296 /* if this was called with netdev up then bring netdevice down */
1297 if (if_running) {
1298 (void)hns3_nic_net_stop(netdev);
1299 msleep(100);
1300 }
1301
1302 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1303 if (ret) {
1304 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1305 ret);
1306 return ret;
1307 }
1308
1309 /* if the netdev was running earlier, bring it up again */
1310 if (if_running && hns3_nic_net_open(netdev))
1311 ret = -EINVAL;
1312
1313 return ret;
1314 }
1315
1316 static const struct net_device_ops hns3_nic_netdev_ops = {
1317 .ndo_open = hns3_nic_net_open,
1318 .ndo_stop = hns3_nic_net_stop,
1319 .ndo_start_xmit = hns3_nic_net_xmit,
1320 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1321 .ndo_change_mtu = hns3_nic_change_mtu,
1322 .ndo_set_features = hns3_nic_set_features,
1323 .ndo_get_stats64 = hns3_nic_get_stats64,
1324 .ndo_setup_tc = hns3_nic_setup_tc,
1325 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1326 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1327 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1328 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1329 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1330 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1331 };
1332
1333 /* hns3_probe - Device initialization routine
1334 * @pdev: PCI device information struct
1335 * @ent: entry in hns3_pci_tbl
1336 *
1337 * hns3_probe initializes a PF identified by a pci_dev structure.
1338 * The OS initialization, configuring of the PF private structure,
1339 * and a hardware reset occur.
1340 *
1341 * Returns 0 on success, negative on failure
1342 */
1343 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1344 {
1345 struct hnae3_ae_dev *ae_dev;
1346 int ret;
1347
1348 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1349 GFP_KERNEL);
1350 if (!ae_dev) {
1351 ret = -ENOMEM;
1352 return ret;
1353 }
1354
1355 ae_dev->pdev = pdev;
1356 ae_dev->flag = ent->driver_data;
1357 ae_dev->dev_type = HNAE3_DEV_KNIC;
1358 pci_set_drvdata(pdev, ae_dev);
1359
1360 return hnae3_register_ae_dev(ae_dev);
1361 }
1362
1363 /* hns3_remove - Device removal routine
1364 * @pdev: PCI device information struct
1365 */
1366 static void hns3_remove(struct pci_dev *pdev)
1367 {
1368 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1369
1370 hnae3_unregister_ae_dev(ae_dev);
1371
1372 devm_kfree(&pdev->dev, ae_dev);
1373
1374 pci_set_drvdata(pdev, NULL);
1375 }
1376
1377 static struct pci_driver hns3_driver = {
1378 .name = hns3_driver_name,
1379 .id_table = hns3_pci_tbl,
1380 .probe = hns3_probe,
1381 .remove = hns3_remove,
1382 };
1383
1384 /* set default feature to hns3 */
1385 static void hns3_set_default_feature(struct net_device *netdev)
1386 {
1387 netdev->priv_flags |= IFF_UNICAST_FLT;
1388
1389 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1390 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1391 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1392 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1393 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1394
1395 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1396
1397 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1398
1399 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1400 NETIF_F_HW_VLAN_CTAG_FILTER |
1401 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1402 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1403 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1404 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1405
1406 netdev->vlan_features |=
1407 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1408 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1409 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1410 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1411 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1412
1413 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1414 NETIF_F_HW_VLAN_CTAG_FILTER |
1415 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1416 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1417 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1418 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1419 }
1420
1421 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1422 struct hns3_desc_cb *cb)
1423 {
1424 unsigned int order = hnae_page_order(ring);
1425 struct page *p;
1426
1427 p = dev_alloc_pages(order);
1428 if (!p)
1429 return -ENOMEM;
1430
1431 cb->priv = p;
1432 cb->page_offset = 0;
1433 cb->reuse_flag = 0;
1434 cb->buf = page_address(p);
1435 cb->length = hnae_page_size(ring);
1436 cb->type = DESC_TYPE_PAGE;
1437
1438 memset(cb->buf, 0, cb->length);
1439
1440 return 0;
1441 }
1442
1443 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1444 struct hns3_desc_cb *cb)
1445 {
1446 if (cb->type == DESC_TYPE_SKB)
1447 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1448 else if (!HNAE3_IS_TX_RING(ring))
1449 put_page((struct page *)cb->priv);
1450 memset(cb, 0, sizeof(*cb));
1451 }
1452
1453 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1454 {
1455 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1456 cb->length, ring_to_dma_dir(ring));
1457
1458 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1459 return -EIO;
1460
1461 return 0;
1462 }
1463
1464 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1465 struct hns3_desc_cb *cb)
1466 {
1467 if (cb->type == DESC_TYPE_SKB)
1468 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1469 ring_to_dma_dir(ring));
1470 else
1471 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1472 ring_to_dma_dir(ring));
1473 }
1474
1475 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1476 {
1477 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1478 ring->desc[i].addr = 0;
1479 }
1480
1481 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1482 {
1483 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1484
1485 if (!ring->desc_cb[i].dma)
1486 return;
1487
1488 hns3_buffer_detach(ring, i);
1489 hns3_free_buffer(ring, cb);
1490 }
1491
1492 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1493 {
1494 int i;
1495
1496 for (i = 0; i < ring->desc_num; i++)
1497 hns3_free_buffer_detach(ring, i);
1498 }
1499
1500 /* free desc along with its attached buffer */
1501 static void hns3_free_desc(struct hns3_enet_ring *ring)
1502 {
1503 hns3_free_buffers(ring);
1504
1505 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1506 ring->desc_num * sizeof(ring->desc[0]),
1507 DMA_BIDIRECTIONAL);
1508 ring->desc_dma_addr = 0;
1509 kfree(ring->desc);
1510 ring->desc = NULL;
1511 }
1512
1513 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1514 {
1515 int size = ring->desc_num * sizeof(ring->desc[0]);
1516
1517 ring->desc = kzalloc(size, GFP_KERNEL);
1518 if (!ring->desc)
1519 return -ENOMEM;
1520
1521 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1522 size, DMA_BIDIRECTIONAL);
1523 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1524 ring->desc_dma_addr = 0;
1525 kfree(ring->desc);
1526 ring->desc = NULL;
1527 return -ENOMEM;
1528 }
1529
1530 return 0;
1531 }
1532
1533 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1534 struct hns3_desc_cb *cb)
1535 {
1536 int ret;
1537
1538 ret = hns3_alloc_buffer(ring, cb);
1539 if (ret)
1540 goto out;
1541
1542 ret = hns3_map_buffer(ring, cb);
1543 if (ret)
1544 goto out_with_buf;
1545
1546 return 0;
1547
1548 out_with_buf:
1549 hns3_free_buffers(ring);
1550 out:
1551 return ret;
1552 }
1553
1554 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1555 {
1556 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1557
1558 if (ret)
1559 return ret;
1560
1561 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1562
1563 return 0;
1564 }
1565
1566 /* Allocate memory for raw packets and map them for DMA */
1567 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1568 {
1569 int i, j, ret;
1570
1571 for (i = 0; i < ring->desc_num; i++) {
1572 ret = hns3_alloc_buffer_attach(ring, i);
1573 if (ret)
1574 goto out_buffer_fail;
1575 }
1576
1577 return 0;
1578
1579 out_buffer_fail:
1580 for (j = i - 1; j >= 0; j--)
1581 hns3_free_buffer_detach(ring, j);
1582 return ret;
1583 }
1584
1585 /* detach an in-use buffer and replace it with a reserved one */
1586 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1587 struct hns3_desc_cb *res_cb)
1588 {
1589 hns3_map_buffer(ring, &ring->desc_cb[i]);
1590 ring->desc_cb[i] = *res_cb;
1591 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1592 }
1593
1594 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1595 {
1596 ring->desc_cb[i].reuse_flag = 0;
1597 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1598 + ring->desc_cb[i].page_offset);
1599 }
1600
1601 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1602 int *pkts)
1603 {
1604 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1605
1606 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1607 (*bytes) += desc_cb->length;
1608 /* desc_cb will be cleaned after hnae_free_buffer_detach */
1609 hns3_free_buffer_detach(ring, ring->next_to_clean);
1610
1611 ring_ptr_move_fw(ring, next_to_clean);
1612 }
1613
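/* Check that the head index reported by hardware falls inside the range
 * of descriptors currently owned by hardware, i.e. in (next_to_clean,
 * next_to_use], taking ring wrap-around into account.
 */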
1614 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1615 {
1616 int u = ring->next_to_use;
1617 int c = ring->next_to_clean;
1618
1619 if (unlikely(h > ring->desc_num))
1620 return 0;
1621
1622 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1623 }
1624
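/* Reclaim completed Tx descriptors up to the head pointer reported by
 * hardware, limited by budget. Updates byte/packet statistics and wakes
 * the Tx queue if it was stopped and enough ring space is free again.
 */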
1625 int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1626 {
1627 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1628 struct netdev_queue *dev_queue;
1629 int bytes, pkts;
1630 int head;
1631
1632 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1633 rmb(); /* Make sure head is ready before touch any data */
1634
1635 if (is_ring_empty(ring) || head == ring->next_to_clean)
1636 return 0; /* no data to poll */
1637
1638 if (!is_valid_clean_head(ring, head)) {
1639 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1640 ring->next_to_use, ring->next_to_clean);
1641
1642 u64_stats_update_begin(&ring->syncp);
1643 ring->stats.io_err_cnt++;
1644 u64_stats_update_end(&ring->syncp);
1645 return -EIO;
1646 }
1647
1648 bytes = 0;
1649 pkts = 0;
1650 while (head != ring->next_to_clean && budget) {
1651 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1652 /* Issue prefetch for next Tx descriptor */
1653 prefetch(&ring->desc_cb[ring->next_to_clean]);
1654 budget--;
1655 }
1656
1657 ring->tqp_vector->tx_group.total_bytes += bytes;
1658 ring->tqp_vector->tx_group.total_packets += pkts;
1659
1660 u64_stats_update_begin(&ring->syncp);
1661 ring->stats.tx_bytes += bytes;
1662 ring->stats.tx_pkts += pkts;
1663 u64_stats_update_end(&ring->syncp);
1664
1665 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1666 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1667
1668 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1669 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1670 /* Make sure that anybody stopping the queue after this
1671 * sees the new next_to_clean.
1672 */
1673 smp_mb();
1674 if (netif_tx_queue_stopped(dev_queue)) {
1675 netif_tx_wake_queue(dev_queue);
1676 ring->stats.restart_queue++;
1677 }
1678 }
1679
1680 return !!budget;
1681 }
1682
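/* Number of Rx descriptors that have been consumed and can be refilled */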
1683 static int hns3_desc_unused(struct hns3_enet_ring *ring)
1684 {
1685 int ntc = ring->next_to_clean;
1686 int ntu = ring->next_to_use;
1687
1688 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1689 }
1690
1691 static void
1692 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1693 {
1694 struct hns3_desc_cb *desc_cb;
1695 struct hns3_desc_cb res_cbs;
1696 int i, ret;
1697
1698 for (i = 0; i < cleand_count; i++) {
1699 desc_cb = &ring->desc_cb[ring->next_to_use];
1700 if (desc_cb->reuse_flag) {
1701 u64_stats_update_begin(&ring->syncp);
1702 ring->stats.reuse_pg_cnt++;
1703 u64_stats_update_end(&ring->syncp);
1704
1705 hns3_reuse_buffer(ring, ring->next_to_use);
1706 } else {
1707 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1708 if (ret) {
1709 u64_stats_update_begin(&ring->syncp);
1710 ring->stats.sw_err_cnt++;
1711 u64_stats_update_end(&ring->syncp);
1712
1713 netdev_err(ring->tqp->handle->kinfo.netdev,
1714 "hnae reserve buffer map failed.\n");
1715 break;
1716 }
1717 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1718 }
1719
1720 ring_ptr_move_fw(ring, next_to_use);
1721 }
1722
1723 wmb(); /* Make sure all data has been written before submit */
1724 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1725 }
1726
1727 /* hns3_nic_get_headlen - determine size of header for LRO/GRO
1728 * @data: pointer to the start of the headers
1729 * @max: total length of section to find headers in
1730 *
1731 * This function is meant to determine the length of headers that will
1732 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1733 * motivation of doing this is to only perform one pull for IPv4 TCP
1734 * packets so that we can do basic things like calculating the gso_size
1735 * based on the average data per packet.
1736 */
1737 static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1738 unsigned int max_size)
1739 {
1740 unsigned char *network;
1741 u8 hlen;
1742
1743 /* This should never happen, but better safe than sorry */
1744 if (max_size < ETH_HLEN)
1745 return max_size;
1746
1747 /* Initialize network frame pointer */
1748 network = data;
1749
1750 /* Set first protocol and move network header forward */
1751 network += ETH_HLEN;
1752
1753 /* Handle any vlan tag if present */
1754 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1755 == HNS3_RX_FLAG_VLAN_PRESENT) {
1756 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1757 return max_size;
1758
1759 network += VLAN_HLEN;
1760 }
1761
1762 /* Handle L3 protocols */
1763 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1764 == HNS3_RX_FLAG_L3ID_IPV4) {
1765 if ((typeof(max_size))(network - data) >
1766 (max_size - sizeof(struct iphdr)))
1767 return max_size;
1768
1769 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1770 hlen = (network[0] & 0x0F) << 2;
1771
1772 /* Verify hlen meets minimum size requirements */
1773 if (hlen < sizeof(struct iphdr))
1774 return network - data;
1775
1776 /* Record next protocol if header is present */
1777 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1778 == HNS3_RX_FLAG_L3ID_IPV6) {
1779 if ((typeof(max_size))(network - data) >
1780 (max_size - sizeof(struct ipv6hdr)))
1781 return max_size;
1782
1783 /* Record next protocol */
1784 hlen = sizeof(struct ipv6hdr);
1785 } else {
1786 return network - data;
1787 }
1788
1789 /* Relocate pointer to start of L4 header */
1790 network += hlen;
1791
1792 /* Finally sort out TCP/UDP */
1793 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1794 == HNS3_RX_FLAG_L4ID_TCP) {
1795 if ((typeof(max_size))(network - data) >
1796 (max_size - sizeof(struct tcphdr)))
1797 return max_size;
1798
1799 /* Access doff as a u8 to avoid unaligned access on ia64 */
1800 hlen = (network[12] & 0xF0) >> 2;
1801
1802 /* Verify hlen meets minimum size requirements */
1803 if (hlen < sizeof(struct tcphdr))
1804 return network - data;
1805
1806 network += hlen;
1807 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1808 == HNS3_RX_FLAG_L4ID_UDP) {
1809 if ((typeof(max_size))(network - data) >
1810 (max_size - sizeof(struct udphdr)))
1811 return max_size;
1812
1813 network += sizeof(struct udphdr);
1814 }
1815
1816 /* If everything has gone correctly network should be the
1817 * data section of the packet and will be the end of the header.
1818 * If not then it probably represents the end of the last recognized
1819 * header.
1820 */
1821 if ((typeof(max_size))(network - data) < max_size)
1822 return network - data;
1823 else
1824 return max_size;
1825 }
1826
1827 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1828 struct hns3_enet_ring *ring, int pull_len,
1829 struct hns3_desc_cb *desc_cb)
1830 {
1831 struct hns3_desc *desc;
1832 int truesize, size;
1833 int last_offset;
1834 bool twobufs;
1835
1836 twobufs = ((PAGE_SIZE < 8192) &&
1837 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
1838
1839 desc = &ring->desc[ring->next_to_clean];
1840 size = le16_to_cpu(desc->rx.size);
1841
1842 if (twobufs) {
1843 truesize = hnae_buf_size(ring);
1844 } else {
1845 truesize = ALIGN(size, L1_CACHE_BYTES);
1846 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1847 }
1848
1849 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1850 size - pull_len, truesize - pull_len);
1851
1852 /* Avoid re-using remote pages; the flag defaults to no reuse */
1853 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1854 return;
1855
1856 if (twobufs) {
1857 /* If we are only owner of page we can reuse it */
1858 if (likely(page_count(desc_cb->priv) == 1)) {
1859 /* Flip page offset to other buffer */
1860 desc_cb->page_offset ^= truesize;
1861
1862 desc_cb->reuse_flag = 1;
1863 /* bump ref count on page before it is given*/
1864 get_page(desc_cb->priv);
1865 }
1866 return;
1867 }
1868
1869 /* Move offset up to the next cache line */
1870 desc_cb->page_offset += truesize;
1871
1872 if (desc_cb->page_offset <= last_offset) {
1873 desc_cb->reuse_flag = 1;
1874 /* Bump ref count on page before it is given*/
1875 get_page(desc_cb->priv);
1876 }
1877 }
1878
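/* Set skb->ip_summed from the checksum status bits in the Rx descriptor.
 * Nothing is done if RXCSUM is disabled or hardware did not checksum the
 * packet; detected L3/L4 errors only bump the l3l4_csum_err counter.
 */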
1879 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1880 struct hns3_desc *desc)
1881 {
1882 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1883 int l3_type, l4_type;
1884 u32 bd_base_info;
1885 int ol4_type;
1886 u32 l234info;
1887
1888 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1889 l234info = le32_to_cpu(desc->rx.l234_info);
1890
1891 skb->ip_summed = CHECKSUM_NONE;
1892
1893 skb_checksum_none_assert(skb);
1894
1895 if (!(netdev->features & NETIF_F_RXCSUM))
1896 return;
1897
1898 /* check if hardware has done checksum */
1899 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1900 return;
1901
1902 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1903 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1904 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1905 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1906 netdev_err(netdev, "L3/L4 error pkt\n");
1907 u64_stats_update_begin(&ring->syncp);
1908 ring->stats.l3l4_csum_err++;
1909 u64_stats_update_end(&ring->syncp);
1910
1911 return;
1912 }
1913
1914 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1915 HNS3_RXD_L3ID_S);
1916 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1917 HNS3_RXD_L4ID_S);
1918
1919 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1920 switch (ol4_type) {
1921 case HNS3_OL4_TYPE_MAC_IN_UDP:
1922 case HNS3_OL4_TYPE_NVGRE:
1923 skb->csum_level = 1;
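/* fall through */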
1924 case HNS3_OL4_TYPE_NO_TUN:
1925 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1926 if (l3_type == HNS3_L3_TYPE_IPV4 ||
1927 (l3_type == HNS3_L3_TYPE_IPV6 &&
1928 (l4_type == HNS3_L4_TYPE_UDP ||
1929 l4_type == HNS3_L4_TYPE_TCP ||
1930 l4_type == HNS3_L4_TYPE_SCTP)))
1931 skb->ip_summed = CHECKSUM_UNNECESSARY;
1932 break;
1933 }
1934 }
1935
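/* Build an skb for one received packet. Short packets are copied into the
 * skb head in full; longer packets copy only the headers and attach the
 * remaining buffer(s) as page fragments, consuming as many BDs as the
 * packet spans. Returns the number of BDs used via out_bnum.
 */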
1936 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1937 struct sk_buff **out_skb, int *out_bnum)
1938 {
1939 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1940 struct hns3_desc_cb *desc_cb;
1941 struct hns3_desc *desc;
1942 struct sk_buff *skb;
1943 unsigned char *va;
1944 u32 bd_base_info;
1945 int pull_len;
1946 u32 l234info;
1947 int length;
1948 int bnum;
1949
1950 desc = &ring->desc[ring->next_to_clean];
1951 desc_cb = &ring->desc_cb[ring->next_to_clean];
1952
1953 prefetch(desc);
1954
1955 length = le16_to_cpu(desc->rx.pkt_len);
1956 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1957 l234info = le32_to_cpu(desc->rx.l234_info);
1958
1959 /* Check valid BD */
1960 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1961 return -EFAULT;
1962
1963 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1964
1965 /* Prefetch the first cache line of the first page.
1966 * The idea is to cache a few bytes of the packet header. Our L1 cache
1967 * line size is 64B, so we need to prefetch twice to cover 128B. In
1968 * practice some CPUs have larger, 128B L1 cache lines; in that case
1969 * a single prefetch would suffice to cache the relevant part of the
1970 * header.
1971 */
1972 prefetch(va);
1973 #if L1_CACHE_BYTES < 128
1974 prefetch(va + L1_CACHE_BYTES);
1975 #endif
1976
1977 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1978 HNS3_RX_HEAD_SIZE);
1979 if (unlikely(!skb)) {
1980 netdev_err(netdev, "alloc rx skb fail\n");
1981
1982 u64_stats_update_begin(&ring->syncp);
1983 ring->stats.sw_err_cnt++;
1984 u64_stats_update_end(&ring->syncp);
1985
1986 return -ENOMEM;
1987 }
1988
1989 prefetchw(skb->data);
1990
1991 bnum = 1;
1992 if (length <= HNS3_RX_HEAD_SIZE) {
1993 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
1994
1995 /* We can reuse buffer as-is, just make sure it is local */
1996 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
1997 desc_cb->reuse_flag = 1;
1998 else /* This page cannot be reused so discard it */
1999 put_page(desc_cb->priv);
2000
2001 ring_ptr_move_fw(ring, next_to_clean);
2002 } else {
2003 u64_stats_update_begin(&ring->syncp);
2004 ring->stats.seg_pkt_cnt++;
2005 u64_stats_update_end(&ring->syncp);
2006
2007 pull_len = hns3_nic_get_headlen(va, l234info,
2008 HNS3_RX_HEAD_SIZE);
2009 memcpy(__skb_put(skb, pull_len), va,
2010 ALIGN(pull_len, sizeof(long)));
2011
2012 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2013 ring_ptr_move_fw(ring, next_to_clean);
2014
2015 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2016 desc = &ring->desc[ring->next_to_clean];
2017 desc_cb = &ring->desc_cb[ring->next_to_clean];
2018 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2019 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2020 ring_ptr_move_fw(ring, next_to_clean);
2021 bnum++;
2022 }
2023 }
2024
2025 *out_bnum = bnum;
2026
2027 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2028 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2029 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2030 u64_stats_update_begin(&ring->syncp);
2031 ring->stats.non_vld_descs++;
2032 u64_stats_update_end(&ring->syncp);
2033
2034 dev_kfree_skb_any(skb);
2035 return -EINVAL;
2036 }
2037
2038 if (unlikely((!desc->rx.pkt_len) ||
2039 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2040 netdev_err(netdev, "truncated pkt\n");
2041 u64_stats_update_begin(&ring->syncp);
2042 ring->stats.err_pkt_len++;
2043 u64_stats_update_end(&ring->syncp);
2044
2045 dev_kfree_skb_any(skb);
2046 return -EFAULT;
2047 }
2048
2049 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2050 netdev_err(netdev, "L2 error pkt\n");
2051 u64_stats_update_begin(&ring->syncp);
2052 ring->stats.l2_err++;
2053 u64_stats_update_end(&ring->syncp);
2054
2055 dev_kfree_skb_any(skb);
2056 return -EFAULT;
2057 }
2058
2059 u64_stats_update_begin(&ring->syncp);
2060 ring->stats.rx_pkts++;
2061 ring->stats.rx_bytes += skb->len;
2062 u64_stats_update_end(&ring->syncp);
2063
2064 ring->tqp_vector->rx_group.total_bytes += skb->len;
2065
2066 hns3_rx_checksum(ring, skb, desc);
2067 return 0;
2068 }
2069
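/* hns3_clean_rx_ring - receive up to @budget packets from @ring, hand them
 * to the stack with napi_gro_receive() and refill RX buffers in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE. Returns the number of packets received.
 */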
2070 static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
2071 {
2072 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2073 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2074 int recv_pkts, recv_bds, clean_count, err;
2075 int unused_count = hns3_desc_unused(ring);
2076 struct sk_buff *skb = NULL;
2077 int num, bnum = 0;
2078
2079 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2080 rmb(); /* Make sure num is read before the other descriptor data is touched */
2081
2082 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2083 num -= unused_count;
2084
2085 while (recv_pkts < budget && recv_bds < num) {
2086 /* Reuse or realloc buffers */
2087 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2088 hns3_nic_alloc_rx_buffers(ring,
2089 clean_count + unused_count);
2090 clean_count = 0;
2091 unused_count = hns3_desc_unused(ring);
2092 }
2093
2094 /* Poll one pkt */
2095 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2096 if (unlikely(!skb)) /* This fault cannot be repaired */
2097 goto out;
2098
2099 recv_bds += bnum;
2100 clean_count += bnum;
2101 if (unlikely(err)) { /* Skip the erroneous packet */
2102 recv_pkts++;
2103 continue;
2104 }
2105
2106 /* Pass the packet up to the IP stack */
2107 skb->protocol = eth_type_trans(skb, netdev);
2108 (void)napi_gro_receive(&ring->tqp_vector->napi, skb);
2109
2110 recv_pkts++;
2111 }
2112
2113 out:
2114 /* Make sure all data has been written before submit */
2115 if (clean_count + unused_count > 0)
2116 hns3_nic_alloc_rx_buffers(ring,
2117 clean_count + unused_count);
2118
2119 return recv_pkts;
2120 }
2121
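/* hns3_get_new_int_gl - adjust the interrupt coalescing GL value of a ring
 * group based on the traffic seen since the last update, using the simple
 * throttle rate scheme below. Returns true if a new GL value was selected.
 */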
2122 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2123 {
2124 #define HNS3_RX_ULTRA_PACKET_RATE 40000
2125 enum hns3_flow_level_range new_flow_level;
2126 struct hns3_enet_tqp_vector *tqp_vector;
2127 int packets_per_secs;
2128 int bytes_per_usecs;
2129 u16 new_int_gl;
2130 int usecs;
2131
2132 if (!ring_group->int_gl)
2133 return false;
2134
2135 if (ring_group->total_packets == 0) {
2136 ring_group->int_gl = HNS3_INT_GL_50K;
2137 ring_group->flow_level = HNS3_FLOW_LOW;
2138 return true;
2139 }
2140
2141 /* Simple throttle rate management
2142 * 0-10MB/s lower (50000 ints/s)
2143 * 10-20MB/s middle (20000 ints/s)
2144 * 20-1249MB/s high (18000 ints/s)
2145 * > 40000pps ultra (8000 ints/s)
2146 */
2147 new_flow_level = ring_group->flow_level;
2148 new_int_gl = ring_group->int_gl;
2149 tqp_vector = ring_group->ring->tqp_vector;
2150 usecs = (ring_group->int_gl << 1);
2151 bytes_per_usecs = ring_group->total_bytes / usecs;
2152 /* 1000000 microseconds per second */
2153 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2154
2155 switch (new_flow_level) {
2156 case HNS3_FLOW_LOW:
2157 if (bytes_per_usecs > 10)
2158 new_flow_level = HNS3_FLOW_MID;
2159 break;
2160 case HNS3_FLOW_MID:
2161 if (bytes_per_usecs > 20)
2162 new_flow_level = HNS3_FLOW_HIGH;
2163 else if (bytes_per_usecs <= 10)
2164 new_flow_level = HNS3_FLOW_LOW;
2165 break;
2166 case HNS3_FLOW_HIGH:
2167 case HNS3_FLOW_ULTRA:
2168 default:
2169 if (bytes_per_usecs <= 20)
2170 new_flow_level = HNS3_FLOW_MID;
2171 break;
2172 }
2173
2174 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2175 (&tqp_vector->rx_group == ring_group))
2176 new_flow_level = HNS3_FLOW_ULTRA;
2177
2178 switch (new_flow_level) {
2179 case HNS3_FLOW_LOW:
2180 new_int_gl = HNS3_INT_GL_50K;
2181 break;
2182 case HNS3_FLOW_MID:
2183 new_int_gl = HNS3_INT_GL_20K;
2184 break;
2185 case HNS3_FLOW_HIGH:
2186 new_int_gl = HNS3_INT_GL_18K;
2187 break;
2188 case HNS3_FLOW_ULTRA:
2189 new_int_gl = HNS3_INT_GL_8K;
2190 break;
2191 default:
2192 break;
2193 }
2194
2195 ring_group->total_bytes = 0;
2196 ring_group->total_packets = 0;
2197 ring_group->flow_level = new_flow_level;
2198 if (new_int_gl != ring_group->int_gl) {
2199 ring_group->int_gl = new_int_gl;
2200 return true;
2201 }
2202 return false;
2203 }
2204
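/* hns3_update_new_int_gl - recompute the GL values for both ring groups of a
 * vector and, if both changed, program the larger of the two so RX and TX
 * share one coalescing setting.
 */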
2205 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2206 {
2207 u16 rx_int_gl, tx_int_gl;
2208 bool rx, tx;
2209
2210 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2211 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2212 rx_int_gl = tqp_vector->rx_group.int_gl;
2213 tx_int_gl = tqp_vector->tx_group.int_gl;
2214 if (rx && tx) {
2215 if (rx_int_gl > tx_int_gl) {
2216 tqp_vector->tx_group.int_gl = rx_int_gl;
2217 tqp_vector->tx_group.flow_level =
2218 tqp_vector->rx_group.flow_level;
2219 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2220 } else {
2221 tqp_vector->rx_group.int_gl = tx_int_gl;
2222 tqp_vector->rx_group.flow_level =
2223 tqp_vector->tx_group.flow_level;
2224 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2225 }
2226 }
2227 }
2228
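/* hns3_nic_common_poll - NAPI poll callback shared by all TQP vectors. TX
 * rings are cleaned with the full budget, RX rings with budget/num_tqps
 * (at least 1). When all work is done, the coalescing values are updated
 * and the vector interrupt is re-enabled.
 */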
2229 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2230 {
2231 struct hns3_enet_ring *ring;
2232 int rx_pkt_total = 0;
2233
2234 struct hns3_enet_tqp_vector *tqp_vector =
2235 container_of(napi, struct hns3_enet_tqp_vector, napi);
2236 bool clean_complete = true;
2237 int rx_budget;
2238
2239 /* Since the actual Tx work is minimal, we can give the Tx a larger
2240 * budget and be more aggressive about cleaning up the Tx descriptors.
2241 */
2242 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2243 if (!hns3_clean_tx_ring(ring, budget))
2244 clean_complete = false;
2245 }
2246
2247 /* Make sure the rx ring budget is not smaller than 1 */
2248 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2249
2250 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2251 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
2252
2253 if (rx_cleaned >= rx_budget)
2254 clean_complete = false;
2255
2256 rx_pkt_total += rx_cleaned;
2257 }
2258
2259 tqp_vector->rx_group.total_packets += rx_pkt_total;
2260
2261 if (!clean_complete)
2262 return budget;
2263
2264 napi_complete(napi);
2265 hns3_update_new_int_gl(tqp_vector);
2266 hns3_mask_vector_irq(tqp_vector, 1);
2267
2268 return rx_pkt_total;
2269 }
2270
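/* hns3_get_vector_ring_chain - build the ring chain list used to map this
 * vector's TX and RX rings to its interrupt. The caller owns @head; the
 * additional nodes are allocated here and must be released with
 * hns3_free_vector_ring_chain().
 */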
2271 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2272 struct hnae3_ring_chain_node *head)
2273 {
2274 struct pci_dev *pdev = tqp_vector->handle->pdev;
2275 struct hnae3_ring_chain_node *cur_chain = head;
2276 struct hnae3_ring_chain_node *chain;
2277 struct hns3_enet_ring *tx_ring;
2278 struct hns3_enet_ring *rx_ring;
2279
2280 tx_ring = tqp_vector->tx_group.ring;
2281 if (tx_ring) {
2282 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2283 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2284 HNAE3_RING_TYPE_TX);
2285
2286 cur_chain->next = NULL;
2287
2288 while (tx_ring->next) {
2289 tx_ring = tx_ring->next;
2290
2291 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2292 GFP_KERNEL);
2293 if (!chain)
2294 return -ENOMEM;
2295
2296 cur_chain->next = chain;
2297 chain->tqp_index = tx_ring->tqp->tqp_index;
2298 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2299 HNAE3_RING_TYPE_TX);
2300
2301 cur_chain = chain;
2302 }
2303 }
2304
2305 rx_ring = tqp_vector->rx_group.ring;
2306 if (!tx_ring && rx_ring) {
2307 cur_chain->next = NULL;
2308 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2309 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2310 HNAE3_RING_TYPE_RX);
2311
2312 rx_ring = rx_ring->next;
2313 }
2314
2315 while (rx_ring) {
2316 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2317 if (!chain)
2318 return -ENOMEM;
2319
2320 cur_chain->next = chain;
2321 chain->tqp_index = rx_ring->tqp->tqp_index;
2322 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2323 HNAE3_RING_TYPE_RX);
2324 cur_chain = chain;
2325
2326 rx_ring = rx_ring->next;
2327 }
2328
2329 return 0;
2330 }
2331
2332 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2333 struct hnae3_ring_chain_node *head)
2334 {
2335 struct pci_dev *pdev = tqp_vector->handle->pdev;
2336 struct hnae3_ring_chain_node *chain_tmp, *chain;
2337
2338 chain = head->next;
2339
2340 while (chain) {
2341 chain_tmp = chain->next;
2342 devm_kfree(&pdev->dev, chain);
2343 chain = chain_tmp;
2344 }
2345 }
2346
2347 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2348 struct hns3_enet_ring *ring)
2349 {
2350 ring->next = group->ring;
2351 group->ring = ring;
2352
2353 group->count++;
2354 }
2355
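/* hns3_nic_init_vector_data - allocate the TQP vectors, distribute the TX/RX
 * rings among them round-robin, map each ring chain to its vector in the AE
 * layer and register the NAPI handlers.
 */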
2356 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2357 {
2358 struct hnae3_ring_chain_node vector_ring_chain;
2359 struct hnae3_handle *h = priv->ae_handle;
2360 struct hns3_enet_tqp_vector *tqp_vector;
2361 struct hnae3_vector_info *vector;
2362 struct pci_dev *pdev = h->pdev;
2363 u16 tqp_num = h->kinfo.num_tqps;
2364 u16 vector_num;
2365 int ret = 0;
2366 u16 i;
2367
2368 /* RSS size, cpu online and vector_num should be the same */
2369 /* Should consider 2p/4p later */
2370 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2371 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2372 GFP_KERNEL);
2373 if (!vector)
2374 return -ENOMEM;
2375
2376 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2377
2378 priv->vector_num = vector_num;
2379 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2380 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2381 GFP_KERNEL);
2382 if (!priv->tqp_vector)
2383 return -ENOMEM;
2384
2385 for (i = 0; i < tqp_num; i++) {
2386 u16 vector_i = i % vector_num;
2387
2388 tqp_vector = &priv->tqp_vector[vector_i];
2389
2390 hns3_add_ring_to_group(&tqp_vector->tx_group,
2391 priv->ring_data[i].ring);
2392
2393 hns3_add_ring_to_group(&tqp_vector->rx_group,
2394 priv->ring_data[i + tqp_num].ring);
2395
2396 tqp_vector->idx = vector_i;
2397 tqp_vector->mask_addr = vector[vector_i].io_addr;
2398 tqp_vector->vector_irq = vector[vector_i].vector;
2399 tqp_vector->num_tqps++;
2400
2401 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2402 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2403 }
2404
2405 for (i = 0; i < vector_num; i++) {
2406 tqp_vector = &priv->tqp_vector[i];
2407
2408 tqp_vector->rx_group.total_bytes = 0;
2409 tqp_vector->rx_group.total_packets = 0;
2410 tqp_vector->tx_group.total_bytes = 0;
2411 tqp_vector->tx_group.total_packets = 0;
2412 hns3_vector_gl_rl_init(tqp_vector);
2413 tqp_vector->handle = h;
2414
2415 ret = hns3_get_vector_ring_chain(tqp_vector,
2416 &vector_ring_chain);
2417 if (ret)
2418 goto out;
2419
2420 ret = h->ae_algo->ops->map_ring_to_vector(h,
2421 tqp_vector->vector_irq, &vector_ring_chain);
2422 if (ret)
2423 goto out;
2424
2425 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2426
2427 netif_napi_add(priv->netdev, &tqp_vector->napi,
2428 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2429 }
2430
2431 out:
2432 devm_kfree(&pdev->dev, vector);
2433 return ret;
2434 }
2435
2436 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2437 {
2438 struct hnae3_ring_chain_node vector_ring_chain;
2439 struct hnae3_handle *h = priv->ae_handle;
2440 struct hns3_enet_tqp_vector *tqp_vector;
2441 struct pci_dev *pdev = h->pdev;
2442 int i, ret;
2443
2444 for (i = 0; i < priv->vector_num; i++) {
2445 tqp_vector = &priv->tqp_vector[i];
2446
2447 ret = hns3_get_vector_ring_chain(tqp_vector,
2448 &vector_ring_chain);
2449 if (ret)
2450 return ret;
2451
2452 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2453 tqp_vector->vector_irq, &vector_ring_chain);
2454 if (ret)
2455 return ret;
2456
2457 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2458
2459 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2460 (void)irq_set_affinity_hint(
2461 priv->tqp_vector[i].vector_irq,
2462 NULL);
2463 devm_free_irq(&pdev->dev,
2464 priv->tqp_vector[i].vector_irq,
2465 &priv->tqp_vector[i]);
2466 }
2467
2468 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2469
2470 netif_napi_del(&priv->tqp_vector[i].napi);
2471 }
2472
2473 devm_kfree(&pdev->dev, priv->tqp_vector);
2474
2475 return 0;
2476 }
2477
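/* hns3_ring_get_cfg - allocate and initialize one TX or RX ring for queue @q.
 * TX rings occupy ring_data[0..num_tqps-1]; RX rings occupy the second half
 * of the array at ring_data[num_tqps..2*num_tqps-1].
 */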
2478 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2479 int ring_type)
2480 {
2481 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2482 int queue_num = priv->ae_handle->kinfo.num_tqps;
2483 struct pci_dev *pdev = priv->ae_handle->pdev;
2484 struct hns3_enet_ring *ring;
2485
2486 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2487 if (!ring)
2488 return -ENOMEM;
2489
2490 if (ring_type == HNAE3_RING_TYPE_TX) {
2491 ring_data[q->tqp_index].ring = ring;
2492 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2493 } else {
2494 ring_data[q->tqp_index + queue_num].ring = ring;
2495 ring->io_base = q->io_base;
2496 }
2497
2498 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2499
2500 ring_data[q->tqp_index].queue_index = q->tqp_index;
2501
2502 ring->tqp = q;
2503 ring->desc = NULL;
2504 ring->desc_cb = NULL;
2505 ring->dev = priv->dev;
2506 ring->desc_dma_addr = 0;
2507 ring->buf_size = q->buf_size;
2508 ring->desc_num = q->desc_num;
2509 ring->next_to_use = 0;
2510 ring->next_to_clean = 0;
2511
2512 return 0;
2513 }
2514
2515 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2516 struct hns3_nic_priv *priv)
2517 {
2518 int ret;
2519
2520 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2521 if (ret)
2522 return ret;
2523
2524 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2525 if (ret)
2526 return ret;
2527
2528 return 0;
2529 }
2530
2531 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2532 {
2533 struct hnae3_handle *h = priv->ae_handle;
2534 struct pci_dev *pdev = h->pdev;
2535 int i, ret;
2536
2537 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2538 sizeof(*priv->ring_data) * 2,
2539 GFP_KERNEL);
2540 if (!priv->ring_data)
2541 return -ENOMEM;
2542
2543 for (i = 0; i < h->kinfo.num_tqps; i++) {
2544 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2545 if (ret)
2546 goto err;
2547 }
2548
2549 return 0;
2550 err:
2551 devm_kfree(&pdev->dev, priv->ring_data);
2552 return ret;
2553 }
2554
2555 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2556 {
2557 int ret;
2558
2559 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2560 return -EINVAL;
2561
2562 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2563 GFP_KERNEL);
2564 if (!ring->desc_cb) {
2565 ret = -ENOMEM;
2566 goto out;
2567 }
2568
2569 ret = hns3_alloc_desc(ring);
2570 if (ret)
2571 goto out_with_desc_cb;
2572
2573 if (!HNAE3_IS_TX_RING(ring)) {
2574 ret = hns3_alloc_ring_buffers(ring);
2575 if (ret)
2576 goto out_with_desc;
2577 }
2578
2579 return 0;
2580
2581 out_with_desc:
2582 hns3_free_desc(ring);
2583 out_with_desc_cb:
2584 kfree(ring->desc_cb);
2585 ring->desc_cb = NULL;
2586 out:
2587 return ret;
2588 }
2589
2590 static void hns3_fini_ring(struct hns3_enet_ring *ring)
2591 {
2592 hns3_free_desc(ring);
2593 kfree(ring->desc_cb);
2594 ring->desc_cb = NULL;
2595 ring->next_to_clean = 0;
2596 ring->next_to_use = 0;
2597 }
2598
2599 int hns3_buf_size2type(u32 buf_size)
2600 {
2601 int bd_size_type;
2602
2603 switch (buf_size) {
2604 case 512:
2605 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2606 break;
2607 case 1024:
2608 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2609 break;
2610 case 2048:
2611 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2612 break;
2613 case 4096:
2614 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2615 break;
2616 default:
2617 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2618 }
2619
2620 return bd_size_type;
2621 }
2622
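/* hns3_init_ring_hw - program the ring's descriptor DMA base address, buffer
 * size type and BD number into the TX or RX queue registers. The upper bits
 * of the address are written as (dma >> 31) >> 1 to avoid an undefined
 * 32-bit shift when dma_addr_t is 32 bits wide.
 */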
2623 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2624 {
2625 dma_addr_t dma = ring->desc_dma_addr;
2626 struct hnae3_queue *q = ring->tqp;
2627
2628 if (!HNAE3_IS_TX_RING(ring)) {
2629 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2630 (u32)dma);
2631 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2632 (u32)((dma >> 31) >> 1));
2633
2634 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2635 hns3_buf_size2type(ring->buf_size));
2636 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2637 ring->desc_num / 8 - 1);
2638
2639 } else {
2640 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2641 (u32)dma);
2642 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2643 (u32)((dma >> 31) >> 1));
2644
2645 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2646 hns3_buf_size2type(ring->buf_size));
2647 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2648 ring->desc_num / 8 - 1);
2649 }
2650 }
2651
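/* hns3_init_all_ring - allocate descriptor memory for every TX and RX ring
 * and program the ring registers. On failure, the rings initialized so far
 * are torn down again.
 */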
2652 static int hns3_init_all_ring(struct hns3_nic_priv *priv)
2653 {
2654 struct hnae3_handle *h = priv->ae_handle;
2655 int ring_num = h->kinfo.num_tqps * 2;
2656 int i, j;
2657 int ret;
2658
2659 for (i = 0; i < ring_num; i++) {
2660 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2661 if (ret) {
2662 dev_err(priv->dev,
2663 "Alloc ring memory fail! ret=%d\n", ret);
2664 goto out_when_alloc_ring_memory;
2665 }
2666
2667 hns3_init_ring_hw(priv->ring_data[i].ring);
2668
2669 u64_stats_init(&priv->ring_data[i].ring->syncp);
2670 }
2671
2672 return 0;
2673
2674 out_when_alloc_ring_memory:
2675 for (j = i - 1; j >= 0; j--)
2676 hns3_fini_ring(priv->ring_data[j].ring);
2677
2678 return -ENOMEM;
2679 }
2680
2681 static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2682 {
2683 struct hnae3_handle *h = priv->ae_handle;
2684 int i;
2685
2686 for (i = 0; i < h->kinfo.num_tqps; i++) {
2687 if (h->ae_algo->ops->reset_queue)
2688 h->ae_algo->ops->reset_queue(h, i);
2689
2690 hns3_fini_ring(priv->ring_data[i].ring);
2691 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2692 }
2693
2694 return 0;
2695 }
2696
2697 /* Set the MAC address if it is configured, or leave it to the AE driver */
2698 static void hns3_init_mac_addr(struct net_device *netdev)
2699 {
2700 struct hns3_nic_priv *priv = netdev_priv(netdev);
2701 struct hnae3_handle *h = priv->ae_handle;
2702 u8 mac_addr_temp[ETH_ALEN];
2703
2704 if (h->ae_algo->ops->get_mac_addr) {
2705 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2706 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2707 }
2708
2709 /* Check if the MAC address is valid, if not get a random one */
2710 if (!is_valid_ether_addr(netdev->dev_addr)) {
2711 eth_hw_addr_random(netdev);
2712 dev_warn(priv->dev, "using random MAC address %pM\n",
2713 netdev->dev_addr);
2714 }
2715
2716 if (h->ae_algo->ops->set_mac_addr)
2717 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2718
2719 }
2720
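/* hns3_nic_set_priv_ops - select the TSO-aware descriptor fill and queue
 * stop helpers when NETIF_F_TSO/NETIF_F_TSO6 is enabled, otherwise use the
 * plain variants.
 */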
2721 static void hns3_nic_set_priv_ops(struct net_device *netdev)
2722 {
2723 struct hns3_nic_priv *priv = netdev_priv(netdev);
2724
2725 if ((netdev->features & NETIF_F_TSO) ||
2726 (netdev->features & NETIF_F_TSO6)) {
2727 priv->ops.fill_desc = hns3_fill_desc_tso;
2728 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2729 } else {
2730 priv->ops.fill_desc = hns3_fill_desc;
2731 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2732 }
2733 }
2734
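/* hns3_client_init - create and register the netdev for a new hnae3 handle:
 * set up the MAC address, features, rings and TQP vectors, then register
 * the net device and DCB support.
 */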
2735 static int hns3_client_init(struct hnae3_handle *handle)
2736 {
2737 struct pci_dev *pdev = handle->pdev;
2738 struct hns3_nic_priv *priv;
2739 struct net_device *netdev;
2740 int ret;
2741
2742 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2743 handle->kinfo.num_tqps);
2744 if (!netdev)
2745 return -ENOMEM;
2746
2747 priv = netdev_priv(netdev);
2748 priv->dev = &pdev->dev;
2749 priv->netdev = netdev;
2750 priv->ae_handle = handle;
2751
2752 handle->kinfo.netdev = netdev;
2753 handle->priv = (void *)priv;
2754
2755 hns3_init_mac_addr(netdev);
2756
2757 hns3_set_default_feature(netdev);
2758
2759 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2760 netdev->priv_flags |= IFF_UNICAST_FLT;
2761 netdev->netdev_ops = &hns3_nic_netdev_ops;
2762 SET_NETDEV_DEV(netdev, &pdev->dev);
2763 hns3_ethtool_set_ops(netdev);
2764 hns3_nic_set_priv_ops(netdev);
2765
2766 /* Carrier off reporting is important to ethtool even BEFORE open */
2767 netif_carrier_off(netdev);
2768
2769 ret = hns3_get_ring_config(priv);
2770 if (ret) {
2771 ret = -ENOMEM;
2772 goto out_get_ring_cfg;
2773 }
2774
2775 ret = hns3_nic_init_vector_data(priv);
2776 if (ret) {
2777 ret = -ENOMEM;
2778 goto out_init_vector_data;
2779 }
2780
2781 ret = hns3_init_all_ring(priv);
2782 if (ret) {
2783 ret = -ENOMEM;
2784 goto out_init_ring_data;
2785 }
2786
2787 ret = register_netdev(netdev);
2788 if (ret) {
2789 dev_err(priv->dev, "probe register netdev fail!\n");
2790 goto out_reg_netdev_fail;
2791 }
2792
2793 hns3_dcbnl_setup(handle);
2794
2795 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
2796 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2797
2798 return ret;
2799
2800 out_reg_netdev_fail:
2801 out_init_ring_data:
2802 (void)hns3_nic_uninit_vector_data(priv);
2803 priv->ring_data = NULL;
2804 out_init_vector_data:
2805 out_get_ring_cfg:
2806 priv->ae_handle = NULL;
2807 free_netdev(netdev);
2808 return ret;
2809 }
2810
2811 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2812 {
2813 struct net_device *netdev = handle->kinfo.netdev;
2814 struct hns3_nic_priv *priv = netdev_priv(netdev);
2815 int ret;
2816
2817 if (netdev->reg_state != NETREG_UNINITIALIZED)
2818 unregister_netdev(netdev);
2819
2820 ret = hns3_nic_uninit_vector_data(priv);
2821 if (ret)
2822 netdev_err(netdev, "uninit vector error\n");
2823
2824 ret = hns3_uninit_all_ring(priv);
2825 if (ret)
2826 netdev_err(netdev, "uninit ring error\n");
2827
2828 priv->ring_data = NULL;
2829
2830 free_netdev(netdev);
2831 }
2832
2833 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2834 {
2835 struct net_device *netdev = handle->kinfo.netdev;
2836
2837 if (!netdev)
2838 return;
2839
2840 if (linkup) {
2841 netif_carrier_on(netdev);
2842 netif_tx_wake_all_queues(netdev);
2843 netdev_info(netdev, "link up\n");
2844 } else {
2845 netif_carrier_off(netdev);
2846 netif_tx_stop_all_queues(netdev);
2847 netdev_info(netdev, "link down\n");
2848 }
2849 }
2850
2851 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
2852 {
2853 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
2854 struct net_device *ndev = kinfo->netdev;
2855 bool if_running;
2856 int ret;
2857 u8 i;
2858
2859 if (tc > HNAE3_MAX_TC)
2860 return -EINVAL;
2861
2862 if (!ndev)
2863 return -ENODEV;
2864
2865 if_running = netif_running(ndev);
2866
2867 ret = netdev_set_num_tc(ndev, tc);
2868 if (ret)
2869 return ret;
2870
2871 if (if_running) {
2872 (void)hns3_nic_net_stop(ndev);
2873 msleep(100);
2874 }
2875
2876 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
2877 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
2878 if (ret)
2879 goto err_out;
2880
2881 if (tc <= 1) {
2882 netdev_reset_tc(ndev);
2883 goto out;
2884 }
2885
2886 for (i = 0; i < HNAE3_MAX_TC; i++) {
2887 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
2888
2889 if (tc_info->enable)
2890 netdev_set_tc_queue(ndev,
2891 tc_info->tc,
2892 tc_info->tqp_count,
2893 tc_info->tqp_offset);
2894 }
2895
2896 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
2897 netdev_set_prio_tc_map(ndev, i,
2898 kinfo->prio_tc[i]);
2899 }
2900
2901 out:
2902 ret = hns3_nic_set_real_num_queue(ndev);
2903
2904 err_out:
2905 if (if_running)
2906 (void)hns3_nic_net_open(ndev);
2907
2908 return ret;
2909 }
2910
2911 const struct hnae3_client_ops client_ops = {
2912 .init_instance = hns3_client_init,
2913 .uninit_instance = hns3_client_uninit,
2914 .link_status_change = hns3_link_status_change,
2915 .setup_tc = hns3_client_setup_tc,
2916 };
2917
2918 /* hns3_init_module - Driver registration routine
2919 * hns3_init_module is the first routine called when the driver is
2920 * loaded. It registers the hnae3 client and the PCI driver.
2921 */
2922 static int __init hns3_init_module(void)
2923 {
2924 int ret;
2925
2926 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
2927 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2928
2929 client.type = HNAE3_CLIENT_KNIC;
2930 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2931 hns3_driver_name);
2932
2933 client.ops = &client_ops;
2934
2935 ret = hnae3_register_client(&client);
2936 if (ret)
2937 return ret;
2938
2939 ret = pci_register_driver(&hns3_driver);
2940 if (ret)
2941 hnae3_unregister_client(&client);
2942
2943 return ret;
2944 }
2945 module_init(hns3_init_module);
2946
2947 /* hns3_exit_module - Driver exit cleanup routine
2948 * hns3_exit_module is called just before the driver is removed
2949 * from memory.
2950 */
2951 static void __exit hns3_exit_module(void)
2952 {
2953 pci_unregister_driver(&hns3_driver);
2954 hnae3_unregister_client(&client);
2955 }
2956 module_exit(hns3_exit_module);
2957
2958 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2959 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2960 MODULE_LICENSE("GPL");
2961 MODULE_ALIAS("pci:hns-nic");