[mirror_ubuntu-bionic-kernel.git] drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
(commit: UBUNTU: SAUCE: {topost} net: hns3: modify hnae_ to hnae3_)
1 /*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/if_vlan.h>
14 #include <linux/ip.h>
15 #include <linux/ipv6.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/skbuff.h>
19 #include <linux/sctp.h>
20 #include <linux/vermagic.h>
21 #include <net/gre.h>
22 #include <net/pkt_cls.h>
23 #include <net/vxlan.h>
24
25 #include "hnae3.h"
26 #include "hns3_enet.h"
27
28 static void hns3_clear_all_ring(struct hnae3_handle *h);
29 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
30
31 static const char hns3_driver_name[] = "hns3";
32 const char hns3_driver_version[] = VERMAGIC_STRING;
33 static const char hns3_driver_string[] =
34 "Hisilicon Ethernet Network Driver for Hip08 Family";
35 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
36 static struct hnae3_client client;
37
38 /* hns3_pci_tbl - PCI Device ID Table
39 *
40 * Last entry must be all 0s
41 *
42 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
43 * Class, Class Mask, private data (not used) }
44 */
45 static const struct pci_device_id hns3_pci_tbl[] = {
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
49 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
51 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
53 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
55 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
57 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
58 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
59 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
60 /* required last entry */
61 {0, }
62 };
63 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
64
65 static irqreturn_t hns3_irq_handle(int irq, void *dev)
66 {
67 struct hns3_enet_tqp_vector *tqp_vector = dev;
68
69 napi_schedule(&tqp_vector->napi);
70
71 return IRQ_HANDLED;
72 }
73
74 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
75 {
76 struct hns3_enet_tqp_vector *tqp_vectors;
77 unsigned int i;
78
79 for (i = 0; i < priv->vector_num; i++) {
80 tqp_vectors = &priv->tqp_vector[i];
81
82 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
83 continue;
84
85 /* release the irq resource */
86 free_irq(tqp_vectors->vector_irq, tqp_vectors);
87 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
88 }
89 }
90
91 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
92 {
93 struct hns3_enet_tqp_vector *tqp_vectors;
94 int txrx_int_idx = 0;
95 int rx_int_idx = 0;
96 int tx_int_idx = 0;
97 unsigned int i;
98 int ret;
99
100 for (i = 0; i < priv->vector_num; i++) {
101 tqp_vectors = &priv->tqp_vector[i];
102
103 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
104 continue;
105
106 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
107 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
108 "%s-%s-%d", priv->netdev->name, "TxRx",
109 txrx_int_idx++);
110 txrx_int_idx++;
111 } else if (tqp_vectors->rx_group.ring) {
112 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
113 "%s-%s-%d", priv->netdev->name, "Rx",
114 rx_int_idx++);
115 } else if (tqp_vectors->tx_group.ring) {
116 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
117 "%s-%s-%d", priv->netdev->name, "Tx",
118 tx_int_idx++);
119 } else {
120 /* Skip this unused q_vector */
121 continue;
122 }
123
124 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
125
126 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
127 tqp_vectors->name,
128 tqp_vectors);
129 if (ret) {
130 netdev_err(priv->netdev, "request irq(%d) fail\n",
131 tqp_vectors->vector_irq);
132 return ret;
133 }
134
135 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
136 }
137
138 return 0;
139 }
140
141 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
142 u32 mask_en)
143 {
144 writel(mask_en, tqp_vector->mask_addr);
145 }
146
147 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
148 {
149 napi_enable(&tqp_vector->napi);
150
151 /* enable vector */
152 hns3_mask_vector_irq(tqp_vector, 1);
153 }
154
155 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
156 {
157 /* disable vector */
158 hns3_mask_vector_irq(tqp_vector, 0);
159
160 disable_irq(tqp_vector->vector_irq);
161 napi_disable(&tqp_vector->napi);
162 }
163
164 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
165 u32 rl_value)
166 {
167 u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
168
169 /* This defines the configuration for RL (Interrupt Rate Limiter).
170 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
171 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
172 */
173
174 if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
175 !tqp_vector->rx_group.coal.gl_adapt_enable)
176 /* According to the hardware, the range of rl_reg is
177 * 0-59 and the unit is 4.
178 */
179 rl_reg |= HNS3_INT_RL_ENABLE_MASK;
180
181 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
182 }
183
184 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
185 u32 gl_value)
186 {
187 u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
188
189 writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
190 }
191
192 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
193 u32 gl_value)
194 {
195 u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
196
197 writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
198 }
199
200 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
201 struct hns3_nic_priv *priv)
202 {
203 struct hnae3_handle *h = priv->ae_handle;
204
205 /* initialize the configuration for interrupt coalescing.
206 * 1. GL (Interrupt Gap Limiter)
207 * 2. RL (Interrupt Rate Limiter)
208 */
209
210 /* Default: enable interrupt coalescing self-adaptive and GL */
211 tqp_vector->tx_group.coal.gl_adapt_enable = 1;
212 tqp_vector->rx_group.coal.gl_adapt_enable = 1;
213
214 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
215 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
216
217 /* Default: disable RL */
218 h->kinfo.int_rl_setting = 0;
219
220 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
221 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
222 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
223 }
224
225 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
226 struct hns3_nic_priv *priv)
227 {
228 struct hnae3_handle *h = priv->ae_handle;
229
230 hns3_set_vector_coalesce_tx_gl(tqp_vector,
231 tqp_vector->tx_group.coal.int_gl);
232 hns3_set_vector_coalesce_rx_gl(tqp_vector,
233 tqp_vector->rx_group.coal.int_gl);
234 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
235 }
236
237 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
238 {
239 struct hnae3_handle *h = hns3_get_handle(netdev);
240 struct hnae3_knic_private_info *kinfo = &h->kinfo;
241 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
242 int ret;
243
244 ret = netif_set_real_num_tx_queues(netdev, queue_size);
245 if (ret) {
246 netdev_err(netdev,
247 "netif_set_real_num_tx_queues fail, ret=%d!\n",
248 ret);
249 return ret;
250 }
251
252 ret = netif_set_real_num_rx_queues(netdev, queue_size);
253 if (ret) {
254 netdev_err(netdev,
255 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
256 return ret;
257 }
258
259 return 0;
260 }
261
262 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
263 {
264 u16 free_tqps, max_rss_size, max_tqps;
265
266 h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
267 max_tqps = h->kinfo.num_tc * max_rss_size;
268
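/* The usable channel count is capped both by what RSS can spread across
 * (num_tc * max_rss_size) and by the TQPs available to this handle
 * (those already assigned plus those still free).
 */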
269 return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
270 }
271
272 static int hns3_nic_net_up(struct net_device *netdev)
273 {
274 struct hns3_nic_priv *priv = netdev_priv(netdev);
275 struct hnae3_handle *h = priv->ae_handle;
276 int i, j;
277 int ret;
278
279 ret = hns3_nic_reset_all_ring(h);
280 if (ret)
281 return ret;
282
283 /* get irq resource for all vectors */
284 ret = hns3_nic_init_irq(priv);
285 if (ret) {
286 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
287 return ret;
288 }
289
290 /* enable the vectors */
291 for (i = 0; i < priv->vector_num; i++)
292 hns3_vector_enable(&priv->tqp_vector[i]);
293
294 /* start the ae_dev */
295 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
296 if (ret)
297 goto out_start_err;
298
299 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
300
301 return 0;
302
303 out_start_err:
304 for (j = i - 1; j >= 0; j--)
305 hns3_vector_disable(&priv->tqp_vector[j]);
306
307 hns3_nic_uninit_irq(priv);
308
309 return ret;
310 }
311
312 static int hns3_nic_net_open(struct net_device *netdev)
313 {
314 struct hns3_nic_priv *priv = netdev_priv(netdev);
315 int ret;
316
317 netif_carrier_off(netdev);
318
319 ret = hns3_nic_set_real_num_queue(netdev);
320 if (ret)
321 return ret;
322
323 ret = hns3_nic_net_up(netdev);
324 if (ret) {
325 netdev_err(netdev,
326 "hns net up fail, ret=%d!\n", ret);
327 return ret;
328 }
329
330 priv->ae_handle->last_reset_time = jiffies;
331 return 0;
332 }
333
334 static void hns3_nic_net_down(struct net_device *netdev)
335 {
336 struct hns3_nic_priv *priv = netdev_priv(netdev);
337 const struct hnae3_ae_ops *ops;
338 int i;
339
340 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
341 return;
342
343 /* disable vectors */
344 for (i = 0; i < priv->vector_num; i++)
345 hns3_vector_disable(&priv->tqp_vector[i]);
346
347 /* stop ae_dev */
348 ops = priv->ae_handle->ae_algo->ops;
349 if (ops->stop)
350 ops->stop(priv->ae_handle);
351
352 /* free irq resources */
353 hns3_nic_uninit_irq(priv);
354
355 hns3_clear_all_ring(priv->ae_handle);
356 }
357
358 static int hns3_nic_net_stop(struct net_device *netdev)
359 {
360 netif_tx_stop_all_queues(netdev);
361 netif_carrier_off(netdev);
362
363 hns3_nic_net_down(netdev);
364
365 return 0;
366 }
367
368 static int hns3_nic_uc_sync(struct net_device *netdev,
369 const unsigned char *addr)
370 {
371 struct hnae3_handle *h = hns3_get_handle(netdev);
372
373 if (h->ae_algo->ops->add_uc_addr)
374 return h->ae_algo->ops->add_uc_addr(h, addr);
375
376 return 0;
377 }
378
379 static int hns3_nic_uc_unsync(struct net_device *netdev,
380 const unsigned char *addr)
381 {
382 struct hnae3_handle *h = hns3_get_handle(netdev);
383
384 if (h->ae_algo->ops->rm_uc_addr)
385 return h->ae_algo->ops->rm_uc_addr(h, addr);
386
387 return 0;
388 }
389
390 static int hns3_nic_mc_sync(struct net_device *netdev,
391 const unsigned char *addr)
392 {
393 struct hnae3_handle *h = hns3_get_handle(netdev);
394
395 if (h->ae_algo->ops->add_mc_addr)
396 return h->ae_algo->ops->add_mc_addr(h, addr);
397
398 return 0;
399 }
400
401 static int hns3_nic_mc_unsync(struct net_device *netdev,
402 const unsigned char *addr)
403 {
404 struct hnae3_handle *h = hns3_get_handle(netdev);
405
406 if (h->ae_algo->ops->rm_mc_addr)
407 return h->ae_algo->ops->rm_mc_addr(h, addr);
408
409 return 0;
410 }
411
412 static void hns3_nic_set_rx_mode(struct net_device *netdev)
413 {
414 struct hnae3_handle *h = hns3_get_handle(netdev);
415
416 if (h->ae_algo->ops->set_promisc_mode) {
417 if (netdev->flags & IFF_PROMISC)
418 h->ae_algo->ops->set_promisc_mode(h, true, true);
419 else if (netdev->flags & IFF_ALLMULTI)
420 h->ae_algo->ops->set_promisc_mode(h, false, true);
421 else
422 h->ae_algo->ops->set_promisc_mode(h, false, false);
423 }
424 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
425 netdev_err(netdev, "sync uc address fail\n");
426 if (netdev->flags & IFF_MULTICAST) {
427 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
428 netdev_err(netdev, "sync mc address fail\n");
429
430 if (h->ae_algo->ops->update_mta_status)
431 h->ae_algo->ops->update_mta_status(h);
432 }
433 }
434
435 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
436 u16 *mss, u32 *type_cs_vlan_tso)
437 {
438 u32 l4_offset, hdr_len;
439 union l3_hdr_info l3;
440 union l4_hdr_info l4;
441 u32 l4_paylen;
442 int ret;
443
444 if (!skb_is_gso(skb))
445 return 0;
446
447 ret = skb_cow_head(skb, 0);
448 if (ret)
449 return ret;
450
451 l3.hdr = skb_network_header(skb);
452 l4.hdr = skb_transport_header(skb);
453
454 /* Software should clear the IPv4's checksum field when tso is
455 * needed.
456 */
457 if (l3.v4->version == 4)
458 l3.v4->check = 0;
459
460 /* tunnel packet.*/
461 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
462 SKB_GSO_GRE_CSUM |
463 SKB_GSO_UDP_TUNNEL |
464 SKB_GSO_UDP_TUNNEL_CSUM)) {
465 if ((!(skb_shinfo(skb)->gso_type &
466 SKB_GSO_PARTIAL)) &&
467 (skb_shinfo(skb)->gso_type &
468 SKB_GSO_UDP_TUNNEL_CSUM)) {
469 /* Software should clear the udp's checksum
470 * field when tso is needed.
471 */
472 l4.udp->check = 0;
473 }
474 /* reset l3&l4 pointers from outer to inner headers */
475 l3.hdr = skb_inner_network_header(skb);
476 l4.hdr = skb_inner_transport_header(skb);
477
478 /* Software should clear the IPv4's checksum field when
479 * tso is needed.
480 */
481 if (l3.v4->version == 4)
482 l3.v4->check = 0;
483 }
484
485 /* normal or tunnel packet*/
486 l4_offset = l4.hdr - skb->data;
487 hdr_len = (l4.tcp->doff * 4) + l4_offset;
488
489 /* remove payload length from inner pseudo checksum when tso*/
490 l4_paylen = skb->len - l4_offset;
491 csum_replace_by_diff(&l4.tcp->check,
492 (__force __wsum)htonl(l4_paylen));
493
494 /* find the txbd field values */
495 *paylen = skb->len - hdr_len;
496 hnae3_set_bit(*type_cs_vlan_tso,
497 HNS3_TXD_TSO_B, 1);
498
499 /* get MSS for TSO */
500 *mss = skb_shinfo(skb)->gso_size;
501
502 return 0;
503 }
504
505 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
506 u8 *il4_proto)
507 {
508 union {
509 struct iphdr *v4;
510 struct ipv6hdr *v6;
511 unsigned char *hdr;
512 } l3;
513 unsigned char *l4_hdr;
514 unsigned char *exthdr;
515 u8 l4_proto_tmp;
516 __be16 frag_off;
517
518 /* find outer header pointer */
519 l3.hdr = skb_network_header(skb);
520 l4_hdr = skb_transport_header(skb);
521
522 if (skb->protocol == htons(ETH_P_IPV6)) {
523 exthdr = l3.hdr + sizeof(*l3.v6);
524 l4_proto_tmp = l3.v6->nexthdr;
525 if (l4_hdr != exthdr)
526 ipv6_skip_exthdr(skb, exthdr - skb->data,
527 &l4_proto_tmp, &frag_off);
528 } else if (skb->protocol == htons(ETH_P_IP)) {
529 l4_proto_tmp = l3.v4->protocol;
530 } else {
531 return -EINVAL;
532 }
533
534 *ol4_proto = l4_proto_tmp;
535
536 /* tunnel packet */
537 if (!skb->encapsulation) {
538 *il4_proto = 0;
539 return 0;
540 }
541
542 /* find inner header pointer */
543 l3.hdr = skb_inner_network_header(skb);
544 l4_hdr = skb_inner_transport_header(skb);
545
546 if (l3.v6->version == 6) {
547 exthdr = l3.hdr + sizeof(*l3.v6);
548 l4_proto_tmp = l3.v6->nexthdr;
549 if (l4_hdr != exthdr)
550 ipv6_skip_exthdr(skb, exthdr - skb->data,
551 &l4_proto_tmp, &frag_off);
552 } else if (l3.v4->version == 4) {
553 l4_proto_tmp = l3.v4->protocol;
554 }
555
556 *il4_proto = l4_proto_tmp;
557
558 return 0;
559 }
560
561 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
562 u8 il4_proto, u32 *type_cs_vlan_tso,
563 u32 *ol_type_vlan_len_msec)
564 {
565 union {
566 struct iphdr *v4;
567 struct ipv6hdr *v6;
568 unsigned char *hdr;
569 } l3;
570 union {
571 struct tcphdr *tcp;
572 struct udphdr *udp;
573 struct gre_base_hdr *gre;
574 unsigned char *hdr;
575 } l4;
576 unsigned char *l2_hdr;
577 u8 l4_proto = ol4_proto;
578 u32 ol2_len;
579 u32 ol3_len;
580 u32 ol4_len;
581 u32 l2_len;
582 u32 l3_len;
583
584 l3.hdr = skb_network_header(skb);
585 l4.hdr = skb_transport_header(skb);
586
587 /* compute L2 header size for normal packet, defined in 2 Bytes */
588 l2_len = l3.hdr - skb->data;
589 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
590 HNS3_TXD_L2LEN_S, l2_len >> 1);
591
592 /* tunnel packet*/
593 if (skb->encapsulation) {
594 /* compute OL2 header size, defined in 2 Bytes */
595 ol2_len = l2_len;
596 hnae3_set_field(*ol_type_vlan_len_msec,
597 HNS3_TXD_L2LEN_M,
598 HNS3_TXD_L2LEN_S, ol2_len >> 1);
599
600 /* compute OL3 header size, defined in 4 Bytes */
601 ol3_len = l4.hdr - l3.hdr;
602 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
603 HNS3_TXD_L3LEN_S, ol3_len >> 2);
604
605 /* MAC in UDP, MAC in GRE (0x6558)*/
606 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
607 /* switch MAC header ptr from outer to inner header.*/
608 l2_hdr = skb_inner_mac_header(skb);
609
610 /* compute OL4 header size, defined in 4 Bytes. */
611 ol4_len = l2_hdr - l4.hdr;
612 hnae3_set_field(*ol_type_vlan_len_msec,
613 HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
614 ol4_len >> 2);
615
616 /* switch IP header ptr from outer to inner header */
617 l3.hdr = skb_inner_network_header(skb);
618
619 /* compute inner l2 header size, defined in 2 Bytes. */
620 l2_len = l3.hdr - l2_hdr;
621 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
622 HNS3_TXD_L2LEN_S, l2_len >> 1);
623 } else {
624 /* skb packet types not supported by hardware,
625 * so the txbd len field is not filled.
626 */
627 return;
628 }
629
630 /* switch L4 header pointer from outer to inner */
631 l4.hdr = skb_inner_transport_header(skb);
632
633 l4_proto = il4_proto;
634 }
635
636 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
637 l3_len = l4.hdr - l3.hdr;
638 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
639 HNS3_TXD_L3LEN_S, l3_len >> 2);
640
641 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
642 switch (l4_proto) {
643 case IPPROTO_TCP:
644 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
645 HNS3_TXD_L4LEN_S, l4.tcp->doff);
646 break;
647 case IPPROTO_SCTP:
648 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
649 HNS3_TXD_L4LEN_S,
650 (sizeof(struct sctphdr) >> 2));
651 break;
652 case IPPROTO_UDP:
653 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
654 HNS3_TXD_L4LEN_S,
655 (sizeof(struct udphdr) >> 2));
656 break;
657 default:
658 /* skb packet types not supported by hardware,
659 * so the txbd len field is not filled.
660 */
661 return;
662 }
663 }
664
665 /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
666 * and the packet is UDP with the IANA-assigned VXLAN destination
667 * port (4789), the hardware is expected to do the checksum offload,
668 * but it will not. In that case fall back to software
669 * checksumming.
670 */
671 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
672 {
673 #define IANA_VXLAN_PORT 4789
674 union {
675 struct tcphdr *tcp;
676 struct udphdr *udp;
677 struct gre_base_hdr *gre;
678 unsigned char *hdr;
679 } l4;
680
681 l4.hdr = skb_transport_header(skb);
682
683 if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
684 return false;
685
686 skb_checksum_help(skb);
687
688 return true;
689 }
690
691 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
692 u8 il4_proto, u32 *type_cs_vlan_tso,
693 u32 *ol_type_vlan_len_msec)
694 {
695 union {
696 struct iphdr *v4;
697 struct ipv6hdr *v6;
698 unsigned char *hdr;
699 } l3;
700 u32 l4_proto = ol4_proto;
701
702 l3.hdr = skb_network_header(skb);
703
704 /* define OL3 type and tunnel type(OL4).*/
705 if (skb->encapsulation) {
706 /* define outer network header type.*/
707 if (skb->protocol == htons(ETH_P_IP)) {
708 if (skb_is_gso(skb))
709 hnae3_set_field(*ol_type_vlan_len_msec,
710 HNS3_TXD_OL3T_M,
711 HNS3_TXD_OL3T_S,
712 HNS3_OL3T_IPV4_CSUM);
713 else
714 hnae3_set_field(*ol_type_vlan_len_msec,
715 HNS3_TXD_OL3T_M,
716 HNS3_TXD_OL3T_S,
717 HNS3_OL3T_IPV4_NO_CSUM);
718
719 } else if (skb->protocol == htons(ETH_P_IPV6)) {
720 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
721 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
722 }
723
724 /* define tunnel type(OL4).*/
725 switch (l4_proto) {
726 case IPPROTO_UDP:
727 hnae3_set_field(*ol_type_vlan_len_msec,
728 HNS3_TXD_TUNTYPE_M,
729 HNS3_TXD_TUNTYPE_S,
730 HNS3_TUN_MAC_IN_UDP);
731 break;
732 case IPPROTO_GRE:
733 hnae3_set_field(*ol_type_vlan_len_msec,
734 HNS3_TXD_TUNTYPE_M,
735 HNS3_TXD_TUNTYPE_S,
736 HNS3_TUN_NVGRE);
737 break;
738 default:
739 /* drop the skb tunnel packet if the hardware doesn't support it,
740 * because the hardware can't calculate the csum when doing TSO.
741 */
742 if (skb_is_gso(skb))
743 return -EDOM;
744
745 /* the stack computes the IP header already; the driver
746 * calculates the l4 checksum when not doing TSO.
747 */
748 skb_checksum_help(skb);
749 return 0;
750 }
751
752 l3.hdr = skb_inner_network_header(skb);
753 l4_proto = il4_proto;
754 }
755
756 if (l3.v4->version == 4) {
757 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
758 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
759
760 /* the stack computes the IP header already, the only time we
761 * need the hardware to recompute it is in the case of TSO.
762 */
763 if (skb_is_gso(skb))
764 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
765
766 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
767 } else if (l3.v6->version == 6) {
768 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
769 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
770 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
771 }
772
773 switch (l4_proto) {
774 case IPPROTO_TCP:
775 hnae3_set_field(*type_cs_vlan_tso,
776 HNS3_TXD_L4T_M,
777 HNS3_TXD_L4T_S,
778 HNS3_L4T_TCP);
779 break;
780 case IPPROTO_UDP:
781 if (hns3_tunnel_csum_bug(skb))
782 break;
783
784 hnae3_set_field(*type_cs_vlan_tso,
785 HNS3_TXD_L4T_M,
786 HNS3_TXD_L4T_S,
787 HNS3_L4T_UDP);
788 break;
789 case IPPROTO_SCTP:
790 hnae3_set_field(*type_cs_vlan_tso,
791 HNS3_TXD_L4T_M,
792 HNS3_TXD_L4T_S,
793 HNS3_L4T_SCTP);
794 break;
795 default:
796 /* drop the skb tunnel packet if the hardware doesn't support it,
797 * because the hardware can't calculate the csum when doing TSO.
798 */
799 if (skb_is_gso(skb))
800 return -EDOM;
801
802 /* the stack computes the IP header already; the driver
803 * calculates the l4 checksum when not doing TSO.
804 */
805 skb_checksum_help(skb);
806 return 0;
807 }
808
809 return 0;
810 }
811
812 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
813 {
814 /* Config bd buffer end */
815 hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
816 HNS3_TXD_BDTYPE_S, 0);
817 hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
818 hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
819 hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
820 }
821
822 static int hns3_fill_desc_vtags(struct sk_buff *skb,
823 struct hns3_enet_ring *tx_ring,
824 u32 *inner_vlan_flag,
825 u32 *out_vlan_flag,
826 u16 *inner_vtag,
827 u16 *out_vtag)
828 {
829 #define HNS3_TX_VLAN_PRIO_SHIFT 13
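/* The 802.1Q PCP (priority) field occupies the top 3 bits of the VLAN TCI,
 * hence skb->priority is shifted up by 13 below.
 */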
830
831 if (skb->protocol == htons(ETH_P_8021Q) &&
832 !(tx_ring->tqp->handle->kinfo.netdev->features &
833 NETIF_F_HW_VLAN_CTAG_TX)) {
834 /* When HW VLAN acceleration is turned off, and the stack
835 * sets the protocol to 802.1q, the driver just needs to
836 * set the protocol to the encapsulated ethertype.
837 */
838 skb->protocol = vlan_get_protocol(skb);
839 return 0;
840 }
841
842 if (skb_vlan_tag_present(skb)) {
843 u16 vlan_tag;
844
845 vlan_tag = skb_vlan_tag_get(skb);
846 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
847
848 /* Based on hw strategy, use out_vtag in two layer tag case,
849 * and use inner_vtag in one tag case.
850 */
851 if (skb->protocol == htons(ETH_P_8021Q)) {
852 hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
853 *out_vtag = vlan_tag;
854 } else {
855 hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
856 *inner_vtag = vlan_tag;
857 }
858 } else if (skb->protocol == htons(ETH_P_8021Q)) {
859 struct vlan_ethhdr *vhdr;
860 int rc;
861
862 rc = skb_cow_head(skb, 0);
863 if (rc < 0)
864 return rc;
865 vhdr = (struct vlan_ethhdr *)skb->data;
866 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
867 << HNS3_TX_VLAN_PRIO_SHIFT);
868 }
869
870 skb->protocol = vlan_get_protocol(skb);
871 return 0;
872 }
873
874 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
875 int size, dma_addr_t dma, int frag_end,
876 enum hns_desc_type type)
877 {
878 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
879 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
880 u32 ol_type_vlan_len_msec = 0;
881 u16 bdtp_fe_sc_vld_ra_ri = 0;
882 u32 type_cs_vlan_tso = 0;
883 struct sk_buff *skb;
884 u16 inner_vtag = 0;
885 u16 out_vtag = 0;
886 u32 paylen = 0;
887 u16 mss = 0;
888 __be16 protocol;
889 u8 ol4_proto;
890 u8 il4_proto;
891 int ret;
892
893 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
894 desc_cb->priv = priv;
895 desc_cb->length = size;
896 desc_cb->dma = dma;
897 desc_cb->type = type;
898
899 /* now, fill the descriptor */
900 desc->addr = cpu_to_le64(dma);
901 desc->tx.send_size = cpu_to_le16((u16)size);
902 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
903 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
904
905 if (type == DESC_TYPE_SKB) {
906 skb = (struct sk_buff *)priv;
907 paylen = skb->len;
908
909 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
910 &ol_type_vlan_len_msec,
911 &inner_vtag, &out_vtag);
912 if (unlikely(ret))
913 return ret;
914
915 if (skb->ip_summed == CHECKSUM_PARTIAL) {
916 skb_reset_mac_len(skb);
917 protocol = skb->protocol;
918
919 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
920 if (ret)
921 return ret;
922 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
923 &type_cs_vlan_tso,
924 &ol_type_vlan_len_msec);
925 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
926 &type_cs_vlan_tso,
927 &ol_type_vlan_len_msec);
928 if (ret)
929 return ret;
930
931 ret = hns3_set_tso(skb, &paylen, &mss,
932 &type_cs_vlan_tso);
933 if (ret)
934 return ret;
935 }
936
937 /* Set txbd */
938 desc->tx.ol_type_vlan_len_msec =
939 cpu_to_le32(ol_type_vlan_len_msec);
940 desc->tx.type_cs_vlan_tso_len =
941 cpu_to_le32(type_cs_vlan_tso);
942 desc->tx.paylen = cpu_to_le32(paylen);
943 desc->tx.mss = cpu_to_le16(mss);
944 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
945 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
946 }
947
948 /* move the ring pointer to the next descriptor */
949 ring_ptr_move_fw(ring, next_to_use);
950
951 return 0;
952 }
953
954 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
955 int size, dma_addr_t dma, int frag_end,
956 enum hns_desc_type type)
957 {
958 unsigned int frag_buf_num;
959 unsigned int k;
960 int sizeoflast;
961 int ret;
962
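/* ceil(size / HNS3_MAX_BD_SIZE) BDs are needed; the last BD carries the
 * remainder (or a full HNS3_MAX_BD_SIZE when size is an exact multiple).
 */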
963 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
964 sizeoflast = size % HNS3_MAX_BD_SIZE;
965 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
966
967 /* When the frag size is bigger than hardware, split this frag */
968 for (k = 0; k < frag_buf_num; k++) {
969 ret = hns3_fill_desc(ring, priv,
970 (k == frag_buf_num - 1) ?
971 sizeoflast : HNS3_MAX_BD_SIZE,
972 dma + HNS3_MAX_BD_SIZE * k,
973 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
974 (type == DESC_TYPE_SKB && !k) ?
975 DESC_TYPE_SKB : DESC_TYPE_PAGE);
976 if (ret)
977 return ret;
978 }
979
980 return 0;
981 }
982
983 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
984 struct hns3_enet_ring *ring)
985 {
986 struct sk_buff *skb = *out_skb;
987 struct skb_frag_struct *frag;
988 int bdnum_for_frag;
989 int frag_num;
990 int buf_num;
991 int size;
992 int i;
993
994 size = skb_headlen(skb);
995 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
996
997 frag_num = skb_shinfo(skb)->nr_frags;
998 for (i = 0; i < frag_num; i++) {
999 frag = &skb_shinfo(skb)->frags[i];
1000 size = skb_frag_size(frag);
1001 bdnum_for_frag =
1002 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1003 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
1004 return -ENOMEM;
1005
1006 buf_num += bdnum_for_frag;
1007 }
1008
1009 if (buf_num > ring_space(ring))
1010 return -EBUSY;
1011
1012 *bnum = buf_num;
1013 return 0;
1014 }
1015
1016 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1017 struct hns3_enet_ring *ring)
1018 {
1019 struct sk_buff *skb = *out_skb;
1020 int buf_num;
1021
1022 /* No. of segments (plus a header) */
1023 buf_num = skb_shinfo(skb)->nr_frags + 1;
1024
1025 if (buf_num > ring_space(ring))
1026 return -EBUSY;
1027
1028 *bnum = buf_num;
1029
1030 return 0;
1031 }
1032
1033 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
1034 {
1035 struct device *dev = ring_to_dev(ring);
1036 unsigned int i;
1037
1038 for (i = 0; i < ring->desc_num; i++) {
1039 /* check if this is where we started */
1040 if (ring->next_to_use == next_to_use_orig)
1041 break;
1042
1043 /* unmap the descriptor dma address */
1044 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1045 dma_unmap_single(dev,
1046 ring->desc_cb[ring->next_to_use].dma,
1047 ring->desc_cb[ring->next_to_use].length,
1048 DMA_TO_DEVICE);
1049 else
1050 dma_unmap_page(dev,
1051 ring->desc_cb[ring->next_to_use].dma,
1052 ring->desc_cb[ring->next_to_use].length,
1053 DMA_TO_DEVICE);
1054
1055 /* rollback one */
1056 ring_ptr_move_bw(ring, next_to_use);
1057 }
1058 }
1059
1060 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1061 {
1062 struct hns3_nic_priv *priv = netdev_priv(netdev);
1063 struct hns3_nic_ring_data *ring_data =
1064 &tx_ring_data(priv, skb->queue_mapping);
1065 struct hns3_enet_ring *ring = ring_data->ring;
1066 struct device *dev = priv->dev;
1067 struct netdev_queue *dev_queue;
1068 struct skb_frag_struct *frag;
1069 int next_to_use_head;
1070 int next_to_use_frag;
1071 dma_addr_t dma;
1072 int buf_num;
1073 int seg_num;
1074 int size;
1075 int ret;
1076 int i;
1077
1078 /* Prefetch the data used later */
1079 prefetch(skb->data);
1080
1081 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1082 case -EBUSY:
1083 u64_stats_update_begin(&ring->syncp);
1084 ring->stats.tx_busy++;
1085 u64_stats_update_end(&ring->syncp);
1086
1087 goto out_net_tx_busy;
1088 case -ENOMEM:
1089 u64_stats_update_begin(&ring->syncp);
1090 ring->stats.sw_err_cnt++;
1091 u64_stats_update_end(&ring->syncp);
1092 netdev_err(netdev, "no memory to xmit!\n");
1093
1094 goto out_err_tx_ok;
1095 default:
1096 break;
1097 }
1098
1099 /* No. of segments (plus a header) */
1100 seg_num = skb_shinfo(skb)->nr_frags + 1;
1101 /* Fill the first part */
1102 size = skb_headlen(skb);
1103
1104 next_to_use_head = ring->next_to_use;
1105
1106 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1107 if (dma_mapping_error(dev, dma)) {
1108 netdev_err(netdev, "TX head DMA map failed\n");
1109 ring->stats.sw_err_cnt++;
1110 goto out_err_tx_ok;
1111 }
1112
1113 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1114 DESC_TYPE_SKB);
1115 if (ret)
1116 goto head_dma_map_err;
1117
1118 next_to_use_frag = ring->next_to_use;
1119 /* Fill the fragments */
1120 for (i = 1; i < seg_num; i++) {
1121 frag = &skb_shinfo(skb)->frags[i - 1];
1122 size = skb_frag_size(frag);
1123 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1124 if (dma_mapping_error(dev, dma)) {
1125 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
1126 ring->stats.sw_err_cnt++;
1127 goto frag_dma_map_err;
1128 }
1129 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1130 seg_num - 1 == i ? 1 : 0,
1131 DESC_TYPE_PAGE);
1132
1133 if (ret)
1134 goto frag_dma_map_err;
1135 }
1136
1137 /* All descriptors have been filled; notify the stack and hardware */
1138 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1139 netdev_tx_sent_queue(dev_queue, skb->len);
1140
1141 wmb(); /* Commit all data before submit */
1142
1143 hnae3_queue_xmit(ring->tqp, buf_num);
1144
1145 return NETDEV_TX_OK;
1146
1147 frag_dma_map_err:
1148 hns_nic_dma_unmap(ring, next_to_use_frag);
1149
1150 head_dma_map_err:
1151 hns_nic_dma_unmap(ring, next_to_use_head);
1152
1153 out_err_tx_ok:
1154 dev_kfree_skb_any(skb);
1155 return NETDEV_TX_OK;
1156
1157 out_net_tx_busy:
1158 netif_stop_subqueue(netdev, ring_data->queue_index);
1159 smp_mb(); /* Commit all data before submit */
1160
1161 return NETDEV_TX_BUSY;
1162 }
1163
1164 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1165 {
1166 struct hnae3_handle *h = hns3_get_handle(netdev);
1167 struct sockaddr *mac_addr = p;
1168 int ret;
1169
1170 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1171 return -EADDRNOTAVAIL;
1172
1173 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1174 netdev_info(netdev, "already using mac address %pM\n",
1175 mac_addr->sa_data);
1176 return 0;
1177 }
1178
1179 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1180 if (ret) {
1181 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1182 return ret;
1183 }
1184
1185 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1186
1187 return 0;
1188 }
1189
1190 static int hns3_nic_set_features(struct net_device *netdev,
1191 netdev_features_t features)
1192 {
1193 netdev_features_t changed = netdev->features ^ features;
1194 struct hns3_nic_priv *priv = netdev_priv(netdev);
1195 struct hnae3_handle *h = priv->ae_handle;
1196 int ret;
1197
1198 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1199 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1200 priv->ops.fill_desc = hns3_fill_desc_tso;
1201 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1202 } else {
1203 priv->ops.fill_desc = hns3_fill_desc;
1204 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1205 }
1206 }
1207
1208 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1209 h->ae_algo->ops->enable_vlan_filter) {
1210 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1211 h->ae_algo->ops->enable_vlan_filter(h, true);
1212 else
1213 h->ae_algo->ops->enable_vlan_filter(h, false);
1214 }
1215
1216 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1217 h->ae_algo->ops->enable_hw_strip_rxvtag) {
1218 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1219 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1220 else
1221 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1222
1223 if (ret)
1224 return ret;
1225 }
1226
1227 netdev->features = features;
1228 return 0;
1229 }
1230
1231 static void hns3_nic_get_stats64(struct net_device *netdev,
1232 struct rtnl_link_stats64 *stats)
1233 {
1234 struct hns3_nic_priv *priv = netdev_priv(netdev);
1235 int queue_num = priv->ae_handle->kinfo.num_tqps;
1236 struct hnae3_handle *handle = priv->ae_handle;
1237 struct hns3_enet_ring *ring;
1238 unsigned int start;
1239 unsigned int idx;
1240 u64 tx_bytes = 0;
1241 u64 rx_bytes = 0;
1242 u64 tx_pkts = 0;
1243 u64 rx_pkts = 0;
1244 u64 tx_drop = 0;
1245 u64 rx_drop = 0;
1246
1247 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1248 return;
1249
1250 handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1251
1252 for (idx = 0; idx < queue_num; idx++) {
1253 /* fetch the tx stats */
1254 ring = priv->ring_data[idx].ring;
1255 do {
1256 start = u64_stats_fetch_begin_irq(&ring->syncp);
1257 tx_bytes += ring->stats.tx_bytes;
1258 tx_pkts += ring->stats.tx_pkts;
1259 tx_drop += ring->stats.tx_busy;
1260 tx_drop += ring->stats.sw_err_cnt;
1261 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1262
1263 /* fetch the rx stats */
1264 ring = priv->ring_data[idx + queue_num].ring;
1265 do {
1266 start = u64_stats_fetch_begin_irq(&ring->syncp);
1267 rx_bytes += ring->stats.rx_bytes;
1268 rx_pkts += ring->stats.rx_pkts;
1269 rx_drop += ring->stats.non_vld_descs;
1270 rx_drop += ring->stats.err_pkt_len;
1271 rx_drop += ring->stats.l2_err;
1272 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1273 }
1274
1275 stats->tx_bytes = tx_bytes;
1276 stats->tx_packets = tx_pkts;
1277 stats->rx_bytes = rx_bytes;
1278 stats->rx_packets = rx_pkts;
1279
1280 stats->rx_errors = netdev->stats.rx_errors;
1281 stats->multicast = netdev->stats.multicast;
1282 stats->rx_length_errors = netdev->stats.rx_length_errors;
1283 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1284 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1285
1286 stats->tx_errors = netdev->stats.tx_errors;
1287 stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1288 stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1289 stats->collisions = netdev->stats.collisions;
1290 stats->rx_over_errors = netdev->stats.rx_over_errors;
1291 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1292 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1293 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1294 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1295 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1296 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1297 stats->tx_window_errors = netdev->stats.tx_window_errors;
1298 stats->rx_compressed = netdev->stats.rx_compressed;
1299 stats->tx_compressed = netdev->stats.tx_compressed;
1300 }
1301
1302 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1303 {
1304 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1305 struct hnae3_handle *h = hns3_get_handle(netdev);
1306 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1307 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1308 u8 tc = mqprio_qopt->qopt.num_tc;
1309 u16 mode = mqprio_qopt->mode;
1310 u8 hw = mqprio_qopt->qopt.hw;
1311 bool if_running;
1312 unsigned int i;
1313 int ret;
1314
1315 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1316 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1317 return -EOPNOTSUPP;
1318
1319 if (tc > HNAE3_MAX_TC)
1320 return -EINVAL;
1321
1322 if (!netdev)
1323 return -EINVAL;
1324
1325 if_running = netif_running(netdev);
1326 if (if_running) {
1327 hns3_nic_net_stop(netdev);
1328 msleep(100);
1329 }
1330
1331 ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1332 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1333 if (ret)
1334 goto out;
1335
1336 if (tc <= 1) {
1337 netdev_reset_tc(netdev);
1338 } else {
1339 ret = netdev_set_num_tc(netdev, tc);
1340 if (ret)
1341 goto out;
1342
1343 for (i = 0; i < HNAE3_MAX_TC; i++) {
1344 if (!kinfo->tc_info[i].enable)
1345 continue;
1346
1347 netdev_set_tc_queue(netdev,
1348 kinfo->tc_info[i].tc,
1349 kinfo->tc_info[i].tqp_count,
1350 kinfo->tc_info[i].tqp_offset);
1351 }
1352 }
1353
1354 ret = hns3_nic_set_real_num_queue(netdev);
1355
1356 out:
1357 if (if_running)
1358 hns3_nic_net_open(netdev);
1359
1360 return ret;
1361 }
1362
1363 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1364 void *type_data)
1365 {
1366 if (type != TC_SETUP_QDISC_MQPRIO)
1367 return -EOPNOTSUPP;
1368
1369 return hns3_setup_tc(dev, type_data);
1370 }
1371
1372 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1373 __be16 proto, u16 vid)
1374 {
1375 struct hnae3_handle *h = hns3_get_handle(netdev);
1376 struct hns3_nic_priv *priv = netdev_priv(netdev);
1377 int ret = -EIO;
1378
1379 if (h->ae_algo->ops->set_vlan_filter)
1380 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1381
1382 if (!ret)
1383 set_bit(vid, priv->active_vlans);
1384
1385 return ret;
1386 }
1387
1388 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1389 __be16 proto, u16 vid)
1390 {
1391 struct hnae3_handle *h = hns3_get_handle(netdev);
1392 struct hns3_nic_priv *priv = netdev_priv(netdev);
1393 int ret = -EIO;
1394
1395 if (h->ae_algo->ops->set_vlan_filter)
1396 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1397
1398 if (!ret)
1399 clear_bit(vid, priv->active_vlans);
1400
1401 return ret;
1402 }
1403
1404 static void hns3_restore_vlan(struct net_device *netdev)
1405 {
1406 struct hns3_nic_priv *priv = netdev_priv(netdev);
1407 u16 vid;
1408 int ret;
1409
1410 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1411 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1412 if (ret)
1413 netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
1414 vid, ret);
1415 }
1416 }
1417
1418 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1419 u8 qos, __be16 vlan_proto)
1420 {
1421 struct hnae3_handle *h = hns3_get_handle(netdev);
1422 int ret = -EIO;
1423
1424 if (h->ae_algo->ops->set_vf_vlan_filter)
1425 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1426 qos, vlan_proto);
1427
1428 return ret;
1429 }
1430
1431 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1432 {
1433 struct hnae3_handle *h = hns3_get_handle(netdev);
1434 bool if_running = netif_running(netdev);
1435 int ret;
1436
1437 if (!h->ae_algo->ops->set_mtu)
1438 return -EOPNOTSUPP;
1439
1440 /* if this was called with netdev up then bring netdevice down */
1441 if (if_running) {
1442 (void)hns3_nic_net_stop(netdev);
1443 msleep(100);
1444 }
1445
1446 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1447 if (ret) {
1448 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1449 ret);
1450 return ret;
1451 }
1452
1453 netdev->mtu = new_mtu;
1454
1455 /* if the netdev was running earlier, bring it up again */
1456 if (if_running && hns3_nic_net_open(netdev))
1457 ret = -EINVAL;
1458
1459 return ret;
1460 }
1461
1462 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1463 {
1464 struct hns3_nic_priv *priv = netdev_priv(ndev);
1465 struct hns3_enet_ring *tx_ring = NULL;
1466 int timeout_queue = 0;
1467 int hw_head, hw_tail;
1468 int i;
1469
1470 /* Find the stopped queue the same way the stack does */
1471 for (i = 0; i < ndev->real_num_tx_queues; i++) {
1472 struct netdev_queue *q;
1473 unsigned long trans_start;
1474
1475 q = netdev_get_tx_queue(ndev, i);
1476 trans_start = q->trans_start;
1477 if (netif_xmit_stopped(q) &&
1478 time_after(jiffies,
1479 (trans_start + ndev->watchdog_timeo))) {
1480 timeout_queue = i;
1481 break;
1482 }
1483 }
1484
1485 if (i == ndev->num_tx_queues) {
1486 netdev_info(ndev,
1487 "no netdev TX timeout queue found, timeout count: %llu\n",
1488 priv->tx_timeout_count);
1489 return false;
1490 }
1491
1492 tx_ring = priv->ring_data[timeout_queue].ring;
1493
1494 hw_head = readl_relaxed(tx_ring->tqp->io_base +
1495 HNS3_RING_TX_RING_HEAD_REG);
1496 hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1497 HNS3_RING_TX_RING_TAIL_REG);
1498 netdev_info(ndev,
1499 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1500 priv->tx_timeout_count,
1501 timeout_queue,
1502 tx_ring->next_to_use,
1503 tx_ring->next_to_clean,
1504 hw_head,
1505 hw_tail,
1506 readl(tx_ring->tqp_vector->mask_addr));
1507
1508 return true;
1509 }
1510
1511 static void hns3_nic_net_timeout(struct net_device *ndev)
1512 {
1513 struct hns3_nic_priv *priv = netdev_priv(ndev);
1514 struct hnae3_handle *h = priv->ae_handle;
1515
1516 if (!hns3_get_tx_timeo_queue_info(ndev))
1517 return;
1518
1519 priv->tx_timeout_count++;
1520
1521 if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
1522 return;
1523
1524 /* request the reset */
1525 if (h->ae_algo->ops->reset_event)
1526 h->ae_algo->ops->reset_event(h);
1527 }
1528
1529 static const struct net_device_ops hns3_nic_netdev_ops = {
1530 .ndo_open = hns3_nic_net_open,
1531 .ndo_stop = hns3_nic_net_stop,
1532 .ndo_start_xmit = hns3_nic_net_xmit,
1533 .ndo_tx_timeout = hns3_nic_net_timeout,
1534 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1535 .ndo_change_mtu = hns3_nic_change_mtu,
1536 .ndo_set_features = hns3_nic_set_features,
1537 .ndo_get_stats64 = hns3_nic_get_stats64,
1538 .ndo_setup_tc = hns3_nic_setup_tc,
1539 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1540 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1541 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1542 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1543 };
1544
1545 static bool hns3_is_phys_func(struct pci_dev *pdev)
1546 {
1547 u32 dev_id = pdev->device;
1548
1549 switch (dev_id) {
1550 case HNAE3_DEV_ID_GE:
1551 case HNAE3_DEV_ID_25GE:
1552 case HNAE3_DEV_ID_25GE_RDMA:
1553 case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1554 case HNAE3_DEV_ID_50GE_RDMA:
1555 case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1556 case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1557 return true;
1558 case HNAE3_DEV_ID_100G_VF:
1559 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1560 return false;
1561 default:
1562 dev_warn(&pdev->dev, "un-recognized pci device-id %d",
1563 dev_id);
1564 }
1565
1566 return false;
1567 }
1568
1569 static void hns3_disable_sriov(struct pci_dev *pdev)
1570 {
1571 /* If our VFs are assigned we cannot shut down SR-IOV
1572 * without causing issues, so just leave the hardware
1573 * available but disabled
1574 */
1575 if (pci_vfs_assigned(pdev)) {
1576 dev_warn(&pdev->dev,
1577 "disabling driver while VFs are assigned\n");
1578 return;
1579 }
1580
1581 pci_disable_sriov(pdev);
1582 }
1583
1584 /* hns3_probe - Device initialization routine
1585 * @pdev: PCI device information struct
1586 * @ent: entry in hns3_pci_tbl
1587 *
1588 * hns3_probe initializes a PF identified by a pci_dev structure.
1589 * The OS initialization, configuring of the PF private structure,
1590 * and a hardware reset occur.
1591 *
1592 * Returns 0 on success, negative on failure
1593 */
1594 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1595 {
1596 struct hnae3_ae_dev *ae_dev;
1597 int ret;
1598
1599 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1600 GFP_KERNEL);
1601 if (!ae_dev) {
1602 ret = -ENOMEM;
1603 return ret;
1604 }
1605
1606 ae_dev->pdev = pdev;
1607 ae_dev->flag = ent->driver_data;
1608 ae_dev->dev_type = HNAE3_DEV_KNIC;
1609 pci_set_drvdata(pdev, ae_dev);
1610
1611 hnae3_register_ae_dev(ae_dev);
1612
1613 return 0;
1614 }
1615
1616 /* hns3_remove - Device removal routine
1617 * @pdev: PCI device information struct
1618 */
1619 static void hns3_remove(struct pci_dev *pdev)
1620 {
1621 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1622
1623 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1624 hns3_disable_sriov(pdev);
1625
1626 hnae3_unregister_ae_dev(ae_dev);
1627 }
1628
1629 /**
1630 * hns3_pci_sriov_configure
1631 * @pdev: pointer to a pci_dev structure
1632 * @num_vfs: number of VFs to allocate
1633 *
1634 * Enable or change the number of VFs. Called when the user updates the number
1635 * of VFs in sysfs.
1636 **/
1637 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1638 {
1639 int ret;
1640
1641 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1642 dev_warn(&pdev->dev, "Can not config SRIOV\n");
1643 return -EINVAL;
1644 }
1645
1646 if (num_vfs) {
1647 ret = pci_enable_sriov(pdev, num_vfs);
1648 if (ret)
1649 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1650 else
1651 return num_vfs;
1652 } else if (!pci_vfs_assigned(pdev)) {
1653 pci_disable_sriov(pdev);
1654 } else {
1655 dev_warn(&pdev->dev,
1656 "Unable to free VFs because some are assigned to VMs.\n");
1657 }
1658
1659 return 0;
1660 }
1661
1662 static struct pci_driver hns3_driver = {
1663 .name = hns3_driver_name,
1664 .id_table = hns3_pci_tbl,
1665 .probe = hns3_probe,
1666 .remove = hns3_remove,
1667 .sriov_configure = hns3_pci_sriov_configure,
1668 };
1669
1670 /* set the default features for the hns3 netdev */
1671 static void hns3_set_default_feature(struct net_device *netdev)
1672 {
1673 netdev->priv_flags |= IFF_UNICAST_FLT;
1674
1675 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1676 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1677 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1678 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1679 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1680
1681 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1682
1683 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1684
1685 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1686 NETIF_F_HW_VLAN_CTAG_FILTER |
1687 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1688 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1689 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1690 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1691 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1692
1693 netdev->vlan_features |=
1694 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1695 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1696 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1697 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1698 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1699
1700 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1701 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1702 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1703 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1704 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1705 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1706 }
1707
1708 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1709 struct hns3_desc_cb *cb)
1710 {
1711 unsigned int order = hnae3_page_order(ring);
1712 struct page *p;
1713
1714 p = dev_alloc_pages(order);
1715 if (!p)
1716 return -ENOMEM;
1717
1718 cb->priv = p;
1719 cb->page_offset = 0;
1720 cb->reuse_flag = 0;
1721 cb->buf = page_address(p);
1722 cb->length = hnae3_page_size(ring);
1723 cb->type = DESC_TYPE_PAGE;
1724
1725 return 0;
1726 }
1727
1728 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1729 struct hns3_desc_cb *cb)
1730 {
1731 if (cb->type == DESC_TYPE_SKB)
1732 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1733 else if (!HNAE3_IS_TX_RING(ring))
1734 put_page((struct page *)cb->priv);
1735 memset(cb, 0, sizeof(*cb));
1736 }
1737
1738 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1739 {
1740 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1741 cb->length, ring_to_dma_dir(ring));
1742
1743 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1744 return -EIO;
1745
1746 return 0;
1747 }
1748
1749 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1750 struct hns3_desc_cb *cb)
1751 {
1752 if (cb->type == DESC_TYPE_SKB)
1753 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1754 ring_to_dma_dir(ring));
1755 else
1756 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1757 ring_to_dma_dir(ring));
1758 }
1759
1760 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1761 {
1762 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1763 ring->desc[i].addr = 0;
1764 }
1765
1766 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1767 {
1768 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1769
1770 if (!ring->desc_cb[i].dma)
1771 return;
1772
1773 hns3_buffer_detach(ring, i);
1774 hns3_free_buffer(ring, cb);
1775 }
1776
1777 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1778 {
1779 int i;
1780
1781 for (i = 0; i < ring->desc_num; i++)
1782 hns3_free_buffer_detach(ring, i);
1783 }
1784
1785 /* free desc along with its attached buffer */
1786 static void hns3_free_desc(struct hns3_enet_ring *ring)
1787 {
1788 int size = ring->desc_num * sizeof(ring->desc[0]);
1789
1790 hns3_free_buffers(ring);
1791
1792 if (ring->desc) {
1793 dma_free_coherent(ring_to_dev(ring), size,
1794 ring->desc, ring->desc_dma_addr);
1795 ring->desc = NULL;
1796 }
1797 }
1798
1799 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1800 {
1801 int size = ring->desc_num * sizeof(ring->desc[0]);
1802
1803 ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
1804 &ring->desc_dma_addr,
1805 GFP_KERNEL);
1806 if (!ring->desc)
1807 return -ENOMEM;
1808
1809 return 0;
1810 }
1811
1812 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1813 struct hns3_desc_cb *cb)
1814 {
1815 int ret;
1816
1817 ret = hns3_alloc_buffer(ring, cb);
1818 if (ret)
1819 goto out;
1820
1821 ret = hns3_map_buffer(ring, cb);
1822 if (ret)
1823 goto out_with_buf;
1824
1825 return 0;
1826
1827 out_with_buf:
1828 hns3_free_buffer(ring, cb);
1829 out:
1830 return ret;
1831 }
1832
1833 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1834 {
1835 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1836
1837 if (ret)
1838 return ret;
1839
1840 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1841
1842 return 0;
1843 }
1844
1845 /* Allocate memory for raw packets and map it for DMA */
1846 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1847 {
1848 int i, j, ret;
1849
1850 for (i = 0; i < ring->desc_num; i++) {
1851 ret = hns3_alloc_buffer_attach(ring, i);
1852 if (ret)
1853 goto out_buffer_fail;
1854 }
1855
1856 return 0;
1857
1858 out_buffer_fail:
1859 for (j = i - 1; j >= 0; j--)
1860 hns3_free_buffer_detach(ring, j);
1861 return ret;
1862 }
1863
1864 /* detach an in-use buffer and replace it with a reserved one */
1865 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1866 struct hns3_desc_cb *res_cb)
1867 {
1868 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1869 ring->desc_cb[i] = *res_cb;
1870 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1871 ring->desc[i].rx.bd_base_info = 0;
1872 }
1873
1874 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1875 {
1876 ring->desc_cb[i].reuse_flag = 0;
1877 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1878 + ring->desc_cb[i].page_offset);
1879 ring->desc[i].rx.bd_base_info = 0;
1880 }
1881
1882 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1883 int *pkts)
1884 {
1885 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1886
1887 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1888 (*bytes) += desc_cb->length;
1889 /* desc_cb will be cleaned after hns3_free_buffer_detach() */
1890 hns3_free_buffer_detach(ring, ring->next_to_clean);
1891
1892 ring_ptr_move_fw(ring, next_to_clean);
1893 }
1894
1895 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1896 {
1897 int u = ring->next_to_use;
1898 int c = ring->next_to_clean;
1899
1900 if (unlikely(h > ring->desc_num))
1901 return 0;
1902
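/* The reported head is valid only if it lies in the half-open interval
 * (next_to_clean, next_to_use], taking ring wrap-around into account.
 */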
1903 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1904 }
1905
1906 bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1907 {
1908 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1909 struct netdev_queue *dev_queue;
1910 int bytes, pkts;
1911 int head;
1912
1913 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1914 rmb(); /* Make sure head is ready before touch any data */
1915
1916 if (is_ring_empty(ring) || head == ring->next_to_clean)
1917 return true; /* no data to poll */
1918
1919 if (unlikely(!is_valid_clean_head(ring, head))) {
1920 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1921 ring->next_to_use, ring->next_to_clean);
1922
1923 u64_stats_update_begin(&ring->syncp);
1924 ring->stats.io_err_cnt++;
1925 u64_stats_update_end(&ring->syncp);
1926 return true;
1927 }
1928
1929 bytes = 0;
1930 pkts = 0;
1931 while (head != ring->next_to_clean && budget) {
1932 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1933 /* Issue prefetch for next Tx descriptor */
1934 prefetch(&ring->desc_cb[ring->next_to_clean]);
1935 budget--;
1936 }
1937
1938 ring->tqp_vector->tx_group.total_bytes += bytes;
1939 ring->tqp_vector->tx_group.total_packets += pkts;
1940
1941 u64_stats_update_begin(&ring->syncp);
1942 ring->stats.tx_bytes += bytes;
1943 ring->stats.tx_pkts += pkts;
1944 u64_stats_update_end(&ring->syncp);
1945
1946 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1947 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1948
1949 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1950 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1951 /* Make sure that anybody stopping the queue after this
1952 * sees the new next_to_clean.
1953 */
1954 smp_mb();
1955 if (netif_tx_queue_stopped(dev_queue)) {
1956 netif_tx_wake_queue(dev_queue);
1957 ring->stats.restart_queue++;
1958 }
1959 }
1960
1961 return !!budget;
1962 }
1963
1964 static int hns3_desc_unused(struct hns3_enet_ring *ring)
1965 {
1966 int ntc = ring->next_to_clean;
1967 int ntu = ring->next_to_use;
1968
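/* Count the descriptors that have been cleaned but not yet refilled,
 * i.e. (ntc - ntu) modulo the ring size.
 */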
1969 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1970 }
1971
1972 static void
1973 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1974 {
1975 struct hns3_desc_cb *desc_cb;
1976 struct hns3_desc_cb res_cbs;
1977 int i, ret;
1978
1979 for (i = 0; i < cleand_count; i++) {
1980 desc_cb = &ring->desc_cb[ring->next_to_use];
1981 if (desc_cb->reuse_flag) {
1982 u64_stats_update_begin(&ring->syncp);
1983 ring->stats.reuse_pg_cnt++;
1984 u64_stats_update_end(&ring->syncp);
1985
1986 hns3_reuse_buffer(ring, ring->next_to_use);
1987 } else {
1988 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1989 if (ret) {
1990 u64_stats_update_begin(&ring->syncp);
1991 ring->stats.sw_err_cnt++;
1992 u64_stats_update_end(&ring->syncp);
1993
1994 netdev_err(ring->tqp->handle->kinfo.netdev,
1995 "hnae reserve buffer map failed.\n");
1996 break;
1997 }
1998 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1999 }
2000
2001 ring_ptr_move_fw(ring, next_to_use);
2002 }
2003
2004 wmb(); /* Make sure all descriptor updates are written before ringing the doorbell */
2005 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2006 }
2007
2008 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2009 struct hns3_enet_ring *ring, int pull_len,
2010 struct hns3_desc_cb *desc_cb)
2011 {
2012 struct hns3_desc *desc;
2013 int truesize, size;
2014 int last_offset;
2015 bool twobufs;
2016
2017 twobufs = ((PAGE_SIZE < 8192) &&
2018 hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2019
2020 desc = &ring->desc[ring->next_to_clean];
2021 size = le16_to_cpu(desc->rx.size);
2022
2023 truesize = hnae3_buf_size(ring);
2024
2025 if (!twobufs)
2026 last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2027
2028 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2029 size - pull_len, truesize);
2030
2031 /* Avoid re-using remote pages; the flag defaults to no reuse */
2032 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2033 return;
2034
2035 if (twobufs) {
2036 /* If we are only owner of page we can reuse it */
2037 if (likely(page_count(desc_cb->priv) == 1)) {
2038 /* Flip page offset to other buffer */
2039 desc_cb->page_offset ^= truesize;
2040
2041 desc_cb->reuse_flag = 1;
2042 /* Bump ref count on page before it is given */
2043 get_page(desc_cb->priv);
2044 }
2045 return;
2046 }
2047
2048 /* Move the offset up to the next buffer in the page */
2049 desc_cb->page_offset += truesize;
2050
2051 if (desc_cb->page_offset <= last_offset) {
2052 desc_cb->reuse_flag = 1;
2053 /* Bump ref count on page before it is given */
2054 get_page(desc_cb->priv);
2055 }
2056 }
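/* A sketch of the reuse scheme above, assuming 4K pages and a 2048-byte
 * buffer size (the "twobufs" case): each page holds exactly two receive
 * buffers, so when the driver is the page's only owner it simply XORs
 * page_offset between 0 and 2048 and bumps the page reference, letting
 * the ring keep one half while the stack consumes the other. With larger
 * pages (or smaller buffers) the offset instead advances by truesize
 * until last_offset is passed, after which the buffer is not reused and
 * a fresh page is mapped on the next refill.
 */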
2057
2058 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2059 struct hns3_desc *desc)
2060 {
2061 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2062 int l3_type, l4_type;
2063 u32 bd_base_info;
2064 int ol4_type;
2065 u32 l234info;
2066
2067 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2068 l234info = le32_to_cpu(desc->rx.l234_info);
2069
2070 skb->ip_summed = CHECKSUM_NONE;
2071
2072 skb_checksum_none_assert(skb);
2073
2074 if (!(netdev->features & NETIF_F_RXCSUM))
2075 return;
2076
2077 /* check if hardware has done checksum */
2078 if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2079 return;
2080
2081 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
2082 hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
2083 hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2084 hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2085 netdev_err(netdev, "L3/L4 error pkt\n");
2086 u64_stats_update_begin(&ring->syncp);
2087 ring->stats.l3l4_csum_err++;
2088 u64_stats_update_end(&ring->syncp);
2089
2090 return;
2091 }
2092
2093 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2094 HNS3_RXD_L3ID_S);
2095 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2096 HNS3_RXD_L4ID_S);
2097
2098 ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2099 HNS3_RXD_OL4ID_S);
2100 switch (ol4_type) {
2101 case HNS3_OL4_TYPE_MAC_IN_UDP:
2102 case HNS3_OL4_TYPE_NVGRE:
2103 skb->csum_level = 1;
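/* fall through: tunnelled packets still need the inner L3/L4 check below */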
2104 case HNS3_OL4_TYPE_NO_TUN:
2105 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2106 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2107 l3_type == HNS3_L3_TYPE_IPV6) &&
2108 (l4_type == HNS3_L4_TYPE_UDP ||
2109 l4_type == HNS3_L4_TYPE_TCP ||
2110 l4_type == HNS3_L4_TYPE_SCTP))
2111 skb->ip_summed = CHECKSUM_UNNECESSARY;
2112 break;
2113 }
2114 }
2115
2116 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2117 {
2118 napi_gro_receive(&ring->tqp_vector->napi, skb);
2119 }
2120
2121 static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2122 struct hns3_desc *desc, u32 l234info)
2123 {
2124 struct pci_dev *pdev = ring->tqp->handle->pdev;
2125 u16 vlan_tag;
2126
2127 if (pdev->revision == 0x20) {
2128 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2129 if (!(vlan_tag & VLAN_VID_MASK))
2130 vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2131
2132 return vlan_tag;
2133 }
2134
2135 #define HNS3_STRP_OUTER_VLAN 0x1
2136 #define HNS3_STRP_INNER_VLAN 0x2
2137
2138 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2139 HNS3_RXD_STRP_TAGP_S)) {
2140 case HNS3_STRP_OUTER_VLAN:
2141 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2142 break;
2143 case HNS3_STRP_INNER_VLAN:
2144 vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2145 break;
2146 default:
2147 vlan_tag = 0;
2148 break;
2149 }
2150
2151 return vlan_tag;
2152 }
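/* On revision 0x20 hardware the driver does not consult the STRP_TAGP
 * field; it simply tries ot_vlan_tag first and falls back to vlan_tag.
 * On later revisions the STRP_TAGP field of l234_info reports which tag
 * was stripped, and the switch statement above decodes it.
 */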
2153
2154 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2155 struct sk_buff **out_skb, int *out_bnum)
2156 {
2157 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2158 struct hns3_desc_cb *desc_cb;
2159 struct hns3_desc *desc;
2160 struct sk_buff *skb;
2161 unsigned char *va;
2162 u32 bd_base_info;
2163 int pull_len;
2164 u32 l234info;
2165 int length;
2166 int bnum;
2167
2168 desc = &ring->desc[ring->next_to_clean];
2169 desc_cb = &ring->desc_cb[ring->next_to_clean];
2170
2171 prefetch(desc);
2172
2173 length = le16_to_cpu(desc->rx.size);
2174 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2175
2176 /* Check valid BD */
2177 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
2178 return -EFAULT;
2179
2180 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2181
2182 /* Prefetch the first cache line of the first page.
2183 * The idea is to cache a few bytes of the packet header. Our L1
2184 * cache line size is 64B, so we need to prefetch twice to cover
2185 * 128B. Some systems have larger 128B L1 cache lines; in such a
2186 * case a single prefetch would suffice to bring in the relevant
2187 * part of the header.
2188 */
2189 prefetch(va);
2190 #if L1_CACHE_BYTES < 128
2191 prefetch(va + L1_CACHE_BYTES);
2192 #endif
2193
2194 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2195 HNS3_RX_HEAD_SIZE);
2196 if (unlikely(!skb)) {
2197 netdev_err(netdev, "alloc rx skb fail\n");
2198
2199 u64_stats_update_begin(&ring->syncp);
2200 ring->stats.sw_err_cnt++;
2201 u64_stats_update_end(&ring->syncp);
2202
2203 return -ENOMEM;
2204 }
2205
2206 prefetchw(skb->data);
2207
2208 bnum = 1;
2209 if (length <= HNS3_RX_HEAD_SIZE) {
2210 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2211
2212 /* We can reuse buffer as-is, just make sure it is local */
2213 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2214 desc_cb->reuse_flag = 1;
2215 else /* This page cannot be reused so discard it */
2216 put_page(desc_cb->priv);
2217
2218 ring_ptr_move_fw(ring, next_to_clean);
2219 } else {
2220 u64_stats_update_begin(&ring->syncp);
2221 ring->stats.seg_pkt_cnt++;
2222 u64_stats_update_end(&ring->syncp);
2223
2224 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2225
2226 memcpy(__skb_put(skb, pull_len), va,
2227 ALIGN(pull_len, sizeof(long)));
2228
2229 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2230 ring_ptr_move_fw(ring, next_to_clean);
2231
2232 while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2233 desc = &ring->desc[ring->next_to_clean];
2234 desc_cb = &ring->desc_cb[ring->next_to_clean];
2235 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2236 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2237 ring_ptr_move_fw(ring, next_to_clean);
2238 bnum++;
2239 }
2240 }
2241
2242 *out_bnum = bnum;
2258
2259 l234info = le32_to_cpu(desc->rx.l234_info);
2260
2261 /* Based on hw strategy, the tag offloaded will be stored at
2262 * ot_vlan_tag in two layer tag case, and stored at vlan_tag
2263 * in one layer tag case.
2264 */
2265 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2266 u16 vlan_tag;
2267
2268 vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
2269 if (vlan_tag & VLAN_VID_MASK)
2270 __vlan_hwaccel_put_tag(skb,
2271 htons(ETH_P_8021Q),
2272 vlan_tag);
2273 }
2274
2275 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2276 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2277 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2278 u64_stats_update_begin(&ring->syncp);
2279 ring->stats.non_vld_descs++;
2280 u64_stats_update_end(&ring->syncp);
2281
2282 dev_kfree_skb_any(skb);
2283 return -EINVAL;
2284 }
2285
2286 if (unlikely((!desc->rx.pkt_len) ||
2287 hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2288 netdev_err(netdev, "truncated pkt\n");
2289 u64_stats_update_begin(&ring->syncp);
2290 ring->stats.err_pkt_len++;
2291 u64_stats_update_end(&ring->syncp);
2292
2293 dev_kfree_skb_any(skb);
2294 return -EFAULT;
2295 }
2296
2297 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
2298 netdev_err(netdev, "L2 error pkt\n");
2299 u64_stats_update_begin(&ring->syncp);
2300 ring->stats.l2_err++;
2301 u64_stats_update_end(&ring->syncp);
2302
2303 dev_kfree_skb_any(skb);
2304 return -EFAULT;
2305 }
2306
2307 u64_stats_update_begin(&ring->syncp);
2308 ring->stats.rx_pkts++;
2309 ring->stats.rx_bytes += skb->len;
2310 u64_stats_update_end(&ring->syncp);
2311
2312 ring->tqp_vector->rx_group.total_bytes += skb->len;
2313
2314 hns3_rx_checksum(ring, skb, desc);
2315 return 0;
2316 }
2317
2318 int hns3_clean_rx_ring(
2319 struct hns3_enet_ring *ring, int budget,
2320 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2321 {
2322 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2323 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2324 int recv_pkts, recv_bds, clean_count, err;
2325 int unused_count = hns3_desc_unused(ring);
2326 struct sk_buff *skb = NULL;
2327 int num, bnum = 0;
2328
2329 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2330 rmb(); /* Make sure num has been read before any other data is touched */
2331
2332 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2333 num -= unused_count;
2334
2335 while (recv_pkts < budget && recv_bds < num) {
2336 /* Reuse or realloc buffers */
2337 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2338 hns3_nic_alloc_rx_buffers(ring,
2339 clean_count + unused_count);
2340 clean_count = 0;
2341 unused_count = hns3_desc_unused(ring);
2342 }
2343
2344 /* Poll one pkt */
2345 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2346 if (unlikely(!skb)) /* Unrecoverable fault, stop cleaning this ring */
2347 goto out;
2348
2349 recv_bds += bnum;
2350 clean_count += bnum;
2351 if (unlikely(err)) { /* Skip over the erroneous packet */
2352 recv_pkts++;
2353 continue;
2354 }
2355
2356 /* Hand the packet up to the network stack */
2357 skb->protocol = eth_type_trans(skb, netdev);
2358 rx_fn(ring, skb);
2359
2360 recv_pkts++;
2361 }
2362
2363 out:
2364 /* Replenish the ring with any buffers cleaned or left unused above */
2365 if (clean_count + unused_count > 0)
2366 hns3_nic_alloc_rx_buffers(ring,
2367 clean_count + unused_count);
2368
2369 return recv_pkts;
2370 }
2371
2372 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2373 {
2374 struct hns3_enet_tqp_vector *tqp_vector =
2375 ring_group->ring->tqp_vector;
2376 enum hns3_flow_level_range new_flow_level;
2377 int packets_per_msecs;
2378 int bytes_per_msecs;
2379 u32 time_passed_ms;
2380 u16 new_int_gl;
2381
2382 if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
2383 return false;
2384
2385 if (ring_group->total_packets == 0) {
2386 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2387 ring_group->coal.flow_level = HNS3_FLOW_LOW;
2388 return true;
2389 }
2390
2391 /* Simple throttle rate management:
2392 * 0-10 MB/s low (50000 ints/s)
2393 * 10-20 MB/s mid (20000 ints/s)
2394 * 20-1249 MB/s high (18000 ints/s)
2395 * > 40000 pps ultra (8000 ints/s)
2396 */
2397 new_flow_level = ring_group->coal.flow_level;
2398 new_int_gl = ring_group->coal.int_gl;
2399 time_passed_ms =
2400 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2401
2402 if (!time_passed_ms)
2403 return false;
2404
2405 do_div(ring_group->total_packets, time_passed_ms);
2406 packets_per_msecs = ring_group->total_packets;
2407
2408 do_div(ring_group->total_bytes, time_passed_ms);
2409 bytes_per_msecs = ring_group->total_bytes;
2410
2411 #define HNS3_RX_LOW_BYTE_RATE 10000
2412 #define HNS3_RX_MID_BYTE_RATE 20000
2413
2414 switch (new_flow_level) {
2415 case HNS3_FLOW_LOW:
2416 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2417 new_flow_level = HNS3_FLOW_MID;
2418 break;
2419 case HNS3_FLOW_MID:
2420 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2421 new_flow_level = HNS3_FLOW_HIGH;
2422 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2423 new_flow_level = HNS3_FLOW_LOW;
2424 break;
2425 case HNS3_FLOW_HIGH:
2426 case HNS3_FLOW_ULTRA:
2427 default:
2428 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2429 new_flow_level = HNS3_FLOW_MID;
2430 break;
2431 }
2432
2433 #define HNS3_RX_ULTRA_PACKET_RATE 40
2434
2435 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2436 &tqp_vector->rx_group == ring_group)
2437 new_flow_level = HNS3_FLOW_ULTRA;
2438
2439 switch (new_flow_level) {
2440 case HNS3_FLOW_LOW:
2441 new_int_gl = HNS3_INT_GL_50K;
2442 break;
2443 case HNS3_FLOW_MID:
2444 new_int_gl = HNS3_INT_GL_20K;
2445 break;
2446 case HNS3_FLOW_HIGH:
2447 new_int_gl = HNS3_INT_GL_18K;
2448 break;
2449 case HNS3_FLOW_ULTRA:
2450 new_int_gl = HNS3_INT_GL_8K;
2451 break;
2452 default:
2453 break;
2454 }
2455
2456 ring_group->total_bytes = 0;
2457 ring_group->total_packets = 0;
2458 ring_group->coal.flow_level = new_flow_level;
2459 if (new_int_gl != ring_group->coal.int_gl) {
2460 ring_group->coal.int_gl = new_int_gl;
2461 return true;
2462 }
2463 return false;
2464 }
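/* A worked example of the adaptation above (traffic figures are
 * hypothetical): if an RX group moved 30000 bytes and 50 packets during
 * a 2 ms window, that is 15000 bytes/ms and 25 packets/ms, so a group
 * currently at HNS3_FLOW_LOW steps up to HNS3_FLOW_MID and the GL value
 * becomes HNS3_INT_GL_20K; had the packet rate exceeded
 * HNS3_RX_ULTRA_PACKET_RATE (40 packets/ms), the RX group would have
 * been pushed straight to HNS3_FLOW_ULTRA / HNS3_INT_GL_8K instead.
 */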
2465
2466 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2467 {
2468 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2469 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2470 bool rx_update, tx_update;
2471
2472 if (tqp_vector->int_adapt_down > 0) {
2473 tqp_vector->int_adapt_down--;
2474 return;
2475 }
2476
2477 if (rx_group->coal.gl_adapt_enable) {
2478 rx_update = hns3_get_new_int_gl(rx_group);
2479 if (rx_update)
2480 hns3_set_vector_coalesce_rx_gl(tqp_vector,
2481 rx_group->coal.int_gl);
2482 }
2483
2484 if (tx_group->coal.gl_adapt_enable) {
2485 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2486 if (tx_update)
2487 hns3_set_vector_coalesce_tx_gl(tqp_vector,
2488 tx_group->coal.int_gl);
2489 }
2490
2491 tqp_vector->last_jiffies = jiffies;
2492 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
2493 }
2494
2495 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2496 {
2497 struct hns3_enet_ring *ring;
2498 int rx_pkt_total = 0;
2499
2500 struct hns3_enet_tqp_vector *tqp_vector =
2501 container_of(napi, struct hns3_enet_tqp_vector, napi);
2502 bool clean_complete = true;
2503 int rx_budget;
2504
2505 /* Since the actual Tx work is minimal, we can give the Tx a larger
2506 * budget and be more aggressive about cleaning up the Tx descriptors.
2507 */
2508 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2509 if (!hns3_clean_tx_ring(ring, budget))
2510 clean_complete = false;
2511 }
2512
2513 /* make sure the rx ring budget is not smaller than 1 */
2514 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2515
2516 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2517 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2518 hns3_rx_skb);
2519
2520 if (rx_cleaned >= rx_budget)
2521 clean_complete = false;
2522
2523 rx_pkt_total += rx_cleaned;
2524 }
2525
2526 tqp_vector->rx_group.total_packets += rx_pkt_total;
2527
2528 if (!clean_complete)
2529 return budget;
2530
2531 napi_complete(napi);
2532 hns3_update_new_int_gl(tqp_vector);
2533 hns3_mask_vector_irq(tqp_vector, 1);
2534
2535 return rx_pkt_total;
2536 }
2537
2538 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2539 struct hnae3_ring_chain_node *head)
2540 {
2541 struct pci_dev *pdev = tqp_vector->handle->pdev;
2542 struct hnae3_ring_chain_node *cur_chain = head;
2543 struct hnae3_ring_chain_node *chain;
2544 struct hns3_enet_ring *tx_ring;
2545 struct hns3_enet_ring *rx_ring;
2546
2547 tx_ring = tqp_vector->tx_group.ring;
2548 if (tx_ring) {
2549 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2550 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2551 HNAE3_RING_TYPE_TX);
2552 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2553 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2554
2555 cur_chain->next = NULL;
2556
2557 while (tx_ring->next) {
2558 tx_ring = tx_ring->next;
2559
2560 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2561 GFP_KERNEL);
2562 if (!chain)
2563 return -ENOMEM;
2564
2565 cur_chain->next = chain;
2566 chain->tqp_index = tx_ring->tqp->tqp_index;
2567 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2568 HNAE3_RING_TYPE_TX);
2569 hnae3_set_field(chain->int_gl_idx,
2570 HNAE3_RING_GL_IDX_M,
2571 HNAE3_RING_GL_IDX_S,
2572 HNAE3_RING_GL_TX);
2573
2574 cur_chain = chain;
2575 }
2576 }
2577
2578 rx_ring = tqp_vector->rx_group.ring;
2579 if (!tx_ring && rx_ring) {
2580 cur_chain->next = NULL;
2581 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2582 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2583 HNAE3_RING_TYPE_RX);
2584 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2585 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2586
2587 rx_ring = rx_ring->next;
2588 }
2589
2590 while (rx_ring) {
2591 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2592 if (!chain)
2593 return -ENOMEM;
2594
2595 cur_chain->next = chain;
2596 chain->tqp_index = rx_ring->tqp->tqp_index;
2597 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2598 HNAE3_RING_TYPE_RX);
2599 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2600 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2601
2602 cur_chain = chain;
2603
2604 rx_ring = rx_ring->next;
2605 }
2606
2607 return 0;
2608 }
2609
2610 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2611 struct hnae3_ring_chain_node *head)
2612 {
2613 struct pci_dev *pdev = tqp_vector->handle->pdev;
2614 struct hnae3_ring_chain_node *chain_tmp, *chain;
2615
2616 chain = head->next;
2617
2618 while (chain) {
2619 chain_tmp = chain->next;
2620 devm_kfree(&pdev->dev, chain);
2621 chain = chain_tmp;
2622 }
2623 }
2624
2625 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2626 struct hns3_enet_ring *ring)
2627 {
2628 ring->next = group->ring;
2629 group->ring = ring;
2630
2631 group->count++;
2632 }
2633
2634 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2635 {
2636 struct hnae3_ring_chain_node vector_ring_chain;
2637 struct hnae3_handle *h = priv->ae_handle;
2638 struct hns3_enet_tqp_vector *tqp_vector;
2639 int ret = 0;
2640 u16 i;
2641
2642 for (i = 0; i < priv->vector_num; i++) {
2643 tqp_vector = &priv->tqp_vector[i];
2644 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
2645 tqp_vector->num_tqps = 0;
2646 }
2647
2648 for (i = 0; i < h->kinfo.num_tqps; i++) {
2649 u16 vector_i = i % priv->vector_num;
2650 u16 tqp_num = h->kinfo.num_tqps;
2651
2652 tqp_vector = &priv->tqp_vector[vector_i];
2653
2654 hns3_add_ring_to_group(&tqp_vector->tx_group,
2655 priv->ring_data[i].ring);
2656
2657 hns3_add_ring_to_group(&tqp_vector->rx_group,
2658 priv->ring_data[i + tqp_num].ring);
2659
2660 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2661 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2662 tqp_vector->num_tqps++;
2663 }
2664
2665 for (i = 0; i < priv->vector_num; i++) {
2666 tqp_vector = &priv->tqp_vector[i];
2667
2668 tqp_vector->rx_group.total_bytes = 0;
2669 tqp_vector->rx_group.total_packets = 0;
2670 tqp_vector->tx_group.total_bytes = 0;
2671 tqp_vector->tx_group.total_packets = 0;
2672 tqp_vector->handle = h;
2673
2674 ret = hns3_get_vector_ring_chain(tqp_vector,
2675 &vector_ring_chain);
2676 if (ret)
2677 return ret;
2678
2679 ret = h->ae_algo->ops->map_ring_to_vector(h,
2680 tqp_vector->vector_irq, &vector_ring_chain);
2681
2682 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2683
2684 if (ret)
2685 return ret;
2686
2687 netif_napi_add(priv->netdev, &tqp_vector->napi,
2688 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2689 }
2690
2691 return 0;
2692 }
2693
2694 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
2695 {
2696 struct hnae3_handle *h = priv->ae_handle;
2697 struct hns3_enet_tqp_vector *tqp_vector;
2698 struct hnae3_vector_info *vector;
2699 struct pci_dev *pdev = h->pdev;
2700 u16 tqp_num = h->kinfo.num_tqps;
2701 u16 vector_num;
2702 int ret = 0;
2703 u16 i;
2704
2705 /* The number of vectors should match the RSS size and the number of online CPUs */
2706 /* Should consider 2p/4p later */
2707 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2708 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2709 GFP_KERNEL);
2710 if (!vector)
2711 return -ENOMEM;
2712
2713 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2714
2715 priv->vector_num = vector_num;
2716 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2717 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2718 GFP_KERNEL);
2719 if (!priv->tqp_vector) {
2720 ret = -ENOMEM;
2721 goto out;
2722 }
2723
2724 for (i = 0; i < priv->vector_num; i++) {
2725 tqp_vector = &priv->tqp_vector[i];
2726 tqp_vector->idx = i;
2727 tqp_vector->mask_addr = vector[i].io_addr;
2728 tqp_vector->vector_irq = vector[i].vector;
2729 hns3_vector_gl_rl_init(tqp_vector, priv);
2730 }
2731
2732 out:
2733 devm_kfree(&pdev->dev, vector);
2734 return ret;
2735 }
2736
2737 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
2738 {
2739 group->ring = NULL;
2740 group->count = 0;
2741 }
2742
2743 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2744 {
2745 struct hnae3_ring_chain_node vector_ring_chain;
2746 struct hnae3_handle *h = priv->ae_handle;
2747 struct hns3_enet_tqp_vector *tqp_vector;
2748 int i, ret;
2749
2750 for (i = 0; i < priv->vector_num; i++) {
2751 tqp_vector = &priv->tqp_vector[i];
2752
2753 ret = hns3_get_vector_ring_chain(tqp_vector,
2754 &vector_ring_chain);
2755 if (ret)
2756 return ret;
2757
2758 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2759 tqp_vector->vector_irq, &vector_ring_chain);
2760 if (ret)
2761 return ret;
2762
2763 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2764
2765 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2766 (void)irq_set_affinity_hint(
2767 priv->tqp_vector[i].vector_irq,
2768 NULL);
2769 free_irq(priv->tqp_vector[i].vector_irq,
2770 &priv->tqp_vector[i]);
2771 }
2772
2773 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2774 hns3_clear_ring_group(&tqp_vector->rx_group);
2775 hns3_clear_ring_group(&tqp_vector->tx_group);
2776 netif_napi_del(&priv->tqp_vector[i].napi);
2777 }
2778
2779 return 0;
2780 }
2781
2782 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
2783 {
2784 struct hnae3_handle *h = priv->ae_handle;
2785 struct pci_dev *pdev = h->pdev;
2786 int i, ret;
2787
2788 for (i = 0; i < priv->vector_num; i++) {
2789 struct hns3_enet_tqp_vector *tqp_vector;
2790
2791 tqp_vector = &priv->tqp_vector[i];
2792 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2793 if (ret)
2794 return ret;
2795 }
2796
2797 devm_kfree(&pdev->dev, priv->tqp_vector);
2798 return 0;
2799 }
2800
2801 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2802 int ring_type)
2803 {
2804 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2805 int queue_num = priv->ae_handle->kinfo.num_tqps;
2806 struct pci_dev *pdev = priv->ae_handle->pdev;
2807 struct hns3_enet_ring *ring;
2808
2809 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2810 if (!ring)
2811 return -ENOMEM;
2812
2813 if (ring_type == HNAE3_RING_TYPE_TX) {
2814 ring_data[q->tqp_index].ring = ring;
2815 ring_data[q->tqp_index].queue_index = q->tqp_index;
2816 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2817 } else {
2818 ring_data[q->tqp_index + queue_num].ring = ring;
2819 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2820 ring->io_base = q->io_base;
2821 }
2822
2823 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2824
2825 ring->tqp = q;
2826 ring->desc = NULL;
2827 ring->desc_cb = NULL;
2828 ring->dev = priv->dev;
2829 ring->desc_dma_addr = 0;
2830 ring->buf_size = q->buf_size;
2831 ring->desc_num = q->desc_num;
2832 ring->next_to_use = 0;
2833 ring->next_to_clean = 0;
2834
2835 return 0;
2836 }
2837
2838 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2839 struct hns3_nic_priv *priv)
2840 {
2841 int ret;
2842
2843 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2844 if (ret)
2845 return ret;
2846
2847 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2848 if (ret)
2849 return ret;
2850
2851 return 0;
2852 }
2853
2854 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2855 {
2856 struct hnae3_handle *h = priv->ae_handle;
2857 struct pci_dev *pdev = h->pdev;
2858 int i, ret;
2859
2860 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2861 sizeof(*priv->ring_data) * 2,
2862 GFP_KERNEL);
2863 if (!priv->ring_data)
2864 return -ENOMEM;
2865
2866 for (i = 0; i < h->kinfo.num_tqps; i++) {
2867 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2868 if (ret)
2869 goto err;
2870 }
2871
2872 return 0;
2873 err:
2874 devm_kfree(&pdev->dev, priv->ring_data);
2875 return ret;
2876 }
2877
2878 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2879 {
2880 struct hnae3_handle *h = priv->ae_handle;
2881 int i;
2882
2883 for (i = 0; i < h->kinfo.num_tqps; i++) {
2884 devm_kfree(priv->dev, priv->ring_data[i].ring);
2885 devm_kfree(priv->dev,
2886 priv->ring_data[i + h->kinfo.num_tqps].ring);
2887 }
2888 devm_kfree(priv->dev, priv->ring_data);
2889 }
2890
2891 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2892 {
2893 int ret;
2894
2895 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2896 return -EINVAL;
2897
2898 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2899 GFP_KERNEL);
2900 if (!ring->desc_cb) {
2901 ret = -ENOMEM;
2902 goto out;
2903 }
2904
2905 ret = hns3_alloc_desc(ring);
2906 if (ret)
2907 goto out_with_desc_cb;
2908
2909 if (!HNAE3_IS_TX_RING(ring)) {
2910 ret = hns3_alloc_ring_buffers(ring);
2911 if (ret)
2912 goto out_with_desc;
2913 }
2914
2915 return 0;
2916
2917 out_with_desc:
2918 hns3_free_desc(ring);
2919 out_with_desc_cb:
2920 kfree(ring->desc_cb);
2921 ring->desc_cb = NULL;
2922 out:
2923 return ret;
2924 }
2925
2926 static void hns3_fini_ring(struct hns3_enet_ring *ring)
2927 {
2928 hns3_free_desc(ring);
2929 kfree(ring->desc_cb);
2930 ring->desc_cb = NULL;
2931 ring->next_to_clean = 0;
2932 ring->next_to_use = 0;
2933 }
2934
2935 static int hns3_buf_size2type(u32 buf_size)
2936 {
2937 int bd_size_type;
2938
2939 switch (buf_size) {
2940 case 512:
2941 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2942 break;
2943 case 1024:
2944 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2945 break;
2946 case 2048:
2947 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2948 break;
2949 case 4096:
2950 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2951 break;
2952 default:
2953 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2954 }
2955
2956 return bd_size_type;
2957 }
2958
2959 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2960 {
2961 dma_addr_t dma = ring->desc_dma_addr;
2962 struct hnae3_queue *q = ring->tqp;
2963
2964 if (!HNAE3_IS_TX_RING(ring)) {
2965 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2966 (u32)dma);
2967 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2968 (u32)((dma >> 31) >> 1));
2969
2970 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2971 hns3_buf_size2type(ring->buf_size));
2972 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2973 ring->desc_num / 8 - 1);
2974
2975 } else {
2976 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2977 (u32)dma);
2978 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2979 (u32)((dma >> 31) >> 1));
2980
2981 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2982 hns3_buf_size2type(ring->buf_size));
2983 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2984 ring->desc_num / 8 - 1);
2985 }
2986 }
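/* Note on the base-address writes above: the high word is computed as
 * (dma >> 31) >> 1 rather than dma >> 32 so the expression remains
 * well-defined when dma_addr_t is only 32 bits wide (shifting a value by
 * its full width is undefined behaviour in C); for 64-bit DMA addresses
 * the result is simply bits 63:32.
 */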
2987
2988 int hns3_init_all_ring(struct hns3_nic_priv *priv)
2989 {
2990 struct hnae3_handle *h = priv->ae_handle;
2991 int ring_num = h->kinfo.num_tqps * 2;
2992 int i, j;
2993 int ret;
2994
2995 for (i = 0; i < ring_num; i++) {
2996 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2997 if (ret) {
2998 dev_err(priv->dev,
2999 "Alloc ring memory fail! ret=%d\n", ret);
3000 goto out_when_alloc_ring_memory;
3001 }
3002
3003 u64_stats_init(&priv->ring_data[i].ring->syncp);
3004 }
3005
3006 return 0;
3007
3008 out_when_alloc_ring_memory:
3009 for (j = i - 1; j >= 0; j--)
3010 hns3_fini_ring(priv->ring_data[j].ring);
3011
3012 return -ENOMEM;
3013 }
3014
3015 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3016 {
3017 struct hnae3_handle *h = priv->ae_handle;
3018 int i;
3019
3020 for (i = 0; i < h->kinfo.num_tqps; i++) {
3021 if (h->ae_algo->ops->reset_queue)
3022 h->ae_algo->ops->reset_queue(h, i);
3023
3024 hns3_fini_ring(priv->ring_data[i].ring);
3025 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3026 }
3027 return 0;
3028 }
3029
3030 /* Set the MAC address if it is configured; otherwise leave it to the AE driver */
3031 static void hns3_init_mac_addr(struct net_device *netdev, bool init)
3032 {
3033 struct hns3_nic_priv *priv = netdev_priv(netdev);
3034 struct hnae3_handle *h = priv->ae_handle;
3035 u8 mac_addr_temp[ETH_ALEN];
3036
3037 if (h->ae_algo->ops->get_mac_addr && init) {
3038 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3039 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3040 }
3041
3042 /* Check if the MAC address is valid; if not, get a random one */
3043 if (!is_valid_ether_addr(netdev->dev_addr)) {
3044 eth_hw_addr_random(netdev);
3045 dev_warn(priv->dev, "using random MAC address %pM\n",
3046 netdev->dev_addr);
3047 }
3048
3049 if (h->ae_algo->ops->set_mac_addr)
3050 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3051
3052 }
3053
3054 static void hns3_uninit_mac_addr(struct net_device *netdev)
3055 {
3056 struct hns3_nic_priv *priv = netdev_priv(netdev);
3057 struct hnae3_handle *h = priv->ae_handle;
3058
3059 if (h->ae_algo->ops->rm_uc_addr)
3060 h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
3061 }
3062
3063 static void hns3_nic_set_priv_ops(struct net_device *netdev)
3064 {
3065 struct hns3_nic_priv *priv = netdev_priv(netdev);
3066
3067 if ((netdev->features & NETIF_F_TSO) ||
3068 (netdev->features & NETIF_F_TSO6)) {
3069 priv->ops.fill_desc = hns3_fill_desc_tso;
3070 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3071 } else {
3072 priv->ops.fill_desc = hns3_fill_desc;
3073 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3074 }
3075 }
3076
3077 static int hns3_client_init(struct hnae3_handle *handle)
3078 {
3079 struct pci_dev *pdev = handle->pdev;
3080 struct hns3_nic_priv *priv;
3081 struct net_device *netdev;
3082 int ret;
3083
3084 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
3085 hns3_get_max_available_channels(handle));
3086 if (!netdev)
3087 return -ENOMEM;
3088
3089 priv = netdev_priv(netdev);
3090 priv->dev = &pdev->dev;
3091 priv->netdev = netdev;
3092 priv->ae_handle = handle;
3093 priv->ae_handle->reset_level = HNAE3_NONE_RESET;
3094 priv->ae_handle->last_reset_time = jiffies;
3095 priv->tx_timeout_count = 0;
3096
3097 handle->kinfo.netdev = netdev;
3098 handle->priv = (void *)priv;
3099
3100 hns3_init_mac_addr(netdev, true);
3101
3102 hns3_set_default_feature(netdev);
3103
3104 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3105 netdev->priv_flags |= IFF_UNICAST_FLT;
3106 netdev->netdev_ops = &hns3_nic_netdev_ops;
3107 SET_NETDEV_DEV(netdev, &pdev->dev);
3108 hns3_ethtool_set_ops(netdev);
3109 hns3_nic_set_priv_ops(netdev);
3110
3111 /* Carrier off reporting is important to ethtool even BEFORE open */
3112 netif_carrier_off(netdev);
3113
3114 ret = hns3_get_ring_config(priv);
3115 if (ret) {
3116 ret = -ENOMEM;
3117 goto out_get_ring_cfg;
3118 }
3119
3120 ret = hns3_nic_alloc_vector_data(priv);
3121 if (ret) {
3122 ret = -ENOMEM;
3123 goto out_alloc_vector_data;
3124 }
3125
3126 ret = hns3_nic_init_vector_data(priv);
3127 if (ret) {
3128 ret = -ENOMEM;
3129 goto out_init_vector_data;
3130 }
3131
3132 ret = hns3_init_all_ring(priv);
3133 if (ret) {
3134 ret = -ENOMEM;
3135 goto out_init_ring_data;
3136 }
3137
3138 ret = register_netdev(netdev);
3139 if (ret) {
3140 dev_err(priv->dev, "probe register netdev fail!\n");
3141 goto out_reg_netdev_fail;
3142 }
3143
3144 hns3_dcbnl_setup(handle);
3145
3146 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3147 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3148
3149 return ret;
3150
3151 out_reg_netdev_fail:
3152 out_init_ring_data:
3153 (void)hns3_nic_uninit_vector_data(priv);
3154 out_init_vector_data:
3155 hns3_nic_dealloc_vector_data(priv);
3156 out_alloc_vector_data:
3157 priv->ring_data = NULL;
3158 out_get_ring_cfg:
3159 priv->ae_handle = NULL;
3160 free_netdev(netdev);
3161 return ret;
3162 }
3163
3164 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3165 {
3166 struct net_device *netdev = handle->kinfo.netdev;
3167 struct hns3_nic_priv *priv = netdev_priv(netdev);
3168 int ret;
3169
3170 if (netdev->reg_state != NETREG_UNINITIALIZED)
3171 unregister_netdev(netdev);
3172
3173 hns3_force_clear_all_rx_ring(handle);
3174
3175 ret = hns3_nic_uninit_vector_data(priv);
3176 if (ret)
3177 netdev_err(netdev, "uninit vector error\n");
3178
3179 ret = hns3_nic_dealloc_vector_data(priv);
3180 if (ret)
3181 netdev_err(netdev, "dealloc vector error\n");
3182
3183 ret = hns3_uninit_all_ring(priv);
3184 if (ret)
3185 netdev_err(netdev, "uninit ring error\n");
3186
3187 hns3_put_ring_config(priv);
3188
3189 priv->ring_data = NULL;
3190
3191 hns3_uninit_mac_addr(netdev);
3192
3193 free_netdev(netdev);
3194 }
3195
3196 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3197 {
3198 struct net_device *netdev = handle->kinfo.netdev;
3199
3200 if (!netdev)
3201 return;
3202
3203 if (linkup) {
3204 netif_carrier_on(netdev);
3205 netif_tx_wake_all_queues(netdev);
3206 netdev_info(netdev, "link up\n");
3207 } else {
3208 netif_carrier_off(netdev);
3209 netif_tx_stop_all_queues(netdev);
3210 netdev_info(netdev, "link down\n");
3211 }
3212 }
3213
3214 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3215 {
3216 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3217 struct net_device *ndev = kinfo->netdev;
3218 bool if_running;
3219 int ret;
3220 u8 i;
3221
3222 if (tc > HNAE3_MAX_TC)
3223 return -EINVAL;
3224
3225 if (!ndev)
3226 return -ENODEV;
3227
3228 if_running = netif_running(ndev);
3229
3230 ret = netdev_set_num_tc(ndev, tc);
3231 if (ret)
3232 return ret;
3233
3234 if (if_running) {
3235 (void)hns3_nic_net_stop(ndev);
3236 msleep(100);
3237 }
3238
3239 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3240 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3241 if (ret)
3242 goto err_out;
3243
3244 if (tc <= 1) {
3245 netdev_reset_tc(ndev);
3246 goto out;
3247 }
3248
3249 for (i = 0; i < HNAE3_MAX_TC; i++) {
3250 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3251
3252 if (tc_info->enable)
3253 netdev_set_tc_queue(ndev,
3254 tc_info->tc,
3255 tc_info->tqp_count,
3256 tc_info->tqp_offset);
3257 }
3258
3259 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
3260 netdev_set_prio_tc_map(ndev, i,
3261 kinfo->prio_tc[i]);
3262 }
3263
3264 out:
3265 ret = hns3_nic_set_real_num_queue(ndev);
3266
3267 err_out:
3268 if (if_running)
3269 (void)hns3_nic_net_open(ndev);
3270
3271 return ret;
3272 }
3273
3274 static void hns3_recover_hw_addr(struct net_device *ndev)
3275 {
3276 struct netdev_hw_addr_list *list;
3277 struct netdev_hw_addr *ha, *tmp;
3278
3279 /* go through and sync uc_addr entries to the device */
3280 list = &ndev->uc;
3281 list_for_each_entry_safe(ha, tmp, &list->list, list)
3282 hns3_nic_uc_sync(ndev, ha->addr);
3283
3284 /* go through and sync mc_addr entries to the device */
3285 list = &ndev->mc;
3286 list_for_each_entry_safe(ha, tmp, &list->list, list)
3287 hns3_nic_mc_sync(ndev, ha->addr);
3288 }
3289
3290 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3291 {
3292 while (ring->next_to_clean != ring->next_to_use) {
3293 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3294 hns3_free_buffer_detach(ring, ring->next_to_clean);
3295 ring_ptr_move_fw(ring, next_to_clean);
3296 }
3297 }
3298
3299 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3300 {
3301 struct hns3_desc_cb res_cbs;
3302 int ret;
3303
3304 while (ring->next_to_use != ring->next_to_clean) {
3305 /* When a buffer is not reused, its memory has been
3306 * freed in hns3_handle_rx_bd or will be freed by the
3307 * stack, so we need to replace the buffer here.
3308 */
3309 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3310 ret = hns3_reserve_buffer_map(ring, &res_cbs);
3311 if (ret) {
3312 u64_stats_update_begin(&ring->syncp);
3313 ring->stats.sw_err_cnt++;
3314 u64_stats_update_end(&ring->syncp);
3315 /* If allocating a new buffer fails, exit directly;
3316 * the ring is cleared again in the up flow.
3317 */
3318 netdev_warn(ring->tqp->handle->kinfo.netdev,
3319 "reserve buffer map failed, ret = %d\n",
3320 ret);
3321 return ret;
3322 }
3323 hns3_replace_buffer(ring, ring->next_to_use,
3324 &res_cbs);
3325 }
3326 ring_ptr_move_fw(ring, next_to_use);
3327 }
3328
3329 return 0;
3330 }
3331
3332 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
3333 {
3334 while (ring->next_to_use != ring->next_to_clean) {
3335 /* When a buffer is not reused, its memory has been
3336 * freed in hns3_handle_rx_bd or will be freed by the
3337 * stack, so we only need to unmap the buffer here.
3338 */
3339 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3340 hns3_unmap_buffer(ring,
3341 &ring->desc_cb[ring->next_to_use]);
3342 ring->desc_cb[ring->next_to_use].dma = 0;
3343 }
3344
3345 ring_ptr_move_fw(ring, next_to_use);
3346 }
3347 }
3348
3349 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3350 {
3351 struct net_device *ndev = h->kinfo.netdev;
3352 struct hns3_nic_priv *priv = netdev_priv(ndev);
3353 struct hns3_enet_ring *ring;
3354 u32 i;
3355
3356 for (i = 0; i < h->kinfo.num_tqps; i++) {
3357 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3358 hns3_force_clear_rx_ring(ring);
3359 }
3360 }
3361
3362 static void hns3_clear_all_ring(struct hnae3_handle *h)
3363 {
3364 struct net_device *ndev = h->kinfo.netdev;
3365 struct hns3_nic_priv *priv = netdev_priv(ndev);
3366 u32 i;
3367
3368 for (i = 0; i < h->kinfo.num_tqps; i++) {
3369 struct netdev_queue *dev_queue;
3370 struct hns3_enet_ring *ring;
3371
3372 ring = priv->ring_data[i].ring;
3373 hns3_clear_tx_ring(ring);
3374 dev_queue = netdev_get_tx_queue(ndev,
3375 priv->ring_data[i].queue_index);
3376 netdev_tx_reset_queue(dev_queue);
3377
3378 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3379 /* Continue to clear other rings even if clearing some
3380 * rings failed.
3381 */
3382 hns3_clear_rx_ring(ring);
3383 }
3384 }
3385
3386 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3387 {
3388 struct net_device *ndev = h->kinfo.netdev;
3389 struct hns3_nic_priv *priv = netdev_priv(ndev);
3390 struct hns3_enet_ring *rx_ring;
3391 int i, j;
3392 int ret;
3393
3394 for (i = 0; i < h->kinfo.num_tqps; i++) {
3395 h->ae_algo->ops->reset_queue(h, i);
3396 hns3_init_ring_hw(priv->ring_data[i].ring);
3397
3398 /* We need to clear the tx ring here because the self test
3399 * uses the ring and does not bring the interface down before up.
3400 */
3401 hns3_clear_tx_ring(priv->ring_data[i].ring);
3402 priv->ring_data[i].ring->next_to_clean = 0;
3403 priv->ring_data[i].ring->next_to_use = 0;
3404
3405 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3406 hns3_init_ring_hw(rx_ring);
3407 ret = hns3_clear_rx_ring(rx_ring);
3408 if (ret)
3409 return ret;
3410
3411 /* We cannot know the hardware head and tail when this function
3412 * is called in the reset flow, so we reuse all descriptors.
3413 */
3414 for (j = 0; j < rx_ring->desc_num; j++)
3415 hns3_reuse_buffer(rx_ring, j);
3416
3417 rx_ring->next_to_clean = 0;
3418 rx_ring->next_to_use = 0;
3419 }
3420
3421 return 0;
3422 }
3423
3424 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3425 {
3426 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3427 struct net_device *ndev = kinfo->netdev;
3428
3429 if (!netif_running(ndev))
3430 return -EIO;
3431
3432 return hns3_nic_net_stop(ndev);
3433 }
3434
3435 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3436 {
3437 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3438 int ret = 0;
3439
3440 if (netif_running(kinfo->netdev)) {
3441 ret = hns3_nic_net_up(kinfo->netdev);
3442 if (ret) {
3443 netdev_err(kinfo->netdev,
3444 "hns net up fail, ret=%d!\n", ret);
3445 return ret;
3446 }
3447 handle->last_reset_time = jiffies;
3448 }
3449
3450 return ret;
3451 }
3452
3453 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3454 {
3455 struct net_device *netdev = handle->kinfo.netdev;
3456 struct hns3_nic_priv *priv = netdev_priv(netdev);
3457 int ret;
3458
3459 hns3_init_mac_addr(netdev, false);
3460 hns3_nic_set_rx_mode(netdev);
3461 hns3_recover_hw_addr(netdev);
3462
3463 /* The hardware table is only cleared when the PF resets */
3464 if (!(handle->flags & HNAE3_SUPPORT_VF))
3465 hns3_restore_vlan(netdev);
3466
3467 /* Carrier off reporting is important to ethtool even BEFORE open */
3468 netif_carrier_off(netdev);
3469
3470 ret = hns3_get_ring_config(priv);
3471 if (ret)
3472 return ret;
3473
3474 ret = hns3_nic_init_vector_data(priv);
3475 if (ret)
3476 return ret;
3477
3478 ret = hns3_init_all_ring(priv);
3479 if (ret) {
3480 hns3_nic_uninit_vector_data(priv);
3481 priv->ring_data = NULL;
3482 }
3483
3484 return ret;
3485 }
3486
3487 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3488 {
3489 struct net_device *netdev = handle->kinfo.netdev;
3490 struct hns3_nic_priv *priv = netdev_priv(netdev);
3491 int ret;
3492
3493 hns3_force_clear_all_rx_ring(handle);
3494
3495 ret = hns3_nic_uninit_vector_data(priv);
3496 if (ret) {
3497 netdev_err(netdev, "uninit vector error\n");
3498 return ret;
3499 }
3500
3501 ret = hns3_uninit_all_ring(priv);
3502 if (ret)
3503 netdev_err(netdev, "uninit ring error\n");
3504
3505 hns3_put_ring_config(priv);
3506
3507 priv->ring_data = NULL;
3508
3509 hns3_uninit_mac_addr(netdev);
3510
3511 return ret;
3512 }
3513
3514 static int hns3_reset_notify(struct hnae3_handle *handle,
3515 enum hnae3_reset_notify_type type)
3516 {
3517 int ret = 0;
3518
3519 switch (type) {
3520 case HNAE3_UP_CLIENT:
3521 ret = hns3_reset_notify_up_enet(handle);
3522 break;
3523 case HNAE3_DOWN_CLIENT:
3524 ret = hns3_reset_notify_down_enet(handle);
3525 break;
3526 case HNAE3_INIT_CLIENT:
3527 ret = hns3_reset_notify_init_enet(handle);
3528 break;
3529 case HNAE3_UNINIT_CLIENT:
3530 ret = hns3_reset_notify_uninit_enet(handle);
3531 break;
3532 default:
3533 break;
3534 }
3535
3536 return ret;
3537 }
3538
3539 static void hns3_restore_coal(struct hns3_nic_priv *priv,
3540 struct hns3_enet_coalesce *tx,
3541 struct hns3_enet_coalesce *rx)
3542 {
3543 u16 vector_num = priv->vector_num;
3544 int i;
3545
3546 for (i = 0; i < vector_num; i++) {
3547 memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
3548 sizeof(struct hns3_enet_coalesce));
3549 memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
3550 sizeof(struct hns3_enet_coalesce));
3551 }
3552 }
3553
3554 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
3555 struct hns3_enet_coalesce *tx,
3556 struct hns3_enet_coalesce *rx)
3557 {
3558 struct hns3_nic_priv *priv = netdev_priv(netdev);
3559 struct hnae3_handle *h = hns3_get_handle(netdev);
3560 int ret;
3561
3562 ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3563 if (ret)
3564 return ret;
3565
3566 ret = hns3_get_ring_config(priv);
3567 if (ret)
3568 return ret;
3569
3570 ret = hns3_nic_alloc_vector_data(priv);
3571 if (ret)
3572 goto err_alloc_vector;
3573
3574 hns3_restore_coal(priv, tx, rx);
3575
3576 ret = hns3_nic_init_vector_data(priv);
3577 if (ret)
3578 goto err_uninit_vector;
3579
3580 ret = hns3_init_all_ring(priv);
3581 if (ret)
3582 goto err_put_ring;
3583
3584 return 0;
3585
3586 err_put_ring:
3587 hns3_put_ring_config(priv);
3588 err_uninit_vector:
3589 hns3_nic_uninit_vector_data(priv);
3590 err_alloc_vector:
3591 hns3_nic_dealloc_vector_data(priv);
3592 return ret;
3593 }
3594
3595 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3596 {
3597 return (new_tqp_num / num_tc) * num_tc;
3598 }
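/* For example, with num_tc = 4 a request for 10 queues is rounded down
 * to (10 / 4) * 4 = 8, so every enabled TC gets the same number of TQPs.
 */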
3599
3600 int hns3_set_channels(struct net_device *netdev,
3601 struct ethtool_channels *ch)
3602 {
3603 struct hns3_nic_priv *priv = netdev_priv(netdev);
3604 struct hnae3_handle *h = hns3_get_handle(netdev);
3605 struct hnae3_knic_private_info *kinfo = &h->kinfo;
3606 struct hns3_enet_coalesce tx_coal, rx_coal;
3607 bool if_running = netif_running(netdev);
3608 u32 new_tqp_num = ch->combined_count;
3609 u16 org_tqp_num;
3610 int ret;
3611
3612 if (ch->rx_count || ch->tx_count)
3613 return -EINVAL;
3614
3615 if (new_tqp_num > hns3_get_max_available_channels(h) ||
3616 new_tqp_num < kinfo->num_tc) {
3617 dev_err(&netdev->dev,
3618 "Change tqps fail, the tqp range is from %d to %d",
3619 kinfo->num_tc,
3620 hns3_get_max_available_channels(h));
3621 return -EINVAL;
3622 }
3623
3624 new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3625 if (kinfo->num_tqps == new_tqp_num)
3626 return 0;
3627
3628 if (if_running)
3629 hns3_nic_net_stop(netdev);
3630
3631 ret = hns3_nic_uninit_vector_data(priv);
3632 if (ret) {
3633 dev_err(&netdev->dev,
3634 "Unbind vector with tqp fail, nothing is changed");
3635 goto open_netdev;
3636 }
3637
3638 /* Changing the tqp num may also change the vector num.
3639 * ethtool only supports setting and querying one coalesce
3640 * configuration for now, so save vector 0's coalesce
3641 * configuration here in order to restore it.
3642 */
3643 memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
3644 sizeof(struct hns3_enet_coalesce));
3645 memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
3646 sizeof(struct hns3_enet_coalesce));
3647
3648 hns3_nic_dealloc_vector_data(priv);
3649
3650 hns3_uninit_all_ring(priv);
3651 hns3_put_ring_config(priv);
3652
3653 org_tqp_num = h->kinfo.num_tqps;
3654 ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
3655 if (ret) {
3656 ret = hns3_modify_tqp_num(netdev, org_tqp_num,
3657 &tx_coal, &rx_coal);
3658 if (ret) {
3659 /* If revert to old tqp failed, fatal error occurred */
3660 dev_err(&netdev->dev,
3661 "Revert to old tqp num fail, ret=%d", ret);
3662 return ret;
3663 }
3664 dev_info(&netdev->dev,
3665 "Change tqp num fail, Revert to old tqp num");
3666 }
3667
3668 open_netdev:
3669 if (if_running)
3670 hns3_nic_net_open(netdev);
3671
3672 return ret;
3673 }
3674
3675 static const struct hnae3_client_ops client_ops = {
3676 .init_instance = hns3_client_init,
3677 .uninit_instance = hns3_client_uninit,
3678 .link_status_change = hns3_link_status_change,
3679 .setup_tc = hns3_client_setup_tc,
3680 .reset_notify = hns3_reset_notify,
3681 };
3682
3683 /* hns3_init_module - Driver registration routine
3684 * hns3_init_module is the first routine called when the driver is
3685 * loaded. It registers the hnae3 client and then the PCI driver.
3686 */
3687 static int __init hns3_init_module(void)
3688 {
3689 int ret;
3690
3691 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3692 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3693
3694 client.type = HNAE3_CLIENT_KNIC;
3695 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3696 hns3_driver_name);
3697
3698 client.ops = &client_ops;
3699
3700 INIT_LIST_HEAD(&client.node);
3701
3702 ret = hnae3_register_client(&client);
3703 if (ret)
3704 return ret;
3705
3706 ret = pci_register_driver(&hns3_driver);
3707 if (ret)
3708 hnae3_unregister_client(&client);
3709
3710 return ret;
3711 }
3712 module_init(hns3_init_module);
3713
3714 /* hns3_exit_module - Driver exit cleanup routine
3715 * hns3_exit_module is called just before the driver is removed
3716 * from memory.
3717 */
3718 static void __exit hns3_exit_module(void)
3719 {
3720 pci_unregister_driver(&hns3_driver);
3721 hnae3_unregister_client(&client);
3722 }
3723 module_exit(hns3_exit_module);
3724
3725 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3726 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3727 MODULE_LICENSE("GPL");
3728 MODULE_ALIAS("pci:hns-nic");
3729 MODULE_VERSION(HNS3_MOD_VERSION);