drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
76ad4f0e
S
1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/if_vlan.h>
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/skbuff.h>
19#include <linux/sctp.h>
20#include <linux/vermagic.h>
21#include <net/gre.h>
30d240df 22#include <net/pkt_cls.h>
76ad4f0e
S
23#include <net/vxlan.h>
24
25#include "hnae3.h"
26#include "hns3_enet.h"
27
7b763f3f
FL
28static void hns3_clear_all_ring(struct hnae3_handle *h);
29static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
30
1db9b1bf 31static const char hns3_driver_name[] = "hns3";
76ad4f0e
S
32const char hns3_driver_version[] = VERMAGIC_STRING;
33static const char hns3_driver_string[] =
34 "Hisilicon Ethernet Network Driver for Hip08 Family";
35static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
36static struct hnae3_client client;
37
38/* hns3_pci_tbl - PCI Device ID Table
39 *
40 * Last entry must be all 0s
41 *
42 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
43 * Class, Class Mask, private data (not used) }
44 */
45static const struct pci_device_id hns3_pci_tbl[] = {
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
e92a0843 48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
2daf4a65 49 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
e92a0843 50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
2daf4a65 51 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
e92a0843 52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
2daf4a65 53 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
e92a0843 54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
2daf4a65 55 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
e92a0843 56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
2daf4a65 57 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
424eb834
SM
58 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
59 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
76ad4f0e
S
60 /* required last entry */
61 {0, }
62};
63MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
64
65static irqreturn_t hns3_irq_handle(int irq, void *dev)
66{
67 struct hns3_enet_tqp_vector *tqp_vector = dev;
68
69 napi_schedule(&tqp_vector->napi);
70
71 return IRQ_HANDLED;
72}
73
74static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
75{
76 struct hns3_enet_tqp_vector *tqp_vectors;
77 unsigned int i;
78
79 for (i = 0; i < priv->vector_num; i++) {
80 tqp_vectors = &priv->tqp_vector[i];
81
82 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
83 continue;
84
85 /* release the irq resource */
86 free_irq(tqp_vectors->vector_irq, tqp_vectors);
87 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
88 }
89}
90
91static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
92{
93 struct hns3_enet_tqp_vector *tqp_vectors;
94 int txrx_int_idx = 0;
95 int rx_int_idx = 0;
96 int tx_int_idx = 0;
97 unsigned int i;
98 int ret;
99
100 for (i = 0; i < priv->vector_num; i++) {
101 tqp_vectors = &priv->tqp_vector[i];
102
103 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
104 continue;
105
106 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
107 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
108 "%s-%s-%d", priv->netdev->name, "TxRx",
109 txrx_int_idx++);
110 txrx_int_idx++;
111 } else if (tqp_vectors->rx_group.ring) {
112 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
113 "%s-%s-%d", priv->netdev->name, "Rx",
114 rx_int_idx++);
115 } else if (tqp_vectors->tx_group.ring) {
116 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
117 "%s-%s-%d", priv->netdev->name, "Tx",
118 tx_int_idx++);
119 } else {
120 /* Skip this unused q_vector */
121 continue;
122 }
123
124 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
125
126 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
127 tqp_vectors->name,
128 tqp_vectors);
129 if (ret) {
130 netdev_err(priv->netdev, "request irq(%d) fail\n",
131 tqp_vectors->vector_irq);
132 return ret;
133 }
134
135 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
136 }
137
138 return 0;
139}
140
141static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
142 u32 mask_en)
143{
144 writel(mask_en, tqp_vector->mask_addr);
145}
146
147static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
148{
149 napi_enable(&tqp_vector->napi);
150
151 /* enable vector */
152 hns3_mask_vector_irq(tqp_vector, 1);
153}
154
155static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
156{
157 /* disable vector */
158 hns3_mask_vector_irq(tqp_vector, 0);
159
160 disable_irq(tqp_vector->vector_irq);
161 napi_disable(&tqp_vector->napi);
162}
163
434776a5
FL
164void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
165 u32 rl_value)
76ad4f0e 166{
434776a5
FL
167 u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
168
76ad4f0e
S
169 /* This defines the configuration for RL (Interrupt Rate Limiter).
170 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
171 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
172 */
434776a5 173
9bc727a9
YL
174 if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
175 !tqp_vector->rx_group.coal.gl_adapt_enable)
434776a5
FL
176 /* According to the hardware, the range of rl_reg is
177 * 0-59 and the unit is 4.
178 */
179 rl_reg |= HNS3_INT_RL_ENABLE_MASK;
180
181 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
182}
183
184void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
185 u32 gl_value)
186{
187 u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
188
189 writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
190}
191
192void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
193 u32 gl_value)
194{
195 u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
196
197 writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
76ad4f0e
S
198}
199
5fd4789a
FL
200static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
201 struct hns3_nic_priv *priv)
76ad4f0e 202{
5fd4789a
FL
203 struct hnae3_handle *h = priv->ae_handle;
204
76ad4f0e
S
205 /* initialize the configuration for interrupt coalescing.
206 * 1. GL (Interrupt Gap Limiter)
207 * 2. RL (Interrupt Rate Limiter)
208 */
209
5fd4789a 210 /* Default: enable interrupt coalescing self-adaptive and GL */
9bc727a9
YL
211 tqp_vector->tx_group.coal.gl_adapt_enable = 1;
212 tqp_vector->rx_group.coal.gl_adapt_enable = 1;
5fd4789a 213
9bc727a9
YL
214 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
215 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
5fd4789a 216
5fd4789a
FL
217 /* Default: disable RL */
218 h->kinfo.int_rl_setting = 0;
5fd4789a 219
cd9d187b 220 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
9bc727a9
YL
221 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
222 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
76ad4f0e
S
223}
224
dd38c726
YL
225static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
226 struct hns3_nic_priv *priv)
227{
228 struct hnae3_handle *h = priv->ae_handle;
229
230 hns3_set_vector_coalesce_tx_gl(tqp_vector,
9bc727a9 231 tqp_vector->tx_group.coal.int_gl);
dd38c726 232 hns3_set_vector_coalesce_rx_gl(tqp_vector,
9bc727a9 233 tqp_vector->rx_group.coal.int_gl);
dd38c726
YL
234 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
235}
236
9df8f79a
YL
237static int hns3_nic_set_real_num_queue(struct net_device *netdev)
238{
9780cb97 239 struct hnae3_handle *h = hns3_get_handle(netdev);
9df8f79a
YL
240 struct hnae3_knic_private_info *kinfo = &h->kinfo;
241 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
a75a8efa
YL
242 int i, ret;
243
244 if (kinfo->num_tc <= 1) {
245 netdev_reset_tc(netdev);
246 } else {
247 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
248 if (ret) {
249 netdev_err(netdev,
250 "netdev_set_num_tc fail, ret=%d!\n", ret);
251 return ret;
252 }
253
254 for (i = 0; i < HNAE3_MAX_TC; i++) {
255 if (!kinfo->tc_info[i].enable)
256 continue;
257
258 netdev_set_tc_queue(netdev,
259 kinfo->tc_info[i].tc,
260 kinfo->tc_info[i].tqp_count,
261 kinfo->tc_info[i].tqp_offset);
262 }
263 }
9df8f79a
YL
264
265 ret = netif_set_real_num_tx_queues(netdev, queue_size);
266 if (ret) {
267 netdev_err(netdev,
268 "netif_set_real_num_tx_queues fail, ret=%d!\n",
269 ret);
270 return ret;
271 }
272
273 ret = netif_set_real_num_rx_queues(netdev, queue_size);
274 if (ret) {
275 netdev_err(netdev,
276 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
277 return ret;
278 }
279
280 return 0;
281}
282
678335a1
PL
283static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
284{
285 u16 free_tqps, max_rss_size, max_tqps;
286
287 h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
288 max_tqps = h->kinfo.num_tc * max_rss_size;
289
290 return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
291}
292
76ad4f0e
S
293static int hns3_nic_net_up(struct net_device *netdev)
294{
295 struct hns3_nic_priv *priv = netdev_priv(netdev);
296 struct hnae3_handle *h = priv->ae_handle;
297 int i, j;
298 int ret;
299
7b763f3f
FL
300 ret = hns3_nic_reset_all_ring(h);
301 if (ret)
302 return ret;
303
76ad4f0e
S
304 /* get irq resource for all vectors */
305 ret = hns3_nic_init_irq(priv);
306 if (ret) {
307 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
308 return ret;
309 }
310
311 /* enable the vectors */
312 for (i = 0; i < priv->vector_num; i++)
313 hns3_vector_enable(&priv->tqp_vector[i]);
314
315 /* start the ae_dev */
316 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
317 if (ret)
318 goto out_start_err;
319
b875cc37
JS
320 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
321
76ad4f0e
S
322 return 0;
323
324out_start_err:
325 for (j = i - 1; j >= 0; j--)
326 hns3_vector_disable(&priv->tqp_vector[j]);
327
328 hns3_nic_uninit_irq(priv);
329
330 return ret;
331}
332
333static int hns3_nic_net_open(struct net_device *netdev)
334{
f8fa222c 335 struct hns3_nic_priv *priv = netdev_priv(netdev);
a75a8efa
YL
336 struct hnae3_handle *h = hns3_get_handle(netdev);
337 struct hnae3_knic_private_info *kinfo;
338 int i, ret;
76ad4f0e
S
339
340 netif_carrier_off(netdev);
341
9df8f79a
YL
342 ret = hns3_nic_set_real_num_queue(netdev);
343 if (ret)
76ad4f0e 344 return ret;
76ad4f0e
S
345
346 ret = hns3_nic_net_up(netdev);
347 if (ret) {
348 netdev_err(netdev,
349 "hns net up fail, ret=%d!\n", ret);
350 return ret;
351 }
352
a75a8efa
YL
353 kinfo = &h->kinfo;
354 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
355 netdev_set_prio_tc_map(netdev, i,
356 kinfo->prio_tc[i]);
357 }
358
6d4c3981 359 priv->ae_handle->last_reset_time = jiffies;
76ad4f0e
S
360 return 0;
361}
362
363static void hns3_nic_net_down(struct net_device *netdev)
364{
365 struct hns3_nic_priv *priv = netdev_priv(netdev);
366 const struct hnae3_ae_ops *ops;
367 int i;
368
b875cc37
JS
369 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
370 return;
371
7b763f3f
FL
372 /* disable vectors */
373 for (i = 0; i < priv->vector_num; i++)
374 hns3_vector_disable(&priv->tqp_vector[i]);
375
76ad4f0e
S
376 /* stop ae_dev */
377 ops = priv->ae_handle->ae_algo->ops;
378 if (ops->stop)
379 ops->stop(priv->ae_handle);
380
76ad4f0e
S
381 /* free irq resources */
382 hns3_nic_uninit_irq(priv);
7b763f3f
FL
383
384 hns3_clear_all_ring(priv->ae_handle);
76ad4f0e
S
385}
386
387static int hns3_nic_net_stop(struct net_device *netdev)
388{
389 netif_tx_stop_all_queues(netdev);
390 netif_carrier_off(netdev);
391
392 hns3_nic_net_down(netdev);
393
394 return 0;
395}
396
76ad4f0e
S
397static int hns3_nic_uc_sync(struct net_device *netdev,
398 const unsigned char *addr)
399{
9780cb97 400 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
401
402 if (h->ae_algo->ops->add_uc_addr)
403 return h->ae_algo->ops->add_uc_addr(h, addr);
404
405 return 0;
406}
407
408static int hns3_nic_uc_unsync(struct net_device *netdev,
409 const unsigned char *addr)
410{
9780cb97 411 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
412
413 if (h->ae_algo->ops->rm_uc_addr)
414 return h->ae_algo->ops->rm_uc_addr(h, addr);
415
416 return 0;
417}
418
419static int hns3_nic_mc_sync(struct net_device *netdev,
420 const unsigned char *addr)
421{
9780cb97 422 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e 423
720a8478 424 if (h->ae_algo->ops->add_mc_addr)
76ad4f0e
S
425 return h->ae_algo->ops->add_mc_addr(h, addr);
426
427 return 0;
428}
429
430static int hns3_nic_mc_unsync(struct net_device *netdev,
431 const unsigned char *addr)
432{
9780cb97 433 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e 434
720a8478 435 if (h->ae_algo->ops->rm_mc_addr)
76ad4f0e
S
436 return h->ae_algo->ops->rm_mc_addr(h, addr);
437
438 return 0;
439}
440
1db9b1bf 441static void hns3_nic_set_rx_mode(struct net_device *netdev)
76ad4f0e 442{
9780cb97 443 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
444
445 if (h->ae_algo->ops->set_promisc_mode) {
446 if (netdev->flags & IFF_PROMISC)
3b75c3df
PL
447 h->ae_algo->ops->set_promisc_mode(h, true, true);
448 else if (netdev->flags & IFF_ALLMULTI)
449 h->ae_algo->ops->set_promisc_mode(h, false, true);
76ad4f0e 450 else
3b75c3df 451 h->ae_algo->ops->set_promisc_mode(h, false, false);
76ad4f0e
S
452 }
453 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
454 netdev_err(netdev, "sync uc address fail\n");
40cca1c5 455 if (netdev->flags & IFF_MULTICAST) {
76ad4f0e
S
456 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
457 netdev_err(netdev, "sync mc address fail\n");
40cca1c5
XW
458
459 if (h->ae_algo->ops->update_mta_status)
460 h->ae_algo->ops->update_mta_status(h);
461 }
76ad4f0e
S
462}
463
464static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
465 u16 *mss, u32 *type_cs_vlan_tso)
466{
467 u32 l4_offset, hdr_len;
468 union l3_hdr_info l3;
469 union l4_hdr_info l4;
470 u32 l4_paylen;
471 int ret;
472
473 if (!skb_is_gso(skb))
474 return 0;
475
476 ret = skb_cow_head(skb, 0);
477 if (ret)
478 return ret;
479
480 l3.hdr = skb_network_header(skb);
481 l4.hdr = skb_transport_header(skb);
482
483 /* Software should clear the IPv4 header checksum field when
484 * TSO is needed.
485 */
486 if (l3.v4->version == 4)
487 l3.v4->check = 0;
488
489 /* tunnel packet.*/
490 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
491 SKB_GSO_GRE_CSUM |
492 SKB_GSO_UDP_TUNNEL |
493 SKB_GSO_UDP_TUNNEL_CSUM)) {
494 if ((!(skb_shinfo(skb)->gso_type &
495 SKB_GSO_PARTIAL)) &&
496 (skb_shinfo(skb)->gso_type &
497 SKB_GSO_UDP_TUNNEL_CSUM)) {
498 /* Software should clear the UDP checksum
499 * field when TSO is needed.
500 */
501 l4.udp->check = 0;
502 }
503 /* reset l3&l4 pointers from outer to inner headers */
504 l3.hdr = skb_inner_network_header(skb);
505 l4.hdr = skb_inner_transport_header(skb);
506
507 /* Software should clear the IPv4 header checksum field when
508 * TSO is needed.
509 */
510 if (l3.v4->version == 4)
511 l3.v4->check = 0;
512 }
513
514 /* normal or tunnel packet*/
515 l4_offset = l4.hdr - skb->data;
516 hdr_len = (l4.tcp->doff * 4) + l4_offset;
517
518 /* remove payload length from inner pseudo checksum when tso*/
519 l4_paylen = skb->len - l4_offset;
520 csum_replace_by_diff(&l4.tcp->check,
521 (__force __wsum)htonl(l4_paylen));
522
523 /* find the txbd field values */
524 *paylen = skb->len - hdr_len;
e4e87715
PL
525 hnae3_set_bit(*type_cs_vlan_tso,
526 HNS3_TXD_TSO_B, 1);
76ad4f0e
S
527
528 /* get MSS for TSO */
529 *mss = skb_shinfo(skb)->gso_size;
530
531 return 0;
532}
533
1898d4e4
S
534static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
535 u8 *il4_proto)
76ad4f0e
S
536{
537 union {
538 struct iphdr *v4;
539 struct ipv6hdr *v6;
540 unsigned char *hdr;
541 } l3;
542 unsigned char *l4_hdr;
543 unsigned char *exthdr;
544 u8 l4_proto_tmp;
545 __be16 frag_off;
546
547 /* find the outer header pointer */
548 l3.hdr = skb_network_header(skb);
35f58fd7 549 l4_hdr = skb_transport_header(skb);
76ad4f0e
S
550
551 if (skb->protocol == htons(ETH_P_IPV6)) {
552 exthdr = l3.hdr + sizeof(*l3.v6);
553 l4_proto_tmp = l3.v6->nexthdr;
554 if (l4_hdr != exthdr)
555 ipv6_skip_exthdr(skb, exthdr - skb->data,
556 &l4_proto_tmp, &frag_off);
557 } else if (skb->protocol == htons(ETH_P_IP)) {
558 l4_proto_tmp = l3.v4->protocol;
1898d4e4
S
559 } else {
560 return -EINVAL;
76ad4f0e
S
561 }
562
563 *ol4_proto = l4_proto_tmp;
564
565 /* tunnel packet */
566 if (!skb->encapsulation) {
567 *il4_proto = 0;
1898d4e4 568 return 0;
76ad4f0e
S
569 }
570
571 /* find the inner header pointer */
572 l3.hdr = skb_inner_network_header(skb);
573 l4_hdr = skb_inner_transport_header(skb);
574
575 if (l3.v6->version == 6) {
576 exthdr = l3.hdr + sizeof(*l3.v6);
577 l4_proto_tmp = l3.v6->nexthdr;
578 if (l4_hdr != exthdr)
579 ipv6_skip_exthdr(skb, exthdr - skb->data,
580 &l4_proto_tmp, &frag_off);
581 } else if (l3.v4->version == 4) {
582 l4_proto_tmp = l3.v4->protocol;
583 }
584
585 *il4_proto = l4_proto_tmp;
1898d4e4
S
586
587 return 0;
76ad4f0e
S
588}
589
590static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
591 u8 il4_proto, u32 *type_cs_vlan_tso,
592 u32 *ol_type_vlan_len_msec)
593{
594 union {
595 struct iphdr *v4;
596 struct ipv6hdr *v6;
597 unsigned char *hdr;
598 } l3;
599 union {
600 struct tcphdr *tcp;
601 struct udphdr *udp;
602 struct gre_base_hdr *gre;
603 unsigned char *hdr;
604 } l4;
605 unsigned char *l2_hdr;
606 u8 l4_proto = ol4_proto;
607 u32 ol2_len;
608 u32 ol3_len;
609 u32 ol4_len;
610 u32 l2_len;
611 u32 l3_len;
612
613 l3.hdr = skb_network_header(skb);
614 l4.hdr = skb_transport_header(skb);
615
616 /* compute L2 header size for normal packet, defined in 2 Bytes */
617 l2_len = l3.hdr - skb->data;
e4e87715
PL
618 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
619 HNS3_TXD_L2LEN_S, l2_len >> 1);
76ad4f0e
S
620
621 /* tunnel packet*/
622 if (skb->encapsulation) {
623 /* compute OL2 header size, defined in 2 Bytes */
624 ol2_len = l2_len;
e4e87715
PL
625 hnae3_set_field(*ol_type_vlan_len_msec,
626 HNS3_TXD_L2LEN_M,
627 HNS3_TXD_L2LEN_S, ol2_len >> 1);
76ad4f0e
S
628
629 /* compute OL3 header size, defined in 4 Bytes */
630 ol3_len = l4.hdr - l3.hdr;
e4e87715
PL
631 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
632 HNS3_TXD_L3LEN_S, ol3_len >> 2);
76ad4f0e
S
633
634 /* MAC in UDP, MAC in GRE (0x6558)*/
635 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
636 /* switch MAC header ptr from outer to inner header.*/
637 l2_hdr = skb_inner_mac_header(skb);
638
639 /* compute OL4 header size, defined in 4 Bytes. */
640 ol4_len = l2_hdr - l4.hdr;
e4e87715
PL
641 hnae3_set_field(*ol_type_vlan_len_msec,
642 HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
643 ol4_len >> 2);
76ad4f0e
S
644
645 /* switch IP header ptr from outer to inner header */
646 l3.hdr = skb_inner_network_header(skb);
647
648 /* compute inner l2 header size, defined in 2 Bytes. */
649 l2_len = l3.hdr - l2_hdr;
e4e87715
PL
650 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
651 HNS3_TXD_L2LEN_S, l2_len >> 1);
76ad4f0e
S
652 } else {
653 /* Packet types not supported by the hardware;
654 * the txbd len field is not filled.
655 */
656 return;
657 }
658
659 /* switch L4 header pointer from outer to inner */
660 l4.hdr = skb_inner_transport_header(skb);
661
662 l4_proto = il4_proto;
663 }
664
665 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
666 l3_len = l4.hdr - l3.hdr;
e4e87715
PL
667 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
668 HNS3_TXD_L3LEN_S, l3_len >> 2);
76ad4f0e
S
669
670 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
671 switch (l4_proto) {
672 case IPPROTO_TCP:
e4e87715
PL
673 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
674 HNS3_TXD_L4LEN_S, l4.tcp->doff);
76ad4f0e
S
675 break;
676 case IPPROTO_SCTP:
e4e87715
PL
677 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
678 HNS3_TXD_L4LEN_S,
679 (sizeof(struct sctphdr) >> 2));
76ad4f0e
S
680 break;
681 case IPPROTO_UDP:
e4e87715
PL
682 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
683 HNS3_TXD_L4LEN_S,
684 (sizeof(struct udphdr) >> 2));
76ad4f0e
S
685 break;
686 default:
687 /* Packet types not supported by the hardware;
688 * the txbd len field is not filled.
689 */
690 return;
691 }
692}
693
3db084d2
YL
694/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
695 * and the packet is UDP with the IANA-assigned VXLAN destination port
696 * (4789), the hardware is expected to do the checksum offload, but it
697 * does not. Detect this case so the caller can fall back to software
698 * checksumming.
699 */
700static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
701{
702#define IANA_VXLAN_PORT 4789
703 union {
704 struct tcphdr *tcp;
705 struct udphdr *udp;
706 struct gre_base_hdr *gre;
707 unsigned char *hdr;
708 } l4;
709
710 l4.hdr = skb_transport_header(skb);
711
712 if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
713 return false;
714
715 skb_checksum_help(skb);
716
717 return true;
718}
719
76ad4f0e
S
720static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
721 u8 il4_proto, u32 *type_cs_vlan_tso,
722 u32 *ol_type_vlan_len_msec)
723{
724 union {
725 struct iphdr *v4;
726 struct ipv6hdr *v6;
727 unsigned char *hdr;
728 } l3;
729 u32 l4_proto = ol4_proto;
730
731 l3.hdr = skb_network_header(skb);
732
733 /* define OL3 type and tunnel type(OL4).*/
734 if (skb->encapsulation) {
735 /* define outer network header type.*/
736 if (skb->protocol == htons(ETH_P_IP)) {
737 if (skb_is_gso(skb))
e4e87715
PL
738 hnae3_set_field(*ol_type_vlan_len_msec,
739 HNS3_TXD_OL3T_M,
740 HNS3_TXD_OL3T_S,
741 HNS3_OL3T_IPV4_CSUM);
76ad4f0e 742 else
e4e87715
PL
743 hnae3_set_field(*ol_type_vlan_len_msec,
744 HNS3_TXD_OL3T_M,
745 HNS3_TXD_OL3T_S,
746 HNS3_OL3T_IPV4_NO_CSUM);
76ad4f0e
S
747
748 } else if (skb->protocol == htons(ETH_P_IPV6)) {
e4e87715
PL
749 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
750 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
76ad4f0e
S
751 }
752
753 /* define tunnel type(OL4).*/
754 switch (l4_proto) {
755 case IPPROTO_UDP:
e4e87715
PL
756 hnae3_set_field(*ol_type_vlan_len_msec,
757 HNS3_TXD_TUNTYPE_M,
758 HNS3_TXD_TUNTYPE_S,
759 HNS3_TUN_MAC_IN_UDP);
76ad4f0e
S
760 break;
761 case IPPROTO_GRE:
e4e87715
PL
762 hnae3_set_field(*ol_type_vlan_len_msec,
763 HNS3_TXD_TUNTYPE_M,
764 HNS3_TXD_TUNTYPE_S,
765 HNS3_TUN_NVGRE);
76ad4f0e
S
766 break;
767 default:
768 /* Drop the skb if the tunnel type is not supported by the hardware,
769 * because the hardware cannot calculate the checksum when doing TSO.
770 */
771 if (skb_is_gso(skb))
772 return -EDOM;
773
774 /* The stack has already computed the IP checksum; fall back to
775 * software L4 checksumming when not doing TSO.
776 */
777 skb_checksum_help(skb);
778 return 0;
779 }
780
781 l3.hdr = skb_inner_network_header(skb);
782 l4_proto = il4_proto;
783 }
784
785 if (l3.v4->version == 4) {
e4e87715
PL
786 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
787 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
76ad4f0e
S
788
789 /* the stack computes the IP header already, the only time we
790 * need the hardware to recompute it is in the case of TSO.
791 */
792 if (skb_is_gso(skb))
e4e87715 793 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
76ad4f0e 794 } else if (l3.v6->version == 6) {
e4e87715
PL
795 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
796 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
76ad4f0e
S
797 }
798
799 switch (l4_proto) {
800 case IPPROTO_TCP:
5c897197 801 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
e4e87715
PL
802 hnae3_set_field(*type_cs_vlan_tso,
803 HNS3_TXD_L4T_M,
804 HNS3_TXD_L4T_S,
805 HNS3_L4T_TCP);
76ad4f0e
S
806 break;
807 case IPPROTO_UDP:
3db084d2
YL
808 if (hns3_tunnel_csum_bug(skb))
809 break;
810
5c897197 811 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
e4e87715
PL
812 hnae3_set_field(*type_cs_vlan_tso,
813 HNS3_TXD_L4T_M,
814 HNS3_TXD_L4T_S,
815 HNS3_L4T_UDP);
76ad4f0e
S
816 break;
817 case IPPROTO_SCTP:
5c897197 818 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
e4e87715
PL
819 hnae3_set_field(*type_cs_vlan_tso,
820 HNS3_TXD_L4T_M,
821 HNS3_TXD_L4T_S,
822 HNS3_L4T_SCTP);
76ad4f0e
S
823 break;
824 default:
825 /* Drop the skb if the tunnel type is not supported by the hardware,
826 * because the hardware cannot calculate the checksum when doing TSO.
827 */
828 if (skb_is_gso(skb))
829 return -EDOM;
830
831 /* The stack has already computed the IP checksum; fall back to
832 * software L4 checksumming when not doing TSO.
833 */
834 skb_checksum_help(skb);
835 return 0;
836 }
837
838 return 0;
839}
840
841static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
842{
843 /* Config bd buffer end */
e4e87715
PL
844 hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
845 HNS3_TXD_BDTYPE_S, 0);
846 hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
847 hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
848 hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
76ad4f0e
S
849}
850
9699cffe
PL
851static int hns3_fill_desc_vtags(struct sk_buff *skb,
852 struct hns3_enet_ring *tx_ring,
853 u32 *inner_vlan_flag,
854 u32 *out_vlan_flag,
855 u16 *inner_vtag,
856 u16 *out_vtag)
857{
858#define HNS3_TX_VLAN_PRIO_SHIFT 13
859
860 if (skb->protocol == htons(ETH_P_8021Q) &&
861 !(tx_ring->tqp->handle->kinfo.netdev->features &
862 NETIF_F_HW_VLAN_CTAG_TX)) {
863 /* When HW VLAN acceleration is turned off, and the stack
864 * sets the protocol to 802.1Q, the driver just needs to
865 * set the protocol to the encapsulated ethertype.
866 */
867 skb->protocol = vlan_get_protocol(skb);
868 return 0;
869 }
870
871 if (skb_vlan_tag_present(skb)) {
872 u16 vlan_tag;
873
874 vlan_tag = skb_vlan_tag_get(skb);
875 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
876
877 /* Based on the hardware strategy, use out_vtag in the double-tag
878 * case and inner_vtag in the single-tag case.
879 */
880 if (skb->protocol == htons(ETH_P_8021Q)) {
e4e87715 881 hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
9699cffe
PL
882 *out_vtag = vlan_tag;
883 } else {
e4e87715 884 hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
9699cffe
PL
885 *inner_vtag = vlan_tag;
886 }
887 } else if (skb->protocol == htons(ETH_P_8021Q)) {
888 struct vlan_ethhdr *vhdr;
889 int rc;
890
891 rc = skb_cow_head(skb, 0);
892 if (rc < 0)
893 return rc;
894 vhdr = (struct vlan_ethhdr *)skb->data;
895 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
896 << HNS3_TX_VLAN_PRIO_SHIFT);
897 }
898
899 skb->protocol = vlan_get_protocol(skb);
900 return 0;
901}
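/* Worked example of the tag layout built above (illustrative values only,
 * assuming the stack supplied a bare VID of 100 and skb->priority = 5):
 *
 *     vlan_tag = 100 | (5 << HNS3_TX_VLAN_PRIO_SHIFT);   // 0x0064 | 0xA000 = 0xA064
 *
 * i.e. the PCP ends up in bits 15..13 and the VID in the low 12 bits,
 * matching the 802.1Q TCI layout written to inner_vtag/out_vtag.
 */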
902
76ad4f0e
S
903static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
904 int size, dma_addr_t dma, int frag_end,
905 enum hns_desc_type type)
906{
907 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
908 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
909 u32 ol_type_vlan_len_msec = 0;
910 u16 bdtp_fe_sc_vld_ra_ri = 0;
911 u32 type_cs_vlan_tso = 0;
912 struct sk_buff *skb;
9699cffe
PL
913 u16 inner_vtag = 0;
914 u16 out_vtag = 0;
76ad4f0e
S
915 u32 paylen = 0;
916 u16 mss = 0;
76ad4f0e
S
917 u8 ol4_proto;
918 u8 il4_proto;
919 int ret;
920
921 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
922 desc_cb->priv = priv;
923 desc_cb->length = size;
924 desc_cb->dma = dma;
925 desc_cb->type = type;
926
927 /* now, fill the descriptor */
928 desc->addr = cpu_to_le64(dma);
929 desc->tx.send_size = cpu_to_le16((u16)size);
930 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
931 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
932
933 if (type == DESC_TYPE_SKB) {
934 skb = (struct sk_buff *)priv;
a90bb9a5 935 paylen = skb->len;
76ad4f0e 936
9699cffe
PL
937 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
938 &ol_type_vlan_len_msec,
939 &inner_vtag, &out_vtag);
940 if (unlikely(ret))
941 return ret;
942
76ad4f0e
S
943 if (skb->ip_summed == CHECKSUM_PARTIAL) {
944 skb_reset_mac_len(skb);
76ad4f0e 945
1898d4e4
S
946 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
947 if (ret)
948 return ret;
76ad4f0e
S
949 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
950 &type_cs_vlan_tso,
951 &ol_type_vlan_len_msec);
952 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
953 &type_cs_vlan_tso,
954 &ol_type_vlan_len_msec);
955 if (ret)
956 return ret;
957
958 ret = hns3_set_tso(skb, &paylen, &mss,
959 &type_cs_vlan_tso);
960 if (ret)
961 return ret;
962 }
963
964 /* Set txbd */
965 desc->tx.ol_type_vlan_len_msec =
966 cpu_to_le32(ol_type_vlan_len_msec);
967 desc->tx.type_cs_vlan_tso_len =
968 cpu_to_le32(type_cs_vlan_tso);
a90bb9a5 969 desc->tx.paylen = cpu_to_le32(paylen);
76ad4f0e 970 desc->tx.mss = cpu_to_le16(mss);
9699cffe
PL
971 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
972 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
76ad4f0e
S
973 }
974
975 /* move ring pointer to next.*/
976 ring_ptr_move_fw(ring, next_to_use);
977
978 return 0;
979}
980
981static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
982 int size, dma_addr_t dma, int frag_end,
983 enum hns_desc_type type)
984{
985 unsigned int frag_buf_num;
986 unsigned int k;
987 int sizeoflast;
988 int ret;
989
990 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
991 sizeoflast = size % HNS3_MAX_BD_SIZE;
992 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
993
994 /* When the frag size is bigger than the hardware limit, split this frag */
995 for (k = 0; k < frag_buf_num; k++) {
996 ret = hns3_fill_desc(ring, priv,
997 (k == frag_buf_num - 1) ?
998 sizeoflast : HNS3_MAX_BD_SIZE,
999 dma + HNS3_MAX_BD_SIZE * k,
1000 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
1001 (type == DESC_TYPE_SKB && !k) ?
1002 DESC_TYPE_SKB : DESC_TYPE_PAGE);
1003 if (ret)
1004 return ret;
1005 }
1006
1007 return 0;
1008}
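/* Illustrative example of the split above (values chosen for illustration):
 * if size == 2 * HNS3_MAX_BD_SIZE + 100, then frag_buf_num = 3 and
 * sizeoflast = 100, so the loop emits two BDs of HNS3_MAX_BD_SIZE bytes
 * followed by one BD of 100 bytes. Only the first BD of a DESC_TYPE_SKB
 * buffer keeps that type (the rest become DESC_TYPE_PAGE), and frag_end is
 * propagated only on the last BD.
 */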
1009
1010static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1011 struct hns3_enet_ring *ring)
1012{
1013 struct sk_buff *skb = *out_skb;
1014 struct skb_frag_struct *frag;
1015 int bdnum_for_frag;
1016 int frag_num;
1017 int buf_num;
1018 int size;
1019 int i;
1020
1021 size = skb_headlen(skb);
1022 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1023
1024 frag_num = skb_shinfo(skb)->nr_frags;
1025 for (i = 0; i < frag_num; i++) {
1026 frag = &skb_shinfo(skb)->frags[i];
1027 size = skb_frag_size(frag);
1028 bdnum_for_frag =
1029 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1030 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
1031 return -ENOMEM;
1032
1033 buf_num += bdnum_for_frag;
1034 }
1035
1036 if (buf_num > ring_space(ring))
1037 return -EBUSY;
1038
1039 *bnum = buf_num;
1040 return 0;
1041}
1042
1043static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1044 struct hns3_enet_ring *ring)
1045{
1046 struct sk_buff *skb = *out_skb;
1047 int buf_num;
1048
1049 /* No. of segments (plus a header) */
1050 buf_num = skb_shinfo(skb)->nr_frags + 1;
1051
1052 if (buf_num > ring_space(ring))
1053 return -EBUSY;
1054
1055 *bnum = buf_num;
1056
1057 return 0;
1058}
1059
1060static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
1061{
1062 struct device *dev = ring_to_dev(ring);
1063 unsigned int i;
1064
1065 for (i = 0; i < ring->desc_num; i++) {
1066 /* check if this is where we started */
1067 if (ring->next_to_use == next_to_use_orig)
1068 break;
1069
1070 /* unmap the descriptor dma address */
1071 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1072 dma_unmap_single(dev,
1073 ring->desc_cb[ring->next_to_use].dma,
1074 ring->desc_cb[ring->next_to_use].length,
1075 DMA_TO_DEVICE);
1076 else
1077 dma_unmap_page(dev,
1078 ring->desc_cb[ring->next_to_use].dma,
1079 ring->desc_cb[ring->next_to_use].length,
1080 DMA_TO_DEVICE);
1081
1082 /* rollback one */
1083 ring_ptr_move_bw(ring, next_to_use);
1084 }
1085}
1086
d43e5aca 1087netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
76ad4f0e
S
1088{
1089 struct hns3_nic_priv *priv = netdev_priv(netdev);
1090 struct hns3_nic_ring_data *ring_data =
1091 &tx_ring_data(priv, skb->queue_mapping);
1092 struct hns3_enet_ring *ring = ring_data->ring;
1093 struct device *dev = priv->dev;
1094 struct netdev_queue *dev_queue;
1095 struct skb_frag_struct *frag;
1096 int next_to_use_head;
1097 int next_to_use_frag;
1098 dma_addr_t dma;
1099 int buf_num;
1100 int seg_num;
1101 int size;
1102 int ret;
1103 int i;
1104
1105 /* Prefetch the data used later */
1106 prefetch(skb->data);
1107
1108 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1109 case -EBUSY:
1110 u64_stats_update_begin(&ring->syncp);
1111 ring->stats.tx_busy++;
1112 u64_stats_update_end(&ring->syncp);
1113
1114 goto out_net_tx_busy;
1115 case -ENOMEM:
1116 u64_stats_update_begin(&ring->syncp);
1117 ring->stats.sw_err_cnt++;
1118 u64_stats_update_end(&ring->syncp);
1119 netdev_err(netdev, "no memory to xmit!\n");
1120
1121 goto out_err_tx_ok;
1122 default:
1123 break;
1124 }
1125
1126 /* No. of segments (plus a header) */
1127 seg_num = skb_shinfo(skb)->nr_frags + 1;
1128 /* Fill the first part */
1129 size = skb_headlen(skb);
1130
1131 next_to_use_head = ring->next_to_use;
1132
1133 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1134 if (dma_mapping_error(dev, dma)) {
1135 netdev_err(netdev, "TX head DMA map failed\n");
1136 ring->stats.sw_err_cnt++;
1137 goto out_err_tx_ok;
1138 }
1139
1140 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1141 DESC_TYPE_SKB);
1142 if (ret)
1143 goto head_dma_map_err;
1144
1145 next_to_use_frag = ring->next_to_use;
1146 /* Fill the fragments */
1147 for (i = 1; i < seg_num; i++) {
1148 frag = &skb_shinfo(skb)->frags[i - 1];
1149 size = skb_frag_size(frag);
1150 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1151 if (dma_mapping_error(dev, dma)) {
1152 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
1153 ring->stats.sw_err_cnt++;
1154 goto frag_dma_map_err;
1155 }
1156 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1157 seg_num - 1 == i ? 1 : 0,
1158 DESC_TYPE_PAGE);
1159
1160 if (ret)
1161 goto frag_dma_map_err;
1162 }
1163
1164 /* Translation of the whole packet into BDs is complete */
1165 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1166 netdev_tx_sent_queue(dev_queue, skb->len);
1167
1168 wmb(); /* Commit all data before submit */
1169
e4e87715 1170 hnae3_queue_xmit(ring->tqp, buf_num);
76ad4f0e
S
1171
1172 return NETDEV_TX_OK;
1173
1174frag_dma_map_err:
1175 hns_nic_dma_unmap(ring, next_to_use_frag);
1176
1177head_dma_map_err:
1178 hns_nic_dma_unmap(ring, next_to_use_head);
1179
1180out_err_tx_ok:
1181 dev_kfree_skb_any(skb);
1182 return NETDEV_TX_OK;
1183
1184out_net_tx_busy:
1185 netif_stop_subqueue(netdev, ring_data->queue_index);
1186 smp_mb(); /* Commit all data before submit */
1187
1188 return NETDEV_TX_BUSY;
1189}
1190
1191static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1192{
9780cb97 1193 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
1194 struct sockaddr *mac_addr = p;
1195 int ret;
1196
1197 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1198 return -EADDRNOTAVAIL;
1199
5ec2a51e
JS
1200 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1201 netdev_info(netdev, "already using mac address %pM\n",
1202 mac_addr->sa_data);
1203 return 0;
1204 }
1205
59098055 1206 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
76ad4f0e
S
1207 if (ret) {
1208 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1209 return ret;
1210 }
1211
1212 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1213
1214 return 0;
1215}
1216
1217static int hns3_nic_set_features(struct net_device *netdev,
1218 netdev_features_t features)
1219{
181d454b 1220 netdev_features_t changed = netdev->features ^ features;
76ad4f0e 1221 struct hns3_nic_priv *priv = netdev_priv(netdev);
052ece6d 1222 struct hnae3_handle *h = priv->ae_handle;
052ece6d 1223 int ret;
76ad4f0e 1224
181d454b
JS
1225 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1226 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1227 priv->ops.fill_desc = hns3_fill_desc_tso;
1228 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1229 } else {
1230 priv->ops.fill_desc = hns3_fill_desc;
1231 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1232 }
76ad4f0e
S
1233 }
1234
bd368416
JS
1235 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1236 h->ae_algo->ops->enable_vlan_filter) {
181d454b
JS
1237 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1238 h->ae_algo->ops->enable_vlan_filter(h, true);
1239 else
1240 h->ae_algo->ops->enable_vlan_filter(h, false);
1241 }
391b5e93 1242
bd368416
JS
1243 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1244 h->ae_algo->ops->enable_hw_strip_rxvtag) {
052ece6d
PL
1245 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1246 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1247 else
1248 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1249
1250 if (ret)
1251 return ret;
1252 }
1253
76ad4f0e
S
1254 netdev->features = features;
1255 return 0;
1256}
1257
6c88d9d7
PL
1258static void hns3_nic_get_stats64(struct net_device *netdev,
1259 struct rtnl_link_stats64 *stats)
76ad4f0e
S
1260{
1261 struct hns3_nic_priv *priv = netdev_priv(netdev);
1262 int queue_num = priv->ae_handle->kinfo.num_tqps;
c5f65480 1263 struct hnae3_handle *handle = priv->ae_handle;
76ad4f0e
S
1264 struct hns3_enet_ring *ring;
1265 unsigned int start;
1266 unsigned int idx;
1267 u64 tx_bytes = 0;
1268 u64 rx_bytes = 0;
1269 u64 tx_pkts = 0;
1270 u64 rx_pkts = 0;
d2a5dca8
JS
1271 u64 tx_drop = 0;
1272 u64 rx_drop = 0;
76ad4f0e 1273
b875cc37
JS
1274 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1275 return;
1276
c5f65480
JS
1277 handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1278
76ad4f0e
S
1279 for (idx = 0; idx < queue_num; idx++) {
1280 /* fetch the tx stats */
1281 ring = priv->ring_data[idx].ring;
1282 do {
d36d36ce 1283 start = u64_stats_fetch_begin_irq(&ring->syncp);
76ad4f0e
S
1284 tx_bytes += ring->stats.tx_bytes;
1285 tx_pkts += ring->stats.tx_pkts;
d2a5dca8
JS
1286 tx_drop += ring->stats.tx_busy;
1287 tx_drop += ring->stats.sw_err_cnt;
76ad4f0e
S
1288 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1289
1290 /* fetch the rx stats */
1291 ring = priv->ring_data[idx + queue_num].ring;
1292 do {
d36d36ce 1293 start = u64_stats_fetch_begin_irq(&ring->syncp);
76ad4f0e
S
1294 rx_bytes += ring->stats.rx_bytes;
1295 rx_pkts += ring->stats.rx_pkts;
d2a5dca8
JS
1296 rx_drop += ring->stats.non_vld_descs;
1297 rx_drop += ring->stats.err_pkt_len;
1298 rx_drop += ring->stats.l2_err;
76ad4f0e
S
1299 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1300 }
1301
1302 stats->tx_bytes = tx_bytes;
1303 stats->tx_packets = tx_pkts;
1304 stats->rx_bytes = rx_bytes;
1305 stats->rx_packets = rx_pkts;
1306
1307 stats->rx_errors = netdev->stats.rx_errors;
1308 stats->multicast = netdev->stats.multicast;
1309 stats->rx_length_errors = netdev->stats.rx_length_errors;
1310 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1311 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1312
1313 stats->tx_errors = netdev->stats.tx_errors;
d2a5dca8
JS
1314 stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1315 stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
76ad4f0e
S
1316 stats->collisions = netdev->stats.collisions;
1317 stats->rx_over_errors = netdev->stats.rx_over_errors;
1318 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1319 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1320 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1321 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1322 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1323 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1324 stats->tx_window_errors = netdev->stats.tx_window_errors;
1325 stats->rx_compressed = netdev->stats.rx_compressed;
1326 stats->tx_compressed = netdev->stats.tx_compressed;
1327}
1328
30d240df 1329static int hns3_setup_tc(struct net_device *netdev, void *type_data)
76ad4f0e 1330{
30d240df 1331 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
9780cb97 1332 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e 1333 struct hnae3_knic_private_info *kinfo = &h->kinfo;
30d240df
YL
1334 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1335 u8 tc = mqprio_qopt->qopt.num_tc;
1336 u16 mode = mqprio_qopt->mode;
1337 u8 hw = mqprio_qopt->qopt.hw;
1338 bool if_running;
76ad4f0e
S
1339 int ret;
1340
30d240df
YL
1341 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1342 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1343 return -EOPNOTSUPP;
1344
76ad4f0e
S
1345 if (tc > HNAE3_MAX_TC)
1346 return -EINVAL;
1347
76ad4f0e
S
1348 if (!netdev)
1349 return -EINVAL;
1350
30d240df
YL
1351 if_running = netif_running(netdev);
1352 if (if_running) {
1353 hns3_nic_net_stop(netdev);
1354 msleep(100);
76ad4f0e
S
1355 }
1356
30d240df
YL
1357 ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1358 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
76ad4f0e 1359 if (ret)
30d240df
YL
1360 goto out;
1361
30d240df
YL
1362 ret = hns3_nic_set_real_num_queue(netdev);
1363
1364out:
1365 if (if_running)
1366 hns3_nic_net_open(netdev);
1367
1368 return ret;
76ad4f0e
S
1369}
1370
2572ac53 1371static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
de4784ca 1372 void *type_data)
76ad4f0e 1373{
575ed7d3 1374 if (type != TC_SETUP_QDISC_MQPRIO)
38cf0426 1375 return -EOPNOTSUPP;
76ad4f0e 1376
30d240df 1377 return hns3_setup_tc(dev, type_data);
76ad4f0e
S
1378}
1379
1380static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1381 __be16 proto, u16 vid)
1382{
9780cb97 1383 struct hnae3_handle *h = hns3_get_handle(netdev);
681ec399 1384 struct hns3_nic_priv *priv = netdev_priv(netdev);
76ad4f0e
S
1385 int ret = -EIO;
1386
1387 if (h->ae_algo->ops->set_vlan_filter)
1388 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1389
681ec399
YL
1390 if (!ret)
1391 set_bit(vid, priv->active_vlans);
1392
76ad4f0e
S
1393 return ret;
1394}
1395
1396static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1397 __be16 proto, u16 vid)
1398{
9780cb97 1399 struct hnae3_handle *h = hns3_get_handle(netdev);
681ec399 1400 struct hns3_nic_priv *priv = netdev_priv(netdev);
76ad4f0e
S
1401 int ret = -EIO;
1402
1403 if (h->ae_algo->ops->set_vlan_filter)
1404 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1405
681ec399
YL
1406 if (!ret)
1407 clear_bit(vid, priv->active_vlans);
1408
76ad4f0e
S
1409 return ret;
1410}
1411
681ec399
YL
1412static void hns3_restore_vlan(struct net_device *netdev)
1413{
1414 struct hns3_nic_priv *priv = netdev_priv(netdev);
1415 u16 vid;
1416 int ret;
1417
1418 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1419 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1420 if (ret)
1421 netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
1422 vid, ret);
1423 }
1424}
1425
76ad4f0e
S
1426static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1427 u8 qos, __be16 vlan_proto)
1428{
9780cb97 1429 struct hnae3_handle *h = hns3_get_handle(netdev);
76ad4f0e
S
1430 int ret = -EIO;
1431
1432 if (h->ae_algo->ops->set_vf_vlan_filter)
1433 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1434 qos, vlan_proto);
1435
1436 return ret;
1437}
1438
a8e8b7ff
S
1439static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1440{
9780cb97 1441 struct hnae3_handle *h = hns3_get_handle(netdev);
a8e8b7ff
S
1442 bool if_running = netif_running(netdev);
1443 int ret;
1444
1445 if (!h->ae_algo->ops->set_mtu)
1446 return -EOPNOTSUPP;
1447
1448 /* if this was called with netdev up then bring netdevice down */
1449 if (if_running) {
1450 (void)hns3_nic_net_stop(netdev);
1451 msleep(100);
1452 }
1453
1454 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1455 if (ret) {
1456 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1457 ret);
1458 return ret;
1459 }
1460
5bad95a1
FL
1461 netdev->mtu = new_mtu;
1462
a8e8b7ff
S
1463 /* if the netdev was running earlier, bring it up again */
1464 if (if_running && hns3_nic_net_open(netdev))
1465 ret = -EINVAL;
1466
1467 return ret;
1468}
1469
f8fa222c
L
1470static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1471{
1472 struct hns3_nic_priv *priv = netdev_priv(ndev);
1473 struct hns3_enet_ring *tx_ring = NULL;
1474 int timeout_queue = 0;
1475 int hw_head, hw_tail;
1476 int i;
1477
1478 /* Find the stopped queue the same way the stack does */
1479 for (i = 0; i < ndev->real_num_tx_queues; i++) {
1480 struct netdev_queue *q;
1481 unsigned long trans_start;
1482
1483 q = netdev_get_tx_queue(ndev, i);
1484 trans_start = q->trans_start;
1485 if (netif_xmit_stopped(q) &&
1486 time_after(jiffies,
1487 (trans_start + ndev->watchdog_timeo))) {
1488 timeout_queue = i;
1489 break;
1490 }
1491 }
1492
1493 if (i == ndev->num_tx_queues) {
1494 netdev_info(ndev,
1495 "no netdev TX timeout queue found, timeout count: %llu\n",
1496 priv->tx_timeout_count);
1497 return false;
1498 }
1499
1500 tx_ring = priv->ring_data[timeout_queue].ring;
1501
1502 hw_head = readl_relaxed(tx_ring->tqp->io_base +
1503 HNS3_RING_TX_RING_HEAD_REG);
1504 hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1505 HNS3_RING_TX_RING_TAIL_REG);
1506 netdev_info(ndev,
1507 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1508 priv->tx_timeout_count,
1509 timeout_queue,
1510 tx_ring->next_to_use,
1511 tx_ring->next_to_clean,
1512 hw_head,
1513 hw_tail,
1514 readl(tx_ring->tqp_vector->mask_addr));
1515
1516 return true;
1517}
1518
1519static void hns3_nic_net_timeout(struct net_device *ndev)
1520{
1521 struct hns3_nic_priv *priv = netdev_priv(ndev);
f8fa222c
L
1522 struct hnae3_handle *h = priv->ae_handle;
1523
1524 if (!hns3_get_tx_timeo_queue_info(ndev))
1525 return;
1526
1527 priv->tx_timeout_count++;
1528
6d4c3981 1529 if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
f8fa222c
L
1530 return;
1531
6d4c3981 1532 /* request the reset */
f8fa222c 1533 if (h->ae_algo->ops->reset_event)
6d4c3981 1534 h->ae_algo->ops->reset_event(h);
f8fa222c
L
1535}
1536
76ad4f0e
S
1537static const struct net_device_ops hns3_nic_netdev_ops = {
1538 .ndo_open = hns3_nic_net_open,
1539 .ndo_stop = hns3_nic_net_stop,
1540 .ndo_start_xmit = hns3_nic_net_xmit,
f8fa222c 1541 .ndo_tx_timeout = hns3_nic_net_timeout,
76ad4f0e 1542 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
a8e8b7ff 1543 .ndo_change_mtu = hns3_nic_change_mtu,
76ad4f0e
S
1544 .ndo_set_features = hns3_nic_set_features,
1545 .ndo_get_stats64 = hns3_nic_get_stats64,
1546 .ndo_setup_tc = hns3_nic_setup_tc,
1547 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
76ad4f0e
S
1548 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1549 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1550 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1551};
1552
2312e050
FL
1553static bool hns3_is_phys_func(struct pci_dev *pdev)
1554{
1555 u32 dev_id = pdev->device;
1556
1557 switch (dev_id) {
1558 case HNAE3_DEV_ID_GE:
1559 case HNAE3_DEV_ID_25GE:
1560 case HNAE3_DEV_ID_25GE_RDMA:
1561 case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1562 case HNAE3_DEV_ID_50GE_RDMA:
1563 case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1564 case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1565 return true;
1566 case HNAE3_DEV_ID_100G_VF:
1567 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1568 return false;
1569 default:
1570 dev_warn(&pdev->dev, "un-recognized pci device-id %d",
1571 dev_id);
1572 }
1573
1574 return false;
1575}
1576
2312e050
FL
1577static void hns3_disable_sriov(struct pci_dev *pdev)
1578{
1579 /* If our VFs are assigned we cannot shut down SR-IOV
1580 * without causing issues, so just leave the hardware
1581 * available but disabled
1582 */
1583 if (pci_vfs_assigned(pdev)) {
1584 dev_warn(&pdev->dev,
1585 "disabling driver while VFs are assigned\n");
1586 return;
1587 }
1588
1589 pci_disable_sriov(pdev);
1590}
1591
76ad4f0e
S
1592/* hns3_probe - Device initialization routine
1593 * @pdev: PCI device information struct
1594 * @ent: entry in hns3_pci_tbl
1595 *
1596 * hns3_probe initializes a PF identified by a pci_dev structure.
1597 * The OS initialization, configuring of the PF private structure,
1598 * and a hardware reset occur.
1599 *
1600 * Returns 0 on success, negative on failure
1601 */
1602static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1603{
1604 struct hnae3_ae_dev *ae_dev;
1605 int ret;
1606
1607 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1608 GFP_KERNEL);
1609 if (!ae_dev) {
1610 ret = -ENOMEM;
1611 return ret;
1612 }
1613
1614 ae_dev->pdev = pdev;
e92a0843 1615 ae_dev->flag = ent->driver_data;
76ad4f0e
S
1616 ae_dev->dev_type = HNAE3_DEV_KNIC;
1617 pci_set_drvdata(pdev, ae_dev);
1618
50fbc237 1619 hnae3_register_ae_dev(ae_dev);
2312e050 1620
2312e050 1621 return 0;
76ad4f0e
S
1622}
1623
1624/* hns3_remove - Device removal routine
1625 * @pdev: PCI device information struct
1626 */
1627static void hns3_remove(struct pci_dev *pdev)
1628{
1629 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1630
2312e050
FL
1631 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1632 hns3_disable_sriov(pdev);
1633
76ad4f0e 1634 hnae3_unregister_ae_dev(ae_dev);
76ad4f0e
S
1635}
1636
fa8d82e8
PL
1637/**
1638 * hns3_pci_sriov_configure
1639 * @pdev: pointer to a pci_dev structure
1640 * @num_vfs: number of VFs to allocate
1641 *
1642 * Enable or change the number of VFs. Called when the user updates the number
1643 * of VFs in sysfs.
1644 **/
743e1a84 1645static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
fa8d82e8
PL
1646{
1647 int ret;
1648
1649 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1650 dev_warn(&pdev->dev, "Can not config SRIOV\n");
1651 return -EINVAL;
1652 }
1653
1654 if (num_vfs) {
1655 ret = pci_enable_sriov(pdev, num_vfs);
1656 if (ret)
1657 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
743e1a84
SM
1658 else
1659 return num_vfs;
fa8d82e8
PL
1660 } else if (!pci_vfs_assigned(pdev)) {
1661 pci_disable_sriov(pdev);
1662 } else {
1663 dev_warn(&pdev->dev,
1664 "Unable to free VFs because some are assigned to VMs.\n");
1665 }
1666
1667 return 0;
1668}
1669
76ad4f0e
S
1670static struct pci_driver hns3_driver = {
1671 .name = hns3_driver_name,
1672 .id_table = hns3_pci_tbl,
1673 .probe = hns3_probe,
1674 .remove = hns3_remove,
fa8d82e8 1675 .sriov_configure = hns3_pci_sriov_configure,
76ad4f0e
S
1676};
1677
1678/* set default feature to hns3 */
1679static void hns3_set_default_feature(struct net_device *netdev)
1680{
1681 netdev->priv_flags |= IFF_UNICAST_FLT;
1682
1683 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1684 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1685 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1686 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1687 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1688
1689 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1690
1691 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1692
1693 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1694 NETIF_F_HW_VLAN_CTAG_FILTER |
052ece6d 1695 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
76ad4f0e
S
1696 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1697 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1698 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1699 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1700
1701 netdev->vlan_features |=
1702 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1703 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1704 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1705 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1706 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1707
1708 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
b2641e2a 1709 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
76ad4f0e
S
1710 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1711 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1712 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1713 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1714}
1715
1716static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1717 struct hns3_desc_cb *cb)
1718{
e4e87715 1719 unsigned int order = hnae3_page_order(ring);
76ad4f0e
S
1720 struct page *p;
1721
1722 p = dev_alloc_pages(order);
1723 if (!p)
1724 return -ENOMEM;
1725
1726 cb->priv = p;
1727 cb->page_offset = 0;
1728 cb->reuse_flag = 0;
1729 cb->buf = page_address(p);
e4e87715 1730 cb->length = hnae3_page_size(ring);
76ad4f0e
S
1731 cb->type = DESC_TYPE_PAGE;
1732
76ad4f0e
S
1733 return 0;
1734}
1735
1736static void hns3_free_buffer(struct hns3_enet_ring *ring,
1737 struct hns3_desc_cb *cb)
1738{
1739 if (cb->type == DESC_TYPE_SKB)
1740 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1741 else if (!HNAE3_IS_TX_RING(ring))
1742 put_page((struct page *)cb->priv);
1743 memset(cb, 0, sizeof(*cb));
1744}
1745
1746static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1747{
1748 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1749 cb->length, ring_to_dma_dir(ring));
1750
1751 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1752 return -EIO;
1753
1754 return 0;
1755}
1756
1757static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1758 struct hns3_desc_cb *cb)
1759{
1760 if (cb->type == DESC_TYPE_SKB)
1761 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1762 ring_to_dma_dir(ring));
1763 else
1764 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1765 ring_to_dma_dir(ring));
1766}
1767
1768static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1769{
1770 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1771 ring->desc[i].addr = 0;
1772}
1773
1774static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1775{
1776 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1777
1778 if (!ring->desc_cb[i].dma)
1779 return;
1780
1781 hns3_buffer_detach(ring, i);
1782 hns3_free_buffer(ring, cb);
1783}
1784
1785static void hns3_free_buffers(struct hns3_enet_ring *ring)
1786{
1787 int i;
1788
1789 for (i = 0; i < ring->desc_num; i++)
1790 hns3_free_buffer_detach(ring, i);
1791}
1792
1793/* free desc along with its attached buffer */
1794static void hns3_free_desc(struct hns3_enet_ring *ring)
1795{
024cc792
HT
1796 int size = ring->desc_num * sizeof(ring->desc[0]);
1797
76ad4f0e
S
1798 hns3_free_buffers(ring);
1799
024cc792
HT
1800 if (ring->desc) {
1801 dma_free_coherent(ring_to_dev(ring), size,
1802 ring->desc, ring->desc_dma_addr);
1803 ring->desc = NULL;
1804 }
76ad4f0e
S
1805}
1806
1807static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1808{
1809 int size = ring->desc_num * sizeof(ring->desc[0]);
1810
024cc792
HT
1811 ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
1812 &ring->desc_dma_addr,
1813 GFP_KERNEL);
76ad4f0e
S
1814 if (!ring->desc)
1815 return -ENOMEM;
1816
76ad4f0e
S
1817 return 0;
1818}
1819
1820static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1821 struct hns3_desc_cb *cb)
1822{
1823 int ret;
1824
1825 ret = hns3_alloc_buffer(ring, cb);
1826 if (ret)
1827 goto out;
1828
1829 ret = hns3_map_buffer(ring, cb);
1830 if (ret)
1831 goto out_with_buf;
1832
1833 return 0;
1834
1835out_with_buf:
564883bb 1836 hns3_free_buffer(ring, cb);
76ad4f0e
S
1837out:
1838 return ret;
1839}
1840
1841static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1842{
1843 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1844
1845 if (ret)
1846 return ret;
1847
1848 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1849
1850 return 0;
1851}
1852
1853/* Allocate memory for the raw packet buffer and map it for DMA */
1854static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1855{
1856 int i, j, ret;
1857
1858 for (i = 0; i < ring->desc_num; i++) {
1859 ret = hns3_alloc_buffer_attach(ring, i);
1860 if (ret)
1861 goto out_buffer_fail;
1862 }
1863
1864 return 0;
1865
1866out_buffer_fail:
1867 for (j = i - 1; j >= 0; j--)
1868 hns3_free_buffer_detach(ring, j);
1869 return ret;
1870}
1871
1872/* detach an in-use buffer and replace it with a reserved one */
1873static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1874 struct hns3_desc_cb *res_cb)
1875{
b9077428 1876 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
76ad4f0e
S
1877 ring->desc_cb[i] = *res_cb;
1878 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
7d0b130c 1879 ring->desc[i].rx.bd_base_info = 0;
76ad4f0e
S
1880}
1881
1882static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1883{
1884 ring->desc_cb[i].reuse_flag = 0;
1885 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1886 + ring->desc_cb[i].page_offset);
7d0b130c 1887 ring->desc[i].rx.bd_base_info = 0;
76ad4f0e
S
1888}
1889
1890static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1891 int *pkts)
1892{
1893 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1894
1895 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1896 (*bytes) += desc_cb->length;
e4e87715 1897	/* desc_cb will be cleaned after hns3_free_buffer_detach */
76ad4f0e
S
1898 hns3_free_buffer_detach(ring, ring->next_to_clean);
1899
1900 ring_ptr_move_fw(ring, next_to_clean);
1901}
1902
1903static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1904{
1905 int u = ring->next_to_use;
1906 int c = ring->next_to_clean;
1907
1908 if (unlikely(h > ring->desc_num))
1909 return 0;
1910
1911 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1912}
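/* Illustrative example of the check above, with assumed ring indices:
 * for desc_num = 512, next_to_clean = 100 and next_to_use = 200 the
 * reported head h is only accepted when 100 < h <= 200; once the ring
 * wraps (next_to_use = 50, next_to_clean = 400) the accepted window
 * wraps too: h > 400 or h <= 50.
 */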
1913
24e750c4 1914bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
76ad4f0e
S
1915{
1916 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1917 struct netdev_queue *dev_queue;
1918 int bytes, pkts;
1919 int head;
1920
1921 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1922	rmb(); /* Make sure head is ready before touching any data */
1923
1924 if (is_ring_empty(ring) || head == ring->next_to_clean)
24e750c4 1925 return true; /* no data to poll */
76ad4f0e 1926
0e6084aa 1927 if (unlikely(!is_valid_clean_head(ring, head))) {
76ad4f0e
S
1928 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1929 ring->next_to_use, ring->next_to_clean);
1930
1931 u64_stats_update_begin(&ring->syncp);
1932 ring->stats.io_err_cnt++;
1933 u64_stats_update_end(&ring->syncp);
24e750c4 1934 return true;
76ad4f0e
S
1935 }
1936
1937 bytes = 0;
1938 pkts = 0;
1939 while (head != ring->next_to_clean && budget) {
1940 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1941 /* Issue prefetch for next Tx descriptor */
1942 prefetch(&ring->desc_cb[ring->next_to_clean]);
1943 budget--;
1944 }
1945
1946 ring->tqp_vector->tx_group.total_bytes += bytes;
1947 ring->tqp_vector->tx_group.total_packets += pkts;
1948
1949 u64_stats_update_begin(&ring->syncp);
1950 ring->stats.tx_bytes += bytes;
1951 ring->stats.tx_pkts += pkts;
1952 u64_stats_update_end(&ring->syncp);
1953
1954 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1955 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1956
1957 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1958 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1959 /* Make sure that anybody stopping the queue after this
1960 * sees the new next_to_clean.
1961 */
1962 smp_mb();
1963 if (netif_tx_queue_stopped(dev_queue)) {
1964 netif_tx_wake_queue(dev_queue);
1965 ring->stats.restart_queue++;
1966 }
1967 }
1968
1969 return !!budget;
1970}
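/* Illustrative budget behaviour for the reclaim loop above: with
 * budget = 32 and 10 completed descriptors the loop drains all of them
 * and returns true (budget not exhausted); with 40 completions it stops
 * after 32 and returns false so NAPI keeps polling this vector.
 */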
1971
1972static int hns3_desc_unused(struct hns3_enet_ring *ring)
1973{
1974 int ntc = ring->next_to_clean;
1975 int ntu = ring->next_to_use;
1976
1977 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1978}
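/* Worked example of the arithmetic above, using made-up indices: with
 * desc_num = 512, next_to_clean = 10 and next_to_use = 500 there are
 * 512 + 10 - 500 = 22 descriptors free for refill; with next_to_clean =
 * 300 and next_to_use = 100 it is simply 300 - 100 = 200.
 */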
1979
1980static void
1981hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
1982{
1983 struct hns3_desc_cb *desc_cb;
1984 struct hns3_desc_cb res_cbs;
1985 int i, ret;
1986
1987	for (i = 0; i < cleaned_count; i++) {
1988 desc_cb = &ring->desc_cb[ring->next_to_use];
1989 if (desc_cb->reuse_flag) {
1990 u64_stats_update_begin(&ring->syncp);
1991 ring->stats.reuse_pg_cnt++;
1992 u64_stats_update_end(&ring->syncp);
1993
1994 hns3_reuse_buffer(ring, ring->next_to_use);
1995 } else {
1996 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1997 if (ret) {
1998 u64_stats_update_begin(&ring->syncp);
1999 ring->stats.sw_err_cnt++;
2000 u64_stats_update_end(&ring->syncp);
2001
2002 netdev_err(ring->tqp->handle->kinfo.netdev,
2003 "hnae reserve buffer map failed.\n");
2004 break;
2005 }
2006 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2007 }
2008
2009 ring_ptr_move_fw(ring, next_to_use);
2010 }
2011
2012	wmb(); /* Make sure all data has been written before submitting */
2013 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2014}
2015
76ad4f0e
S
2016static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2017 struct hns3_enet_ring *ring, int pull_len,
2018 struct hns3_desc_cb *desc_cb)
2019{
2020 struct hns3_desc *desc;
2021 int truesize, size;
2022 int last_offset;
2023 bool twobufs;
2024
2025 twobufs = ((PAGE_SIZE < 8192) &&
e4e87715 2026 hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
76ad4f0e
S
2027
2028 desc = &ring->desc[ring->next_to_clean];
2029 size = le16_to_cpu(desc->rx.size);
2030
e4e87715 2031 truesize = hnae3_buf_size(ring);
f8d291f0
PL
2032
2033 if (!twobufs)
e4e87715 2034 last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
76ad4f0e
S
2035
2036 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
f8d291f0 2037 size - pull_len, truesize);
76ad4f0e
S
2038
2039	/* Avoid re-using remote pages; the flag defaults to no reuse */
2040 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2041 return;
2042
2043 if (twobufs) {
2044 /* If we are only owner of page we can reuse it */
2045 if (likely(page_count(desc_cb->priv) == 1)) {
2046 /* Flip page offset to other buffer */
2047 desc_cb->page_offset ^= truesize;
2048
2049 desc_cb->reuse_flag = 1;
2050			/* bump ref count on page before it is given */
2051 get_page(desc_cb->priv);
2052 }
2053 return;
2054 }
2055
2056	/* Move the offset up to the next buffer */
2057 desc_cb->page_offset += truesize;
2058
2059 if (desc_cb->page_offset <= last_offset) {
2060 desc_cb->reuse_flag = 1;
2061		/* Bump ref count on page before it is given */
2062 get_page(desc_cb->priv);
2063 }
2064}
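/* Rough sketch of the reuse scheme above, assuming 4K pages and 2K
 * buffers (the "twobufs" case): page_offset starts at 0 and XOR-ing it
 * with truesize (2048) flips between the two halves of the page, so one
 * page alternately backs both buffers as long as the stack has dropped
 * its reference (page_count == 1).  With larger pages the offset simply
 * advances by truesize until it passes last_offset.
 */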
2065
2066static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2067 struct hns3_desc *desc)
2068{
2069 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2070 int l3_type, l4_type;
2071 u32 bd_base_info;
2072 int ol4_type;
2073 u32 l234info;
2074
2075 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2076 l234info = le32_to_cpu(desc->rx.l234_info);
2077
2078 skb->ip_summed = CHECKSUM_NONE;
2079
2080 skb_checksum_none_assert(skb);
2081
2082 if (!(netdev->features & NETIF_F_RXCSUM))
2083 return;
2084
2085 /* check if hardware has done checksum */
e4e87715 2086 if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
76ad4f0e
S
2087 return;
2088
e4e87715
PL
2089 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
2090 hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
2091 hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2092 hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
76ad4f0e
S
2093 netdev_err(netdev, "L3/L4 error pkt\n");
2094 u64_stats_update_begin(&ring->syncp);
2095 ring->stats.l3l4_csum_err++;
2096 u64_stats_update_end(&ring->syncp);
2097
2098 return;
2099 }
2100
e4e87715
PL
2101 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2102 HNS3_RXD_L3ID_S);
2103 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2104 HNS3_RXD_L4ID_S);
76ad4f0e 2105
e4e87715
PL
2106 ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2107 HNS3_RXD_OL4ID_S);
76ad4f0e
S
2108 switch (ol4_type) {
2109 case HNS3_OL4_TYPE_MAC_IN_UDP:
2110 case HNS3_OL4_TYPE_NVGRE:
2111 skb->csum_level = 1;
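		/* fall through */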
2112 case HNS3_OL4_TYPE_NO_TUN:
2113 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
94c5e532
PL
2114 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2115 l3_type == HNS3_L3_TYPE_IPV6) &&
2116 (l4_type == HNS3_L4_TYPE_UDP ||
2117 l4_type == HNS3_L4_TYPE_TCP ||
2118 l4_type == HNS3_L4_TYPE_SCTP))
76ad4f0e
S
2119 skb->ip_summed = CHECKSUM_UNNECESSARY;
2120 break;
2121 }
2122}
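/* Sketch of the decision above: the tunnelled outer types
 * (MAC-in-UDP, NVGRE) mark the validated checksum as the inner one
 * (csum_level = 1) before falling through, a plain frame leaves
 * csum_level at 0, and in either case CHECKSUM_UNNECESSARY is only set
 * for IPv4/IPv6 carrying TCP, UDP or SCTP.
 */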
2123
d43e5aca
YL
2124static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2125{
2126 napi_gro_receive(&ring->tqp_vector->napi, skb);
2127}
2128
5b5455a9
PL
2129static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2130 struct hns3_desc *desc, u32 l234info)
2131{
2132 struct pci_dev *pdev = ring->tqp->handle->pdev;
2133 u16 vlan_tag;
2134
2135 if (pdev->revision == 0x20) {
2136 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2137 if (!(vlan_tag & VLAN_VID_MASK))
2138 vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2139
2140 return vlan_tag;
2141 }
2142
2143#define HNS3_STRP_OUTER_VLAN 0x1
2144#define HNS3_STRP_INNER_VLAN 0x2
2145
e4e87715
PL
2146 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2147 HNS3_RXD_STRP_TAGP_S)) {
5b5455a9
PL
2148 case HNS3_STRP_OUTER_VLAN:
2149 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2150 break;
2151 case HNS3_STRP_INNER_VLAN:
2152 vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2153 break;
2154 default:
2155 vlan_tag = 0;
2156 break;
2157 }
2158
2159 return vlan_tag;
2160}
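/* Summary of the two paths above: revision 0x20 hardware does not
 * report where the stripped tag was placed, so the driver prefers
 * ot_vlan_tag and falls back to vlan_tag; newer revisions encode the
 * location in the STRP_TAGP field of l234info, which selects between
 * the outer and inner tag fields of the descriptor.
 */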
2161
76ad4f0e
S
2162static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2163 struct sk_buff **out_skb, int *out_bnum)
2164{
2165 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2166 struct hns3_desc_cb *desc_cb;
2167 struct hns3_desc *desc;
2168 struct sk_buff *skb;
2169 unsigned char *va;
2170 u32 bd_base_info;
2171 int pull_len;
2172 u32 l234info;
2173 int length;
2174 int bnum;
2175
2176 desc = &ring->desc[ring->next_to_clean];
2177 desc_cb = &ring->desc_cb[ring->next_to_clean];
2178
2179 prefetch(desc);
2180
846fcc83 2181 length = le16_to_cpu(desc->rx.size);
76ad4f0e 2182 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
76ad4f0e
S
2183
2184 /* Check valid BD */
e4e87715 2185 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
76ad4f0e
S
2186 return -EFAULT;
2187
2188 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2189
2190	/* Prefetch the first cache line of the first page.
2191	 * The idea is to cache a few bytes of the packet header. The L1
2192	 * cache line size is commonly 64B, so we prefetch twice to cover
2193	 * 128B. On parts with 128B L1 cache lines the second prefetch is
2194	 * compiled out below, since a single fetch already covers the
2195	 * relevant part of the header.
2196	 */
2197 prefetch(va);
2198#if L1_CACHE_BYTES < 128
2199 prefetch(va + L1_CACHE_BYTES);
2200#endif
2201
2202 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2203 HNS3_RX_HEAD_SIZE);
2204 if (unlikely(!skb)) {
2205 netdev_err(netdev, "alloc rx skb fail\n");
2206
2207 u64_stats_update_begin(&ring->syncp);
2208 ring->stats.sw_err_cnt++;
2209 u64_stats_update_end(&ring->syncp);
2210
2211 return -ENOMEM;
2212 }
2213
2214 prefetchw(skb->data);
2215
2216 bnum = 1;
2217 if (length <= HNS3_RX_HEAD_SIZE) {
2218 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2219
2220 /* We can reuse buffer as-is, just make sure it is local */
2221 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2222 desc_cb->reuse_flag = 1;
2223 else /* This page cannot be reused so discard it */
2224 put_page(desc_cb->priv);
2225
2226 ring_ptr_move_fw(ring, next_to_clean);
2227 } else {
2228 u64_stats_update_begin(&ring->syncp);
2229 ring->stats.seg_pkt_cnt++;
2230 u64_stats_update_end(&ring->syncp);
2231
e63cd65f
PL
2232 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2233
76ad4f0e
S
2234 memcpy(__skb_put(skb, pull_len), va,
2235 ALIGN(pull_len, sizeof(long)));
2236
2237 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2238 ring_ptr_move_fw(ring, next_to_clean);
2239
e4e87715 2240 while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
76ad4f0e
S
2241 desc = &ring->desc[ring->next_to_clean];
2242 desc_cb = &ring->desc_cb[ring->next_to_clean];
2243 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2244 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2245 ring_ptr_move_fw(ring, next_to_clean);
2246 bnum++;
2247 }
2248 }
2249
2250 *out_bnum = bnum;
5b5455a9
PL
2251
2252 l234info = le32_to_cpu(desc->rx.l234_info);
2253
846fcc83
PL
2254	/* Based on the hardware's strategy, the offloaded tag is stored in
2255	 * ot_vlan_tag for the double-tagged case and in vlan_tag for the
2256	 * single-tagged case.
2257	 */
2258 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2259 u16 vlan_tag;
2260
5b5455a9 2261 vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
846fcc83
PL
2262 if (vlan_tag & VLAN_VID_MASK)
2263 __vlan_hwaccel_put_tag(skb,
2264 htons(ETH_P_8021Q),
2265 vlan_tag);
2266 }
2267
e4e87715 2268 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
76ad4f0e
S
2269 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2270 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2271 u64_stats_update_begin(&ring->syncp);
2272 ring->stats.non_vld_descs++;
2273 u64_stats_update_end(&ring->syncp);
2274
2275 dev_kfree_skb_any(skb);
2276 return -EINVAL;
2277 }
2278
2279 if (unlikely((!desc->rx.pkt_len) ||
e4e87715 2280 hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
76ad4f0e
S
2281 netdev_err(netdev, "truncated pkt\n");
2282 u64_stats_update_begin(&ring->syncp);
2283 ring->stats.err_pkt_len++;
2284 u64_stats_update_end(&ring->syncp);
2285
2286 dev_kfree_skb_any(skb);
2287 return -EFAULT;
2288 }
2289
e4e87715 2290 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
76ad4f0e
S
2291 netdev_err(netdev, "L2 error pkt\n");
2292 u64_stats_update_begin(&ring->syncp);
2293 ring->stats.l2_err++;
2294 u64_stats_update_end(&ring->syncp);
2295
2296 dev_kfree_skb_any(skb);
2297 return -EFAULT;
2298 }
2299
2300 u64_stats_update_begin(&ring->syncp);
2301 ring->stats.rx_pkts++;
2302 ring->stats.rx_bytes += skb->len;
2303 u64_stats_update_end(&ring->syncp);
2304
2305 ring->tqp_vector->rx_group.total_bytes += skb->len;
2306
2307 hns3_rx_checksum(ring, skb, desc);
2308 return 0;
2309}
2310
d43e5aca
YL
2311int hns3_clean_rx_ring(
2312 struct hns3_enet_ring *ring, int budget,
2313 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
76ad4f0e
S
2314{
2315#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2316 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2317 int recv_pkts, recv_bds, clean_count, err;
2318 int unused_count = hns3_desc_unused(ring);
2319 struct sk_buff *skb = NULL;
2320 int num, bnum = 0;
2321
2322 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2323	rmb(); /* Make sure num has taken effect before other data is touched */
2324
2325 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2326 num -= unused_count;
2327
2328 while (recv_pkts < budget && recv_bds < num) {
2329 /* Reuse or realloc buffers */
2330 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2331 hns3_nic_alloc_rx_buffers(ring,
2332 clean_count + unused_count);
2333 clean_count = 0;
2334 unused_count = hns3_desc_unused(ring);
2335 }
2336
2337 /* Poll one pkt */
2338 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2339 if (unlikely(!skb)) /* This fault cannot be repaired */
2340 goto out;
2341
2342 recv_bds += bnum;
2343 clean_count += bnum;
2344		if (unlikely(err)) { /* skip the erroneous packet */
2345 recv_pkts++;
2346 continue;
2347 }
2348
2349		/* Hand the packet up to the network stack */
2350 skb->protocol = eth_type_trans(skb, netdev);
d43e5aca 2351 rx_fn(ring, skb);
76ad4f0e
S
2352
2353 recv_pkts++;
2354 }
2355
2356out:
2357	/* Make sure all data has been written before submitting */
2358 if (clean_count + unused_count > 0)
2359 hns3_nic_alloc_rx_buffers(ring,
2360 clean_count + unused_count);
2361
2362 return recv_pkts;
2363}
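/* Sketch of the refill cadence above, with assumed numbers: consumed
 * buffers are handed back to hardware in batches of at least
 * RCB_NOF_ALLOC_RX_BUFF_ONCE (16), so a burst that consumes 40 BDs
 * typically triggers a couple of in-loop refills plus one final refill
 * of the remainder on the out: path.
 */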
2364
2365static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2366{
a95e1f86
FL
2367 struct hns3_enet_tqp_vector *tqp_vector =
2368 ring_group->ring->tqp_vector;
76ad4f0e 2369 enum hns3_flow_level_range new_flow_level;
a95e1f86
FL
2370 int packets_per_msecs;
2371 int bytes_per_msecs;
2372 u32 time_passed_ms;
76ad4f0e 2373 u16 new_int_gl;
76ad4f0e 2374
a95e1f86 2375 if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
76ad4f0e
S
2376 return false;
2377
2378 if (ring_group->total_packets == 0) {
9bc727a9
YL
2379 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2380 ring_group->coal.flow_level = HNS3_FLOW_LOW;
76ad4f0e
S
2381 return true;
2382 }
2383
2384	/* Simple throttle rate management
2385 * 0-10MB/s lower (50000 ints/s)
2386 * 10-20MB/s middle (20000 ints/s)
2387 * 20-1249MB/s high (18000 ints/s)
2388 * > 40000pps ultra (8000 ints/s)
2389 */
9bc727a9
YL
2390 new_flow_level = ring_group->coal.flow_level;
2391 new_int_gl = ring_group->coal.int_gl;
a95e1f86
FL
2392 time_passed_ms =
2393 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2394
2395 if (!time_passed_ms)
2396 return false;
2397
2398 do_div(ring_group->total_packets, time_passed_ms);
2399 packets_per_msecs = ring_group->total_packets;
2400
2401 do_div(ring_group->total_bytes, time_passed_ms);
2402 bytes_per_msecs = ring_group->total_bytes;
2403
2404#define HNS3_RX_LOW_BYTE_RATE 10000
2405#define HNS3_RX_MID_BYTE_RATE 20000
76ad4f0e
S
2406
2407 switch (new_flow_level) {
2408 case HNS3_FLOW_LOW:
a95e1f86 2409 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
76ad4f0e
S
2410 new_flow_level = HNS3_FLOW_MID;
2411 break;
2412 case HNS3_FLOW_MID:
a95e1f86 2413 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
76ad4f0e 2414 new_flow_level = HNS3_FLOW_HIGH;
a95e1f86 2415 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
76ad4f0e
S
2416 new_flow_level = HNS3_FLOW_LOW;
2417 break;
2418 case HNS3_FLOW_HIGH:
2419 case HNS3_FLOW_ULTRA:
2420 default:
a95e1f86 2421 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
76ad4f0e
S
2422 new_flow_level = HNS3_FLOW_MID;
2423 break;
2424 }
2425
a95e1f86
FL
2426#define HNS3_RX_ULTRA_PACKET_RATE 40
2427
2428 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2429 &tqp_vector->rx_group == ring_group)
76ad4f0e
S
2430 new_flow_level = HNS3_FLOW_ULTRA;
2431
2432 switch (new_flow_level) {
2433 case HNS3_FLOW_LOW:
2434 new_int_gl = HNS3_INT_GL_50K;
2435 break;
2436 case HNS3_FLOW_MID:
2437 new_int_gl = HNS3_INT_GL_20K;
2438 break;
2439 case HNS3_FLOW_HIGH:
2440 new_int_gl = HNS3_INT_GL_18K;
2441 break;
2442 case HNS3_FLOW_ULTRA:
2443 new_int_gl = HNS3_INT_GL_8K;
2444 break;
2445 default:
2446 break;
2447 }
2448
2449 ring_group->total_bytes = 0;
2450 ring_group->total_packets = 0;
9bc727a9
YL
2451 ring_group->coal.flow_level = new_flow_level;
2452 if (new_int_gl != ring_group->coal.int_gl) {
2453 ring_group->coal.int_gl = new_int_gl;
76ad4f0e
S
2454 return true;
2455 }
2456 return false;
2457}
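/* Worked example of the adaptation above, with assumed traffic: a
 * vector that moved roughly 15000 bytes/ms since last_jiffies climbs
 * from HNS3_FLOW_LOW to HNS3_FLOW_MID, dropping int_gl from 50K to 20K
 * interrupts/s; if its rx group also exceeded 40 packets/ms it is
 * promoted straight to HNS3_FLOW_ULTRA (8K interrupts/s).
 */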
2458
2459static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2460{
8b1ff1ea
FL
2461 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2462 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2463 bool rx_update, tx_update;
2464
cd9d187b
FL
2465 if (tqp_vector->int_adapt_down > 0) {
2466 tqp_vector->int_adapt_down--;
2467 return;
2468 }
2469
9bc727a9 2470 if (rx_group->coal.gl_adapt_enable) {
8b1ff1ea
FL
2471 rx_update = hns3_get_new_int_gl(rx_group);
2472 if (rx_update)
2473 hns3_set_vector_coalesce_rx_gl(tqp_vector,
9bc727a9 2474 rx_group->coal.int_gl);
8b1ff1ea
FL
2475 }
2476
9bc727a9 2477 if (tx_group->coal.gl_adapt_enable) {
8b1ff1ea
FL
2478 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2479 if (tx_update)
2480 hns3_set_vector_coalesce_tx_gl(tqp_vector,
9bc727a9 2481 tx_group->coal.int_gl);
76ad4f0e 2482 }
cd9d187b 2483
a95e1f86 2484 tqp_vector->last_jiffies = jiffies;
cd9d187b 2485 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
76ad4f0e
S
2486}
2487
2488static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2489{
2490 struct hns3_enet_ring *ring;
2491 int rx_pkt_total = 0;
2492
2493 struct hns3_enet_tqp_vector *tqp_vector =
2494 container_of(napi, struct hns3_enet_tqp_vector, napi);
2495 bool clean_complete = true;
2496 int rx_budget;
2497
2498 /* Since the actual Tx work is minimal, we can give the Tx a larger
2499 * budget and be more aggressive about cleaning up the Tx descriptors.
2500 */
2501 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2502 if (!hns3_clean_tx_ring(ring, budget))
2503 clean_complete = false;
2504 }
2505
2506	/* make sure the rx ring budget is not smaller than 1 */
2507 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2508
2509 hns3_for_each_ring(ring, tqp_vector->rx_group) {
d43e5aca
YL
2510 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2511 hns3_rx_skb);
76ad4f0e
S
2512
2513 if (rx_cleaned >= rx_budget)
2514 clean_complete = false;
2515
2516 rx_pkt_total += rx_cleaned;
2517 }
2518
2519 tqp_vector->rx_group.total_packets += rx_pkt_total;
2520
2521 if (!clean_complete)
2522 return budget;
2523
2524 napi_complete(napi);
2525 hns3_update_new_int_gl(tqp_vector);
2526 hns3_mask_vector_irq(tqp_vector, 1);
2527
2528 return rx_pkt_total;
2529}
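/* Example of the budget split above, numbers assumed: with a NAPI
 * budget of 64 and 4 TQPs on this vector, each tx ring may reclaim up
 * to 64 descriptors while each rx ring is polled with max(64 / 4, 1) =
 * 16 packets; if any ring uses its full share the whole budget is
 * returned so NAPI reschedules instead of re-arming the interrupt.
 */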
2530
2531static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2532 struct hnae3_ring_chain_node *head)
2533{
2534 struct pci_dev *pdev = tqp_vector->handle->pdev;
2535 struct hnae3_ring_chain_node *cur_chain = head;
2536 struct hnae3_ring_chain_node *chain;
2537 struct hns3_enet_ring *tx_ring;
2538 struct hns3_enet_ring *rx_ring;
2539
2540 tx_ring = tqp_vector->tx_group.ring;
2541 if (tx_ring) {
2542 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
e4e87715
PL
2543 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2544 HNAE3_RING_TYPE_TX);
2545 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2546 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
76ad4f0e
S
2547
2548 cur_chain->next = NULL;
2549
2550 while (tx_ring->next) {
2551 tx_ring = tx_ring->next;
2552
2553 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2554 GFP_KERNEL);
2555 if (!chain)
2556 return -ENOMEM;
2557
2558 cur_chain->next = chain;
2559 chain->tqp_index = tx_ring->tqp->tqp_index;
e4e87715
PL
2560 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2561 HNAE3_RING_TYPE_TX);
2562 hnae3_set_field(chain->int_gl_idx,
2563 HNAE3_RING_GL_IDX_M,
2564 HNAE3_RING_GL_IDX_S,
2565 HNAE3_RING_GL_TX);
76ad4f0e
S
2566
2567 cur_chain = chain;
2568 }
2569 }
2570
2571 rx_ring = tqp_vector->rx_group.ring;
2572 if (!tx_ring && rx_ring) {
2573 cur_chain->next = NULL;
2574 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
e4e87715
PL
2575 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2576 HNAE3_RING_TYPE_RX);
2577 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2578 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
76ad4f0e
S
2579
2580 rx_ring = rx_ring->next;
2581 }
2582
2583 while (rx_ring) {
2584 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2585 if (!chain)
2586 return -ENOMEM;
2587
2588 cur_chain->next = chain;
2589 chain->tqp_index = rx_ring->tqp->tqp_index;
e4e87715
PL
2590 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2591 HNAE3_RING_TYPE_RX);
2592 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2593 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
11af96a4 2594
76ad4f0e
S
2595 cur_chain = chain;
2596
2597 rx_ring = rx_ring->next;
2598 }
2599
2600 return 0;
2601}
2602
2603static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2604 struct hnae3_ring_chain_node *head)
2605{
2606 struct pci_dev *pdev = tqp_vector->handle->pdev;
2607 struct hnae3_ring_chain_node *chain_tmp, *chain;
2608
2609 chain = head->next;
2610
2611 while (chain) {
2612 chain_tmp = chain->next;
2613 devm_kfree(&pdev->dev, chain);
2614 chain = chain_tmp;
2615 }
2616}
2617
2618static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2619 struct hns3_enet_ring *ring)
2620{
2621 ring->next = group->ring;
2622 group->ring = ring;
2623
2624 group->count++;
2625}
2626
2627static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2628{
2629 struct hnae3_ring_chain_node vector_ring_chain;
2630 struct hnae3_handle *h = priv->ae_handle;
2631 struct hns3_enet_tqp_vector *tqp_vector;
76ad4f0e
S
2632 int ret = 0;
2633 u16 i;
2634
dd38c726
YL
2635 for (i = 0; i < priv->vector_num; i++) {
2636 tqp_vector = &priv->tqp_vector[i];
2637 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
2638 tqp_vector->num_tqps = 0;
2639 }
76ad4f0e 2640
dd38c726
YL
2641 for (i = 0; i < h->kinfo.num_tqps; i++) {
2642 u16 vector_i = i % priv->vector_num;
2643 u16 tqp_num = h->kinfo.num_tqps;
76ad4f0e
S
2644
2645 tqp_vector = &priv->tqp_vector[vector_i];
2646
2647 hns3_add_ring_to_group(&tqp_vector->tx_group,
2648 priv->ring_data[i].ring);
2649
2650 hns3_add_ring_to_group(&tqp_vector->rx_group,
2651 priv->ring_data[i + tqp_num].ring);
2652
76ad4f0e
S
2653 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2654 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
dd38c726 2655 tqp_vector->num_tqps++;
76ad4f0e
S
2656 }
2657
dd38c726 2658 for (i = 0; i < priv->vector_num; i++) {
76ad4f0e
S
2659 tqp_vector = &priv->tqp_vector[i];
2660
2661 tqp_vector->rx_group.total_bytes = 0;
2662 tqp_vector->rx_group.total_packets = 0;
2663 tqp_vector->tx_group.total_bytes = 0;
2664 tqp_vector->tx_group.total_packets = 0;
76ad4f0e
S
2665 tqp_vector->handle = h;
2666
2667 ret = hns3_get_vector_ring_chain(tqp_vector,
2668 &vector_ring_chain);
2669 if (ret)
dd38c726 2670 return ret;
76ad4f0e
S
2671
2672 ret = h->ae_algo->ops->map_ring_to_vector(h,
2673 tqp_vector->vector_irq, &vector_ring_chain);
76ad4f0e
S
2674
2675 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2676
dd38c726
YL
2677 if (ret)
2678 return ret;
2679
76ad4f0e
S
2680 netif_napi_add(priv->netdev, &tqp_vector->napi,
2681 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2682 }
2683
dd38c726
YL
2684 return 0;
2685}
2686
2687static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
2688{
2689 struct hnae3_handle *h = priv->ae_handle;
2690 struct hns3_enet_tqp_vector *tqp_vector;
2691 struct hnae3_vector_info *vector;
2692 struct pci_dev *pdev = h->pdev;
2693 u16 tqp_num = h->kinfo.num_tqps;
2694 u16 vector_num;
2695 int ret = 0;
2696 u16 i;
2697
2698	/* The RSS size, the number of online CPUs and vector_num should match */
2699 /* Should consider 2p/4p later */
2700 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2701 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2702 GFP_KERNEL);
2703 if (!vector)
2704 return -ENOMEM;
2705
2706 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2707
2708 priv->vector_num = vector_num;
2709 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2710 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2711 GFP_KERNEL);
2712 if (!priv->tqp_vector) {
2713 ret = -ENOMEM;
2714 goto out;
2715 }
2716
2717 for (i = 0; i < priv->vector_num; i++) {
2718 tqp_vector = &priv->tqp_vector[i];
2719 tqp_vector->idx = i;
2720 tqp_vector->mask_addr = vector[i].io_addr;
2721 tqp_vector->vector_irq = vector[i].vector;
2722 hns3_vector_gl_rl_init(tqp_vector, priv);
2723 }
2724
76ad4f0e
S
2725out:
2726 devm_kfree(&pdev->dev, vector);
2727 return ret;
2728}
2729
dd38c726
YL
2730static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
2731{
2732 group->ring = NULL;
2733 group->count = 0;
2734}
2735
76ad4f0e
S
2736static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2737{
2738 struct hnae3_ring_chain_node vector_ring_chain;
2739 struct hnae3_handle *h = priv->ae_handle;
2740 struct hns3_enet_tqp_vector *tqp_vector;
76ad4f0e
S
2741 int i, ret;
2742
2743 for (i = 0; i < priv->vector_num; i++) {
2744 tqp_vector = &priv->tqp_vector[i];
2745
2746 ret = hns3_get_vector_ring_chain(tqp_vector,
2747 &vector_ring_chain);
2748 if (ret)
2749 return ret;
2750
2751 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2752 tqp_vector->vector_irq, &vector_ring_chain);
2753 if (ret)
2754 return ret;
2755
2756 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2757
2758 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2759 (void)irq_set_affinity_hint(
2760 priv->tqp_vector[i].vector_irq,
2761 NULL);
ae064e61 2762 free_irq(priv->tqp_vector[i].vector_irq,
2763 &priv->tqp_vector[i]);
76ad4f0e
S
2764 }
2765
2766 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
dd38c726
YL
2767 hns3_clear_ring_group(&tqp_vector->rx_group);
2768 hns3_clear_ring_group(&tqp_vector->tx_group);
76ad4f0e
S
2769 netif_napi_del(&priv->tqp_vector[i].napi);
2770 }
2771
dd38c726
YL
2772 return 0;
2773}
2774
2775static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
2776{
2777 struct hnae3_handle *h = priv->ae_handle;
2778 struct pci_dev *pdev = h->pdev;
2779 int i, ret;
2780
2781 for (i = 0; i < priv->vector_num; i++) {
2782 struct hns3_enet_tqp_vector *tqp_vector;
2783
2784 tqp_vector = &priv->tqp_vector[i];
2785 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2786 if (ret)
2787 return ret;
2788 }
76ad4f0e 2789
dd38c726 2790 devm_kfree(&pdev->dev, priv->tqp_vector);
76ad4f0e
S
2791 return 0;
2792}
2793
2794static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2795 int ring_type)
2796{
2797 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2798 int queue_num = priv->ae_handle->kinfo.num_tqps;
2799 struct pci_dev *pdev = priv->ae_handle->pdev;
2800 struct hns3_enet_ring *ring;
2801
2802 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2803 if (!ring)
2804 return -ENOMEM;
2805
2806 if (ring_type == HNAE3_RING_TYPE_TX) {
2807 ring_data[q->tqp_index].ring = ring;
66b44730 2808 ring_data[q->tqp_index].queue_index = q->tqp_index;
76ad4f0e
S
2809 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2810 } else {
2811 ring_data[q->tqp_index + queue_num].ring = ring;
66b44730 2812 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
76ad4f0e
S
2813 ring->io_base = q->io_base;
2814 }
2815
e4e87715 2816 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
76ad4f0e 2817
76ad4f0e
S
2818 ring->tqp = q;
2819 ring->desc = NULL;
2820 ring->desc_cb = NULL;
2821 ring->dev = priv->dev;
2822 ring->desc_dma_addr = 0;
2823 ring->buf_size = q->buf_size;
2824 ring->desc_num = q->desc_num;
2825 ring->next_to_use = 0;
2826 ring->next_to_clean = 0;
2827
2828 return 0;
2829}
2830
2831static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2832 struct hns3_nic_priv *priv)
2833{
2834 int ret;
2835
2836 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2837 if (ret)
2838 return ret;
2839
2840 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2841 if (ret)
2842 return ret;
2843
2844 return 0;
2845}
2846
2847static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2848{
2849 struct hnae3_handle *h = priv->ae_handle;
2850 struct pci_dev *pdev = h->pdev;
2851 int i, ret;
2852
a86854d0
KC
2853 priv->ring_data = devm_kzalloc(&pdev->dev,
2854 array3_size(h->kinfo.num_tqps,
2855 sizeof(*priv->ring_data),
2856 2),
76ad4f0e
S
2857 GFP_KERNEL);
2858 if (!priv->ring_data)
2859 return -ENOMEM;
2860
2861 for (i = 0; i < h->kinfo.num_tqps; i++) {
2862 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2863 if (ret)
2864 goto err;
2865 }
2866
2867 return 0;
2868err:
2869 devm_kfree(&pdev->dev, priv->ring_data);
2870 return ret;
2871}
2872
09f2af64
PL
2873static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2874{
2875 struct hnae3_handle *h = priv->ae_handle;
2876 int i;
2877
2878 for (i = 0; i < h->kinfo.num_tqps; i++) {
2879 devm_kfree(priv->dev, priv->ring_data[i].ring);
2880 devm_kfree(priv->dev,
2881 priv->ring_data[i + h->kinfo.num_tqps].ring);
2882 }
2883 devm_kfree(priv->dev, priv->ring_data);
2884}
2885
76ad4f0e
S
2886static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2887{
2888 int ret;
2889
2890 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2891 return -EINVAL;
2892
2893 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2894 GFP_KERNEL);
2895 if (!ring->desc_cb) {
2896 ret = -ENOMEM;
2897 goto out;
2898 }
2899
2900 ret = hns3_alloc_desc(ring);
2901 if (ret)
2902 goto out_with_desc_cb;
2903
2904 if (!HNAE3_IS_TX_RING(ring)) {
2905 ret = hns3_alloc_ring_buffers(ring);
2906 if (ret)
2907 goto out_with_desc;
2908 }
2909
2910 return 0;
2911
2912out_with_desc:
2913 hns3_free_desc(ring);
2914out_with_desc_cb:
2915 kfree(ring->desc_cb);
2916 ring->desc_cb = NULL;
2917out:
2918 return ret;
2919}
2920
2921static void hns3_fini_ring(struct hns3_enet_ring *ring)
2922{
2923 hns3_free_desc(ring);
2924 kfree(ring->desc_cb);
2925 ring->desc_cb = NULL;
2926 ring->next_to_clean = 0;
2927 ring->next_to_use = 0;
2928}
2929
1db9b1bf 2930static int hns3_buf_size2type(u32 buf_size)
76ad4f0e
S
2931{
2932 int bd_size_type;
2933
2934 switch (buf_size) {
2935 case 512:
2936 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2937 break;
2938 case 1024:
2939 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2940 break;
2941 case 2048:
2942 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2943 break;
2944 case 4096:
2945 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2946 break;
2947 default:
2948 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2949 }
2950
2951 return bd_size_type;
2952}
2953
2954static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2955{
2956 dma_addr_t dma = ring->desc_dma_addr;
2957 struct hnae3_queue *q = ring->tqp;
2958
2959 if (!HNAE3_IS_TX_RING(ring)) {
2960 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2961 (u32)dma);
2962 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2963 (u32)((dma >> 31) >> 1));
2964
2965 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2966 hns3_buf_size2type(ring->buf_size));
2967 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2968 ring->desc_num / 8 - 1);
2969
2970 } else {
2971 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2972 (u32)dma);
2973 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2974 (u32)((dma >> 31) >> 1));
2975
2976 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2977 hns3_buf_size2type(ring->buf_size));
2978 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2979 ring->desc_num / 8 - 1);
2980 }
2981}
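/* Note on the base-address split above: the descriptor base is
 * programmed as two 32-bit registers, and the high half is written as
 * (dma >> 31) >> 1 rather than dma >> 32 so the shift stays well
 * defined even when dma_addr_t is only 32 bits wide.
 */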
2982
5668abda 2983int hns3_init_all_ring(struct hns3_nic_priv *priv)
76ad4f0e
S
2984{
2985 struct hnae3_handle *h = priv->ae_handle;
2986 int ring_num = h->kinfo.num_tqps * 2;
2987 int i, j;
2988 int ret;
2989
2990 for (i = 0; i < ring_num; i++) {
2991 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2992 if (ret) {
2993 dev_err(priv->dev,
2994 "Alloc ring memory fail! ret=%d\n", ret);
2995 goto out_when_alloc_ring_memory;
2996 }
2997
76ad4f0e
S
2998 u64_stats_init(&priv->ring_data[i].ring->syncp);
2999 }
3000
3001 return 0;
3002
3003out_when_alloc_ring_memory:
3004 for (j = i - 1; j >= 0; j--)
ee83f776 3005 hns3_fini_ring(priv->ring_data[j].ring);
76ad4f0e
S
3006
3007 return -ENOMEM;
3008}
3009
5668abda 3010int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
76ad4f0e
S
3011{
3012 struct hnae3_handle *h = priv->ae_handle;
3013 int i;
3014
3015 for (i = 0; i < h->kinfo.num_tqps; i++) {
3016 if (h->ae_algo->ops->reset_queue)
3017 h->ae_algo->ops->reset_queue(h, i);
3018
3019 hns3_fini_ring(priv->ring_data[i].ring);
3020 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3021 }
76ad4f0e
S
3022 return 0;
3023}
3024
3025/* Set the MAC address if it is configured, otherwise leave it to the AE driver */
f09555ff 3026static void hns3_init_mac_addr(struct net_device *netdev, bool init)
76ad4f0e
S
3027{
3028 struct hns3_nic_priv *priv = netdev_priv(netdev);
3029 struct hnae3_handle *h = priv->ae_handle;
3030 u8 mac_addr_temp[ETH_ALEN];
3031
f09555ff 3032 if (h->ae_algo->ops->get_mac_addr && init) {
76ad4f0e
S
3033 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3034 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3035 }
3036
3037 /* Check if the MAC address is valid, if not get a random one */
3038 if (!is_valid_ether_addr(netdev->dev_addr)) {
3039 eth_hw_addr_random(netdev);
3040 dev_warn(priv->dev, "using random MAC address %pM\n",
3041 netdev->dev_addr);
76ad4f0e 3042 }
139e8792
L
3043
3044 if (h->ae_algo->ops->set_mac_addr)
59098055 3045 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
139e8792 3046
76ad4f0e
S
3047}
3048
c7fc8fb6
JS
3049static void hns3_uninit_mac_addr(struct net_device *netdev)
3050{
3051 struct hns3_nic_priv *priv = netdev_priv(netdev);
3052 struct hnae3_handle *h = priv->ae_handle;
3053
3054 if (h->ae_algo->ops->rm_uc_addr)
3055 h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
3056}
3057
76ad4f0e
S
3058static void hns3_nic_set_priv_ops(struct net_device *netdev)
3059{
3060 struct hns3_nic_priv *priv = netdev_priv(netdev);
3061
3062 if ((netdev->features & NETIF_F_TSO) ||
3063 (netdev->features & NETIF_F_TSO6)) {
3064 priv->ops.fill_desc = hns3_fill_desc_tso;
3065 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3066 } else {
3067 priv->ops.fill_desc = hns3_fill_desc;
3068 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3069 }
3070}
3071
3072static int hns3_client_init(struct hnae3_handle *handle)
3073{
3074 struct pci_dev *pdev = handle->pdev;
3075 struct hns3_nic_priv *priv;
3076 struct net_device *netdev;
3077 int ret;
3078
3079 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
678335a1 3080 hns3_get_max_available_channels(handle));
76ad4f0e
S
3081 if (!netdev)
3082 return -ENOMEM;
3083
3084 priv = netdev_priv(netdev);
3085 priv->dev = &pdev->dev;
3086 priv->netdev = netdev;
3087 priv->ae_handle = handle;
6d4c3981
SM
3088 priv->ae_handle->reset_level = HNAE3_NONE_RESET;
3089 priv->ae_handle->last_reset_time = jiffies;
f8fa222c 3090 priv->tx_timeout_count = 0;
76ad4f0e
S
3091
3092 handle->kinfo.netdev = netdev;
3093 handle->priv = (void *)priv;
3094
f09555ff 3095 hns3_init_mac_addr(netdev, true);
76ad4f0e
S
3096
3097 hns3_set_default_feature(netdev);
3098
3099 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3100 netdev->priv_flags |= IFF_UNICAST_FLT;
3101 netdev->netdev_ops = &hns3_nic_netdev_ops;
3102 SET_NETDEV_DEV(netdev, &pdev->dev);
3103 hns3_ethtool_set_ops(netdev);
3104 hns3_nic_set_priv_ops(netdev);
3105
3106 /* Carrier off reporting is important to ethtool even BEFORE open */
3107 netif_carrier_off(netdev);
3108
3109 ret = hns3_get_ring_config(priv);
3110 if (ret) {
3111 ret = -ENOMEM;
3112 goto out_get_ring_cfg;
3113 }
3114
dd38c726
YL
3115 ret = hns3_nic_alloc_vector_data(priv);
3116 if (ret) {
3117 ret = -ENOMEM;
3118 goto out_alloc_vector_data;
3119 }
3120
76ad4f0e
S
3121 ret = hns3_nic_init_vector_data(priv);
3122 if (ret) {
3123 ret = -ENOMEM;
3124 goto out_init_vector_data;
3125 }
3126
3127 ret = hns3_init_all_ring(priv);
3128 if (ret) {
3129 ret = -ENOMEM;
3130 goto out_init_ring_data;
3131 }
3132
3133 ret = register_netdev(netdev);
3134 if (ret) {
3135 dev_err(priv->dev, "probe register netdev fail!\n");
3136 goto out_reg_netdev_fail;
3137 }
3138
986743db
YL
3139 hns3_dcbnl_setup(handle);
3140
a8e8b7ff
S
3141 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3142 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3143
76ad4f0e
S
3144 return ret;
3145
3146out_reg_netdev_fail:
3147out_init_ring_data:
3148 (void)hns3_nic_uninit_vector_data(priv);
76ad4f0e 3149out_init_vector_data:
dd38c726
YL
3150 hns3_nic_dealloc_vector_data(priv);
3151out_alloc_vector_data:
3152 priv->ring_data = NULL;
76ad4f0e
S
3153out_get_ring_cfg:
3154 priv->ae_handle = NULL;
3155 free_netdev(netdev);
3156 return ret;
3157}
3158
3159static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3160{
3161 struct net_device *netdev = handle->kinfo.netdev;
3162 struct hns3_nic_priv *priv = netdev_priv(netdev);
3163 int ret;
3164
3165 if (netdev->reg_state != NETREG_UNINITIALIZED)
3166 unregister_netdev(netdev);
3167
7b763f3f
FL
3168 hns3_force_clear_all_rx_ring(handle);
3169
76ad4f0e
S
3170 ret = hns3_nic_uninit_vector_data(priv);
3171 if (ret)
3172 netdev_err(netdev, "uninit vector error\n");
3173
dd38c726
YL
3174 ret = hns3_nic_dealloc_vector_data(priv);
3175 if (ret)
3176 netdev_err(netdev, "dealloc vector error\n");
3177
76ad4f0e
S
3178 ret = hns3_uninit_all_ring(priv);
3179 if (ret)
3180 netdev_err(netdev, "uninit ring error\n");
3181
ec777890
YL
3182 hns3_put_ring_config(priv);
3183
76ad4f0e
S
3184 priv->ring_data = NULL;
3185
c7fc8fb6
JS
3186 hns3_uninit_mac_addr(netdev);
3187
76ad4f0e
S
3188 free_netdev(netdev);
3189}
3190
3191static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3192{
3193 struct net_device *netdev = handle->kinfo.netdev;
3194
3195 if (!netdev)
3196 return;
3197
3198 if (linkup) {
3199 netif_carrier_on(netdev);
3200 netif_tx_wake_all_queues(netdev);
3201 netdev_info(netdev, "link up\n");
3202 } else {
3203 netif_carrier_off(netdev);
3204 netif_tx_stop_all_queues(netdev);
3205 netdev_info(netdev, "link down\n");
3206 }
3207}
3208
9df8f79a
YL
3209static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3210{
3211 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3212 struct net_device *ndev = kinfo->netdev;
075cfdd6 3213 bool if_running;
9df8f79a 3214 int ret;
9df8f79a
YL
3215
3216 if (tc > HNAE3_MAX_TC)
3217 return -EINVAL;
3218
3219 if (!ndev)
3220 return -ENODEV;
3221
075cfdd6
CIK
3222 if_running = netif_running(ndev);
3223
9df8f79a
YL
3224 if (if_running) {
3225 (void)hns3_nic_net_stop(ndev);
3226 msleep(100);
3227 }
3228
3229 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3230 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3231 if (ret)
3232 goto err_out;
3233
9df8f79a
YL
3234 ret = hns3_nic_set_real_num_queue(ndev);
3235
3236err_out:
3237 if (if_running)
3238 (void)hns3_nic_net_open(ndev);
3239
3240 return ret;
3241}
3242
bb6b94a8
L
3243static void hns3_recover_hw_addr(struct net_device *ndev)
3244{
3245 struct netdev_hw_addr_list *list;
3246 struct netdev_hw_addr *ha, *tmp;
3247
3248 /* go through and sync uc_addr entries to the device */
3249 list = &ndev->uc;
3250 list_for_each_entry_safe(ha, tmp, &list->list, list)
3251 hns3_nic_uc_sync(ndev, ha->addr);
3252
3253 /* go through and sync mc_addr entries to the device */
3254 list = &ndev->mc;
3255 list_for_each_entry_safe(ha, tmp, &list->list, list)
3256 hns3_nic_mc_sync(ndev, ha->addr);
3257}
3258
beebca3a 3259static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
bb6b94a8 3260{
beebca3a 3261 while (ring->next_to_clean != ring->next_to_use) {
7b763f3f 3262 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
beebca3a
YL
3263 hns3_free_buffer_detach(ring, ring->next_to_clean);
3264 ring_ptr_move_fw(ring, next_to_clean);
3265 }
3266}
3267
7b763f3f
FL
3268static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3269{
3270 struct hns3_desc_cb res_cbs;
3271 int ret;
3272
3273 while (ring->next_to_use != ring->next_to_clean) {
3274		/* When a buffer is not reused, its memory has been
3275		 * freed in hns3_handle_rx_bd or will be freed by the
3276		 * stack, so we need to replace the buffer here.
3277 */
3278 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3279 ret = hns3_reserve_buffer_map(ring, &res_cbs);
3280 if (ret) {
3281 u64_stats_update_begin(&ring->syncp);
3282 ring->stats.sw_err_cnt++;
3283 u64_stats_update_end(&ring->syncp);
3284				/* if allocating a new buffer fails, exit
3285				 * directly and clear it again in the up flow.
3286 */
3287 netdev_warn(ring->tqp->handle->kinfo.netdev,
3288 "reserve buffer map failed, ret = %d\n",
3289 ret);
3290 return ret;
3291 }
3292 hns3_replace_buffer(ring, ring->next_to_use,
3293 &res_cbs);
3294 }
3295 ring_ptr_move_fw(ring, next_to_use);
3296 }
3297
3298 return 0;
3299}
3300
3301static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
beebca3a 3302{
beebca3a
YL
3303 while (ring->next_to_use != ring->next_to_clean) {
3304		/* When a buffer is not reused, its memory has been
3305		 * freed in hns3_handle_rx_bd or will be freed by the
3306		 * stack, so we only need to unmap the buffer here.
3307 */
3308 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3309 hns3_unmap_buffer(ring,
3310 &ring->desc_cb[ring->next_to_use]);
3311 ring->desc_cb[ring->next_to_use].dma = 0;
3312 }
3313
3314 ring_ptr_move_fw(ring, next_to_use);
3315 }
bb6b94a8
L
3316}
3317
7b763f3f
FL
3318static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3319{
3320 struct net_device *ndev = h->kinfo.netdev;
3321 struct hns3_nic_priv *priv = netdev_priv(ndev);
3322 struct hns3_enet_ring *ring;
3323 u32 i;
3324
3325 for (i = 0; i < h->kinfo.num_tqps; i++) {
3326 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3327 hns3_force_clear_rx_ring(ring);
3328 }
3329}
3330
bb6b94a8
L
3331static void hns3_clear_all_ring(struct hnae3_handle *h)
3332{
3333 struct net_device *ndev = h->kinfo.netdev;
3334 struct hns3_nic_priv *priv = netdev_priv(ndev);
3335 u32 i;
3336
3337 for (i = 0; i < h->kinfo.num_tqps; i++) {
3338 struct netdev_queue *dev_queue;
3339 struct hns3_enet_ring *ring;
3340
3341 ring = priv->ring_data[i].ring;
beebca3a 3342 hns3_clear_tx_ring(ring);
bb6b94a8
L
3343 dev_queue = netdev_get_tx_queue(ndev,
3344 priv->ring_data[i].queue_index);
3345 netdev_tx_reset_queue(dev_queue);
3346
3347 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
7b763f3f
FL
3348 /* Continue to clear other rings even if clearing some
3349 * rings failed.
3350 */
beebca3a 3351 hns3_clear_rx_ring(ring);
bb6b94a8
L
3352 }
3353}
3354
7b763f3f
FL
3355int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3356{
3357 struct net_device *ndev = h->kinfo.netdev;
3358 struct hns3_nic_priv *priv = netdev_priv(ndev);
3359 struct hns3_enet_ring *rx_ring;
3360 int i, j;
3361 int ret;
3362
3363 for (i = 0; i < h->kinfo.num_tqps; i++) {
3364 h->ae_algo->ops->reset_queue(h, i);
3365 hns3_init_ring_hw(priv->ring_data[i].ring);
3366
3367		/* We need to clear the tx ring here because the self test
3368		 * uses the ring without bringing the interface down first
3369 */
3370 hns3_clear_tx_ring(priv->ring_data[i].ring);
3371 priv->ring_data[i].ring->next_to_clean = 0;
3372 priv->ring_data[i].ring->next_to_use = 0;
3373
3374 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3375 hns3_init_ring_hw(rx_ring);
3376 ret = hns3_clear_rx_ring(rx_ring);
3377 if (ret)
3378 return ret;
3379
3380		/* We cannot know the hardware head and tail when this
3381		 * function is called in the reset flow, so we reuse all desc.
3382 */
3383 for (j = 0; j < rx_ring->desc_num; j++)
3384 hns3_reuse_buffer(rx_ring, j);
3385
3386 rx_ring->next_to_clean = 0;
3387 rx_ring->next_to_use = 0;
3388 }
3389
3390 return 0;
3391}
3392
bb6b94a8
L
3393static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3394{
3395 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3396 struct net_device *ndev = kinfo->netdev;
3397
3398 if (!netif_running(ndev))
3399 return -EIO;
3400
3401 return hns3_nic_net_stop(ndev);
3402}
3403
3404static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3405{
3406 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
bb6b94a8
L
3407 int ret = 0;
3408
3409 if (netif_running(kinfo->netdev)) {
3410 ret = hns3_nic_net_up(kinfo->netdev);
3411 if (ret) {
3412 netdev_err(kinfo->netdev,
3413 "hns net up fail, ret=%d!\n", ret);
3414 return ret;
3415 }
6d4c3981 3416 handle->last_reset_time = jiffies;
bb6b94a8
L
3417 }
3418
3419 return ret;
3420}
3421
3422static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3423{
3424 struct net_device *netdev = handle->kinfo.netdev;
3425 struct hns3_nic_priv *priv = netdev_priv(netdev);
3426 int ret;
3427
f09555ff 3428 hns3_init_mac_addr(netdev, false);
bb6b94a8
L
3429 hns3_nic_set_rx_mode(netdev);
3430 hns3_recover_hw_addr(netdev);
3431
681ec399
YL
3432	/* The hardware table is only cleared when the PF resets */
3433 if (!(handle->flags & HNAE3_SUPPORT_VF))
3434 hns3_restore_vlan(netdev);
3435
bb6b94a8
L
3436 /* Carrier off reporting is important to ethtool even BEFORE open */
3437 netif_carrier_off(netdev);
3438
3439 ret = hns3_get_ring_config(priv);
3440 if (ret)
3441 return ret;
3442
3443 ret = hns3_nic_init_vector_data(priv);
3444 if (ret)
3445 return ret;
3446
3447 ret = hns3_init_all_ring(priv);
3448 if (ret) {
3449 hns3_nic_uninit_vector_data(priv);
3450 priv->ring_data = NULL;
3451 }
3452
3453 return ret;
3454}
3455
3456static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3457{
3458 struct net_device *netdev = handle->kinfo.netdev;
3459 struct hns3_nic_priv *priv = netdev_priv(netdev);
3460 int ret;
3461
7b763f3f 3462 hns3_force_clear_all_rx_ring(handle);
bb6b94a8
L
3463
3464 ret = hns3_nic_uninit_vector_data(priv);
3465 if (ret) {
3466 netdev_err(netdev, "uninit vector error\n");
3467 return ret;
3468 }
3469
3470 ret = hns3_uninit_all_ring(priv);
3471 if (ret)
3472 netdev_err(netdev, "uninit ring error\n");
3473
ec777890
YL
3474 hns3_put_ring_config(priv);
3475
bb6b94a8
L
3476 priv->ring_data = NULL;
3477
c7fc8fb6
JS
3478 hns3_uninit_mac_addr(netdev);
3479
bb6b94a8
L
3480 return ret;
3481}
3482
3483static int hns3_reset_notify(struct hnae3_handle *handle,
3484 enum hnae3_reset_notify_type type)
3485{
3486 int ret = 0;
3487
3488 switch (type) {
3489 case HNAE3_UP_CLIENT:
e1586241
SM
3490 ret = hns3_reset_notify_up_enet(handle);
3491 break;
bb6b94a8
L
3492 case HNAE3_DOWN_CLIENT:
3493 ret = hns3_reset_notify_down_enet(handle);
3494 break;
3495 case HNAE3_INIT_CLIENT:
3496 ret = hns3_reset_notify_init_enet(handle);
3497 break;
3498 case HNAE3_UNINIT_CLIENT:
3499 ret = hns3_reset_notify_uninit_enet(handle);
3500 break;
3501 default:
3502 break;
3503 }
3504
3505 return ret;
3506}
3507
7a242b23
YL
3508static void hns3_restore_coal(struct hns3_nic_priv *priv,
3509 struct hns3_enet_coalesce *tx,
3510 struct hns3_enet_coalesce *rx)
3511{
3512 u16 vector_num = priv->vector_num;
3513 int i;
3514
3515 for (i = 0; i < vector_num; i++) {
3516 memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
3517 sizeof(struct hns3_enet_coalesce));
3518 memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
3519 sizeof(struct hns3_enet_coalesce));
3520 }
3521}
3522
3523static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
3524 struct hns3_enet_coalesce *tx,
3525 struct hns3_enet_coalesce *rx)
09f2af64
PL
3526{
3527 struct hns3_nic_priv *priv = netdev_priv(netdev);
3528 struct hnae3_handle *h = hns3_get_handle(netdev);
3529 int ret;
3530
3531 ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3532 if (ret)
3533 return ret;
3534
3535 ret = hns3_get_ring_config(priv);
3536 if (ret)
3537 return ret;
3538
dd38c726
YL
3539 ret = hns3_nic_alloc_vector_data(priv);
3540 if (ret)
3541 goto err_alloc_vector;
3542
7a242b23
YL
3543 hns3_restore_coal(priv, tx, rx);
3544
09f2af64
PL
3545 ret = hns3_nic_init_vector_data(priv);
3546 if (ret)
3547 goto err_uninit_vector;
3548
3549 ret = hns3_init_all_ring(priv);
3550 if (ret)
3551 goto err_put_ring;
3552
3553 return 0;
3554
3555err_put_ring:
3556 hns3_put_ring_config(priv);
3557err_uninit_vector:
3558 hns3_nic_uninit_vector_data(priv);
dd38c726
YL
3559err_alloc_vector:
3560 hns3_nic_dealloc_vector_data(priv);
09f2af64
PL
3561 return ret;
3562}
3563
3564static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3565{
3566 return (new_tqp_num / num_tc) * num_tc;
3567}
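/* Example of the rounding above, values assumed: with num_tc = 4 a
 * request for 10 queues becomes (10 / 4) * 4 = 8, keeping the TQP
 * count an exact multiple of the number of traffic classes.
 */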
3568
3569int hns3_set_channels(struct net_device *netdev,
3570 struct ethtool_channels *ch)
3571{
3572 struct hns3_nic_priv *priv = netdev_priv(netdev);
3573 struct hnae3_handle *h = hns3_get_handle(netdev);
3574 struct hnae3_knic_private_info *kinfo = &h->kinfo;
7a242b23 3575 struct hns3_enet_coalesce tx_coal, rx_coal;
09f2af64
PL
3576 bool if_running = netif_running(netdev);
3577 u32 new_tqp_num = ch->combined_count;
3578 u16 org_tqp_num;
3579 int ret;
3580
3581 if (ch->rx_count || ch->tx_count)
3582 return -EINVAL;
3583
678335a1 3584 if (new_tqp_num > hns3_get_max_available_channels(h) ||
09f2af64
PL
3585 new_tqp_num < kinfo->num_tc) {
3586 dev_err(&netdev->dev,
3587 "Change tqps fail, the tqp range is from %d to %d",
3588 kinfo->num_tc,
678335a1 3589 hns3_get_max_available_channels(h));
09f2af64
PL
3590 return -EINVAL;
3591 }
3592
3593 new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3594 if (kinfo->num_tqps == new_tqp_num)
3595 return 0;
3596
3597 if (if_running)
20e4bf98 3598 hns3_nic_net_stop(netdev);
09f2af64 3599
09f2af64
PL
3600 ret = hns3_nic_uninit_vector_data(priv);
3601 if (ret) {
3602 dev_err(&netdev->dev,
3603 "Unbind vector with tqp fail, nothing is changed");
3604 goto open_netdev;
3605 }
3606
7a242b23
YL
3607	/* Changing the tqp num may also change the vector num.
3608	 * ethtool only supports setting and querying one coalesce
3609	 * configuration for now, so save vector 0's coalesce
3610	 * configuration here in order to restore it.
3611 */
3612 memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
3613 sizeof(struct hns3_enet_coalesce));
3614 memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
3615 sizeof(struct hns3_enet_coalesce));
3616
dd38c726
YL
3617 hns3_nic_dealloc_vector_data(priv);
3618
09f2af64 3619 hns3_uninit_all_ring(priv);
ec777890 3620 hns3_put_ring_config(priv);
09f2af64
PL
3621
3622 org_tqp_num = h->kinfo.num_tqps;
7a242b23 3623 ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
09f2af64 3624 if (ret) {
7a242b23
YL
3625 ret = hns3_modify_tqp_num(netdev, org_tqp_num,
3626 &tx_coal, &rx_coal);
09f2af64
PL
3627 if (ret) {
3628 /* If revert to old tqp failed, fatal error occurred */
3629 dev_err(&netdev->dev,
3630 "Revert to old tqp num fail, ret=%d", ret);
3631 return ret;
3632 }
3633 dev_info(&netdev->dev,
3634 "Change tqp num fail, Revert to old tqp num");
3635 }
3636
3637open_netdev:
3638 if (if_running)
20e4bf98 3639 hns3_nic_net_open(netdev);
09f2af64
PL
3640
3641 return ret;
3642}
3643
1db9b1bf 3644static const struct hnae3_client_ops client_ops = {
76ad4f0e
S
3645 .init_instance = hns3_client_init,
3646 .uninit_instance = hns3_client_uninit,
3647 .link_status_change = hns3_link_status_change,
9df8f79a 3648 .setup_tc = hns3_client_setup_tc,
bb6b94a8 3649 .reset_notify = hns3_reset_notify,
76ad4f0e
S
3650};
3651
3652/* hns3_init_module - Driver registration routine
3653 * hns3_init_module is the first routine called when the driver is
3654 * loaded. All it does is register with the PCI subsystem.
3655 */
3656static int __init hns3_init_module(void)
3657{
3658 int ret;
3659
3660 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3661 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3662
3663 client.type = HNAE3_CLIENT_KNIC;
3664 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3665 hns3_driver_name);
3666
3667 client.ops = &client_ops;
3668
13562d1f
XW
3669 INIT_LIST_HEAD(&client.node);
3670
76ad4f0e
S
3671 ret = hnae3_register_client(&client);
3672 if (ret)
3673 return ret;
3674
3675 ret = pci_register_driver(&hns3_driver);
3676 if (ret)
3677 hnae3_unregister_client(&client);
3678
3679 return ret;
3680}
3681module_init(hns3_init_module);
3682
3683/* hns3_exit_module - Driver exit cleanup routine
3684 * hns3_exit_module is called just before the driver is removed
3685 * from memory.
3686 */
3687static void __exit hns3_exit_module(void)
3688{
3689 pci_unregister_driver(&hns3_driver);
3690 hnae3_unregister_client(&client);
3691}
3692module_exit(hns3_exit_module);
3693
3694MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3695MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3696MODULE_LICENSE("GPL");
3697MODULE_ALIAS("pci:hns-nic");
3c7624d8 3698MODULE_VERSION(HNS3_MOD_VERSION);