// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static void hns3_remove_hw_addr(struct net_device *netdev);

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

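/* Per-vector interrupt handler: it only schedules NAPI on the TQP vector;
 * all TX/RX work is then done in the vector's NAPI poll routine.
 */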
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}

/* This callback records irq affinity changes in the vector's affinity mask
 * when irq_set_affinity_notifier() is used.
 */
static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
					 const cpumask_t *mask)
{
	struct hns3_enet_tqp_vector *tqp_vectors =
		container_of(notify, struct hns3_enet_tqp_vector,
			     affinity_notify);

	tqp_vectors->affinity_mask = *mask;
}

static void hns3_nic_irq_affinity_release(struct kref *ref)
{
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity notifier and affinity mask */
		irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

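/* Request one IRQ per enabled TQP vector. The IRQ name encodes the netdev
 * name, whether the vector carries Tx, Rx or both ring groups, and a
 * per-type index; an affinity notifier and affinity hint are registered
 * for each vector so its mask tracks user changes.
 */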
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name,
				  tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->affinity_notify.notify =
					hns3_nic_irq_affinity_notify;
		tqp_vectors->affinity_notify.release =
					hns3_nic_irq_affinity_release;
		irq_set_affinity_notifier(tqp_vectors->vector_irq,
					  &tqp_vectors->affinity_notify);
		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL (Rate Limiter) are the two ways to achieve
	 * interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}

static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}

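/* Propagate the RSS/TC layout owned by the ae handle to the stack: set up
 * the netdev TC to queue mapping (or reset it when a single TC is in use)
 * and update the real number of Tx/Rx queues to rss_size * num_tc.
 */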
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int i, ret;

	if (kinfo->num_tc <= 1) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, kinfo->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!kinfo->tc_info[i].enable)
				continue;

			netdev_set_tc_queue(netdev,
					    kinfo->tc_info[i].tc,
					    kinfo->tc_info[i].tqp_count,
					    kinfo->tc_info[i].tqp_offset);
		}
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.num_tc;

	return min_t(u16, rss_size, max_rss_size);
}

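/* Bring the data path up: reset all rings, request the per-vector IRQs,
 * enable every vector and then start the ae_dev. On failure the already
 * enabled vectors are disabled again and the IRQs are released.
 */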
static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	return 0;

out_start_err:
	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(netdev, i,
				       kinfo->prio_tc[i]);
	}

	priv->ae_handle->last_reset_time = jiffies;
	return 0;
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);

	hns3_clear_all_ring(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

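/* The four helpers below are the callbacks handed to __dev_uc_sync() and
 * __dev_mc_sync(); they forward unicast/multicast address add and remove
 * requests to the ae_algo ops of the underlying handle.
 */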
static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC) {
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
	} else {
		flags |= HNAE3_VLAN_FLTR;
		if (netdev->flags & IFF_ALLMULTI)
			flags |= HNAE3_USER_MPE;
	}

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;
	int ret;

	new_flags = hns3_get_netdev_flags(netdev);

	ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	if (ret) {
		netdev_err(netdev, "sync uc address fail\n");
		if (ret == -ENOSPC)
			new_flags |= HNAE3_OVERFLOW_UPE;
	}

	if (netdev->flags & IFF_MULTICAST) {
		ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
				    hns3_nic_mc_unsync);
		if (ret) {
			netdev_err(netdev, "sync mc address fail\n");
			if (ret == -ENOSPC)
				new_flags |= HNAE3_OVERFLOW_MPE;
		}
	}

	hns3_update_promisc_mode(netdev, new_flags);
	/* For user-mode promisc, vlan filtering is disabled so that all
	 * packets get in. For MAC-VLAN table overflow promisc, vlan
	 * filtering stays enabled.
	 */
	hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
	h->netdev_flags = new_flags;
}

void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->set_promisc_mode) {
		h->ae_algo->ops->set_promisc_mode(h,
						  promisc_flags & HNAE3_UPE,
						  promisc_flags & HNAE3_MPE);
	}
}

void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool last_state;

	if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
		last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
		if (enable != last_state) {
			netdev_info(netdev,
				    "%s vlan filter\n",
				    enable ? "enable" : "disable");
			h->ae_algo->ops->enable_vlan_filter(h, enable);
		}
	}
}

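/* Prepare a GSO skb for hardware TSO: zero the IPv4 (and, for some tunnel
 * types, outer UDP) checksum fields that hardware recomputes, remove the
 * payload length from the inner L4 pseudo checksum, and report the payload
 * length, MSS and TSO bit for the TX BD.
 */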
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type &
		    SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type &
		    SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae3_set_bit(*type_cs_vlan_tso,
		      HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}

static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header pointer */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header pointer */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}

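/* Fill the L2/L3/L4 (and, for tunnels, outer L2/L3/L4) header length
 * fields of the TX BD. Lengths are written in units of 2 bytes for L2 and
 * 4 bytes for L3/L4, hence the shifts below; unsupported packet types
 * simply leave the length fields untouched.
 */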
static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
			HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet */
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae3_set_field(*ol_type_vlan_len_msec,
				HNS3_TXD_L2LEN_M,
				HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
				HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header */
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes */
			ol4_len = l2_hdr - l4.hdr;
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
					ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes */
			l2_len = l3.hdr - l2_hdr;
			hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
					HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
			 * the txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
			HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
				HNS3_TXD_L4LEN_S,
				(sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware,
		 * the txbd len field is not filled.
		 */
		return;
	}
}

/* When skb->encapsulation is 0 and skb->ip_summed is CHECKSUM_PARTIAL for a
 * UDP packet whose destination port is the IANA-assigned VXLAN port (4789),
 * the hardware is expected to do the checksum offload, but it does not.
 * Fall back to software checksumming for this case.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT	4789
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
		return false;

	skb_checksum_help(skb);

	return true;
}

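/* Fill the L3/L4 type and checksum-enable bits of the TX BD (plus the
 * outer L3 type and tunnel type for encapsulated packets). Tunnel or L4
 * types the hardware cannot offload fall back to skb_checksum_help(),
 * except for GSO skbs, which have to be dropped (-EDOM).
 */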
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4) */
	if (skb->encapsulation) {
		/* define outer network header type */
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_CSUM);
			else
				hnae3_set_field(*ol_type_vlan_len_msec,
						HNS3_TXD_OL3T_M,
						HNS3_TXD_OL3T_S,
						HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
					HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4) */
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae3_set_field(*ol_type_vlan_len_msec,
					HNS3_TXD_TUNTYPE_M,
					HNS3_TXD_TUNTYPE_S,
					HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the tunnel packet if hardware doesn't support
			 * it, because hardware can't calculate the csum when
			 * doing TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already,
			 * driver calculates the l4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
				HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb))
			break;

		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hnae3_set_field(*type_cs_vlan_tso,
				HNS3_TXD_L4T_M,
				HNS3_TXD_L4T_S,
				HNS3_L4T_SCTP);
		break;
	default:
		/* drop the packet if hardware doesn't support the L4 type,
		 * because hardware can't calculate the csum when doing TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already,
		 * driver calculates the l4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}

static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
			HNS3_TXD_BDTYPE_S, 0);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}

static int hns3_fill_desc_vtags(struct sk_buff *skb,
				struct hns3_enet_ring *tx_ring,
				u32 *inner_vlan_flag,
				u32 *out_vlan_flag,
				u16 *inner_vtag,
				u16 *out_vtag)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->tqp->handle->kinfo.netdev->features &
	    NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		u16 vlan_tag;

		vlan_tag = skb_vlan_tag_get(skb);
		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;

		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q)) {
			hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			*out_vtag = vlan_tag;
		} else {
			hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			*inner_vtag = vlan_tag;
		}
	} else if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vhdr;
		int rc;

		rc = skb_cow_head(skb, 0);
		if (rc < 0)
			return rc;
		vhdr = (struct vlan_ethhdr *)skb->data;
		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
					<< HNS3_TX_VLAN_PRIO_SHIFT);
	}

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}

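/* Fill one or more TX BDs for a single piece of the skb (linear data when
 * type is DESC_TYPE_SKB, a page fragment otherwise). The head descriptor
 * also carries the VLAN, checksum-offload and TSO fields; pieces larger
 * than HNS3_MAX_BD_SIZE are split across consecutive BDs that share the
 * same DMA mapping.
 */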
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  int size, int frag_end, enum hns_desc_type type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	struct device *dev = ring_to_dev(ring);
	u32 ol_type_vlan_len_msec = 0;
	u16 bdtp_fe_sc_vld_ra_ri = 0;
	struct skb_frag_struct *frag;
	unsigned int frag_buf_num;
	u32 type_cs_vlan_tso = 0;
	struct sk_buff *skb;
	u16 inner_vtag = 0;
	u16 out_vtag = 0;
	unsigned int k;
	int sizeoflast;
	u32 paylen = 0;
	dma_addr_t dma;
	u16 mss = 0;
	u8 ol4_proto;
	u8 il4_proto;
	int ret;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;
		paylen = skb->len;

		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
					   &ol_type_vlan_len_msec,
					   &inner_vtag, &out_vtag);
		if (unlikely(ret))
			return ret;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);

			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
			if (ret)
				return ret;
			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
					    &type_cs_vlan_tso,
					    &ol_type_vlan_len_msec);
			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
						      &type_cs_vlan_tso,
						      &ol_type_vlan_len_msec);
			if (ret)
				return ret;

			ret = hns3_set_tso(skb, &paylen, &mss,
					   &type_cs_vlan_tso);
			if (ret)
				return ret;
		}

		/* Set txbd */
		desc->tx.ol_type_vlan_len_msec =
			cpu_to_le32(ol_type_vlan_len_msec);
		desc->tx.type_cs_vlan_tso_len =
			cpu_to_le32(type_cs_vlan_tso);
		desc->tx.paylen = cpu_to_le32(paylen);
		desc->tx.mss = cpu_to_le16(mss);
		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);

		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	} else {
		frag = (struct skb_frag_struct *)priv;
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	}

	if (dma_mapping_error(ring->dev, dma)) {
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	desc_cb->length = size;

	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When frag size is bigger than hardware limit, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
		desc_cb->priv = priv;
		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
				DESC_TYPE_SKB : DESC_TYPE_PAGE;

		/* now, fill the descriptor */
		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
				(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
				       frag_end && (k == frag_buf_num - 1) ?
						1 : 0);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

		/* move ring pointer to next */
		ring_ptr_move_fw(ring, next_to_use);

		desc_cb = &ring->desc_cb[ring->next_to_use];
		desc = &ring->desc[ring->next_to_use];
	}

	return 0;
}

static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
				   struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct skb_frag_struct *frag;
	int bdnum_for_frag;
	int frag_num;
	int buf_num;
	int size;
	int i;

	size = skb_headlen(skb);
	buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		bdnum_for_frag =
			(size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
		if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
			return -ENOMEM;

		buf_num += bdnum_for_frag;
	}

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;
	return 0;
}

static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
				  struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	int buf_num;

	/* No. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(ring_space(ring) < buf_num))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}

static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					 ring->desc_cb[ring->next_to_use].length,
					 DMA_TO_DEVICE);
		else if (ring->desc_cb[ring->next_to_use].length)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		ring->desc_cb[ring->next_to_use].length = 0;

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);
	}
}

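/* Main transmit entry point (.ndo_start_xmit). It checks ring space via
 * priv->ops.maybe_stop_tx, fills one descriptor chain for the linear part
 * and one per page fragment, and then submits them to hardware with
 * hnae3_queue_xmit(); on a fill error the descriptors written so far are
 * rolled back by hns3_clear_desc().
 */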
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_nic_ring_data *ring_data =
		&tx_ring_data(priv, skb->queue_mapping);
	struct hns3_enet_ring *ring = ring_data->ring;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int next_to_use_head;
	int next_to_use_frag;
	int buf_num;
	int seg_num;
	int size;
	int ret;
	int i;

	/* Prefetch the data used later */
	prefetch(skb->data);

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_busy++;
		u64_stats_update_end(&ring->syncp);

		goto out_net_tx_busy;
	case -ENOMEM:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		netdev_err(netdev, "no memory to xmit!\n");

		goto out_err_tx_ok;
	default:
		break;
	}

	/* No. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	/* Fill the first part */
	size = skb_headlen(skb);

	next_to_use_head = ring->next_to_use;

	ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
				  DESC_TYPE_SKB);
	if (ret)
		goto head_fill_err;

	next_to_use_frag = ring->next_to_use;
	/* Fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);

		ret = priv->ops.fill_desc(ring, frag, size,
					  seg_num - 1 == i ? 1 : 0,
					  DESC_TYPE_PAGE);

		if (ret)
			goto frag_fill_err;
	}

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* Commit all data before submit */

	hnae3_queue_xmit(ring->tqp, buf_num);

	return NETDEV_TX_OK;

frag_fill_err:
	hns3_clear_desc(ring, next_to_use_frag);

head_fill_err:
	hns3_clear_desc(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(netdev, ring_data->queue_index);
	smp_mb(); /* Commit all data before submit */

	return NETDEV_TX_BUSY;
}

static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    mac_addr->sa_data);
		return 0;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}

static int hns3_nic_do_ioctl(struct net_device *netdev,
			     struct ifreq *ifr, int cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!h->ae_algo->ops->do_ioctl)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}

static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int ret;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
		else
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    h->ae_algo->ops->enable_vlan_filter) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			h->ae_algo->ops->enable_vlan_filter(h, true);
		else
			h->ae_algo->ops->enable_vlan_filter(h, false);
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
		else
			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);

		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
		if (features & NETIF_F_NTUPLE)
			h->ae_algo->ops->enable_fd(h, true);
		else
			h->ae_algo->ops->enable_fd(h, false);
	}

	netdev->features = features;
	return 0;
}

static void hns3_nic_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hnae3_handle *handle = priv->ae_handle;
	struct hns3_enet_ring *ring;
	unsigned int start;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	u64 tx_drop = 0;
	u64 rx_drop = 0;

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	handle->ae_algo->ops->update_stats(handle, &netdev->stats);

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = priv->ring_data[idx].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
			tx_drop += ring->stats.tx_busy;
			tx_drop += ring->stats.sw_err_cnt;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = priv->ring_data[idx + queue_num].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
			rx_drop += ring->stats.non_vld_descs;
			rx_drop += ring->stats.err_pkt_len;
			rx_drop += ring->stats.l2_err;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = netdev->stats.rx_errors;
	stats->multicast = netdev->stats.multicast;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = netdev->stats.tx_errors;
	stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
	stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}

static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
	u8 tc = mqprio_qopt->qopt.num_tc;
	u16 mode = mqprio_qopt->mode;
	u8 hw = mqprio_qopt->qopt.hw;
	bool if_running;
	int ret;

	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
		return -EOPNOTSUPP;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!netdev)
		return -EINVAL;

	if_running = netif_running(netdev);
	if (if_running) {
		hns3_nic_net_stop(netdev);
		msleep(100);
	}

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
	if (ret)
		goto out;

	ret = hns3_nic_set_real_num_queue(netdev);

out:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}

static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	return hns3_setup_tc(dev, type_data);
}

static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	if (!ret)
		set_bit(vid, priv->active_vlans);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	if (!ret)
		clear_bit(vid, priv->active_vlans);

	return ret;
}

static void hns3_restore_vlan(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u16 vid;
	int ret;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
		if (ret)
			netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
				    vid, ret);
	}
}

static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}

static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	bool if_running = netif_running(netdev);
	int ret;

	if (!h->ae_algo->ops->set_mtu)
		return -EOPNOTSUPP;

	/* if this was called with netdev up then bring netdevice down */
	if (if_running) {
		(void)hns3_nic_net_stop(netdev);
		msleep(100);
	}

	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
	if (ret)
		netdev_err(netdev, "failed to change MTU in hardware %d\n",
			   ret);
	else
		netdev->mtu = new_mtu;

	/* if the netdev was running earlier, bring it up again */
	if (if_running && hns3_nic_net_open(netdev))
		ret = -EINVAL;

	return ret;
}

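/* TX timeout handling: hns3_get_tx_timeo_queue_info() locates the stalled
 * queue the same way the stack does and dumps its software and hardware
 * ring pointers; hns3_nic_net_timeout() then rate-limits reset requests
 * and asks for a reset through the ae_algo reset_event op.
 */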
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *tx_ring = NULL;
	int timeout_queue = 0;
	int hw_head, hw_tail;
	int i;

	/* Find the stopped queue the same way the stack does */
	for (i = 0; i < ndev->real_num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(ndev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + ndev->watchdog_timeo))) {
			timeout_queue = i;
			break;
		}
	}

	if (i == ndev->num_tx_queues) {
		netdev_info(ndev,
			    "no netdev TX timeout queue found, timeout count: %llu\n",
			    priv->tx_timeout_count);
		return false;
	}

	tx_ring = priv->ring_data[timeout_queue].ring;

	hw_head = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_HEAD_REG);
	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_TAIL_REG);
	netdev_info(ndev,
		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
		    priv->tx_timeout_count,
		    timeout_queue,
		    tx_ring->next_to_use,
		    tx_ring->next_to_clean,
		    hw_head,
		    hw_tail,
		    readl(tx_ring->tqp_vector->mask_addr));

	return true;
}

static void hns3_nic_net_timeout(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;

	if (!hns3_get_tx_timeo_queue_info(ndev))
		return;

	priv->tx_timeout_count++;

	if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
		return;

	/* request the reset */
	if (h->ae_algo->ops->reset_event)
		h->ae_algo->ops->reset_event(h->pdev, h);
}

static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_do_ioctl		= hns3_nic_do_ioctl,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
};

static bool hns3_is_phys_func(struct pci_dev *pdev)
{
	u32 dev_id = pdev->device;

	switch (dev_id) {
	case HNAE3_DEV_ID_GE:
	case HNAE3_DEV_ID_25GE:
	case HNAE3_DEV_ID_25GE_RDMA:
	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_50GE_RDMA:
	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
		return true;
	case HNAE3_DEV_ID_100G_VF:
	case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
		return false;
	default:
		dev_warn(&pdev->dev, "un-recognized pci device-id %d",
			 dev_id);
	}

	return false;
}

static void hns3_disable_sriov(struct pci_dev *pdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(pdev);
}

static void hns3_get_dev_capability(struct pci_dev *pdev,
				    struct hnae3_ae_dev *ae_dev)
{
	if (pdev->revision >= 0x21)
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
}

/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
			      GFP_KERNEL);
	if (!ae_dev) {
		ret = -ENOMEM;
		return ret;
	}

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hns3_get_dev_capability(pdev, ae_dev);
	pci_set_drvdata(pdev, ae_dev);

	hnae3_register_ae_dev(ae_dev);

	return 0;
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
		hns3_disable_sriov(pdev);

	hnae3_unregister_ae_dev(ae_dev);
}

/**
 * hns3_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
		dev_warn(&pdev->dev, "Can not config SRIOV\n");
		return -EINVAL;
	}

	if (num_vfs) {
		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret)
			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
		else
			return num_vfs;
	} else if (!pci_vfs_assigned(pdev)) {
		pci_disable_sriov(pdev);
	} else {
		dev_warn(&pdev->dev,
			 "Unable to free VFs because some are assigned to VMs.\n");
	}

	return 0;
}

static void hns3_shutdown(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	hnae3_unregister_ae_dev(ae_dev);
	devm_kfree(&pdev->dev, ae_dev);
	pci_set_drvdata(pdev, NULL);

	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}

static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	pci_ers_result_t ret;

	dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (!ae_dev) {
		dev_err(&pdev->dev,
			"Can't recover - error happened during device init\n");
		return PCI_ERS_RESULT_NONE;
	}

	if (ae_dev->ops->process_hw_error)
		ret = ae_dev->ops->process_hw_error(ae_dev);
	else
		return PCI_ERS_RESULT_NONE;

	return ret;
}

static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	dev_info(dev, "requesting reset due to PCI error\n");

	/* request the reset */
	if (ae_dev->ops->reset_event) {
		ae_dev->ops->reset_event(pdev, NULL);
		return PCI_ERS_RESULT_RECOVERED;
	}

	return PCI_ERS_RESULT_DISCONNECT;
}

static const struct pci_error_handlers hns3_err_handler = {
	.error_detected = hns3_error_detected,
	.slot_reset	= hns3_slot_reset,
};

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
	.shutdown = hns3_shutdown,
	.sriov_configure = hns3_pci_sriov_configure,
	.err_handler    = &hns3_err_handler,
};

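/* Advertise the offloads supported on this NIC: checksum, TSO, GRO, tunnel
 * GSO and VLAN offloads on all feature sets; HW VLAN filtering and (on
 * non-VF handles) NTUPLE flow director are only exposed on hardware
 * revision 0x21 and later.
 */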
1831/* set default feature to hns3 */
1832static void hns3_set_default_feature(struct net_device *netdev)
1833{
3e85af6a
PL
1834 struct hnae3_handle *h = hns3_get_handle(netdev);
1835 struct pci_dev *pdev = h->pdev;
1836
76ad4f0e
S
1837 netdev->priv_flags |= IFF_UNICAST_FLT;
1838
1839 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1840 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1841 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1842 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
5b71ac3c 1843 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
76ad4f0e
S
1844
1845 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1846
1847 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1848
1849 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1850 NETIF_F_HW_VLAN_CTAG_FILTER |
052ece6d 1851 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1852 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1853 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1854 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
5b71ac3c 1855 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1856
1857 netdev->vlan_features |=
1858 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1859 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1860 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1861 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
5b71ac3c 1862 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1863
1864 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
b2641e2a 1865 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1866 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1867 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1868 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
5b71ac3c 1869 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
3e85af6a 1870
c17852a8 1871 if (pdev->revision >= 0x21) {
3e85af6a 1872 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1873
1874 if (!(h->flags & HNAE3_SUPPORT_VF)) {
1875 netdev->hw_features |= NETIF_F_NTUPLE;
1876 netdev->features |= NETIF_F_NTUPLE;
1877 }
1878 }
1879}
1880
1881static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1882 struct hns3_desc_cb *cb)
1883{
e4e87715 1884 unsigned int order = hnae3_page_order(ring);
1885 struct page *p;
1886
1887 p = dev_alloc_pages(order);
1888 if (!p)
1889 return -ENOMEM;
1890
1891 cb->priv = p;
1892 cb->page_offset = 0;
1893 cb->reuse_flag = 0;
1894 cb->buf = page_address(p);
e4e87715 1895 cb->length = hnae3_page_size(ring);
1896 cb->type = DESC_TYPE_PAGE;
1897
1898 return 0;
1899}
1900
1901static void hns3_free_buffer(struct hns3_enet_ring *ring,
1902 struct hns3_desc_cb *cb)
1903{
1904 if (cb->type == DESC_TYPE_SKB)
1905 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1906 else if (!HNAE3_IS_TX_RING(ring))
1907 put_page((struct page *)cb->priv);
1908 memset(cb, 0, sizeof(*cb));
1909}
1910
1911static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1912{
1913 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1914 cb->length, ring_to_dma_dir(ring));
1915
2211f4e1 1916 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
1917 return -EIO;
1918
1919 return 0;
1920}
1921
1922static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1923 struct hns3_desc_cb *cb)
1924{
1925 if (cb->type == DESC_TYPE_SKB)
1926 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1927 ring_to_dma_dir(ring));
bcdb12b7 1928 else if (cb->length)
1929 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1930 ring_to_dma_dir(ring));
1931}
1932
1933static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1934{
1935 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1936 ring->desc[i].addr = 0;
1937}
1938
1939static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1940{
1941 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1942
1943 if (!ring->desc_cb[i].dma)
1944 return;
1945
1946 hns3_buffer_detach(ring, i);
1947 hns3_free_buffer(ring, cb);
1948}
1949
1950static void hns3_free_buffers(struct hns3_enet_ring *ring)
1951{
1952 int i;
1953
1954 for (i = 0; i < ring->desc_num; i++)
1955 hns3_free_buffer_detach(ring, i);
1956}
1957
1958/* free desc along with its attached buffer */
1959static void hns3_free_desc(struct hns3_enet_ring *ring)
1960{
1961 int size = ring->desc_num * sizeof(ring->desc[0]);
1962
1963 hns3_free_buffers(ring);
1964
1965 if (ring->desc) {
1966 dma_free_coherent(ring_to_dev(ring), size,
1967 ring->desc, ring->desc_dma_addr);
1968 ring->desc = NULL;
1969 }
1970}
1971
1972static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1973{
1974 int size = ring->desc_num * sizeof(ring->desc[0]);
1975
1976 ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
1977 &ring->desc_dma_addr,
1978 GFP_KERNEL);
1979 if (!ring->desc)
1980 return -ENOMEM;
1981
1982 return 0;
1983}
1984
1985static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1986 struct hns3_desc_cb *cb)
1987{
1988 int ret;
1989
1990 ret = hns3_alloc_buffer(ring, cb);
1991 if (ret)
1992 goto out;
1993
1994 ret = hns3_map_buffer(ring, cb);
1995 if (ret)
1996 goto out_with_buf;
1997
1998 return 0;
1999
2000out_with_buf:
564883bb 2001 hns3_free_buffer(ring, cb);
2002out:
2003 return ret;
2004}
2005
2006static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
2007{
2008 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
2009
2010 if (ret)
2011 return ret;
2012
2013 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2014
2015 return 0;
2016}
2017
2018/* Allocate memory for raw packets and map them for DMA */
2019static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2020{
2021 int i, j, ret;
2022
2023 for (i = 0; i < ring->desc_num; i++) {
2024 ret = hns3_alloc_buffer_attach(ring, i);
2025 if (ret)
2026 goto out_buffer_fail;
2027 }
2028
2029 return 0;
2030
2031out_buffer_fail:
2032 for (j = i - 1; j >= 0; j--)
2033 hns3_free_buffer_detach(ring, j);
2034 return ret;
2035}
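/* Illustrative note, not from the original source: if the allocation above
 * fails at i == 5, the unwind loop walks j = 4, 3, 2, 1, 0 and releases the
 * buffers that were already attached, leaving the ring empty again.
 */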
2036
2037/* detach an in-use buffer and replace it with a reserved one */
2038static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2039 struct hns3_desc_cb *res_cb)
2040{
b9077428 2041 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2042 ring->desc_cb[i] = *res_cb;
2043 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
7d0b130c 2044 ring->desc[i].rx.bd_base_info = 0;
2045}
2046
2047static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2048{
2049 ring->desc_cb[i].reuse_flag = 0;
2050 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
2051 + ring->desc_cb[i].page_offset);
7d0b130c 2052 ring->desc[i].rx.bd_base_info = 0;
2053}
2054
2055static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
2056 int *pkts)
2057{
2058 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2059
2060 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2061 (*bytes) += desc_cb->length;
e4e87715 2062 /* desc_cb will be cleaned after hns3_free_buffer_detach() */
2063 hns3_free_buffer_detach(ring, ring->next_to_clean);
2064
2065 ring_ptr_move_fw(ring, next_to_clean);
2066}
2067
2068static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
2069{
2070 int u = ring->next_to_use;
2071 int c = ring->next_to_clean;
2072
2073 if (unlikely(h > ring->desc_num))
2074 return 0;
2075
2076 return u > c ? (h > c && h <= u) : (h > c || h <= u);
2077}
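/* Illustrative example, not from the original source: with desc_num = 8,
 * next_to_clean = 6 and next_to_use = 2, the valid window wraps around, so
 * head values 7, 0, 1 and 2 are accepted while 3..6 are rejected.
 */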
2078
799997a3 2079void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2080{
2081 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
7a810110 2082 struct hns3_nic_priv *priv = netdev_priv(netdev);
2083 struct netdev_queue *dev_queue;
2084 int bytes, pkts;
2085 int head;
2086
2087 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2088 rmb(); /* Make sure head is ready before touching any data */
2089
2090 if (is_ring_empty(ring) || head == ring->next_to_clean)
799997a3 2091 return; /* no data to poll */
0e6084aa 2093 if (unlikely(!is_valid_clean_head(ring, head))) {
2094 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2095 ring->next_to_use, ring->next_to_clean);
2096
2097 u64_stats_update_begin(&ring->syncp);
2098 ring->stats.io_err_cnt++;
2099 u64_stats_update_end(&ring->syncp);
799997a3 2100 return;
2101 }
2102
2103 bytes = 0;
2104 pkts = 0;
799997a3 2105 while (head != ring->next_to_clean) {
2106 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
2107 /* Issue prefetch for next Tx descriptor */
2108 prefetch(&ring->desc_cb[ring->next_to_clean]);
2109 }
2110
2111 ring->tqp_vector->tx_group.total_bytes += bytes;
2112 ring->tqp_vector->tx_group.total_packets += pkts;
2113
2114 u64_stats_update_begin(&ring->syncp);
2115 ring->stats.tx_bytes += bytes;
2116 ring->stats.tx_pkts += pkts;
2117 u64_stats_update_end(&ring->syncp);
2118
2119 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2120 netdev_tx_completed_queue(dev_queue, pkts, bytes);
2121
2122 if (unlikely(pkts && netif_carrier_ok(netdev) &&
2123 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2124 /* Make sure that anybody stopping the queue after this
2125 * sees the new next_to_clean.
2126 */
2127 smp_mb();
2128 if (netif_tx_queue_stopped(dev_queue) &&
2129 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2130 netif_tx_wake_queue(dev_queue);
2131 ring->stats.restart_queue++;
2132 }
2133 }
2134}
2135
2136static int hns3_desc_unused(struct hns3_enet_ring *ring)
2137{
2138 int ntc = ring->next_to_clean;
2139 int ntu = ring->next_to_use;
2140
2141 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2142}
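/* Illustrative example, not from the original source: with desc_num = 512,
 * next_to_clean = 10 and next_to_use = 500 the count wraps, giving
 * (512 + 10) - 500 = 22 unused descriptors; with next_to_clean = 500 and
 * next_to_use = 10 it is simply 500 - 10 = 490.
 */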
2143
2144static void
2145hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
2146{
2147 struct hns3_desc_cb *desc_cb;
2148 struct hns3_desc_cb res_cbs;
2149 int i, ret;
2150
2151 for (i = 0; i < cleand_count; i++) {
2152 desc_cb = &ring->desc_cb[ring->next_to_use];
2153 if (desc_cb->reuse_flag) {
2154 u64_stats_update_begin(&ring->syncp);
2155 ring->stats.reuse_pg_cnt++;
2156 u64_stats_update_end(&ring->syncp);
2157
2158 hns3_reuse_buffer(ring, ring->next_to_use);
2159 } else {
2160 ret = hns3_reserve_buffer_map(ring, &res_cbs);
2161 if (ret) {
2162 u64_stats_update_begin(&ring->syncp);
2163 ring->stats.sw_err_cnt++;
2164 u64_stats_update_end(&ring->syncp);
2165
2166 netdev_err(ring->tqp->handle->kinfo.netdev,
2167 "hnae reserve buffer map failed.\n");
2168 break;
2169 }
2170 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2171 }
2172
2173 ring_ptr_move_fw(ring, next_to_use);
2174 }
2175
2176 wmb(); /* Make sure all data has been written before submit */
2177 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2178}
2179
2180static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2181 struct hns3_enet_ring *ring, int pull_len,
2182 struct hns3_desc_cb *desc_cb)
2183{
2184 struct hns3_desc *desc;
2185 u32 truesize;
2186 int size;
2187 int last_offset;
2188 bool twobufs;
2189
2190 twobufs = ((PAGE_SIZE < 8192) &&
e4e87715 2191 hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2192
2193 desc = &ring->desc[ring->next_to_clean];
2194 size = le16_to_cpu(desc->rx.size);
2195
e4e87715 2196 truesize = hnae3_buf_size(ring);
2197
2198 if (!twobufs)
e4e87715 2199 last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2200
2201 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
f8d291f0 2202 size - pull_len, truesize);
2203
2204 /* Avoid re-using remote pages; the flag defaults to no reuse */
2205 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2206 return;
2207
2208 if (twobufs) {
2209 /* If we are only owner of page we can reuse it */
2210 if (likely(page_count(desc_cb->priv) == 1)) {
2211 /* Flip page offset to other buffer */
2212 desc_cb->page_offset ^= truesize;
2213
2214 desc_cb->reuse_flag = 1;
2215 /* bump ref count on page before it is given */
2216 get_page(desc_cb->priv);
2217 }
2218 return;
2219 }
2220
2221 /* Move offset up to the next cache line */
2222 desc_cb->page_offset += truesize;
2223
2224 if (desc_cb->page_offset <= last_offset) {
2225 desc_cb->reuse_flag = 1;
2226 /* Bump ref count on page before it is given */
2227 get_page(desc_cb->priv);
2228 }
2229}
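/* Illustrative example, not from the original source: with 4K pages and
 * 2048-byte buffers the "twobufs" path applies, and page_offset ^= truesize
 * simply flips the offset between 0 and 2048, i.e. between the two halves of
 * the page. In the larger-page case the offset instead advances by truesize
 * until it passes last_offset = page_size - buf_size.
 */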
2230
2231static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2232 struct hns3_desc *desc)
2233{
2234 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2235 int l3_type, l4_type;
2236 u32 bd_base_info;
2237 int ol4_type;
2238 u32 l234info;
2239
2240 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2241 l234info = le32_to_cpu(desc->rx.l234_info);
2242
2243 skb->ip_summed = CHECKSUM_NONE;
2244
2245 skb_checksum_none_assert(skb);
2246
2247 if (!(netdev->features & NETIF_F_RXCSUM))
2248 return;
2249
2250 /* check if hardware has done checksum */
e4e87715 2251 if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
76ad4f0e
S
2252 return;
2253
e4e87715
PL
2254 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
2255 hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
2256 hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2257 hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
76ad4f0e
S
2258 u64_stats_update_begin(&ring->syncp);
2259 ring->stats.l3l4_csum_err++;
2260 u64_stats_update_end(&ring->syncp);
2261
2262 return;
2263 }
2264
e4e87715
PL
2265 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2266 HNS3_RXD_L3ID_S);
2267 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2268 HNS3_RXD_L4ID_S);
76ad4f0e 2269
e4e87715
PL
2270 ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2271 HNS3_RXD_OL4ID_S);
76ad4f0e
S
2272 switch (ol4_type) {
2273 case HNS3_OL4_TYPE_MAC_IN_UDP:
2274 case HNS3_OL4_TYPE_NVGRE:
2275 skb->csum_level = 1;
be44b3af 2276 /* fall through */
76ad4f0e
S
2277 case HNS3_OL4_TYPE_NO_TUN:
2278 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
94c5e532
PL
2279 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2280 l3_type == HNS3_L3_TYPE_IPV6) &&
2281 (l4_type == HNS3_L4_TYPE_UDP ||
2282 l4_type == HNS3_L4_TYPE_TCP ||
2283 l4_type == HNS3_L4_TYPE_SCTP))
76ad4f0e
S
2284 skb->ip_summed = CHECKSUM_UNNECESSARY;
2285 break;
fa7a4bd5
JS
2286 default:
2287 break;
76ad4f0e
S
2288 }
2289}
2290
d43e5aca
YL
2291static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2292{
2293 napi_gro_receive(&ring->tqp_vector->napi, skb);
2294}
2295
701a6d6a
JS
2296static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2297 struct hns3_desc *desc, u32 l234info,
2298 u16 *vlan_tag)
5b5455a9
PL
2299{
2300 struct pci_dev *pdev = ring->tqp->handle->pdev;
5b5455a9
PL
2301
2302 if (pdev->revision == 0x20) {
701a6d6a
JS
2303 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2304 if (!(*vlan_tag & VLAN_VID_MASK))
2305 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
5b5455a9 2306
701a6d6a 2307 return (*vlan_tag != 0);
5b5455a9
PL
2308 }
2309
2310#define HNS3_STRP_OUTER_VLAN 0x1
2311#define HNS3_STRP_INNER_VLAN 0x2
2312
e4e87715
PL
2313 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2314 HNS3_RXD_STRP_TAGP_S)) {
5b5455a9 2315 case HNS3_STRP_OUTER_VLAN:
701a6d6a
JS
2316 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2317 return true;
5b5455a9 2318 case HNS3_STRP_INNER_VLAN:
701a6d6a
JS
2319 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2320 return true;
5b5455a9 2321 default:
701a6d6a 2322 return false;
5b5455a9 2323 }
2324}
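/* The hnae3_get_field()/hnae3_get_bit() helpers used above follow the usual
 * mask-and-shift pattern. A minimal stand-alone sketch follows; the mask and
 * shift values are made up for illustration and are not the driver's real
 * HNS3_RXD_* definitions.
 */
#if 0	/* stand-alone example, kept out of the kernel build */
#include <stdio.h>

#define EXAMPLE_GET_FIELD(val, mask, shift)	(((val) & (mask)) >> (shift))

int main(void)
{
	unsigned int l234info = 0x60;	/* made-up descriptor word */

	/* pretend the strip-tag indicator lives in bits 5..6 */
	printf("strp_tagp = %u\n", EXAMPLE_GET_FIELD(l234info, 0x60, 5));
	return 0;
}
#endif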
2325
232fc64b
PL
2326static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2327 struct sk_buff *skb)
2328{
2329 struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
2330 struct hnae3_handle *handle = ring->tqp->handle;
2331 enum pkt_hash_types rss_type;
2332
2333 if (le32_to_cpu(desc->rx.rss_hash))
2334 rss_type = handle->kinfo.rss_type;
2335 else
2336 rss_type = PKT_HASH_TYPE_NONE;
2337
2338 skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
2339}
2340
76ad4f0e
S
2341static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2342 struct sk_buff **out_skb, int *out_bnum)
2343{
2344 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2345 struct hns3_desc_cb *desc_cb;
2346 struct hns3_desc *desc;
2347 struct sk_buff *skb;
2348 unsigned char *va;
2349 u32 bd_base_info;
2350 int pull_len;
2351 u32 l234info;
2352 int length;
2353 int bnum;
2354
2355 desc = &ring->desc[ring->next_to_clean];
2356 desc_cb = &ring->desc_cb[ring->next_to_clean];
2357
2358 prefetch(desc);
2359
846fcc83 2360 length = le16_to_cpu(desc->rx.size);
76ad4f0e 2361 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
76ad4f0e
S
2362
2363 /* Check valid BD */
e4e87715 2364 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
76ad4f0e
S
2365 return -EFAULT;
2366
2367 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2368
 2369 /* Prefetch first cache line of first page.
 2370 * The idea is to cache a few bytes of the packet header. Our L1
 2371 * cache line size is 64B, so we need to prefetch twice to make it
 2372 * 128B. In practice caches with 128B L1 cache lines also exist;
 2373 * in that case a single prefetch suffices to cache the relevant
 2374 * part of the header.
 2375 */
2376 prefetch(va);
2377#if L1_CACHE_BYTES < 128
2378 prefetch(va + L1_CACHE_BYTES);
2379#endif
2380
2381 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2382 HNS3_RX_HEAD_SIZE);
2383 if (unlikely(!skb)) {
2384 netdev_err(netdev, "alloc rx skb fail\n");
2385
2386 u64_stats_update_begin(&ring->syncp);
2387 ring->stats.sw_err_cnt++;
2388 u64_stats_update_end(&ring->syncp);
2389
2390 return -ENOMEM;
2391 }
2392
2393 prefetchw(skb->data);
2394
2395 bnum = 1;
2396 if (length <= HNS3_RX_HEAD_SIZE) {
2397 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2398
2399 /* We can reuse buffer as-is, just make sure it is local */
2400 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2401 desc_cb->reuse_flag = 1;
2402 else /* This page cannot be reused so discard it */
2403 put_page(desc_cb->priv);
2404
2405 ring_ptr_move_fw(ring, next_to_clean);
2406 } else {
2407 u64_stats_update_begin(&ring->syncp);
2408 ring->stats.seg_pkt_cnt++;
2409 u64_stats_update_end(&ring->syncp);
2410
e63cd65f
PL
2411 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2412
76ad4f0e
S
2413 memcpy(__skb_put(skb, pull_len), va,
2414 ALIGN(pull_len, sizeof(long)));
2415
2416 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2417 ring_ptr_move_fw(ring, next_to_clean);
2418
e4e87715 2419 while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
76ad4f0e
S
2420 desc = &ring->desc[ring->next_to_clean];
2421 desc_cb = &ring->desc_cb[ring->next_to_clean];
2422 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2423 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2424 ring_ptr_move_fw(ring, next_to_clean);
2425 bnum++;
2426 }
2427 }
2428
2429 *out_bnum = bnum;
5b5455a9
PL
2430
2431 l234info = le32_to_cpu(desc->rx.l234_info);
2432
 2433 /* Based on the hw strategy, the offloaded tag is stored at
 2434 * ot_vlan_tag in the two-layer tag case, and at vlan_tag
 2435 * in the one-layer tag case.
2436 */
2437 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2438 u16 vlan_tag;
2439
701a6d6a 2440 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2441 __vlan_hwaccel_put_tag(skb,
2442 htons(ETH_P_8021Q),
2443 vlan_tag);
2444 }
2445
e4e87715 2446 if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
76ad4f0e
S
2447 u64_stats_update_begin(&ring->syncp);
2448 ring->stats.non_vld_descs++;
2449 u64_stats_update_end(&ring->syncp);
2450
2451 dev_kfree_skb_any(skb);
2452 return -EINVAL;
2453 }
2454
2455 if (unlikely((!desc->rx.pkt_len) ||
e4e87715 2456 hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
76ad4f0e
S
2457 u64_stats_update_begin(&ring->syncp);
2458 ring->stats.err_pkt_len++;
2459 u64_stats_update_end(&ring->syncp);
2460
2461 dev_kfree_skb_any(skb);
2462 return -EFAULT;
2463 }
2464
e4e87715 2465 if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
76ad4f0e
S
2466 u64_stats_update_begin(&ring->syncp);
2467 ring->stats.l2_err++;
2468 u64_stats_update_end(&ring->syncp);
2469
2470 dev_kfree_skb_any(skb);
2471 return -EFAULT;
2472 }
2473
2474 u64_stats_update_begin(&ring->syncp);
2475 ring->stats.rx_pkts++;
2476 ring->stats.rx_bytes += skb->len;
2477 u64_stats_update_end(&ring->syncp);
2478
2479 ring->tqp_vector->rx_group.total_bytes += skb->len;
2480
2481 hns3_rx_checksum(ring, skb, desc);
232fc64b
PL
2482 hns3_set_rx_skb_rss_type(ring, skb);
2483
76ad4f0e
S
2484 return 0;
2485}
2486
d43e5aca
YL
2487int hns3_clean_rx_ring(
2488 struct hns3_enet_ring *ring, int budget,
2489 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
76ad4f0e
S
2490{
2491#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2492 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2493 int recv_pkts, recv_bds, clean_count, err;
2494 int unused_count = hns3_desc_unused(ring);
2495 struct sk_buff *skb = NULL;
2496 int num, bnum = 0;
2497
2498 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
 2499 rmb(); /* Make sure num has taken effect before the other data is touched */
2500
2501 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2502 num -= unused_count;
2503
2504 while (recv_pkts < budget && recv_bds < num) {
2505 /* Reuse or realloc buffers */
2506 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2507 hns3_nic_alloc_rx_buffers(ring,
2508 clean_count + unused_count);
2509 clean_count = 0;
2510 unused_count = hns3_desc_unused(ring);
2511 }
2512
2513 /* Poll one pkt */
2514 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2515 if (unlikely(!skb)) /* This fault cannot be repaired */
2516 goto out;
2517
2518 recv_bds += bnum;
2519 clean_count += bnum;
 2520 if (unlikely(err)) { /* Skip over the erroneous packet */
2521 recv_pkts++;
2522 continue;
2523 }
2524
 2525 /* Pass the packet up to the IP stack */
2526 skb->protocol = eth_type_trans(skb, netdev);
d43e5aca 2527 rx_fn(ring, skb);
2528
2529 recv_pkts++;
2530 }
2531
2532out:
 2533 /* Make sure all data has been written before submit */
2534 if (clean_count + unused_count > 0)
2535 hns3_nic_alloc_rx_buffers(ring,
2536 clean_count + unused_count);
2537
2538 return recv_pkts;
2539}
2540
2541static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2542{
a95e1f86
FL
2543 struct hns3_enet_tqp_vector *tqp_vector =
2544 ring_group->ring->tqp_vector;
76ad4f0e 2545 enum hns3_flow_level_range new_flow_level;
a95e1f86
FL
2546 int packets_per_msecs;
2547 int bytes_per_msecs;
2548 u32 time_passed_ms;
76ad4f0e 2549 u16 new_int_gl;
76ad4f0e 2550
a95e1f86 2551 if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
76ad4f0e
S
2552 return false;
2553
2554 if (ring_group->total_packets == 0) {
9bc727a9
YL
2555 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2556 ring_group->coal.flow_level = HNS3_FLOW_LOW;
76ad4f0e
S
2557 return true;
2558 }
2559
2560 /* Simple throttlerate management
2561 * 0-10MB/s lower (50000 ints/s)
2562 * 10-20MB/s middle (20000 ints/s)
2563 * 20-1249MB/s high (18000 ints/s)
2564 * > 40000pps ultra (8000 ints/s)
2565 */
2566 new_flow_level = ring_group->coal.flow_level;
2567 new_int_gl = ring_group->coal.int_gl;
2568 time_passed_ms =
2569 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2570
2571 if (!time_passed_ms)
2572 return false;
2573
2574 do_div(ring_group->total_packets, time_passed_ms);
2575 packets_per_msecs = ring_group->total_packets;
2576
2577 do_div(ring_group->total_bytes, time_passed_ms);
2578 bytes_per_msecs = ring_group->total_bytes;
2579
2580#define HNS3_RX_LOW_BYTE_RATE 10000
2581#define HNS3_RX_MID_BYTE_RATE 20000
2582
2583 switch (new_flow_level) {
2584 case HNS3_FLOW_LOW:
a95e1f86 2585 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
76ad4f0e
S
2586 new_flow_level = HNS3_FLOW_MID;
2587 break;
2588 case HNS3_FLOW_MID:
a95e1f86 2589 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
76ad4f0e 2590 new_flow_level = HNS3_FLOW_HIGH;
a95e1f86 2591 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
76ad4f0e
S
2592 new_flow_level = HNS3_FLOW_LOW;
2593 break;
2594 case HNS3_FLOW_HIGH:
2595 case HNS3_FLOW_ULTRA:
2596 default:
a95e1f86 2597 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
76ad4f0e
S
2598 new_flow_level = HNS3_FLOW_MID;
2599 break;
2600 }
2601
a95e1f86
FL
2602#define HNS3_RX_ULTRA_PACKET_RATE 40
2603
2604 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2605 &tqp_vector->rx_group == ring_group)
76ad4f0e
S
2606 new_flow_level = HNS3_FLOW_ULTRA;
2607
2608 switch (new_flow_level) {
2609 case HNS3_FLOW_LOW:
2610 new_int_gl = HNS3_INT_GL_50K;
2611 break;
2612 case HNS3_FLOW_MID:
2613 new_int_gl = HNS3_INT_GL_20K;
2614 break;
2615 case HNS3_FLOW_HIGH:
2616 new_int_gl = HNS3_INT_GL_18K;
2617 break;
2618 case HNS3_FLOW_ULTRA:
2619 new_int_gl = HNS3_INT_GL_8K;
2620 break;
2621 default:
2622 break;
2623 }
2624
2625 ring_group->total_bytes = 0;
2626 ring_group->total_packets = 0;
9bc727a9
YL
2627 ring_group->coal.flow_level = new_flow_level;
2628 if (new_int_gl != ring_group->coal.int_gl) {
2629 ring_group->coal.int_gl = new_int_gl;
76ad4f0e
S
2630 return true;
2631 }
2632 return false;
2633}
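/* Stand-alone sketch (not the driver code) of the byte-rate thresholds in the
 * table above: 10000 and 20000 bytes per millisecond separate the low, middle
 * and high levels. The real function also applies per-level hysteresis and
 * the packets-per-msec "ultra" check, which this sketch deliberately omits.
 */
#if 0	/* stand-alone example, kept out of the kernel build */
#include <stdio.h>

enum example_flow_level { EXAMPLE_FLOW_LOW, EXAMPLE_FLOW_MID, EXAMPLE_FLOW_HIGH };

static enum example_flow_level example_classify(unsigned int bytes_per_msecs)
{
	if (bytes_per_msecs <= 10000)
		return EXAMPLE_FLOW_LOW;
	if (bytes_per_msecs <= 20000)
		return EXAMPLE_FLOW_MID;
	return EXAMPLE_FLOW_HIGH;
}

int main(void)
{
	/* prints "0 1 2": low, middle, high */
	printf("%d %d %d\n", example_classify(5000), example_classify(15000),
	       example_classify(50000));
	return 0;
}
#endif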
2634
2635static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2636{
8b1ff1ea
FL
2637 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2638 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2639 bool rx_update, tx_update;
2640
cd9d187b
FL
2641 if (tqp_vector->int_adapt_down > 0) {
2642 tqp_vector->int_adapt_down--;
2643 return;
2644 }
2645
9bc727a9 2646 if (rx_group->coal.gl_adapt_enable) {
8b1ff1ea
FL
2647 rx_update = hns3_get_new_int_gl(rx_group);
2648 if (rx_update)
2649 hns3_set_vector_coalesce_rx_gl(tqp_vector,
9bc727a9 2650 rx_group->coal.int_gl);
8b1ff1ea
FL
2651 }
2652
9bc727a9 2653 if (tx_group->coal.gl_adapt_enable) {
8b1ff1ea
FL
2654 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2655 if (tx_update)
2656 hns3_set_vector_coalesce_tx_gl(tqp_vector,
9bc727a9 2657 tx_group->coal.int_gl);
76ad4f0e 2658 }
cd9d187b 2659
a95e1f86 2660 tqp_vector->last_jiffies = jiffies;
cd9d187b 2661 tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
76ad4f0e
S
2662}
2663
2664static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2665{
2666 struct hns3_enet_ring *ring;
2667 int rx_pkt_total = 0;
2668
2669 struct hns3_enet_tqp_vector *tqp_vector =
2670 container_of(napi, struct hns3_enet_tqp_vector, napi);
2671 bool clean_complete = true;
2672 int rx_budget;
2673
2674 /* Since the actual Tx work is minimal, we can give the Tx a larger
2675 * budget and be more aggressive about cleaning up the Tx descriptors.
2676 */
799997a3
PL
2677 hns3_for_each_ring(ring, tqp_vector->tx_group)
2678 hns3_clean_tx_ring(ring);
76ad4f0e
S
2679
2680 /* make sure rx ring budget not smaller than 1 */
2681 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2682
2683 hns3_for_each_ring(ring, tqp_vector->rx_group) {
d43e5aca
YL
2684 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2685 hns3_rx_skb);
76ad4f0e
S
2686
2687 if (rx_cleaned >= rx_budget)
2688 clean_complete = false;
2689
2690 rx_pkt_total += rx_cleaned;
2691 }
2692
2693 tqp_vector->rx_group.total_packets += rx_pkt_total;
2694
2695 if (!clean_complete)
2696 return budget;
2697
2698 napi_complete(napi);
2699 hns3_update_new_int_gl(tqp_vector);
2700 hns3_mask_vector_irq(tqp_vector, 1);
2701
2702 return rx_pkt_total;
2703}
2704
2705static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2706 struct hnae3_ring_chain_node *head)
2707{
2708 struct pci_dev *pdev = tqp_vector->handle->pdev;
2709 struct hnae3_ring_chain_node *cur_chain = head;
2710 struct hnae3_ring_chain_node *chain;
2711 struct hns3_enet_ring *tx_ring;
2712 struct hns3_enet_ring *rx_ring;
2713
2714 tx_ring = tqp_vector->tx_group.ring;
2715 if (tx_ring) {
2716 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
e4e87715
PL
2717 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2718 HNAE3_RING_TYPE_TX);
2719 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2720 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
76ad4f0e
S
2721
2722 cur_chain->next = NULL;
2723
2724 while (tx_ring->next) {
2725 tx_ring = tx_ring->next;
2726
2727 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2728 GFP_KERNEL);
2729 if (!chain)
2730 return -ENOMEM;
2731
2732 cur_chain->next = chain;
2733 chain->tqp_index = tx_ring->tqp->tqp_index;
e4e87715
PL
2734 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2735 HNAE3_RING_TYPE_TX);
2736 hnae3_set_field(chain->int_gl_idx,
2737 HNAE3_RING_GL_IDX_M,
2738 HNAE3_RING_GL_IDX_S,
2739 HNAE3_RING_GL_TX);
76ad4f0e
S
2740
2741 cur_chain = chain;
2742 }
2743 }
2744
2745 rx_ring = tqp_vector->rx_group.ring;
2746 if (!tx_ring && rx_ring) {
2747 cur_chain->next = NULL;
2748 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
e4e87715
PL
2749 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2750 HNAE3_RING_TYPE_RX);
2751 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2752 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
76ad4f0e
S
2753
2754 rx_ring = rx_ring->next;
2755 }
2756
2757 while (rx_ring) {
2758 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2759 if (!chain)
2760 return -ENOMEM;
2761
2762 cur_chain->next = chain;
2763 chain->tqp_index = rx_ring->tqp->tqp_index;
e4e87715
PL
2764 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2765 HNAE3_RING_TYPE_RX);
2766 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2767 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
11af96a4 2768
76ad4f0e
S
2769 cur_chain = chain;
2770
2771 rx_ring = rx_ring->next;
2772 }
2773
2774 return 0;
2775}
2776
2777static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2778 struct hnae3_ring_chain_node *head)
2779{
2780 struct pci_dev *pdev = tqp_vector->handle->pdev;
2781 struct hnae3_ring_chain_node *chain_tmp, *chain;
2782
2783 chain = head->next;
2784
2785 while (chain) {
2786 chain_tmp = chain->next;
2787 devm_kfree(&pdev->dev, chain);
2788 chain = chain_tmp;
2789 }
2790}
2791
2792static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2793 struct hns3_enet_ring *ring)
2794{
2795 ring->next = group->ring;
2796 group->ring = ring;
2797
2798 group->count++;
2799}
2800
874bff0b
PL
2801static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
2802{
2803 struct pci_dev *pdev = priv->ae_handle->pdev;
2804 struct hns3_enet_tqp_vector *tqp_vector;
2805 int num_vectors = priv->vector_num;
2806 int numa_node;
2807 int vector_i;
2808
2809 numa_node = dev_to_node(&pdev->dev);
2810
2811 for (vector_i = 0; vector_i < num_vectors; vector_i++) {
2812 tqp_vector = &priv->tqp_vector[vector_i];
2813 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
2814 &tqp_vector->affinity_mask);
2815 }
2816}
2817
76ad4f0e
S
2818static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2819{
2820 struct hnae3_ring_chain_node vector_ring_chain;
2821 struct hnae3_handle *h = priv->ae_handle;
2822 struct hns3_enet_tqp_vector *tqp_vector;
76ad4f0e 2823 int ret = 0;
ece4bf46 2824 int i;
76ad4f0e 2825
874bff0b
PL
2826 hns3_nic_set_cpumask(priv);
2827
dd38c726
YL
2828 for (i = 0; i < priv->vector_num; i++) {
2829 tqp_vector = &priv->tqp_vector[i];
2830 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
2831 tqp_vector->num_tqps = 0;
2832 }
76ad4f0e 2833
dd38c726
YL
2834 for (i = 0; i < h->kinfo.num_tqps; i++) {
2835 u16 vector_i = i % priv->vector_num;
2836 u16 tqp_num = h->kinfo.num_tqps;
76ad4f0e
S
2837
2838 tqp_vector = &priv->tqp_vector[vector_i];
2839
2840 hns3_add_ring_to_group(&tqp_vector->tx_group,
2841 priv->ring_data[i].ring);
2842
2843 hns3_add_ring_to_group(&tqp_vector->rx_group,
2844 priv->ring_data[i + tqp_num].ring);
2845
76ad4f0e
S
2846 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2847 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
dd38c726 2848 tqp_vector->num_tqps++;
76ad4f0e
S
2849 }
2850
dd38c726 2851 for (i = 0; i < priv->vector_num; i++) {
76ad4f0e
S
2852 tqp_vector = &priv->tqp_vector[i];
2853
2854 tqp_vector->rx_group.total_bytes = 0;
2855 tqp_vector->rx_group.total_packets = 0;
2856 tqp_vector->tx_group.total_bytes = 0;
2857 tqp_vector->tx_group.total_packets = 0;
76ad4f0e
S
2858 tqp_vector->handle = h;
2859
2860 ret = hns3_get_vector_ring_chain(tqp_vector,
2861 &vector_ring_chain);
2862 if (ret)
dd38c726 2863 return ret;
76ad4f0e
S
2864
2865 ret = h->ae_algo->ops->map_ring_to_vector(h,
2866 tqp_vector->vector_irq, &vector_ring_chain);
76ad4f0e
S
2867
2868 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2869
dd38c726 2870 if (ret)
ece4bf46 2871 goto map_ring_fail;
dd38c726 2872
76ad4f0e
S
2873 netif_napi_add(priv->netdev, &tqp_vector->napi,
2874 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2875 }
2876
dd38c726 2877 return 0;
ece4bf46
HT
2878
2879map_ring_fail:
2880 while (i--)
2881 netif_napi_del(&priv->tqp_vector[i].napi);
2882
2883 return ret;
dd38c726
YL
2884}
2885
2886static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
2887{
2888 struct hnae3_handle *h = priv->ae_handle;
2889 struct hns3_enet_tqp_vector *tqp_vector;
2890 struct hnae3_vector_info *vector;
2891 struct pci_dev *pdev = h->pdev;
2892 u16 tqp_num = h->kinfo.num_tqps;
2893 u16 vector_num;
2894 int ret = 0;
2895 u16 i;
2896
2897 /* RSS size, cpu online and vector_num should be the same */
2898 /* Should consider 2p/4p later */
2899 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2900 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2901 GFP_KERNEL);
2902 if (!vector)
2903 return -ENOMEM;
2904
2905 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2906
2907 priv->vector_num = vector_num;
2908 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2909 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2910 GFP_KERNEL);
2911 if (!priv->tqp_vector) {
2912 ret = -ENOMEM;
2913 goto out;
2914 }
2915
2916 for (i = 0; i < priv->vector_num; i++) {
2917 tqp_vector = &priv->tqp_vector[i];
2918 tqp_vector->idx = i;
2919 tqp_vector->mask_addr = vector[i].io_addr;
2920 tqp_vector->vector_irq = vector[i].vector;
2921 hns3_vector_gl_rl_init(tqp_vector, priv);
2922 }
2923
76ad4f0e
S
2924out:
2925 devm_kfree(&pdev->dev, vector);
2926 return ret;
2927}
2928
dd38c726
YL
2929static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
2930{
2931 group->ring = NULL;
2932 group->count = 0;
2933}
2934
76ad4f0e
S
2935static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2936{
2937 struct hnae3_ring_chain_node vector_ring_chain;
2938 struct hnae3_handle *h = priv->ae_handle;
2939 struct hns3_enet_tqp_vector *tqp_vector;
76ad4f0e
S
2940 int i, ret;
2941
2942 for (i = 0; i < priv->vector_num; i++) {
2943 tqp_vector = &priv->tqp_vector[i];
2944
2945 ret = hns3_get_vector_ring_chain(tqp_vector,
2946 &vector_ring_chain);
2947 if (ret)
2948 return ret;
2949
2950 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2951 tqp_vector->vector_irq, &vector_ring_chain);
2952 if (ret)
2953 return ret;
2954
2955 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2956
2957 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2958 (void)irq_set_affinity_hint(
2959 priv->tqp_vector[i].vector_irq,
2960 NULL);
ae064e61 2961 free_irq(priv->tqp_vector[i].vector_irq,
2962 &priv->tqp_vector[i]);
76ad4f0e
S
2963 }
2964
2965 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
dd38c726
YL
2966 hns3_clear_ring_group(&tqp_vector->rx_group);
2967 hns3_clear_ring_group(&tqp_vector->tx_group);
76ad4f0e
S
2968 netif_napi_del(&priv->tqp_vector[i].napi);
2969 }
2970
dd38c726
YL
2971 return 0;
2972}
2973
2974static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
2975{
2976 struct hnae3_handle *h = priv->ae_handle;
2977 struct pci_dev *pdev = h->pdev;
2978 int i, ret;
2979
2980 for (i = 0; i < priv->vector_num; i++) {
2981 struct hns3_enet_tqp_vector *tqp_vector;
2982
2983 tqp_vector = &priv->tqp_vector[i];
2984 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2985 if (ret)
2986 return ret;
2987 }
76ad4f0e 2988
dd38c726 2989 devm_kfree(&pdev->dev, priv->tqp_vector);
76ad4f0e
S
2990 return 0;
2991}
2992
2993static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2994 int ring_type)
2995{
2996 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2997 int queue_num = priv->ae_handle->kinfo.num_tqps;
2998 struct pci_dev *pdev = priv->ae_handle->pdev;
2999 struct hns3_enet_ring *ring;
3000
3001 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
3002 if (!ring)
3003 return -ENOMEM;
3004
3005 if (ring_type == HNAE3_RING_TYPE_TX) {
3006 ring_data[q->tqp_index].ring = ring;
66b44730 3007 ring_data[q->tqp_index].queue_index = q->tqp_index;
76ad4f0e
S
3008 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
3009 } else {
3010 ring_data[q->tqp_index + queue_num].ring = ring;
66b44730 3011 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
76ad4f0e
S
3012 ring->io_base = q->io_base;
3013 }
3014
e4e87715 3015 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
76ad4f0e 3016
76ad4f0e
S
3017 ring->tqp = q;
3018 ring->desc = NULL;
3019 ring->desc_cb = NULL;
3020 ring->dev = priv->dev;
3021 ring->desc_dma_addr = 0;
3022 ring->buf_size = q->buf_size;
3023 ring->desc_num = q->desc_num;
3024 ring->next_to_use = 0;
3025 ring->next_to_clean = 0;
3026
3027 return 0;
3028}
3029
3030static int hns3_queue_to_ring(struct hnae3_queue *tqp,
3031 struct hns3_nic_priv *priv)
3032{
3033 int ret;
3034
3035 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
3036 if (ret)
3037 return ret;
3038
3039 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
3040 if (ret)
3041 return ret;
3042
3043 return 0;
3044}
3045
3046static int hns3_get_ring_config(struct hns3_nic_priv *priv)
3047{
3048 struct hnae3_handle *h = priv->ae_handle;
3049 struct pci_dev *pdev = h->pdev;
3050 int i, ret;
3051
3052 priv->ring_data = devm_kzalloc(&pdev->dev,
3053 array3_size(h->kinfo.num_tqps,
3054 sizeof(*priv->ring_data),
3055 2),
3056 GFP_KERNEL);
3057 if (!priv->ring_data)
3058 return -ENOMEM;
3059
3060 for (i = 0; i < h->kinfo.num_tqps; i++) {
3061 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
3062 if (ret)
3063 goto err;
3064 }
3065
3066 return 0;
3067err:
3068 devm_kfree(&pdev->dev, priv->ring_data);
3069 return ret;
3070}
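/* Illustrative note, not from the original source: ring_data is sized for
 * 2 * num_tqps entries; index i holds the TX ring for queue i and index
 * i + num_tqps holds the matching RX ring, which is why callers elsewhere
 * index ring_data[i] and ring_data[i + h->kinfo.num_tqps] in pairs.
 */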
3071
09f2af64
PL
3072static void hns3_put_ring_config(struct hns3_nic_priv *priv)
3073{
3074 struct hnae3_handle *h = priv->ae_handle;
3075 int i;
3076
3077 for (i = 0; i < h->kinfo.num_tqps; i++) {
3078 devm_kfree(priv->dev, priv->ring_data[i].ring);
3079 devm_kfree(priv->dev,
3080 priv->ring_data[i + h->kinfo.num_tqps].ring);
3081 }
3082 devm_kfree(priv->dev, priv->ring_data);
3083}
3084
76ad4f0e
S
3085static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
3086{
3087 int ret;
3088
3089 if (ring->desc_num <= 0 || ring->buf_size <= 0)
3090 return -EINVAL;
3091
3092 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
3093 GFP_KERNEL);
3094 if (!ring->desc_cb) {
3095 ret = -ENOMEM;
3096 goto out;
3097 }
3098
3099 ret = hns3_alloc_desc(ring);
3100 if (ret)
3101 goto out_with_desc_cb;
3102
3103 if (!HNAE3_IS_TX_RING(ring)) {
3104 ret = hns3_alloc_ring_buffers(ring);
3105 if (ret)
3106 goto out_with_desc;
3107 }
3108
3109 return 0;
3110
3111out_with_desc:
3112 hns3_free_desc(ring);
3113out_with_desc_cb:
3114 kfree(ring->desc_cb);
3115 ring->desc_cb = NULL;
3116out:
3117 return ret;
3118}
3119
3120static void hns3_fini_ring(struct hns3_enet_ring *ring)
3121{
3122 hns3_free_desc(ring);
3123 kfree(ring->desc_cb);
3124 ring->desc_cb = NULL;
3125 ring->next_to_clean = 0;
3126 ring->next_to_use = 0;
3127}
3128
1db9b1bf 3129static int hns3_buf_size2type(u32 buf_size)
76ad4f0e
S
3130{
3131 int bd_size_type;
3132
3133 switch (buf_size) {
3134 case 512:
3135 bd_size_type = HNS3_BD_SIZE_512_TYPE;
3136 break;
3137 case 1024:
3138 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3139 break;
3140 case 2048:
3141 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3142 break;
3143 case 4096:
3144 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3145 break;
3146 default:
3147 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3148 }
3149
3150 return bd_size_type;
3151}
3152
3153static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3154{
3155 dma_addr_t dma = ring->desc_dma_addr;
3156 struct hnae3_queue *q = ring->tqp;
3157
3158 if (!HNAE3_IS_TX_RING(ring)) {
3159 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3160 (u32)dma);
3161 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3162 (u32)((dma >> 31) >> 1));
3163
3164 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3165 hns3_buf_size2type(ring->buf_size));
3166 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3167 ring->desc_num / 8 - 1);
3168
3169 } else {
3170 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3171 (u32)dma);
3172 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3173 (u32)((dma >> 31) >> 1));
3174
3175 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3176 ring->desc_num / 8 - 1);
3177 }
3178}
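/* Illustrative note, not from the original source: the descriptor base is
 * programmed as a low/high 32-bit pair, and "(dma >> 31) >> 1" is dma >> 32
 * written so that the shift stays well-defined even when dma_addr_t is only
 * 32 bits wide. A stand-alone sketch of the split:
 */
#if 0	/* stand-alone example, kept out of the kernel build */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x0000004512345678ULL;	/* made-up descriptor base */
	uint32_t low = (uint32_t)dma;
	uint32_t high = (uint32_t)((dma >> 31) >> 1);

	/* prints low=0x12345678 high=0x00000045 */
	printf("low=0x%08lx high=0x%08lx\n", (unsigned long)low,
	       (unsigned long)high);
	return 0;
}
#endif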
3179
1c772154
YL
3180static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3181{
3182 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3183 int i;
3184
3185 for (i = 0; i < HNAE3_MAX_TC; i++) {
3186 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3187 int j;
3188
3189 if (!tc_info->enable)
3190 continue;
3191
3192 for (j = 0; j < tc_info->tqp_count; j++) {
3193 struct hnae3_queue *q;
3194
3195 q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3196 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3197 tc_info->tc);
3198 }
3199 }
3200}
3201
5668abda 3202int hns3_init_all_ring(struct hns3_nic_priv *priv)
76ad4f0e
S
3203{
3204 struct hnae3_handle *h = priv->ae_handle;
3205 int ring_num = h->kinfo.num_tqps * 2;
3206 int i, j;
3207 int ret;
3208
3209 for (i = 0; i < ring_num; i++) {
3210 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3211 if (ret) {
3212 dev_err(priv->dev,
3213 "Alloc ring memory fail! ret=%d\n", ret);
3214 goto out_when_alloc_ring_memory;
3215 }
3216
76ad4f0e
S
3217 u64_stats_init(&priv->ring_data[i].ring->syncp);
3218 }
3219
3220 return 0;
3221
3222out_when_alloc_ring_memory:
3223 for (j = i - 1; j >= 0; j--)
ee83f776 3224 hns3_fini_ring(priv->ring_data[j].ring);
76ad4f0e
S
3225
3226 return -ENOMEM;
3227}
3228
5668abda 3229int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
76ad4f0e
S
3230{
3231 struct hnae3_handle *h = priv->ae_handle;
3232 int i;
3233
3234 for (i = 0; i < h->kinfo.num_tqps; i++) {
3235 if (h->ae_algo->ops->reset_queue)
3236 h->ae_algo->ops->reset_queue(h, i);
3237
3238 hns3_fini_ring(priv->ring_data[i].ring);
3239 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3240 }
76ad4f0e
S
3241 return 0;
3242}
3243
3244/* Set the MAC address if it is configured, or leave it to the AE driver */
f09555ff 3245static void hns3_init_mac_addr(struct net_device *netdev, bool init)
3246{
3247 struct hns3_nic_priv *priv = netdev_priv(netdev);
3248 struct hnae3_handle *h = priv->ae_handle;
3249 u8 mac_addr_temp[ETH_ALEN];
3250
f09555ff 3251 if (h->ae_algo->ops->get_mac_addr && init) {
76ad4f0e
S
3252 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3253 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3254 }
3255
3256 /* Check if the MAC address is valid, if not get a random one */
3257 if (!is_valid_ether_addr(netdev->dev_addr)) {
3258 eth_hw_addr_random(netdev);
3259 dev_warn(priv->dev, "using random MAC address %pM\n",
3260 netdev->dev_addr);
76ad4f0e 3261 }
139e8792
L
3262
3263 if (h->ae_algo->ops->set_mac_addr)
59098055 3264 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
139e8792 3265
76ad4f0e
S
3266}
3267
6871af29
JS
3268static int hns3_restore_fd_rules(struct net_device *netdev)
3269{
3270 struct hnae3_handle *h = hns3_get_handle(netdev);
3271 int ret = 0;
3272
3273 if (h->ae_algo->ops->restore_fd_rules)
3274 ret = h->ae_algo->ops->restore_fd_rules(h);
3275
3276 return ret;
3277}
3278
3279static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3280{
3281 struct hnae3_handle *h = hns3_get_handle(netdev);
3282
3283 if (h->ae_algo->ops->del_all_fd_entries)
3284 h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3285}
3286
76ad4f0e
S
3287static void hns3_nic_set_priv_ops(struct net_device *netdev)
3288{
3289 struct hns3_nic_priv *priv = netdev_priv(netdev);
3290
0bbbf15d 3291 priv->ops.fill_desc = hns3_fill_desc;
76ad4f0e 3292 if ((netdev->features & NETIF_F_TSO) ||
0bbbf15d 3293 (netdev->features & NETIF_F_TSO6))
76ad4f0e 3294 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
0bbbf15d 3295 else
76ad4f0e 3296 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
76ad4f0e
S
3297}
3298
3299static int hns3_client_init(struct hnae3_handle *handle)
3300{
3301 struct pci_dev *pdev = handle->pdev;
0d43bf45 3302 u16 alloc_tqps, max_rss_size;
76ad4f0e
S
3303 struct hns3_nic_priv *priv;
3304 struct net_device *netdev;
3305 int ret;
3306
0d43bf45
HT
3307 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3308 &max_rss_size);
3309 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
76ad4f0e
S
3310 if (!netdev)
3311 return -ENOMEM;
3312
3313 priv = netdev_priv(netdev);
3314 priv->dev = &pdev->dev;
3315 priv->netdev = netdev;
3316 priv->ae_handle = handle;
6d4c3981 3317 priv->ae_handle->last_reset_time = jiffies;
f8fa222c 3318 priv->tx_timeout_count = 0;
76ad4f0e
S
3319
3320 handle->kinfo.netdev = netdev;
3321 handle->priv = (void *)priv;
3322
f09555ff 3323 hns3_init_mac_addr(netdev, true);
76ad4f0e
S
3324
3325 hns3_set_default_feature(netdev);
3326
3327 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3328 netdev->priv_flags |= IFF_UNICAST_FLT;
3329 netdev->netdev_ops = &hns3_nic_netdev_ops;
3330 SET_NETDEV_DEV(netdev, &pdev->dev);
3331 hns3_ethtool_set_ops(netdev);
3332 hns3_nic_set_priv_ops(netdev);
3333
3334 /* Carrier off reporting is important to ethtool even BEFORE open */
3335 netif_carrier_off(netdev);
3336
82b53214
YL
3337 if (handle->flags & HNAE3_SUPPORT_VF)
3338 handle->reset_level = HNAE3_VF_RESET;
3339 else
3340 handle->reset_level = HNAE3_FUNC_RESET;
3341
76ad4f0e
S
3342 ret = hns3_get_ring_config(priv);
3343 if (ret) {
3344 ret = -ENOMEM;
3345 goto out_get_ring_cfg;
3346 }
3347
dd38c726
YL
3348 ret = hns3_nic_alloc_vector_data(priv);
3349 if (ret) {
3350 ret = -ENOMEM;
3351 goto out_alloc_vector_data;
3352 }
3353
76ad4f0e
S
3354 ret = hns3_nic_init_vector_data(priv);
3355 if (ret) {
3356 ret = -ENOMEM;
3357 goto out_init_vector_data;
3358 }
3359
3360 ret = hns3_init_all_ring(priv);
3361 if (ret) {
3362 ret = -ENOMEM;
3363 goto out_init_ring_data;
3364 }
3365
3366 ret = register_netdev(netdev);
3367 if (ret) {
3368 dev_err(priv->dev, "probe register netdev fail!\n");
3369 goto out_reg_netdev_fail;
3370 }
3371
986743db
YL
3372 hns3_dcbnl_setup(handle);
3373
a8e8b7ff
S
3374 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3375 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3376
76ad4f0e
S
3377 return ret;
3378
3379out_reg_netdev_fail:
3380out_init_ring_data:
3381 (void)hns3_nic_uninit_vector_data(priv);
76ad4f0e 3382out_init_vector_data:
dd38c726
YL
3383 hns3_nic_dealloc_vector_data(priv);
3384out_alloc_vector_data:
3385 priv->ring_data = NULL;
76ad4f0e
S
3386out_get_ring_cfg:
3387 priv->ae_handle = NULL;
3388 free_netdev(netdev);
3389 return ret;
3390}
3391
3392static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3393{
3394 struct net_device *netdev = handle->kinfo.netdev;
3395 struct hns3_nic_priv *priv = netdev_priv(netdev);
3396 int ret;
3397
f05e2109
JS
3398 hns3_remove_hw_addr(netdev);
3399
76ad4f0e
S
3400 if (netdev->reg_state != NETREG_UNINITIALIZED)
3401 unregister_netdev(netdev);
3402
dc5e6064
JS
3403 hns3_del_all_fd_rules(netdev, true);
3404
7b763f3f
FL
3405 hns3_force_clear_all_rx_ring(handle);
3406
76ad4f0e
S
3407 ret = hns3_nic_uninit_vector_data(priv);
3408 if (ret)
3409 netdev_err(netdev, "uninit vector error\n");
3410
dd38c726
YL
3411 ret = hns3_nic_dealloc_vector_data(priv);
3412 if (ret)
3413 netdev_err(netdev, "dealloc vector error\n");
3414
76ad4f0e
S
3415 ret = hns3_uninit_all_ring(priv);
3416 if (ret)
3417 netdev_err(netdev, "uninit ring error\n");
3418
ec777890
YL
3419 hns3_put_ring_config(priv);
3420
76ad4f0e
S
3421 priv->ring_data = NULL;
3422
3423 free_netdev(netdev);
3424}
3425
3426static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3427{
3428 struct net_device *netdev = handle->kinfo.netdev;
3429
3430 if (!netdev)
3431 return;
3432
3433 if (linkup) {
3434 netif_carrier_on(netdev);
3435 netif_tx_wake_all_queues(netdev);
3436 netdev_info(netdev, "link up\n");
3437 } else {
3438 netif_carrier_off(netdev);
3439 netif_tx_stop_all_queues(netdev);
3440 netdev_info(netdev, "link down\n");
3441 }
3442}
3443
9df8f79a
YL
3444static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3445{
3446 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3447 struct net_device *ndev = kinfo->netdev;
075cfdd6 3448 bool if_running;
9df8f79a 3449 int ret;
9df8f79a
YL
3450
3451 if (tc > HNAE3_MAX_TC)
3452 return -EINVAL;
3453
3454 if (!ndev)
3455 return -ENODEV;
3456
075cfdd6
CIK
3457 if_running = netif_running(ndev);
3458
9df8f79a
YL
3459 if (if_running) {
3460 (void)hns3_nic_net_stop(ndev);
3461 msleep(100);
3462 }
3463
3464 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3465 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3466 if (ret)
3467 goto err_out;
3468
9df8f79a
YL
3469 ret = hns3_nic_set_real_num_queue(ndev);
3470
3471err_out:
3472 if (if_running)
3473 (void)hns3_nic_net_open(ndev);
3474
3475 return ret;
3476}
3477
bb6b94a8
L
3478static void hns3_recover_hw_addr(struct net_device *ndev)
3479{
3480 struct netdev_hw_addr_list *list;
3481 struct netdev_hw_addr *ha, *tmp;
3482
3483 /* go through and sync uc_addr entries to the device */
3484 list = &ndev->uc;
3485 list_for_each_entry_safe(ha, tmp, &list->list, list)
3486 hns3_nic_uc_sync(ndev, ha->addr);
3487
3488 /* go through and sync mc_addr entries to the device */
3489 list = &ndev->mc;
3490 list_for_each_entry_safe(ha, tmp, &list->list, list)
3491 hns3_nic_mc_sync(ndev, ha->addr);
3492}
3493
f05e2109
JS
3494static void hns3_remove_hw_addr(struct net_device *netdev)
3495{
3496 struct netdev_hw_addr_list *list;
3497 struct netdev_hw_addr *ha, *tmp;
3498
3499 hns3_nic_uc_unsync(netdev, netdev->dev_addr);
3500
3501 /* go through and unsync uc_addr entries to the device */
3502 list = &netdev->uc;
3503 list_for_each_entry_safe(ha, tmp, &list->list, list)
3504 hns3_nic_uc_unsync(netdev, ha->addr);
3505
3506 /* go through and unsync mc_addr entries to the device */
3507 list = &netdev->mc;
3508 list_for_each_entry_safe(ha, tmp, &list->list, list)
3509 if (ha->refcount > 1)
3510 hns3_nic_mc_unsync(netdev, ha->addr);
3511}
3512
beebca3a 3513static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
bb6b94a8 3514{
beebca3a 3515 while (ring->next_to_clean != ring->next_to_use) {
7b763f3f 3516 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
beebca3a
YL
3517 hns3_free_buffer_detach(ring, ring->next_to_clean);
3518 ring_ptr_move_fw(ring, next_to_clean);
3519 }
3520}
3521
3522static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3523{
3524 struct hns3_desc_cb res_cbs;
3525 int ret;
3526
3527 while (ring->next_to_use != ring->next_to_clean) {
 3528 /* When a buffer is not reused, its memory has been
 3529 * freed in hns3_handle_rx_bd or will be freed by the
 3530 * stack, so we need to replace the buffer here.
3531 */
3532 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3533 ret = hns3_reserve_buffer_map(ring, &res_cbs);
3534 if (ret) {
3535 u64_stats_update_begin(&ring->syncp);
3536 ring->stats.sw_err_cnt++;
3537 u64_stats_update_end(&ring->syncp);
 3538 /* if allocating a new buffer fails, exit directly
 3539 * and re-clear in the up flow.
3540 */
3541 netdev_warn(ring->tqp->handle->kinfo.netdev,
3542 "reserve buffer map failed, ret = %d\n",
3543 ret);
3544 return ret;
3545 }
3546 hns3_replace_buffer(ring, ring->next_to_use,
3547 &res_cbs);
3548 }
3549 ring_ptr_move_fw(ring, next_to_use);
3550 }
3551
3552 return 0;
3553}
3554
3555static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
beebca3a 3556{
3557 while (ring->next_to_use != ring->next_to_clean) {
 3558 /* When a buffer is not reused, its memory has been
 3559 * freed in hns3_handle_rx_bd or will be freed by the
 3560 * stack, so we only need to unmap the buffer here.
3561 */
3562 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3563 hns3_unmap_buffer(ring,
3564 &ring->desc_cb[ring->next_to_use]);
3565 ring->desc_cb[ring->next_to_use].dma = 0;
3566 }
3567
3568 ring_ptr_move_fw(ring, next_to_use);
3569 }
bb6b94a8
L
3570}
3571
7b763f3f
FL
3572static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3573{
3574 struct net_device *ndev = h->kinfo.netdev;
3575 struct hns3_nic_priv *priv = netdev_priv(ndev);
3576 struct hns3_enet_ring *ring;
3577 u32 i;
3578
3579 for (i = 0; i < h->kinfo.num_tqps; i++) {
3580 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3581 hns3_force_clear_rx_ring(ring);
3582 }
3583}
3584
bb6b94a8
L
3585static void hns3_clear_all_ring(struct hnae3_handle *h)
3586{
3587 struct net_device *ndev = h->kinfo.netdev;
3588 struct hns3_nic_priv *priv = netdev_priv(ndev);
3589 u32 i;
3590
3591 for (i = 0; i < h->kinfo.num_tqps; i++) {
3592 struct netdev_queue *dev_queue;
3593 struct hns3_enet_ring *ring;
3594
3595 ring = priv->ring_data[i].ring;
beebca3a 3596 hns3_clear_tx_ring(ring);
bb6b94a8
L
3597 dev_queue = netdev_get_tx_queue(ndev,
3598 priv->ring_data[i].queue_index);
3599 netdev_tx_reset_queue(dev_queue);
3600
3601 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
7b763f3f
FL
3602 /* Continue to clear other rings even if clearing some
3603 * rings failed.
3604 */
beebca3a 3605 hns3_clear_rx_ring(ring);
bb6b94a8
L
3606 }
3607}
3608
7b763f3f
FL
3609int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3610{
3611 struct net_device *ndev = h->kinfo.netdev;
3612 struct hns3_nic_priv *priv = netdev_priv(ndev);
3613 struct hns3_enet_ring *rx_ring;
3614 int i, j;
3615 int ret;
3616
3617 for (i = 0; i < h->kinfo.num_tqps; i++) {
3618 h->ae_algo->ops->reset_queue(h, i);
3619 hns3_init_ring_hw(priv->ring_data[i].ring);
3620
3621 /* We need to clear tx ring here because self test will
3622 * use the ring and will not run down before up
3623 */
3624 hns3_clear_tx_ring(priv->ring_data[i].ring);
3625 priv->ring_data[i].ring->next_to_clean = 0;
3626 priv->ring_data[i].ring->next_to_use = 0;
3627
3628 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3629 hns3_init_ring_hw(rx_ring);
3630 ret = hns3_clear_rx_ring(rx_ring);
3631 if (ret)
3632 return ret;
3633
 3634 /* We cannot know the hardware head and tail when this
 3635 * function is called in the reset flow, so we reuse all desc.
3636 */
3637 for (j = 0; j < rx_ring->desc_num; j++)
3638 hns3_reuse_buffer(rx_ring, j);
3639
3640 rx_ring->next_to_clean = 0;
3641 rx_ring->next_to_use = 0;
3642 }
3643
1c772154
YL
3644 hns3_init_tx_ring_tc(priv);
3645
7b763f3f
FL
3646 return 0;
3647}
3648
3649static void hns3_store_coal(struct hns3_nic_priv *priv)
3650{
 3651 /* ethtool only supports setting and querying one coalesce
 3652 * configuration for now, so save vector 0's coalesce
 3653 * configuration here in order to restore it.
3654 */
3655 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
3656 sizeof(struct hns3_enet_coalesce));
3657 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
3658 sizeof(struct hns3_enet_coalesce));
3659}
3660
3661static void hns3_restore_coal(struct hns3_nic_priv *priv)
3662{
3663 u16 vector_num = priv->vector_num;
3664 int i;
3665
3666 for (i = 0; i < vector_num; i++) {
3667 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
3668 sizeof(struct hns3_enet_coalesce));
3669 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
3670 sizeof(struct hns3_enet_coalesce));
3671 }
3672}
3673
bb6b94a8
L
3674static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3675{
3676 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3677 struct net_device *ndev = kinfo->netdev;
3678
3679 if (!netif_running(ndev))
6b1385cc 3680 return 0;
bb6b94a8
L
3681
3682 return hns3_nic_net_stop(ndev);
3683}
3684
3685static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3686{
3687 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
bb6b94a8
L
3688 int ret = 0;
3689
3690 if (netif_running(kinfo->netdev)) {
3691 ret = hns3_nic_net_up(kinfo->netdev);
3692 if (ret) {
3693 netdev_err(kinfo->netdev,
3694 "hns net up fail, ret=%d!\n", ret);
3695 return ret;
3696 }
6d4c3981 3697 handle->last_reset_time = jiffies;
bb6b94a8
L
3698 }
3699
3700 return ret;
3701}
3702
3703static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3704{
3705 struct net_device *netdev = handle->kinfo.netdev;
3706 struct hns3_nic_priv *priv = netdev_priv(netdev);
7325523a 3707 bool vlan_filter_enable;
bb6b94a8
L
3708 int ret;
3709
f09555ff 3710 hns3_init_mac_addr(netdev, false);
bb6b94a8 3711 hns3_recover_hw_addr(netdev);
7325523a
JS
3712 hns3_update_promisc_mode(netdev, handle->netdev_flags);
3713 vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
3714 hns3_enable_vlan_filter(netdev, vlan_filter_enable);
3715
bb6b94a8 3716
681ec399
YL
3717 /* Hardware table is only clear when pf resets */
3718 if (!(handle->flags & HNAE3_SUPPORT_VF))
3719 hns3_restore_vlan(netdev);
3720
6871af29
JS
3721 hns3_restore_fd_rules(netdev);
3722
bb6b94a8
L
3723 /* Carrier off reporting is important to ethtool even BEFORE open */
3724 netif_carrier_off(netdev);
3725
e4fd7502
HT
3726 hns3_restore_coal(priv);
3727
bb6b94a8
L
3728 ret = hns3_nic_init_vector_data(priv);
3729 if (ret)
3730 return ret;
3731
3732 ret = hns3_init_all_ring(priv);
3733 if (ret) {
3734 hns3_nic_uninit_vector_data(priv);
3735 priv->ring_data = NULL;
3736 }
3737
3738 return ret;
3739}
3740
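/* hns3_reset_notify_uninit_enet - tear the enet client down for a reset:
 * force-clear the RX rings, unbind the TQP vectors, save the current
 * coalesce settings and release the rings. For an ongoing function reset
 * the MAC/VLAN and flow director entries are also removed by software.
 */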
3741static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3742{
6871af29 3743 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
bb6b94a8
L
3744 struct net_device *netdev = handle->kinfo.netdev;
3745 struct hns3_nic_priv *priv = netdev_priv(netdev);
3746 int ret;
3747
7b763f3f 3748 hns3_force_clear_all_rx_ring(handle);
bb6b94a8
L
3749
3750 ret = hns3_nic_uninit_vector_data(priv);
3751 if (ret) {
3752 netdev_err(netdev, "uninit vector error\n");
3753 return ret;
3754 }
3755
e4fd7502
HT
3756 hns3_store_coal(priv);
3757
bb6b94a8
L
3758 ret = hns3_uninit_all_ring(priv);
3759 if (ret)
3760 netdev_err(netdev, "uninit ring error\n");
3761
f05e2109
JS
3762 /* It is cumbersome for the hardware to pick-and-choose entries for
3763 * deletion from its table space. Hence, for a function reset, software
3764 * intervention is required to delete the entries.
6871af29 3765 */
f05e2109
JS
3766 if (hns3_dev_ongoing_func_reset(ae_dev)) {
3767 hns3_remove_hw_addr(netdev);
6871af29 3768 hns3_del_all_fd_rules(netdev, false);
f05e2109 3769 }
6871af29 3770
bb6b94a8
L
3771 return ret;
3772}
3773
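/* Dispatch a reset notification from the ae layer to the handlers above. */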
3774static int hns3_reset_notify(struct hnae3_handle *handle,
3775 enum hnae3_reset_notify_type type)
3776{
3777 int ret = 0;
3778
3779 switch (type) {
3780 case HNAE3_UP_CLIENT:
e1586241
SM
3781 ret = hns3_reset_notify_up_enet(handle);
3782 break;
bb6b94a8
L
3783 case HNAE3_DOWN_CLIENT:
3784 ret = hns3_reset_notify_down_enet(handle);
3785 break;
3786 case HNAE3_INIT_CLIENT:
3787 ret = hns3_reset_notify_init_enet(handle);
3788 break;
3789 case HNAE3_UNINIT_CLIENT:
3790 ret = hns3_reset_notify_uninit_enet(handle);
3791 break;
3792 default:
3793 break;
3794 }
3795
3796 return ret;
3797}
3798
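/* hns3_modify_tqp_num - apply a new TQP count: program it through the
 * set_channels ae op, re-read the ring configuration, then re-allocate and
 * re-bind the vectors and rings, unwinding in reverse order on failure.
 */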
e4fd7502 3799static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
09f2af64
PL
3800{
3801 struct hns3_nic_priv *priv = netdev_priv(netdev);
3802 struct hnae3_handle *h = hns3_get_handle(netdev);
3803 int ret;
3804
3805 ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3806 if (ret)
3807 return ret;
3808
3809 ret = hns3_get_ring_config(priv);
3810 if (ret)
3811 return ret;
3812
dd38c726
YL
3813 ret = hns3_nic_alloc_vector_data(priv);
3814 if (ret)
3815 goto err_alloc_vector;
3816
e4fd7502 3817 hns3_restore_coal(priv);
7a242b23 3818
09f2af64
PL
3819 ret = hns3_nic_init_vector_data(priv);
3820 if (ret)
3821 goto err_uninit_vector;
3822
3823 ret = hns3_init_all_ring(priv);
3824 if (ret)
3825 goto err_put_ring;
3826
3827 return 0;
3828
3829err_put_ring:
3830 hns3_put_ring_config(priv);
3831err_uninit_vector:
3832 hns3_nic_uninit_vector_data(priv);
dd38c726
YL
3833err_alloc_vector:
3834 hns3_nic_dealloc_vector_data(priv);
09f2af64
PL
3835 return ret;
3836}
3837
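/* hns3_adjust_tqps_num - round the requested queue count down to a multiple
 * of the TC count so every TC gets the same number of queues, e.g. a request
 * of 10 queues with 4 TCs is adjusted down to 8.
 */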
3838static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3839{
3840 return (new_tqp_num / num_tc) * num_tc;
3841}
3842
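/* hns3_set_channels - ethtool set_channels handler, typically reached via a
 * request such as "ethtool -L <ifname> combined 8". Only the combined count
 * may change; the old vectors and rings are torn down and rebuilt with the
 * new TQP number, and on failure the driver attempts to revert to the old
 * count.
 */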
3843int hns3_set_channels(struct net_device *netdev,
3844 struct ethtool_channels *ch)
3845{
3846 struct hns3_nic_priv *priv = netdev_priv(netdev);
3847 struct hnae3_handle *h = hns3_get_handle(netdev);
3848 struct hnae3_knic_private_info *kinfo = &h->kinfo;
3849 bool if_running = netif_running(netdev);
3850 u32 new_tqp_num = ch->combined_count;
3851 u16 org_tqp_num;
3852 int ret;
3853
3854 if (ch->rx_count || ch->tx_count)
3855 return -EINVAL;
3856
678335a1 3857 if (new_tqp_num > hns3_get_max_available_channels(h) ||
09f2af64
PL
3858 new_tqp_num < kinfo->num_tc) {
3859 dev_err(&netdev->dev,
3860 "Failed to change tqps, the tqp range is from %d to %d",
3861 kinfo->num_tc,
678335a1 3862 hns3_get_max_available_channels(h));
09f2af64
PL
3863 return -EINVAL;
3864 }
3865
3866 new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3867 if (kinfo->num_tqps == new_tqp_num)
3868 return 0;
3869
3870 if (if_running)
20e4bf98 3871 hns3_nic_net_stop(netdev);
09f2af64 3872
09f2af64
PL
3873 ret = hns3_nic_uninit_vector_data(priv);
3874 if (ret) {
3875 dev_err(&netdev->dev,
3876 "Failed to unbind vector from tqp, nothing is changed");
3877 goto open_netdev;
3878 }
3879
e4fd7502 3880 hns3_store_coal(priv);
7a242b23 3881
dd38c726
YL
3882 hns3_nic_dealloc_vector_data(priv);
3883
09f2af64 3884 hns3_uninit_all_ring(priv);
ec777890 3885 hns3_put_ring_config(priv);
09f2af64
PL
3886
3887 org_tqp_num = h->kinfo.num_tqps;
e4fd7502 3888 ret = hns3_modify_tqp_num(netdev, new_tqp_num);
09f2af64 3889 if (ret) {
e4fd7502 3890 ret = hns3_modify_tqp_num(netdev, org_tqp_num);
09f2af64
PL
3891 if (ret) {
3892 /* If reverting to the old tqp number failed, a fatal error occurred */
3893 dev_err(&netdev->dev,
3894 "Failed to revert to old tqp num, ret=%d", ret);
3895 return ret;
3896 }
3897 dev_info(&netdev->dev,
3898 "Failed to change tqp num, reverted to old tqp num");
3899 }
3900
3901open_netdev:
3902 if (if_running)
20e4bf98 3903 hns3_nic_net_open(netdev);
09f2af64
PL
3904
3905 return ret;
3906}
3907
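/* Callbacks registered with the hnae3 framework; the ae layer invokes them
 * for client instance init/uninit, link status changes, TC setup and the
 * reset notifications handled above.
 */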
1db9b1bf 3908static const struct hnae3_client_ops client_ops = {
76ad4f0e
S
3909 .init_instance = hns3_client_init,
3910 .uninit_instance = hns3_client_uninit,
3911 .link_status_change = hns3_link_status_change,
9df8f79a 3912 .setup_tc = hns3_client_setup_tc,
bb6b94a8 3913 .reset_notify = hns3_reset_notify,
76ad4f0e
S
3914};
3915
3916/* hns3_init_module - Driver registration routine
3917 * hns3_init_module is the first routine called when the driver is
3918 * loaded. All it does is register the hnae3 client and the PCI driver.
3919 */
3920static int __init hns3_init_module(void)
3921{
3922 int ret;
3923
3924 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3925 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3926
3927 client.type = HNAE3_CLIENT_KNIC;
3928 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3929 hns3_driver_name);
3930
3931 client.ops = &client_ops;
3932
13562d1f
XW
3933 INIT_LIST_HEAD(&client.node);
3934
76ad4f0e
S
3935 ret = hnae3_register_client(&client);
3936 if (ret)
3937 return ret;
3938
3939 ret = pci_register_driver(&hns3_driver);
3940 if (ret)
3941 hnae3_unregister_client(&client);
3942
3943 return ret;
3944}
3945module_init(hns3_init_module);
3946
3947/* hns3_exit_module - Driver exit cleanup routine
3948 * hns3_exit_module is called just before the driver is removed
3949 * from memory.
3950 */
3951static void __exit hns3_exit_module(void)
3952{
3953 pci_unregister_driver(&hns3_driver);
3954 hnae3_unregister_client(&client);
3955}
3956module_exit(hns3_exit_module);
3957
3958MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3959MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3960MODULE_LICENSE("GPL");
3961MODULE_ALIAS("pci:hns-nic");
3c7624d8 3962MODULE_VERSION(HNS3_MOD_VERSION);