/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);

static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset
	 * Check that we are not already in the middle of a reset or that a
	 * reset has already been triggered
	 */

	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

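/* Common transmit path shared by the regular (skb) and XDP xmit flows:
 * hands the prepared tx context to ena_com_prepare_tx(), updates the
 * per-ring stats and advances next_to_use. A non-ENOMEM failure is
 * treated as fatal and schedules a device reset.
 */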
static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&ring->syncp);
		ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}

/* This is the XDP napi callback. XDP queues use a napi callback that is
 * separate from the one used by the Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmasking the
	 * interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}

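/* Map an XDP frame for transmission: the first bytes (up to the ring's
 * tx_max_header_size) are handed to the device as a push header, and
 * whatever remains is DMA-mapped as a single buffer.
 */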
static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
			       struct ena_tx_buffer *tx_info,
			       struct xdp_buff *xdp,
			       void **push_hdr,
			       u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = convert_to_xdp_frame(xdp);
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&xdp_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}

static int ena_xdp_xmit_buff(struct net_device *dev,
			     struct xdp_buff *xdp,
			     int qid,
			     struct ena_rx_buffer *rx_info)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_com_tx_ctx ena_tx_ctx = {0};
	struct ena_tx_buffer *tx_info;
	struct ena_ring *xdp_ring;
	u16 next_to_use, req_id;
	int rc;
	void *push_hdr;
	u32 push_len;

	xdp_ring = &adapter->tx_ring[qid];
	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	page_ref_inc(rx_info->page);
	tx_info->xdp_rx_page = rx_info->page;

	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdp->data_end - xdp->data);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
	 * has a mb
	 */
	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.doorbells++;
	u64_stats_update_end(&xdp_ring->syncp);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:

	return NETDEV_TX_OK;
}

static int ena_xdp_execute(struct ena_ring *rx_ring,
			   struct xdp_buff *xdp,
			   struct ena_rx_buffer *rx_info)
{
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	if (verdict == XDP_TX)
		ena_xdp_xmit_buff(rx_ring->netdev,
				  xdp,
				  rx_ring->qid + rx_ring->adapter->num_io_queues,
				  rx_info);
	else if (unlikely(verdict == XDP_ABORTED))
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
	else if (unlikely(verdict > XDP_TX))
		bpf_warn_invalid_xdp_action(verdict);
out:
	rcu_read_unlock();
	return verdict;
}

static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}

static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					  struct bpf_prog *prog,
					  int first,
					  int count)
{
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}

void ena_xdp_exchange_program(struct ena_adapter *adapter,
			      struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}

static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
		} else if (old_bpf_prog) {
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "xdp program set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}

/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	case XDP_QUERY_PROG:
		bpf->prog_id = adapter->xdp_bpf_prog ?
			adapter->xdp_bpf_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}

static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + rx_ring->rx_headroom;
	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev,
		       ena_buf->paddr - rx_ring->rx_headroom,
		       ENA_PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}

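/* Post up to 'num' fresh RX buffers to the submission queue. req_ids are
 * taken from the free_ids ring so completions can arrive out of order;
 * the doorbell is written once at the end if anything was posted.
 */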
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s",
			  is_xdp ? "xdp frame" : "skb");
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id: %hu\n",
			  req_id);

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&ring->syncp);

	/* Trigger device reset */
	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < xdp_ring->ring_size)) {
		tx_info = &xdp_ring->tx_buffer_info[req_id];
		if (likely(tx_info->xdpf))
			return 0;
	}

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}

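/* Reclaim completed TX descriptors: for each completion reported by the
 * device, unmap the buffers, free the skb, return the req_id to free_ids
 * and, if the queue was stopped, wake it once enough space is available.
 */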
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
		return NULL;
	}

	return skb;
}

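/* Build an skb for a received packet. Small packets (up to rx_copybreak)
 * are copied into a freshly allocated linear skb so the RX page can be
 * reused; larger packets attach the RX pages to the skb as frags instead.
 */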
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;
	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;
		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_good++;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

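/* Build an xdp_buff around the first RX buffer of the packet and run the
 * attached XDP program on it; oversized packets are simply dropped.
 */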
int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct ena_rx_buffer *rx_info;
	int ret;

	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
	xdp->data = page_address(rx_info->page) +
		rx_info->page_offset + rx_ring->rx_headroom;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_hard_start = page_address(rx_info->page);
	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
	/* If for some reason we received a bigger packet than
	 * we expect, then we simply drop it
	 */
	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
		return XDP_DROP;

	ret = ena_xdp_execute(rx_ring, xdp, rx_info);

	/* The xdp program might expand the headers */
	if (ret == XDP_PASS) {
		rx_info->page_offset = xdp->data - xdp->data_hard_start;
		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
	}

	return ret;
}
/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	u32 res_budget, work_done;
	int rx_copybreak_pkt = 0;
	int refill_threshold;
	struct sk_buff *skb;
	int refill_required;
	struct xdp_buff xdp;
	int total_len = 0;
	int xdp_verdict;
	int rc = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;
	xdp.rxq = &rx_ring->xdp_rxq;

	do {
		xdp_verdict = XDP_PASS;
		skb = NULL;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		if (ena_xdp_present_ring(rx_ring))
			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);

		/* allocate skb and fill it */
		if (xdp_verdict == XDP_PASS)
			skb = ena_rx_skb(rx_ring,
					 rx_ring->ena_bufs,
					 ena_rx_ctx.descs,
					 &next_to_clean);

		if (unlikely(!skb)) {
			if (xdp_verdict == XDP_TX) {
				ena_free_rx_page(rx_ring,
						 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
				res_budget--;
			}
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			if (xdp_verdict == XDP_TX || xdp_verdict == XDP_DROP)
				continue;
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}

1720
1721static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
1722{
1723 struct dim_sample dim_sample;
1724 struct ena_ring *rx_ring = ena_napi->rx_ring;
1725
1726 if (!rx_ring->per_napi_packets)
1727 return;
1728
1729 rx_ring->non_empty_napi_events++;
1730
1731 dim_update_sample(rx_ring->non_empty_napi_events,
1732 rx_ring->rx_stats.cnt,
1733 rx_ring->rx_stats.bytes,
1734 &dim_sample);
1735
1736 net_dim(&ena_napi->dim, dim_sample);
1737
1738cd3e 1738 rx_ring->per_napi_packets = 0;
1738cd3e
NB
1739}
1740
c2b54204 1741static void ena_unmask_interrupt(struct ena_ring *tx_ring,
418df30f
NB
1742 struct ena_ring *rx_ring)
1743{
1744 struct ena_eth_io_intr_reg intr_reg;
548c4940
SJ
1745 u32 rx_interval = 0;
1746 /* Rx ring can be NULL when for XDP tx queues which don't have an
1747 * accompanying rx_ring pair.
1748 */
1749 if (rx_ring)
1750 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
1751 rx_ring->smoothed_interval :
1752 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
418df30f
NB
1753
1754 /* Update intr register: rx intr delay,
1755 * tx intr delay and interrupt unmask
1756 */
1757 ena_com_update_intr_reg(&intr_reg,
7b8a2878 1758 rx_interval,
418df30f
NB
1759 tx_ring->smoothed_interval,
1760 true);
1761
1762 /* It is a shared MSI-X.
1763 * Tx and Rx CQ have pointer to it.
1764 * So we use one of them to reach the intr reg
548c4940 1765 * The Tx ring is used because the rx_ring is NULL for XDP queues
418df30f 1766 */
548c4940 1767 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
418df30f
NB
1768}
1769
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		if (rx_ring)
			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
						 numa_node);
	}

	tx_ring->cpu = cpu;
	if (rx_ring)
		rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}

static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
{
	u32 total_done = 0;
	u16 next_to_clean;
	u32 tx_bytes = 0;
	int tx_pkts = 0;
	u16 req_id;
	int rc;

	if (unlikely(!xdp_ring))
		return 0;
	next_to_clean = xdp_ring->next_to_clean;

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct xdp_frame *xdpf;

		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_xdp_req_id(xdp_ring, req_id);
		if (rc)
			break;

		tx_info = &xdp_ring->tx_buffer_info[req_id];
		xdpf = tx_info->xdpf;

		tx_info->xdpf = NULL;
		tx_info->last_jiffies = 0;
		ena_unmap_tx_buff(xdp_ring, tx_info);

		netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
			  xdpf);

		tx_bytes += xdpf->len;
		tx_pkts++;
		total_done += tx_info->tx_descs;

		__free_page(tx_info->xdp_rx_page);
		xdp_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     xdp_ring->ring_size);
	}

	xdp_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);

	netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  xdp_ring->qid, tx_pkts);

	return tx_pkts;
}

1738cd3e
NB
1857static int ena_io_poll(struct napi_struct *napi, int budget)
1858{
1859 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1860 struct ena_ring *tx_ring, *rx_ring;
24dee0c7
NB
1861 int tx_work_done;
1862 int rx_work_done = 0;
1738cd3e
NB
1863 int tx_budget;
1864 int napi_comp_call = 0;
1865 int ret;
1866
1867 tx_ring = ena_napi->tx_ring;
1868 rx_ring = ena_napi->rx_ring;
1869
913b0bfd
SJ
1870 tx_ring->first_interrupt = ena_napi->first_interrupt;
1871 rx_ring->first_interrupt = ena_napi->first_interrupt;
1872
1738cd3e
NB
1873 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1874
3f6159db
NB
1875 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1876 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1738cd3e
NB
1877 napi_complete_done(napi, 0);
1878 return 0;
1879 }
1880
1881 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
24dee0c7
NB
1882 /* On netpoll the budget is zero and the handler should only clean the
1883 * tx completions.
1884 */
1885 if (likely(budget))
1886 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1738cd3e 1887
b1669c9f
NB
 1888	/* If the device is about to reset or is down, avoid unmasking
 1889	 * the interrupt and return 0 so NAPI won't reschedule
1890 */
1891 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1892 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1893 napi_complete_done(napi, 0);
1894 ret = 0;
1738cd3e 1895
b1669c9f 1896 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1738cd3e 1897 napi_comp_call = 1;
1738cd3e 1898
b1669c9f
NB
 1899	/* Update numa and unmask the interrupt only when scheduled
1900 * from the interrupt context (vs from sk_busy_loop)
1738cd3e 1901 */
b1669c9f 1902 if (napi_complete_done(napi, rx_work_done)) {
282faf61
AK
1903 /* We apply adaptive moderation on Rx path only.
1904 * Tx uses static interrupt moderation.
1905 */
b1669c9f 1906 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
282faf61 1907 ena_adjust_adaptive_rx_intr_moderation(ena_napi);
b1669c9f 1908
418df30f 1909 ena_unmask_interrupt(tx_ring, rx_ring);
b1669c9f 1910 }
1738cd3e 1911
1738cd3e
NB
1912 ena_update_ring_numa_node(tx_ring, rx_ring);
1913
1914 ret = rx_work_done;
1915 } else {
1916 ret = budget;
1917 }
1918
1919 u64_stats_update_begin(&tx_ring->syncp);
1920 tx_ring->tx_stats.napi_comp += napi_comp_call;
1921 tx_ring->tx_stats.tx_poll++;
1922 u64_stats_update_end(&tx_ring->syncp);
1923
1924 return ret;
1925}
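/* Budget example for the poll routine above (illustrative numbers): with the
 * NAPI budget of ENA_NAPI_BUDGET (64) and ring_size = 1024, tx_budget is
 * 1024 / ENA_TX_POLL_BUDGET_DIVIDER. When both rx_work_done < budget and
 * tx_work_done < tx_budget, napi_complete_done() is attempted; if it succeeds
 * (i.e. the poll was scheduled from interrupt context rather than busy
 * polling), Rx moderation is adjusted and the IRQ is unmasked. Otherwise the
 * full budget is returned so NAPI reschedules with the interrupt still masked.
 */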
1926
1927static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1928{
1929 struct ena_adapter *adapter = (struct ena_adapter *)data;
1930
1931 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1932
1933 /* Don't call the aenq handler before probe is done */
1934 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1935 ena_com_aenq_intr_handler(adapter->ena_dev, data);
1936
1937 return IRQ_HANDLED;
1938}
1939
1940/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1941 * @irq: interrupt number
1942 * @data: pointer to a network interface private napi device structure
1943 */
1944static irqreturn_t ena_intr_msix_io(int irq, void *data)
1945{
1946 struct ena_napi *ena_napi = data;
1947
913b0bfd 1948 ena_napi->first_interrupt = true;
8510e1a3 1949
e745dafa 1950 napi_schedule_irqoff(&ena_napi->napi);
1738cd3e
NB
1951
1952 return IRQ_HANDLED;
1953}
1954
06443684
NB
 1955/* Reserve a single MSI-X vector for management (admin + aenq),
 1956 * plus one vector for each potential io queue.
 1957 * The number of potential io queues is the minimum of what the device
 1958 * supports and the number of vCPUs.
1959 */
4d192660 1960static int ena_enable_msix(struct ena_adapter *adapter)
1738cd3e 1961{
06443684
NB
1962 int msix_vecs, irq_cnt;
1963
1964 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1965 netif_err(adapter, probe, adapter->netdev,
1966 "Error, MSI-X is already enabled\n");
1967 return -EPERM;
1968 }
1738cd3e
NB
1969
 1970	/* Reserve the max msix vectors we might need */
ce1f3521 1971 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1738cd3e
NB
1972 netif_dbg(adapter, probe, adapter->netdev,
1973 "trying to enable MSI-X, vectors %d\n", msix_vecs);
1974
06443684
NB
1975 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1976 msix_vecs, PCI_IRQ_MSIX);
1977
1978 if (irq_cnt < 0) {
1738cd3e 1979 netif_err(adapter, probe, adapter->netdev,
06443684 1980 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1738cd3e
NB
1981 return -ENOSPC;
1982 }
1983
06443684
NB
1984 if (irq_cnt != msix_vecs) {
1985 netif_notice(adapter, probe, adapter->netdev,
1986 "enable only %d MSI-X (out of %d), reduce the number of queues\n",
1987 irq_cnt, msix_vecs);
faa615f9 1988 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1738cd3e
NB
1989 }
1990
06443684
NB
1991 if (ena_init_rx_cpu_rmap(adapter))
1992 netif_warn(adapter, probe, adapter->netdev,
1993 "Failed to map IRQs to CPUs\n");
1994
1995 adapter->msix_vecs = irq_cnt;
1996 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
1738cd3e
NB
1997
1998 return 0;
1999}
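/* Worked example (a sketch, assuming ENA_MAX_MSIX_VEC(n) expands to n plus the
 * single management vector and ENA_ADMIN_MSIX_VEC is that one vector): with
 * max_num_io_queues = 8 the driver asks for up to 9 vectors. If
 * pci_alloc_irq_vectors() grants only 5, the driver keeps the management
 * vector and trims itself to 5 - ENA_ADMIN_MSIX_VEC = 4 IO queues.
 */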
2000
2001static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
2002{
2003 u32 cpu;
2004
2005 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
2006 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
2007 pci_name(adapter->pdev));
2008 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
2009 ena_intr_msix_mgmnt;
2010 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
2011 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
da6f4cf5 2012 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
1738cd3e
NB
2013 cpu = cpumask_first(cpu_online_mask);
2014 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
2015 cpumask_set_cpu(cpu,
2016 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
2017}
2018
2019static void ena_setup_io_intr(struct ena_adapter *adapter)
2020{
2021 struct net_device *netdev;
2022 int irq_idx, i, cpu;
548c4940 2023 int io_queue_count;
1738cd3e
NB
2024
2025 netdev = adapter->netdev;
548c4940 2026 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e 2027
548c4940 2028 for (i = 0; i < io_queue_count; i++) {
1738cd3e
NB
2029 irq_idx = ENA_IO_IRQ_IDX(i);
2030 cpu = i % num_online_cpus();
2031
2032 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
2033 "%s-Tx-Rx-%d", netdev->name, i);
2034 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
2035 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
2036 adapter->irq_tbl[irq_idx].vector =
da6f4cf5 2037 pci_irq_vector(adapter->pdev, irq_idx);
1738cd3e
NB
2038 adapter->irq_tbl[irq_idx].cpu = cpu;
2039
2040 cpumask_set_cpu(cpu,
2041 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
2042 }
2043}
2044
2045static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
2046{
2047 unsigned long flags = 0;
2048 struct ena_irq *irq;
2049 int rc;
2050
2051 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2052 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2053 irq->data);
2054 if (rc) {
2055 netif_err(adapter, probe, adapter->netdev,
2056 "failed to request admin irq\n");
2057 return rc;
2058 }
2059
2060 netif_dbg(adapter, probe, adapter->netdev,
2061 "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
2062 irq->affinity_hint_mask.bits[0], irq->vector);
2063
2064 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2065
2066 return rc;
2067}
2068
2069static int ena_request_io_irq(struct ena_adapter *adapter)
2070{
e02ae6ed 2071 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2072 unsigned long flags = 0;
2073 struct ena_irq *irq;
2074 int rc = 0, i, k;
2075
06443684
NB
2076 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2077 netif_err(adapter, ifup, adapter->netdev,
2078 "Failed to request I/O IRQ: MSI-X is not enabled\n");
2079 return -EINVAL;
2080 }
2081
e02ae6ed 2082 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1738cd3e
NB
2083 irq = &adapter->irq_tbl[i];
2084 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2085 irq->data);
2086 if (rc) {
2087 netif_err(adapter, ifup, adapter->netdev,
2088 "Failed to request I/O IRQ. index %d rc %d\n",
2089 i, rc);
2090 goto err;
2091 }
2092
2093 netif_dbg(adapter, ifup, adapter->netdev,
2094 "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
2095 i, irq->affinity_hint_mask.bits[0], irq->vector);
2096
2097 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2098 }
2099
2100 return rc;
2101
2102err:
2103 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
2104 irq = &adapter->irq_tbl[k];
2105 free_irq(irq->vector, irq->data);
2106 }
2107
2108 return rc;
2109}
2110
2111static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
2112{
2113 struct ena_irq *irq;
2114
2115 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2116 synchronize_irq(irq->vector);
2117 irq_set_affinity_hint(irq->vector, NULL);
2118 free_irq(irq->vector, irq->data);
2119}
2120
2121static void ena_free_io_irq(struct ena_adapter *adapter)
2122{
e02ae6ed 2123 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2124 struct ena_irq *irq;
2125 int i;
2126
2127#ifdef CONFIG_RFS_ACCEL
2128 if (adapter->msix_vecs >= 1) {
2129 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2130 adapter->netdev->rx_cpu_rmap = NULL;
2131 }
2132#endif /* CONFIG_RFS_ACCEL */
2133
e02ae6ed 2134 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1738cd3e
NB
2135 irq = &adapter->irq_tbl[i];
2136 irq_set_affinity_hint(irq->vector, NULL);
2137 free_irq(irq->vector, irq->data);
2138 }
2139}
2140
06443684
NB
2141static void ena_disable_msix(struct ena_adapter *adapter)
2142{
2143 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
2144 pci_free_irq_vectors(adapter->pdev);
2145}
2146
1738cd3e
NB
2147static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
2148{
e02ae6ed 2149 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2150 int i;
2151
2152 if (!netif_running(adapter->netdev))
2153 return;
2154
e02ae6ed 2155 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
1738cd3e
NB
2156 synchronize_irq(adapter->irq_tbl[i].vector);
2157}
2158
548c4940
SJ
2159static void ena_del_napi_in_range(struct ena_adapter *adapter,
2160 int first_index,
2161 int count)
1738cd3e
NB
2162{
2163 int i;
2164
548c4940
SJ
2165 for (i = first_index; i < first_index + count; i++) {
2166 /* Check if napi was initialized before */
2167 if (!ENA_IS_XDP_INDEX(adapter, i) ||
2168 adapter->ena_napi[i].xdp_ring)
2169 netif_napi_del(&adapter->ena_napi[i].napi);
2170 else
2171 WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
2172 adapter->ena_napi[i].xdp_ring);
2173 }
1738cd3e
NB
2174}
2175
548c4940
SJ
2176static void ena_init_napi_in_range(struct ena_adapter *adapter,
2177 int first_index, int count)
1738cd3e 2178{
548c4940 2179 struct ena_napi *napi = {0};
1738cd3e
NB
2180 int i;
2181
548c4940 2182 for (i = first_index; i < first_index + count; i++) {
1738cd3e
NB
2183 napi = &adapter->ena_napi[i];
2184
2185 netif_napi_add(adapter->netdev,
2186 &adapter->ena_napi[i].napi,
548c4940 2187 ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
1738cd3e 2188 ENA_NAPI_BUDGET);
548c4940
SJ
2189
2190 if (!ENA_IS_XDP_INDEX(adapter, i)) {
2191 napi->rx_ring = &adapter->rx_ring[i];
2192 napi->tx_ring = &adapter->tx_ring[i];
2193 } else {
2194 napi->xdp_ring = &adapter->tx_ring[i];
2195 }
1738cd3e
NB
2196 napi->qid = i;
2197 }
2198}
2199
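/* Ring/NAPI index layout implied above (a sketch, assuming ENA_IS_XDP_INDEX()
 * marks indices starting at adapter->xdp_first_ring): with 8 IO queues and an
 * XDP program attached, napi instances 0..7 use ena_io_poll and carry a
 * tx_ring/rx_ring pair, while instances 8..15 use ena_xdp_io_poll and carry
 * only an XDP Tx ring taken from the tail of adapter->tx_ring[].
 */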
548c4940
SJ
2200static void ena_napi_disable_in_range(struct ena_adapter *adapter,
2201 int first_index,
2202 int count)
1738cd3e
NB
2203{
2204 int i;
2205
548c4940 2206 for (i = first_index; i < first_index + count; i++)
1738cd3e
NB
2207 napi_disable(&adapter->ena_napi[i].napi);
2208}
2209
548c4940
SJ
2210static void ena_napi_enable_in_range(struct ena_adapter *adapter,
2211 int first_index,
2212 int count)
1738cd3e
NB
2213{
2214 int i;
2215
548c4940 2216 for (i = first_index; i < first_index + count; i++)
1738cd3e
NB
2217 napi_enable(&adapter->ena_napi[i].napi);
2218}
2219
1738cd3e
NB
2220/* Configure the Rx forwarding */
2221static int ena_rss_configure(struct ena_adapter *adapter)
2222{
2223 struct ena_com_dev *ena_dev = adapter->ena_dev;
2224 int rc;
2225
2226 /* In case the RSS table wasn't initialized by probe */
2227 if (!ena_dev->rss.tbl_log_size) {
2228 rc = ena_rss_init_default(adapter);
d1497638 2229 if (rc && (rc != -EOPNOTSUPP)) {
1738cd3e 2230 netif_err(adapter, ifup, adapter->netdev,
548c4940 2231 "Failed to init RSS rc: %d\n", rc);
1738cd3e
NB
2232 return rc;
2233 }
2234 }
2235
2236 /* Set indirect table */
2237 rc = ena_com_indirect_table_set(ena_dev);
d1497638 2238 if (unlikely(rc && rc != -EOPNOTSUPP))
1738cd3e
NB
2239 return rc;
2240
2241 /* Configure hash function (if supported) */
2242 rc = ena_com_set_hash_function(ena_dev);
d1497638 2243 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1738cd3e
NB
2244 return rc;
2245
2246 /* Configure hash inputs (if supported) */
2247 rc = ena_com_set_hash_ctrl(ena_dev);
d1497638 2248 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1738cd3e
NB
2249 return rc;
2250
2251 return 0;
2252}
2253
2254static int ena_up_complete(struct ena_adapter *adapter)
2255{
7853b49c 2256 int rc;
1738cd3e
NB
2257
2258 rc = ena_rss_configure(adapter);
2259 if (rc)
2260 return rc;
2261
1738cd3e
NB
2262 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
2263
2264 ena_refill_all_rx_bufs(adapter);
2265
2266 /* enable transmits */
2267 netif_tx_start_all_queues(adapter->netdev);
2268
548c4940
SJ
2269 ena_napi_enable_in_range(adapter,
2270 0,
2271 adapter->xdp_num_queues + adapter->num_io_queues);
1738cd3e 2272
1738cd3e
NB
2273 return 0;
2274}
2275
2276static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
2277{
38005ca8 2278 struct ena_com_create_io_ctx ctx;
1738cd3e
NB
2279 struct ena_com_dev *ena_dev;
2280 struct ena_ring *tx_ring;
2281 u32 msix_vector;
2282 u16 ena_qid;
2283 int rc;
2284
2285 ena_dev = adapter->ena_dev;
2286
2287 tx_ring = &adapter->tx_ring[qid];
2288 msix_vector = ENA_IO_IRQ_IDX(qid);
2289 ena_qid = ENA_IO_TXQ_IDX(qid);
2290
38005ca8
AK
2291 memset(&ctx, 0x0, sizeof(ctx));
2292
1738cd3e
NB
2293 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
2294 ctx.qid = ena_qid;
2295 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
2296 ctx.msix_vector = msix_vector;
13ca32a6 2297 ctx.queue_size = tx_ring->ring_size;
1738cd3e
NB
2298 ctx.numa_node = cpu_to_node(tx_ring->cpu);
2299
2300 rc = ena_com_create_io_queue(ena_dev, &ctx);
2301 if (rc) {
2302 netif_err(adapter, ifup, adapter->netdev,
2303 "Failed to create I/O TX queue num %d rc: %d\n",
548c4940 2304 qid, rc);
1738cd3e
NB
2305 return rc;
2306 }
2307
2308 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2309 &tx_ring->ena_com_io_sq,
2310 &tx_ring->ena_com_io_cq);
2311 if (rc) {
2312 netif_err(adapter, ifup, adapter->netdev,
2313 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
2314 qid, rc);
2315 ena_com_destroy_io_queue(ena_dev, ena_qid);
2d2c600a 2316 return rc;
1738cd3e
NB
2317 }
2318
2319 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
2320 return rc;
2321}
2322
548c4940
SJ
2323static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2324 int first_index, int count)
1738cd3e
NB
2325{
2326 struct ena_com_dev *ena_dev = adapter->ena_dev;
2327 int rc, i;
2328
548c4940 2329 for (i = first_index; i < first_index + count; i++) {
1738cd3e
NB
2330 rc = ena_create_io_tx_queue(adapter, i);
2331 if (rc)
2332 goto create_err;
2333 }
2334
2335 return 0;
2336
2337create_err:
548c4940 2338 while (i-- > first_index)
1738cd3e
NB
2339 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2340
2341 return rc;
2342}
2343
2344static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2345{
2346 struct ena_com_dev *ena_dev;
38005ca8 2347 struct ena_com_create_io_ctx ctx;
1738cd3e
NB
2348 struct ena_ring *rx_ring;
2349 u32 msix_vector;
2350 u16 ena_qid;
2351 int rc;
2352
2353 ena_dev = adapter->ena_dev;
2354
2355 rx_ring = &adapter->rx_ring[qid];
2356 msix_vector = ENA_IO_IRQ_IDX(qid);
2357 ena_qid = ENA_IO_RXQ_IDX(qid);
2358
38005ca8
AK
2359 memset(&ctx, 0x0, sizeof(ctx));
2360
1738cd3e
NB
2361 ctx.qid = ena_qid;
2362 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2363 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2364 ctx.msix_vector = msix_vector;
13ca32a6 2365 ctx.queue_size = rx_ring->ring_size;
1738cd3e
NB
2366 ctx.numa_node = cpu_to_node(rx_ring->cpu);
2367
2368 rc = ena_com_create_io_queue(ena_dev, &ctx);
2369 if (rc) {
2370 netif_err(adapter, ifup, adapter->netdev,
2371 "Failed to create I/O RX queue num %d rc: %d\n",
2372 qid, rc);
2373 return rc;
2374 }
2375
2376 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2377 &rx_ring->ena_com_io_sq,
2378 &rx_ring->ena_com_io_cq);
2379 if (rc) {
2380 netif_err(adapter, ifup, adapter->netdev,
2381 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2382 qid, rc);
838c93dc 2383 goto err;
1738cd3e
NB
2384 }
2385
2386 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2387
838c93dc
SJ
2388 return rc;
2389err:
2390 ena_com_destroy_io_queue(ena_dev, ena_qid);
1738cd3e
NB
2391 return rc;
2392}
2393
2394static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2395{
2396 struct ena_com_dev *ena_dev = adapter->ena_dev;
2397 int rc, i;
2398
faa615f9 2399 for (i = 0; i < adapter->num_io_queues; i++) {
1738cd3e
NB
2400 rc = ena_create_io_rx_queue(adapter, i);
2401 if (rc)
2402 goto create_err;
282faf61 2403 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
1738cd3e
NB
2404 }
2405
2406 return 0;
2407
2408create_err:
282faf61
AK
2409 while (i--) {
2410 cancel_work_sync(&adapter->ena_napi[i].dim.work);
1738cd3e 2411 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
282faf61 2412 }
1738cd3e
NB
2413
2414 return rc;
2415}
2416
13ca32a6 2417static void set_io_rings_size(struct ena_adapter *adapter,
548c4940
SJ
2418 int new_tx_size,
2419 int new_rx_size)
13ca32a6
SJ
2420{
2421 int i;
2422
faa615f9 2423 for (i = 0; i < adapter->num_io_queues; i++) {
13ca32a6
SJ
2424 adapter->tx_ring[i].ring_size = new_tx_size;
2425 adapter->rx_ring[i].ring_size = new_rx_size;
2426 }
2427}
2428
2429/* This function allows queue allocation to backoff when the system is
2430 * low on memory. If there is not enough memory to allocate io queues
2431 * the driver will try to allocate smaller queues.
2432 *
2433 * The backoff algorithm is as follows:
 2434 * 1. Try to allocate TX and RX, and if successful,
2435 * 1.1. return success
2436 *
2437 * 2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
2438 *
2439 * 3. If TX or RX is smaller than 256
2440 * 3.1. return failure.
2441 * 4. else
2442 * 4.1. go back to 1.
2443 */
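/* Worked example of the backoff (a sketch, assuming ENA_MIN_RING_SIZE is 256
 * as defined by this driver): requested TX = RX = 1024.
 *   attempt 1: 1024/1024 -> -ENOMEM
 *   attempt 2:  512/512  -> -ENOMEM
 *   attempt 3:  256/256  -> -ENOMEM
 * The next halving would give 128 < ENA_MIN_RING_SIZE, so the function stops
 * retrying and returns the last error. Any error other than -ENOMEM aborts
 * the backoff immediately.
 */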
2444static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2445{
2446 int rc, cur_rx_ring_size, cur_tx_ring_size;
2447 int new_rx_ring_size, new_tx_ring_size;
2448
2449 /* current queue sizes might be set to smaller than the requested
2450 * ones due to past queue allocation failures.
2451 */
2452 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
548c4940 2453 adapter->requested_rx_ring_size);
13ca32a6
SJ
2454
2455 while (1) {
548c4940
SJ
2456 if (ena_xdp_present(adapter)) {
2457 rc = ena_setup_and_create_all_xdp_queues(adapter);
2458
2459 if (rc)
2460 goto err_setup_tx;
2461 }
2462 rc = ena_setup_tx_resources_in_range(adapter,
2463 0,
2464 adapter->num_io_queues);
13ca32a6
SJ
2465 if (rc)
2466 goto err_setup_tx;
2467
548c4940
SJ
2468 rc = ena_create_io_tx_queues_in_range(adapter,
2469 0,
2470 adapter->num_io_queues);
13ca32a6
SJ
2471 if (rc)
2472 goto err_create_tx_queues;
2473
2474 rc = ena_setup_all_rx_resources(adapter);
2475 if (rc)
2476 goto err_setup_rx;
2477
2478 rc = ena_create_all_io_rx_queues(adapter);
2479 if (rc)
2480 goto err_create_rx_queues;
2481
2482 return 0;
2483
2484err_create_rx_queues:
2485 ena_free_all_io_rx_resources(adapter);
2486err_setup_rx:
2487 ena_destroy_all_tx_queues(adapter);
2488err_create_tx_queues:
2489 ena_free_all_io_tx_resources(adapter);
2490err_setup_tx:
2491 if (rc != -ENOMEM) {
2492 netif_err(adapter, ifup, adapter->netdev,
2493 "Queue creation failed with error code %d\n",
548c4940 2494 rc);
13ca32a6
SJ
2495 return rc;
2496 }
2497
2498 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2499 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2500
2501 netif_err(adapter, ifup, adapter->netdev,
2502 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2503 cur_tx_ring_size, cur_rx_ring_size);
2504
2505 new_tx_ring_size = cur_tx_ring_size;
2506 new_rx_ring_size = cur_rx_ring_size;
2507
2508 /* Decrease the size of the larger queue, or
2509 * decrease both if they are the same size.
2510 */
2511 if (cur_rx_ring_size <= cur_tx_ring_size)
2512 new_tx_ring_size = cur_tx_ring_size / 2;
2513 if (cur_rx_ring_size >= cur_tx_ring_size)
2514 new_rx_ring_size = cur_rx_ring_size / 2;
2515
3e5bfb18 2516 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
548c4940 2517 new_rx_ring_size < ENA_MIN_RING_SIZE) {
13ca32a6
SJ
2518 netif_err(adapter, ifup, adapter->netdev,
2519 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2520 ENA_MIN_RING_SIZE);
2521 return rc;
2522 }
2523
2524 netif_err(adapter, ifup, adapter->netdev,
2525 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2526 new_tx_ring_size,
2527 new_rx_ring_size);
2528
2529 set_io_rings_size(adapter, new_tx_ring_size,
2530 new_rx_ring_size);
2531 }
2532}
2533
1738cd3e
NB
2534static int ena_up(struct ena_adapter *adapter)
2535{
548c4940 2536 int io_queue_count, rc, i;
1738cd3e
NB
2537
2538 netdev_dbg(adapter->netdev, "%s\n", __func__);
2539
548c4940 2540 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2541 ena_setup_io_intr(adapter);
2542
78a55d05
AK
2543 /* napi poll functions should be initialized before running
2544 * request_irq(), to handle a rare condition where there is a pending
2545 * interrupt, causing the ISR to fire immediately while the poll
2546 * function wasn't set yet, causing a null dereference
2547 */
548c4940 2548 ena_init_napi_in_range(adapter, 0, io_queue_count);
78a55d05 2549
1738cd3e
NB
2550 rc = ena_request_io_irq(adapter);
2551 if (rc)
2552 goto err_req_irq;
2553
13ca32a6 2554 rc = create_queues_with_size_backoff(adapter);
1738cd3e 2555 if (rc)
13ca32a6 2556 goto err_create_queues_with_backoff;
1738cd3e
NB
2557
2558 rc = ena_up_complete(adapter);
2559 if (rc)
2560 goto err_up;
2561
2562 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2563 netif_carrier_on(adapter->netdev);
2564
2565 u64_stats_update_begin(&adapter->syncp);
2566 adapter->dev_stats.interface_up++;
2567 u64_stats_update_end(&adapter->syncp);
2568
2569 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2570
7853b49c 2571 /* Enable completion queues interrupt */
faa615f9 2572 for (i = 0; i < adapter->num_io_queues; i++)
7853b49c
NB
2573 ena_unmask_interrupt(&adapter->tx_ring[i],
2574 &adapter->rx_ring[i]);
2575
2576 /* schedule napi in case we had pending packets
 2577	 * from the last time we disabled napi
2578 */
548c4940 2579 for (i = 0; i < io_queue_count; i++)
7853b49c
NB
2580 napi_schedule(&adapter->ena_napi[i].napi);
2581
1738cd3e
NB
2582 return rc;
2583
2584err_up:
1738cd3e 2585 ena_destroy_all_tx_queues(adapter);
1738cd3e 2586 ena_free_all_io_tx_resources(adapter);
13ca32a6
SJ
2587 ena_destroy_all_rx_queues(adapter);
2588 ena_free_all_io_rx_resources(adapter);
2589err_create_queues_with_backoff:
1738cd3e
NB
2590 ena_free_io_irq(adapter);
2591err_req_irq:
548c4940 2592 ena_del_napi_in_range(adapter, 0, io_queue_count);
1738cd3e
NB
2593
2594 return rc;
2595}
2596
2597static void ena_down(struct ena_adapter *adapter)
2598{
548c4940
SJ
2599 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2600
1738cd3e
NB
2601 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2602
2603 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2604
2605 u64_stats_update_begin(&adapter->syncp);
2606 adapter->dev_stats.interface_down++;
2607 u64_stats_update_end(&adapter->syncp);
2608
1738cd3e
NB
2609 netif_carrier_off(adapter->netdev);
2610 netif_tx_disable(adapter->netdev);
2611
3f6159db 2612 /* After this point the napi handler won't enable the tx queue */
548c4940 2613 ena_napi_disable_in_range(adapter, 0, io_queue_count);
3f6159db 2614
1738cd3e	2615	/* After the queues are destroyed there won't be any new interrupts */
3f6159db
NB
2616
2617 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2618 int rc;
2619
e2eed0e3 2620 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3f6159db
NB
2621 if (rc)
2622 dev_err(&adapter->pdev->dev, "Device reset failed\n");
58a54b9c
AK
2623 /* stop submitting admin commands on a device that was reset */
2624 ena_com_set_admin_running_state(adapter->ena_dev, false);
3f6159db
NB
2625 }
2626
1738cd3e
NB
2627 ena_destroy_all_io_queues(adapter);
2628
2629 ena_disable_io_intr_sync(adapter);
2630 ena_free_io_irq(adapter);
548c4940 2631 ena_del_napi_in_range(adapter, 0, io_queue_count);
1738cd3e
NB
2632
2633 ena_free_all_tx_bufs(adapter);
2634 ena_free_all_rx_bufs(adapter);
2635 ena_free_all_io_tx_resources(adapter);
2636 ena_free_all_io_rx_resources(adapter);
2637}
2638
2639/* ena_open - Called when a network interface is made active
2640 * @netdev: network interface device structure
2641 *
2642 * Returns 0 on success, negative value on failure
2643 *
2644 * The open entry point is called when a network interface is made
2645 * active by the system (IFF_UP). At this point all resources needed
2646 * for transmit and receive operations are allocated, the interrupt
2647 * handler is registered with the OS, the watchdog timer is started,
2648 * and the stack is notified that the interface is ready.
2649 */
2650static int ena_open(struct net_device *netdev)
2651{
2652 struct ena_adapter *adapter = netdev_priv(netdev);
2653 int rc;
2654
2655 /* Notify the stack of the actual queue counts. */
faa615f9 2656 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
1738cd3e
NB
2657 if (rc) {
2658 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2659 return rc;
2660 }
2661
faa615f9 2662 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
1738cd3e
NB
2663 if (rc) {
2664 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2665 return rc;
2666 }
2667
2668 rc = ena_up(adapter);
2669 if (rc)
2670 return rc;
2671
2672 return rc;
2673}
2674
2675/* ena_close - Disables a network interface
2676 * @netdev: network interface device structure
2677 *
2678 * Returns 0, this is not allowed to fail
2679 *
2680 * The close entry point is called when an interface is de-activated
2681 * by the OS. The hardware is still under the drivers control, but
2682 * needs to be disabled. A global MAC reset is issued to stop the
2683 * hardware, and all transmit and receive resources are freed.
2684 */
2685static int ena_close(struct net_device *netdev)
2686{
2687 struct ena_adapter *adapter = netdev_priv(netdev);
2688
2689 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2690
58a54b9c
AK
2691 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2692 return 0;
2693
1738cd3e
NB
2694 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2695 ena_down(adapter);
2696
ee4552aa
NB
 2697	/* Check for device status and issue reset if needed */
2698 check_for_admin_com_state(adapter);
2699 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2700 netif_err(adapter, ifdown, adapter->netdev,
2701 "Destroy failure, restarting device\n");
2702 ena_dump_stats_to_dmesg(adapter);
2703 /* rtnl lock already obtained in dev_ioctl() layer */
cfa324a5 2704 ena_destroy_device(adapter, false);
ee4552aa
NB
2705 ena_restore_device(adapter);
2706 }
2707
1738cd3e
NB
2708 return 0;
2709}
2710
eece4d2a
SJ
2711int ena_update_queue_sizes(struct ena_adapter *adapter,
2712 u32 new_tx_size,
2713 u32 new_rx_size)
2714{
2413ea97 2715 bool dev_was_up;
eece4d2a 2716
2413ea97 2717 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
eece4d2a
SJ
2718 ena_close(adapter->netdev);
2719 adapter->requested_tx_ring_size = new_tx_size;
2720 adapter->requested_rx_ring_size = new_rx_size;
548c4940
SJ
2721 ena_init_io_rings(adapter,
2722 0,
2723 adapter->xdp_num_queues +
2724 adapter->num_io_queues);
2413ea97
SJ
2725 return dev_was_up ? ena_up(adapter) : 0;
2726}
2727
2728int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2729{
2730 struct ena_com_dev *ena_dev = adapter->ena_dev;
838c93dc 2731 int prev_channel_count;
2413ea97
SJ
2732 bool dev_was_up;
2733
2734 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2735 ena_close(adapter->netdev);
838c93dc 2736 prev_channel_count = adapter->num_io_queues;
2413ea97 2737 adapter->num_io_queues = new_channel_count;
548c4940
SJ
2738 if (ena_xdp_present(adapter) &&
2739 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2740 adapter->xdp_first_ring = new_channel_count;
2741 adapter->xdp_num_queues = new_channel_count;
838c93dc
SJ
2742 if (prev_channel_count > new_channel_count)
2743 ena_xdp_exchange_program_rx_in_range(adapter,
2744 NULL,
2745 new_channel_count,
2746 prev_channel_count);
2747 else
2748 ena_xdp_exchange_program_rx_in_range(adapter,
2749 adapter->xdp_bpf_prog,
2750 prev_channel_count,
2751 new_channel_count);
2752 }
2753
2413ea97
SJ
2754 /* We need to destroy the rss table so that the indirection
2755 * table will be reinitialized by ena_up()
2756 */
2757 ena_com_rss_destroy(ena_dev);
548c4940
SJ
2758 ena_init_io_rings(adapter,
2759 0,
2760 adapter->xdp_num_queues +
2761 adapter->num_io_queues);
2413ea97 2762 return dev_was_up ? ena_open(adapter->netdev) : 0;
eece4d2a
SJ
2763}
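/* Example of the XDP bookkeeping above (illustrative): with an XDP program
 * attached and the channel count shrinking from 8 to 4, the program is first
 * detached from rx rings 4..7 via
 * ena_xdp_exchange_program_rx_in_range(adapter, NULL, 4, 8) before those
 * rings go away; growing from 4 to 8 instead attaches the existing program to
 * the newly added rx rings 4..7.
 */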
2764
1738cd3e
NB
2765static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
2766{
2767 u32 mss = skb_shinfo(skb)->gso_size;
2768 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2769 u8 l4_protocol = 0;
2770
2771 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2772 ena_tx_ctx->l4_csum_enable = 1;
2773 if (mss) {
2774 ena_tx_ctx->tso_enable = 1;
2775 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2776 ena_tx_ctx->l4_csum_partial = 0;
2777 } else {
2778 ena_tx_ctx->tso_enable = 0;
2779 ena_meta->l4_hdr_len = 0;
2780 ena_tx_ctx->l4_csum_partial = 1;
2781 }
2782
2783 switch (ip_hdr(skb)->version) {
2784 case IPVERSION:
2785 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2786 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2787 ena_tx_ctx->df = 1;
2788 if (mss)
2789 ena_tx_ctx->l3_csum_enable = 1;
2790 l4_protocol = ip_hdr(skb)->protocol;
2791 break;
2792 case 6:
2793 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2794 l4_protocol = ipv6_hdr(skb)->nexthdr;
2795 break;
2796 default:
2797 break;
2798 }
2799
2800 if (l4_protocol == IPPROTO_TCP)
2801 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2802 else
2803 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2804
2805 ena_meta->mss = mss;
2806 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2807 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2808 ena_tx_ctx->meta_valid = 1;
2809
2810 } else {
2811 ena_tx_ctx->meta_valid = 0;
2812 }
2813}
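/* Offload example for the helper above (illustrative skb): an IPv4/TCP packet
 * with CHECKSUM_PARTIAL and gso_size = 1448 gets tso_enable = 1,
 * l4_csum_enable = 1, l4_csum_partial = 0, l3_csum_enable = 1,
 * l3_proto = IPV4 (df set when IP_DF is present), l4_proto = TCP and
 * mss = 1448. A non-TSO IPv6/UDP packet with CHECKSUM_PARTIAL instead gets
 * tso_enable = 0, l4_csum_partial = 1, l3_proto = IPV6 and l4_proto = UDP.
 */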
2814
2815static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2816 struct sk_buff *skb)
2817{
2818 int num_frags, header_len, rc;
2819
2820 num_frags = skb_shinfo(skb)->nr_frags;
2821 header_len = skb_headlen(skb);
2822
2823 if (num_frags < tx_ring->sgl_size)
2824 return 0;
2825
2826 if ((num_frags == tx_ring->sgl_size) &&
2827 (header_len < tx_ring->tx_max_header_size))
2828 return 0;
2829
2830 u64_stats_update_begin(&tx_ring->syncp);
2831 tx_ring->tx_stats.linearize++;
2832 u64_stats_update_end(&tx_ring->syncp);
2833
2834 rc = skb_linearize(skb);
2835 if (unlikely(rc)) {
2836 u64_stats_update_begin(&tx_ring->syncp);
2837 tx_ring->tx_stats.linearize_failed++;
2838 u64_stats_update_end(&tx_ring->syncp);
2839 }
2840
2841 return rc;
2842}
2843
38005ca8
AK
2844static int ena_tx_map_skb(struct ena_ring *tx_ring,
2845 struct ena_tx_buffer *tx_info,
2846 struct sk_buff *skb,
2847 void **push_hdr,
2848 u16 *header_len)
1738cd3e 2849{
38005ca8 2850 struct ena_adapter *adapter = tx_ring->adapter;
1738cd3e 2851 struct ena_com_buf *ena_buf;
1738cd3e 2852 dma_addr_t dma;
38005ca8
AK
2853 u32 skb_head_len, frag_len, last_frag;
2854 u16 push_len = 0;
2855 u16 delta = 0;
2856 int i = 0;
1738cd3e 2857
38005ca8 2858 skb_head_len = skb_headlen(skb);
1738cd3e 2859 tx_info->skb = skb;
38005ca8 2860 ena_buf = tx_info->bufs;
1738cd3e
NB
2861
2862 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
38005ca8
AK
 2863		/* When the device is in LLQ mode, the driver will copy
 2864		 * the header into the device memory space.
 2865		 * The ena_com layer assumes the header is in a linear
2866 * memory space.
2867 * This assumption might be wrong since part of the header
2868 * can be in the fragmented buffers.
2869 * Use skb_header_pointer to make sure the header is in a
2870 * linear memory space.
2871 */
2872
2873 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2874 *push_hdr = skb_header_pointer(skb, 0, push_len,
2875 tx_ring->push_buf_intermediate_buf);
2876 *header_len = push_len;
2877 if (unlikely(skb->data != *push_hdr)) {
2878 u64_stats_update_begin(&tx_ring->syncp);
2879 tx_ring->tx_stats.llq_buffer_copy++;
2880 u64_stats_update_end(&tx_ring->syncp);
2881
2882 delta = push_len - skb_head_len;
2883 }
1738cd3e 2884 } else {
38005ca8
AK
2885 *push_hdr = NULL;
2886 *header_len = min_t(u32, skb_head_len,
2887 tx_ring->tx_max_header_size);
1738cd3e
NB
2888 }
2889
38005ca8 2890 netif_dbg(adapter, tx_queued, adapter->netdev,
1738cd3e 2891 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
38005ca8 2892 *push_hdr, push_len);
1738cd3e 2893
38005ca8 2894 if (skb_head_len > push_len) {
1738cd3e 2895 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
38005ca8
AK
2896 skb_head_len - push_len, DMA_TO_DEVICE);
2897 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
1738cd3e
NB
2898 goto error_report_dma_error;
2899
2900 ena_buf->paddr = dma;
38005ca8 2901 ena_buf->len = skb_head_len - push_len;
1738cd3e
NB
2902
2903 ena_buf++;
2904 tx_info->num_of_bufs++;
38005ca8
AK
2905 tx_info->map_linear_data = 1;
2906 } else {
2907 tx_info->map_linear_data = 0;
1738cd3e
NB
2908 }
2909
2910 last_frag = skb_shinfo(skb)->nr_frags;
2911
2912 for (i = 0; i < last_frag; i++) {
2913 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2914
38005ca8
AK
2915 frag_len = skb_frag_size(frag);
2916
2917 if (unlikely(delta >= frag_len)) {
2918 delta -= frag_len;
2919 continue;
2920 }
2921
2922 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2923 frag_len - delta, DMA_TO_DEVICE);
2924 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
1738cd3e
NB
2925 goto error_report_dma_error;
2926
2927 ena_buf->paddr = dma;
38005ca8 2928 ena_buf->len = frag_len - delta;
1738cd3e 2929 ena_buf++;
38005ca8
AK
2930 tx_info->num_of_bufs++;
2931 delta = 0;
1738cd3e
NB
2932 }
2933
38005ca8
AK
2934 return 0;
2935
2936error_report_dma_error:
2937 u64_stats_update_begin(&tx_ring->syncp);
2938 tx_ring->tx_stats.dma_mapping_err++;
2939 u64_stats_update_end(&tx_ring->syncp);
2940 netdev_warn(adapter->netdev, "failed to map skb\n");
2941
2942 tx_info->skb = NULL;
2943
2944 tx_info->num_of_bufs += i;
548c4940 2945 ena_unmap_tx_buff(tx_ring, tx_info);
38005ca8
AK
2946
2947 return -EINVAL;
2948}
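/* LLQ mapping example for the helper above (hypothetical sizes): with
 * tx_max_header_size = 96 and an skb whose linear part holds only 64 bytes,
 * push_len = 96, so skb_header_pointer() copies the header into
 * push_buf_intermediate_buf (it spans into the first fragment) and
 * delta = 96 - 64 = 32. The linear part is then fully covered by the pushed
 * header and is not DMA mapped, and the first 32 bytes of frag[0] are skipped
 * when the fragments are mapped.
 */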
2949
2950/* Called with netif_tx_lock. */
2951static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2952{
2953 struct ena_adapter *adapter = netdev_priv(dev);
2954 struct ena_tx_buffer *tx_info;
2955 struct ena_com_tx_ctx ena_tx_ctx;
2956 struct ena_ring *tx_ring;
2957 struct netdev_queue *txq;
2958 void *push_hdr;
2959 u16 next_to_use, req_id, header_len;
548c4940 2960 int qid, rc;
38005ca8
AK
2961
2962 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2963 /* Determine which tx ring we will be placed on */
2964 qid = skb_get_queue_mapping(skb);
2965 tx_ring = &adapter->tx_ring[qid];
2966 txq = netdev_get_tx_queue(dev, qid);
2967
2968 rc = ena_check_and_linearize_skb(tx_ring, skb);
2969 if (unlikely(rc))
2970 goto error_drop_packet;
2971
2972 skb_tx_timestamp(skb);
2973
2974 next_to_use = tx_ring->next_to_use;
f9172498 2975 req_id = tx_ring->free_ids[next_to_use];
38005ca8
AK
2976 tx_info = &tx_ring->tx_buffer_info[req_id];
2977 tx_info->num_of_bufs = 0;
2978
2979 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2980
2981 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2982 if (unlikely(rc))
2983 goto error_drop_packet;
1738cd3e
NB
2984
2985 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2986 ena_tx_ctx.ena_bufs = tx_info->bufs;
2987 ena_tx_ctx.push_header = push_hdr;
2988 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2989 ena_tx_ctx.req_id = req_id;
2990 ena_tx_ctx.header_len = header_len;
2991
2992 /* set flags and meta data */
2993 ena_tx_csum(&ena_tx_ctx, skb);
2994
548c4940
SJ
2995 rc = ena_xmit_common(dev,
2996 tx_ring,
2997 tx_info,
2998 &ena_tx_ctx,
2999 next_to_use,
3000 skb->len);
3001 if (rc)
1738cd3e 3002 goto error_unmap_dma;
1738cd3e
NB
3003
3004 netdev_tx_sent_queue(txq, skb->len);
3005
1738cd3e
NB
 3006	/* stop the queue when no more space is available; the packet can need up
 3007	 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the
 3008	 * header (if the header is larger than tx_max_header_size).
3009 */
689b2bda
AK
3010 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3011 tx_ring->sgl_size + 2))) {
1738cd3e
NB
3012 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3013 __func__, qid);
3014
3015 netif_tx_stop_queue(txq);
3016 u64_stats_update_begin(&tx_ring->syncp);
3017 tx_ring->tx_stats.queue_stop++;
3018 u64_stats_update_end(&tx_ring->syncp);
3019
 3020		/* There is a rare condition where this function decides to
3021 * stop the queue but meanwhile clean_tx_irq updates
3022 * next_to_completion and terminates.
3023 * The queue will remain stopped forever.
37dff155
NB
3024 * To solve this issue add a mb() to make sure that
 3025		 * netif_tx_stop_queue() write is visible before checking if
3026 * there is additional space in the queue.
1738cd3e 3027 */
37dff155 3028 smp_mb();
1738cd3e 3029
689b2bda
AK
3030 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3031 ENA_TX_WAKEUP_THRESH)) {
1738cd3e
NB
3032 netif_tx_wake_queue(txq);
3033 u64_stats_update_begin(&tx_ring->syncp);
3034 tx_ring->tx_stats.queue_wakeup++;
3035 u64_stats_update_end(&tx_ring->syncp);
3036 }
3037 }
3038
6b16f9ee 3039 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
37dff155
NB
3040 /* trigger the dma engine. ena_com_write_sq_doorbell()
3041 * has a mb
3042 */
3043 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
1738cd3e
NB
3044 u64_stats_update_begin(&tx_ring->syncp);
3045 tx_ring->tx_stats.doorbells++;
3046 u64_stats_update_end(&tx_ring->syncp);
3047 }
3048
3049 return NETDEV_TX_OK;
3050
1738cd3e 3051error_unmap_dma:
548c4940 3052 ena_unmap_tx_buff(tx_ring, tx_info);
38005ca8 3053 tx_info->skb = NULL;
1738cd3e
NB
3054
3055error_drop_packet:
1738cd3e
NB
3056 dev_kfree_skb(skb);
3057 return NETDEV_TX_OK;
3058}
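/* Illustrative interleaving for the smp_mb() in the xmit path above:
 *
 *   CPU0 (ena_start_xmit)                CPU1 (ena_clean_tx_irq)
 *   sees the SQ almost full
 *   netif_tx_stop_queue(txq)
 *                                        completes packets, frees SQ space,
 *                                        but may not yet observe the stopped
 *                                        state and therefore not wake the
 *                                        queue
 *   smp_mb(); re-checks free space
 *   -> sees the space freed by CPU1 and
 *      wakes the queue itself
 *
 * Without the barrier plus re-check, both sides can miss each other and the
 * queue would stay stopped forever, as the comment in the function explains.
 */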
3059
1738cd3e 3060static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
a350ecce 3061 struct net_device *sb_dev)
1738cd3e
NB
3062{
3063 u16 qid;
 3064	/* we suspect that this is good for in-kernel network services that
 3065	 * want to loop incoming skb rx to tx; in normal user-generated traffic
 3066	 * we will most probably not get here
3067 */
3068 if (skb_rx_queue_recorded(skb))
3069 qid = skb_get_rx_queue(skb);
3070 else
a350ecce 3071 qid = netdev_pick_tx(dev, skb, NULL);
1738cd3e
NB
3072
3073 return qid;
3074}
3075
095f2f1f
AK
3076static void ena_config_host_info(struct ena_com_dev *ena_dev,
3077 struct pci_dev *pdev)
1738cd3e
NB
3078{
3079 struct ena_admin_host_info *host_info;
3080 int rc;
3081
3082 /* Allocate only the host info */
3083 rc = ena_com_allocate_host_info(ena_dev);
3084 if (rc) {
3085 pr_err("Cannot allocate host info\n");
3086 return;
3087 }
3088
3089 host_info = ena_dev->host_attr.host_info;
3090
095f2f1f 3091 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
1738cd3e
NB
3092 host_info->os_type = ENA_ADMIN_OS_LINUX;
3093 host_info->kernel_ver = LINUX_VERSION_CODE;
f9133088 3094 strlcpy(host_info->kernel_ver_str, utsname()->version,
1738cd3e
NB
3095 sizeof(host_info->kernel_ver_str) - 1);
3096 host_info->os_dist = 0;
3097 strncpy(host_info->os_dist_str, utsname()->release,
3098 sizeof(host_info->os_dist_str) - 1);
3099 host_info->driver_version =
3100 (DRV_MODULE_VER_MAJOR) |
3101 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
095f2f1f
AK
3102 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3103 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
3104 host_info->num_cpus = num_online_cpus();
1738cd3e 3105
bd21b0cc
AK
3106 host_info->driver_supported_features =
3107 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
3108
1738cd3e
NB
3109 rc = ena_com_set_host_attributes(ena_dev);
3110 if (rc) {
d1497638 3111 if (rc == -EOPNOTSUPP)
1738cd3e
NB
3112 pr_warn("Cannot set host attributes\n");
3113 else
3114 pr_err("Cannot set host attributes\n");
3115
3116 goto err;
3117 }
3118
3119 return;
3120
3121err:
3122 ena_com_delete_host_info(ena_dev);
3123}
3124
3125static void ena_config_debug_area(struct ena_adapter *adapter)
3126{
3127 u32 debug_area_size;
3128 int rc, ss_count;
3129
3130 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3131 if (ss_count <= 0) {
3132 netif_err(adapter, drv, adapter->netdev,
3133 "SS count is negative\n");
3134 return;
3135 }
3136
 3137	/* allocate 32 bytes for each string and 64 bits for the value */
3138 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
3139
3140 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3141 if (rc) {
3142 pr_err("Cannot allocate debug area\n");
3143 return;
3144 }
3145
3146 rc = ena_com_set_host_attributes(adapter->ena_dev);
3147 if (rc) {
d1497638 3148 if (rc == -EOPNOTSUPP)
1738cd3e
NB
3149 netif_warn(adapter, drv, adapter->netdev,
3150 "Cannot set host attributes\n");
3151 else
3152 netif_err(adapter, drv, adapter->netdev,
3153 "Cannot set host attributes\n");
3154 goto err;
3155 }
3156
3157 return;
3158err:
3159 ena_com_delete_debug_area(adapter->ena_dev);
3160}
3161
bc1f4470 3162static void ena_get_stats64(struct net_device *netdev,
3163 struct rtnl_link_stats64 *stats)
1738cd3e
NB
3164{
3165 struct ena_adapter *adapter = netdev_priv(netdev);
d81db240
NB
3166 struct ena_ring *rx_ring, *tx_ring;
3167 unsigned int start;
3168 u64 rx_drops;
3169 int i;
1738cd3e
NB
3170
3171 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
bc1f4470 3172 return;
1738cd3e 3173
faa615f9 3174 for (i = 0; i < adapter->num_io_queues; i++) {
d81db240
NB
3175 u64 bytes, packets;
3176
3177 tx_ring = &adapter->tx_ring[i];
1738cd3e 3178
d81db240
NB
3179 do {
3180 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
3181 packets = tx_ring->tx_stats.cnt;
3182 bytes = tx_ring->tx_stats.bytes;
3183 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
1738cd3e 3184
d81db240
NB
3185 stats->tx_packets += packets;
3186 stats->tx_bytes += bytes;
3187
3188 rx_ring = &adapter->rx_ring[i];
3189
3190 do {
3191 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
3192 packets = rx_ring->rx_stats.cnt;
3193 bytes = rx_ring->rx_stats.bytes;
3194 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
3195
3196 stats->rx_packets += packets;
3197 stats->rx_bytes += bytes;
3198 }
3199
3200 do {
3201 start = u64_stats_fetch_begin_irq(&adapter->syncp);
3202 rx_drops = adapter->dev_stats.rx_drops;
3203 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
1738cd3e 3204
d81db240 3205 stats->rx_dropped = rx_drops;
1738cd3e
NB
3206
3207 stats->multicast = 0;
3208 stats->collisions = 0;
3209
3210 stats->rx_length_errors = 0;
3211 stats->rx_crc_errors = 0;
3212 stats->rx_frame_errors = 0;
3213 stats->rx_fifo_errors = 0;
3214 stats->rx_missed_errors = 0;
3215 stats->tx_window_errors = 0;
3216
3217 stats->rx_errors = 0;
3218 stats->tx_errors = 0;
1738cd3e
NB
3219}
3220
3221static const struct net_device_ops ena_netdev_ops = {
3222 .ndo_open = ena_open,
3223 .ndo_stop = ena_close,
3224 .ndo_start_xmit = ena_start_xmit,
3225 .ndo_select_queue = ena_select_queue,
3226 .ndo_get_stats64 = ena_get_stats64,
3227 .ndo_tx_timeout = ena_tx_timeout,
3228 .ndo_change_mtu = ena_change_mtu,
3229 .ndo_set_mac_address = NULL,
3230 .ndo_validate_addr = eth_validate_addr,
838c93dc 3231 .ndo_bpf = ena_xdp,
1738cd3e
NB
3232};
3233
1738cd3e
NB
3234static int ena_device_validate_params(struct ena_adapter *adapter,
3235 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3236{
3237 struct net_device *netdev = adapter->netdev;
3238 int rc;
3239
3240 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3241 adapter->mac_addr);
3242 if (!rc) {
3243 netif_err(adapter, drv, netdev,
3244 "Error, mac address are different\n");
3245 return -EINVAL;
3246 }
3247
1738cd3e
NB
3248 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3249 netif_err(adapter, drv, netdev,
3250 "Error, device max mtu is smaller than netdev MTU\n");
3251 return -EINVAL;
3252 }
3253
3254 return 0;
3255}
3256
3257static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3258 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3259 bool *wd_state)
3260{
3261 struct device *dev = &pdev->dev;
3262 bool readless_supported;
3263 u32 aenq_groups;
3264 int dma_width;
3265 int rc;
3266
3267 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3268 if (rc) {
3269 dev_err(dev, "failed to init mmio read less\n");
3270 return rc;
3271 }
3272
 3273	/* The PCIe configuration space revision id indicates whether mmio reg
3274 * read is disabled
3275 */
3276 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3277 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3278
e2eed0e3 3279 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
1738cd3e
NB
3280 if (rc) {
3281 dev_err(dev, "Can not reset device\n");
3282 goto err_mmio_read_less;
3283 }
3284
3285 rc = ena_com_validate_version(ena_dev);
3286 if (rc) {
3287 dev_err(dev, "device version is too low\n");
3288 goto err_mmio_read_less;
3289 }
3290
3291 dma_width = ena_com_get_dma_width(ena_dev);
3292 if (dma_width < 0) {
3293 dev_err(dev, "Invalid dma width value %d", dma_width);
6e22066f 3294 rc = dma_width;
1738cd3e
NB
3295 goto err_mmio_read_less;
3296 }
3297
3298 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3299 if (rc) {
3300 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
3301 goto err_mmio_read_less;
3302 }
3303
3304 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3305 if (rc) {
3306 dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
3307 rc);
3308 goto err_mmio_read_less;
3309 }
3310
3311 /* ENA admin level init */
f1e90f6e 3312 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
1738cd3e
NB
3313 if (rc) {
3314 dev_err(dev,
3315 "Can not initialize ena admin queue with device\n");
3316 goto err_mmio_read_less;
3317 }
3318
3319 /* To enable the msix interrupts the driver needs to know the number
3320 * of queues. So the driver uses polling mode to retrieve this
3321 * information
3322 */
3323 ena_com_set_admin_polling_mode(ena_dev, true);
3324
095f2f1f 3325 ena_config_host_info(ena_dev, pdev);
dd8427a7 3326
1738cd3e
NB
 3327	/* Get Device Attributes */
3328 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3329 if (rc) {
3330 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3331 goto err_admin_init;
3332 }
3333
 3334	/* Try to turn on all the available aenq groups */
3335 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3336 BIT(ENA_ADMIN_FATAL_ERROR) |
3337 BIT(ENA_ADMIN_WARNING) |
3338 BIT(ENA_ADMIN_NOTIFICATION) |
3339 BIT(ENA_ADMIN_KEEP_ALIVE);
3340
3341 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3342
3343 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3344 if (rc) {
3345 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3346 goto err_admin_init;
3347 }
3348
3349 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3350
1738cd3e
NB
3351 return 0;
3352
3353err_admin_init:
dd8427a7 3354 ena_com_delete_host_info(ena_dev);
1738cd3e
NB
3355 ena_com_admin_destroy(ena_dev);
3356err_mmio_read_less:
3357 ena_com_mmio_reg_read_request_destroy(ena_dev);
3358
3359 return rc;
3360}
3361
4d192660 3362static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
1738cd3e
NB
3363{
3364 struct ena_com_dev *ena_dev = adapter->ena_dev;
3365 struct device *dev = &adapter->pdev->dev;
3366 int rc;
3367
4d192660 3368 rc = ena_enable_msix(adapter);
1738cd3e
NB
3369 if (rc) {
3370 dev_err(dev, "Can not reserve msix vectors\n");
3371 return rc;
3372 }
3373
3374 ena_setup_mgmnt_intr(adapter);
3375
3376 rc = ena_request_mgmnt_irq(adapter);
3377 if (rc) {
3378 dev_err(dev, "Can not setup management interrupts\n");
3379 goto err_disable_msix;
3380 }
3381
3382 ena_com_set_admin_polling_mode(ena_dev, false);
3383
3384 ena_com_admin_aenq_enable(ena_dev);
3385
3386 return 0;
3387
3388err_disable_msix:
06443684
NB
3389 ena_disable_msix(adapter);
3390
1738cd3e
NB
3391 return rc;
3392}
3393
cfa324a5 3394static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
1738cd3e 3395{
1738cd3e
NB
3396 struct net_device *netdev = adapter->netdev;
3397 struct ena_com_dev *ena_dev = adapter->ena_dev;
8c5c7abd 3398 bool dev_up;
3f6159db 3399
fe870c77
NB
3400 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3401 return;
3402
3f6159db
NB
3403 netif_carrier_off(netdev);
3404
1738cd3e
NB
3405 del_timer_sync(&adapter->timer_service);
3406
1738cd3e 3407 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
8c5c7abd 3408 adapter->dev_up_before_reset = dev_up;
cfa324a5
NB
3409 if (!graceful)
3410 ena_com_set_admin_running_state(ena_dev, false);
1738cd3e 3411
ee4552aa
NB
3412 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3413 ena_down(adapter);
1738cd3e 3414
bd791175	3415	/* Stop the device from sending AENQ events (in case the reset flag is set
58a54b9c	3416	 * and the device is up, ena_down() has already reset the device).
8c5c7abd
NB
3417 */
3418 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3419 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3420
1738cd3e
NB
3421 ena_free_mgmnt_irq(adapter);
3422
06443684 3423 ena_disable_msix(adapter);
1738cd3e
NB
3424
3425 ena_com_abort_admin_commands(ena_dev);
3426
3427 ena_com_wait_for_abort_completion(ena_dev);
3428
3429 ena_com_admin_destroy(ena_dev);
3430
3431 ena_com_mmio_reg_read_request_destroy(ena_dev);
3432
e2eed0e3 3433 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
8c5c7abd 3434
3f6159db 3435 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
fe870c77 3436 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
8c5c7abd 3437}
3f6159db 3438
8c5c7abd
NB
3439static int ena_restore_device(struct ena_adapter *adapter)
3440{
3441 struct ena_com_dev_get_features_ctx get_feat_ctx;
3442 struct ena_com_dev *ena_dev = adapter->ena_dev;
3443 struct pci_dev *pdev = adapter->pdev;
3444 bool wd_state;
3445 int rc;
1738cd3e 3446
d18e4f68 3447 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1738cd3e
NB
3448 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
3449 if (rc) {
3450 dev_err(&pdev->dev, "Can not initialize device\n");
3451 goto err;
3452 }
3453 adapter->wd_state = wd_state;
3454
3455 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3456 if (rc) {
3457 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3458 goto err_device_destroy;
3459 }
3460
4d192660 3461 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
1738cd3e
NB
3462 if (rc) {
3463 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3464 goto err_device_destroy;
3465 }
 3466	/* If the interface was up before the reset, bring it up */
8c5c7abd 3467 if (adapter->dev_up_before_reset) {
1738cd3e
NB
3468 rc = ena_up(adapter);
3469 if (rc) {
3470 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3471 goto err_disable_msix;
3472 }
3473 }
3474
fe870c77 3475 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
e1f1bd9b
AK
3476
3477 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3478 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3479 netif_carrier_on(adapter->netdev);
3480
1738cd3e 3481 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
38005ca8
AK
3482 dev_err(&pdev->dev,
3483 "Device reset completed successfully, Driver info: %s\n",
3484 version);
1738cd3e 3485
8c5c7abd 3486 return rc;
1738cd3e
NB
3487err_disable_msix:
3488 ena_free_mgmnt_irq(adapter);
06443684 3489 ena_disable_msix(adapter);
1738cd3e 3490err_device_destroy:
d7703ddb
AK
3491 ena_com_abort_admin_commands(ena_dev);
3492 ena_com_wait_for_abort_completion(ena_dev);
1738cd3e 3493 ena_com_admin_destroy(ena_dev);
d7703ddb 3494 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
e76ad21d 3495 ena_com_mmio_reg_read_request_destroy(ena_dev);
1738cd3e 3496err:
22b331c9 3497 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
d18e4f68 3498 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1738cd3e
NB
3499 dev_err(&pdev->dev,
3500 "Reset attempt failed. Can not reset the device\n");
8c5c7abd
NB
3501
3502 return rc;
3503}
3504
3505static void ena_fw_reset_device(struct work_struct *work)
3506{
3507 struct ena_adapter *adapter =
3508 container_of(work, struct ena_adapter, reset_task);
3509 struct pci_dev *pdev = adapter->pdev;
3510
3511 if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3512 dev_err(&pdev->dev,
3513 "device reset schedule while reset bit is off\n");
3514 return;
3515 }
3516 rtnl_lock();
cfa324a5 3517 ena_destroy_device(adapter, false);
8c5c7abd
NB
3518 ena_restore_device(adapter);
3519 rtnl_unlock();
1738cd3e
NB
3520}
3521
8510e1a3
NB
3522static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3523 struct ena_ring *rx_ring)
3524{
3525 if (likely(rx_ring->first_interrupt))
3526 return 0;
3527
3528 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3529 return 0;
3530
3531 rx_ring->no_interrupt_event_cnt++;
3532
3533 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3534 netif_err(adapter, rx_err, adapter->netdev,
3535 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3536 rx_ring->qid);
3537 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3538 smp_mb__before_atomic();
3539 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3540 return -EIO;
3541 }
3542
3543 return 0;
3544}
3545
3546static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3547 struct ena_ring *tx_ring)
1738cd3e
NB
3548{
3549 struct ena_tx_buffer *tx_buf;
3550 unsigned long last_jiffies;
800c55cb 3551 u32 missed_tx = 0;
11095fdb 3552 int i, rc = 0;
800c55cb
NB
3553
3554 for (i = 0; i < tx_ring->ring_size; i++) {
3555 tx_buf = &tx_ring->tx_buffer_info[i];
3556 last_jiffies = tx_buf->last_jiffies;
8510e1a3
NB
3557
3558 if (last_jiffies == 0)
3559 /* no pending Tx at this location */
3560 continue;
3561
3562 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
3563 2 * adapter->missing_tx_completion_to))) {
 3564			/* If after the graceful period the interrupt is still not
3565 * received, we schedule a reset
3566 */
3567 netif_err(adapter, tx_err, adapter->netdev,
3568 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3569 tx_ring->qid);
3570 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3571 smp_mb__before_atomic();
3572 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3573 return -EIO;
3574 }
3575
3576 if (unlikely(time_is_before_jiffies(last_jiffies +
3577 adapter->missing_tx_completion_to))) {
800c55cb
NB
3578 if (!tx_buf->print_once)
3579 netif_notice(adapter, tx_err, adapter->netdev,
3580 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
3581 tx_ring->qid, i);
3582
3583 tx_buf->print_once = 1;
3584 missed_tx++;
800c55cb
NB
3585 }
3586 }
3587
11095fdb
NB
3588 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3589 netif_err(adapter, tx_err, adapter->netdev,
3590 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3591 missed_tx,
3592 adapter->missing_tx_completion_threshold);
3593 adapter->reset_reason =
3594 ENA_REGS_RESET_MISS_TX_CMPL;
3595 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3596 rc = -EIO;
3597 }
3598
3599 u64_stats_update_begin(&tx_ring->syncp);
3600 tx_ring->tx_stats.missed_tx = missed_tx;
3601 u64_stats_update_end(&tx_ring->syncp);
3602
3603 return rc;
800c55cb
NB
3604}
3605
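/* Periodic health check: scan up to ENA_MONITORED_TX_QUEUES IO queues per
 * call (resuming round-robin from last_monitored_tx_qid) for missing Tx
 * completions and missing Rx interrupts.
 */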
8510e1a3 3606static void check_for_missing_completions(struct ena_adapter *adapter)
800c55cb 3607{
1738cd3e 3608 struct ena_ring *tx_ring;
8510e1a3 3609 struct ena_ring *rx_ring;
800c55cb 3610 int i, budget, rc;
548c4940 3611 int io_queue_count;
1738cd3e 3612
548c4940 3613 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
1738cd3e
NB
3614 /* Make sure the device state set by another process is observed before checking the flags below */
3615 smp_rmb();
3616
3617 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3618 return;
3619
3f6159db
NB
3620 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3621 return;
3622
82ef30f1
NB
3623 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3624 return;
3625
1738cd3e
NB
3626 budget = ENA_MONITORED_TX_QUEUES;
3627
548c4940 3628 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
1738cd3e 3629 tx_ring = &adapter->tx_ring[i];
8510e1a3
NB
3630 rx_ring = &adapter->rx_ring[i];
3631
3632 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3633 if (unlikely(rc))
3634 return;
1738cd3e 3635
548c4940
SJ
3636 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3637 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
800c55cb
NB
3638 if (unlikely(rc))
3639 return;
1738cd3e
NB
3640
3641 budget--;
3642 if (!budget)
3643 break;
3644 }
3645
548c4940 3646 adapter->last_monitored_tx_qid = i % io_queue_count;
1738cd3e
NB
3647}
3648
a3af7c18
NB
3649/* trigger napi schedule after 2 consecutive detections */
3650#define EMPTY_RX_REFILL 2
3651/* For the rare case where the device runs out of Rx descriptors and the
3652 * napi handler failed to refill new Rx descriptors (due to a lack of memory
3653 * for example).
3654 * This case will lead to a deadlock:
3655 * The device won't send interrupts since all the new Rx packets will be dropped
3656 * The napi handler won't allocate new Rx descriptors so the device won't be
3657 * able to send new packets.
3658 *
3659 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
3660 * It is recommended to have at least 512MB, with a minimum of 128MB for
3661 * constrained environments.
3662 *
3663 * When such a situation is detected - Reschedule napi
3664 */
3665static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3666{
3667 struct ena_ring *rx_ring;
3668 int i, refill_required;
3669
3670 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3671 return;
3672
3673 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3674 return;
3675
faa615f9 3676 for (i = 0; i < adapter->num_io_queues; i++) {
a3af7c18
NB
3677 rx_ring = &adapter->rx_ring[i];
3678
3679 refill_required =
689b2bda 3680 ena_com_free_desc(rx_ring->ena_com_io_sq);
a3af7c18
NB
3681 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3682 rx_ring->empty_rx_queue++;
3683
3684 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3685 u64_stats_update_begin(&rx_ring->syncp);
3686 rx_ring->rx_stats.empty_rx_ring++;
3687 u64_stats_update_end(&rx_ring->syncp);
3688
3689 netif_err(adapter, drv, adapter->netdev,
3690 "trigger refill for ring %d\n", i);
3691
3692 napi_schedule(rx_ring->napi);
3693 rx_ring->empty_rx_queue = 0;
3694 }
3695 } else {
3696 rx_ring->empty_rx_queue = 0;
3697 }
3698 }
3699}
3700
1738cd3e
NB
3701/* Check for keep alive expiration */
3702static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3703{
3704 unsigned long keep_alive_expired;
3705
3706 if (!adapter->wd_state)
3707 return;
3708
82ef30f1
NB
3709 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3710 return;
3711
2a6e5fa2
AK
3712 keep_alive_expired = adapter->last_keep_alive_jiffies +
3713 adapter->keep_alive_timeout;
1738cd3e
NB
3714 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3715 netif_err(adapter, drv, adapter->netdev,
3716 "Keep alive watchdog timeout.\n");
3717 u64_stats_update_begin(&adapter->syncp);
3718 adapter->dev_stats.wd_expired++;
3719 u64_stats_update_end(&adapter->syncp);
e2eed0e3 3720 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
1738cd3e
NB
3721 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3722 }
3723}
3724
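/* Verify the admin queue is still running; if not, count the event and
 * schedule a reset with reason ENA_REGS_RESET_ADMIN_TO.
 */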
3725static void check_for_admin_com_state(struct ena_adapter *adapter)
3726{
3727 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3728 netif_err(adapter, drv, adapter->netdev,
3729 "ENA admin queue is not in running state!\n");
3730 u64_stats_update_begin(&adapter->syncp);
3731 adapter->dev_stats.admin_q_pause++;
3732 u64_stats_update_end(&adapter->syncp);
e2eed0e3 3733 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
1738cd3e
NB
3734 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3735 }
3736}
3737
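/* Apply runtime hints reported by the device; millisecond values for the
 * driver watchdog and missing-Tx-completion timeouts are converted to jiffies.
 */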
82ef30f1
NB
3738static void ena_update_hints(struct ena_adapter *adapter,
3739 struct ena_admin_ena_hw_hints *hints)
3740{
3741 struct net_device *netdev = adapter->netdev;
3742
3743 if (hints->admin_completion_tx_timeout)
3744 adapter->ena_dev->admin_queue.completion_timeout =
3745 hints->admin_completion_tx_timeout * 1000;
3746
3747 if (hints->mmio_read_timeout)
3748 /* convert to usec */
3749 adapter->ena_dev->mmio_read.reg_read_to =
3750 hints->mmio_read_timeout * 1000;
3751
3752 if (hints->missed_tx_completion_count_threshold_to_reset)
3753 adapter->missing_tx_completion_threshold =
3754 hints->missed_tx_completion_count_threshold_to_reset;
3755
3756 if (hints->missing_tx_completion_timeout) {
3757 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3758 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3759 else
3760 adapter->missing_tx_completion_to =
3761 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3762 }
3763
3764 if (hints->netdev_wd_timeout)
3765 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3766
3767 if (hints->driver_watchdog_timeout) {
3768 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3769 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3770 else
3771 adapter->keep_alive_timeout =
3772 msecs_to_jiffies(hints->driver_watchdog_timeout);
3773 }
3774}
3775
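/* Mirror the netdev feature bits (low and high 32-bit halves) into the host
 * info structure shared with the device.
 */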
1738cd3e
NB
3776static void ena_update_host_info(struct ena_admin_host_info *host_info,
3777 struct net_device *netdev)
3778{
3779 host_info->supported_network_features[0] =
3780 netdev->features & GENMASK_ULL(31, 0);
3781 host_info->supported_network_features[1] =
3782 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3783}
3784
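/* One-second service timer: runs the keep-alive, admin-queue, missing
 * completion and empty-Rx-ring checks, refreshes the host info and debug
 * area, and queues the reset worker when a reset has been triggered.
 */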
e99e88a9 3785static void ena_timer_service(struct timer_list *t)
1738cd3e 3786{
e99e88a9 3787 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
1738cd3e
NB
3788 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3789 struct ena_admin_host_info *host_info =
3790 adapter->ena_dev->host_attr.host_info;
3791
3792 check_for_missing_keep_alive(adapter);
3793
3794 check_for_admin_com_state(adapter);
3795
8510e1a3 3796 check_for_missing_completions(adapter);
1738cd3e 3797
a3af7c18
NB
3798 check_for_empty_rx_ring(adapter);
3799
1738cd3e
NB
3800 if (debug_area)
3801 ena_dump_stats_to_buf(adapter, debug_area);
3802
3803 if (host_info)
3804 ena_update_host_info(host_info, adapter->netdev);
3805
3f6159db 3806 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1738cd3e
NB
3807 netif_err(adapter, drv, adapter->netdev,
3808 "Trigger reset is on\n");
3809 ena_dump_stats_to_dmesg(adapter);
3810 queue_work(ena_wq, &adapter->reset_task);
3811 return;
3812 }
3813
3814 /* Reset the timer */
2a6e5fa2 3815 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1738cd3e
NB
3816}
3817
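/* Upper bound on IO queues: limited by the device's Rx/Tx SQ and CQ counts,
 * the number of online CPUs, ENA_MAX_NUM_IO_QUEUES and the available MSI-X
 * vectors (one vector is reserved for management).
 */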
736ce3f4
SJ
3818static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
3819 struct ena_com_dev *ena_dev,
3820 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1738cd3e 3821{
736ce3f4 3822 int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
31aa9857
SJ
3823
3824 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3825 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3826 &get_feat_ctx->max_queue_ext.max_queue_ext;
736ce3f4 3827 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
31aa9857 3828 max_queue_ext->max_rx_cq_num);
1738cd3e 3829
31aa9857
SJ
3830 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3831 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3832 } else {
3833 struct ena_admin_queue_feature_desc *max_queues =
3834 &get_feat_ctx->max_queues;
3835 io_tx_sq_num = max_queues->max_sq_num;
3836 io_tx_cq_num = max_queues->max_cq_num;
736ce3f4 3837 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
31aa9857
SJ
3838 }
3839
3840 /* In case of LLQ use the llq fields for the tx SQ/CQ */
9fd25592 3841 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
31aa9857 3842 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
1738cd3e 3843
736ce3f4
SJ
3844 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3845 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3846 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3847 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
1738cd3e 3848 /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
736ce3f4
SJ
3849 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3850 if (unlikely(!max_num_io_queues)) {
1738cd3e
NB
3851 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3852 return -EFAULT;
3853 }
3854
736ce3f4 3855 return max_num_io_queues;
1738cd3e
NB
3856}
3857
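/* Negotiate the Tx placement policy: fall back to host memory when LLQ is
 * unsupported, cannot be configured or the LLQ bar is not exposed; otherwise
 * map the memory bar write-combined for LLQ use.
 */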
38005ca8
AK
3858static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3859 struct ena_com_dev *ena_dev,
3860 struct ena_admin_feature_llq_desc *llq,
3861 struct ena_llq_configurations *llq_default_configurations)
1738cd3e
NB
3862{
3863 bool has_mem_bar;
38005ca8
AK
3864 int rc;
3865 u32 llq_feature_mask;
3866
3867 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3868 if (!(ena_dev->supported_features & llq_feature_mask)) {
3869 dev_err(&pdev->dev,
3870 "LLQ is not supported Fallback to host mode policy.\n");
3871 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3872 return 0;
3873 }
1738cd3e
NB
3874
3875 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3876
38005ca8
AK
3877 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3878 if (unlikely(rc)) {
3879 dev_err(&pdev->dev,
3880 "Failed to configure the device mode. Fallback to host mode policy.\n");
3881 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3882 return 0;
3883 }
3884
3885 /* Nothing to config, exit */
3886 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3887 return 0;
3888
3889 if (!has_mem_bar) {
3890 dev_err(&pdev->dev,
3891 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
1738cd3e 3892 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
38005ca8
AK
3893 return 0;
3894 }
3895
3896 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3897 pci_resource_start(pdev, ENA_MEM_BAR),
3898 pci_resource_len(pdev, ENA_MEM_BAR));
3899
3900 if (!ena_dev->mem_bar)
3901 return -EFAULT;
3902
3903 return 0;
1738cd3e
NB
3904}
3905
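/* Translate the device's offload capability bits into netdev feature flags. */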
3906static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3907 struct net_device *netdev)
3908{
3909 netdev_features_t dev_features = 0;
3910
3911 /* Set offload features */
3912 if (feat->offload.tx &
3913 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3914 dev_features |= NETIF_F_IP_CSUM;
3915
3916 if (feat->offload.tx &
3917 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3918 dev_features |= NETIF_F_IPV6_CSUM;
3919
3920 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3921 dev_features |= NETIF_F_TSO;
3922
3923 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3924 dev_features |= NETIF_F_TSO6;
3925
3926 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3927 dev_features |= NETIF_F_TSO_ECN;
3928
3929 if (feat->offload.rx_supported &
3930 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3931 dev_features |= NETIF_F_RXCSUM;
3932
3933 if (feat->offload.rx_supported &
3934 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3935 dev_features |= NETIF_F_RXCSUM;
3936
3937 netdev->features =
3938 dev_features |
3939 NETIF_F_SG |
1738cd3e
NB
3940 NETIF_F_RXHASH |
3941 NETIF_F_HIGHDMA;
3942
3943 netdev->hw_features |= netdev->features;
3944 netdev->vlan_features |= netdev->features;
3945}
3946
3947static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3948 struct ena_com_dev_get_features_ctx *feat)
3949{
3950 struct net_device *netdev = adapter->netdev;
3951
3952 /* Copy mac address */
3953 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3954 eth_hw_addr_random(netdev);
3955 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3956 } else {
3957 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3958 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3959 }
3960
3961 /* Set offload features */
3962 ena_set_dev_offloads(feat, netdev);
3963
3964 adapter->max_mtu = feat->dev_attr.max_mtu;
d894be57
JW
3965 netdev->max_mtu = adapter->max_mtu;
3966 netdev->min_mtu = ENA_MIN_MTU;
1738cd3e
NB
3967}
3968
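/* Default RSS configuration: spread the indirection table across the IO
 * queues and use a CRC32 hash function.
 */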
3969static int ena_rss_init_default(struct ena_adapter *adapter)
3970{
3971 struct ena_com_dev *ena_dev = adapter->ena_dev;
3972 struct device *dev = &adapter->pdev->dev;
3973 int rc, i;
3974 u32 val;
3975
3976 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3977 if (unlikely(rc)) {
3978 dev_err(dev, "Cannot init indirect table\n");
3979 goto err_rss_init;
3980 }
3981
3982 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
faa615f9 3983 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
1738cd3e
NB
3984 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3985 ENA_IO_RXQ_IDX(val));
d1497638 3986 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
1738cd3e
NB
3987 dev_err(dev, "Cannot fill indirect table\n");
3988 goto err_fill_indir;
3989 }
3990 }
3991
3992 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3993 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
d1497638 3994 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
1738cd3e
NB
3995 dev_err(dev, "Cannot fill hash function\n");
3996 goto err_fill_indir;
3997 }
3998
3999 rc = ena_com_set_default_hash_ctrl(ena_dev);
d1497638 4000 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
1738cd3e
NB
4001 dev_err(dev, "Cannot fill hash control\n");
4002 goto err_fill_indir;
4003 }
4004
4005 return 0;
4006
4007err_fill_indir:
4008 ena_com_rss_destroy(ena_dev);
4009err_rss_init:
4010
4011 return rc;
4012}
4013
4014static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4015{
d79c3888 4016 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
0857d92f 4017
1738cd3e
NB
4018 pci_release_selected_regions(pdev, release_bars);
4019}
4020
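/* Default LLQ layout: 128-byte ring entries with two descriptors placed
 * before the packet header.
 */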
c2b54204 4021static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
38005ca8
AK
4022{
4023 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
4024 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
4025 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
4026 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
4027 llq_config->llq_ring_entry_size_value = 128;
4028}
4029
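/* Derive the default and maximum ring sizes from the device capabilities,
 * capping the Tx depth by the LLQ depth in device placement mode and rounding
 * all sizes down to a power of two.
 */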
4d192660 4030static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
1738cd3e 4031{
31aa9857
SJ
4032 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
4033 struct ena_com_dev *ena_dev = ctx->ena_dev;
4034 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
4035 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
4036 u32 max_tx_queue_size;
4037 u32 max_rx_queue_size;
1738cd3e 4038
4d192660 4039 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
31aa9857
SJ
4040 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4041 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
4042 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
4043 max_queue_ext->max_rx_sq_depth);
4044 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
1738cd3e 4045
31aa9857
SJ
4046 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4047 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4048 llq->max_llq_depth);
4049 else
4050 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4051 max_queue_ext->max_tx_sq_depth);
1738cd3e 4052
31aa9857
SJ
4053 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4054 max_queue_ext->max_per_packet_tx_descs);
4055 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4056 max_queue_ext->max_per_packet_rx_descs);
4057 } else {
4058 struct ena_admin_queue_feature_desc *max_queues =
4059 &ctx->get_feat_ctx->max_queues;
4060 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
4061 max_queues->max_sq_depth);
4062 max_tx_queue_size = max_queues->max_cq_depth;
4063
4064 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4065 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4066 llq->max_llq_depth);
4067 else
4068 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4069 max_queues->max_sq_depth);
4070
4071 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4072 max_queues->max_packet_tx_descs);
4073 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4074 max_queues->max_packet_rx_descs);
4075 }
4076
4077 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
4078 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
1738cd3e 4079
13ca32a6
SJ
4080 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
4081 max_tx_queue_size);
4082 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
4083 max_rx_queue_size);
31aa9857
SJ
4084
4085 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
4086 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
4087
31aa9857
SJ
4088 ctx->max_tx_queue_size = max_tx_queue_size;
4089 ctx->max_rx_queue_size = max_rx_queue_size;
4090 ctx->tx_queue_size = tx_queue_size;
4091 ctx->rx_queue_size = rx_queue_size;
1738cd3e 4092
31aa9857 4093 return 0;
1738cd3e
NB
4094}
4095
4096/* ena_probe - Device Initialization Routine
4097 * @pdev: PCI device information struct
4098 * @ent: entry in ena_pci_tbl
4099 *
4100 * Returns 0 on success, negative on failure
4101 *
4102 * ena_probe initializes an adapter identified by a pci_dev structure.
4103 * The OS initialization, configuring of the adapter private structure,
4104 * and a hardware reset occur.
4105 */
4106static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4107{
4108 struct ena_com_dev_get_features_ctx get_feat_ctx;
31aa9857 4109 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
38005ca8 4110 struct ena_llq_configurations llq_config;
1738cd3e 4111 struct ena_com_dev *ena_dev = NULL;
83b92404 4112 struct ena_adapter *adapter;
83b92404
SJ
4113 struct net_device *netdev;
4114 static int adapters_found;
736ce3f4 4115 u32 max_num_io_queues;
83b92404 4116 char *queue_type_str;
1738cd3e 4117 bool wd_state;
736ce3f4 4118 int bars, rc;
1738cd3e
NB
4119
4120 dev_dbg(&pdev->dev, "%s\n", __func__);
4121
1e9c3fba 4122 dev_info_once(&pdev->dev, "%s", version);
1738cd3e
NB
4123
4124 rc = pci_enable_device_mem(pdev);
4125 if (rc) {
4126 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4127 return rc;
4128 }
4129
4130 pci_set_master(pdev);
4131
4132 ena_dev = vzalloc(sizeof(*ena_dev));
4133 if (!ena_dev) {
4134 rc = -ENOMEM;
4135 goto err_disable_device;
4136 }
4137
4138 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4139 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4140 if (rc) {
4141 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4142 rc);
4143 goto err_free_ena_dev;
4144 }
4145
0857d92f
NB
4146 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4147 pci_resource_start(pdev, ENA_REG_BAR),
4148 pci_resource_len(pdev, ENA_REG_BAR));
1738cd3e
NB
4149 if (!ena_dev->reg_bar) {
4150 dev_err(&pdev->dev, "failed to remap regs bar\n");
4151 rc = -EFAULT;
4152 goto err_free_region;
4153 }
4154
4155 ena_dev->dmadev = &pdev->dev;
4156
4157 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
4158 if (rc) {
4159 dev_err(&pdev->dev, "ena device init failed\n");
4160 if (rc == -ETIME)
4161 rc = -EPROBE_DEFER;
4162 goto err_free_region;
4163 }
4164
38005ca8 4165 set_default_llq_configurations(&llq_config);
1738cd3e 4166
38005ca8
AK
4167 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
4168 &llq_config);
4169 if (rc) {
4170 dev_err(&pdev->dev, "ena device init failed\n");
4171 goto err_device_destroy;
1738cd3e
NB
4172 }
4173
31aa9857
SJ
4174 calc_queue_ctx.ena_dev = ena_dev;
4175 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
4176 calc_queue_ctx.pdev = pdev;
4177
15619e72 4178 /* Initial Tx and Rx interrupt delay. Assumes 1 usec granularity.
4d192660
SJ
4179 * Updated during device initialization with the real granularity
4180 */
1738cd3e 4181 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
15619e72 4182 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
79226cea 4183 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
736ce3f4 4184 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4d192660 4185 rc = ena_calc_io_queue_size(&calc_queue_ctx);
736ce3f4 4186 if (rc || !max_num_io_queues) {
1738cd3e
NB
4187 rc = -EFAULT;
4188 goto err_device_destroy;
4189 }
4190
1738cd3e 4191 /* dev zeroed in init_etherdev */
736ce3f4 4192 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
1738cd3e
NB
4193 if (!netdev) {
4194 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4195 rc = -ENOMEM;
4196 goto err_device_destroy;
4197 }
4198
4199 SET_NETDEV_DEV(netdev, &pdev->dev);
4200
4201 adapter = netdev_priv(netdev);
4202 pci_set_drvdata(pdev, adapter);
4203
4204 adapter->ena_dev = ena_dev;
4205 adapter->netdev = netdev;
4206 adapter->pdev = pdev;
4207
4208 ena_set_conf_feat_params(adapter, &get_feat_ctx);
4209
4210 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
e2eed0e3 4211 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
1738cd3e 4212
13ca32a6
SJ
4213 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
4214 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
9f9ae3f9
SJ
4215 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
4216 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
31aa9857
SJ
4217 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
4218 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
1738cd3e 4219
736ce3f4
SJ
4220 adapter->num_io_queues = max_num_io_queues;
4221 adapter->max_num_io_queues = max_num_io_queues;
4222
548c4940
SJ
4223 adapter->xdp_first_ring = 0;
4224 adapter->xdp_num_queues = 0;
4225
1738cd3e
NB
4226 adapter->last_monitored_tx_qid = 0;
4227
4228 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
4229 adapter->wd_state = wd_state;
4230
4231 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4232
4233 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4234 if (rc) {
4235 dev_err(&pdev->dev,
4236 "Failed to query interrupt moderation feature\n");
4237 goto err_netdev_destroy;
4238 }
548c4940
SJ
4239 ena_init_io_rings(adapter,
4240 0,
4241 adapter->xdp_num_queues +
4242 adapter->num_io_queues);
1738cd3e
NB
4243
4244 netdev->netdev_ops = &ena_netdev_ops;
4245 netdev->watchdog_timeo = TX_TIMEOUT;
4246 ena_set_ethtool_ops(netdev);
4247
4248 netdev->priv_flags |= IFF_UNICAST_FLT;
4249
4250 u64_stats_init(&adapter->syncp);
4251
4d192660 4252 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
1738cd3e
NB
4253 if (rc) {
4254 dev_err(&pdev->dev,
4255 "Failed to enable and set the admin interrupts\n");
4256 goto err_worker_destroy;
4257 }
4258 rc = ena_rss_init_default(adapter);
d1497638 4259 if (rc && (rc != -EOPNOTSUPP)) {
1738cd3e
NB
4260 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4261 goto err_free_msix;
4262 }
4263
4264 ena_config_debug_area(adapter);
4265
4266 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4267
4268 netif_carrier_off(netdev);
4269
4270 rc = register_netdev(netdev);
4271 if (rc) {
4272 dev_err(&pdev->dev, "Cannot register net device\n");
4273 goto err_rss;
4274 }
4275
1738cd3e
NB
4276 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4277
4278 adapter->last_keep_alive_jiffies = jiffies;
82ef30f1
NB
4279 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4280 adapter->missing_tx_completion_to = TX_TIMEOUT;
4281 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4282
4283 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
1738cd3e 4284
e99e88a9 4285 timer_setup(&adapter->timer_service, ena_timer_service, 0);
f850b4a7 4286 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1738cd3e 4287
38005ca8
AK
4288 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
4289 queue_type_str = "Regular";
4290 else
4291 queue_type_str = "Low Latency";
4292
4293 dev_info(&pdev->dev,
9f648f7b 4294 "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
1738cd3e 4295 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
9f648f7b 4296 netdev->dev_addr, queue_type_str);
1738cd3e
NB
4297
4298 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4299
4300 adapters_found++;
4301
4302 return 0;
4303
4304err_rss:
4305 ena_com_delete_debug_area(ena_dev);
4306 ena_com_rss_destroy(ena_dev);
4307err_free_msix:
e2eed0e3 4308 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
58a54b9c
AK
4309 /* stop submitting admin commands on a device that was reset */
4310 ena_com_set_admin_running_state(ena_dev, false);
1738cd3e 4311 ena_free_mgmnt_irq(adapter);
06443684 4312 ena_disable_msix(adapter);
1738cd3e 4313err_worker_destroy:
1738cd3e 4314 del_timer(&adapter->timer_service);
1738cd3e
NB
4315err_netdev_destroy:
4316 free_netdev(netdev);
4317err_device_destroy:
4318 ena_com_delete_host_info(ena_dev);
4319 ena_com_admin_destroy(ena_dev);
4320err_free_region:
4321 ena_release_bars(ena_dev, pdev);
4322err_free_ena_dev:
1738cd3e
NB
4323 vfree(ena_dev);
4324err_disable_device:
4325 pci_disable_device(pdev);
4326 return rc;
4327}
4328
1738cd3e
NB
4329/*****************************************************************************/
4330
4331/* ena_remove - Device Removal Routine
4332 * @pdev: PCI device information struct
4333 *
4334 * ena_remove is called by the PCI subsystem to alert the driver
4335 * that it should release a PCI device.
4336 */
4337static void ena_remove(struct pci_dev *pdev)
4338{
4339 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4340 struct ena_com_dev *ena_dev;
4341 struct net_device *netdev;
4342
1738cd3e
NB
4343 ena_dev = adapter->ena_dev;
4344 netdev = adapter->netdev;
4345
4346#ifdef CONFIG_RFS_ACCEL
4347 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4348 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4349 netdev->rx_cpu_rmap = NULL;
4350 }
4351#endif /* CONFIG_RFS_ACCEL */
1738cd3e
NB
4352 del_timer_sync(&adapter->timer_service);
4353
4354 cancel_work_sync(&adapter->reset_task);
4355
944b28aa
NB
4356 rtnl_lock();
4357 ena_destroy_device(adapter, true);
4358 rtnl_unlock();
1738cd3e 4359
58a54b9c
AK
4360 unregister_netdev(netdev);
4361
1738cd3e
NB
4362 free_netdev(netdev);
4363
1738cd3e
NB
4364 ena_com_rss_destroy(ena_dev);
4365
4366 ena_com_delete_debug_area(ena_dev);
4367
4368 ena_com_delete_host_info(ena_dev);
4369
4370 ena_release_bars(ena_dev, pdev);
4371
1738cd3e
NB
4372 pci_disable_device(pdev);
4373
1738cd3e
NB
4374 vfree(ena_dev);
4375}
4376
8c5c7abd
NB
4377#ifdef CONFIG_PM
4378/* ena_suspend - PM suspend callback
4379 * @pdev: PCI device information struct
4380 * @state: power state
4381 */
4382static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
4383{
4384 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4385
4386 u64_stats_update_begin(&adapter->syncp);
4387 adapter->dev_stats.suspend++;
4388 u64_stats_update_end(&adapter->syncp);
4389
4390 rtnl_lock();
4391 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
4392 dev_err(&pdev->dev,
4393 "ignoring device reset request as the device is being suspended\n");
4394 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
4395 }
cfa324a5 4396 ena_destroy_device(adapter, true);
8c5c7abd
NB
4397 rtnl_unlock();
4398 return 0;
4399}
4400
4401/* ena_resume - PM resume callback
4402 * @pdev: PCI device information struct
4403 *
4404 */
4405static int ena_resume(struct pci_dev *pdev)
4406{
4407 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4408 int rc;
4409
4410 u64_stats_update_begin(&adapter->syncp);
4411 adapter->dev_stats.resume++;
4412 u64_stats_update_end(&adapter->syncp);
4413
4414 rtnl_lock();
4415 rc = ena_restore_device(adapter);
4416 rtnl_unlock();
4417 return rc;
4418}
4419#endif
4420
1738cd3e
NB
4421static struct pci_driver ena_pci_driver = {
4422 .name = DRV_MODULE_NAME,
4423 .id_table = ena_pci_tbl,
4424 .probe = ena_probe,
4425 .remove = ena_remove,
8c5c7abd
NB
4426#ifdef CONFIG_PM
4427 .suspend = ena_suspend,
4428 .resume = ena_resume,
4429#endif
115ddc49 4430 .sriov_configure = pci_sriov_configure_simple,
1738cd3e
NB
4431};
4432
4433static int __init ena_init(void)
4434{
4435 pr_info("%s", version);
4436
4437 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
4438 if (!ena_wq) {
4439 pr_err("Failed to create workqueue\n");
4440 return -ENOMEM;
4441 }
4442
4443 return pci_register_driver(&ena_pci_driver);
4444}
4445
4446static void __exit ena_cleanup(void)
4447{
4448 pci_unregister_driver(&ena_pci_driver);
4449
4450 if (ena_wq) {
4451 destroy_workqueue(ena_wq);
4452 ena_wq = NULL;
4453 }
4454}
4455
4456/******************************************************************************
4457 ******************************** AENQ Handlers *******************************
4458 *****************************************************************************/
4459/* ena_update_on_link_change:
4460 * Notify the network interface about the change in link status
4461 */
4462static void ena_update_on_link_change(void *adapter_data,
4463 struct ena_admin_aenq_entry *aenq_e)
4464{
4465 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4466 struct ena_admin_aenq_link_change_desc *aenq_desc =
4467 (struct ena_admin_aenq_link_change_desc *)aenq_e;
4468 int status = aenq_desc->flags &
4469 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
4470
4471 if (status) {
4472 netdev_dbg(adapter->netdev, "%s\n", __func__);
4473 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
d18e4f68
NB
4474 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
4475 netif_carrier_on(adapter->netdev);
1738cd3e
NB
4476 } else {
4477 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4478 netif_carrier_off(adapter->netdev);
4479 }
4480}
4481
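/* Keep-alive AENQ handler: refresh the watchdog timestamp and record the
 * Rx drop count reported by the device.
 */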
4482static void ena_keep_alive_wd(void *adapter_data,
4483 struct ena_admin_aenq_entry *aenq_e)
4484{
4485 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
11a9a460
NB
4486 struct ena_admin_aenq_keep_alive_desc *desc;
4487 u64 rx_drops;
1738cd3e 4488
11a9a460 4489 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
1738cd3e 4490 adapter->last_keep_alive_jiffies = jiffies;
11a9a460
NB
4491
4492 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
4493
4494 u64_stats_update_begin(&adapter->syncp);
4495 adapter->dev_stats.rx_drops = rx_drops;
4496 u64_stats_update_end(&adapter->syncp);
1738cd3e
NB
4497}
4498
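/* Notification AENQ handler; currently only ENA_ADMIN_UPDATE_HINTS is
 * handled.
 */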
4499static void ena_notification(void *adapter_data,
4500 struct ena_admin_aenq_entry *aenq_e)
4501{
4502 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
82ef30f1 4503 struct ena_admin_ena_hw_hints *hints;
1738cd3e
NB
4504
4505 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4506 "Invalid group(%x) expected %x\n",
4507 aenq_e->aenq_common_desc.group,
4508 ENA_ADMIN_NOTIFICATION);
4509
4510 switch (aenq_e->aenq_common_desc.syndrom) {
82ef30f1
NB
4511 case ENA_ADMIN_UPDATE_HINTS:
4512 hints = (struct ena_admin_ena_hw_hints *)
4513 (&aenq_e->inline_data_w4);
4514 ena_update_hints(adapter, hints);
4515 break;
1738cd3e
NB
4516 default:
4517 netif_err(adapter, drv, adapter->netdev,
4518 "Invalid aenq notification link state %d\n",
4519 aenq_e->aenq_common_desc.syndrom);
4520 }
4521}
4522
4523 /* This handler will be called for an unknown event group or unimplemented handlers */
4524static void unimplemented_aenq_handler(void *data,
4525 struct ena_admin_aenq_entry *aenq_e)
4526{
4527 struct ena_adapter *adapter = (struct ena_adapter *)data;
4528
4529 netif_err(adapter, drv, adapter->netdev,
4530 "Unknown event was received or event with unimplemented handler\n");
4531}
4532
4533static struct ena_aenq_handlers aenq_handlers = {
4534 .handlers = {
4535 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
4536 [ENA_ADMIN_NOTIFICATION] = ena_notification,
4537 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
4538 },
4539 .unimplemented_handler = unimplemented_aenq_handler
4540};
4541
4542module_init(ena_init);
4543module_exit(ena_cleanup);