/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					    int first_index,
					    int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);

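/* ena_tx_timeout - netdev watchdog callback, invoked when a TX queue
 * appears hung. Schedules a device reset unless one is already pending.
 */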
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset
	 * Check that we are not in the middle of a trigger already
	 */

	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

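/* ena_xmit_common - common TX submission path shared by the regular xmit
 * and the XDP TX path: hands the prepared descriptors to the device and
 * updates ring bookkeeping and statistics.
 */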
static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&ring->syncp);
		ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}

/* This is the XDP napi callback. XDP queues use a separate napi callback
 * from Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}

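/* Map an XDP buffer for transmission: the start of the frame is pushed to
 * the device as an LLQ header and any remainder is DMA mapped separately.
 */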
static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
			       struct ena_tx_buffer *tx_info,
			       struct xdp_buff *xdp,
			       void **push_hdr,
			       u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&xdp_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}

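/* Transmit an XDP_TX frame on the XDP TX queue paired with the RX queue it
 * arrived on. The RX page gets an extra reference so it can be released
 * once the transmission completes.
 */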
static int ena_xdp_xmit_buff(struct net_device *dev,
			     struct xdp_buff *xdp,
			     int qid,
			     struct ena_rx_buffer *rx_info)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_com_tx_ctx ena_tx_ctx = {0};
	struct ena_tx_buffer *tx_info;
	struct ena_ring *xdp_ring;
	u16 next_to_use, req_id;
	int rc;
	void *push_hdr;
	u32 push_len;

	xdp_ring = &adapter->tx_ring[qid];
	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	page_ref_inc(rx_info->page);
	tx_info->xdp_rx_page = rx_info->page;

	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdp->data_end - xdp->data);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
	 * has a mb
	 */
	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.doorbells++;
	u64_stats_update_end(&xdp_ring->syncp);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:
	__free_page(tx_info->xdp_rx_page);
	return NETDEV_TX_OK;
}

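/* Run the attached XDP program on a received buffer and act on its verdict;
 * XDP_TX frames are retransmitted on the paired XDP TX queue.
 */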
static int ena_xdp_execute(struct ena_ring *rx_ring,
			   struct xdp_buff *xdp,
			   struct ena_rx_buffer *rx_info)
{
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	if (verdict == XDP_TX)
		ena_xdp_xmit_buff(rx_ring->netdev,
				  xdp,
				  rx_ring->qid + rx_ring->adapter->num_io_queues,
				  rx_info);
	else if (unlikely(verdict == XDP_ABORTED))
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
	else if (unlikely(verdict > XDP_TX))
		bpf_warn_invalid_xdp_action(verdict);
out:
	rcu_read_unlock();
	return verdict;
}

static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}

static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}

static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}

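/* Attach or detach an XDP program. Attaching requires bringing the interface
 * down to set up the dedicated XDP TX queues and restricts max_mtu to what a
 * single-page XDP buffer can hold.
 */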
static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
		} else if (old_bpf_prog) {
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "xdp program set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}

/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	case XDP_QUERY_PROG:
		bpf->prog_id = adapter->xdp_bpf_prog ?
			adapter->xdp_bpf_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

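/* Initialize the ring fields shared by the TX and RX rings of queue @qid. */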
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}

static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

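/* Allocate and DMA map a fresh page for an RX descriptor, honoring the
 * headroom reserved for XDP when a program is attached.
 */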
static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + rx_ring->rx_headroom;
	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev,
		       ena_buf->paddr - rx_ring->rx_headroom,
		       ENA_PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}

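/* Post up to @num fresh RX buffers to the submission queue, stopping early
 * on allocation failure. Returns the number of buffers actually posted.
 */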
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

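/* Unmap the DMA buffers attached to a TX descriptor: the optional linear
 * part mapped with dma_map_single() followed by the page fragments.
 */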
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s",
			  is_xdp ? "xdp frame" : "skb");
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id: %hu\n",
			  req_id);

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&ring->syncp);

	/* Trigger device reset */
	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < xdp_ring->ring_size)) {
		tx_info = &xdp_ring->tx_buffer_info[req_id];
		if (likely(tx_info->xdpf))
			return 0;
	}

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}

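/* Reap completed TX descriptors (up to @budget packets), unmap and free
 * their skbs, and wake the TX queue when enough space is available again.
 */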
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
		return NULL;
	}

	return skb;
}

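/* Build an skb for a received packet: small packets are copied into a
 * linear skb (rx_copybreak), larger ones attach the RX pages as frags.
 */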
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;
	int rc;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;

	rc = validate_rx_req_id(rx_ring, req_id);
	if (unlikely(rc < 0))
		return NULL;

	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);
		/* The offset is non zero only for the first buffer */
		rx_info->page_offset = 0;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;

		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			return NULL;

		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: structure containing ring specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_good++;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))

			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

1555
32109c70 1556static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
838c93dc
SJ
1557{
1558 struct ena_rx_buffer *rx_info;
1559 int ret;
1560
1561 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1562 xdp->data = page_address(rx_info->page) +
1563 rx_info->page_offset + rx_ring->rx_headroom;
1564 xdp_set_data_meta_invalid(xdp);
1565 xdp->data_hard_start = page_address(rx_info->page);
1566 xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
1567 /* If for some reason we received a bigger packet than
1568 * we expect, then we simply drop it
1569 */
1570 if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
1571 return XDP_DROP;
1572
548c4940 1573 ret = ena_xdp_execute(rx_ring, xdp, rx_info);
838c93dc
SJ
1574
1575 /* The xdp program might expand the headers */
1576 if (ret == XDP_PASS) {
1577 rx_info->page_offset = xdp->data - xdp->data_hard_start;
1578 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
1579 }
1580
1581 return ret;
1582}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	u32 res_budget, work_done;
	int rx_copybreak_pkt = 0;
	int refill_threshold;
	struct sk_buff *skb;
	int refill_required;
	struct xdp_buff xdp;
	int total_len = 0;
	int xdp_verdict;
	int rc = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;
	xdp.rxq = &rx_ring->xdp_rxq;
	xdp.frame_sz = ENA_PAGE_SIZE;

	do {
		xdp_verdict = XDP_PASS;
		skb = NULL;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		ena_rx_ctx.pkt_offset = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
		rx_info->page_offset = ena_rx_ctx.pkt_offset;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		if (ena_xdp_present_ring(rx_ring))
			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);

		/* allocate skb and fill it */
		if (xdp_verdict == XDP_PASS)
			skb = ena_rx_skb(rx_ring,
					 rx_ring->ena_bufs,
					 ena_rx_ctx.descs,
					 &next_to_clean);

		if (unlikely(!skb)) {
			if (xdp_verdict == XDP_TX)
				ena_free_rx_page(rx_ring,
						 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			if (xdp_verdict != XDP_PASS) {
				res_budget--;
				continue;
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

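/* Deferred work for dynamic interrupt moderation (DIM): apply the newly
 * selected RX moderation profile to the ring.
 */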
static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}

static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}

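/* Re-arm the shared TX/RX MSI-X vector, programming the per-direction
 * interrupt delay intervals into the interrupt register.
 */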
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;
	u32 rx_interval = 0;
	/* Rx ring can be NULL for XDP tx queues which don't have an
	 * accompanying rx_ring pair.
	 */
	if (rx_ring)
		rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
			rx_ring->smoothed_interval :
			ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_interval,
				tx_ring->smoothed_interval,
				true);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.unmask_interrupt++;
	u64_stats_update_end(&tx_ring->syncp);
	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 * The Tx ring is used because the rx_ring is NULL for XDP queues
	 */
	ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}

static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		if (rx_ring)
			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
						 numa_node);
	}

	tx_ring->cpu = cpu;
	if (rx_ring)
		rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}

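/* Reap completed XDP TX descriptors (up to @budget frames), unmapping their
 * buffers and releasing the originating RX pages.
 */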
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
{
	u32 total_done = 0;
	u16 next_to_clean;
	u32 tx_bytes = 0;
	int tx_pkts = 0;
	u16 req_id;
	int rc;

	if (unlikely(!xdp_ring))
		return 0;
	next_to_clean = xdp_ring->next_to_clean;

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct xdp_frame *xdpf;

		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_xdp_req_id(xdp_ring, req_id);
		if (rc)
			break;

		tx_info = &xdp_ring->tx_buffer_info[req_id];
		xdpf = tx_info->xdpf;

		tx_info->xdpf = NULL;
		tx_info->last_jiffies = 0;
		ena_unmap_tx_buff(xdp_ring, tx_info);

		netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
			  xdpf);

		tx_bytes += xdpf->len;
		tx_pkts++;
		total_done += tx_info->tx_descs;

		__free_page(tx_info->xdp_rx_page);
		xdp_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     xdp_ring->ring_size);
	}

	xdp_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);

	netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  xdp_ring->qid, tx_pkts);

	return tx_pkts;
}

1738cd3e
NB
1871static int ena_io_poll(struct napi_struct *napi, int budget)
1872{
1873 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1874 struct ena_ring *tx_ring, *rx_ring;
24dee0c7
NB
1875 int tx_work_done;
1876 int rx_work_done = 0;
1738cd3e
NB
1877 int tx_budget;
1878 int napi_comp_call = 0;
1879 int ret;
1880
1881 tx_ring = ena_napi->tx_ring;
1882 rx_ring = ena_napi->rx_ring;
1883
913b0bfd
SJ
1884 tx_ring->first_interrupt = ena_napi->first_interrupt;
1885 rx_ring->first_interrupt = ena_napi->first_interrupt;
1886
1738cd3e
NB
1887 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1888
3f6159db
NB
1889 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1890 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1738cd3e
NB
1891 napi_complete_done(napi, 0);
1892 return 0;
1893 }
1894
1895 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
24dee0c7
NB
1896 /* On netpoll the budget is zero and the handler should only clean the
1897 * tx completions.
1898 */
1899 if (likely(budget))
1900 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1738cd3e 1901
b1669c9f
NB
 1902 /* If the device is about to reset or is down, avoid unmasking
 1903 * the interrupt and return 0 so NAPI won't reschedule
1904 */
1905 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1906 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1907 napi_complete_done(napi, 0);
1908 ret = 0;
1738cd3e 1909
b1669c9f 1910 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1738cd3e 1911 napi_comp_call = 1;
1738cd3e 1912
b1669c9f
NB
 1913 /* Update numa and unmask the interrupt only when scheduled
 1914 * from the interrupt context (vs from sk_busy_loop)
1738cd3e 1915 */
b1669c9f 1916 if (napi_complete_done(napi, rx_work_done)) {
282faf61
AK
1917 /* We apply adaptive moderation on Rx path only.
1918 * Tx uses static interrupt moderation.
1919 */
b1669c9f 1920 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
282faf61 1921 ena_adjust_adaptive_rx_intr_moderation(ena_napi);
b1669c9f 1922
418df30f 1923 ena_unmask_interrupt(tx_ring, rx_ring);
b1669c9f 1924 }
1738cd3e 1925
1738cd3e
NB
1926 ena_update_ring_numa_node(tx_ring, rx_ring);
1927
1928 ret = rx_work_done;
1929 } else {
1930 ret = budget;
1931 }
1932
1933 u64_stats_update_begin(&tx_ring->syncp);
1934 tx_ring->tx_stats.napi_comp += napi_comp_call;
1935 tx_ring->tx_stats.tx_poll++;
1936 u64_stats_update_end(&tx_ring->syncp);
1937
1938 return ret;
1939}
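/* A short note on the NAPI contract as applied above: returning a value
 * equal to the budget keeps the ring in polled mode, while completing
 * with napi_complete_done() and returning less than the budget re-arms
 * the interrupt (here via ena_unmask_interrupt()). The DEV_UP and
 * TRIGGER_RESET checks ensure a device that is going down or resetting
 * never re-enables its interrupts from this path.
 */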
1940
1941static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1942{
1943 struct ena_adapter *adapter = (struct ena_adapter *)data;
1944
1945 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1946
1947 /* Don't call the aenq handler before probe is done */
1948 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1949 ena_com_aenq_intr_handler(adapter->ena_dev, data);
1950
1951 return IRQ_HANDLED;
1952}
1953
1954/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1955 * @irq: interrupt number
1956 * @data: pointer to a network interface private napi device structure
1957 */
1958static irqreturn_t ena_intr_msix_io(int irq, void *data)
1959{
1960 struct ena_napi *ena_napi = data;
1961
913b0bfd 1962 ena_napi->first_interrupt = true;
8510e1a3 1963
e745dafa 1964 napi_schedule_irqoff(&ena_napi->napi);
1738cd3e
NB
1965
1966 return IRQ_HANDLED;
1967}
1968
06443684
NB
 1969/* Reserve a single MSI-X vector for management (admin + aenq),
 1970 * plus one vector for each potential I/O queue.
 1971 * The number of potential I/O queues is the minimum of what the device
 1972 * supports and the number of vCPUs.
1973 */
4d192660 1974static int ena_enable_msix(struct ena_adapter *adapter)
1738cd3e 1975{
06443684
NB
1976 int msix_vecs, irq_cnt;
1977
1978 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1979 netif_err(adapter, probe, adapter->netdev,
1980 "Error, MSI-X is already enabled\n");
1981 return -EPERM;
1982 }
1738cd3e
NB
1983
 1984 /* Reserve the max MSI-X vectors we might need */
ce1f3521 1985 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1738cd3e
NB
1986 netif_dbg(adapter, probe, adapter->netdev,
1987 "trying to enable MSI-X, vectors %d\n", msix_vecs);
1988
06443684
NB
1989 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1990 msix_vecs, PCI_IRQ_MSIX);
1991
1992 if (irq_cnt < 0) {
1738cd3e 1993 netif_err(adapter, probe, adapter->netdev,
06443684 1994 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1738cd3e
NB
1995 return -ENOSPC;
1996 }
1997
06443684
NB
1998 if (irq_cnt != msix_vecs) {
1999 netif_notice(adapter, probe, adapter->netdev,
2000 "enable only %d MSI-X (out of %d), reduce the number of queues\n",
2001 irq_cnt, msix_vecs);
faa615f9 2002 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1738cd3e
NB
2003 }
2004
06443684
NB
2005 if (ena_init_rx_cpu_rmap(adapter))
2006 netif_warn(adapter, probe, adapter->netdev,
2007 "Failed to map IRQs to CPUs\n");
2008
2009 adapter->msix_vecs = irq_cnt;
2010 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
1738cd3e
NB
2011
2012 return 0;
2013}
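/* Resulting vector layout (a sketch, assuming ENA_ADMIN_MSIX_VEC is the
 * single management vector accounted for by ENA_MAX_MSIX_VEC()):
 *   vector 0               - admin queue + AENQ (ENA_MGMNT_IRQ_IDX)
 *   vectors 1..irq_cnt-1   - one per I/O queue pair (ENA_IO_IRQ_IDX(i))
 * If the PCI core grants fewer vectors than requested, num_io_queues is
 * trimmed to irq_cnt - ENA_ADMIN_MSIX_VEC, as done above.
 */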
2014
2015static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
2016{
2017 u32 cpu;
2018
2019 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
2020 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
2021 pci_name(adapter->pdev));
2022 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
2023 ena_intr_msix_mgmnt;
2024 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
2025 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
da6f4cf5 2026 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
1738cd3e
NB
2027 cpu = cpumask_first(cpu_online_mask);
2028 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
2029 cpumask_set_cpu(cpu,
2030 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
2031}
2032
2033static void ena_setup_io_intr(struct ena_adapter *adapter)
2034{
2035 struct net_device *netdev;
2036 int irq_idx, i, cpu;
548c4940 2037 int io_queue_count;
1738cd3e
NB
2038
2039 netdev = adapter->netdev;
548c4940 2040 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e 2041
548c4940 2042 for (i = 0; i < io_queue_count; i++) {
1738cd3e
NB
2043 irq_idx = ENA_IO_IRQ_IDX(i);
2044 cpu = i % num_online_cpus();
2045
2046 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
2047 "%s-Tx-Rx-%d", netdev->name, i);
2048 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
2049 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
2050 adapter->irq_tbl[irq_idx].vector =
da6f4cf5 2051 pci_irq_vector(adapter->pdev, irq_idx);
1738cd3e
NB
2052 adapter->irq_tbl[irq_idx].cpu = cpu;
2053
2054 cpumask_set_cpu(cpu,
2055 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
2056 }
2057}
2058
2059static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
2060{
2061 unsigned long flags = 0;
2062 struct ena_irq *irq;
2063 int rc;
2064
2065 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2066 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2067 irq->data);
2068 if (rc) {
2069 netif_err(adapter, probe, adapter->netdev,
2070 "failed to request admin irq\n");
2071 return rc;
2072 }
2073
2074 netif_dbg(adapter, probe, adapter->netdev,
2075 "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
2076 irq->affinity_hint_mask.bits[0], irq->vector);
2077
2078 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2079
2080 return rc;
2081}
2082
2083static int ena_request_io_irq(struct ena_adapter *adapter)
2084{
e02ae6ed 2085 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2086 unsigned long flags = 0;
2087 struct ena_irq *irq;
2088 int rc = 0, i, k;
2089
06443684
NB
2090 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2091 netif_err(adapter, ifup, adapter->netdev,
2092 "Failed to request I/O IRQ: MSI-X is not enabled\n");
2093 return -EINVAL;
2094 }
2095
e02ae6ed 2096 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1738cd3e
NB
2097 irq = &adapter->irq_tbl[i];
2098 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2099 irq->data);
2100 if (rc) {
2101 netif_err(adapter, ifup, adapter->netdev,
2102 "Failed to request I/O IRQ. index %d rc %d\n",
2103 i, rc);
2104 goto err;
2105 }
2106
2107 netif_dbg(adapter, ifup, adapter->netdev,
2108 "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
2109 i, irq->affinity_hint_mask.bits[0], irq->vector);
2110
2111 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2112 }
2113
2114 return rc;
2115
2116err:
2117 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
2118 irq = &adapter->irq_tbl[k];
2119 free_irq(irq->vector, irq->data);
2120 }
2121
2122 return rc;
2123}
2124
2125static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
2126{
2127 struct ena_irq *irq;
2128
2129 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2130 synchronize_irq(irq->vector);
2131 irq_set_affinity_hint(irq->vector, NULL);
2132 free_irq(irq->vector, irq->data);
2133}
2134
2135static void ena_free_io_irq(struct ena_adapter *adapter)
2136{
e02ae6ed 2137 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2138 struct ena_irq *irq;
2139 int i;
2140
2141#ifdef CONFIG_RFS_ACCEL
2142 if (adapter->msix_vecs >= 1) {
2143 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2144 adapter->netdev->rx_cpu_rmap = NULL;
2145 }
2146#endif /* CONFIG_RFS_ACCEL */
2147
e02ae6ed 2148 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1738cd3e
NB
2149 irq = &adapter->irq_tbl[i];
2150 irq_set_affinity_hint(irq->vector, NULL);
2151 free_irq(irq->vector, irq->data);
2152 }
2153}
2154
06443684
NB
2155static void ena_disable_msix(struct ena_adapter *adapter)
2156{
2157 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
2158 pci_free_irq_vectors(adapter->pdev);
2159}
2160
1738cd3e
NB
2161static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
2162{
e02ae6ed 2163 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2164 int i;
2165
2166 if (!netif_running(adapter->netdev))
2167 return;
2168
e02ae6ed 2169 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
1738cd3e
NB
2170 synchronize_irq(adapter->irq_tbl[i].vector);
2171}
2172
548c4940
SJ
2173static void ena_del_napi_in_range(struct ena_adapter *adapter,
2174 int first_index,
2175 int count)
1738cd3e
NB
2176{
2177 int i;
2178
548c4940
SJ
2179 for (i = first_index; i < first_index + count; i++) {
2180 /* Check if napi was initialized before */
2181 if (!ENA_IS_XDP_INDEX(adapter, i) ||
2182 adapter->ena_napi[i].xdp_ring)
2183 netif_napi_del(&adapter->ena_napi[i].napi);
2184 else
2185 WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
2186 adapter->ena_napi[i].xdp_ring);
2187 }
1738cd3e
NB
2188}
2189
548c4940
SJ
2190static void ena_init_napi_in_range(struct ena_adapter *adapter,
2191 int first_index, int count)
1738cd3e 2192{
1738cd3e
NB
2193 int i;
2194
548c4940 2195 for (i = first_index; i < first_index + count; i++) {
d89d8d4d 2196 struct ena_napi *napi = &adapter->ena_napi[i];
1738cd3e
NB
2197
2198 netif_napi_add(adapter->netdev,
d89d8d4d 2199 &napi->napi,
548c4940 2200 ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
1738cd3e 2201 ENA_NAPI_BUDGET);
548c4940
SJ
2202
2203 if (!ENA_IS_XDP_INDEX(adapter, i)) {
2204 napi->rx_ring = &adapter->rx_ring[i];
2205 napi->tx_ring = &adapter->tx_ring[i];
2206 } else {
2207 napi->xdp_ring = &adapter->tx_ring[i];
2208 }
1738cd3e
NB
2209 napi->qid = i;
2210 }
2211}
2212
548c4940
SJ
2213static void ena_napi_disable_in_range(struct ena_adapter *adapter,
2214 int first_index,
2215 int count)
1738cd3e
NB
2216{
2217 int i;
2218
548c4940 2219 for (i = first_index; i < first_index + count; i++)
1738cd3e
NB
2220 napi_disable(&adapter->ena_napi[i].napi);
2221}
2222
548c4940
SJ
2223static void ena_napi_enable_in_range(struct ena_adapter *adapter,
2224 int first_index,
2225 int count)
1738cd3e
NB
2226{
2227 int i;
2228
548c4940 2229 for (i = first_index; i < first_index + count; i++)
1738cd3e
NB
2230 napi_enable(&adapter->ena_napi[i].napi);
2231}
2232
1738cd3e
NB
2233/* Configure the Rx forwarding */
2234static int ena_rss_configure(struct ena_adapter *adapter)
2235{
2236 struct ena_com_dev *ena_dev = adapter->ena_dev;
2237 int rc;
2238
2239 /* In case the RSS table wasn't initialized by probe */
2240 if (!ena_dev->rss.tbl_log_size) {
2241 rc = ena_rss_init_default(adapter);
d1497638 2242 if (rc && (rc != -EOPNOTSUPP)) {
1738cd3e 2243 netif_err(adapter, ifup, adapter->netdev,
46143e58 2244 "Failed to init RSS rc: %d\n", rc);
1738cd3e
NB
2245 return rc;
2246 }
2247 }
2248
2249 /* Set indirect table */
2250 rc = ena_com_indirect_table_set(ena_dev);
d1497638 2251 if (unlikely(rc && rc != -EOPNOTSUPP))
1738cd3e
NB
2252 return rc;
2253
2254 /* Configure hash function (if supported) */
2255 rc = ena_com_set_hash_function(ena_dev);
d1497638 2256 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1738cd3e
NB
2257 return rc;
2258
2259 /* Configure hash inputs (if supported) */
2260 rc = ena_com_set_hash_ctrl(ena_dev);
d1497638 2261 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1738cd3e
NB
2262 return rc;
2263
2264 return 0;
2265}
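/* A note on the -EOPNOTSUPP handling above: a device may legitimately
 * lack some RSS capabilities (indirection table, hash function or hash
 * input control), so that return code is treated as non-fatal and the
 * interface is still brought up; any other error aborts the up sequence.
 */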
2266
2267static int ena_up_complete(struct ena_adapter *adapter)
2268{
7853b49c 2269 int rc;
1738cd3e
NB
2270
2271 rc = ena_rss_configure(adapter);
2272 if (rc)
2273 return rc;
2274
1738cd3e
NB
2275 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
2276
2277 ena_refill_all_rx_bufs(adapter);
2278
2279 /* enable transmits */
2280 netif_tx_start_all_queues(adapter->netdev);
2281
548c4940
SJ
2282 ena_napi_enable_in_range(adapter,
2283 0,
2284 adapter->xdp_num_queues + adapter->num_io_queues);
1738cd3e 2285
1738cd3e
NB
2286 return 0;
2287}
2288
2289static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
2290{
38005ca8 2291 struct ena_com_create_io_ctx ctx;
1738cd3e
NB
2292 struct ena_com_dev *ena_dev;
2293 struct ena_ring *tx_ring;
2294 u32 msix_vector;
2295 u16 ena_qid;
2296 int rc;
2297
2298 ena_dev = adapter->ena_dev;
2299
2300 tx_ring = &adapter->tx_ring[qid];
2301 msix_vector = ENA_IO_IRQ_IDX(qid);
2302 ena_qid = ENA_IO_TXQ_IDX(qid);
2303
38005ca8
AK
2304 memset(&ctx, 0x0, sizeof(ctx));
2305
1738cd3e
NB
2306 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
2307 ctx.qid = ena_qid;
2308 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
2309 ctx.msix_vector = msix_vector;
13ca32a6 2310 ctx.queue_size = tx_ring->ring_size;
1738cd3e
NB
2311 ctx.numa_node = cpu_to_node(tx_ring->cpu);
2312
2313 rc = ena_com_create_io_queue(ena_dev, &ctx);
2314 if (rc) {
2315 netif_err(adapter, ifup, adapter->netdev,
2316 "Failed to create I/O TX queue num %d rc: %d\n",
46143e58 2317 qid, rc);
1738cd3e
NB
2318 return rc;
2319 }
2320
2321 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2322 &tx_ring->ena_com_io_sq,
2323 &tx_ring->ena_com_io_cq);
2324 if (rc) {
2325 netif_err(adapter, ifup, adapter->netdev,
2326 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
2327 qid, rc);
2328 ena_com_destroy_io_queue(ena_dev, ena_qid);
2d2c600a 2329 return rc;
1738cd3e
NB
2330 }
2331
2332 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
2333 return rc;
2334}
2335
548c4940
SJ
2336static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2337 int first_index, int count)
1738cd3e
NB
2338{
2339 struct ena_com_dev *ena_dev = adapter->ena_dev;
2340 int rc, i;
2341
548c4940 2342 for (i = first_index; i < first_index + count; i++) {
1738cd3e
NB
2343 rc = ena_create_io_tx_queue(adapter, i);
2344 if (rc)
2345 goto create_err;
2346 }
2347
2348 return 0;
2349
2350create_err:
548c4940 2351 while (i-- > first_index)
1738cd3e
NB
2352 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2353
2354 return rc;
2355}
2356
2357static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2358{
2359 struct ena_com_dev *ena_dev;
38005ca8 2360 struct ena_com_create_io_ctx ctx;
1738cd3e
NB
2361 struct ena_ring *rx_ring;
2362 u32 msix_vector;
2363 u16 ena_qid;
2364 int rc;
2365
2366 ena_dev = adapter->ena_dev;
2367
2368 rx_ring = &adapter->rx_ring[qid];
2369 msix_vector = ENA_IO_IRQ_IDX(qid);
2370 ena_qid = ENA_IO_RXQ_IDX(qid);
2371
38005ca8
AK
2372 memset(&ctx, 0x0, sizeof(ctx));
2373
1738cd3e
NB
2374 ctx.qid = ena_qid;
2375 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2376 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2377 ctx.msix_vector = msix_vector;
13ca32a6 2378 ctx.queue_size = rx_ring->ring_size;
1738cd3e
NB
2379 ctx.numa_node = cpu_to_node(rx_ring->cpu);
2380
2381 rc = ena_com_create_io_queue(ena_dev, &ctx);
2382 if (rc) {
2383 netif_err(adapter, ifup, adapter->netdev,
2384 "Failed to create I/O RX queue num %d rc: %d\n",
2385 qid, rc);
2386 return rc;
2387 }
2388
2389 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2390 &rx_ring->ena_com_io_sq,
2391 &rx_ring->ena_com_io_cq);
2392 if (rc) {
2393 netif_err(adapter, ifup, adapter->netdev,
2394 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2395 qid, rc);
838c93dc 2396 goto err;
1738cd3e
NB
2397 }
2398
2399 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2400
838c93dc
SJ
2401 return rc;
2402err:
2403 ena_com_destroy_io_queue(ena_dev, ena_qid);
1738cd3e
NB
2404 return rc;
2405}
2406
2407static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2408{
2409 struct ena_com_dev *ena_dev = adapter->ena_dev;
2410 int rc, i;
2411
faa615f9 2412 for (i = 0; i < adapter->num_io_queues; i++) {
1738cd3e
NB
2413 rc = ena_create_io_rx_queue(adapter, i);
2414 if (rc)
2415 goto create_err;
282faf61 2416 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
1738cd3e
NB
2417 }
2418
2419 return 0;
2420
2421create_err:
282faf61
AK
2422 while (i--) {
2423 cancel_work_sync(&adapter->ena_napi[i].dim.work);
1738cd3e 2424 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
282faf61 2425 }
1738cd3e
NB
2426
2427 return rc;
2428}
2429
13ca32a6 2430static void set_io_rings_size(struct ena_adapter *adapter,
548c4940
SJ
2431 int new_tx_size,
2432 int new_rx_size)
13ca32a6
SJ
2433{
2434 int i;
2435
faa615f9 2436 for (i = 0; i < adapter->num_io_queues; i++) {
13ca32a6
SJ
2437 adapter->tx_ring[i].ring_size = new_tx_size;
2438 adapter->rx_ring[i].ring_size = new_rx_size;
2439 }
2440}
2441
 2442/* This function allows queue allocation to back off when the system is
2443 * low on memory. If there is not enough memory to allocate io queues
2444 * the driver will try to allocate smaller queues.
2445 *
2446 * The backoff algorithm is as follows:
 2447 * 1. Try to allocate TX and RX; if successful
2448 * 1.1. return success
2449 *
2450 * 2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
2451 *
2452 * 3. If TX or RX is smaller than 256
2453 * 3.1. return failure.
2454 * 4. else
2455 * 4.1. go back to 1.
2456 */
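/* A worked example of the backoff, with hypothetical sizes and the
 * 256-entry minimum mentioned above (ENA_MIN_RING_SIZE in the code):
 *   requested TX=8192, RX=1024
 *   iteration 1: allocation fails with -ENOMEM, TX is larger -> TX=4096
 *   iteration 2: allocation fails again                      -> TX=2048
 *   iteration 3: allocation succeeds                         -> return 0
 * If either new size would drop below the minimum, the function gives up
 * and returns the last error instead of retrying.
 */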
2457static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2458{
2459 int rc, cur_rx_ring_size, cur_tx_ring_size;
2460 int new_rx_ring_size, new_tx_ring_size;
2461
2462 /* current queue sizes might be set to smaller than the requested
2463 * ones due to past queue allocation failures.
2464 */
2465 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
46143e58 2466 adapter->requested_rx_ring_size);
13ca32a6
SJ
2467
2468 while (1) {
548c4940
SJ
2469 if (ena_xdp_present(adapter)) {
2470 rc = ena_setup_and_create_all_xdp_queues(adapter);
2471
2472 if (rc)
2473 goto err_setup_tx;
2474 }
2475 rc = ena_setup_tx_resources_in_range(adapter,
2476 0,
2477 adapter->num_io_queues);
13ca32a6
SJ
2478 if (rc)
2479 goto err_setup_tx;
2480
548c4940
SJ
2481 rc = ena_create_io_tx_queues_in_range(adapter,
2482 0,
2483 adapter->num_io_queues);
13ca32a6
SJ
2484 if (rc)
2485 goto err_create_tx_queues;
2486
2487 rc = ena_setup_all_rx_resources(adapter);
2488 if (rc)
2489 goto err_setup_rx;
2490
2491 rc = ena_create_all_io_rx_queues(adapter);
2492 if (rc)
2493 goto err_create_rx_queues;
2494
2495 return 0;
2496
2497err_create_rx_queues:
2498 ena_free_all_io_rx_resources(adapter);
2499err_setup_rx:
2500 ena_destroy_all_tx_queues(adapter);
2501err_create_tx_queues:
2502 ena_free_all_io_tx_resources(adapter);
2503err_setup_tx:
2504 if (rc != -ENOMEM) {
2505 netif_err(adapter, ifup, adapter->netdev,
2506 "Queue creation failed with error code %d\n",
46143e58 2507 rc);
13ca32a6
SJ
2508 return rc;
2509 }
2510
2511 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2512 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2513
2514 netif_err(adapter, ifup, adapter->netdev,
2515 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2516 cur_tx_ring_size, cur_rx_ring_size);
2517
2518 new_tx_ring_size = cur_tx_ring_size;
2519 new_rx_ring_size = cur_rx_ring_size;
2520
2521 /* Decrease the size of the larger queue, or
2522 * decrease both if they are the same size.
2523 */
2524 if (cur_rx_ring_size <= cur_tx_ring_size)
2525 new_tx_ring_size = cur_tx_ring_size / 2;
2526 if (cur_rx_ring_size >= cur_tx_ring_size)
2527 new_rx_ring_size = cur_rx_ring_size / 2;
2528
3e5bfb18 2529 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
46143e58 2530 new_rx_ring_size < ENA_MIN_RING_SIZE) {
13ca32a6
SJ
2531 netif_err(adapter, ifup, adapter->netdev,
2532 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2533 ENA_MIN_RING_SIZE);
2534 return rc;
2535 }
2536
2537 netif_err(adapter, ifup, adapter->netdev,
2538 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2539 new_tx_ring_size,
2540 new_rx_ring_size);
2541
2542 set_io_rings_size(adapter, new_tx_ring_size,
2543 new_rx_ring_size);
2544 }
2545}
2546
1738cd3e
NB
2547static int ena_up(struct ena_adapter *adapter)
2548{
548c4940 2549 int io_queue_count, rc, i;
1738cd3e
NB
2550
2551 netdev_dbg(adapter->netdev, "%s\n", __func__);
2552
548c4940 2553 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2554 ena_setup_io_intr(adapter);
2555
78a55d05
AK
2556 /* napi poll functions should be initialized before running
 2557 * request_irq(), to handle a rare condition where a pending
 2558 * interrupt causes the ISR to fire immediately while the poll
 2559 * function isn't set yet, leading to a NULL dereference
2560 */
548c4940 2561 ena_init_napi_in_range(adapter, 0, io_queue_count);
78a55d05 2562
1738cd3e
NB
2563 rc = ena_request_io_irq(adapter);
2564 if (rc)
2565 goto err_req_irq;
2566
13ca32a6 2567 rc = create_queues_with_size_backoff(adapter);
1738cd3e 2568 if (rc)
13ca32a6 2569 goto err_create_queues_with_backoff;
1738cd3e
NB
2570
2571 rc = ena_up_complete(adapter);
2572 if (rc)
2573 goto err_up;
2574
2575 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2576 netif_carrier_on(adapter->netdev);
2577
2578 u64_stats_update_begin(&adapter->syncp);
2579 adapter->dev_stats.interface_up++;
2580 u64_stats_update_end(&adapter->syncp);
2581
2582 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2583
7853b49c 2584 /* Enable completion queue interrupts */
faa615f9 2585 for (i = 0; i < adapter->num_io_queues; i++)
7853b49c
NB
2586 ena_unmask_interrupt(&adapter->tx_ring[i],
2587 &adapter->rx_ring[i]);
2588
2589 /* schedule napi in case we had pending packets
 2590 * from the last time we disabled napi
2591 */
548c4940 2592 for (i = 0; i < io_queue_count; i++)
7853b49c
NB
2593 napi_schedule(&adapter->ena_napi[i].napi);
2594
1738cd3e
NB
2595 return rc;
2596
2597err_up:
1738cd3e 2598 ena_destroy_all_tx_queues(adapter);
1738cd3e 2599 ena_free_all_io_tx_resources(adapter);
13ca32a6
SJ
2600 ena_destroy_all_rx_queues(adapter);
2601 ena_free_all_io_rx_resources(adapter);
2602err_create_queues_with_backoff:
1738cd3e
NB
2603 ena_free_io_irq(adapter);
2604err_req_irq:
548c4940 2605 ena_del_napi_in_range(adapter, 0, io_queue_count);
1738cd3e
NB
2606
2607 return rc;
2608}
2609
2610static void ena_down(struct ena_adapter *adapter)
2611{
548c4940
SJ
2612 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2613
1738cd3e
NB
2614 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2615
2616 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2617
2618 u64_stats_update_begin(&adapter->syncp);
2619 adapter->dev_stats.interface_down++;
2620 u64_stats_update_end(&adapter->syncp);
2621
1738cd3e
NB
2622 netif_carrier_off(adapter->netdev);
2623 netif_tx_disable(adapter->netdev);
2624
3f6159db 2625 /* After this point the napi handler won't enable the tx queue */
548c4940 2626 ena_napi_disable_in_range(adapter, 0, io_queue_count);
3f6159db 2627
1738cd3e 2628 /* After destroying the queues there won't be any new interrupts */
3f6159db
NB
2629
2630 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2631 int rc;
2632
e2eed0e3 2633 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3f6159db
NB
2634 if (rc)
2635 dev_err(&adapter->pdev->dev, "Device reset failed\n");
58a54b9c
AK
2636 /* stop submitting admin commands on a device that was reset */
2637 ena_com_set_admin_running_state(adapter->ena_dev, false);
3f6159db
NB
2638 }
2639
1738cd3e
NB
2640 ena_destroy_all_io_queues(adapter);
2641
2642 ena_disable_io_intr_sync(adapter);
2643 ena_free_io_irq(adapter);
548c4940 2644 ena_del_napi_in_range(adapter, 0, io_queue_count);
1738cd3e
NB
2645
2646 ena_free_all_tx_bufs(adapter);
2647 ena_free_all_rx_bufs(adapter);
2648 ena_free_all_io_tx_resources(adapter);
2649 ena_free_all_io_rx_resources(adapter);
2650}
2651
2652/* ena_open - Called when a network interface is made active
2653 * @netdev: network interface device structure
2654 *
2655 * Returns 0 on success, negative value on failure
2656 *
2657 * The open entry point is called when a network interface is made
2658 * active by the system (IFF_UP). At this point all resources needed
2659 * for transmit and receive operations are allocated, the interrupt
2660 * handler is registered with the OS, the watchdog timer is started,
2661 * and the stack is notified that the interface is ready.
2662 */
2663static int ena_open(struct net_device *netdev)
2664{
2665 struct ena_adapter *adapter = netdev_priv(netdev);
2666 int rc;
2667
2668 /* Notify the stack of the actual queue counts. */
faa615f9 2669 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
1738cd3e
NB
2670 if (rc) {
2671 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2672 return rc;
2673 }
2674
faa615f9 2675 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
1738cd3e
NB
2676 if (rc) {
2677 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2678 return rc;
2679 }
2680
2681 rc = ena_up(adapter);
2682 if (rc)
2683 return rc;
2684
2685 return rc;
2686}
2687
2688/* ena_close - Disables a network interface
2689 * @netdev: network interface device structure
2690 *
2691 * Returns 0, this is not allowed to fail
2692 *
2693 * The close entry point is called when an interface is de-activated
 2694 * by the OS. The hardware is still under the driver's control, but
2695 * needs to be disabled. A global MAC reset is issued to stop the
2696 * hardware, and all transmit and receive resources are freed.
2697 */
2698static int ena_close(struct net_device *netdev)
2699{
2700 struct ena_adapter *adapter = netdev_priv(netdev);
2701
2702 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2703
58a54b9c
AK
2704 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2705 return 0;
2706
1738cd3e
NB
2707 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2708 ena_down(adapter);
2709
ee4552aa
NB
 2710 /* Check the device status and issue a reset if needed */
2711 check_for_admin_com_state(adapter);
2712 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2713 netif_err(adapter, ifdown, adapter->netdev,
2714 "Destroy failure, restarting device\n");
2715 ena_dump_stats_to_dmesg(adapter);
2716 /* rtnl lock already obtained in dev_ioctl() layer */
cfa324a5 2717 ena_destroy_device(adapter, false);
ee4552aa
NB
2718 ena_restore_device(adapter);
2719 }
2720
1738cd3e
NB
2721 return 0;
2722}
2723
eece4d2a
SJ
2724int ena_update_queue_sizes(struct ena_adapter *adapter,
2725 u32 new_tx_size,
2726 u32 new_rx_size)
2727{
2413ea97 2728 bool dev_was_up;
eece4d2a 2729
2413ea97 2730 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
eece4d2a
SJ
2731 ena_close(adapter->netdev);
2732 adapter->requested_tx_ring_size = new_tx_size;
2733 adapter->requested_rx_ring_size = new_rx_size;
548c4940
SJ
2734 ena_init_io_rings(adapter,
2735 0,
2736 adapter->xdp_num_queues +
2737 adapter->num_io_queues);
2413ea97
SJ
2738 return dev_was_up ? ena_up(adapter) : 0;
2739}
2740
2741int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2742{
2743 struct ena_com_dev *ena_dev = adapter->ena_dev;
838c93dc 2744 int prev_channel_count;
2413ea97
SJ
2745 bool dev_was_up;
2746
2747 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2748 ena_close(adapter->netdev);
838c93dc 2749 prev_channel_count = adapter->num_io_queues;
2413ea97 2750 adapter->num_io_queues = new_channel_count;
548c4940
SJ
2751 if (ena_xdp_present(adapter) &&
2752 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2753 adapter->xdp_first_ring = new_channel_count;
2754 adapter->xdp_num_queues = new_channel_count;
838c93dc
SJ
2755 if (prev_channel_count > new_channel_count)
2756 ena_xdp_exchange_program_rx_in_range(adapter,
2757 NULL,
2758 new_channel_count,
2759 prev_channel_count);
2760 else
2761 ena_xdp_exchange_program_rx_in_range(adapter,
2762 adapter->xdp_bpf_prog,
2763 prev_channel_count,
2764 new_channel_count);
2765 }
2766
2413ea97
SJ
2767 /* We need to destroy the rss table so that the indirection
2768 * table will be reinitialized by ena_up()
2769 */
2770 ena_com_rss_destroy(ena_dev);
548c4940
SJ
2771 ena_init_io_rings(adapter,
2772 0,
2773 adapter->xdp_num_queues +
2774 adapter->num_io_queues);
2413ea97 2775 return dev_was_up ? ena_open(adapter->netdev) : 0;
eece4d2a
SJ
2776}
2777
1738cd3e
NB
2778static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
2779{
2780 u32 mss = skb_shinfo(skb)->gso_size;
2781 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2782 u8 l4_protocol = 0;
2783
2784 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2785 ena_tx_ctx->l4_csum_enable = 1;
2786 if (mss) {
2787 ena_tx_ctx->tso_enable = 1;
2788 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2789 ena_tx_ctx->l4_csum_partial = 0;
2790 } else {
2791 ena_tx_ctx->tso_enable = 0;
2792 ena_meta->l4_hdr_len = 0;
2793 ena_tx_ctx->l4_csum_partial = 1;
2794 }
2795
2796 switch (ip_hdr(skb)->version) {
2797 case IPVERSION:
2798 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2799 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2800 ena_tx_ctx->df = 1;
2801 if (mss)
2802 ena_tx_ctx->l3_csum_enable = 1;
2803 l4_protocol = ip_hdr(skb)->protocol;
2804 break;
2805 case 6:
2806 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2807 l4_protocol = ipv6_hdr(skb)->nexthdr;
2808 break;
2809 default:
2810 break;
2811 }
2812
2813 if (l4_protocol == IPPROTO_TCP)
2814 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2815 else
2816 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2817
2818 ena_meta->mss = mss;
2819 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2820 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2821 ena_tx_ctx->meta_valid = 1;
2822
2823 } else {
2824 ena_tx_ctx->meta_valid = 0;
2825 }
2826}
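/* Background for the helper above (standard kernel semantics, not
 * driver-specific): skb->ip_summed == CHECKSUM_PARTIAL means the stack
 * asks the device to finish the L4 checksum, and a non-zero gso_size
 * marks a TSO packet; either condition requires the driver to fill the
 * metadata descriptor, which is why both share the same branch.
 */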
2827
2828static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2829 struct sk_buff *skb)
2830{
2831 int num_frags, header_len, rc;
2832
2833 num_frags = skb_shinfo(skb)->nr_frags;
2834 header_len = skb_headlen(skb);
2835
2836 if (num_frags < tx_ring->sgl_size)
2837 return 0;
2838
2839 if ((num_frags == tx_ring->sgl_size) &&
2840 (header_len < tx_ring->tx_max_header_size))
2841 return 0;
2842
2843 u64_stats_update_begin(&tx_ring->syncp);
2844 tx_ring->tx_stats.linearize++;
2845 u64_stats_update_end(&tx_ring->syncp);
2846
2847 rc = skb_linearize(skb);
2848 if (unlikely(rc)) {
2849 u64_stats_update_begin(&tx_ring->syncp);
2850 tx_ring->tx_stats.linearize_failed++;
2851 u64_stats_update_end(&tx_ring->syncp);
2852 }
2853
2854 return rc;
2855}
2856
38005ca8
AK
2857static int ena_tx_map_skb(struct ena_ring *tx_ring,
2858 struct ena_tx_buffer *tx_info,
2859 struct sk_buff *skb,
2860 void **push_hdr,
2861 u16 *header_len)
1738cd3e 2862{
38005ca8 2863 struct ena_adapter *adapter = tx_ring->adapter;
1738cd3e 2864 struct ena_com_buf *ena_buf;
1738cd3e 2865 dma_addr_t dma;
38005ca8
AK
2866 u32 skb_head_len, frag_len, last_frag;
2867 u16 push_len = 0;
2868 u16 delta = 0;
2869 int i = 0;
1738cd3e 2870
38005ca8 2871 skb_head_len = skb_headlen(skb);
1738cd3e 2872 tx_info->skb = skb;
38005ca8 2873 ena_buf = tx_info->bufs;
1738cd3e
NB
2874
2875 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
38005ca8
AK
 2876 /* When the device is in LLQ mode, the driver will copy
 2877 * the header into the device memory space.
 2878 * The ena_com layer assumes the header is in a linear
 2879 * memory space.
 2880 * This assumption might be wrong since part of the header
 2881 * can be in the fragmented buffers.
 2882 * Use skb_header_pointer() to make sure the header is in a
 2883 * linear memory space.
2884 */
2885
2886 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2887 *push_hdr = skb_header_pointer(skb, 0, push_len,
2888 tx_ring->push_buf_intermediate_buf);
2889 *header_len = push_len;
2890 if (unlikely(skb->data != *push_hdr)) {
2891 u64_stats_update_begin(&tx_ring->syncp);
2892 tx_ring->tx_stats.llq_buffer_copy++;
2893 u64_stats_update_end(&tx_ring->syncp);
2894
2895 delta = push_len - skb_head_len;
2896 }
1738cd3e 2897 } else {
38005ca8
AK
2898 *push_hdr = NULL;
2899 *header_len = min_t(u32, skb_head_len,
2900 tx_ring->tx_max_header_size);
1738cd3e
NB
2901 }
2902
38005ca8 2903 netif_dbg(adapter, tx_queued, adapter->netdev,
1738cd3e 2904 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
38005ca8 2905 *push_hdr, push_len);
1738cd3e 2906
38005ca8 2907 if (skb_head_len > push_len) {
1738cd3e 2908 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
38005ca8
AK
2909 skb_head_len - push_len, DMA_TO_DEVICE);
2910 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
1738cd3e
NB
2911 goto error_report_dma_error;
2912
2913 ena_buf->paddr = dma;
38005ca8 2914 ena_buf->len = skb_head_len - push_len;
1738cd3e
NB
2915
2916 ena_buf++;
2917 tx_info->num_of_bufs++;
38005ca8
AK
2918 tx_info->map_linear_data = 1;
2919 } else {
2920 tx_info->map_linear_data = 0;
1738cd3e
NB
2921 }
2922
2923 last_frag = skb_shinfo(skb)->nr_frags;
2924
2925 for (i = 0; i < last_frag; i++) {
2926 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2927
38005ca8
AK
2928 frag_len = skb_frag_size(frag);
2929
2930 if (unlikely(delta >= frag_len)) {
2931 delta -= frag_len;
2932 continue;
2933 }
2934
2935 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2936 frag_len - delta, DMA_TO_DEVICE);
2937 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
1738cd3e
NB
2938 goto error_report_dma_error;
2939
2940 ena_buf->paddr = dma;
38005ca8 2941 ena_buf->len = frag_len - delta;
1738cd3e 2942 ena_buf++;
38005ca8
AK
2943 tx_info->num_of_bufs++;
2944 delta = 0;
1738cd3e
NB
2945 }
2946
38005ca8
AK
2947 return 0;
2948
2949error_report_dma_error:
2950 u64_stats_update_begin(&tx_ring->syncp);
2951 tx_ring->tx_stats.dma_mapping_err++;
2952 u64_stats_update_end(&tx_ring->syncp);
2953 netdev_warn(adapter->netdev, "failed to map skb\n");
2954
2955 tx_info->skb = NULL;
2956
2957 tx_info->num_of_bufs += i;
548c4940 2958 ena_unmap_tx_buff(tx_ring, tx_info);
38005ca8
AK
2959
2960 return -EINVAL;
2961}
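/* The 'delta' bookkeeping above handles the LLQ corner case where the
 * pushed header is longer than the linear part of the skb: those header
 * bytes that live in the first fragment(s) were already copied out via
 * skb_header_pointer(), so the fragment DMA mappings start 'delta' bytes
 * into the fragment (or skip a fragment entirely when delta covers it).
 */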
2962
2963/* Called with netif_tx_lock. */
2964static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2965{
2966 struct ena_adapter *adapter = netdev_priv(dev);
2967 struct ena_tx_buffer *tx_info;
2968 struct ena_com_tx_ctx ena_tx_ctx;
2969 struct ena_ring *tx_ring;
2970 struct netdev_queue *txq;
2971 void *push_hdr;
2972 u16 next_to_use, req_id, header_len;
548c4940 2973 int qid, rc;
38005ca8
AK
2974
2975 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2976 /* Determine which tx ring we will be placed on */
2977 qid = skb_get_queue_mapping(skb);
2978 tx_ring = &adapter->tx_ring[qid];
2979 txq = netdev_get_tx_queue(dev, qid);
2980
2981 rc = ena_check_and_linearize_skb(tx_ring, skb);
2982 if (unlikely(rc))
2983 goto error_drop_packet;
2984
2985 skb_tx_timestamp(skb);
2986
2987 next_to_use = tx_ring->next_to_use;
f9172498 2988 req_id = tx_ring->free_ids[next_to_use];
38005ca8
AK
2989 tx_info = &tx_ring->tx_buffer_info[req_id];
2990 tx_info->num_of_bufs = 0;
2991
2992 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2993
2994 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2995 if (unlikely(rc))
2996 goto error_drop_packet;
1738cd3e
NB
2997
2998 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2999 ena_tx_ctx.ena_bufs = tx_info->bufs;
3000 ena_tx_ctx.push_header = push_hdr;
3001 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
3002 ena_tx_ctx.req_id = req_id;
3003 ena_tx_ctx.header_len = header_len;
3004
3005 /* set flags and meta data */
3006 ena_tx_csum(&ena_tx_ctx, skb);
3007
548c4940
SJ
3008 rc = ena_xmit_common(dev,
3009 tx_ring,
3010 tx_info,
3011 &ena_tx_ctx,
3012 next_to_use,
3013 skb->len);
3014 if (rc)
1738cd3e 3015 goto error_unmap_dma;
1738cd3e
NB
3016
3017 netdev_tx_sent_queue(txq, skb->len);
3018
1738cd3e
NB
 3019 /* Stop the queue when no more space is available. The packet can have up
 3020 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header
3021 * (if the header is larger than tx_max_header_size).
3022 */
689b2bda
AK
3023 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3024 tx_ring->sgl_size + 2))) {
1738cd3e
NB
3025 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3026 __func__, qid);
3027
3028 netif_tx_stop_queue(txq);
3029 u64_stats_update_begin(&tx_ring->syncp);
3030 tx_ring->tx_stats.queue_stop++;
3031 u64_stats_update_end(&tx_ring->syncp);
3032
 3033 /* There is a rare condition where this function decides to
3034 * stop the queue but meanwhile clean_tx_irq updates
3035 * next_to_completion and terminates.
3036 * The queue will remain stopped forever.
37dff155
NB
3037 * To solve this issue add a mb() to make sure that
 3038 * netif_tx_stop_queue() write is visible before checking if
3039 * there is additional space in the queue.
1738cd3e 3040 */
37dff155 3041 smp_mb();
1738cd3e 3042
689b2bda
AK
3043 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3044 ENA_TX_WAKEUP_THRESH)) {
1738cd3e
NB
3045 netif_tx_wake_queue(txq);
3046 u64_stats_update_begin(&tx_ring->syncp);
3047 tx_ring->tx_stats.queue_wakeup++;
3048 u64_stats_update_end(&tx_ring->syncp);
3049 }
3050 }
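	/* The smp_mb() above closes a race of roughly this shape (a
	 * hypothetical interleaving, for illustration only):
	 *   xmit context                    completion context (clean_tx_irq)
	 *   sees the SQ is full             frees completed descriptors
	 *   netif_tx_stop_queue(txq)        tests the stopped bit before the
	 *                                   store above is visible -> no wake
	 *   smp_mb(); re-check free space and wake the queue here if the
	 *   completion path freed room in the meantime.
	 */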
3051
6b16f9ee 3052 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
37dff155
NB
 3053 /* Trigger the DMA engine. ena_com_write_sq_doorbell()
 3054 * has an mb()
3055 */
3056 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
1738cd3e
NB
3057 u64_stats_update_begin(&tx_ring->syncp);
3058 tx_ring->tx_stats.doorbells++;
3059 u64_stats_update_end(&tx_ring->syncp);
3060 }
3061
3062 return NETDEV_TX_OK;
3063
1738cd3e 3064error_unmap_dma:
548c4940 3065 ena_unmap_tx_buff(tx_ring, tx_info);
38005ca8 3066 tx_info->skb = NULL;
1738cd3e
NB
3067
3068error_drop_packet:
1738cd3e
NB
3069 dev_kfree_skb(skb);
3070 return NETDEV_TX_OK;
3071}
3072
1738cd3e 3073static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
a350ecce 3074 struct net_device *sb_dev)
1738cd3e
NB
3075{
3076 u16 qid;
 3077 /* We suspect that this is good for in-kernel network services that
 3078 * want to loop incoming skb rx to tx; with normal user-generated traffic
 3079 * we will most probably not get to this
3080 */
3081 if (skb_rx_queue_recorded(skb))
3082 qid = skb_get_rx_queue(skb);
3083 else
a350ecce 3084 qid = netdev_pick_tx(dev, skb, NULL);
1738cd3e
NB
3085
3086 return qid;
3087}
3088
46143e58 3089static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
1738cd3e
NB
3090{
3091 struct ena_admin_host_info *host_info;
3092 int rc;
3093
3094 /* Allocate only the host info */
3095 rc = ena_com_allocate_host_info(ena_dev);
3096 if (rc) {
3097 pr_err("Cannot allocate host info\n");
3098 return;
3099 }
3100
3101 host_info = ena_dev->host_attr.host_info;
3102
095f2f1f 3103 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
1738cd3e
NB
3104 host_info->os_type = ENA_ADMIN_OS_LINUX;
3105 host_info->kernel_ver = LINUX_VERSION_CODE;
f9133088 3106 strlcpy(host_info->kernel_ver_str, utsname()->version,
1738cd3e
NB
3107 sizeof(host_info->kernel_ver_str) - 1);
3108 host_info->os_dist = 0;
3109 strncpy(host_info->os_dist_str, utsname()->release,
3110 sizeof(host_info->os_dist_str) - 1);
92040c6d
AK
3111 host_info->driver_version =
3112 (DRV_MODULE_GEN_MAJOR) |
3113 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3114 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3115 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
095f2f1f 3116 host_info->num_cpus = num_online_cpus();
1738cd3e 3117
bd21b0cc 3118 host_info->driver_supported_features =
68f236df 3119 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
bd21b0cc
AK
3120 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
3121
1738cd3e
NB
3122 rc = ena_com_set_host_attributes(ena_dev);
3123 if (rc) {
d1497638 3124 if (rc == -EOPNOTSUPP)
1738cd3e
NB
3125 pr_warn("Cannot set host attributes\n");
3126 else
3127 pr_err("Cannot set host attributes\n");
3128
3129 goto err;
3130 }
3131
3132 return;
3133
3134err:
3135 ena_com_delete_host_info(ena_dev);
3136}
3137
3138static void ena_config_debug_area(struct ena_adapter *adapter)
3139{
3140 u32 debug_area_size;
3141 int rc, ss_count;
3142
3143 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3144 if (ss_count <= 0) {
3145 netif_err(adapter, drv, adapter->netdev,
3146 "SS count is negative\n");
3147 return;
3148 }
3149
 3150 /* allocate 32 bytes for each string and 64 bits for the value */
3151 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
3152
3153 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3154 if (rc) {
3155 pr_err("Cannot allocate debug area\n");
3156 return;
3157 }
3158
3159 rc = ena_com_set_host_attributes(adapter->ena_dev);
3160 if (rc) {
d1497638 3161 if (rc == -EOPNOTSUPP)
1738cd3e
NB
3162 netif_warn(adapter, drv, adapter->netdev,
3163 "Cannot set host attributes\n");
3164 else
3165 netif_err(adapter, drv, adapter->netdev,
3166 "Cannot set host attributes\n");
3167 goto err;
3168 }
3169
3170 return;
3171err:
3172 ena_com_delete_debug_area(adapter->ena_dev);
3173}
3174
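/* The stats function below relies on the u64_stats seqcount pattern:
 * readers loop with u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq()
 * and retry if the datapath (which brackets its updates with
 * u64_stats_update_begin()/end()) changed the counters mid-read. On 64-bit
 * kernels this typically compiles down to plain loads; it mainly protects
 * 32-bit builds from torn 64-bit reads.
 */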
bc1f4470 3175static void ena_get_stats64(struct net_device *netdev,
3176 struct rtnl_link_stats64 *stats)
1738cd3e
NB
3177{
3178 struct ena_adapter *adapter = netdev_priv(netdev);
d81db240
NB
3179 struct ena_ring *rx_ring, *tx_ring;
3180 unsigned int start;
3181 u64 rx_drops;
5c665f8c 3182 u64 tx_drops;
d81db240 3183 int i;
1738cd3e
NB
3184
3185 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
bc1f4470 3186 return;
1738cd3e 3187
faa615f9 3188 for (i = 0; i < adapter->num_io_queues; i++) {
d81db240
NB
3189 u64 bytes, packets;
3190
3191 tx_ring = &adapter->tx_ring[i];
1738cd3e 3192
d81db240
NB
3193 do {
3194 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
3195 packets = tx_ring->tx_stats.cnt;
3196 bytes = tx_ring->tx_stats.bytes;
3197 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
1738cd3e 3198
d81db240
NB
3199 stats->tx_packets += packets;
3200 stats->tx_bytes += bytes;
3201
3202 rx_ring = &adapter->rx_ring[i];
3203
3204 do {
3205 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
3206 packets = rx_ring->rx_stats.cnt;
3207 bytes = rx_ring->rx_stats.bytes;
3208 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
3209
3210 stats->rx_packets += packets;
3211 stats->rx_bytes += bytes;
3212 }
3213
3214 do {
3215 start = u64_stats_fetch_begin_irq(&adapter->syncp);
3216 rx_drops = adapter->dev_stats.rx_drops;
5c665f8c 3217 tx_drops = adapter->dev_stats.tx_drops;
d81db240 3218 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
1738cd3e 3219
d81db240 3220 stats->rx_dropped = rx_drops;
5c665f8c 3221 stats->tx_dropped = tx_drops;
1738cd3e
NB
3222
3223 stats->multicast = 0;
3224 stats->collisions = 0;
3225
3226 stats->rx_length_errors = 0;
3227 stats->rx_crc_errors = 0;
3228 stats->rx_frame_errors = 0;
3229 stats->rx_fifo_errors = 0;
3230 stats->rx_missed_errors = 0;
3231 stats->tx_window_errors = 0;
3232
3233 stats->rx_errors = 0;
3234 stats->tx_errors = 0;
1738cd3e
NB
3235}
3236
3237static const struct net_device_ops ena_netdev_ops = {
3238 .ndo_open = ena_open,
3239 .ndo_stop = ena_close,
3240 .ndo_start_xmit = ena_start_xmit,
3241 .ndo_select_queue = ena_select_queue,
3242 .ndo_get_stats64 = ena_get_stats64,
3243 .ndo_tx_timeout = ena_tx_timeout,
3244 .ndo_change_mtu = ena_change_mtu,
3245 .ndo_set_mac_address = NULL,
3246 .ndo_validate_addr = eth_validate_addr,
838c93dc 3247 .ndo_bpf = ena_xdp,
1738cd3e
NB
3248};
3249
1738cd3e
NB
3250static int ena_device_validate_params(struct ena_adapter *adapter,
3251 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3252{
3253 struct net_device *netdev = adapter->netdev;
3254 int rc;
3255
3256 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3257 adapter->mac_addr);
3258 if (!rc) {
3259 netif_err(adapter, drv, netdev,
3260 "Error, mac address are different\n");
3261 return -EINVAL;
3262 }
3263
1738cd3e
NB
3264 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3265 netif_err(adapter, drv, netdev,
3266 "Error, device max mtu is smaller than netdev MTU\n");
3267 return -EINVAL;
3268 }
3269
3270 return 0;
3271}
3272
3273static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3274 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3275 bool *wd_state)
3276{
3277 struct device *dev = &pdev->dev;
3278 bool readless_supported;
3279 u32 aenq_groups;
3280 int dma_width;
3281 int rc;
3282
3283 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3284 if (rc) {
3285 dev_err(dev, "failed to init mmio read less\n");
3286 return rc;
3287 }
3288
 3289 /* The PCIe configuration space revision id indicates whether mmio reg
3290 * read is disabled
3291 */
3292 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3293 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3294
e2eed0e3 3295 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
1738cd3e
NB
3296 if (rc) {
3297 dev_err(dev, "Can not reset device\n");
3298 goto err_mmio_read_less;
3299 }
3300
3301 rc = ena_com_validate_version(ena_dev);
3302 if (rc) {
3303 dev_err(dev, "device version is too low\n");
3304 goto err_mmio_read_less;
3305 }
3306
3307 dma_width = ena_com_get_dma_width(ena_dev);
3308 if (dma_width < 0) {
3309 dev_err(dev, "Invalid dma width value %d", dma_width);
6e22066f 3310 rc = dma_width;
1738cd3e
NB
3311 goto err_mmio_read_less;
3312 }
3313
3314 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3315 if (rc) {
3316 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
3317 goto err_mmio_read_less;
3318 }
3319
3320 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3321 if (rc) {
3322 dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
3323 rc);
3324 goto err_mmio_read_less;
3325 }
3326
3327 /* ENA admin level init */
f1e90f6e 3328 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
1738cd3e
NB
3329 if (rc) {
3330 dev_err(dev,
3331 "Can not initialize ena admin queue with device\n");
3332 goto err_mmio_read_less;
3333 }
3334
 3335 /* To enable the MSI-X interrupts the driver needs to know the number
3336 * of queues. So the driver uses polling mode to retrieve this
3337 * information
3338 */
3339 ena_com_set_admin_polling_mode(ena_dev, true);
3340
095f2f1f 3341 ena_config_host_info(ena_dev, pdev);
dd8427a7 3342
1738cd3e
NB
 3343 /* Get Device Attributes */
3344 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3345 if (rc) {
3346 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3347 goto err_admin_init;
3348 }
3349
 3350 /* Try to turn on all the available aenq groups */
3351 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3352 BIT(ENA_ADMIN_FATAL_ERROR) |
3353 BIT(ENA_ADMIN_WARNING) |
3354 BIT(ENA_ADMIN_NOTIFICATION) |
3355 BIT(ENA_ADMIN_KEEP_ALIVE);
3356
3357 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3358
3359 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3360 if (rc) {
3361 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3362 goto err_admin_init;
3363 }
3364
3365 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3366
1738cd3e
NB
3367 return 0;
3368
3369err_admin_init:
dd8427a7 3370 ena_com_delete_host_info(ena_dev);
1738cd3e
NB
3371 ena_com_admin_destroy(ena_dev);
3372err_mmio_read_less:
3373 ena_com_mmio_reg_read_request_destroy(ena_dev);
3374
3375 return rc;
3376}
3377
4d192660 3378static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
1738cd3e
NB
3379{
3380 struct ena_com_dev *ena_dev = adapter->ena_dev;
3381 struct device *dev = &adapter->pdev->dev;
3382 int rc;
3383
4d192660 3384 rc = ena_enable_msix(adapter);
1738cd3e
NB
3385 if (rc) {
3386 dev_err(dev, "Can not reserve msix vectors\n");
3387 return rc;
3388 }
3389
3390 ena_setup_mgmnt_intr(adapter);
3391
3392 rc = ena_request_mgmnt_irq(adapter);
3393 if (rc) {
3394 dev_err(dev, "Can not setup management interrupts\n");
3395 goto err_disable_msix;
3396 }
3397
3398 ena_com_set_admin_polling_mode(ena_dev, false);
3399
3400 ena_com_admin_aenq_enable(ena_dev);
3401
3402 return 0;
3403
3404err_disable_msix:
06443684
NB
3405 ena_disable_msix(adapter);
3406
1738cd3e
NB
3407 return rc;
3408}
3409
cfa324a5 3410static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
1738cd3e 3411{
1738cd3e
NB
3412 struct net_device *netdev = adapter->netdev;
3413 struct ena_com_dev *ena_dev = adapter->ena_dev;
8c5c7abd 3414 bool dev_up;
3f6159db 3415
fe870c77
NB
3416 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3417 return;
3418
3f6159db
NB
3419 netif_carrier_off(netdev);
3420
1738cd3e
NB
3421 del_timer_sync(&adapter->timer_service);
3422
1738cd3e 3423 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
8c5c7abd 3424 adapter->dev_up_before_reset = dev_up;
cfa324a5
NB
3425 if (!graceful)
3426 ena_com_set_admin_running_state(ena_dev, false);
1738cd3e 3427
ee4552aa
NB
3428 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3429 ena_down(adapter);
1738cd3e 3430
bd791175 3431 /* Stop the device from sending AENQ events (in case the reset flag is set
58a54b9c 3432 * and the device is up, ena_down() already performed the device reset).
8c5c7abd
NB
3433 */
3434 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3435 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3436
1738cd3e
NB
3437 ena_free_mgmnt_irq(adapter);
3438
06443684 3439 ena_disable_msix(adapter);
1738cd3e
NB
3440
3441 ena_com_abort_admin_commands(ena_dev);
3442
3443 ena_com_wait_for_abort_completion(ena_dev);
3444
3445 ena_com_admin_destroy(ena_dev);
3446
3447 ena_com_mmio_reg_read_request_destroy(ena_dev);
3448
c1c0e40b 3449 /* return reset reason to default value */
e2eed0e3 3450 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
8c5c7abd 3451
3f6159db 3452 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
fe870c77 3453 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
8c5c7abd 3454}
3f6159db 3455
8c5c7abd
NB
3456static int ena_restore_device(struct ena_adapter *adapter)
3457{
3458 struct ena_com_dev_get_features_ctx get_feat_ctx;
3459 struct ena_com_dev *ena_dev = adapter->ena_dev;
3460 struct pci_dev *pdev = adapter->pdev;
3461 bool wd_state;
3462 int rc;
1738cd3e 3463
d18e4f68 3464 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1738cd3e
NB
3465 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
3466 if (rc) {
3467 dev_err(&pdev->dev, "Can not initialize device\n");
3468 goto err;
3469 }
3470 adapter->wd_state = wd_state;
3471
3472 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3473 if (rc) {
3474 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3475 goto err_device_destroy;
3476 }
3477
4d192660 3478 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
1738cd3e
NB
3479 if (rc) {
3480 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3481 goto err_device_destroy;
3482 }
 3483 /* If the interface was up before the reset, bring it up */
8c5c7abd 3484 if (adapter->dev_up_before_reset) {
1738cd3e
NB
3485 rc = ena_up(adapter);
3486 if (rc) {
3487 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3488 goto err_disable_msix;
3489 }
3490 }
3491
fe870c77 3492 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
e1f1bd9b
AK
3493
3494 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3495 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3496 netif_carrier_on(adapter->netdev);
3497
1738cd3e 3498 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1a63443a 3499 dev_err(&pdev->dev, "Device reset completed successfully\n");
dfdde134 3500 adapter->last_keep_alive_jiffies = jiffies;
1738cd3e 3501
8c5c7abd 3502 return rc;
1738cd3e
NB
3503err_disable_msix:
3504 ena_free_mgmnt_irq(adapter);
06443684 3505 ena_disable_msix(adapter);
1738cd3e 3506err_device_destroy:
d7703ddb
AK
3507 ena_com_abort_admin_commands(ena_dev);
3508 ena_com_wait_for_abort_completion(ena_dev);
1738cd3e 3509 ena_com_admin_destroy(ena_dev);
d7703ddb 3510 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
e76ad21d 3511 ena_com_mmio_reg_read_request_destroy(ena_dev);
1738cd3e 3512err:
22b331c9 3513 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
d18e4f68 3514 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1738cd3e
NB
3515 dev_err(&pdev->dev,
3516 "Reset attempt failed. Can not reset the device\n");
8c5c7abd
NB
3517
3518 return rc;
3519}
3520
3521static void ena_fw_reset_device(struct work_struct *work)
3522{
3523 struct ena_adapter *adapter =
3524 container_of(work, struct ena_adapter, reset_task);
3525 struct pci_dev *pdev = adapter->pdev;
3526
3527 if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3528 dev_err(&pdev->dev,
3529 "device reset schedule while reset bit is off\n");
3530 return;
3531 }
3532 rtnl_lock();
cfa324a5 3533 ena_destroy_device(adapter, false);
8c5c7abd
NB
3534 ena_restore_device(adapter);
3535 rtnl_unlock();
1738cd3e
NB
3536}
3537
8510e1a3
NB
3538static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3539 struct ena_ring *rx_ring)
3540{
3541 if (likely(rx_ring->first_interrupt))
3542 return 0;
3543
3544 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3545 return 0;
3546
3547 rx_ring->no_interrupt_event_cnt++;
3548
3549 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3550 netif_err(adapter, rx_err, adapter->netdev,
3551 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3552 rx_ring->qid);
3553 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3554 smp_mb__before_atomic();
3555 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3556 return -EIO;
3557 }
3558
3559 return 0;
3560}
3561
3562static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3563 struct ena_ring *tx_ring)
1738cd3e
NB
3564{
3565 struct ena_tx_buffer *tx_buf;
3566 unsigned long last_jiffies;
800c55cb 3567 u32 missed_tx = 0;
11095fdb 3568 int i, rc = 0;
800c55cb
NB
3569
3570 for (i = 0; i < tx_ring->ring_size; i++) {
3571 tx_buf = &tx_ring->tx_buffer_info[i];
3572 last_jiffies = tx_buf->last_jiffies;
8510e1a3
NB
3573
3574 if (last_jiffies == 0)
3575 /* no pending Tx at this location */
3576 continue;
3577
3578 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
3579 2 * adapter->missing_tx_completion_to))) {
 3580 /* If after the graceful period the interrupt is still not
3581 * received, we schedule a reset
3582 */
3583 netif_err(adapter, tx_err, adapter->netdev,
3584 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3585 tx_ring->qid);
3586 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3587 smp_mb__before_atomic();
3588 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3589 return -EIO;
3590 }
3591
3592 if (unlikely(time_is_before_jiffies(last_jiffies +
3593 adapter->missing_tx_completion_to))) {
800c55cb
NB
3594 if (!tx_buf->print_once)
3595 netif_notice(adapter, tx_err, adapter->netdev,
3596 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
3597 tx_ring->qid, i);
3598
3599 tx_buf->print_once = 1;
3600 missed_tx++;
800c55cb
NB
3601 }
3602 }
3603
11095fdb
NB
3604 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3605 netif_err(adapter, tx_err, adapter->netdev,
3606 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3607 missed_tx,
3608 adapter->missing_tx_completion_threshold);
3609 adapter->reset_reason =
3610 ENA_REGS_RESET_MISS_TX_CMPL;
3611 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3612 rc = -EIO;
3613 }
3614
3615 u64_stats_update_begin(&tx_ring->syncp);
3616 tx_ring->tx_stats.missed_tx = missed_tx;
3617 u64_stats_update_end(&tx_ring->syncp);
3618
3619 return rc;
800c55cb
NB
3620}
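The two timeout checks above rely on the kernel's wrap-safe jiffies comparisons (time_is_before_jiffies()). A minimal userspace sketch in the same style, with hypothetical names and values, showing why the signed subtraction stays correct when the counter wraps:

#include <limits.h>
#include <stdio.h>

/* Wrap-safe "has the deadline passed" test, illustrative only: signed
 * subtraction keeps the comparison correct even after the counter wraps.
 */
static int deadline_passed(unsigned long now, unsigned long deadline)
{
	return (long)(now - deadline) >= 0;
}

int main(void)
{
	unsigned long deadline = ULONG_MAX - 15;	/* armed just before the wrap */
	unsigned long now = 16;				/* counter has since wrapped  */

	printf("%d\n", deadline_passed(now, deadline));	/* prints 1 */
	return 0;
}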
3621
8510e1a3 3622static void check_for_missing_completions(struct ena_adapter *adapter)
800c55cb 3623{
1738cd3e 3624 struct ena_ring *tx_ring;
8510e1a3 3625 struct ena_ring *rx_ring;
800c55cb 3626 int i, budget, rc;
548c4940 3627 int io_queue_count;
1738cd3e 3628
548c4940 3629 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
1738cd3e
NB
 3630 /* Make sure the driver isn't turning the device off in another process */
3631 smp_rmb();
3632
3633 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3634 return;
3635
3f6159db
NB
3636 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3637 return;
3638
82ef30f1
NB
3639 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3640 return;
3641
1738cd3e
NB
3642 budget = ENA_MONITORED_TX_QUEUES;
3643
548c4940 3644 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
1738cd3e 3645 tx_ring = &adapter->tx_ring[i];
8510e1a3
NB
3646 rx_ring = &adapter->rx_ring[i];
3647
3648 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3649 if (unlikely(rc))
3650 return;
1738cd3e 3651
548c4940
SJ
3652 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3653 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
800c55cb
NB
3654 if (unlikely(rc))
3655 return;
1738cd3e
NB
3656
3657 budget--;
3658 if (!budget)
3659 break;
3660 }
3661
548c4940 3662 adapter->last_monitored_tx_qid = i % io_queue_count;
1738cd3e
NB
3663}
3664
a3af7c18
NB
3665/* trigger napi schedule after 2 consecutive detections */
3666#define EMPTY_RX_REFILL 2
3667/* For the rare case where the device runs out of Rx descriptors and the
3668 * napi handler failed to refill new Rx descriptors (due to a lack of memory
3669 * for example).
3670 * This case will lead to a deadlock:
3671 * The device won't send interrupts since all the new Rx packets will be dropped
 3672 * The napi handler won't allocate new Rx descriptors, so the device won't
 3673 * be able to receive new packets.
3674 *
3675 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 3676 * It is recommended to have at least 512MB, with a minimum of 128MB for
 3677 * constrained environments.
3678 *
3679 * When such a situation is detected - Reschedule napi
3680 */
3681static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3682{
3683 struct ena_ring *rx_ring;
3684 int i, refill_required;
3685
3686 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3687 return;
3688
3689 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3690 return;
3691
faa615f9 3692 for (i = 0; i < adapter->num_io_queues; i++) {
a3af7c18
NB
3693 rx_ring = &adapter->rx_ring[i];
3694
7cfe9a55 3695 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
a3af7c18
NB
3696 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3697 rx_ring->empty_rx_queue++;
3698
3699 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3700 u64_stats_update_begin(&rx_ring->syncp);
3701 rx_ring->rx_stats.empty_rx_ring++;
3702 u64_stats_update_end(&rx_ring->syncp);
3703
3704 netif_err(adapter, drv, adapter->netdev,
3705 "trigger refill for ring %d\n", i);
3706
3707 napi_schedule(rx_ring->napi);
3708 rx_ring->empty_rx_queue = 0;
3709 }
3710 } else {
3711 rx_ring->empty_rx_queue = 0;
3712 }
3713 }
3714}
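As the comment above notes, this deadlock is really a symptom of the host running short on reclaimable memory; the driver can only reschedule NAPI. A small userspace sketch (not part of the driver) that reads vm.min_free_kbytes from the standard procfs path and flags values under the 128MB floor mentioned in the comment:

#include <stdio.h>

int main(void)
{
	unsigned long kbytes = 0;
	FILE *f = fopen("/proc/sys/vm/min_free_kbytes", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lu", &kbytes) != 1) {
		fclose(f);
		fprintf(stderr, "unexpected format\n");
		return 1;
	}
	fclose(f);

	/* 128MB is the lower bound suggested in the driver comment above */
	if (kbytes < 128UL * 1024)
		printf("min_free_kbytes = %lu kB, below the suggested 128MB floor\n", kbytes);
	else
		printf("min_free_kbytes = %lu kB\n", kbytes);
	return 0;
}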
3715
1738cd3e
NB
3716/* Check for keep alive expiration */
3717static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3718{
3719 unsigned long keep_alive_expired;
3720
3721 if (!adapter->wd_state)
3722 return;
3723
82ef30f1
NB
3724 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3725 return;
3726
2a6e5fa2
AK
3727 keep_alive_expired = adapter->last_keep_alive_jiffies +
3728 adapter->keep_alive_timeout;
1738cd3e
NB
3729 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3730 netif_err(adapter, drv, adapter->netdev,
3731 "Keep alive watchdog timeout.\n");
3732 u64_stats_update_begin(&adapter->syncp);
3733 adapter->dev_stats.wd_expired++;
3734 u64_stats_update_end(&adapter->syncp);
e2eed0e3 3735 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
1738cd3e
NB
3736 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3737 }
3738}
3739
3740static void check_for_admin_com_state(struct ena_adapter *adapter)
3741{
3742 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3743 netif_err(adapter, drv, adapter->netdev,
3744 "ENA admin queue is not in running state!\n");
3745 u64_stats_update_begin(&adapter->syncp);
3746 adapter->dev_stats.admin_q_pause++;
3747 u64_stats_update_end(&adapter->syncp);
e2eed0e3 3748 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
1738cd3e
NB
3749 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3750 }
3751}
3752
82ef30f1
NB
3753static void ena_update_hints(struct ena_adapter *adapter,
3754 struct ena_admin_ena_hw_hints *hints)
3755{
3756 struct net_device *netdev = adapter->netdev;
3757
3758 if (hints->admin_completion_tx_timeout)
3759 adapter->ena_dev->admin_queue.completion_timeout =
3760 hints->admin_completion_tx_timeout * 1000;
3761
3762 if (hints->mmio_read_timeout)
3763 /* convert to usec */
3764 adapter->ena_dev->mmio_read.reg_read_to =
3765 hints->mmio_read_timeout * 1000;
3766
3767 if (hints->missed_tx_completion_count_threshold_to_reset)
3768 adapter->missing_tx_completion_threshold =
3769 hints->missed_tx_completion_count_threshold_to_reset;
3770
3771 if (hints->missing_tx_completion_timeout) {
3772 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3773 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3774 else
3775 adapter->missing_tx_completion_to =
3776 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3777 }
3778
3779 if (hints->netdev_wd_timeout)
3780 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3781
3782 if (hints->driver_watchdog_timeout) {
3783 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3784 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3785 else
3786 adapter->keep_alive_timeout =
3787 msecs_to_jiffies(hints->driver_watchdog_timeout);
3788 }
3789}
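The timeout hints handled above are given in milliseconds; several are converted to jiffies with msecs_to_jiffies() (others to microseconds). A rough userspace approximation of that conversion, assuming HZ=250 (4 ms per tick); the real kernel helper likewise rounds up to the next tick:

#include <stdio.h>

#define EXAMPLE_HZ 250UL	/* assumption for the example; real HZ is a build-time config */

/* Approximate msecs_to_jiffies(): convert milliseconds to ticks, rounding up */
static unsigned long example_msecs_to_jiffies(unsigned long ms)
{
	return (ms * EXAMPLE_HZ + 999) / 1000;
}

int main(void)
{
	/* e.g. a 5000 ms missing-TX-completion hint becomes 1250 jiffies at HZ=250 */
	printf("%lu\n", example_msecs_to_jiffies(5000));
	return 0;
}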
3790
1738cd3e
NB
3791static void ena_update_host_info(struct ena_admin_host_info *host_info,
3792 struct net_device *netdev)
3793{
3794 host_info->supported_network_features[0] =
3795 netdev->features & GENMASK_ULL(31, 0);
3796 host_info->supported_network_features[1] =
3797 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3798}
3799
e99e88a9 3800static void ena_timer_service(struct timer_list *t)
1738cd3e 3801{
e99e88a9 3802 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
1738cd3e
NB
3803 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3804 struct ena_admin_host_info *host_info =
3805 adapter->ena_dev->host_attr.host_info;
3806
3807 check_for_missing_keep_alive(adapter);
3808
3809 check_for_admin_com_state(adapter);
3810
8510e1a3 3811 check_for_missing_completions(adapter);
1738cd3e 3812
a3af7c18
NB
3813 check_for_empty_rx_ring(adapter);
3814
1738cd3e
NB
3815 if (debug_area)
3816 ena_dump_stats_to_buf(adapter, debug_area);
3817
3818 if (host_info)
3819 ena_update_host_info(host_info, adapter->netdev);
3820
3f6159db 3821 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1738cd3e
NB
3822 netif_err(adapter, drv, adapter->netdev,
3823 "Trigger reset is on\n");
3824 ena_dump_stats_to_dmesg(adapter);
3825 queue_work(ena_wq, &adapter->reset_task);
3826 return;
3827 }
3828
3829 /* Reset the timer */
2a6e5fa2 3830 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1738cd3e
NB
3831}
3832
ba6f6b41 3833static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
736ce3f4
SJ
3834 struct ena_com_dev *ena_dev,
3835 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1738cd3e 3836{
ba6f6b41 3837 u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
31aa9857
SJ
3838
3839 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3840 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3841 &get_feat_ctx->max_queue_ext.max_queue_ext;
736ce3f4 3842 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
31aa9857 3843 max_queue_ext->max_rx_cq_num);
1738cd3e 3844
31aa9857
SJ
3845 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3846 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3847 } else {
3848 struct ena_admin_queue_feature_desc *max_queues =
3849 &get_feat_ctx->max_queues;
3850 io_tx_sq_num = max_queues->max_sq_num;
3851 io_tx_cq_num = max_queues->max_cq_num;
736ce3f4 3852 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
31aa9857
SJ
3853 }
3854
3855 /* In case of LLQ use the llq fields for the tx SQ/CQ */
9fd25592 3856 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
31aa9857 3857 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
1738cd3e 3858
736ce3f4
SJ
3859 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3860 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3861 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3862 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
1738cd3e 3863 /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
736ce3f4
SJ
3864 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3865 if (unlikely(!max_num_io_queues)) {
1738cd3e
NB
3866 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3867 return -EFAULT;
3868 }
3869
736ce3f4 3870 return max_num_io_queues;
1738cd3e
NB
3871}
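The queue-count calculation above is a chain of min() clamps. A worked userspace example with made-up numbers (16 online CPUs, a device advertising 32 RX/TX queues, 9 MSI-X vectors), assuming ENA_MAX_NUM_IO_QUEUES is 128:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int n;

	n = min_u32(16, 128);	/* num_online_cpus() vs ENA_MAX_NUM_IO_QUEUES */
	n = min_u32(n, 32);	/* io_rx_num                                  */
	n = min_u32(n, 32);	/* io_tx_sq_num                               */
	n = min_u32(n, 32);	/* io_tx_cq_num                               */
	n = min_u32(n, 9 - 1);	/* pci_msix_vec_count() minus 1 for mgmnt     */

	printf("max_num_io_queues = %u\n", n);	/* prints 8 */
	return 0;
}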
3872
38005ca8
AK
3873static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3874 struct ena_com_dev *ena_dev,
3875 struct ena_admin_feature_llq_desc *llq,
3876 struct ena_llq_configurations *llq_default_configurations)
1738cd3e
NB
3877{
3878 bool has_mem_bar;
38005ca8
AK
3879 int rc;
3880 u32 llq_feature_mask;
3881
3882 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3883 if (!(ena_dev->supported_features & llq_feature_mask)) {
3884 dev_err(&pdev->dev,
3885 "LLQ is not supported Fallback to host mode policy.\n");
3886 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3887 return 0;
3888 }
1738cd3e
NB
3889
3890 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3891
38005ca8
AK
3892 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3893 if (unlikely(rc)) {
3894 dev_err(&pdev->dev,
3895 "Failed to configure the device mode. Fallback to host mode policy.\n");
3896 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3897 return 0;
3898 }
3899
3900 /* Nothing to config, exit */
3901 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3902 return 0;
3903
3904 if (!has_mem_bar) {
3905 dev_err(&pdev->dev,
3906 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
1738cd3e 3907 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
38005ca8
AK
3908 return 0;
3909 }
3910
3911 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3912 pci_resource_start(pdev, ENA_MEM_BAR),
3913 pci_resource_len(pdev, ENA_MEM_BAR));
3914
3915 if (!ena_dev->mem_bar)
3916 return -EFAULT;
3917
3918 return 0;
1738cd3e
NB
3919}
3920
3921static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3922 struct net_device *netdev)
3923{
3924 netdev_features_t dev_features = 0;
3925
3926 /* Set offload features */
3927 if (feat->offload.tx &
3928 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3929 dev_features |= NETIF_F_IP_CSUM;
3930
3931 if (feat->offload.tx &
3932 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3933 dev_features |= NETIF_F_IPV6_CSUM;
3934
3935 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3936 dev_features |= NETIF_F_TSO;
3937
3938 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3939 dev_features |= NETIF_F_TSO6;
3940
3941 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3942 dev_features |= NETIF_F_TSO_ECN;
3943
3944 if (feat->offload.rx_supported &
3945 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3946 dev_features |= NETIF_F_RXCSUM;
3947
3948 if (feat->offload.rx_supported &
3949 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3950 dev_features |= NETIF_F_RXCSUM;
3951
3952 netdev->features =
3953 dev_features |
3954 NETIF_F_SG |
1738cd3e
NB
3955 NETIF_F_RXHASH |
3956 NETIF_F_HIGHDMA;
3957
3958 netdev->hw_features |= netdev->features;
3959 netdev->vlan_features |= netdev->features;
3960}
3961
3962static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3963 struct ena_com_dev_get_features_ctx *feat)
3964{
3965 struct net_device *netdev = adapter->netdev;
3966
3967 /* Copy mac address */
3968 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3969 eth_hw_addr_random(netdev);
3970 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3971 } else {
3972 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3973 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3974 }
3975
3976 /* Set offload features */
3977 ena_set_dev_offloads(feat, netdev);
3978
3979 adapter->max_mtu = feat->dev_attr.max_mtu;
d894be57
JW
3980 netdev->max_mtu = adapter->max_mtu;
3981 netdev->min_mtu = ENA_MIN_MTU;
1738cd3e
NB
3982}
3983
3984static int ena_rss_init_default(struct ena_adapter *adapter)
3985{
3986 struct ena_com_dev *ena_dev = adapter->ena_dev;
3987 struct device *dev = &adapter->pdev->dev;
3988 int rc, i;
3989 u32 val;
3990
3991 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3992 if (unlikely(rc)) {
3993 dev_err(dev, "Cannot init indirect table\n");
3994 goto err_rss_init;
3995 }
3996
3997 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
faa615f9 3998 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
1738cd3e
NB
3999 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
4000 ENA_IO_RXQ_IDX(val));
d1497638 4001 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
1738cd3e
NB
4002 dev_err(dev, "Cannot fill indirect table\n");
4003 goto err_fill_indir;
4004 }
4005 }
4006
c1bd17e5 4007 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
1738cd3e 4008 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
d1497638 4009 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
1738cd3e
NB
4010 dev_err(dev, "Cannot fill hash function\n");
4011 goto err_fill_indir;
4012 }
4013
4014 rc = ena_com_set_default_hash_ctrl(ena_dev);
d1497638 4015 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
1738cd3e
NB
4016 dev_err(dev, "Cannot fill hash control\n");
4017 goto err_fill_indir;
4018 }
4019
4020 return 0;
4021
4022err_fill_indir:
4023 ena_com_rss_destroy(ena_dev);
4024err_rss_init:
4025
4026 return rc;
4027}
4028
4029static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4030{
d79c3888 4031 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
0857d92f 4032
1738cd3e
NB
4033 pci_release_selected_regions(pdev, release_bars);
4034}
4035
c2b54204 4036static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
38005ca8
AK
4037{
4038 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
4039 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
4040 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
4041 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
4042 llq_config->llq_ring_entry_size_value = 128;
4043}
4044
4d192660 4045static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
1738cd3e 4046{
31aa9857
SJ
4047 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
4048 struct ena_com_dev *ena_dev = ctx->ena_dev;
4049 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
4050 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
4051 u32 max_tx_queue_size;
4052 u32 max_rx_queue_size;
1738cd3e 4053
4d192660 4054 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
31aa9857
SJ
4055 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4056 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
4057 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
4058 max_queue_ext->max_rx_sq_depth);
4059 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
1738cd3e 4060
31aa9857
SJ
4061 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4062 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4063 llq->max_llq_depth);
4064 else
4065 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4066 max_queue_ext->max_tx_sq_depth);
1738cd3e 4067
31aa9857
SJ
4068 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4069 max_queue_ext->max_per_packet_tx_descs);
4070 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4071 max_queue_ext->max_per_packet_rx_descs);
4072 } else {
4073 struct ena_admin_queue_feature_desc *max_queues =
4074 &ctx->get_feat_ctx->max_queues;
4075 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
4076 max_queues->max_sq_depth);
4077 max_tx_queue_size = max_queues->max_cq_depth;
4078
4079 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4080 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4081 llq->max_llq_depth);
4082 else
4083 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4084 max_queues->max_sq_depth);
4085
4086 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4087 max_queues->max_packet_tx_descs);
4088 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4089 max_queues->max_packet_rx_descs);
4090 }
4091
4092 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
4093 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
1738cd3e 4094
13ca32a6
SJ
4095 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
4096 max_tx_queue_size);
4097 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
4098 max_rx_queue_size);
31aa9857
SJ
4099
4100 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
4101 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
4102
31aa9857
SJ
4103 ctx->max_tx_queue_size = max_tx_queue_size;
4104 ctx->max_rx_queue_size = max_rx_queue_size;
4105 ctx->tx_queue_size = tx_queue_size;
4106 ctx->rx_queue_size = rx_queue_size;
1738cd3e 4107
31aa9857 4108 return 0;
1738cd3e
NB
4109}
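The ring-size calculation above clamps the default request between the minimum ring size and the device maximum, then rounds down to a power of two. A small userspace sketch with hypothetical numbers (device limit of 3000 descriptors, 1024 default, 256 minimum):

#include <stdio.h>

/* Userspace stand-ins for rounddown_pow_of_two() and clamp_val() */
static unsigned int rounddown_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p <= v / 2)
		p *= 2;
	return p;
}

static unsigned int clamp_u32(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int max_tx = rounddown_pow2(3000);	/* 2048 */
	unsigned int tx = clamp_u32(1024, 256, max_tx);	/* 1024 */

	tx = rounddown_pow2(tx);			/* still 1024 */
	printf("max = %u, requested = %u\n", max_tx, tx);
	return 0;
}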
4110
4111/* ena_probe - Device Initialization Routine
4112 * @pdev: PCI device information struct
4113 * @ent: entry in ena_pci_tbl
4114 *
4115 * Returns 0 on success, negative on failure
4116 *
4117 * ena_probe initializes an adapter identified by a pci_dev structure.
4118 * The OS initialization, configuring of the adapter private structure,
4119 * and a hardware reset occur.
4120 */
4121static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4122{
31aa9857 4123 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
0a39a35f 4124 struct ena_com_dev_get_features_ctx get_feat_ctx;
38005ca8 4125 struct ena_llq_configurations llq_config;
1738cd3e 4126 struct ena_com_dev *ena_dev = NULL;
83b92404 4127 struct ena_adapter *adapter;
83b92404
SJ
4128 struct net_device *netdev;
4129 static int adapters_found;
736ce3f4 4130 u32 max_num_io_queues;
83b92404 4131 char *queue_type_str;
1738cd3e 4132 bool wd_state;
736ce3f4 4133 int bars, rc;
1738cd3e
NB
4134
4135 dev_dbg(&pdev->dev, "%s\n", __func__);
4136
1738cd3e
NB
4137 rc = pci_enable_device_mem(pdev);
4138 if (rc) {
4139 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4140 return rc;
4141 }
4142
4143 pci_set_master(pdev);
4144
4145 ena_dev = vzalloc(sizeof(*ena_dev));
4146 if (!ena_dev) {
4147 rc = -ENOMEM;
4148 goto err_disable_device;
4149 }
4150
4151 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4152 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4153 if (rc) {
4154 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4155 rc);
4156 goto err_free_ena_dev;
4157 }
4158
0857d92f
NB
4159 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4160 pci_resource_start(pdev, ENA_REG_BAR),
4161 pci_resource_len(pdev, ENA_REG_BAR));
1738cd3e
NB
4162 if (!ena_dev->reg_bar) {
4163 dev_err(&pdev->dev, "failed to remap regs bar\n");
4164 rc = -EFAULT;
4165 goto err_free_region;
4166 }
4167
4bb7f4cf
AK
4168 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
4169
1738cd3e
NB
4170 ena_dev->dmadev = &pdev->dev;
4171
4172 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
4173 if (rc) {
4174 dev_err(&pdev->dev, "ena device init failed\n");
4175 if (rc == -ETIME)
4176 rc = -EPROBE_DEFER;
4177 goto err_free_region;
4178 }
4179
38005ca8 4180 set_default_llq_configurations(&llq_config);
1738cd3e 4181
38005ca8
AK
4182 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
4183 &llq_config);
4184 if (rc) {
 4185 dev_err(&pdev->dev, "Failed to set the queues placement policy\n");
4186 goto err_device_destroy;
1738cd3e
NB
4187 }
4188
31aa9857
SJ
4189 calc_queue_ctx.ena_dev = ena_dev;
4190 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
4191 calc_queue_ctx.pdev = pdev;
4192
13830937 4193 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
4d192660
SJ
4194 * Updated during device initialization with the real granularity
4195 */
1738cd3e 4196 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
15619e72 4197 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
79226cea 4198 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
736ce3f4 4199 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4d192660 4200 rc = ena_calc_io_queue_size(&calc_queue_ctx);
736ce3f4 4201 if (rc || !max_num_io_queues) {
1738cd3e
NB
4202 rc = -EFAULT;
4203 goto err_device_destroy;
4204 }
4205
1738cd3e 4206 /* dev zeroed in init_etherdev */
736ce3f4 4207 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
1738cd3e
NB
4208 if (!netdev) {
4209 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4210 rc = -ENOMEM;
4211 goto err_device_destroy;
4212 }
4213
4214 SET_NETDEV_DEV(netdev, &pdev->dev);
4215
4216 adapter = netdev_priv(netdev);
4217 pci_set_drvdata(pdev, adapter);
4218
4219 adapter->ena_dev = ena_dev;
4220 adapter->netdev = netdev;
4221 adapter->pdev = pdev;
4222
4223 ena_set_conf_feat_params(adapter, &get_feat_ctx);
4224
4225 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
e2eed0e3 4226 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
1738cd3e 4227
13ca32a6
SJ
4228 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
4229 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
9f9ae3f9
SJ
4230 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
4231 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
31aa9857
SJ
4232 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
4233 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
1738cd3e 4234
736ce3f4
SJ
4235 adapter->num_io_queues = max_num_io_queues;
4236 adapter->max_num_io_queues = max_num_io_queues;
0a39a35f 4237 adapter->last_monitored_tx_qid = 0;
736ce3f4 4238
548c4940
SJ
4239 adapter->xdp_first_ring = 0;
4240 adapter->xdp_num_queues = 0;
4241
1738cd3e
NB
4242 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
4243 adapter->wd_state = wd_state;
4244
4245 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4246
4247 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4248 if (rc) {
4249 dev_err(&pdev->dev,
4250 "Failed to query interrupt moderation feature\n");
4251 goto err_netdev_destroy;
4252 }
548c4940
SJ
4253 ena_init_io_rings(adapter,
4254 0,
4255 adapter->xdp_num_queues +
4256 adapter->num_io_queues);
1738cd3e
NB
4257
4258 netdev->netdev_ops = &ena_netdev_ops;
4259 netdev->watchdog_timeo = TX_TIMEOUT;
4260 ena_set_ethtool_ops(netdev);
4261
4262 netdev->priv_flags |= IFF_UNICAST_FLT;
4263
4264 u64_stats_init(&adapter->syncp);
4265
4d192660 4266 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
1738cd3e
NB
4267 if (rc) {
4268 dev_err(&pdev->dev,
4269 "Failed to enable and set the admin interrupts\n");
4270 goto err_worker_destroy;
4271 }
4272 rc = ena_rss_init_default(adapter);
d1497638 4273 if (rc && (rc != -EOPNOTSUPP)) {
1738cd3e
NB
4274 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4275 goto err_free_msix;
4276 }
4277
4278 ena_config_debug_area(adapter);
4279
4280 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4281
4282 netif_carrier_off(netdev);
4283
4284 rc = register_netdev(netdev);
4285 if (rc) {
4286 dev_err(&pdev->dev, "Cannot register net device\n");
4287 goto err_rss;
4288 }
4289
1738cd3e
NB
4290 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4291
4292 adapter->last_keep_alive_jiffies = jiffies;
82ef30f1
NB
4293 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4294 adapter->missing_tx_completion_to = TX_TIMEOUT;
4295 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4296
4297 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
1738cd3e 4298
e99e88a9 4299 timer_setup(&adapter->timer_service, ena_timer_service, 0);
f850b4a7 4300 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1738cd3e 4301
38005ca8
AK
4302 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
4303 queue_type_str = "Regular";
4304 else
4305 queue_type_str = "Low Latency";
4306
4307 dev_info(&pdev->dev,
9f648f7b 4308 "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
1738cd3e 4309 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
9f648f7b 4310 netdev->dev_addr, queue_type_str);
1738cd3e
NB
4311
4312 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4313
4314 adapters_found++;
4315
4316 return 0;
4317
4318err_rss:
4319 ena_com_delete_debug_area(ena_dev);
4320 ena_com_rss_destroy(ena_dev);
4321err_free_msix:
e2eed0e3 4322 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
58a54b9c
AK
4323 /* stop submitting admin commands on a device that was reset */
4324 ena_com_set_admin_running_state(ena_dev, false);
1738cd3e 4325 ena_free_mgmnt_irq(adapter);
06443684 4326 ena_disable_msix(adapter);
1738cd3e 4327err_worker_destroy:
1738cd3e 4328 del_timer(&adapter->timer_service);
1738cd3e
NB
4329err_netdev_destroy:
4330 free_netdev(netdev);
4331err_device_destroy:
4332 ena_com_delete_host_info(ena_dev);
4333 ena_com_admin_destroy(ena_dev);
4334err_free_region:
4335 ena_release_bars(ena_dev, pdev);
4336err_free_ena_dev:
1738cd3e
NB
4337 vfree(ena_dev);
4338err_disable_device:
4339 pci_disable_device(pdev);
4340 return rc;
4341}
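ena_probe() above unwinds partial initialization through a chain of err_* labels, releasing resources in the reverse order of acquisition. A minimal sketch of that pattern with hypothetical stand-in resources (not the driver's actual objects):

#include <stdio.h>
#include <stdlib.h>

static int example_probe(void)
{
	void *bar_map, *priv;
	int rc;

	bar_map = malloc(64);		/* stands in for the BAR mapping       */
	if (!bar_map) {
		rc = -1;
		goto err;
	}

	priv = malloc(128);		/* stands in for the netdev allocation */
	if (!priv) {
		rc = -1;
		goto err_unmap;
	}

	/* normal teardown for the demo */
	free(priv);
	free(bar_map);
	return 0;

err_unmap:
	free(bar_map);
err:
	fprintf(stderr, "probe failed: %d\n", rc);
	return rc;
}

int main(void)
{
	return example_probe();
}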
4342
1738cd3e
NB
4343/*****************************************************************************/
4344
428c4913 4345/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
1738cd3e 4346 * @pdev: PCI device information struct
428c4913 4347 * @shutdown: Is it a shutdown operation? If false, it is a removal
1738cd3e 4348 *
428c4913
GP
4349 * __ena_shutoff is a helper routine that does the real work on shutdown and
 4350 * removal paths; the difference between those paths is whether to
 4351 * detach or unregister the netdevice.
1738cd3e 4352 */
428c4913 4353static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
1738cd3e
NB
4354{
4355 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4356 struct ena_com_dev *ena_dev;
4357 struct net_device *netdev;
4358
1738cd3e
NB
4359 ena_dev = adapter->ena_dev;
4360 netdev = adapter->netdev;
4361
4362#ifdef CONFIG_RFS_ACCEL
4363 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4364 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4365 netdev->rx_cpu_rmap = NULL;
4366 }
4367#endif /* CONFIG_RFS_ACCEL */
1738cd3e
NB
4368 del_timer_sync(&adapter->timer_service);
4369
4370 cancel_work_sync(&adapter->reset_task);
4371
428c4913 4372 rtnl_lock(); /* lock released inside the below if-else block */
c1c0e40b 4373 adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
944b28aa 4374 ena_destroy_device(adapter, true);
428c4913
GP
4375 if (shutdown) {
4376 netif_device_detach(netdev);
4377 dev_close(netdev);
4378 rtnl_unlock();
4379 } else {
4380 rtnl_unlock();
4381 unregister_netdev(netdev);
4382 free_netdev(netdev);
4383 }
1738cd3e 4384
1738cd3e
NB
4385 ena_com_rss_destroy(ena_dev);
4386
4387 ena_com_delete_debug_area(ena_dev);
4388
4389 ena_com_delete_host_info(ena_dev);
4390
4391 ena_release_bars(ena_dev, pdev);
4392
1738cd3e
NB
4393 pci_disable_device(pdev);
4394
1738cd3e
NB
4395 vfree(ena_dev);
4396}
4397
428c4913
GP
4398/* ena_remove - Device Removal Routine
4399 * @pdev: PCI device information struct
4400 *
4401 * ena_remove is called by the PCI subsystem to alert the driver
4402 * that it should release a PCI device.
4403 */
4404
4405static void ena_remove(struct pci_dev *pdev)
4406{
4407 __ena_shutoff(pdev, false);
4408}
4409
4410/* ena_shutdown - Device Shutdown Routine
4411 * @pdev: PCI device information struct
4412 *
4413 * ena_shutdown is called by the PCI subsystem to alert the driver that
4414 * a shutdown/reboot (or kexec) is happening and device must be disabled.
4415 */
4416
4417static void ena_shutdown(struct pci_dev *pdev)
4418{
4419 __ena_shutoff(pdev, true);
4420}
4421
8c5c7abd 4422/* ena_suspend - PM suspend callback
817a89ae 4423 * @dev_d: Device information struct
8c5c7abd 4424 */
817a89ae 4425static int __maybe_unused ena_suspend(struct device *dev_d)
8c5c7abd 4426{
817a89ae 4427 struct pci_dev *pdev = to_pci_dev(dev_d);
8c5c7abd
NB
4428 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4429
4430 u64_stats_update_begin(&adapter->syncp);
4431 adapter->dev_stats.suspend++;
4432 u64_stats_update_end(&adapter->syncp);
4433
4434 rtnl_lock();
4435 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
4436 dev_err(&pdev->dev,
4437 "ignoring device reset request as the device is being suspended\n");
4438 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
4439 }
cfa324a5 4440 ena_destroy_device(adapter, true);
8c5c7abd
NB
4441 rtnl_unlock();
4442 return 0;
4443}
4444
4445/* ena_resume - PM resume callback
817a89ae 4446 * @dev_d: Device information struct
8c5c7abd 4447 */
817a89ae 4448static int __maybe_unused ena_resume(struct device *dev_d)
8c5c7abd 4449{
817a89ae 4450 struct ena_adapter *adapter = dev_get_drvdata(dev_d);
8c5c7abd
NB
4451 int rc;
4452
4453 u64_stats_update_begin(&adapter->syncp);
4454 adapter->dev_stats.resume++;
4455 u64_stats_update_end(&adapter->syncp);
4456
4457 rtnl_lock();
4458 rc = ena_restore_device(adapter);
4459 rtnl_unlock();
4460 return rc;
4461}
817a89ae
VG
4462
4463static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
8c5c7abd 4464
1738cd3e
NB
4465static struct pci_driver ena_pci_driver = {
4466 .name = DRV_MODULE_NAME,
4467 .id_table = ena_pci_tbl,
4468 .probe = ena_probe,
4469 .remove = ena_remove,
428c4913 4470 .shutdown = ena_shutdown,
817a89ae 4471 .driver.pm = &ena_pm_ops,
115ddc49 4472 .sriov_configure = pci_sriov_configure_simple,
1738cd3e
NB
4473};
4474
4475static int __init ena_init(void)
4476{
1738cd3e
NB
4477 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
4478 if (!ena_wq) {
4479 pr_err("Failed to create workqueue\n");
4480 return -ENOMEM;
4481 }
4482
4483 return pci_register_driver(&ena_pci_driver);
4484}
4485
4486static void __exit ena_cleanup(void)
4487{
4488 pci_unregister_driver(&ena_pci_driver);
4489
4490 if (ena_wq) {
4491 destroy_workqueue(ena_wq);
4492 ena_wq = NULL;
4493 }
4494}
4495
4496/******************************************************************************
4497 ******************************** AENQ Handlers *******************************
4498 *****************************************************************************/
4499/* ena_update_on_link_change:
4500 * Notify the network interface about the change in link status
4501 */
4502static void ena_update_on_link_change(void *adapter_data,
4503 struct ena_admin_aenq_entry *aenq_e)
4504{
4505 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4506 struct ena_admin_aenq_link_change_desc *aenq_desc =
4507 (struct ena_admin_aenq_link_change_desc *)aenq_e;
4508 int status = aenq_desc->flags &
4509 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
4510
4511 if (status) {
4512 netdev_dbg(adapter->netdev, "%s\n", __func__);
4513 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
d18e4f68
NB
4514 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
4515 netif_carrier_on(adapter->netdev);
1738cd3e
NB
4516 } else {
4517 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4518 netif_carrier_off(adapter->netdev);
4519 }
4520}
4521
4522static void ena_keep_alive_wd(void *adapter_data,
4523 struct ena_admin_aenq_entry *aenq_e)
4524{
4525 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
11a9a460
NB
4526 struct ena_admin_aenq_keep_alive_desc *desc;
4527 u64 rx_drops;
5c665f8c 4528 u64 tx_drops;
1738cd3e 4529
11a9a460 4530 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
1738cd3e 4531 adapter->last_keep_alive_jiffies = jiffies;
11a9a460
NB
4532
4533 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
5c665f8c 4534 tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
11a9a460
NB
4535
4536 u64_stats_update_begin(&adapter->syncp);
4537 adapter->dev_stats.rx_drops = rx_drops;
5c665f8c 4538 adapter->dev_stats.tx_drops = tx_drops;
11a9a460 4539 u64_stats_update_end(&adapter->syncp);
1738cd3e
NB
4540}
4541
4542static void ena_notification(void *adapter_data,
4543 struct ena_admin_aenq_entry *aenq_e)
4544{
4545 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
82ef30f1 4546 struct ena_admin_ena_hw_hints *hints;
1738cd3e
NB
4547
4548 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4549 "Invalid group(%x) expected %x\n",
4550 aenq_e->aenq_common_desc.group,
4551 ENA_ADMIN_NOTIFICATION);
4552
4553 switch (aenq_e->aenq_common_desc.syndrom) {
82ef30f1
NB
4554 case ENA_ADMIN_UPDATE_HINTS:
4555 hints = (struct ena_admin_ena_hw_hints *)
4556 (&aenq_e->inline_data_w4);
4557 ena_update_hints(adapter, hints);
4558 break;
1738cd3e
NB
4559 default:
4560 netif_err(adapter, drv, adapter->netdev,
4561 "Invalid aenq notification link state %d\n",
4562 aenq_e->aenq_common_desc.syndrom);
4563 }
4564}
4565
 4566/* This handler will be called for an unknown event group or unimplemented handlers */
4567static void unimplemented_aenq_handler(void *data,
4568 struct ena_admin_aenq_entry *aenq_e)
4569{
4570 struct ena_adapter *adapter = (struct ena_adapter *)data;
4571
4572 netif_err(adapter, drv, adapter->netdev,
4573 "Unknown event was received or event with unimplemented handler\n");
4574}
4575
4576static struct ena_aenq_handlers aenq_handlers = {
4577 .handlers = {
4578 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
4579 [ENA_ADMIN_NOTIFICATION] = ena_notification,
4580 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
4581 },
4582 .unimplemented_handler = unimplemented_aenq_handler
4583};
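aenq_handlers above is a fixed dispatch table indexed by AENQ group, with unimplemented_aenq_handler as the catch-all. A compact userspace sketch of the same pattern with hypothetical event IDs:

#include <stdio.h>

enum example_event { EV_LINK = 0, EV_KEEPALIVE = 1, EV_MAX = 4 };

typedef void (*example_handler)(int data);

static void on_link(int data)      { printf("link event %d\n", data); }
static void on_keepalive(int data) { printf("keep-alive %d\n", data); }
static void on_unknown(int data)   { printf("unhandled event %d\n", data); }

/* Sparse table: unset slots fall back to the catch-all handler */
static example_handler handlers[EV_MAX] = {
	[EV_LINK]      = on_link,
	[EV_KEEPALIVE] = on_keepalive,
};

static void dispatch(int ev, int data)
{
	example_handler h = (ev >= 0 && ev < EV_MAX && handlers[ev]) ?
			    handlers[ev] : on_unknown;

	h(data);
}

int main(void)
{
	dispatch(EV_LINK, 1);
	dispatch(3, 7);		/* no handler registered -> catch-all */
	return 0;
}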
4584
4585module_init(ena_init);
4586module_exit(ena_cleanup);