/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
			    NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					    int first_index,
					    int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);

static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset
	 * Check that we are not in the middle of a reset or that a reset
	 * has already been triggered
	 */

	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

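/* ena_xmit_common - Tx path logic shared by the skb and XDP transmit paths.
 *
 * The caller builds an ena_com_tx_ctx describing the packet; this helper
 * rings the doorbell early if the LLQ burst limit was reached, hands the
 * descriptors to the device via ena_com_prepare_tx() and advances
 * next_to_use. A -ENOMEM from ena_com_prepare_tx() just means the SQ is
 * full and the packet is dropped; any other error is treated as fatal and
 * schedules a device reset.
 */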
static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors for the DMA engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&ring->syncp);
		ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}

/* This is the XDP napi callback. XDP queues use a separate napi callback
 * from the one used by the Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}

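/* Map an xdp_buff for transmission on an XDP Tx ring. The first
 * tx_max_header_size bytes are handed to the device as an LLQ push header
 * straight from the frame memory; only the remainder (if any) is
 * DMA-mapped. On a mapping failure the frame is released with
 * xdp_return_frame_rx_napi() and -EINVAL is returned.
 */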
static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
			       struct ena_tx_buffer *tx_info,
			       struct xdp_buff *xdp,
			       void **push_hdr,
			       u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = convert_to_xdp_frame(xdp);
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&xdp_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}

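/* Transmit a single XDP_TX frame on the XDP Tx queue paired with the Rx
 * queue it arrived on. The Rx page reference count is bumped here so the
 * page stays alive until the Tx completion path (ena_clean_xdp_irq())
 * releases it.
 */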
static int ena_xdp_xmit_buff(struct net_device *dev,
			     struct xdp_buff *xdp,
			     int qid,
			     struct ena_rx_buffer *rx_info)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_com_tx_ctx ena_tx_ctx = {0};
	struct ena_tx_buffer *tx_info;
	struct ena_ring *xdp_ring;
	u16 next_to_use, req_id;
	int rc;
	void *push_hdr;
	u32 push_len;

	xdp_ring = &adapter->tx_ring[qid];
	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	page_ref_inc(rx_info->page);
	tx_info->xdp_rx_page = rx_info->page;

	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdp->data_end - xdp->data);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
	 * has a memory barrier
	 */
	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.doorbells++;
	u64_stats_update_end(&xdp_ring->syncp);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:

	return NETDEV_TX_OK;
}

static int ena_xdp_execute(struct ena_ring *rx_ring,
			   struct xdp_buff *xdp,
			   struct ena_rx_buffer *rx_info)
{
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	if (verdict == XDP_TX)
		ena_xdp_xmit_buff(rx_ring->netdev,
				  xdp,
				  rx_ring->qid + rx_ring->adapter->num_io_queues,
				  rx_info);
	else if (unlikely(verdict == XDP_ABORTED))
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
	else if (unlikely(verdict > XDP_TX))
		bpf_warn_invalid_xdp_action(verdict);
out:
	rcu_read_unlock();
	return verdict;
}

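/* The XDP Tx rings are laid out right after the regular IO queues in the
 * tx_ring array, so Rx queue i forwards its XDP_TX frames to
 * tx_ring[num_io_queues + i].
 */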
static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}

static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}

static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}

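/* Attach or detach an XDP program.
 *
 * Attaching the first program (or detaching the last one) requires
 * re-creating the XDP Tx queues, so if the interface is up it is brought
 * down and back up around the queue reconfiguration. While a program is
 * attached, the netdev's max_mtu is clamped to ENA_XDP_MAX_MTU.
 */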
static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
		} else if (old_bpf_prog) {
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "xdp program set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}

/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	case XDP_QUERY_PROG:
		bpf->prog_id = adapter->xdp_bpf_prog ?
			adapter->xdp_bpf_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

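/* Build the reverse IRQ-to-CPU map consumed by accelerated RFS (aRFS),
 * which lets received flows be steered to the CPU that is consuming them.
 * Compiled in only when CONFIG_RFS_ACCEL is enabled.
 */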
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}

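/* Note on the allocations in ena_setup_tx_resources() above: each buffer is
 * first requested with vzalloc_node() on the NUMA node that services the
 * queue's IRQ and falls back to a plain vzalloc() on any node if the
 * node-local allocation fails.
 */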
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}

static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

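/* Allocate and DMA-map a full page for one Rx descriptor. The buffer handed
 * to the device starts rx_headroom bytes into the page, which reserves
 * XDP_PACKET_HEADROOM while an XDP program is attached (rx_headroom is 0
 * otherwise).
 */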
static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + rx_ring->rx_headroom;
	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev,
		       ena_buf->paddr - rx_ring->rx_headroom,
		       ENA_PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}

static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

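/* A req_id reported by the device must index a slot that still holds an
 * in-flight skb (or xdp_frame on XDP rings). Anything else means the driver
 * and the device are out of sync, so the helpers below log the event, bump
 * bad_req_id and schedule a device reset.
 */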
static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s",
			  is_xdp ? "xdp frame" : "skb");
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id: %hu\n",
			  req_id);

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&ring->syncp);

	/* Trigger device reset */
	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < xdp_ring->ring_size)) {
		tx_info = &xdp_ring->tx_buffer_info[req_id];
		if (likely(tx_info->xdpf))
			return 0;
	}

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}

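/* Reap Tx completions for a ring: up to @budget packets are unmapped and
 * freed, the completion queue head is acknowledged, and a stopped netdev
 * queue is woken once enough descriptors become available again.
 */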
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
		return NULL;
	}

	return skb;
}

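/* Build an skb from the descriptors of one received packet. Packets no
 * larger than rx_copybreak are copied into a small linear skb so the
 * original page can be reposted immediately; larger packets are unmapped
 * and attached to the skb as page fragments, handing the pages over to the
 * network stack.
 */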
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;
	int rc;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;

	rc = validate_rx_req_id(rx_ring, req_id);
	if (unlikely(rc < 0))
		return NULL;

	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);
		/* The offset is non zero only for the first buffer */
		rx_info->page_offset = 0;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;

		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			return NULL;

		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: ring on which the packet was received
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_good++;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

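/* Populate an xdp_buff from the first Rx descriptor and run the attached
 * program. If the verdict is XDP_PASS the program may have adjusted the
 * packet headers, so the buffer offset and length are refreshed from the
 * xdp_buff before the frame continues down the regular skb path.
 */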
static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct ena_rx_buffer *rx_info;
	int ret;

	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
	xdp->data = page_address(rx_info->page) +
		rx_info->page_offset + rx_ring->rx_headroom;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_hard_start = page_address(rx_info->page);
	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
	/* If for some reason we received a bigger packet than
	 * we expect, then we simply drop it
	 */
	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
		return XDP_DROP;

	ret = ena_xdp_execute(rx_ring, xdp, rx_info);

	/* The xdp program might expand the headers */
	if (ret == XDP_PASS) {
		rx_info->page_offset = xdp->data - xdp->data_hard_start;
		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
	}

	return ret;
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	u32 res_budget, work_done;
	int rx_copybreak_pkt = 0;
	int refill_threshold;
	struct sk_buff *skb;
	int refill_required;
	struct xdp_buff xdp;
	int total_len = 0;
	int xdp_verdict;
	int rc = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;
	xdp.rxq = &rx_ring->xdp_rxq;
	xdp.frame_sz = ENA_PAGE_SIZE;

	do {
		xdp_verdict = XDP_PASS;
		skb = NULL;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		ena_rx_ctx.pkt_offset = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
		rx_info->page_offset = ena_rx_ctx.pkt_offset;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		if (ena_xdp_present_ring(rx_ring))
			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);

		/* allocate skb and fill it */
		if (xdp_verdict == XDP_PASS)
			skb = ena_rx_skb(rx_ring,
					 rx_ring->ena_bufs,
					 ena_rx_ctx.descs,
					 &next_to_clean);

		if (unlikely(!skb)) {
			if (xdp_verdict == XDP_TX) {
				ena_free_rx_page(rx_ring,
						 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
				res_budget--;
			}
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			if (xdp_verdict == XDP_TX || xdp_verdict == XDP_DROP)
				continue;
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

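/* Dynamic interrupt moderation (DIM): net_dim() is fed per-NAPI packet and
 * byte samples and, from its workqueue, ena_dim_work() applies the profile
 * it selected as the ring's new smoothed Rx interrupt interval.
 */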
static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}

static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}

static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;
	u32 rx_interval = 0;
	/* Rx ring can be NULL for XDP Tx queues, which don't have an
	 * accompanying rx_ring pair.
	 */
	if (rx_ring)
		rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
			rx_ring->smoothed_interval :
			ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_interval,
				tx_ring->smoothed_interval,
				true);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.unmask_interrupt++;
	u64_stats_update_end(&tx_ring->syncp);
	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 * The Tx ring is used because the rx_ring is NULL for XDP queues
	 */
	ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}

static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		if (rx_ring)
			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
						 numa_node);
	}

	tx_ring->cpu = cpu;
	if (rx_ring)
		rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}

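/* Reap Tx completions on an XDP ring. Unlike ena_clean_tx_irq() there is no
 * netdev queue to wake; the Rx page that was attached to each completed
 * frame is freed here instead.
 */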
548c4940
SJ
1813static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
1814{
1815 u32 total_done = 0;
1816 u16 next_to_clean;
1817 u32 tx_bytes = 0;
1818 int tx_pkts = 0;
1819 u16 req_id;
1820 int rc;
1821
1822 if (unlikely(!xdp_ring))
1823 return 0;
1824 next_to_clean = xdp_ring->next_to_clean;
1825
1826 while (tx_pkts < budget) {
1827 struct ena_tx_buffer *tx_info;
1828 struct xdp_frame *xdpf;
1829
1830 rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
1831 &req_id);
1832 if (rc)
1833 break;
1834
1835 rc = validate_xdp_req_id(xdp_ring, req_id);
1836 if (rc)
1837 break;
1838
1839 tx_info = &xdp_ring->tx_buffer_info[req_id];
1840 xdpf = tx_info->xdpf;
1841
1842 tx_info->xdpf = NULL;
1843 tx_info->last_jiffies = 0;
1844 ena_unmap_tx_buff(xdp_ring, tx_info);
1845
1846 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
1847 "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
1848 xdpf);
1849
1850 tx_bytes += xdpf->len;
1851 tx_pkts++;
1852 total_done += tx_info->tx_descs;
1853
1854 __free_page(tx_info->xdp_rx_page);
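/* Return req_id to the free list so a future XDP TX can reuse this slot */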
1855 xdp_ring->free_ids[next_to_clean] = req_id;
1856 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1857 xdp_ring->ring_size);
1858 }
1859
1860 xdp_ring->next_to_clean = next_to_clean;
1861 ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
1862 ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
1863
1864 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
1865 "tx_poll: q %d done. total pkts: %d\n",
1866 xdp_ring->qid, tx_pkts);
1867
1868 return tx_pkts;
1869}
1870
1738cd3e
NB
1871static int ena_io_poll(struct napi_struct *napi, int budget)
1872{
1873 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1874 struct ena_ring *tx_ring, *rx_ring;
24dee0c7
NB
1875 int tx_work_done;
1876 int rx_work_done = 0;
1738cd3e
NB
1877 int tx_budget;
1878 int napi_comp_call = 0;
1879 int ret;
1880
1881 tx_ring = ena_napi->tx_ring;
1882 rx_ring = ena_napi->rx_ring;
1883
913b0bfd
SJ
1884 tx_ring->first_interrupt = ena_napi->first_interrupt;
1885 rx_ring->first_interrupt = ena_napi->first_interrupt;
1886
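/* The NAPI budget argument only bounds Rx work, so Tx gets its own
 * budget derived from the ring size.
 */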
1738cd3e
NB
1887 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1888
3f6159db
NB
1889 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1890 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1738cd3e
NB
1891 napi_complete_done(napi, 0);
1892 return 0;
1893 }
1894
1895 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
24dee0c7
NB
1896 /* On netpoll the budget is zero and the handler should only clean the
1897 * tx completions.
1898 */
1899 if (likely(budget))
1900 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1738cd3e 1901
b1669c9f
NB
1902 /* If the device is about to reset or is down, avoid unmasking
1903 * the interrupt and return 0 so NAPI won't reschedule
1904 */
1905 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1906 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1907 napi_complete_done(napi, 0);
1908 ret = 0;
1738cd3e 1909
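/* Both Tx and Rx finished under budget: all pending work was consumed,
 * so NAPI can complete and the interrupt can be re-armed below.
 */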
b1669c9f 1910 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1738cd3e 1911 napi_comp_call = 1;
1738cd3e 1912
b1669c9f
NB
1913 /* Update numa and unmask the interrupt only when scheduled
1914 * from the interrupt context (vs from sk_busy_loop)
1738cd3e 1915 */
b1669c9f 1916 if (napi_complete_done(napi, rx_work_done)) {
282faf61
AK
1917 /* We apply adaptive moderation on Rx path only.
1918 * Tx uses static interrupt moderation.
1919 */
b1669c9f 1920 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
282faf61 1921 ena_adjust_adaptive_rx_intr_moderation(ena_napi);
b1669c9f 1922
418df30f 1923 ena_unmask_interrupt(tx_ring, rx_ring);
b1669c9f 1924 }
1738cd3e 1925
1738cd3e
NB
1926 ena_update_ring_numa_node(tx_ring, rx_ring);
1927
1928 ret = rx_work_done;
1929 } else {
1930 ret = budget;
1931 }
1932
1933 u64_stats_update_begin(&tx_ring->syncp);
1934 tx_ring->tx_stats.napi_comp += napi_comp_call;
1935 tx_ring->tx_stats.tx_poll++;
1936 u64_stats_update_end(&tx_ring->syncp);
1937
1938 return ret;
1939}
1940
1941static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1942{
1943 struct ena_adapter *adapter = (struct ena_adapter *)data;
1944
1945 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1946
1947 /* Don't call the aenq handler before probe is done */
1948 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1949 ena_com_aenq_intr_handler(adapter->ena_dev, data);
1950
1951 return IRQ_HANDLED;
1952}
1953
1954/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1955 * @irq: interrupt number
1956 * @data: pointer to a network interface private napi device structure
1957 */
1958static irqreturn_t ena_intr_msix_io(int irq, void *data)
1959{
1960 struct ena_napi *ena_napi = data;
1961
913b0bfd 1962 ena_napi->first_interrupt = true;
8510e1a3 1963
e745dafa 1964 napi_schedule_irqoff(&ena_napi->napi);
1738cd3e
NB
1965
1966 return IRQ_HANDLED;
1967}
1968
06443684
NB
1969/* Reserve a single MSI-X vector for management (admin + aenq),
1970 * plus one vector for each potential io queue.
1971 * The number of potential io queues is the minimum of what the device
1972 * supports and the number of vCPUs.
1973 */
4d192660 1974static int ena_enable_msix(struct ena_adapter *adapter)
1738cd3e 1975{
06443684
NB
1976 int msix_vecs, irq_cnt;
1977
1978 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1979 netif_err(adapter, probe, adapter->netdev,
1980 "Error, MSI-X is already enabled\n");
1981 return -EPERM;
1982 }
1738cd3e
NB
1983
1984 /* Reserve the max MSI-X vectors we might need */
ce1f3521 1985 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1738cd3e
NB
1986 netif_dbg(adapter, probe, adapter->netdev,
1987 "trying to enable MSI-X, vectors %d\n", msix_vecs);
1988
06443684
NB
1989 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1990 msix_vecs, PCI_IRQ_MSIX);
1991
1992 if (irq_cnt < 0) {
1738cd3e 1993 netif_err(adapter, probe, adapter->netdev,
06443684 1994 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1738cd3e
NB
1995 return -ENOSPC;
1996 }
1997
06443684
NB
1998 if (irq_cnt != msix_vecs) {
1999 netif_notice(adapter, probe, adapter->netdev,
2000 "enable only %d MSI-X (out of %d), reduce the number of queues\n",
2001 irq_cnt, msix_vecs);
faa615f9 2002 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1738cd3e
NB
2003 }
2004
06443684
NB
2005 if (ena_init_rx_cpu_rmap(adapter))
2006 netif_warn(adapter, probe, adapter->netdev,
2007 "Failed to map IRQs to CPUs\n");
2008
2009 adapter->msix_vecs = irq_cnt;
2010 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
1738cd3e
NB
2011
2012 return 0;
2013}
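
/* Illustrative sketch (not part of the driver): how the vector budget and
 * the fallback queue count above relate. It assumes the usual ena_netdev.h
 * definitions ENA_ADMIN_MSIX_VEC == 1 and
 * ENA_MAX_MSIX_VEC(io) == ENA_ADMIN_MSIX_VEC + (io); the helper name is
 * hypothetical.
 */
#if 0
static int example_msix_budget(int max_io_queues, int granted_vectors)
{
	/* one admin/AENQ vector plus one vector per potential IO queue */
	int wanted = 1 + max_io_queues;

	if (granted_vectors >= wanted)
		return max_io_queues;

	/* Fewer vectors than requested: keep the admin vector and shrink the
	 * IO queue count, mirroring irq_cnt - ENA_ADMIN_MSIX_VEC above.
	 */
	return granted_vectors - 1;
}
#endif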
2014
2015static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
2016{
2017 u32 cpu;
2018
2019 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
2020 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
2021 pci_name(adapter->pdev));
2022 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
2023 ena_intr_msix_mgmnt;
2024 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
2025 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
da6f4cf5 2026 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
1738cd3e
NB
2027 cpu = cpumask_first(cpu_online_mask);
2028 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
2029 cpumask_set_cpu(cpu,
2030 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
2031}
2032
2033static void ena_setup_io_intr(struct ena_adapter *adapter)
2034{
2035 struct net_device *netdev;
2036 int irq_idx, i, cpu;
548c4940 2037 int io_queue_count;
1738cd3e
NB
2038
2039 netdev = adapter->netdev;
548c4940 2040 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e 2041
548c4940 2042 for (i = 0; i < io_queue_count; i++) {
1738cd3e
NB
2043 irq_idx = ENA_IO_IRQ_IDX(i);
2044 cpu = i % num_online_cpus();
2045
2046 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
2047 "%s-Tx-Rx-%d", netdev->name, i);
2048 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
2049 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
2050 adapter->irq_tbl[irq_idx].vector =
da6f4cf5 2051 pci_irq_vector(adapter->pdev, irq_idx);
1738cd3e
NB
2052 adapter->irq_tbl[irq_idx].cpu = cpu;
2053
2054 cpumask_set_cpu(cpu,
2055 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
2056 }
2057}
2058
2059static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
2060{
2061 unsigned long flags = 0;
2062 struct ena_irq *irq;
2063 int rc;
2064
2065 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2066 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2067 irq->data);
2068 if (rc) {
2069 netif_err(adapter, probe, adapter->netdev,
2070 "failed to request admin irq\n");
2071 return rc;
2072 }
2073
2074 netif_dbg(adapter, probe, adapter->netdev,
2075 "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
2076 irq->affinity_hint_mask.bits[0], irq->vector);
2077
2078 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2079
2080 return rc;
2081}
2082
2083static int ena_request_io_irq(struct ena_adapter *adapter)
2084{
e02ae6ed 2085 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2086 unsigned long flags = 0;
2087 struct ena_irq *irq;
2088 int rc = 0, i, k;
2089
06443684
NB
2090 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2091 netif_err(adapter, ifup, adapter->netdev,
2092 "Failed to request I/O IRQ: MSI-X is not enabled\n");
2093 return -EINVAL;
2094 }
2095
e02ae6ed 2096 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1738cd3e
NB
2097 irq = &adapter->irq_tbl[i];
2098 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2099 irq->data);
2100 if (rc) {
2101 netif_err(adapter, ifup, adapter->netdev,
2102 "Failed to request I/O IRQ. index %d rc %d\n",
2103 i, rc);
2104 goto err;
2105 }
2106
2107 netif_dbg(adapter, ifup, adapter->netdev,
2108 "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
2109 i, irq->affinity_hint_mask.bits[0], irq->vector);
2110
2111 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2112 }
2113
2114 return rc;
2115
2116err:
2117 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
2118 irq = &adapter->irq_tbl[k];
2119 free_irq(irq->vector, irq->data);
2120 }
2121
2122 return rc;
2123}
2124
2125static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
2126{
2127 struct ena_irq *irq;
2128
2129 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2130 synchronize_irq(irq->vector);
2131 irq_set_affinity_hint(irq->vector, NULL);
2132 free_irq(irq->vector, irq->data);
2133}
2134
2135static void ena_free_io_irq(struct ena_adapter *adapter)
2136{
e02ae6ed 2137 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2138 struct ena_irq *irq;
2139 int i;
2140
2141#ifdef CONFIG_RFS_ACCEL
2142 if (adapter->msix_vecs >= 1) {
2143 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2144 adapter->netdev->rx_cpu_rmap = NULL;
2145 }
2146#endif /* CONFIG_RFS_ACCEL */
2147
e02ae6ed 2148 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1738cd3e
NB
2149 irq = &adapter->irq_tbl[i];
2150 irq_set_affinity_hint(irq->vector, NULL);
2151 free_irq(irq->vector, irq->data);
2152 }
2153}
2154
06443684
NB
2155static void ena_disable_msix(struct ena_adapter *adapter)
2156{
2157 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
2158 pci_free_irq_vectors(adapter->pdev);
2159}
2160
1738cd3e
NB
2161static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
2162{
e02ae6ed 2163 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2164 int i;
2165
2166 if (!netif_running(adapter->netdev))
2167 return;
2168
e02ae6ed 2169 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
1738cd3e
NB
2170 synchronize_irq(adapter->irq_tbl[i].vector);
2171}
2172
548c4940
SJ
2173static void ena_del_napi_in_range(struct ena_adapter *adapter,
2174 int first_index,
2175 int count)
1738cd3e
NB
2176{
2177 int i;
2178
548c4940
SJ
2179 for (i = first_index; i < first_index + count; i++) {
2180 /* Check if napi was initialized before */
2181 if (!ENA_IS_XDP_INDEX(adapter, i) ||
2182 adapter->ena_napi[i].xdp_ring)
2183 netif_napi_del(&adapter->ena_napi[i].napi);
2184 else
2185 WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
2186 adapter->ena_napi[i].xdp_ring);
2187 }
1738cd3e
NB
2188}
2189
548c4940
SJ
2190static void ena_init_napi_in_range(struct ena_adapter *adapter,
2191 int first_index, int count)
1738cd3e 2192{
548c4940 2193 struct ena_napi *napi = {0};
1738cd3e
NB
2194 int i;
2195
548c4940 2196 for (i = first_index; i < first_index + count; i++) {
1738cd3e
NB
2197 napi = &adapter->ena_napi[i];
2198
2199 netif_napi_add(adapter->netdev,
2200 &adapter->ena_napi[i].napi,
548c4940 2201 ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
1738cd3e 2202 ENA_NAPI_BUDGET);
548c4940
SJ
2203
2204 if (!ENA_IS_XDP_INDEX(adapter, i)) {
2205 napi->rx_ring = &adapter->rx_ring[i];
2206 napi->tx_ring = &adapter->tx_ring[i];
2207 } else {
2208 napi->xdp_ring = &adapter->tx_ring[i];
2209 }
1738cd3e
NB
2210 napi->qid = i;
2211 }
2212}
2213
548c4940
SJ
2214static void ena_napi_disable_in_range(struct ena_adapter *adapter,
2215 int first_index,
2216 int count)
1738cd3e
NB
2217{
2218 int i;
2219
548c4940 2220 for (i = first_index; i < first_index + count; i++)
1738cd3e
NB
2221 napi_disable(&adapter->ena_napi[i].napi);
2222}
2223
548c4940
SJ
2224static void ena_napi_enable_in_range(struct ena_adapter *adapter,
2225 int first_index,
2226 int count)
1738cd3e
NB
2227{
2228 int i;
2229
548c4940 2230 for (i = first_index; i < first_index + count; i++)
1738cd3e
NB
2231 napi_enable(&adapter->ena_napi[i].napi);
2232}
2233
1738cd3e
NB
2234/* Configure the Rx forwarding */
2235static int ena_rss_configure(struct ena_adapter *adapter)
2236{
2237 struct ena_com_dev *ena_dev = adapter->ena_dev;
2238 int rc;
2239
2240 /* In case the RSS table wasn't initialized by probe */
2241 if (!ena_dev->rss.tbl_log_size) {
2242 rc = ena_rss_init_default(adapter);
d1497638 2243 if (rc && (rc != -EOPNOTSUPP)) {
1738cd3e 2244 netif_err(adapter, ifup, adapter->netdev,
548c4940 2245 "Failed to init RSS rc: %d\n", rc);
1738cd3e
NB
2246 return rc;
2247 }
2248 }
2249
2250 /* Set indirect table */
2251 rc = ena_com_indirect_table_set(ena_dev);
d1497638 2252 if (unlikely(rc && rc != -EOPNOTSUPP))
1738cd3e
NB
2253 return rc;
2254
2255 /* Configure hash function (if supported) */
2256 rc = ena_com_set_hash_function(ena_dev);
d1497638 2257 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1738cd3e
NB
2258 return rc;
2259
2260 /* Configure hash inputs (if supported) */
2261 rc = ena_com_set_hash_ctrl(ena_dev);
d1497638 2262 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1738cd3e
NB
2263 return rc;
2264
2265 return 0;
2266}
2267
2268static int ena_up_complete(struct ena_adapter *adapter)
2269{
7853b49c 2270 int rc;
1738cd3e
NB
2271
2272 rc = ena_rss_configure(adapter);
2273 if (rc)
2274 return rc;
2275
1738cd3e
NB
2276 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
2277
2278 ena_refill_all_rx_bufs(adapter);
2279
2280 /* enable transmits */
2281 netif_tx_start_all_queues(adapter->netdev);
2282
548c4940
SJ
2283 ena_napi_enable_in_range(adapter,
2284 0,
2285 adapter->xdp_num_queues + adapter->num_io_queues);
1738cd3e 2286
1738cd3e
NB
2287 return 0;
2288}
2289
2290static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
2291{
38005ca8 2292 struct ena_com_create_io_ctx ctx;
1738cd3e
NB
2293 struct ena_com_dev *ena_dev;
2294 struct ena_ring *tx_ring;
2295 u32 msix_vector;
2296 u16 ena_qid;
2297 int rc;
2298
2299 ena_dev = adapter->ena_dev;
2300
2301 tx_ring = &adapter->tx_ring[qid];
2302 msix_vector = ENA_IO_IRQ_IDX(qid);
2303 ena_qid = ENA_IO_TXQ_IDX(qid);
2304
38005ca8
AK
2305 memset(&ctx, 0x0, sizeof(ctx));
2306
1738cd3e
NB
2307 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
2308 ctx.qid = ena_qid;
2309 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
2310 ctx.msix_vector = msix_vector;
13ca32a6 2311 ctx.queue_size = tx_ring->ring_size;
1738cd3e
NB
2312 ctx.numa_node = cpu_to_node(tx_ring->cpu);
2313
2314 rc = ena_com_create_io_queue(ena_dev, &ctx);
2315 if (rc) {
2316 netif_err(adapter, ifup, adapter->netdev,
2317 "Failed to create I/O TX queue num %d rc: %d\n",
548c4940 2318 qid, rc);
1738cd3e
NB
2319 return rc;
2320 }
2321
2322 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2323 &tx_ring->ena_com_io_sq,
2324 &tx_ring->ena_com_io_cq);
2325 if (rc) {
2326 netif_err(adapter, ifup, adapter->netdev,
2327 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
2328 qid, rc);
2329 ena_com_destroy_io_queue(ena_dev, ena_qid);
2d2c600a 2330 return rc;
1738cd3e
NB
2331 }
2332
2333 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
2334 return rc;
2335}
2336
548c4940
SJ
2337static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2338 int first_index, int count)
1738cd3e
NB
2339{
2340 struct ena_com_dev *ena_dev = adapter->ena_dev;
2341 int rc, i;
2342
548c4940 2343 for (i = first_index; i < first_index + count; i++) {
1738cd3e
NB
2344 rc = ena_create_io_tx_queue(adapter, i);
2345 if (rc)
2346 goto create_err;
2347 }
2348
2349 return 0;
2350
2351create_err:
548c4940 2352 while (i-- > first_index)
1738cd3e
NB
2353 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2354
2355 return rc;
2356}
2357
2358static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2359{
2360 struct ena_com_dev *ena_dev;
38005ca8 2361 struct ena_com_create_io_ctx ctx;
1738cd3e
NB
2362 struct ena_ring *rx_ring;
2363 u32 msix_vector;
2364 u16 ena_qid;
2365 int rc;
2366
2367 ena_dev = adapter->ena_dev;
2368
2369 rx_ring = &adapter->rx_ring[qid];
2370 msix_vector = ENA_IO_IRQ_IDX(qid);
2371 ena_qid = ENA_IO_RXQ_IDX(qid);
2372
38005ca8
AK
2373 memset(&ctx, 0x0, sizeof(ctx));
2374
1738cd3e
NB
2375 ctx.qid = ena_qid;
2376 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2377 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2378 ctx.msix_vector = msix_vector;
13ca32a6 2379 ctx.queue_size = rx_ring->ring_size;
1738cd3e
NB
2380 ctx.numa_node = cpu_to_node(rx_ring->cpu);
2381
2382 rc = ena_com_create_io_queue(ena_dev, &ctx);
2383 if (rc) {
2384 netif_err(adapter, ifup, adapter->netdev,
2385 "Failed to create I/O RX queue num %d rc: %d\n",
2386 qid, rc);
2387 return rc;
2388 }
2389
2390 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2391 &rx_ring->ena_com_io_sq,
2392 &rx_ring->ena_com_io_cq);
2393 if (rc) {
2394 netif_err(adapter, ifup, adapter->netdev,
2395 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2396 qid, rc);
838c93dc 2397 goto err;
1738cd3e
NB
2398 }
2399
2400 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2401
838c93dc
SJ
2402 return rc;
2403err:
2404 ena_com_destroy_io_queue(ena_dev, ena_qid);
1738cd3e
NB
2405 return rc;
2406}
2407
2408static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2409{
2410 struct ena_com_dev *ena_dev = adapter->ena_dev;
2411 int rc, i;
2412
faa615f9 2413 for (i = 0; i < adapter->num_io_queues; i++) {
1738cd3e
NB
2414 rc = ena_create_io_rx_queue(adapter, i);
2415 if (rc)
2416 goto create_err;
282faf61 2417 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
1738cd3e
NB
2418 }
2419
2420 return 0;
2421
2422create_err:
282faf61
AK
2423 while (i--) {
2424 cancel_work_sync(&adapter->ena_napi[i].dim.work);
1738cd3e 2425 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
282faf61 2426 }
1738cd3e
NB
2427
2428 return rc;
2429}
2430
13ca32a6 2431static void set_io_rings_size(struct ena_adapter *adapter,
548c4940
SJ
2432 int new_tx_size,
2433 int new_rx_size)
13ca32a6
SJ
2434{
2435 int i;
2436
faa615f9 2437 for (i = 0; i < adapter->num_io_queues; i++) {
13ca32a6
SJ
2438 adapter->tx_ring[i].ring_size = new_tx_size;
2439 adapter->rx_ring[i].ring_size = new_rx_size;
2440 }
2441}
2442
2443/* This function allows queue allocation to back off when the system is
2444 * low on memory. If there is not enough memory to allocate io queues,
2445 * the driver will try to allocate smaller queues.
2446 *
2447 * The backoff algorithm is as follows (an illustrative sketch follows this function):
2448 * 1. Try to allocate TX and RX; if successful,
2449 * 1.1. return success
2450 *
2451 * 2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
2452 *
2453 * 3. If TX or RX is smaller than 256
2454 * 3.1. return failure.
2455 * 4. else
2456 * 4.1. go back to 1.
2457 */
2458static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2459{
2460 int rc, cur_rx_ring_size, cur_tx_ring_size;
2461 int new_rx_ring_size, new_tx_ring_size;
2462
2463 /* Current queue sizes might have been set smaller than the requested
2464 * ones due to past queue allocation failures.
2465 */
2466 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
548c4940 2467 adapter->requested_rx_ring_size);
13ca32a6
SJ
2468
2469 while (1) {
548c4940
SJ
2470 if (ena_xdp_present(adapter)) {
2471 rc = ena_setup_and_create_all_xdp_queues(adapter);
2472
2473 if (rc)
2474 goto err_setup_tx;
2475 }
2476 rc = ena_setup_tx_resources_in_range(adapter,
2477 0,
2478 adapter->num_io_queues);
13ca32a6
SJ
2479 if (rc)
2480 goto err_setup_tx;
2481
548c4940
SJ
2482 rc = ena_create_io_tx_queues_in_range(adapter,
2483 0,
2484 adapter->num_io_queues);
13ca32a6
SJ
2485 if (rc)
2486 goto err_create_tx_queues;
2487
2488 rc = ena_setup_all_rx_resources(adapter);
2489 if (rc)
2490 goto err_setup_rx;
2491
2492 rc = ena_create_all_io_rx_queues(adapter);
2493 if (rc)
2494 goto err_create_rx_queues;
2495
2496 return 0;
2497
2498err_create_rx_queues:
2499 ena_free_all_io_rx_resources(adapter);
2500err_setup_rx:
2501 ena_destroy_all_tx_queues(adapter);
2502err_create_tx_queues:
2503 ena_free_all_io_tx_resources(adapter);
2504err_setup_tx:
2505 if (rc != -ENOMEM) {
2506 netif_err(adapter, ifup, adapter->netdev,
2507 "Queue creation failed with error code %d\n",
548c4940 2508 rc);
13ca32a6
SJ
2509 return rc;
2510 }
2511
2512 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2513 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2514
2515 netif_err(adapter, ifup, adapter->netdev,
2516 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2517 cur_tx_ring_size, cur_rx_ring_size);
2518
2519 new_tx_ring_size = cur_tx_ring_size;
2520 new_rx_ring_size = cur_rx_ring_size;
2521
2522 /* Decrease the size of the larger queue, or
2523 * decrease both if they are the same size.
2524 */
2525 if (cur_rx_ring_size <= cur_tx_ring_size)
2526 new_tx_ring_size = cur_tx_ring_size / 2;
2527 if (cur_rx_ring_size >= cur_tx_ring_size)
2528 new_rx_ring_size = cur_rx_ring_size / 2;
2529
3e5bfb18 2530 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
548c4940 2531 new_rx_ring_size < ENA_MIN_RING_SIZE) {
13ca32a6
SJ
2532 netif_err(adapter, ifup, adapter->netdev,
2533 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2534 ENA_MIN_RING_SIZE);
2535 return rc;
2536 }
2537
2538 netif_err(adapter, ifup, adapter->netdev,
2539 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2540 new_tx_ring_size,
2541 new_rx_ring_size);
2542
2543 set_io_rings_size(adapter, new_tx_ring_size,
2544 new_rx_ring_size);
2545 }
2546}
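
/* Illustrative sketch of the backoff step above (not part of the driver):
 * a minimal rendering of the same halving rule. It assumes only
 * ENA_MIN_RING_SIZE from ena_netdev.h; the helper name is hypothetical.
 */
#if 0
static bool example_backoff_step(int *tx_size, int *rx_size)
{
	int new_tx = *tx_size, new_rx = *rx_size;

	/* Halve the larger ring, or both when they are equal */
	if (*rx_size <= *tx_size)
		new_tx = *tx_size / 2;
	if (*rx_size >= *tx_size)
		new_rx = *rx_size / 2;

	/* Give up once either ring would drop below the minimum */
	if (new_tx < ENA_MIN_RING_SIZE || new_rx < ENA_MIN_RING_SIZE)
		return false;

	*tx_size = new_tx;
	*rx_size = new_rx;
	return true;	/* caller retries queue creation with the smaller rings */
}
#endif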
2547
1738cd3e
NB
2548static int ena_up(struct ena_adapter *adapter)
2549{
548c4940 2550 int io_queue_count, rc, i;
1738cd3e
NB
2551
2552 netdev_dbg(adapter->netdev, "%s\n", __func__);
2553
548c4940 2554 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1738cd3e
NB
2555 ena_setup_io_intr(adapter);
2556
78a55d05
AK
2557 /* napi poll functions should be initialized before running
2558 * request_irq(), to handle a rare condition where there is a pending
2559 * interrupt, causing the ISR to fire immediately while the poll
2560 * function wasn't set yet, causing a null dereference
2561 */
548c4940 2562 ena_init_napi_in_range(adapter, 0, io_queue_count);
78a55d05 2563
1738cd3e
NB
2564 rc = ena_request_io_irq(adapter);
2565 if (rc)
2566 goto err_req_irq;
2567
13ca32a6 2568 rc = create_queues_with_size_backoff(adapter);
1738cd3e 2569 if (rc)
13ca32a6 2570 goto err_create_queues_with_backoff;
1738cd3e
NB
2571
2572 rc = ena_up_complete(adapter);
2573 if (rc)
2574 goto err_up;
2575
2576 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2577 netif_carrier_on(adapter->netdev);
2578
2579 u64_stats_update_begin(&adapter->syncp);
2580 adapter->dev_stats.interface_up++;
2581 u64_stats_update_end(&adapter->syncp);
2582
2583 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2584
7853b49c 2585 /* Enable completion queues interrupt */
faa615f9 2586 for (i = 0; i < adapter->num_io_queues; i++)
7853b49c
NB
2587 ena_unmask_interrupt(&adapter->tx_ring[i],
2588 &adapter->rx_ring[i]);
2589
2590 /* schedule napi in case we had pending packets
2591 * from the last time we disabled napi
2592 */
548c4940 2593 for (i = 0; i < io_queue_count; i++)
7853b49c
NB
2594 napi_schedule(&adapter->ena_napi[i].napi);
2595
1738cd3e
NB
2596 return rc;
2597
2598err_up:
1738cd3e 2599 ena_destroy_all_tx_queues(adapter);
1738cd3e 2600 ena_free_all_io_tx_resources(adapter);
13ca32a6
SJ
2601 ena_destroy_all_rx_queues(adapter);
2602 ena_free_all_io_rx_resources(adapter);
2603err_create_queues_with_backoff:
1738cd3e
NB
2604 ena_free_io_irq(adapter);
2605err_req_irq:
548c4940 2606 ena_del_napi_in_range(adapter, 0, io_queue_count);
1738cd3e
NB
2607
2608 return rc;
2609}
2610
2611static void ena_down(struct ena_adapter *adapter)
2612{
548c4940
SJ
2613 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2614
1738cd3e
NB
2615 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2616
2617 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2618
2619 u64_stats_update_begin(&adapter->syncp);
2620 adapter->dev_stats.interface_down++;
2621 u64_stats_update_end(&adapter->syncp);
2622
1738cd3e
NB
2623 netif_carrier_off(adapter->netdev);
2624 netif_tx_disable(adapter->netdev);
2625
3f6159db 2626 /* After this point the napi handler won't enable the tx queue */
548c4940 2627 ena_napi_disable_in_range(adapter, 0, io_queue_count);
3f6159db 2628
1738cd3e 2629 /* After destroying the queues there won't be any new interrupts */
3f6159db
NB
2630
2631 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2632 int rc;
2633
e2eed0e3 2634 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3f6159db
NB
2635 if (rc)
2636 dev_err(&adapter->pdev->dev, "Device reset failed\n");
58a54b9c
AK
2637 /* stop submitting admin commands on a device that was reset */
2638 ena_com_set_admin_running_state(adapter->ena_dev, false);
3f6159db
NB
2639 }
2640
1738cd3e
NB
2641 ena_destroy_all_io_queues(adapter);
2642
2643 ena_disable_io_intr_sync(adapter);
2644 ena_free_io_irq(adapter);
548c4940 2645 ena_del_napi_in_range(adapter, 0, io_queue_count);
1738cd3e
NB
2646
2647 ena_free_all_tx_bufs(adapter);
2648 ena_free_all_rx_bufs(adapter);
2649 ena_free_all_io_tx_resources(adapter);
2650 ena_free_all_io_rx_resources(adapter);
2651}
2652
2653/* ena_open - Called when a network interface is made active
2654 * @netdev: network interface device structure
2655 *
2656 * Returns 0 on success, negative value on failure
2657 *
2658 * The open entry point is called when a network interface is made
2659 * active by the system (IFF_UP). At this point all resources needed
2660 * for transmit and receive operations are allocated, the interrupt
2661 * handler is registered with the OS, the watchdog timer is started,
2662 * and the stack is notified that the interface is ready.
2663 */
2664static int ena_open(struct net_device *netdev)
2665{
2666 struct ena_adapter *adapter = netdev_priv(netdev);
2667 int rc;
2668
2669 /* Notify the stack of the actual queue counts. */
faa615f9 2670 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
1738cd3e
NB
2671 if (rc) {
2672 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2673 return rc;
2674 }
2675
faa615f9 2676 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
1738cd3e
NB
2677 if (rc) {
2678 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2679 return rc;
2680 }
2681
2682 rc = ena_up(adapter);
2683 if (rc)
2684 return rc;
2685
2686 return rc;
2687}
2688
2689/* ena_close - Disables a network interface
2690 * @netdev: network interface device structure
2691 *
2692 * Returns 0, this is not allowed to fail
2693 *
2694 * The close entry point is called when an interface is de-activated
2695 * by the OS. The hardware is still under the drivers control, but
2696 * needs to be disabled. A global MAC reset is issued to stop the
2697 * hardware, and all transmit and receive resources are freed.
2698 */
2699static int ena_close(struct net_device *netdev)
2700{
2701 struct ena_adapter *adapter = netdev_priv(netdev);
2702
2703 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2704
58a54b9c
AK
2705 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2706 return 0;
2707
1738cd3e
NB
2708 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2709 ena_down(adapter);
2710
ee4552aa
NB
2711 /* Check for device status and issue reset if needed */
2712 check_for_admin_com_state(adapter);
2713 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2714 netif_err(adapter, ifdown, adapter->netdev,
2715 "Destroy failure, restarting device\n");
2716 ena_dump_stats_to_dmesg(adapter);
2717 /* rtnl lock already obtained in dev_ioctl() layer */
cfa324a5 2718 ena_destroy_device(adapter, false);
ee4552aa
NB
2719 ena_restore_device(adapter);
2720 }
2721
1738cd3e
NB
2722 return 0;
2723}
2724
eece4d2a
SJ
2725int ena_update_queue_sizes(struct ena_adapter *adapter,
2726 u32 new_tx_size,
2727 u32 new_rx_size)
2728{
2413ea97 2729 bool dev_was_up;
eece4d2a 2730
2413ea97 2731 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
eece4d2a
SJ
2732 ena_close(adapter->netdev);
2733 adapter->requested_tx_ring_size = new_tx_size;
2734 adapter->requested_rx_ring_size = new_rx_size;
548c4940
SJ
2735 ena_init_io_rings(adapter,
2736 0,
2737 adapter->xdp_num_queues +
2738 adapter->num_io_queues);
2413ea97
SJ
2739 return dev_was_up ? ena_up(adapter) : 0;
2740}
2741
2742int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2743{
2744 struct ena_com_dev *ena_dev = adapter->ena_dev;
838c93dc 2745 int prev_channel_count;
2413ea97
SJ
2746 bool dev_was_up;
2747
2748 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2749 ena_close(adapter->netdev);
838c93dc 2750 prev_channel_count = adapter->num_io_queues;
2413ea97 2751 adapter->num_io_queues = new_channel_count;
548c4940
SJ
2752 if (ena_xdp_present(adapter) &&
2753 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2754 adapter->xdp_first_ring = new_channel_count;
2755 adapter->xdp_num_queues = new_channel_count;
838c93dc
SJ
2756 if (prev_channel_count > new_channel_count)
2757 ena_xdp_exchange_program_rx_in_range(adapter,
2758 NULL,
2759 new_channel_count,
2760 prev_channel_count);
2761 else
2762 ena_xdp_exchange_program_rx_in_range(adapter,
2763 adapter->xdp_bpf_prog,
2764 prev_channel_count,
2765 new_channel_count);
2766 }
2767
2413ea97
SJ
2768 /* We need to destroy the rss table so that the indirection
2769 * table will be reinitialized by ena_up()
2770 */
2771 ena_com_rss_destroy(ena_dev);
548c4940
SJ
2772 ena_init_io_rings(adapter,
2773 0,
2774 adapter->xdp_num_queues +
2775 adapter->num_io_queues);
2413ea97 2776 return dev_was_up ? ena_open(adapter->netdev) : 0;
eece4d2a
SJ
2777}
2778
1738cd3e
NB
2779static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
2780{
2781 u32 mss = skb_shinfo(skb)->gso_size;
2782 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2783 u8 l4_protocol = 0;
2784
2785 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2786 ena_tx_ctx->l4_csum_enable = 1;
2787 if (mss) {
2788 ena_tx_ctx->tso_enable = 1;
2789 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2790 ena_tx_ctx->l4_csum_partial = 0;
2791 } else {
2792 ena_tx_ctx->tso_enable = 0;
2793 ena_meta->l4_hdr_len = 0;
2794 ena_tx_ctx->l4_csum_partial = 1;
2795 }
2796
2797 switch (ip_hdr(skb)->version) {
2798 case IPVERSION:
2799 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2800 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2801 ena_tx_ctx->df = 1;
2802 if (mss)
2803 ena_tx_ctx->l3_csum_enable = 1;
2804 l4_protocol = ip_hdr(skb)->protocol;
2805 break;
2806 case 6:
2807 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2808 l4_protocol = ipv6_hdr(skb)->nexthdr;
2809 break;
2810 default:
2811 break;
2812 }
2813
2814 if (l4_protocol == IPPROTO_TCP)
2815 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2816 else
2817 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2818
2819 ena_meta->mss = mss;
2820 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2821 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2822 ena_tx_ctx->meta_valid = 1;
2823
2824 } else {
2825 ena_tx_ctx->meta_valid = 0;
2826 }
2827}
2828
2829static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2830 struct sk_buff *skb)
2831{
2832 int num_frags, header_len, rc;
2833
2834 num_frags = skb_shinfo(skb)->nr_frags;
2835 header_len = skb_headlen(skb);
2836
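/* The linear head consumes one SGL entry unless it is entirely pushed as
 * the header, so the skb fits without linearization when it has fewer
 * than sgl_size frags, or exactly sgl_size frags with a head small
 * enough to be pushed.
 */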
2837 if (num_frags < tx_ring->sgl_size)
2838 return 0;
2839
2840 if ((num_frags == tx_ring->sgl_size) &&
2841 (header_len < tx_ring->tx_max_header_size))
2842 return 0;
2843
2844 u64_stats_update_begin(&tx_ring->syncp);
2845 tx_ring->tx_stats.linearize++;
2846 u64_stats_update_end(&tx_ring->syncp);
2847
2848 rc = skb_linearize(skb);
2849 if (unlikely(rc)) {
2850 u64_stats_update_begin(&tx_ring->syncp);
2851 tx_ring->tx_stats.linearize_failed++;
2852 u64_stats_update_end(&tx_ring->syncp);
2853 }
2854
2855 return rc;
2856}
2857
38005ca8
AK
2858static int ena_tx_map_skb(struct ena_ring *tx_ring,
2859 struct ena_tx_buffer *tx_info,
2860 struct sk_buff *skb,
2861 void **push_hdr,
2862 u16 *header_len)
1738cd3e 2863{
38005ca8 2864 struct ena_adapter *adapter = tx_ring->adapter;
1738cd3e 2865 struct ena_com_buf *ena_buf;
1738cd3e 2866 dma_addr_t dma;
38005ca8
AK
2867 u32 skb_head_len, frag_len, last_frag;
2868 u16 push_len = 0;
2869 u16 delta = 0;
2870 int i = 0;
1738cd3e 2871
38005ca8 2872 skb_head_len = skb_headlen(skb);
1738cd3e 2873 tx_info->skb = skb;
38005ca8 2874 ena_buf = tx_info->bufs;
1738cd3e
NB
2875
2876 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
38005ca8
AK
2877 /* When the device is in LLQ mode, the driver will copy
2878 * the header into the device memory space.
2879 * The ena_com layer assumes the header is in a linear
2880 * memory space.
2881 * This assumption might be wrong since part of the header
2882 * can be in the fragmented buffers.
2883 * Use skb_header_pointer to make sure the header is in a
2884 * linear memory space.
2885 */
2886
2887 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2888 *push_hdr = skb_header_pointer(skb, 0, push_len,
2889 tx_ring->push_buf_intermediate_buf);
2890 *header_len = push_len;
2891 if (unlikely(skb->data != *push_hdr)) {
2892 u64_stats_update_begin(&tx_ring->syncp);
2893 tx_ring->tx_stats.llq_buffer_copy++;
2894 u64_stats_update_end(&tx_ring->syncp);
2895
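/* Part of the pushed header was copied out of the first frag(s);
 * 'delta' counts those bytes so they are skipped when the frags are
 * DMA-mapped below.
 */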
2896 delta = push_len - skb_head_len;
2897 }
1738cd3e 2898 } else {
38005ca8
AK
2899 *push_hdr = NULL;
2900 *header_len = min_t(u32, skb_head_len,
2901 tx_ring->tx_max_header_size);
1738cd3e
NB
2902 }
2903
38005ca8 2904 netif_dbg(adapter, tx_queued, adapter->netdev,
1738cd3e 2905 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
38005ca8 2906 *push_hdr, push_len);
1738cd3e 2907
38005ca8 2908 if (skb_head_len > push_len) {
1738cd3e 2909 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
38005ca8
AK
2910 skb_head_len - push_len, DMA_TO_DEVICE);
2911 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
1738cd3e
NB
2912 goto error_report_dma_error;
2913
2914 ena_buf->paddr = dma;
38005ca8 2915 ena_buf->len = skb_head_len - push_len;
1738cd3e
NB
2916
2917 ena_buf++;
2918 tx_info->num_of_bufs++;
38005ca8
AK
2919 tx_info->map_linear_data = 1;
2920 } else {
2921 tx_info->map_linear_data = 0;
1738cd3e
NB
2922 }
2923
2924 last_frag = skb_shinfo(skb)->nr_frags;
2925
2926 for (i = 0; i < last_frag; i++) {
2927 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2928
38005ca8
AK
2929 frag_len = skb_frag_size(frag);
2930
2931 if (unlikely(delta >= frag_len)) {
2932 delta -= frag_len;
2933 continue;
2934 }
2935
2936 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2937 frag_len - delta, DMA_TO_DEVICE);
2938 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
1738cd3e
NB
2939 goto error_report_dma_error;
2940
2941 ena_buf->paddr = dma;
38005ca8 2942 ena_buf->len = frag_len - delta;
1738cd3e 2943 ena_buf++;
38005ca8
AK
2944 tx_info->num_of_bufs++;
2945 delta = 0;
1738cd3e
NB
2946 }
2947
38005ca8
AK
2948 return 0;
2949
2950error_report_dma_error:
2951 u64_stats_update_begin(&tx_ring->syncp);
2952 tx_ring->tx_stats.dma_mapping_err++;
2953 u64_stats_update_end(&tx_ring->syncp);
2954 netdev_warn(adapter->netdev, "failed to map skb\n");
2955
2956 tx_info->skb = NULL;
2957
2958 tx_info->num_of_bufs += i;
548c4940 2959 ena_unmap_tx_buff(tx_ring, tx_info);
38005ca8
AK
2960
2961 return -EINVAL;
2962}
2963
2964/* Called with netif_tx_lock. */
2965static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2966{
2967 struct ena_adapter *adapter = netdev_priv(dev);
2968 struct ena_tx_buffer *tx_info;
2969 struct ena_com_tx_ctx ena_tx_ctx;
2970 struct ena_ring *tx_ring;
2971 struct netdev_queue *txq;
2972 void *push_hdr;
2973 u16 next_to_use, req_id, header_len;
548c4940 2974 int qid, rc;
38005ca8
AK
2975
2976 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2977 /* Determine which tx ring we will be placed on */
2978 qid = skb_get_queue_mapping(skb);
2979 tx_ring = &adapter->tx_ring[qid];
2980 txq = netdev_get_tx_queue(dev, qid);
2981
2982 rc = ena_check_and_linearize_skb(tx_ring, skb);
2983 if (unlikely(rc))
2984 goto error_drop_packet;
2985
2986 skb_tx_timestamp(skb);
2987
2988 next_to_use = tx_ring->next_to_use;
f9172498 2989 req_id = tx_ring->free_ids[next_to_use];
38005ca8
AK
2990 tx_info = &tx_ring->tx_buffer_info[req_id];
2991 tx_info->num_of_bufs = 0;
2992
2993 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2994
2995 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2996 if (unlikely(rc))
2997 goto error_drop_packet;
1738cd3e
NB
2998
2999 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
3000 ena_tx_ctx.ena_bufs = tx_info->bufs;
3001 ena_tx_ctx.push_header = push_hdr;
3002 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
3003 ena_tx_ctx.req_id = req_id;
3004 ena_tx_ctx.header_len = header_len;
3005
3006 /* set flags and metadata */
3007 ena_tx_csum(&ena_tx_ctx, skb);
3008
548c4940
SJ
3009 rc = ena_xmit_common(dev,
3010 tx_ring,
3011 tx_info,
3012 &ena_tx_ctx,
3013 next_to_use,
3014 skb->len);
3015 if (rc)
1738cd3e 3016 goto error_unmap_dma;
1738cd3e
NB
3017
3018 netdev_tx_sent_queue(txq, skb->len);
3019
1738cd3e
NB
3020 /* Stop the queue when no more space is available; the packet can have up
3021 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header
3022 * (if the header is larger than tx_max_header_size).
3023 */
689b2bda
AK
3024 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3025 tx_ring->sgl_size + 2))) {
1738cd3e
NB
3026 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3027 __func__, qid);
3028
3029 netif_tx_stop_queue(txq);
3030 u64_stats_update_begin(&tx_ring->syncp);
3031 tx_ring->tx_stats.queue_stop++;
3032 u64_stats_update_end(&tx_ring->syncp);
3033
3034 /* There is a rare condition where this function decides to
3035 * stop the queue but meanwhile clean_tx_irq updates
3036 * next_to_completion and terminates.
3037 * The queue will remain stopped forever.
37dff155
NB
3038 * To solve this issue add an mb() to make sure that
3039 * netif_tx_stop_queue() write is visible before checking if
3040 * there is additional space in the queue.
1738cd3e 3041 */
37dff155 3042 smp_mb();
1738cd3e 3043
689b2bda
AK
3044 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3045 ENA_TX_WAKEUP_THRESH)) {
1738cd3e
NB
3046 netif_tx_wake_queue(txq);
3047 u64_stats_update_begin(&tx_ring->syncp);
3048 tx_ring->tx_stats.queue_wakeup++;
3049 u64_stats_update_end(&tx_ring->syncp);
3050 }
3051 }
3052
6b16f9ee 3053 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
37dff155
NB
3054 /* trigger the dma engine. ena_com_write_sq_doorbell()
3055 * has a mb
3056 */
3057 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
1738cd3e
NB
3058 u64_stats_update_begin(&tx_ring->syncp);
3059 tx_ring->tx_stats.doorbells++;
3060 u64_stats_update_end(&tx_ring->syncp);
3061 }
3062
3063 return NETDEV_TX_OK;
3064
1738cd3e 3065error_unmap_dma:
548c4940 3066 ena_unmap_tx_buff(tx_ring, tx_info);
38005ca8 3067 tx_info->skb = NULL;
1738cd3e
NB
3068
3069error_drop_packet:
1738cd3e
NB
3070 dev_kfree_skb(skb);
3071 return NETDEV_TX_OK;
3072}
3073
1738cd3e 3074static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
a350ecce 3075 struct net_device *sb_dev)
1738cd3e
NB
3076{
3077 u16 qid;
3078 /* We suspect that this is good for in-kernel network services that
3079 * want to loop incoming skb rx to tx in normal user-generated traffic;
3080 * most probably we will not get to this
3081 */
3082 if (skb_rx_queue_recorded(skb))
3083 qid = skb_get_rx_queue(skb);
3084 else
a350ecce 3085 qid = netdev_pick_tx(dev, skb, NULL);
1738cd3e
NB
3086
3087 return qid;
3088}
3089
095f2f1f
AK
3090static void ena_config_host_info(struct ena_com_dev *ena_dev,
3091 struct pci_dev *pdev)
1738cd3e
NB
3092{
3093 struct ena_admin_host_info *host_info;
3094 int rc;
3095
3096 /* Allocate only the host info */
3097 rc = ena_com_allocate_host_info(ena_dev);
3098 if (rc) {
3099 pr_err("Cannot allocate host info\n");
3100 return;
3101 }
3102
3103 host_info = ena_dev->host_attr.host_info;
3104
095f2f1f 3105 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
1738cd3e
NB
3106 host_info->os_type = ENA_ADMIN_OS_LINUX;
3107 host_info->kernel_ver = LINUX_VERSION_CODE;
f9133088 3108 strlcpy(host_info->kernel_ver_str, utsname()->version,
1738cd3e
NB
3109 sizeof(host_info->kernel_ver_str) - 1);
3110 host_info->os_dist = 0;
3111 strncpy(host_info->os_dist_str, utsname()->release,
3112 sizeof(host_info->os_dist_str) - 1);
92040c6d
AK
3113 host_info->driver_version =
3114 (DRV_MODULE_GEN_MAJOR) |
3115 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3116 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3117 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
095f2f1f 3118 host_info->num_cpus = num_online_cpus();
1738cd3e 3119
bd21b0cc 3120 host_info->driver_supported_features =
68f236df 3121 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
bd21b0cc
AK
3122 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
3123
1738cd3e
NB
3124 rc = ena_com_set_host_attributes(ena_dev);
3125 if (rc) {
d1497638 3126 if (rc == -EOPNOTSUPP)
1738cd3e
NB
3127 pr_warn("Cannot set host attributes\n");
3128 else
3129 pr_err("Cannot set host attributes\n");
3130
3131 goto err;
3132 }
3133
3134 return;
3135
3136err:
3137 ena_com_delete_host_info(ena_dev);
3138}
3139
3140static void ena_config_debug_area(struct ena_adapter *adapter)
3141{
3142 u32 debug_area_size;
3143 int rc, ss_count;
3144
3145 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3146 if (ss_count <= 0) {
3147 netif_err(adapter, drv, adapter->netdev,
3148 "SS count is negative\n");
3149 return;
3150 }
3151
3152 /* allocate 32 bytes for each string and 64 bits for the value */
3153 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
3154
3155 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3156 if (rc) {
3157 pr_err("Cannot allocate debug area\n");
3158 return;
3159 }
3160
3161 rc = ena_com_set_host_attributes(adapter->ena_dev);
3162 if (rc) {
d1497638 3163 if (rc == -EOPNOTSUPP)
1738cd3e
NB
3164 netif_warn(adapter, drv, adapter->netdev,
3165 "Cannot set host attributes\n");
3166 else
3167 netif_err(adapter, drv, adapter->netdev,
3168 "Cannot set host attributes\n");
3169 goto err;
3170 }
3171
3172 return;
3173err:
3174 ena_com_delete_debug_area(adapter->ena_dev);
3175}
3176
bc1f4470 3177static void ena_get_stats64(struct net_device *netdev,
3178 struct rtnl_link_stats64 *stats)
1738cd3e
NB
3179{
3180 struct ena_adapter *adapter = netdev_priv(netdev);
d81db240
NB
3181 struct ena_ring *rx_ring, *tx_ring;
3182 unsigned int start;
3183 u64 rx_drops;
5c665f8c 3184 u64 tx_drops;
d81db240 3185 int i;
1738cd3e
NB
3186
3187 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
bc1f4470 3188 return;
1738cd3e 3189
faa615f9 3190 for (i = 0; i < adapter->num_io_queues; i++) {
d81db240
NB
3191 u64 bytes, packets;
3192
3193 tx_ring = &adapter->tx_ring[i];
1738cd3e 3194
d81db240
NB
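/* u64_stats retry loop: if the datapath updated the counters while they
 * were being read, re-read them so packets/bytes form a consistent
 * snapshot (u64 reads can tear on 32-bit kernels).
 */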
3195 do {
3196 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
3197 packets = tx_ring->tx_stats.cnt;
3198 bytes = tx_ring->tx_stats.bytes;
3199 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
1738cd3e 3200
d81db240
NB
3201 stats->tx_packets += packets;
3202 stats->tx_bytes += bytes;
3203
3204 rx_ring = &adapter->rx_ring[i];
3205
3206 do {
3207 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
3208 packets = rx_ring->rx_stats.cnt;
3209 bytes = rx_ring->rx_stats.bytes;
3210 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
3211
3212 stats->rx_packets += packets;
3213 stats->rx_bytes += bytes;
3214 }
3215
3216 do {
3217 start = u64_stats_fetch_begin_irq(&adapter->syncp);
3218 rx_drops = adapter->dev_stats.rx_drops;
5c665f8c 3219 tx_drops = adapter->dev_stats.tx_drops;
d81db240 3220 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
1738cd3e 3221
d81db240 3222 stats->rx_dropped = rx_drops;
5c665f8c 3223 stats->tx_dropped = tx_drops;
1738cd3e
NB
3224
3225 stats->multicast = 0;
3226 stats->collisions = 0;
3227
3228 stats->rx_length_errors = 0;
3229 stats->rx_crc_errors = 0;
3230 stats->rx_frame_errors = 0;
3231 stats->rx_fifo_errors = 0;
3232 stats->rx_missed_errors = 0;
3233 stats->tx_window_errors = 0;
3234
3235 stats->rx_errors = 0;
3236 stats->tx_errors = 0;
1738cd3e
NB
3237}
3238
3239static const struct net_device_ops ena_netdev_ops = {
3240 .ndo_open = ena_open,
3241 .ndo_stop = ena_close,
3242 .ndo_start_xmit = ena_start_xmit,
3243 .ndo_select_queue = ena_select_queue,
3244 .ndo_get_stats64 = ena_get_stats64,
3245 .ndo_tx_timeout = ena_tx_timeout,
3246 .ndo_change_mtu = ena_change_mtu,
3247 .ndo_set_mac_address = NULL,
3248 .ndo_validate_addr = eth_validate_addr,
838c93dc 3249 .ndo_bpf = ena_xdp,
1738cd3e
NB
3250};
3251
1738cd3e
NB
3252static int ena_device_validate_params(struct ena_adapter *adapter,
3253 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3254{
3255 struct net_device *netdev = adapter->netdev;
3256 int rc;
3257
3258 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3259 adapter->mac_addr);
3260 if (!rc) {
3261 netif_err(adapter, drv, netdev,
3262 "Error, mac address are different\n");
3263 return -EINVAL;
3264 }
3265
1738cd3e
NB
3266 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3267 netif_err(adapter, drv, netdev,
3268 "Error, device max mtu is smaller than netdev MTU\n");
3269 return -EINVAL;
3270 }
3271
3272 return 0;
3273}
3274
3275static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3276 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3277 bool *wd_state)
3278{
3279 struct device *dev = &pdev->dev;
3280 bool readless_supported;
3281 u32 aenq_groups;
3282 int dma_width;
3283 int rc;
3284
3285 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3286 if (rc) {
3287 dev_err(dev, "failed to init mmio read less\n");
3288 return rc;
3289 }
3290
3291 /* The PCIe configuration space revision id indicates if mmio reg
3292 * read is disabled
3293 */
3294 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3295 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3296
e2eed0e3 3297 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
1738cd3e
NB
3298 if (rc) {
3299 dev_err(dev, "Can not reset device\n");
3300 goto err_mmio_read_less;
3301 }
3302
3303 rc = ena_com_validate_version(ena_dev);
3304 if (rc) {
3305 dev_err(dev, "device version is too low\n");
3306 goto err_mmio_read_less;
3307 }
3308
3309 dma_width = ena_com_get_dma_width(ena_dev);
3310 if (dma_width < 0) {
3311 dev_err(dev, "Invalid dma width value %d", dma_width);
6e22066f 3312 rc = dma_width;
1738cd3e
NB
3313 goto err_mmio_read_less;
3314 }
3315
3316 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3317 if (rc) {
3318 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
3319 goto err_mmio_read_less;
3320 }
3321
3322 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3323 if (rc) {
3324 dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
3325 rc);
3326 goto err_mmio_read_less;
3327 }
3328
3329 /* ENA admin level init */
f1e90f6e 3330 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
1738cd3e
NB
3331 if (rc) {
3332 dev_err(dev,
3333 "Can not initialize ena admin queue with device\n");
3334 goto err_mmio_read_less;
3335 }
3336
3337 /* To enable the msix interrupts the driver needs to know the number
3338 * of queues. So the driver uses polling mode to retrieve this
3339 * information
3340 */
3341 ena_com_set_admin_polling_mode(ena_dev, true);
3342
095f2f1f 3343 ena_config_host_info(ena_dev, pdev);
dd8427a7 3344
1738cd3e
NB
3345 /* Get Device Attributes */
3346 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3347 if (rc) {
3348 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3349 goto err_admin_init;
3350 }
3351
3352 /* Try to turn on all the available aenq groups */
3353 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3354 BIT(ENA_ADMIN_FATAL_ERROR) |
3355 BIT(ENA_ADMIN_WARNING) |
3356 BIT(ENA_ADMIN_NOTIFICATION) |
3357 BIT(ENA_ADMIN_KEEP_ALIVE);
3358
3359 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3360
3361 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3362 if (rc) {
3363 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3364 goto err_admin_init;
3365 }
3366
3367 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3368
1738cd3e
NB
3369 return 0;
3370
3371err_admin_init:
dd8427a7 3372 ena_com_delete_host_info(ena_dev);
1738cd3e
NB
3373 ena_com_admin_destroy(ena_dev);
3374err_mmio_read_less:
3375 ena_com_mmio_reg_read_request_destroy(ena_dev);
3376
3377 return rc;
3378}
3379
4d192660 3380static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
1738cd3e
NB
3381{
3382 struct ena_com_dev *ena_dev = adapter->ena_dev;
3383 struct device *dev = &adapter->pdev->dev;
3384 int rc;
3385
4d192660 3386 rc = ena_enable_msix(adapter);
1738cd3e
NB
3387 if (rc) {
3388 dev_err(dev, "Can not reserve msix vectors\n");
3389 return rc;
3390 }
3391
3392 ena_setup_mgmnt_intr(adapter);
3393
3394 rc = ena_request_mgmnt_irq(adapter);
3395 if (rc) {
3396 dev_err(dev, "Can not setup management interrupts\n");
3397 goto err_disable_msix;
3398 }
3399
3400 ena_com_set_admin_polling_mode(ena_dev, false);
3401
3402 ena_com_admin_aenq_enable(ena_dev);
3403
3404 return 0;
3405
3406err_disable_msix:
06443684
NB
3407 ena_disable_msix(adapter);
3408
1738cd3e
NB
3409 return rc;
3410}
3411
cfa324a5 3412static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
1738cd3e 3413{
1738cd3e
NB
3414 struct net_device *netdev = adapter->netdev;
3415 struct ena_com_dev *ena_dev = adapter->ena_dev;
8c5c7abd 3416 bool dev_up;
3f6159db 3417
fe870c77
NB
3418 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3419 return;
3420
3f6159db
NB
3421 netif_carrier_off(netdev);
3422
1738cd3e
NB
3423 del_timer_sync(&adapter->timer_service);
3424
1738cd3e 3425 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
8c5c7abd 3426 adapter->dev_up_before_reset = dev_up;
cfa324a5
NB
3427 if (!graceful)
3428 ena_com_set_admin_running_state(ena_dev, false);
1738cd3e 3429
ee4552aa
NB
3430 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3431 ena_down(adapter);
1738cd3e 3432
bd791175 3433 /* Stop the device from sending AENQ events (in case the reset flag is set
58a54b9c 3434 * and the device is up, ena_down() has already reset the device).
8c5c7abd
NB
3435 */
3436 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3437 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3438
1738cd3e
NB
3439 ena_free_mgmnt_irq(adapter);
3440
06443684 3441 ena_disable_msix(adapter);
1738cd3e
NB
3442
3443 ena_com_abort_admin_commands(ena_dev);
3444
3445 ena_com_wait_for_abort_completion(ena_dev);
3446
3447 ena_com_admin_destroy(ena_dev);
3448
3449 ena_com_mmio_reg_read_request_destroy(ena_dev);
3450
c1c0e40b 3451 /* return reset reason to default value */
e2eed0e3 3452 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
8c5c7abd 3453
3f6159db 3454 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
fe870c77 3455 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
8c5c7abd 3456}
3f6159db 3457
8c5c7abd
NB
3458static int ena_restore_device(struct ena_adapter *adapter)
3459{
3460 struct ena_com_dev_get_features_ctx get_feat_ctx;
3461 struct ena_com_dev *ena_dev = adapter->ena_dev;
3462 struct pci_dev *pdev = adapter->pdev;
3463 bool wd_state;
3464 int rc;
1738cd3e 3465
d18e4f68 3466 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1738cd3e
NB
3467 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
3468 if (rc) {
3469 dev_err(&pdev->dev, "Can not initialize device\n");
3470 goto err;
3471 }
3472 adapter->wd_state = wd_state;
3473
3474 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3475 if (rc) {
3476 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3477 goto err_device_destroy;
3478 }
3479
4d192660 3480 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
1738cd3e
NB
3481 if (rc) {
3482 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3483 goto err_device_destroy;
3484 }
3485 /* If the interface was up before the reset, bring it up */
8c5c7abd 3486 if (adapter->dev_up_before_reset) {
1738cd3e
NB
3487 rc = ena_up(adapter);
3488 if (rc) {
3489 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3490 goto err_disable_msix;
3491 }
3492 }
3493
fe870c77 3494 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
e1f1bd9b
AK
3495
3496 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3497 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3498 netif_carrier_on(adapter->netdev);
3499
1738cd3e 3500 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1a63443a 3501 dev_err(&pdev->dev, "Device reset completed successfully\n");
dfdde134 3502 adapter->last_keep_alive_jiffies = jiffies;
1738cd3e 3503
8c5c7abd 3504 return rc;
1738cd3e
NB
3505err_disable_msix:
3506 ena_free_mgmnt_irq(adapter);
06443684 3507 ena_disable_msix(adapter);
1738cd3e 3508err_device_destroy:
d7703ddb
AK
3509 ena_com_abort_admin_commands(ena_dev);
3510 ena_com_wait_for_abort_completion(ena_dev);
1738cd3e 3511 ena_com_admin_destroy(ena_dev);
d7703ddb 3512 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
e76ad21d 3513 ena_com_mmio_reg_read_request_destroy(ena_dev);
1738cd3e 3514err:
22b331c9 3515 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
d18e4f68 3516 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
1738cd3e
NB
3517 dev_err(&pdev->dev,
3518 "Reset attempt failed. Can not reset the device\n");
8c5c7abd
NB
3519
3520 return rc;
3521}
3522
3523static void ena_fw_reset_device(struct work_struct *work)
3524{
3525 struct ena_adapter *adapter =
3526 container_of(work, struct ena_adapter, reset_task);
3527 struct pci_dev *pdev = adapter->pdev;
3528
3529 if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3530 dev_err(&pdev->dev,
3531 "device reset schedule while reset bit is off\n");
3532 return;
3533 }
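/* rtnl serializes the destroy/restore sequence against ndo_open/ndo_close
 * and ethtool reconfiguration paths.
 */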
3534 rtnl_lock();
cfa324a5 3535 ena_destroy_device(adapter, false);
8c5c7abd
NB
3536 ena_restore_device(adapter);
3537 rtnl_unlock();
1738cd3e
NB
3538}
3539
8510e1a3
NB
3540static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3541 struct ena_ring *rx_ring)
3542{
3543 if (likely(rx_ring->first_interrupt))
3544 return 0;
3545
3546 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3547 return 0;
3548
3549 rx_ring->no_interrupt_event_cnt++;
3550
3551 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3552 netif_err(adapter, rx_err, adapter->netdev,
3553 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3554 rx_ring->qid);
3555 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3556 smp_mb__before_atomic();
3557 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3558 return -EIO;
3559 }
3560
3561 return 0;
3562}
3563
3564static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3565 struct ena_ring *tx_ring)
1738cd3e
NB
3566{
3567 struct ena_tx_buffer *tx_buf;
3568 unsigned long last_jiffies;
800c55cb 3569 u32 missed_tx = 0;
11095fdb 3570 int i, rc = 0;
800c55cb
NB
3571
3572 for (i = 0; i < tx_ring->ring_size; i++) {
3573 tx_buf = &tx_ring->tx_buffer_info[i];
3574 last_jiffies = tx_buf->last_jiffies;
8510e1a3
NB
3575
3576 if (last_jiffies == 0)
3577 /* no pending Tx at this location */
3578 continue;
3579
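/* No interrupt has ever been seen on this queue: allow twice the normal
 * completion timeout before treating it as a missed interrupt.
 */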
3580 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
3581 2 * adapter->missing_tx_completion_to))) {
3582 /* If after the graceful period the interrupt is still not
3583 * received, we schedule a reset
3584 */
3585 netif_err(adapter, tx_err, adapter->netdev,
3586 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3587 tx_ring->qid);
3588 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3589 smp_mb__before_atomic();
3590 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3591 return -EIO;
3592 }
3593
3594 if (unlikely(time_is_before_jiffies(last_jiffies +
3595 adapter->missing_tx_completion_to))) {
800c55cb
NB
3596 if (!tx_buf->print_once)
3597 netif_notice(adapter, tx_err, adapter->netdev,
3598 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
3599 tx_ring->qid, i);
3600
3601 tx_buf->print_once = 1;
3602 missed_tx++;
800c55cb
NB
3603 }
3604 }
3605
11095fdb
NB
3606 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3607 netif_err(adapter, tx_err, adapter->netdev,
3608 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3609 missed_tx,
3610 adapter->missing_tx_completion_threshold);
3611 adapter->reset_reason =
3612 ENA_REGS_RESET_MISS_TX_CMPL;
3613 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3614 rc = -EIO;
3615 }
3616
3617 u64_stats_update_begin(&tx_ring->syncp);
3618 tx_ring->tx_stats.missed_tx = missed_tx;
3619 u64_stats_update_end(&tx_ring->syncp);
3620
3621 return rc;
800c55cb
NB
3622}
3623
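/* Called from the timer service. To bound the work per invocation, only up
 * to ENA_MONITORED_TX_QUEUES rings are examined each time, continuing
 * round-robin from last_monitored_tx_qid on the next run.
 */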
8510e1a3 3624static void check_for_missing_completions(struct ena_adapter *adapter)
800c55cb 3625{
1738cd3e 3626 struct ena_ring *tx_ring;
8510e1a3 3627 struct ena_ring *rx_ring;
800c55cb 3628 int i, budget, rc;
548c4940 3629 int io_queue_count;
1738cd3e 3630
548c4940 3631 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
1738cd3e
NB
 3632 /* Make sure the driver doesn't turn the device off in another process */
3633 smp_rmb();
3634
3635 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3636 return;
3637
3f6159db
NB
3638 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3639 return;
3640
82ef30f1
NB
3641 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3642 return;
3643
1738cd3e
NB
3644 budget = ENA_MONITORED_TX_QUEUES;
3645
548c4940 3646 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
1738cd3e 3647 tx_ring = &adapter->tx_ring[i];
8510e1a3
NB
3648 rx_ring = &adapter->rx_ring[i];
3649
3650 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3651 if (unlikely(rc))
3652 return;
1738cd3e 3653
548c4940
SJ
3654 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3655 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
800c55cb
NB
3656 if (unlikely(rc))
3657 return;
1738cd3e
NB
3658
3659 budget--;
3660 if (!budget)
3661 break;
3662 }
3663
548c4940 3664 adapter->last_monitored_tx_qid = i % io_queue_count;
1738cd3e
NB
3665}
3666
a3af7c18
NB
3667/* trigger napi schedule after 2 consecutive detections */
3668#define EMPTY_RX_REFILL 2
3669/* For the rare case where the device runs out of Rx descriptors and the
3670 * napi handler failed to refill new Rx descriptors (due to a lack of memory
3671 * for example).
3672 * This case will lead to a deadlock:
 3673 * The device won't send interrupts since all the new Rx packets will be dropped.
 3674 * The napi handler won't allocate new Rx descriptors so the device won't be
 3675 * able to send new packets.
3676 *
3677 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
3678 * It is recommended to have at least 512MB, with a minimum of 128MB for
 3679 * constrained environments.
3680 *
3681 * When such a situation is detected - Reschedule napi
3682 */
3683static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3684{
3685 struct ena_ring *rx_ring;
3686 int i, refill_required;
3687
3688 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3689 return;
3690
3691 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3692 return;
3693
faa615f9 3694 for (i = 0; i < adapter->num_io_queues; i++) {
a3af7c18
NB
3695 rx_ring = &adapter->rx_ring[i];
3696
7cfe9a55 3697 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
a3af7c18
NB
3698 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3699 rx_ring->empty_rx_queue++;
3700
3701 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3702 u64_stats_update_begin(&rx_ring->syncp);
3703 rx_ring->rx_stats.empty_rx_ring++;
3704 u64_stats_update_end(&rx_ring->syncp);
3705
3706 netif_err(adapter, drv, adapter->netdev,
3707 "trigger refill for ring %d\n", i);
3708
3709 napi_schedule(rx_ring->napi);
3710 rx_ring->empty_rx_queue = 0;
3711 }
3712 } else {
3713 rx_ring->empty_rx_queue = 0;
3714 }
3715 }
3716}
3717
1738cd3e
NB
3718/* Check for keep alive expiration */
3719static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3720{
3721 unsigned long keep_alive_expired;
3722
3723 if (!adapter->wd_state)
3724 return;
3725
82ef30f1
NB
3726 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3727 return;
3728
2a6e5fa2
AK
3729 keep_alive_expired = adapter->last_keep_alive_jiffies +
3730 adapter->keep_alive_timeout;
1738cd3e
NB
3731 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3732 netif_err(adapter, drv, adapter->netdev,
3733 "Keep alive watchdog timeout.\n");
3734 u64_stats_update_begin(&adapter->syncp);
3735 adapter->dev_stats.wd_expired++;
3736 u64_stats_update_end(&adapter->syncp);
e2eed0e3 3737 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
1738cd3e
NB
3738 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3739 }
3740}
3741
3742static void check_for_admin_com_state(struct ena_adapter *adapter)
3743{
3744 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3745 netif_err(adapter, drv, adapter->netdev,
3746 "ENA admin queue is not in running state!\n");
3747 u64_stats_update_begin(&adapter->syncp);
3748 adapter->dev_stats.admin_q_pause++;
3749 u64_stats_update_end(&adapter->syncp);
e2eed0e3 3750 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
1738cd3e
NB
3751 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3752 }
3753}
3754
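/* Apply timeout hints supplied by the device (at init time or via an AENQ
 * notification). Millisecond hint values are converted to jiffies, and the
 * MMIO read timeout to microseconds; ENA_HW_HINTS_NO_TIMEOUT disables the
 * corresponding watchdog check.
 */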
82ef30f1
NB
3755static void ena_update_hints(struct ena_adapter *adapter,
3756 struct ena_admin_ena_hw_hints *hints)
3757{
3758 struct net_device *netdev = adapter->netdev;
3759
3760 if (hints->admin_completion_tx_timeout)
3761 adapter->ena_dev->admin_queue.completion_timeout =
3762 hints->admin_completion_tx_timeout * 1000;
3763
3764 if (hints->mmio_read_timeout)
3765 /* convert to usec */
3766 adapter->ena_dev->mmio_read.reg_read_to =
3767 hints->mmio_read_timeout * 1000;
3768
3769 if (hints->missed_tx_completion_count_threshold_to_reset)
3770 adapter->missing_tx_completion_threshold =
3771 hints->missed_tx_completion_count_threshold_to_reset;
3772
3773 if (hints->missing_tx_completion_timeout) {
3774 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3775 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3776 else
3777 adapter->missing_tx_completion_to =
3778 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3779 }
3780
3781 if (hints->netdev_wd_timeout)
3782 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3783
3784 if (hints->driver_watchdog_timeout) {
3785 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3786 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3787 else
3788 adapter->keep_alive_timeout =
3789 msecs_to_jiffies(hints->driver_watchdog_timeout);
3790 }
3791}
3792
1738cd3e
NB
3793static void ena_update_host_info(struct ena_admin_host_info *host_info,
3794 struct net_device *netdev)
3795{
3796 host_info->supported_network_features[0] =
3797 netdev->features & GENMASK_ULL(31, 0);
3798 host_info->supported_network_features[1] =
3799 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3800}
3801
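/* Periodic (roughly once per second) watchdog: checks keep-alive expiration,
 * the admin queue state, missing Tx/Rx completions and empty Rx rings,
 * refreshes the debug area and host info, and queues reset_task when a reset
 * has been triggered; otherwise it re-arms itself for the next second.
 */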
e99e88a9 3802static void ena_timer_service(struct timer_list *t)
1738cd3e 3803{
e99e88a9 3804 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
1738cd3e
NB
3805 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3806 struct ena_admin_host_info *host_info =
3807 adapter->ena_dev->host_attr.host_info;
3808
3809 check_for_missing_keep_alive(adapter);
3810
3811 check_for_admin_com_state(adapter);
3812
8510e1a3 3813 check_for_missing_completions(adapter);
1738cd3e 3814
a3af7c18
NB
3815 check_for_empty_rx_ring(adapter);
3816
1738cd3e
NB
3817 if (debug_area)
3818 ena_dump_stats_to_buf(adapter, debug_area);
3819
3820 if (host_info)
3821 ena_update_host_info(host_info, adapter->netdev);
3822
3f6159db 3823 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1738cd3e
NB
3824 netif_err(adapter, drv, adapter->netdev,
3825 "Trigger reset is on\n");
3826 ena_dump_stats_to_dmesg(adapter);
3827 queue_work(ena_wq, &adapter->reset_task);
3828 return;
3829 }
3830
3831 /* Reset the timer */
2a6e5fa2 3832 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1738cd3e
NB
3833}
3834
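/* The number of usable IO queues is the minimum of: the number of online
 * CPUs, ENA_MAX_NUM_IO_QUEUES, the device's Rx/Tx SQ and CQ limits, and the
 * available MSI-X vectors minus the one reserved for management.
 */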
ba6f6b41 3835static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
736ce3f4
SJ
3836 struct ena_com_dev *ena_dev,
3837 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1738cd3e 3838{
ba6f6b41 3839 u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
31aa9857
SJ
3840
3841 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3842 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3843 &get_feat_ctx->max_queue_ext.max_queue_ext;
736ce3f4 3844 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
31aa9857 3845 max_queue_ext->max_rx_cq_num);
1738cd3e 3846
31aa9857
SJ
3847 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3848 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3849 } else {
3850 struct ena_admin_queue_feature_desc *max_queues =
3851 &get_feat_ctx->max_queues;
3852 io_tx_sq_num = max_queues->max_sq_num;
3853 io_tx_cq_num = max_queues->max_cq_num;
736ce3f4 3854 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
31aa9857
SJ
3855 }
3856
3857 /* In case of LLQ use the llq fields for the tx SQ/CQ */
9fd25592 3858 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
31aa9857 3859 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
1738cd3e 3860
736ce3f4
SJ
3861 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3862 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3863 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3864 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
1738cd3e 3865 /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
736ce3f4
SJ
3866 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3867 if (unlikely(!max_num_io_queues)) {
1738cd3e
NB
3868 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3869 return -EFAULT;
3870 }
3871
736ce3f4 3872 return max_num_io_queues;
1738cd3e
NB
3873}
3874
38005ca8
AK
3875static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3876 struct ena_com_dev *ena_dev,
3877 struct ena_admin_feature_llq_desc *llq,
3878 struct ena_llq_configurations *llq_default_configurations)
1738cd3e
NB
3879{
3880 bool has_mem_bar;
38005ca8
AK
3881 int rc;
3882 u32 llq_feature_mask;
3883
3884 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3885 if (!(ena_dev->supported_features & llq_feature_mask)) {
3886 dev_err(&pdev->dev,
3887 "LLQ is not supported Fallback to host mode policy.\n");
3888 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3889 return 0;
3890 }
1738cd3e
NB
3891
3892 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3893
38005ca8
AK
3894 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3895 if (unlikely(rc)) {
3896 dev_err(&pdev->dev,
3897 "Failed to configure the device mode. Fallback to host mode policy.\n");
3898 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3899 return 0;
3900 }
3901
3902 /* Nothing to config, exit */
3903 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3904 return 0;
3905
3906 if (!has_mem_bar) {
3907 dev_err(&pdev->dev,
3908 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
1738cd3e 3909 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
38005ca8
AK
3910 return 0;
3911 }
3912
3913 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3914 pci_resource_start(pdev, ENA_MEM_BAR),
3915 pci_resource_len(pdev, ENA_MEM_BAR));
3916
3917 if (!ena_dev->mem_bar)
3918 return -EFAULT;
3919
3920 return 0;
1738cd3e
NB
3921}
3922
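/* Translate the device's reported Tx/Rx offload capability bits into netdev
 * feature flags (checksum offloads and TSO variants); SG, RXHASH and HIGHDMA
 * are always advertised, and the resulting set is mirrored into hw_features
 * and vlan_features.
 */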
3923static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3924 struct net_device *netdev)
3925{
3926 netdev_features_t dev_features = 0;
3927
3928 /* Set offload features */
3929 if (feat->offload.tx &
3930 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3931 dev_features |= NETIF_F_IP_CSUM;
3932
3933 if (feat->offload.tx &
3934 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3935 dev_features |= NETIF_F_IPV6_CSUM;
3936
3937 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3938 dev_features |= NETIF_F_TSO;
3939
3940 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3941 dev_features |= NETIF_F_TSO6;
3942
3943 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3944 dev_features |= NETIF_F_TSO_ECN;
3945
3946 if (feat->offload.rx_supported &
3947 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3948 dev_features |= NETIF_F_RXCSUM;
3949
3950 if (feat->offload.rx_supported &
3951 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3952 dev_features |= NETIF_F_RXCSUM;
3953
3954 netdev->features =
3955 dev_features |
3956 NETIF_F_SG |
1738cd3e
NB
3957 NETIF_F_RXHASH |
3958 NETIF_F_HIGHDMA;
3959
3960 netdev->hw_features |= netdev->features;
3961 netdev->vlan_features |= netdev->features;
3962}
3963
3964static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3965 struct ena_com_dev_get_features_ctx *feat)
3966{
3967 struct net_device *netdev = adapter->netdev;
3968
3969 /* Copy mac address */
3970 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3971 eth_hw_addr_random(netdev);
3972 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3973 } else {
3974 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3975 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3976 }
3977
3978 /* Set offload features */
3979 ena_set_dev_offloads(feat, netdev);
3980
3981 adapter->max_mtu = feat->dev_attr.max_mtu;
d894be57
JW
3982 netdev->max_mtu = adapter->max_mtu;
3983 netdev->min_mtu = ENA_MIN_MTU;
1738cd3e
NB
3984}
3985
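/* Set up a default RSS configuration: fill the indirection table with the
 * standard ethtool default spread across the IO queues and select Toeplitz
 * hashing. -EOPNOTSUPP is tolerated since the device may not expose every
 * RSS capability.
 */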
3986static int ena_rss_init_default(struct ena_adapter *adapter)
3987{
3988 struct ena_com_dev *ena_dev = adapter->ena_dev;
3989 struct device *dev = &adapter->pdev->dev;
3990 int rc, i;
3991 u32 val;
3992
3993 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3994 if (unlikely(rc)) {
3995 dev_err(dev, "Cannot init indirect table\n");
3996 goto err_rss_init;
3997 }
3998
3999 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
faa615f9 4000 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
1738cd3e
NB
4001 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
4002 ENA_IO_RXQ_IDX(val));
d1497638 4003 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
1738cd3e
NB
4004 dev_err(dev, "Cannot fill indirect table\n");
4005 goto err_fill_indir;
4006 }
4007 }
4008
c1bd17e5 4009 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
1738cd3e 4010 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
d1497638 4011 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
1738cd3e
NB
4012 dev_err(dev, "Cannot fill hash function\n");
4013 goto err_fill_indir;
4014 }
4015
4016 rc = ena_com_set_default_hash_ctrl(ena_dev);
d1497638 4017 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
1738cd3e
NB
4018 dev_err(dev, "Cannot fill hash control\n");
4019 goto err_fill_indir;
4020 }
4021
4022 return 0;
4023
4024err_fill_indir:
4025 ena_com_rss_destroy(ena_dev);
4026err_rss_init:
4027
4028 return rc;
4029}
4030
4031static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4032{
d79c3888 4033 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
0857d92f 4034
1738cd3e
NB
4035 pci_release_selected_regions(pdev, release_bars);
4036}
4037
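/* Default LLQ configuration: packet headers placed inline in the LLQ entry,
 * 128-byte ring entries with multiple descriptors per entry, and two
 * descriptors pushed before the header.
 */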
c2b54204 4038static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
38005ca8
AK
4039{
4040 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
4041 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
4042 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
4043 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
4044 llq_config->llq_ring_entry_size_value = 128;
4045}
4046
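/* Derive the Tx/Rx ring sizes: clamp the ENA_DEFAULT_RING_SIZE defaults
 * between ENA_MIN_RING_SIZE and the device limits (taking the LLQ depth into
 * account in LLQ placement mode), then round both the defaults and the
 * maxima down to a power of two.
 */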
4d192660 4047static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
1738cd3e 4048{
31aa9857
SJ
4049 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
4050 struct ena_com_dev *ena_dev = ctx->ena_dev;
4051 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
4052 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
4053 u32 max_tx_queue_size;
4054 u32 max_rx_queue_size;
1738cd3e 4055
4d192660 4056 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
31aa9857
SJ
4057 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4058 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
4059 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
4060 max_queue_ext->max_rx_sq_depth);
4061 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
1738cd3e 4062
31aa9857
SJ
4063 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4064 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4065 llq->max_llq_depth);
4066 else
4067 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4068 max_queue_ext->max_tx_sq_depth);
1738cd3e 4069
31aa9857
SJ
4070 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4071 max_queue_ext->max_per_packet_tx_descs);
4072 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4073 max_queue_ext->max_per_packet_rx_descs);
4074 } else {
4075 struct ena_admin_queue_feature_desc *max_queues =
4076 &ctx->get_feat_ctx->max_queues;
4077 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
4078 max_queues->max_sq_depth);
4079 max_tx_queue_size = max_queues->max_cq_depth;
4080
4081 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4082 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4083 llq->max_llq_depth);
4084 else
4085 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4086 max_queues->max_sq_depth);
4087
4088 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4089 max_queues->max_packet_tx_descs);
4090 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4091 max_queues->max_packet_rx_descs);
4092 }
4093
4094 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
4095 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
1738cd3e 4096
13ca32a6
SJ
4097 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
4098 max_tx_queue_size);
4099 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
4100 max_rx_queue_size);
31aa9857
SJ
4101
4102 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
4103 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
4104
31aa9857
SJ
4105 ctx->max_tx_queue_size = max_tx_queue_size;
4106 ctx->max_rx_queue_size = max_rx_queue_size;
4107 ctx->tx_queue_size = tx_queue_size;
4108 ctx->rx_queue_size = rx_queue_size;
1738cd3e 4109
31aa9857 4110 return 0;
1738cd3e
NB
4111}
4112
4113/* ena_probe - Device Initialization Routine
4114 * @pdev: PCI device information struct
4115 * @ent: entry in ena_pci_tbl
4116 *
4117 * Returns 0 on success, negative on failure
4118 *
4119 * ena_probe initializes an adapter identified by a pci_dev structure.
4120 * The OS initialization, configuring of the adapter private structure,
4121 * and a hardware reset occur.
4122 */
4123static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4124{
4125 struct ena_com_dev_get_features_ctx get_feat_ctx;
31aa9857 4126 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
38005ca8 4127 struct ena_llq_configurations llq_config;
1738cd3e 4128 struct ena_com_dev *ena_dev = NULL;
83b92404 4129 struct ena_adapter *adapter;
83b92404
SJ
4130 struct net_device *netdev;
4131 static int adapters_found;
736ce3f4 4132 u32 max_num_io_queues;
83b92404 4133 char *queue_type_str;
1738cd3e 4134 bool wd_state;
736ce3f4 4135 int bars, rc;
1738cd3e
NB
4136
4137 dev_dbg(&pdev->dev, "%s\n", __func__);
4138
1738cd3e
NB
4139 rc = pci_enable_device_mem(pdev);
4140 if (rc) {
4141 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4142 return rc;
4143 }
4144
4145 pci_set_master(pdev);
4146
4147 ena_dev = vzalloc(sizeof(*ena_dev));
4148 if (!ena_dev) {
4149 rc = -ENOMEM;
4150 goto err_disable_device;
4151 }
4152
4153 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4154 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4155 if (rc) {
4156 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4157 rc);
4158 goto err_free_ena_dev;
4159 }
4160
0857d92f
NB
4161 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4162 pci_resource_start(pdev, ENA_REG_BAR),
4163 pci_resource_len(pdev, ENA_REG_BAR));
1738cd3e
NB
4164 if (!ena_dev->reg_bar) {
4165 dev_err(&pdev->dev, "failed to remap regs bar\n");
4166 rc = -EFAULT;
4167 goto err_free_region;
4168 }
4169
4170 ena_dev->dmadev = &pdev->dev;
4171
4172 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
4173 if (rc) {
4174 dev_err(&pdev->dev, "ena device init failed\n");
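 /* A timeout here likely means the device is not ready yet,
  * so let the PCI core retry the probe later instead of
  * failing outright.
  */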
4175 if (rc == -ETIME)
4176 rc = -EPROBE_DEFER;
4177 goto err_free_region;
4178 }
4179
38005ca8 4180 set_default_llq_configurations(&llq_config);
1738cd3e 4181
38005ca8
AK
4182 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
4183 &llq_config);
4184 if (rc) {
 4185 dev_err(&pdev->dev, "Failed to set the queues placement policy\n");
4186 goto err_device_destroy;
1738cd3e
NB
4187 }
4188
31aa9857
SJ
4189 calc_queue_ctx.ena_dev = ena_dev;
4190 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
4191 calc_queue_ctx.pdev = pdev;
4192
13830937 4193 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
4d192660
SJ
4194 * Updated during device initialization with the real granularity
4195 */
1738cd3e 4196 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
15619e72 4197 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
79226cea 4198 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
736ce3f4 4199 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4d192660 4200 rc = ena_calc_io_queue_size(&calc_queue_ctx);
736ce3f4 4201 if (rc || !max_num_io_queues) {
1738cd3e
NB
4202 rc = -EFAULT;
4203 goto err_device_destroy;
4204 }
4205
1738cd3e 4206 /* dev zeroed in init_etherdev */
736ce3f4 4207 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
1738cd3e
NB
4208 if (!netdev) {
4209 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4210 rc = -ENOMEM;
4211 goto err_device_destroy;
4212 }
4213
4214 SET_NETDEV_DEV(netdev, &pdev->dev);
4215
4216 adapter = netdev_priv(netdev);
4217 pci_set_drvdata(pdev, adapter);
4218
4219 adapter->ena_dev = ena_dev;
4220 adapter->netdev = netdev;
4221 adapter->pdev = pdev;
4222
4223 ena_set_conf_feat_params(adapter, &get_feat_ctx);
4224
4225 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
e2eed0e3 4226 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
1738cd3e 4227
13ca32a6
SJ
4228 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
4229 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
9f9ae3f9
SJ
4230 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
4231 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
31aa9857
SJ
4232 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
4233 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
1738cd3e 4234
736ce3f4
SJ
4235 adapter->num_io_queues = max_num_io_queues;
4236 adapter->max_num_io_queues = max_num_io_queues;
4237
548c4940
SJ
4238 adapter->xdp_first_ring = 0;
4239 adapter->xdp_num_queues = 0;
4240
1738cd3e
NB
4241 adapter->last_monitored_tx_qid = 0;
4242
4243 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
4244 adapter->wd_state = wd_state;
4245
4246 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4247
4248 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4249 if (rc) {
4250 dev_err(&pdev->dev,
4251 "Failed to query interrupt moderation feature\n");
4252 goto err_netdev_destroy;
4253 }
548c4940
SJ
4254 ena_init_io_rings(adapter,
4255 0,
4256 adapter->xdp_num_queues +
4257 adapter->num_io_queues);
1738cd3e
NB
4258
4259 netdev->netdev_ops = &ena_netdev_ops;
4260 netdev->watchdog_timeo = TX_TIMEOUT;
4261 ena_set_ethtool_ops(netdev);
4262
4263 netdev->priv_flags |= IFF_UNICAST_FLT;
4264
4265 u64_stats_init(&adapter->syncp);
4266
4d192660 4267 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
1738cd3e
NB
4268 if (rc) {
4269 dev_err(&pdev->dev,
4270 "Failed to enable and set the admin interrupts\n");
4271 goto err_worker_destroy;
4272 }
4273 rc = ena_rss_init_default(adapter);
d1497638 4274 if (rc && (rc != -EOPNOTSUPP)) {
1738cd3e
NB
4275 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4276 goto err_free_msix;
4277 }
4278
4279 ena_config_debug_area(adapter);
4280
4281 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4282
4283 netif_carrier_off(netdev);
4284
4285 rc = register_netdev(netdev);
4286 if (rc) {
4287 dev_err(&pdev->dev, "Cannot register net device\n");
4288 goto err_rss;
4289 }
4290
1738cd3e
NB
4291 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4292
4293 adapter->last_keep_alive_jiffies = jiffies;
82ef30f1
NB
4294 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4295 adapter->missing_tx_completion_to = TX_TIMEOUT;
4296 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4297
4298 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
1738cd3e 4299
e99e88a9 4300 timer_setup(&adapter->timer_service, ena_timer_service, 0);
f850b4a7 4301 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
1738cd3e 4302
38005ca8
AK
4303 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
4304 queue_type_str = "Regular";
4305 else
4306 queue_type_str = "Low Latency";
4307
4308 dev_info(&pdev->dev,
9f648f7b 4309 "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
1738cd3e 4310 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
9f648f7b 4311 netdev->dev_addr, queue_type_str);
1738cd3e
NB
4312
4313 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4314
4315 adapters_found++;
4316
4317 return 0;
4318
4319err_rss:
4320 ena_com_delete_debug_area(ena_dev);
4321 ena_com_rss_destroy(ena_dev);
4322err_free_msix:
e2eed0e3 4323 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
58a54b9c
AK
4324 /* stop submitting admin commands on a device that was reset */
4325 ena_com_set_admin_running_state(ena_dev, false);
1738cd3e 4326 ena_free_mgmnt_irq(adapter);
06443684 4327 ena_disable_msix(adapter);
1738cd3e 4328err_worker_destroy:
1738cd3e 4329 del_timer(&adapter->timer_service);
1738cd3e
NB
4330err_netdev_destroy:
4331 free_netdev(netdev);
4332err_device_destroy:
4333 ena_com_delete_host_info(ena_dev);
4334 ena_com_admin_destroy(ena_dev);
4335err_free_region:
4336 ena_release_bars(ena_dev, pdev);
4337err_free_ena_dev:
1738cd3e
NB
4338 vfree(ena_dev);
4339err_disable_device:
4340 pci_disable_device(pdev);
4341 return rc;
4342}
4343
1738cd3e
NB
4344/*****************************************************************************/
4345
428c4913 4346/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
1738cd3e 4347 * @pdev: PCI device information struct
428c4913 4348 * @shutdown: Is it a shutdown operation? If false, it is a device removal
1738cd3e 4349 *
428c4913
GP
4350 * __ena_shutoff is a helper routine that does the real work on shutdown and
 4351 * removal paths; the difference between those paths is whether to detach or
 4352 * unregister the netdevice.
1738cd3e 4353 */
428c4913 4354static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
1738cd3e
NB
4355{
4356 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4357 struct ena_com_dev *ena_dev;
4358 struct net_device *netdev;
4359
1738cd3e
NB
4360 ena_dev = adapter->ena_dev;
4361 netdev = adapter->netdev;
4362
4363#ifdef CONFIG_RFS_ACCEL
4364 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4365 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4366 netdev->rx_cpu_rmap = NULL;
4367 }
4368#endif /* CONFIG_RFS_ACCEL */
1738cd3e
NB
4369 del_timer_sync(&adapter->timer_service);
4370
4371 cancel_work_sync(&adapter->reset_task);
4372
428c4913 4373 rtnl_lock(); /* lock released inside the below if-else block */
c1c0e40b 4374 adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
944b28aa 4375 ena_destroy_device(adapter, true);
428c4913
GP
4376 if (shutdown) {
4377 netif_device_detach(netdev);
4378 dev_close(netdev);
4379 rtnl_unlock();
4380 } else {
4381 rtnl_unlock();
4382 unregister_netdev(netdev);
4383 free_netdev(netdev);
4384 }
1738cd3e 4385
1738cd3e
NB
4386 ena_com_rss_destroy(ena_dev);
4387
4388 ena_com_delete_debug_area(ena_dev);
4389
4390 ena_com_delete_host_info(ena_dev);
4391
4392 ena_release_bars(ena_dev, pdev);
4393
1738cd3e
NB
4394 pci_disable_device(pdev);
4395
1738cd3e
NB
4396 vfree(ena_dev);
4397}
4398
428c4913
GP
4399/* ena_remove - Device Removal Routine
4400 * @pdev: PCI device information struct
4401 *
4402 * ena_remove is called by the PCI subsystem to alert the driver
4403 * that it should release a PCI device.
4404 */
4405
4406static void ena_remove(struct pci_dev *pdev)
4407{
4408 __ena_shutoff(pdev, false);
4409}
4410
4411/* ena_shutdown - Device Shutdown Routine
4412 * @pdev: PCI device information struct
4413 *
4414 * ena_shutdown is called by the PCI subsystem to alert the driver that
4415 * a shutdown/reboot (or kexec) is happening and device must be disabled.
4416 */
4417
4418static void ena_shutdown(struct pci_dev *pdev)
4419{
4420 __ena_shutoff(pdev, true);
4421}
4422
8c5c7abd
NB
4423#ifdef CONFIG_PM
4424/* ena_suspend - PM suspend callback
4425 * @pdev: PCI device information struct
 4426 * @state: power state
4427 */
4428static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
4429{
4430 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4431
4432 u64_stats_update_begin(&adapter->syncp);
4433 adapter->dev_stats.suspend++;
4434 u64_stats_update_end(&adapter->syncp);
4435
4436 rtnl_lock();
4437 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
4438 dev_err(&pdev->dev,
4439 "ignoring device reset request as the device is being suspended\n");
4440 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
4441 }
cfa324a5 4442 ena_destroy_device(adapter, true);
8c5c7abd
NB
4443 rtnl_unlock();
4444 return 0;
4445}
4446
4447/* ena_resume - PM resume callback
4448 * @pdev: PCI device information struct
4449 *
4450 */
4451static int ena_resume(struct pci_dev *pdev)
4452{
4453 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4454 int rc;
4455
4456 u64_stats_update_begin(&adapter->syncp);
4457 adapter->dev_stats.resume++;
4458 u64_stats_update_end(&adapter->syncp);
4459
4460 rtnl_lock();
4461 rc = ena_restore_device(adapter);
4462 rtnl_unlock();
4463 return rc;
4464}
4465#endif
4466
1738cd3e
NB
4467static struct pci_driver ena_pci_driver = {
4468 .name = DRV_MODULE_NAME,
4469 .id_table = ena_pci_tbl,
4470 .probe = ena_probe,
4471 .remove = ena_remove,
428c4913 4472 .shutdown = ena_shutdown,
8c5c7abd
NB
4473#ifdef CONFIG_PM
4474 .suspend = ena_suspend,
4475 .resume = ena_resume,
4476#endif
115ddc49 4477 .sriov_configure = pci_sriov_configure_simple,
1738cd3e
NB
4478};
4479
4480static int __init ena_init(void)
4481{
1738cd3e
NB
4482 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
4483 if (!ena_wq) {
4484 pr_err("Failed to create workqueue\n");
4485 return -ENOMEM;
4486 }
4487
4488 return pci_register_driver(&ena_pci_driver);
4489}
4490
4491static void __exit ena_cleanup(void)
4492{
4493 pci_unregister_driver(&ena_pci_driver);
4494
4495 if (ena_wq) {
4496 destroy_workqueue(ena_wq);
4497 ena_wq = NULL;
4498 }
4499}
4500
4501/******************************************************************************
4502 ******************************** AENQ Handlers *******************************
4503 *****************************************************************************/
4504/* ena_update_on_link_change:
4505 * Notify the network interface about the change in link status
4506 */
4507static void ena_update_on_link_change(void *adapter_data,
4508 struct ena_admin_aenq_entry *aenq_e)
4509{
4510 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4511 struct ena_admin_aenq_link_change_desc *aenq_desc =
4512 (struct ena_admin_aenq_link_change_desc *)aenq_e;
4513 int status = aenq_desc->flags &
4514 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
4515
4516 if (status) {
4517 netdev_dbg(adapter->netdev, "%s\n", __func__);
4518 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
d18e4f68
NB
4519 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
4520 netif_carrier_on(adapter->netdev);
1738cd3e
NB
4521 } else {
4522 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4523 netif_carrier_off(adapter->netdev);
4524 }
4525}
4526
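/* Keep-alive AENQ handler: besides refreshing last_keep_alive_jiffies for the
 * watchdog, the event carries the device's Rx/Tx drop counters split into
 * 32-bit high/low halves, which are recombined and stored in dev_stats.
 */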
4527static void ena_keep_alive_wd(void *adapter_data,
4528 struct ena_admin_aenq_entry *aenq_e)
4529{
4530 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
11a9a460
NB
4531 struct ena_admin_aenq_keep_alive_desc *desc;
4532 u64 rx_drops;
5c665f8c 4533 u64 tx_drops;
1738cd3e 4534
11a9a460 4535 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
1738cd3e 4536 adapter->last_keep_alive_jiffies = jiffies;
11a9a460
NB
4537
4538 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
5c665f8c 4539 tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
11a9a460
NB
4540
4541 u64_stats_update_begin(&adapter->syncp);
4542 adapter->dev_stats.rx_drops = rx_drops;
5c665f8c 4543 adapter->dev_stats.tx_drops = tx_drops;
11a9a460 4544 u64_stats_update_end(&adapter->syncp);
1738cd3e
NB
4545}
4546
4547static void ena_notification(void *adapter_data,
4548 struct ena_admin_aenq_entry *aenq_e)
4549{
4550 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
82ef30f1 4551 struct ena_admin_ena_hw_hints *hints;
1738cd3e
NB
4552
4553 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4554 "Invalid group(%x) expected %x\n",
4555 aenq_e->aenq_common_desc.group,
4556 ENA_ADMIN_NOTIFICATION);
4557
4558 switch (aenq_e->aenq_common_desc.syndrom) {
82ef30f1
NB
4559 case ENA_ADMIN_UPDATE_HINTS:
4560 hints = (struct ena_admin_ena_hw_hints *)
4561 (&aenq_e->inline_data_w4);
4562 ena_update_hints(adapter, hints);
4563 break;
1738cd3e
NB
4564 default:
4565 netif_err(adapter, drv, adapter->netdev,
4566 "Invalid aenq notification link state %d\n",
4567 aenq_e->aenq_common_desc.syndrom);
4568 }
4569}
4570
 4571 /* This handler will be called for an unknown event group or unimplemented handlers */
4572static void unimplemented_aenq_handler(void *data,
4573 struct ena_admin_aenq_entry *aenq_e)
4574{
4575 struct ena_adapter *adapter = (struct ena_adapter *)data;
4576
4577 netif_err(adapter, drv, adapter->netdev,
4578 "Unknown event was received or event with unimplemented handler\n");
4579}
4580
4581static struct ena_aenq_handlers aenq_handlers = {
4582 .handlers = {
4583 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
4584 [ENA_ADMIN_NOTIFICATION] = ena_notification,
4585 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
4586 },
4587 .unimplemented_handler = unimplemented_aenq_handler
4588};
4589
4590module_init(ena_init);
4591module_exit(ena_cleanup);