/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset
	 * Check that we are not in the middle or a trigger already
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}
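/* Editor's note (illustrative, not in the original source): the rmap built
 * above feeds accelerated RFS. With num_queues = 4, the vectors for
 * ENA_IO_IRQ_IDX(0..3) are registered, letting the stack steer each flow to
 * the RX queue whose IRQ is affinitized to the consuming CPU.
 */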
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}
static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->requested_rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
		rxr->empty_rx_queue = 0;
		adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}
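/* Editor's note (illustrative, not in the original): the vzalloc_node() then
 * vzalloc() pattern above prefers memory on the NUMA node of the queue's IRQ
 * CPU and falls back to any node rather than failing outright. E.g. with
 * ring_size = 1024, free_ids is 2 KiB (1024 * sizeof(u16)) and tx_buffer_info
 * is 1024 * sizeof(struct ena_tx_buffer), ideally node-local to the polling CPU.
 */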
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}
/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}
static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}
/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}
static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = ENA_PAGE_SIZE;

	return 0;
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}
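/* Illustrative example (not in the original): refilling is batched and the
 * doorbell is written at most once per call. E.g. a 1024-entry ring refilled
 * with num = 1023 posts up to 1023 descriptors and then issues a single
 * ena_com_write_sq_doorbell(), whose implied wmb() orders the descriptor
 * writes before the device observes the new tail.
 */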
static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}
/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}
static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}
static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
			     struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i = 0;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}
/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_unmap_tx_skb(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_skb(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}
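/* Editor's note (illustrative): the smp_mb() above pairs with the stop-queue
 * path in ena_start_xmit(). Without it, this CPU could check "queue stopped"
 * before its ring-head update became visible to the transmitting CPU, and
 * both sides could miss the wake-up: xmit stops the queue seeing no space
 * while the clean path sees a running queue, leaving it stopped forever.
 */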
static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
	}

	return skb;
}
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;
	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;
		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_good++;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
}
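/* Editor's note: CHECKSUM_UNNECESSARY above tells the stack the device
 * already verified the L4 checksum, so software verification is skipped;
 * every other branch falls back to CHECKSUM_NONE and leaves verification
 * to the stack.
 */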
static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}
/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;

	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}
static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}
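/* Illustrative note (not in the original): ena_dim_work runs from the
 * net_dim() workqueue. DIM picks a moderation profile from the sampled
 * packet/byte rates; the chosen usec value becomes the Rx ring's
 * smoothed_interval, which ena_unmask_interrupt() later programs as the
 * Rx interrupt delay.
 */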
static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;
	u32 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
		rx_ring->smoothed_interval :
		ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_interval,
				tx_ring->smoothed_interval,
				true);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 */
	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}
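/* Editor's note (illustrative): the rings cache the CPU they last polled on,
 * so the NUMA hint is pushed to the device only when the NAPI poll migrates
 * to a different CPU, e.g. after irqbalance moves the IRQ; the common
 * same-CPU case costs only a get_cpu()/put_cpu() pair.
 */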
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;

	u32 tx_work_done;
	int rx_work_done = 0;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	/* On netpoll the budget is zero and the handler should only clean the
	 * tx completions.
	 */
	if (likely(budget))
		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmask
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when schedule
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done)) {
			/* We apply adaptive moderation on Rx path only.
			 * Tx uses static interrupt moderation.
			 */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_adaptive_rx_intr_moderation(ena_napi);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}
/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	ena_napi->tx_ring->first_interrupt = true;
	ena_napi->rx_ring->first_interrupt = true;

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}
/* Reserve a single MSI-X vector for management (admin + aenq).
 * plus reserve one vector for each potential io queue.
 * the number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}
static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}
static int ena_request_io_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}
static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}
static void ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}
static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}
static void ena_del_napi(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		netif_napi_del(&adapter->ena_napi[i].napi);
}
static void ena_init_napi(struct ena_adapter *adapter)
{
	struct ena_napi *napi;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ena_io_poll,
			       ENA_NAPI_BUDGET);
		napi->rx_ring = &adapter->rx_ring[i];
		napi->tx_ring = &adapter->tx_ring[i];
		napi->qid = i;
	}
}
static void ena_napi_disable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}
static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_napi_enable_all(adapter);

	return 0;
}
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx;
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = tx_ring->ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}
static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = rx_ring->ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
		INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
	}

	return 0;

create_err:
	while (i--) {
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
	}

	return rc;
}
static void set_io_rings_size(struct ena_adapter *adapter,
			      int new_tx_size, int new_rx_size)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->tx_ring[i].ring_size = new_tx_size;
		adapter->rx_ring[i].ring_size = new_rx_size;
	}
}
/* This function allows queue allocation to backoff when the system is
 * low on memory. If there is not enough memory to allocate io queues
 * the driver will try to allocate smaller queues.
 *
 * The backoff algorithm is as follows:
 *  1. Try to allocate TX and RX and if successful.
 *  1.1. return success
 *
 *  2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
 *
 *  3. If TX or RX is smaller than 256
 *  3.1. return failure.
 *  4. else
 *  4.1. go back to 1.
 */
static int create_queues_with_size_backoff(struct ena_adapter *adapter)
{
	int rc, cur_rx_ring_size, cur_tx_ring_size;
	int new_rx_ring_size, new_tx_ring_size;

	/* current queue sizes might be set to smaller than the requested
	 * ones due to past queue allocation failures.
	 */
	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
			  adapter->requested_rx_ring_size);

	while (1) {
		rc = ena_setup_all_tx_resources(adapter);
		if (rc)
			goto err_setup_tx;

		rc = ena_create_all_io_tx_queues(adapter);
		if (rc)
			goto err_create_tx_queues;

		rc = ena_setup_all_rx_resources(adapter);
		if (rc)
			goto err_setup_rx;

		rc = ena_create_all_io_rx_queues(adapter);
		if (rc)
			goto err_create_rx_queues;

		return 0;

err_create_rx_queues:
		ena_free_all_io_rx_resources(adapter);
err_setup_rx:
		ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
		ena_free_all_io_tx_resources(adapter);
err_setup_tx:
		if (rc != -ENOMEM) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with error code %d\n",
				  rc);
			return rc;
		}

		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
		cur_rx_ring_size = adapter->rx_ring[0].ring_size;

		netif_err(adapter, ifup, adapter->netdev,
			  "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
			  cur_tx_ring_size, cur_rx_ring_size);

		new_tx_ring_size = cur_tx_ring_size;
		new_rx_ring_size = cur_rx_ring_size;

		/* Decrease the size of the larger queue, or
		 * decrease both if they are the same size.
		 */
		if (cur_rx_ring_size <= cur_tx_ring_size)
			new_tx_ring_size = cur_tx_ring_size / 2;
		if (cur_rx_ring_size >= cur_tx_ring_size)
			new_rx_ring_size = cur_rx_ring_size / 2;

		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
				  ENA_MIN_RING_SIZE);
			return rc;
		}

		netif_err(adapter, ifup, adapter->netdev,
			  "Retrying queue creation with sizes TX=%d, RX=%d\n",
			  new_tx_ring_size,
			  new_rx_ring_size);

		set_io_rings_size(adapter, new_tx_ring_size,
				  new_rx_ring_size);
	}
}
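/* Worked example of the backoff above (sizes illustrative): requested
 * TX=4096, RX=1024 under repeated -ENOMEM shrinks as (4096,1024) ->
 * (2048,1024) -> (1024,1024) -> (512,512) -> (256,256); one further failure
 * would drop below ENA_MIN_RING_SIZE, so the loop gives up and returns the
 * error instead of retrying.
 */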
static int ena_up(struct ena_adapter *adapter)
{
	int rc, i;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	ena_setup_io_intr(adapter);

	/* napi poll functions should be initialized before running
	 * request_irq(), to handle a rare condition where there is a pending
	 * interrupt, causing the ISR to fire immediately while the poll
	 * function wasn't set yet, causing a null dereference
	 */
	ena_init_napi(adapter);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	rc = create_queues_with_size_backoff(adapter);
	if (rc)
		goto err_create_queues_with_backoff;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disable napi
	 */
	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return rc;

err_up:
	ena_destroy_all_tx_queues(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_destroy_all_rx_queues(adapter);
	ena_free_all_io_rx_resources(adapter);
err_create_queues_with_backoff:
	ena_free_io_irq(adapter);
err_req_irq:
	ena_del_napi(adapter);

	return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_down++;
	u64_stats_update_end(&adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_all(adapter);

	/* After destroy the queue there won't be any new interrupts */

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (rc)
			dev_err(&adapter->pdev->dev, "Device reset failed\n");
		/* stop submitting admin commands on a device that was reset */
		ena_com_set_admin_running_state(adapter->ena_dev, false);
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}
/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);
	if (rc)
		return rc;

	return rc;
}
/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return 0;

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Check for device status and issue reset if needed*/
	check_for_admin_com_state(adapter);
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, ifdown, adapter->netdev,
			  "Destroy failure, restarting device\n");
		ena_dump_stats_to_dmesg(adapter);
		/* rtnl lock already obtained in dev_ioctl() layer */
		ena_destroy_device(adapter, false);
		ena_restore_device(adapter);
	}

	return 0;
}
int ena_update_queue_sizes(struct ena_adapter *adapter,
			   u32 new_tx_size,
			   u32 new_rx_size)
{
	bool dev_up;

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_close(adapter->netdev);
	adapter->requested_tx_ring_size = new_tx_size;
	adapter->requested_rx_ring_size = new_rx_size;
	ena_init_io_rings(adapter);
	return dev_up ? ena_up(adapter) : 0;
}
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;

	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}
static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.linearize++;
	u64_stats_update_end(&tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.linearize_failed++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return rc;
}
static int ena_tx_map_skb(struct ena_ring *tx_ring,
			  struct ena_tx_buffer *tx_info,
			  struct sk_buff *skb,
			  void **push_hdr,
			  u16 *header_len)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma;
	u32 skb_head_len, frag_len, last_frag;
	u16 push_len = 0;
	u16 delta = 0;
	int i = 0;

	skb_head_len = skb_headlen(skb);
	tx_info->skb = skb;
	ena_buf = tx_info->bufs;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* When the device is in LLQ mode, the driver will copy
		 * the header into the device memory space.
		 * The ena_com layer assumes the header is in a linear
		 * memory space.
		 * This assumption might be wrong since part of the header
		 * can be in the fragmented buffers.
		 * Use skb_header_pointer to make sure the header is in a
		 * linear memory space.
		 */
		push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
		*push_hdr = skb_header_pointer(skb, 0, push_len,
					       tx_ring->push_buf_intermediate_buf);
		*header_len = push_len;
		if (unlikely(skb->data != *push_hdr)) {
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.llq_buffer_copy++;
			u64_stats_update_end(&tx_ring->syncp);

			delta = push_len - skb_head_len;
		}
	} else {
		*push_hdr = NULL;
		*header_len = min_t(u32, skb_head_len,
				    tx_ring->tx_max_header_size);
	}

	netif_dbg(adapter, tx_queued, adapter->netdev,
		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
		  *push_hdr, push_len);

	if (skb_head_len > push_len) {
		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
				     skb_head_len - push_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = skb_head_len - push_len;

		ena_buf++;
		tx_info->num_of_bufs++;
		tx_info->map_linear_data = 1;
	} else {
		tx_info->map_linear_data = 0;
	}

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		frag_len = skb_frag_size(frag);

		if (unlikely(delta >= frag_len)) {
			delta -= frag_len;
			continue;
		}

		dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
				       frag_len - delta, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = frag_len - delta;
		ena_buf++;
		tx_info->num_of_bufs++;
		delta = 0;
	}

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&tx_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map skb\n");

	tx_info->skb = NULL;

	tx_info->num_of_bufs += i;
	ena_unmap_tx_skb(tx_ring, tx_info);

	return -EINVAL;
}
/* Called with netif_tx_lock. */
static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_tx_buffer *tx_info;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_ring *tx_ring;
	struct netdev_queue *txq;
	void *push_hdr;
	u16 next_to_use, req_id, header_len;
	int qid, rc, nb_hw_desc;

	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
	/* Determine which tx ring we will be placed on */
	qid = skb_get_queue_mapping(skb);
	tx_ring = &adapter->tx_ring[qid];
	txq = netdev_get_tx_queue(dev, qid);

	rc = ena_check_and_linearize_skb(tx_ring, skb);
	if (unlikely(rc))
		goto error_drop_packet;

	skb_tx_timestamp(skb);

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);

	rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
	if (unlikely(rc))
		goto error_drop_packet;

	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* set flags and meta data */
	ena_tx_csum(&ena_tx_ctx, skb);

	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  qid);
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
				&nb_hw_desc);

	/* ena_com_prepare_tx() can't fail due to overflow of tx queue,
	 * since the number of free descriptors in the queue is checked
	 * after sending the previous packet. In case there isn't enough
	 * space in the queue for the next packet, it is stopped
	 * until there is again enough available space in the queue.
	 * All other failure reasons of ena_com_prepare_tx() are fatal
	 * and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&tx_ring->syncp);
		adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		goto error_unmap_dma;
	}

	netdev_tx_sent_queue(txq, skb->len);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.cnt++;
	tx_ring->tx_stats.bytes += skb->len;
	u64_stats_update_end(&tx_ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						    tx_ring->ring_size);

	/* stop the queue when no more space available, the packet can have up
	 * to sgl_size + 2. one for the meta descriptor and one for header
	 * (if the header is larger than tx_max_header_size).
	 */
	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						   tx_ring->sgl_size + 2))) {
		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
			  __func__, qid);

		netif_tx_stop_queue(txq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		u64_stats_update_end(&tx_ring->syncp);

		/* There is a rare condition where this function decides to
		 * stop the queue but meanwhile clean_tx_irq updates
		 * next_to_completion and terminates.
		 * The queue will remain stopped forever.
		 * To solve this issue add a mb() to make sure that
		 * netif_tx_stop_queue() write is visible before checking if
		 * there is additional space in the queue.
		 */
		smp_mb();

		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						 ENA_TX_WAKEUP_THRESH)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
	}

	if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
		/* trigger the dma engine. ena_com_write_sq_doorbell()
		 * has a mb
		 */
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.doorbells++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_skb(tx_ring, tx_info);
	tx_info->skb = NULL;

error_drop_packet:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
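/* Note on the doorbell policy above: when the stack signals that more
 * packets are about to follow (netdev_xmit_more()), the doorbell write is
 * deferred so several descriptors can be submitted with a single MMIO
 * write; the doorbell is forced only when the queue was just stopped or
 * no further packets are pending. Per the comment in the code,
 * ena_com_write_sq_doorbell() contains the memory barrier needed before
 * the device fetches the descriptors.
 */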
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	u16 qid;
	/* This is expected to help in-kernel network services that loop an
	 * incoming skb's rx queue back to tx; for normal user-generated
	 * traffic we will most probably not get here.
	 */
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
		qid = netdev_pick_tx(dev, skb, NULL);

	return qid;
}
static void ena_config_host_info(struct ena_com_dev *ena_dev,
				 struct pci_dev *pdev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		pr_err("Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strlcpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
		("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
	host_info->num_cpus = num_online_cpus();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			pr_warn("Cannot set host attributes\n");
		else
			pr_err("Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "SS count is negative or zero\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		pr_err("Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(adapter->ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			netif_warn(adapter, drv, adapter->netdev,
				   "Cannot set host attributes\n");
		else
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot set host attributes\n");
		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(adapter->ena_dev);
}
static void ena_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_ring *rx_ring, *tx_ring;
	unsigned int start;
	u64 rx_drops;
	int i;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++) {
		u64 bytes, packets;

		tx_ring = &adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->tx_stats.cnt;
			bytes = tx_ring->tx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		rx_ring = &adapter->rx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->rx_stats.cnt;
			bytes = rx_ring->rx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	do {
		start = u64_stats_fetch_begin_irq(&adapter->syncp);
		rx_drops = adapter->dev_stats.rx_drops;
	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));

	stats->rx_dropped = rx_drops;

	stats->multicast = 0;
	stats->collisions = 0;

	stats->rx_length_errors = 0;
	stats->rx_crc_errors = 0;
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = 0;
	stats->tx_window_errors = 0;

	stats->rx_errors = 0;
	stats->tx_errors = 0;
}
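/* The loops above use the u64_stats seqcount pattern: writers wrap their
 * updates in u64_stats_update_begin()/u64_stats_update_end(), and readers
 * retry until they observe a consistent snapshot. A minimal sketch of the
 * reader side (names follow the structures used in this file):
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->syncp);
 *		snapshot = ring->tx_stats.cnt;
 *	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 */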
static const struct net_device_ops ena_netdev_ops = {
	.ndo_open		= ena_open,
	.ndo_stop		= ena_close,
	.ndo_start_xmit		= ena_start_xmit,
	.ndo_select_queue	= ena_select_queue,
	.ndo_get_stats64	= ena_get_stats64,
	.ndo_tx_timeout		= ena_tx_timeout,
	.ndo_change_mtu		= ena_change_mtu,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= eth_validate_addr,
};
static int ena_device_validate_params(struct ena_adapter *adapter,
				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
			      adapter->mac_addr);
	if (!rc) {
		netif_err(adapter, drv, netdev,
			  "Error, MAC addresses are different\n");
		return -EINVAL;
	}

	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
		netif_err(adapter, drv, netdev,
			  "Error, device max mtu is smaller than netdev MTU\n");
		return -EINVAL;
	}

	return 0;
}
static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	struct device *dev = &pdev->dev;
	bool readless_supported;
	u32 aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		dev_err(dev, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates if mmio reg
	 * read is disabled
	 */
	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		dev_err(dev, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		dev_err(dev, "device version is too low\n");
		goto err_mmio_read_less;
	}

	dma_width = ena_com_get_dma_width(ena_dev);
	if (dma_width < 0) {
		dev_err(dev, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
		goto err_mmio_read_less;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "pci_set_consistent_dma_mask failed 0x%x\n",
			rc);
		goto err_mmio_read_less;
	}

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (rc) {
		dev_err(dev,
			"Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev, pdev);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
		goto err_admin_init;
	}

	/* Try to turn all the available aenq groups */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
		      BIT(ENA_ADMIN_FATAL_ERROR) |
		      BIT(ENA_ADMIN_WARNING) |
		      BIT(ENA_ADMIN_NOTIFICATION) |
		      BIT(ENA_ADMIN_KEEP_ALIVE);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;

	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (rc) {
		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
		goto err_admin_init;
	}

	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	return 0;

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
						    int io_vectors)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc;

	rc = ena_enable_msix(adapter, io_vectors);
	if (rc) {
		dev_err(dev, "Can not reserve msix vectors\n");
		return rc;
	}

	ena_setup_mgmnt_intr(adapter);

	rc = ena_request_mgmnt_irq(adapter);
	if (rc) {
		dev_err(dev, "Can not setup management interrupts\n");
		goto err_disable_msix;
	}

	ena_com_set_admin_polling_mode(ena_dev, false);

	ena_com_admin_aenq_enable(ena_dev);

	return 0;

err_disable_msix:
	ena_disable_msix(adapter);

	return rc;
}
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
	struct net_device *netdev = adapter->netdev;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool dev_up;

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return;

	netif_carrier_off(netdev);

	del_timer_sync(&adapter->timer_service);

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	adapter->dev_up_before_reset = dev_up;
	if (!graceful)
		ena_com_set_admin_running_state(ena_dev, false);

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Stop the device from sending AENQ events (in case the reset flag
	 * is set and device is up, ena_down() already reset the device).
	 */
	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
}
static int ena_restore_device(struct ena_adapter *adapter)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct pci_dev *pdev = adapter->pdev;
	bool wd_state;
	int rc;

	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "Can not initialize device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc) {
		dev_err(&pdev->dev, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
						      adapter->num_queues);
	if (rc) {
		dev_err(&pdev->dev, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}
	/* If the interface was up before the reset bring it up */
	if (adapter->dev_up_before_reset) {
		rc = ena_up(adapter);
		if (rc) {
			dev_err(&pdev->dev, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
	dev_err(&pdev->dev,
		"Device reset completed successfully, Driver info: %s\n",
		version);

	return rc;
err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	dev_err(&pdev->dev,
		"Reset attempt failed. Can not reset the device\n");

	return rc;
}
static void ena_fw_reset_device(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, reset_task);
	struct pci_dev *pdev = adapter->pdev;

	if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"device reset scheduled while reset bit is off\n");
		return;
	}
	rtnl_lock();
	ena_destroy_device(adapter, false);
	ena_restore_device(adapter);
	rtnl_unlock();
}
static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
					struct ena_ring *rx_ring)
{
	if (likely(rx_ring->first_interrupt))
		return 0;

	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
		return 0;

	rx_ring->no_interrupt_event_cnt++;

	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
		netif_err(adapter, rx_err, adapter->netdev,
			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
			  rx_ring->qid);
		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
		smp_mb__before_atomic();
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		return -EIO;
	}

	return 0;
}
static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
					  struct ena_ring *tx_ring)
{
	struct ena_tx_buffer *tx_buf;
	unsigned long last_jiffies;
	u32 missed_tx = 0;
	int i, rc = 0;

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];
		last_jiffies = tx_buf->last_jiffies;

		if (last_jiffies == 0)
			/* no pending Tx at this location */
			continue;

		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
			     2 * adapter->missing_tx_completion_to))) {
			/* If after the graceful period the interrupt is still
			 * not received, we schedule a reset
			 */
			netif_err(adapter, tx_err, adapter->netdev,
				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
				  tx_ring->qid);
			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
			smp_mb__before_atomic();
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
			return -EIO;
		}

		if (unlikely(time_is_before_jiffies(last_jiffies +
			     adapter->missing_tx_completion_to))) {
			if (!tx_buf->print_once)
				netif_notice(adapter, tx_err, adapter->netdev,
					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
					     tx_ring->qid, i);

			tx_buf->print_once = 1;
			missed_tx++;
		}
	}

	if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
		netif_err(adapter, tx_err, adapter->netdev,
			  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
			  missed_tx,
			  adapter->missing_tx_completion_threshold);
		adapter->reset_reason =
			ENA_REGS_RESET_MISS_TX_CMPL;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		rc = -EIO;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.missed_tx = missed_tx;
	u64_stats_update_end(&tx_ring->syncp);

	return rc;
}
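/* Worked example for the checks above (illustrative values): with the
 * default missing_tx_completion_to of TX_TIMEOUT (5 * HZ), a Tx buffer
 * whose last_jiffies is more than 5 seconds old is counted as missed and
 * reported once; if the queue never saw an interrupt at all, a grace
 * period of twice that (10 seconds) triggers a device reset instead.
 */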
static void check_for_missing_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	int i, budget, rc;

	/* Make sure another process isn't bringing the device down
	 * concurrently
	 */
	smp_rmb();

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	budget = ENA_MONITORED_TX_QUEUES;

	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		rx_ring = &adapter->rx_ring[i];

		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
		if (unlikely(rc))
			return;

		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
		if (unlikely(rc))
			return;

		budget--;
		if (!budget)
			break;
	}

	adapter->last_monitored_tx_qid = i % adapter->num_queues;
}
/* trigger napi schedule after 2 consecutive detections */
#define EMPTY_RX_REFILL 2
/* For the rare case where the device runs out of Rx descriptors and the
 * napi handler failed to refill new Rx descriptors (due to a lack of
 * memory, for example).
 * This case will lead to a deadlock:
 * The device won't send interrupts since all the new Rx packets will be dropped
 * The napi handler won't allocate new Rx descriptors so the device won't be
 * able to send new packets.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 * It is recommended to have at least 512MB, with a minimum of 128MB for
 * constrained environments.
 *
 * When such a situation is detected - reschedule napi
 */
static void check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, refill_required;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required =
			ena_com_free_desc(rx_ring->ena_com_io_sq);
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			rx_ring->empty_rx_queue++;

			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				u64_stats_update_begin(&rx_ring->syncp);
				rx_ring->rx_stats.empty_rx_ring++;
				u64_stats_update_end(&rx_ring->syncp);

				netif_err(adapter, drv, adapter->netdev,
					  "trigger refill for ring %d\n", i);

				napi_schedule(rx_ring->napi);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}
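/* Operational note: if the empty-Rx-ring condition above fires repeatedly,
 * raising the watermark of memory the allocator keeps free usually helps,
 * per the recommendation in the comment above. An illustrative setting
 * (128MB expressed in kilobytes):
 *
 *	sysctl -w vm.min_free_kbytes=131072
 */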
/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	unsigned long keep_alive_expired;

	if (!adapter->wd_state)
		return;

	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
					   adapter->keep_alive_timeout);
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.wd_expired++;
		u64_stats_update_end(&adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}
static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
		netif_err(adapter, drv, adapter->netdev,
			  "ENA admin queue is not in running state!\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.admin_q_pause++;
		u64_stats_update_end(&adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}
static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	struct net_device *netdev = adapter->netdev;

	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev->admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev->mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->missed_tx_completion_count_threshold_to_reset)
		adapter->missing_tx_completion_threshold =
			hints->missed_tx_completion_count_threshold_to_reset;

	if (hints->missing_tx_completion_timeout) {
		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->missing_tx_completion_to =
				msecs_to_jiffies(hints->missing_tx_completion_timeout);
	}

	if (hints->netdev_wd_timeout)
		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->keep_alive_timeout =
				msecs_to_jiffies(hints->driver_watchdog_timeout);
	}
}
static void ena_update_host_info(struct ena_admin_host_info *host_info,
				 struct net_device *netdev)
{
	host_info->supported_network_features[0] =
		netdev->features & GENMASK_ULL(31, 0);
	host_info->supported_network_features[1] =
		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
static void ena_timer_service(struct timer_list *t)
{
	struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
	struct ena_admin_host_info *host_info =
		adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_completions(adapter);

	check_for_empty_rx_ring(adapter);

	if (debug_area)
		ena_dump_stats_to_buf(adapter, debug_area);

	if (host_info)
		ena_update_host_info(host_info, adapter->netdev);

	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Trigger reset is on\n");
		ena_dump_stats_to_dmesg(adapter);
		queue_work(ena_wq, &adapter->reset_task);
		return;
	}

	/* Reset the timer */
	mod_timer(&adapter->timer_service, jiffies + HZ);
}
static int ena_calc_io_queue_num(struct pci_dev *pdev,
				 struct ena_com_dev *ena_dev,
				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
				  max_queue_ext->max_rx_cq_num);

		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq fields for the tx SQ/CQ */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
	io_queue_num = min_t(int, io_queue_num, io_rx_num);
	io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
	io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each IO queue */
	io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
	if (unlikely(!io_queue_num)) {
		dev_err(&pdev->dev, "The device doesn't have io queues\n");
		return -EFAULT;
	}

	return io_queue_num;
}
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
					   struct ena_com_dev *ena_dev,
					   struct ena_admin_feature_llq_desc *llq,
					   struct ena_llq_configurations *llq_default_configurations)
{
	bool has_mem_bar;
	int rc;
	u32 llq_feature_mask;

	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
	if (!(ena_dev->supported_features & llq_feature_mask)) {
		dev_err(&pdev->dev,
			"LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc)) {
		dev_err(&pdev->dev,
			"Failed to configure the device mode. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	/* Nothing to config, exit */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (!has_mem_bar) {
		dev_err(&pdev->dev,
			"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
					   pci_resource_start(pdev, ENA_MEM_BAR),
					   pci_resource_len(pdev, ENA_MEM_BAR));

	if (!ena_dev->mem_bar)
		return -EFAULT;

	return 0;
}
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
				 struct net_device *netdev)
{
	netdev_features_t dev_features = 0;

	/* Set offload features */
	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		dev_features |= NETIF_F_IP_CSUM;

	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		dev_features |= NETIF_F_IPV6_CSUM;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		dev_features |= NETIF_F_TSO;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
		dev_features |= NETIF_F_TSO6;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
		dev_features |= NETIF_F_TSO_ECN;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	netdev->features =
		dev_features |
		NETIF_F_SG |
		NETIF_F_RXHASH |
		NETIF_F_HIGHDMA;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
}
static void ena_set_conf_feat_params(struct ena_adapter *adapter,
				     struct ena_com_dev_get_features_ctx *feat)
{
	struct net_device *netdev = adapter->netdev;

	/* Copy mac address */
	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	} else {
		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	}

	/* Set offload features */
	ena_set_dev_offloads(feat, netdev);

	adapter->max_mtu = feat->dev_attr.max_mtu;
	netdev->max_mtu = adapter->max_mtu;
	netdev->min_mtu = ENA_MIN_MTU;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	u32 val;
	int rc, i;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		dev_err(dev, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = ethtool_rxfh_indir_default(i, adapter->num_queues);
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != -EOPNOTSUPP))) {
			dev_err(dev, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;

	pci_release_selected_regions(pdev, release_bars);
}
static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
{
	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
	llq_config->llq_ring_entry_size_value = 128;
}
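/* Configuration note: these defaults request inline headers in 128-byte
 * LLQ entries, with two descriptors placed before the inlined header,
 * i.e. a Tx submission where the packet header is pushed into device
 * memory together with its first descriptors.
 */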
static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
	u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
	u32 max_tx_queue_size;
	u32 max_rx_queue_size;

	if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
					  max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  max_queue_ext->max_tx_sq_depth);

		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queue_ext->max_per_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queue_ext->max_per_packet_rx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
					  max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  max_queues->max_sq_depth);

		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queues->max_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queues->max_packet_rx_descs);
	}

	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);

	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
				  max_tx_queue_size);
	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
				  max_rx_queue_size);

	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
	rx_queue_size = rounddown_pow_of_two(rx_queue_size);

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;
	ctx->tx_queue_size = tx_queue_size;
	ctx->rx_queue_size = rx_queue_size;

	return 0;
}
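/* Worked example for the sizing above (illustrative values): if the device
 * reports max_cq_depth = 1500 and max_sq_depth = 1024, the maximum ring
 * size becomes rounddown_pow_of_two(min(1500, 1024)) = 1024, and the
 * ENA_DEFAULT_RING_SIZE starting point is clamped into
 * [ENA_MIN_RING_SIZE, 1024] before being rounded down to a power of two
 * itself.
 */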
/* ena_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ena_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ena_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
	struct ena_llq_configurations llq_config;
	struct ena_com_dev *ena_dev = NULL;
	struct ena_adapter *adapter;
	int io_queue_num, bars, rc;
	struct net_device *netdev;
	static int adapters_found;
	char *queue_type_str;
	bool wd_state;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	dev_info_once(&pdev->dev, "%s", version);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return rc;
	}

	pci_set_master(pdev);

	ena_dev = vzalloc(sizeof(*ena_dev));
	if (!ena_dev) {
		rc = -ENOMEM;
		goto err_disable_device;
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			rc);
		goto err_free_ena_dev;
	}

	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
					pci_resource_start(pdev, ENA_REG_BAR),
					pci_resource_len(pdev, ENA_REG_BAR));
	if (!ena_dev->reg_bar) {
		dev_err(&pdev->dev, "failed to remap regs bar\n");
		rc = -EFAULT;
		goto err_free_region;
	}

	ena_dev->dmadev = &pdev->dev;

	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "ena device init failed\n");
		if (rc == -ETIME)
			rc = -EPROBE_DEFER;
		goto err_free_region;
	}

	set_default_llq_configurations(&llq_config);

	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
					     &llq_config);
	if (rc) {
		dev_err(&pdev->dev, "ena device init failed\n");
		goto err_device_destroy;
	}

	calc_queue_ctx.ena_dev = ena_dev;
	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
	calc_queue_ctx.pdev = pdev;

	/* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
	 * Updated during device initialization with the real granularity
	 */
	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
	ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
	ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
	rc = ena_calc_queue_size(&calc_queue_ctx);
	if (rc || io_queue_num <= 0) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size: %d LLQ is %s\n",
		 io_queue_num,
		 calc_queue_ctx.rx_queue_size,
		 calc_queue_ctx.tx_queue_size,
		 (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
		 "ENABLED" : "DISABLED");

	/* dev zeroed in init_etherdev */
	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
		rc = -ENOMEM;
		goto err_device_destroy;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->ena_dev = ena_dev;
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	ena_set_conf_feat_params(adapter, &get_feat_ctx);

	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;

	adapter->num_queues = io_queue_num;
	adapter->last_monitored_tx_qid = 0;

	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
	adapter->wd_state = wd_state;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to query interrupt moderation feature\n");
		goto err_netdev_destroy;
	}
	ena_init_io_rings(adapter);

	netdev->netdev_ops = &ena_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	ena_set_ethtool_ops(netdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	u64_stats_init(&adapter->syncp);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to enable and set the admin interrupts\n");
		goto err_worker_destroy;
	}
	rc = ena_rss_init_default(adapter);
	if (rc && (rc != -EOPNOTSUPP)) {
		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
		goto err_free_msix;
	}

	ena_config_debug_area(adapter);

	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_rss;
	}

	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

	adapter->last_keep_alive_jiffies = jiffies;
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
	adapter->missing_tx_completion_to = TX_TIMEOUT;
	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

	ena_update_hints(adapter, &get_feat_ctx.hw_hints);

	timer_setup(&adapter->timer_service, ena_timer_service, 0);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		queue_type_str = "Regular";
	else
		queue_type_str = "Low Latency";

	dev_info(&pdev->dev,
		 "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr, io_queue_num, queue_type_str);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;

err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
	/* stop submitting admin commands on a device that was reset */
	ena_com_set_admin_running_state(ena_dev, false);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_worker_destroy:
	del_timer(&adapter->timer_service);
err_netdev_destroy:
	free_netdev(netdev);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);
	return rc;
}
/*****************************************************************************/

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */
	del_timer_sync(&adapter->timer_service);

	cancel_work_sync(&adapter->reset_task);

	rtnl_lock();
	ena_destroy_device(adapter, true);
	rtnl_unlock();

	unregister_netdev(netdev);

	free_netdev(netdev);

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);
}
#ifdef CONFIG_PM
/* ena_suspend - PM suspend callback
 * @pdev: PCI device information struct
 * @state: power state
 */
static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.suspend++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter, true);
	rtnl_unlock();
	return 0;
}

/* ena_resume - PM resume callback
 * @pdev: PCI device information struct
 */
static int ena_resume(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	int rc;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.resume++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();
	return rc;
}
#endif
static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
#ifdef CONFIG_PM
	.suspend	= ena_suspend,
	.resume		= ena_resume,
#endif
	.sriov_configure = pci_sriov_configure_simple,
};

static int __init ena_init(void)
{
	pr_info("%s", version);

	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	destroy_workqueue(ena_wq);
}
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netdev_dbg(adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}

static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;

	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.rx_drops = rx_drops;
	u64_stats_update_end(&adapter->syncp);
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification link state %d\n",
			  aenq_e->aenq_common_desc.syndrom);
	}
}

/* This handler will be called for unknown event group or unimplemented
 * handlers
 */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);