/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);

static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Check that we are not already in the middle of a reset.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->per_napi_bytes = 0;
	ring->cpu = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
		rxr->empty_rx_queue = 0;
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_tx_ids = vzalloc_node(size, node);
	if (!tx_ring->free_tx_ids) {
		tx_ring->free_tx_ids = vzalloc(size);
		if (!tx_ring->free_tx_ids) {
			vfree(tx_ring->tx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;

	return 0;
}
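
/* Note: free_tx_ids is what allows out-of-order TX completions. A req_id is
 * handed to the device with each packet; on completion the device returns
 * that req_id, which indexes tx_buffer_info directly instead of assuming
 * completions arrive in ring order.
 */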

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_tx_ids);
	tx_ring->free_tx_ids = NULL;
}

/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = PAGE_SIZE;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}

static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info =
			&rx_ring->rx_buffer_info[next_to_use];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						next_to_use);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* Add memory barrier to make sure the desc were written before
	 * issue a doorbell
	 */
	wmb();
	ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
		struct ena_com_buf *ena_buf;
		int nr_frags;
		int j;

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev,
				 ena_buf->paddr,
				 ena_buf->len,
				 DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		nr_frags = tx_info->num_of_bufs - 1;
		for (j = 0; j < nr_frags; j++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev,
				       ena_buf->paddr,
				       ena_buf->len,
				       DMA_TO_DEVICE);
		}

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}

static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;
		struct ena_com_buf *ena_buf;
		int i, nr_frags;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		if (likely(tx_info->num_of_bufs != 0)) {
			ena_buf = tx_info->bufs;

			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(ena_buf, paddr),
					 dma_unmap_len(ena_buf, len),
					 DMA_TO_DEVICE);

			/* unmap remaining mapped pages */
			nr_frags = tx_info->num_of_bufs - 1;
			for (i = 0; i < nr_frags; i++) {
				ena_buf++;
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(ena_buf, paddr),
					       dma_unmap_len(ena_buf, len),
					       DMA_TO_DEVICE);
			}
		}

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_tx_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
		ENA_TX_WAKEUP_THRESH;
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
			ENA_TX_WAKEUP_THRESH;
		if (netif_tx_queue_stopped(txq) && above_thresh) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	tx_ring->per_napi_bytes += tx_bytes;
	tx_ring->per_napi_packets += tx_pkts;

	return tx_pkts;
}

static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info =
		&rx_ring->rx_buffer_info[*next_to_clean];
	u32 len;
	u32 buf = 0;
	void *va;

	len = ena_bufs[0].len;
	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);
		if (unlikely(!skb)) {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.skb_alloc_fail++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "Failed to allocate skb\n");
			return NULL;
		}

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = napi_get_frags(rx_ring->napi);
	if (unlikely(!skb)) {
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "Failed allocating skb\n");
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return NULL;
	}

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;
		rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
		len = ena_bufs[++buf].len;
	} while (1);

	return skb;
}
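
/* Note: ena_rx_skb() implements the copybreak strategy: frames no longer than
 * rx_copybreak are copied into a freshly allocated skb so the DMA page stays
 * mapped and can be reposted on the next refill, while larger frames are
 * attached page-by-page as frags (napi_get_frags) and their pages surrendered
 * to the stack.
 */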

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
				   struct ena_com_rx_ctx *ena_rx_ctx,
				   struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
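
/* Note: CHECKSUM_UNNECESSARY tells the stack the device already verified the
 * L4 checksum, so software verification is skipped; CHECKSUM_NONE makes the
 * stack checksum the packet itself whenever there is any doubt (offload
 * disabled, fragmented packet, or a reported L3/L4 error).
 */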

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;

	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
							    ena_rx_ctx.descs,
							    rx_ring->ring_size);
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_bytes += total_len;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
				       struct ena_ring *tx_ring)
{
	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
	ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
					  rx_ring->per_napi_packets,
					  rx_ring->per_napi_bytes,
					  &rx_ring->smoothed_interval,
					  &rx_ring->moder_tbl_idx);

	/* Reset per napi packets/bytes */
	tx_ring->per_napi_packets = 0;
	tx_ring->per_napi_bytes = 0;
	rx_ring->per_napi_packets = 0;
	rx_ring->per_napi_bytes = 0;
}

static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
					struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_ring->smoothed_interval,
				tx_ring->smoothed_interval,
				true);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 */
	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}

static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
					     struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}

static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;

	u32 tx_work_done;
	u32 rx_work_done;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmask
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when schedule
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done)) {
			/* Tx and Rx share the same interrupt vector */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_intr_moderation(rx_ring, tx_ring);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}
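
/* Note: the return value follows the NAPI contract: returning less than
 * budget (after napi_complete_done) takes the context off the poll list and
 * re-arms the device interrupt, while returning the full budget keeps NAPI
 * polling without touching the interrupt mask.
 */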

static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	napi_schedule(&ena_napi->napi);

	return IRQ_HANDLED;
}

static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
	int msix_vecs, rc;

	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);

	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	rc = pci_alloc_irq_vectors(adapter->pdev, msix_vecs, msix_vecs,
				   PCI_IRQ_MSIX);
	if (rc < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X, vectors %d rc %d\n",
			  msix_vecs, rc);
		return -ENOSPC;
	}

	netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n",
		  msix_vecs);

	if (msix_vecs >= 1) {
		if (ena_init_rx_cpu_rmap(adapter))
			netif_warn(adapter, probe, adapter->netdev,
				   "Failed to map IRQs to CPUs\n");
	}

	adapter->msix_vecs = msix_vecs;

	return 0;
}

static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}

static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}

static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}

static int ena_request_io_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}

static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static void ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}

static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}

static void ena_del_napi(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		netif_napi_del(&adapter->ena_napi[i].napi);
}

static void ena_init_napi(struct ena_adapter *adapter)
{
	struct ena_napi *napi;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ena_io_poll,
			       ENA_NAPI_BUDGET);
		napi->rx_ring = &adapter->rx_ring[i];
		napi->tx_ring = &adapter->tx_ring[i];
		napi->qid = i;
	}
}

static void ena_napi_disable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}

static void ena_restore_ethtool_params(struct ena_adapter *adapter)
{
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;
	adapter->tx_frames = 1;
	adapter->rx_frames = 1;
}

/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}

static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc, i;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_init_napi(adapter);

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_restore_ethtool_params(adapter);

	ena_napi_enable_all(adapter);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disable napi
	 */
	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return 0;
}

static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->tx_ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}

static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}

static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->rx_ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}

static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));

	return rc;
}

static int ena_up(struct ena_adapter *adapter)
{
	int rc;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	ena_setup_io_intr(adapter);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	/* allocate transmit descriptors */
	rc = ena_setup_all_tx_resources(adapter);
	if (rc)
		goto err_setup_tx;

	/* allocate receive descriptors */
	rc = ena_setup_all_rx_resources(adapter);
	if (rc)
		goto err_setup_rx;

	/* Create TX queues */
	rc = ena_create_all_io_tx_queues(adapter);
	if (rc)
		goto err_create_tx_queues;

	/* Create RX queues */
	rc = ena_create_all_io_rx_queues(adapter);
	if (rc)
		goto err_create_rx_queues;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	return rc;

err_up:
	ena_destroy_all_rx_queues(adapter);
err_create_rx_queues:
	ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
	ena_free_all_io_rx_resources(adapter);
err_setup_rx:
	ena_free_all_io_tx_resources(adapter);
err_setup_tx:
	ena_free_io_irq(adapter);
err_req_irq:

	return rc;
}

static void ena_down(struct ena_adapter *adapter)
{
	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_down++;
	u64_stats_update_end(&adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_all(adapter);

	/* After destroy the queue there won't be any new interrupts */

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (rc)
			dev_err(&adapter->pdev->dev, "Device reset failed\n");
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}

/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);
	if (rc)
		return rc;

	return rc;
}

/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	return 0;
}

static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;

	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}

static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.linearize++;
	u64_stats_update_end(&tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.linearize_failed++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return rc;
}

/* Called with netif_tx_lock. */
static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_tx_buffer *tx_info;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_ring *tx_ring;
	struct netdev_queue *txq;
	struct ena_com_buf *ena_buf;
	void *push_hdr;
	u32 len, last_frag;
	u16 next_to_use;
	u16 req_id;
	u16 push_len;
	u16 header_len;
	dma_addr_t dma;
	int qid, rc, nb_hw_desc;
	int i = -1;

	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
	/* Determine which tx ring we will be placed on */
	qid = skb_get_queue_mapping(skb);
	tx_ring = &adapter->tx_ring[qid];
	txq = netdev_get_tx_queue(dev, qid);

	rc = ena_check_and_linearize_skb(tx_ring, skb);
	if (unlikely(rc))
		goto error_drop_packet;

	skb_tx_timestamp(skb);
	len = skb_headlen(skb);

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_tx_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
	ena_buf = tx_info->bufs;
	tx_info->skb = skb;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* prepared the push buffer */
		push_len = min_t(u32, len, tx_ring->tx_max_header_size);
		header_len = push_len;
		push_hdr = skb->data;
	} else {
		push_len = 0;
		header_len = min_t(u32, len, tx_ring->tx_max_header_size);
		push_hdr = NULL;
	}

	netif_dbg(adapter, tx_queued, dev,
		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
		  push_hdr, push_len);

	if (len > push_len) {
		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
				     len - push_len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = len - push_len;

		ena_buf++;
		tx_info->num_of_bufs++;
	}

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = len;
		ena_buf++;
	}

	tx_info->num_of_bufs += last_frag;

	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* set flags and meta data */
	ena_tx_csum(&ena_tx_ctx, skb);

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
				&nb_hw_desc);
	if (rc) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		tx_ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&tx_ring->syncp);
		netif_tx_stop_queue(txq);
		goto error_unmap_dma;
	}

	netdev_tx_sent_queue(txq, skb->len);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.cnt++;
	tx_ring->tx_stats.bytes += skb->len;
	u64_stats_update_end(&tx_ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						    tx_ring->ring_size);

	/* This WMB is aimed to:
	 * 1 - perform smp barrier before reading next_to_completion
	 * 2 - make sure the desc were written before trigger DB
	 */
	wmb();

	/* stop the queue when no more space available, the packet can have up
	 * to sgl_size + 2. one for the meta descriptor and one for header
	 * (if the header is larger than tx_max_header_size).
	 */
	if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
		     (tx_ring->sgl_size + 2))) {
		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
			  __func__, qid);

		netif_tx_stop_queue(txq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		u64_stats_update_end(&tx_ring->syncp);

		/* There is a rare condition where this function decides to
		 * stop the queue but meanwhile clean_tx_irq updates
		 * next_to_completion and terminates.
		 * The queue will remain stopped forever.
		 * To solve this issue this function performs an rmb, checks
		 * the wakeup condition and wakes up the queue if needed.
		 */
		smp_rmb();

		if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
		    > ENA_TX_WAKEUP_THRESH) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
	}

	if (netif_xmit_stopped(txq) || !skb->xmit_more) {
		/* trigger the dma engine */
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.doorbells++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return NETDEV_TX_OK;
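
	/* Note: the doorbell above is deliberately skipped while
	 * skb->xmit_more is set and the queue is still running, so a burst
	 * of packets queued by the stack is announced to the device with a
	 * single doorbell write.
	 */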

error_report_dma_error:
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&tx_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map skb\n");

	tx_info->skb = NULL;

error_unmap_dma:
	if (i >= 0) {
		/* save value of frag that failed */
		last_frag = i;

		/* start back at beginning and unmap skb */
		tx_info->skb = NULL;
		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		for (i = 0; i < last_frag; i++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
				       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		}
	}

error_drop_packet:

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ena_netpoll(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int i;

	/* Don't schedule NAPI if the driver is in the middle of reset
	 * or netdev is down.
	 */

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	u16 qid;
	/* we suspect that this is good for in-kernel network services that
	 * want to loop incoming skb rx to tx in normal user generated traffic,
	 * most probably we will not get to this
	 */
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
		qid = fallback(dev, skb);

	return qid;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		pr_err("Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strncpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			pr_warn("Cannot set host attributes\n");
		else
			pr_err("Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		pr_err("Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(adapter->ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			netif_warn(adapter, drv, adapter->netdev,
				   "Cannot set host attributes\n");
		else
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot set host attributes\n");
		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(adapter->ena_dev);
}

static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_ring *rx_ring, *tx_ring;
	unsigned int start;
	u64 rx_drops;
	int i;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return NULL;

	for (i = 0; i < adapter->num_queues; i++) {
		u64 bytes, packets;

		tx_ring = &adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->tx_stats.cnt;
			bytes = tx_ring->tx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		rx_ring = &adapter->rx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->rx_stats.cnt;
			bytes = rx_ring->rx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	do {
		start = u64_stats_fetch_begin_irq(&adapter->syncp);
		rx_drops = adapter->dev_stats.rx_drops;
	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));

	stats->rx_dropped = rx_drops;

	stats->multicast = 0;
	stats->collisions = 0;

	stats->rx_length_errors = 0;
	stats->rx_crc_errors = 0;
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = 0;
	stats->tx_window_errors = 0;

	stats->rx_errors = 0;
	stats->tx_errors = 0;

	return stats;
}
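
/* Note: the u64_stats_fetch_begin_irq/retry_irq loops above give a consistent
 * 64-bit snapshot on 32-bit machines, where the counters are protected by a
 * seqcount; on 64-bit builds they compile down to plain reads.
 */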

static const struct net_device_ops ena_netdev_ops = {
	.ndo_open		= ena_open,
	.ndo_stop		= ena_close,
	.ndo_start_xmit		= ena_start_xmit,
	.ndo_select_queue	= ena_select_queue,
	.ndo_get_stats64	= ena_get_stats64,
	.ndo_tx_timeout		= ena_tx_timeout,
	.ndo_change_mtu		= ena_change_mtu,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ena_netpoll,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};
static void ena_device_io_suspend(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, suspend_io_task);
	struct net_device *netdev = adapter->netdev;

	/* ena_napi_disable_all disables only the IO handling.
	 * We are still subject to AENQ keep alive watchdog.
	 */
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.io_suspend++;
	u64_stats_update_end(&adapter->syncp);
	ena_napi_disable_all(adapter);
	netif_tx_lock(netdev);
	netif_device_detach(netdev);
	netif_tx_unlock(netdev);
}
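
/* Detaching under netif_tx_lock() serializes against any in-flight
 * ndo_start_xmit, so once the lock is dropped no transmit path can observe
 * the device as present.
 */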
static void ena_device_io_resume(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, resume_io_task);
	struct net_device *netdev = adapter->netdev;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.io_resume++;
	u64_stats_update_end(&adapter->syncp);

	netif_device_attach(netdev);
	ena_napi_enable_all(adapter);
}
static int ena_device_validate_params(struct ena_adapter *adapter,
				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
			      adapter->mac_addr);
	if (!rc) {
		netif_err(adapter, drv, netdev,
			  "Error, MAC addresses are different\n");
		return -EINVAL;
	}

	if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
	    (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
		netif_err(adapter, drv, netdev,
			  "Error, device doesn't support enough queues\n");
		return -EINVAL;
	}

	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
		netif_err(adapter, drv, netdev,
			  "Error, device max mtu is smaller than netdev MTU\n");
		return -EINVAL;
	}

	return 0;
}
static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	struct device *dev = &pdev->dev;
	bool readless_supported;
	u32 aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		dev_err(dev, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates whether mmio
	 * reg read is disabled
	 */
	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		dev_err(dev, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		dev_err(dev, "device version is too low\n");
		goto err_mmio_read_less;
	}

	dma_width = ena_com_get_dma_width(ena_dev);
	if (dma_width < 0) {
		dev_err(dev, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
		goto err_mmio_read_less;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
			rc);
		goto err_mmio_read_less;
	}

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
	if (rc) {
		dev_err(dev,
			"Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
		goto err_admin_init;
	}

	/* Try to turn on all the available aenq groups */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
		      BIT(ENA_ADMIN_FATAL_ERROR) |
		      BIT(ENA_ADMIN_WARNING) |
		      BIT(ENA_ADMIN_NOTIFICATION) |
		      BIT(ENA_ADMIN_KEEP_ALIVE);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;

	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (rc) {
		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
		goto err_admin_init;
	}

	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	return 0;

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}
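
/* Illustrative example (the real width is reported by the device): if
 * ena_com_get_dma_width() returns 48, DMA_BIT_MASK(48) is
 * 0x0000ffffffffffff and the device may be given DMA addresses anywhere
 * in the low 48 bits.
 */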
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
						    int io_vectors)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc;

	rc = ena_enable_msix(adapter, io_vectors);
	if (rc) {
		dev_err(dev, "Can not reserve msix vectors\n");
		return rc;
	}

	ena_setup_mgmnt_intr(adapter);

	rc = ena_request_mgmnt_irq(adapter);
	if (rc) {
		dev_err(dev, "Can not setup management interrupts\n");
		goto err_disable_msix;
	}

	ena_com_set_admin_polling_mode(ena_dev, false);

	ena_com_admin_aenq_enable(ena_dev);

	return 0;

err_disable_msix:
	pci_free_irq_vectors(adapter->pdev);

	return rc;
}
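
/* From this point the admin queue runs in interrupt mode: the management
 * IRQ requested above delivers admin completions and AENQ events, so the
 * polling mode enabled during ena_device_init() is switched off.
 */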
static void ena_fw_reset_device(struct work_struct *work)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, reset_task);
	struct net_device *netdev = adapter->netdev;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct pci_dev *pdev = adapter->pdev;
	bool dev_up, wd_state;
	int rc;

	if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"device reset scheduled while reset bit is off\n");
		return;
	}

	netif_carrier_off(netdev);

	del_timer_sync(&adapter->timer_service);

	rtnl_lock();

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_com_set_admin_running_state(ena_dev, false);

	/* After calling ena_close the tx queues and the napi
	 * are disabled so no one can interfere or touch the
	 * data structures
	 */
	ena_close(netdev);

	ena_free_mgmnt_irq(adapter);

	pci_free_irq_vectors(adapter->pdev);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	/* Finish with the destroy part. Start the init part */

	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "Can not initialize device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc) {
		dev_err(&pdev->dev, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
						      adapter->num_queues);
	if (rc) {
		dev_err(&pdev->dev, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}

	/* If the interface was up before the reset bring it up */
	if (dev_up) {
		rc = ena_up(adapter);
		if (rc) {
			dev_err(&pdev->dev, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	rtnl_unlock();

	dev_err(&pdev->dev, "Device reset completed successfully\n");

	return;
err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	pci_free_irq_vectors(adapter->pdev);
err_device_destroy:
	ena_com_admin_destroy(ena_dev);
err:
	rtnl_unlock();

	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	dev_err(&pdev->dev,
		"Reset attempt failed. Can not reset the device\n");
}
static int check_missing_comp_in_queue(struct ena_adapter *adapter,
				       struct ena_ring *tx_ring)
{
	struct ena_tx_buffer *tx_buf;
	unsigned long last_jiffies;
	u32 missed_tx = 0;
	int i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];
		last_jiffies = tx_buf->last_jiffies;
		if (unlikely(last_jiffies &&
			     time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
			if (!tx_buf->print_once)
				netif_notice(adapter, tx_err, adapter->netdev,
					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
					     tx_ring->qid, i);

			tx_buf->print_once = 1;
			missed_tx++;

			if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
				netif_err(adapter, tx_err, adapter->netdev,
					  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
					  missed_tx,
					  adapter->missing_tx_completion_threshold);
				adapter->reset_reason =
					ENA_REGS_RESET_MISS_TX_CMPL;
				set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
				return -EIO;
			}
		}
	}

	return 0;
}
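
/* Illustrative timing (assuming HZ == 250): with the default
 * missing_tx_completion_to of TX_TIMEOUT (5 * HZ == 1250 jiffies), a Tx
 * buffer stamped at jiffies == 1000 is flagged once jiffies passes 2250;
 * print_once limits the log to a single line per buffer.
 */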
static void check_for_missing_tx_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i, budget, rc;

	/* Make sure the driver doesn't turn the device in another process */
	smp_rmb();

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	budget = ENA_MONITORED_TX_QUEUES;

	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];

		rc = check_missing_comp_in_queue(adapter, tx_ring);
		if (unlikely(rc))
			return;

		budget--;
		if (!budget)
			break;
	}

	adapter->last_monitored_tx_qid = i % adapter->num_queues;
}
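
/* Only ENA_MONITORED_TX_QUEUES rings are examined per timer tick;
 * last_monitored_tx_qid records where the scan stopped so consecutive
 * ticks cover all rings round-robin.
 */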
/* trigger napi schedule after 2 consecutive detections */
#define EMPTY_RX_REFILL	2
/* For the rare case where the device runs out of Rx descriptors and the
 * napi handler failed to refill new Rx descriptors (due to a lack of memory
 * for example).
 * This case will lead to a deadlock:
 * The device won't send interrupts since all the new Rx packets will be dropped
 * The napi handler won't allocate new Rx descriptors so the device won't be
 * able to send new packets.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 * It is recommended to have at least 512MB, with a minimum of 128MB for
 * constrained environments.
 *
 * When such a situation is detected - Reschedule napi
 */
static void check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, refill_required;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required =
			ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			rx_ring->empty_rx_queue++;

			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				u64_stats_update_begin(&rx_ring->syncp);
				rx_ring->rx_stats.empty_rx_ring++;
				u64_stats_update_end(&rx_ring->syncp);

				netif_err(adapter, drv, adapter->netdev,
					  "trigger refill for ring %d\n", i);

				napi_schedule(rx_ring->napi);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}
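
/* A refill_required of ring_size - 1 means every usable descriptor is
 * free, i.e. the device has no buffer to receive into (one slot stays
 * unused to tell a full ring from an empty one). Requiring EMPTY_RX_REFILL
 * consecutive detections avoids reacting to a transient state.
 */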
/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	unsigned long keep_alive_expired;

	if (!adapter->wd_state)
		return;

	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
					   adapter->keep_alive_timeout);
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.wd_expired++;
		u64_stats_update_end(&adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}
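
/* round_jiffies() aligns the computed deadline to a whole second, so the
 * watchdog may grant the device up to roughly one extra second beyond
 * keep_alive_timeout before declaring it dead.
 */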
static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
		netif_err(adapter, drv, adapter->netdev,
			  "ENA admin queue is not in running state!\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.admin_q_pause++;
		u64_stats_update_end(&adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}
static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	struct net_device *netdev = adapter->netdev;

	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev->admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev->mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->missed_tx_completion_count_threshold_to_reset)
		adapter->missing_tx_completion_threshold =
			hints->missed_tx_completion_count_threshold_to_reset;

	if (hints->missing_tx_completion_timeout) {
		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->missing_tx_completion_to =
				msecs_to_jiffies(hints->missing_tx_completion_timeout);
	}

	if (hints->netdev_wd_timeout)
		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->keep_alive_timeout =
				msecs_to_jiffies(hints->driver_watchdog_timeout);
	}
}
static void ena_update_host_info(struct ena_admin_host_info *host_info,
				 struct net_device *netdev)
{
	host_info->supported_network_features[0] =
		netdev->features & GENMASK_ULL(31, 0);
	host_info->supported_network_features[1] =
		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
static void ena_timer_service(unsigned long data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
	struct ena_admin_host_info *host_info =
		adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_tx_completions(adapter);

	check_for_empty_rx_ring(adapter);

	if (debug_area)
		ena_dump_stats_to_buf(adapter, debug_area);

	if (host_info)
		ena_update_host_info(host_info, adapter->netdev);

	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Trigger reset is on\n");
		ena_dump_stats_to_dmesg(adapter);
		queue_work(ena_wq, &adapter->reset_task);
		return;
	}

	/* Reset the timer */
	mod_timer(&adapter->timer_service, jiffies + HZ);
}
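
/* The timer re-arms itself every second unless a reset was requested; in
 * that case ena_fw_reset_device() restarts it once the device is back up.
 */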
static int ena_calc_io_queue_num(struct pci_dev *pdev,
				 struct ena_com_dev *ena_dev,
				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	int io_sq_num, io_queue_num;

	/* In case of LLQ use the llq number in the get feature cmd */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq_num = get_feat_ctx->max_queues.max_llq_num;

		if (io_sq_num == 0) {
			dev_warn(&pdev->dev,
				 "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");

			ena_dev->tx_mem_queue_type =
				ENA_ADMIN_PLACEMENT_POLICY_HOST;
			io_sq_num = get_feat_ctx->max_queues.max_sq_num;
		}
	} else {
		io_sq_num = get_feat_ctx->max_queues.max_sq_num;
	}

	io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
	io_queue_num = min_t(int, io_queue_num, io_sq_num);
	io_queue_num = min_t(int, io_queue_num,
			     get_feat_ctx->max_queues.max_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
	io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
	if (unlikely(!io_queue_num)) {
		dev_err(&pdev->dev, "The device doesn't have io queues\n");
		return -EFAULT;
	}

	return io_queue_num;
}
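
/* Illustrative example (assumed capabilities): on an 8-vCPU instance with
 * max_sq_num == 128, max_cq_num == 128 and 9 MSI-X vectors, the result is
 * min(8, 128, 128, 9 - 1) = 8 IO queues.
 */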
static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	bool has_mem_bar;

	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);

	/* Enable push mode if device supports LLQ */
	if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
	else
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
}
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
				 struct net_device *netdev)
{
	netdev_features_t dev_features = 0;

	/* Set offload features */
	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		dev_features |= NETIF_F_IP_CSUM;

	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		dev_features |= NETIF_F_IPV6_CSUM;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		dev_features |= NETIF_F_TSO;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
		dev_features |= NETIF_F_TSO6;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
		dev_features |= NETIF_F_TSO_ECN;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	netdev->features =
		dev_features |
		NETIF_F_SG |
		NETIF_F_RXHASH |
		NETIF_F_HIGHDMA;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
}
static void ena_set_conf_feat_params(struct ena_adapter *adapter,
				     struct ena_com_dev_get_features_ctx *feat)
{
	struct net_device *netdev = adapter->netdev;

	/* Copy mac address */
	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	} else {
		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	}

	/* Set offload features */
	ena_set_dev_offloads(feat, netdev);

	adapter->max_mtu = feat->dev_attr.max_mtu;
	netdev->max_mtu = adapter->max_mtu;
	netdev->min_mtu = ENA_MIN_MTU;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		dev_err(dev, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = ethtool_rxfh_indir_default(i, adapter->num_queues);
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != -EOPNOTSUPP))) {
			dev_err(dev, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}
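
/* ethtool_rxfh_indir_default(i, n) is simply i % n, so the default
 * indirection table spreads the ENA_RX_RSS_TABLE_SIZE buckets across the
 * IO queues round-robin.
 */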
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	int release_bars;

	if (ena_dev->mem_bar)
		devm_iounmap(&pdev->dev, ena_dev->mem_bar);

	devm_iounmap(&pdev->dev, ena_dev->reg_bar);

	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	pci_release_selected_regions(pdev, release_bars);
}
static int ena_calc_queue_size(struct pci_dev *pdev,
			       struct ena_com_dev *ena_dev,
			       u16 *max_tx_sgl_size,
			       u16 *max_rx_sgl_size,
			       struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	u32 queue_size = ENA_DEFAULT_RING_SIZE;

	queue_size = min_t(u32, queue_size,
			   get_feat_ctx->max_queues.max_cq_depth);
	queue_size = min_t(u32, queue_size,
			   get_feat_ctx->max_queues.max_sq_depth);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		queue_size = min_t(u32, queue_size,
				   get_feat_ctx->max_queues.max_llq_depth);

	queue_size = rounddown_pow_of_two(queue_size);

	if (unlikely(!queue_size)) {
		dev_err(&pdev->dev, "Invalid queue size\n");
		return -EFAULT;
	}

	*max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
				 get_feat_ctx->max_queues.max_packet_tx_descs);
	*max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
				 get_feat_ctx->max_queues.max_packet_rx_descs);

	return queue_size;
}
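
/* Illustrative example (assumed device limits, with a default ring size of
 * 1024): with max_cq_depth == 8192 and max_sq_depth == 700, queue_size
 * becomes min(1024, 8192, 700) = 700, and rounddown_pow_of_two(700)
 * yields 512 entries.
 */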
/* ena_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ena_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ena_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	static int version_printed;
	struct net_device *netdev;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev = NULL;
	static int adapters_found;
	int io_queue_num, bars, rc;
	int queue_size;
	u16 tx_sgl_size = 0;
	u16 rx_sgl_size = 0;
	bool wd_state;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	if (version_printed++ == 0)
		dev_info(&pdev->dev, "%s", version);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return rc;
	}

	pci_set_master(pdev);

	ena_dev = vzalloc(sizeof(*ena_dev));
	if (!ena_dev) {
		rc = -ENOMEM;
		goto err_disable_device;
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			rc);
		goto err_free_ena_dev;
	}

	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
					pci_resource_start(pdev, ENA_REG_BAR),
					pci_resource_len(pdev, ENA_REG_BAR));
	if (!ena_dev->reg_bar) {
		dev_err(&pdev->dev, "failed to remap regs bar\n");
		rc = -EFAULT;
		goto err_free_region;
	}

	ena_dev->dmadev = &pdev->dev;

	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "ena device init failed\n");
		if (rc == -ETIME)
			rc = -EPROBE_DEFER;
		goto err_free_region;
	}

	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
						   pci_resource_start(pdev, ENA_MEM_BAR),
						   pci_resource_len(pdev, ENA_MEM_BAR));
		if (!ena_dev->mem_bar) {
			rc = -EFAULT;
			goto err_device_destroy;
		}
	}

	/* initial Tx interrupt delay, Assumes 1 usec granularity.
	 * Updated during device initialization with the real granularity
	 */
	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
	queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
					 &rx_sgl_size, &get_feat_ctx);
	if ((queue_size <= 0) || (io_queue_num <= 0)) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
		 io_queue_num, queue_size);

	/* dev zeroed in init_etherdev */
	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
		rc = -ENOMEM;
		goto err_device_destroy;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->ena_dev = ena_dev;
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	ena_set_conf_feat_params(adapter, &get_feat_ctx);

	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	adapter->tx_ring_size = queue_size;
	adapter->rx_ring_size = queue_size;

	adapter->max_tx_sgl_size = tx_sgl_size;
	adapter->max_rx_sgl_size = rx_sgl_size;

	adapter->num_queues = io_queue_num;
	adapter->last_monitored_tx_qid = 0;

	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
	adapter->wd_state = wd_state;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to query interrupt moderation feature\n");
		goto err_netdev_destroy;
	}
	ena_init_io_rings(adapter);

	netdev->netdev_ops = &ena_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	ena_set_ethtool_ops(netdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	u64_stats_init(&adapter->syncp);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to enable and set the admin interrupts\n");
		goto err_worker_destroy;
	}
	rc = ena_rss_init_default(adapter);
	if (rc && (rc != -EOPNOTSUPP)) {
		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
		goto err_free_msix;
	}

	ena_config_debug_area(adapter);

	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_rss;
	}

	INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend);
	INIT_WORK(&adapter->resume_io_task, ena_device_io_resume);
	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

	adapter->last_keep_alive_jiffies = jiffies;
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
	adapter->missing_tx_completion_to = TX_TIMEOUT;
	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

	ena_update_hints(adapter, &get_feat_ctx.hw_hints);

	setup_timer(&adapter->timer_service, ena_timer_service,
		    (unsigned long)adapter);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr, io_queue_num);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;

err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
	ena_free_mgmnt_irq(adapter);
	pci_free_irq_vectors(adapter->pdev);
err_worker_destroy:
	ena_com_destroy_interrupt_moderation(ena_dev);
	del_timer(&adapter->timer_service);
	cancel_work_sync(&adapter->suspend_io_task);
	cancel_work_sync(&adapter->resume_io_task);
err_netdev_destroy:
	free_netdev(netdev);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);
	return rc;
}
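
/* The error labels unwind strictly in reverse order of acquisition; each
 * goto target frees everything acquired after the point it names, which is
 * why a register_netdev() failure jumps all the way back to err_rss.
 */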
/*****************************************************************************/
static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
{
	int rc;

	if (numvfs > 0) {
		rc = pci_enable_sriov(dev, numvfs);
		if (rc != 0) {
			dev_err(&dev->dev,
				"pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
				numvfs, rc);
			return rc;
		}

		return numvfs;
	}

	if (numvfs == 0) {
		pci_disable_sriov(dev);
		return 0;
	}

	return -EINVAL;
}
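
/* Per the sriov_configure contract, a positive return reports how many VFs
 * were enabled, 0 reports a successful disable, and a negative value is an
 * error.
 */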
/*****************************************************************************/
/*****************************************************************************/

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	unregister_netdev(netdev);
	del_timer_sync(&adapter->timer_service);

	cancel_work_sync(&adapter->reset_task);

	cancel_work_sync(&adapter->suspend_io_task);

	cancel_work_sync(&adapter->resume_io_task);

	/* Reset the device only if the device is running. */
	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		ena_com_dev_reset(ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	pci_free_irq_vectors(adapter->pdev);

	free_netdev(netdev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);

	ena_com_destroy_interrupt_moderation(ena_dev);

	vfree(ena_dev);
}
static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.sriov_configure = ena_sriov_configure,
};
static int __init ena_init(void)
{
	pr_info("%s", version);

	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}
static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netdev_dbg(adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}

static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	adapter->last_keep_alive_jiffies = jiffies;
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_SUSPEND:
		/* Suspend just the IO queues.
		 * We deliberately don't suspend admin so the timer and
		 * the keep_alive events should remain.
		 */
		queue_work(ena_wq, &adapter->suspend_io_task);
		break;
	case ENA_ADMIN_RESUME:
		queue_work(ena_wq, &adapter->resume_io_task);
		break;
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification link state %d\n",
			  aenq_e->aenq_common_desc.syndrom);
	}
}

/* This handler will be called for unknown event groups or unimplemented
 * handlers
 */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);