/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");

	/* Change the state of the device to trigger reset */
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      adapter->msix_entries[irq_idx].vector);
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}
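
/* The CPU rmap built above feeds accelerated RFS (aRFS): with
 * CONFIG_RFS_ACCEL the stack uses this IRQ-to-CPU reverse map to steer
 * a flow's RX interrupt toward the CPU on which the consuming
 * application runs.
 */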
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->per_napi_bytes = 0;
	ring->cpu = 0;
	u64_stats_init(&ring->syncp);
}
static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
	}
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_tx_ids = vzalloc_node(size, node);
	if (!tx_ring->free_tx_ids) {
		tx_ring->free_tx_ids = vzalloc(size);
		if (!tx_ring->free_tx_ids) {
			vfree(tx_ring->tx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;
}
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_tx_ids);
	tx_ring->free_tx_ids = NULL;
}
/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}
/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}
static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = PAGE_SIZE;

	return 0;
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info =
			&rx_ring->rx_buffer_info[next_to_use];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						next_to_use);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* Add memory barrier to make sure the desc were written before
	 * issue a doorbell
	 */
	wmb();
	ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}
static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}
/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 *
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}
static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}
/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
		struct ena_com_buf *ena_buf;
		int nr_frags;
		int j;

		if (!tx_info->skb)
			continue;

		netdev_notice(tx_ring->netdev,
			      "free uncompleted tx skb qid %d idx 0x%x\n",
			      tx_ring->qid, i);

		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev,
				 ena_buf->paddr,
				 ena_buf->len,
				 DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		nr_frags = tx_info->num_of_bufs - 1;
		for (j = 0; j < nr_frags; j++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev,
				       ena_buf->paddr,
				       ena_buf->len,
				       DMA_TO_DEVICE);
		}

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;
		struct ena_com_buf *ena_buf;
		int i, nr_frags;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		if (likely(tx_info->num_of_bufs != 0)) {
			ena_buf = tx_info->bufs;

			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(ena_buf, paddr),
					 dma_unmap_len(ena_buf, len),
					 DMA_TO_DEVICE);

			/* unmap remaining mapped pages */
			nr_frags = tx_info->num_of_bufs - 1;
			for (i = 0; i < nr_frags; i++) {
				ena_buf++;
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(ena_buf, paddr),
					       dma_unmap_len(ena_buf, len),
					       DMA_TO_DEVICE);
			}
		}

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_tx_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
		ENA_TX_WAKEUP_THRESH;
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
			ENA_TX_WAKEUP_THRESH;
		if (netif_tx_queue_stopped(txq) && above_thresh) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	tx_ring->per_napi_bytes += tx_bytes;
	tx_ring->per_napi_packets += tx_pkts;

	return tx_pkts;
}
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info =
		&rx_ring->rx_buffer_info[*next_to_clean];
	u32 len;
	u32 buf = 0;
	void *va;

	len = ena_bufs[0].len;
	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);
		if (unlikely(!skb)) {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.skb_alloc_fail++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "Failed to allocate skb\n");
			return NULL;
		}

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = napi_get_frags(rx_ring->napi);
	if (unlikely(!skb)) {
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "Failed allocating skb\n");
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return NULL;
	}

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;
		rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
		len = ena_bufs[++buf].len;
	} while (1);

	return skb;
}
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
				   struct ena_com_rx_ctx *ena_rx_ctx,
				   struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}
/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;

	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
							    ena_rx_ctx.descs,
							    rx_ring->ring_size);
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_bytes += total_len;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}
inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
				       struct ena_ring *tx_ring)
{
	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
	ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
					  rx_ring->per_napi_packets,
					  rx_ring->per_napi_bytes,
					  &rx_ring->smoothed_interval,
					  &rx_ring->moder_tbl_idx);

	/* Reset per napi packets/bytes */
	tx_ring->per_napi_packets = 0;
	tx_ring->per_napi_bytes = 0;
	rx_ring->per_napi_packets = 0;
	rx_ring->per_napi_bytes = 0;
}
static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
					     struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	struct ena_eth_io_intr_reg intr_reg;

	u32 tx_work_done;
	u32 rx_work_done;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_complete_done(napi, rx_work_done);

		napi_comp_call = 1;
		/* Tx and Rx share the same interrupt vector */
		if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
			ena_adjust_intr_moderation(rx_ring, tx_ring);

		/* Update intr register: rx intr delay, tx intr delay and
		 * interrupt unmask
		 */
		ena_com_update_intr_reg(&intr_reg,
					rx_ring->smoothed_interval,
					tx_ring->smoothed_interval,
					true);

		/* It is a shared MSI-X. Tx and Rx CQ have pointer to it.
		 * So we use one of them to reach the intr reg
		 */
		ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}
/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	napi_schedule(&ena_napi->napi);

	return IRQ_HANDLED;
}
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
	int i, msix_vecs, rc;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);

	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry));

	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_vecs; i++)
		adapter->msix_entries[i].entry = i;

	rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs);
	if (rc != 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X, vectors %d rc %d\n",
			  msix_vecs, rc);
		return -ENOSPC;
	}

	netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n",
		  msix_vecs);

	if (msix_vecs >= 1) {
		if (ena_init_rx_cpu_rmap(adapter))
			netif_warn(adapter, probe, adapter->netdev,
				   "Failed to map IRQs to CPUs\n");
	}

	adapter->msix_vecs = msix_vecs;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			adapter->msix_entries[irq_idx].vector;
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}
static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}
static int ena_request_io_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}
static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}
static void ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}
static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_disable_msix(adapter->pdev);

	if (adapter->msix_entries)
		vfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}
static void ena_del_napi(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		netif_napi_del(&adapter->ena_napi[i].napi);
}
static void ena_init_napi(struct ena_adapter *adapter)
{
	struct ena_napi *napi;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ena_io_poll,
			       ENA_NAPI_BUDGET);
		napi->rx_ring = &adapter->rx_ring[i];
		napi->tx_ring = &adapter->tx_ring[i];
		napi->qid = i;
	}
}
static void ena_napi_disable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}
static void ena_restore_ethtool_params(struct ena_adapter *adapter)
{
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;
	adapter->tx_frames = 1;
	adapter->rx_frames = 1;
}
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EPERM)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EPERM))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EPERM)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EPERM)))
		return rc;

	return 0;
}
static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc, i;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_init_napi(adapter);

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_restore_ethtool_params(adapter);

	ena_napi_enable_all(adapter);

	/* schedule napi in case we had pending packets
	 * from the last time we disable napi
	 */
	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return 0;
}
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->tx_ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}
static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->rx_ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));

	return rc;
}
static int ena_up(struct ena_adapter *adapter)
{
	int rc;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	ena_setup_io_intr(adapter);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	/* allocate transmit descriptors */
	rc = ena_setup_all_tx_resources(adapter);
	if (rc)
		goto err_setup_tx;

	/* allocate receive descriptors */
	rc = ena_setup_all_rx_resources(adapter);
	if (rc)
		goto err_setup_rx;

	/* Create TX queues */
	rc = ena_create_all_io_tx_queues(adapter);
	if (rc)
		goto err_create_tx_queues;

	/* Create RX queues */
	rc = ena_create_all_io_rx_queues(adapter);
	if (rc)
		goto err_create_rx_queues;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	return rc;

err_up:
	ena_destroy_all_rx_queues(adapter);
err_create_rx_queues:
	ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
	ena_free_all_io_rx_resources(adapter);
err_setup_rx:
	ena_free_all_io_tx_resources(adapter);
err_setup_tx:
	ena_free_io_irq(adapter);
err_req_irq:

	return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_down++;
	u64_stats_update_end(&adapter->syncp);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_all(adapter);
	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After destroy the queue there won't be any new interrupts */
	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}
/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);
	if (rc)
		return rc;

	return rc;
}
/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	return 0;
}
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;

	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}
static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.linearize++;
	u64_stats_update_end(&tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.linearize_failed++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return rc;
}
/* Called with netif_tx_lock. */
static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_tx_buffer *tx_info;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_ring *tx_ring;
	struct netdev_queue *txq;
	struct ena_com_buf *ena_buf;
	void *push_hdr;
	u32 len, last_frag;
	u16 next_to_use;
	u16 req_id;
	u16 push_len;
	u16 header_len;
	dma_addr_t dma;
	int qid, rc, nb_hw_desc;
	int i = -1;

	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
	/* Determine which tx ring we will be placed on */
	qid = skb_get_queue_mapping(skb);
	tx_ring = &adapter->tx_ring[qid];
	txq = netdev_get_tx_queue(dev, qid);

	rc = ena_check_and_linearize_skb(tx_ring, skb);
	if (unlikely(rc))
		goto error_drop_packet;

	skb_tx_timestamp(skb);
	len = skb_headlen(skb);

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_tx_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
	ena_buf = tx_info->bufs;
	tx_info->skb = skb;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* prepared the push buffer */
		push_len = min_t(u32, len, tx_ring->tx_max_header_size);
		header_len = push_len;
		push_hdr = skb->data;
	} else {
		push_len = 0;
		header_len = min_t(u32, len, tx_ring->tx_max_header_size);
		push_hdr = NULL;
	}

	netif_dbg(adapter, tx_queued, dev,
		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
		  push_hdr, push_len);

	if (len > push_len) {
		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
				     len - push_len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = len - push_len;

		ena_buf++;
		tx_info->num_of_bufs++;
	}

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = len;
		ena_buf++;
	}

	tx_info->num_of_bufs += last_frag;

	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* set flags and meta data */
	ena_tx_csum(&ena_tx_ctx, skb);

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
				&nb_hw_desc);

	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		tx_ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&tx_ring->syncp);
		netif_tx_stop_queue(txq);
		goto error_unmap_dma;
	}

	netdev_tx_sent_queue(txq, skb->len);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.cnt++;
	tx_ring->tx_stats.bytes += skb->len;
	u64_stats_update_end(&tx_ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;

	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						    tx_ring->ring_size);

	/* This WMB is aimed to:
	 * 1 - perform smp barrier before reading next_to_completion
	 * 2 - make sure the desc were written before trigger DB
	 */
	wmb();

	/* stop the queue when no more space available, the packet can have up
	 * to sgl_size + 2. one for the meta descriptor and one for header
	 * (if the header is larger than tx_max_header_size).
	 */
	if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
		     (tx_ring->sgl_size + 2))) {
		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
			  __func__, qid);

		netif_tx_stop_queue(txq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		u64_stats_update_end(&tx_ring->syncp);

		/* There is a rare condition where this function decide to
		 * stop the queue but meanwhile clean_tx_irq updates
		 * next_to_completion and terminates.
		 * The queue will remain stopped forever.
		 * To solve this issue this function perform rmb, check
		 * the wakeup condition and wake up the queue if needed.
		 */
		smp_rmb();

		if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
		    > ENA_TX_WAKEUP_THRESH) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
	}

	if (netif_xmit_stopped(txq) || !skb->xmit_more) {
		/* trigger the dma engine */
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.doorbells++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return NETDEV_TX_OK;

error_report_dma_error:
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&tx_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map skb\n");

	tx_info->skb = NULL;

error_unmap_dma:
	if (i >= 0) {
		/* save value of frag that failed */
		last_frag = i;

		/* start back at beginning and unmap skb */
		tx_info->skb = NULL;
		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		for (i = 0; i < last_frag; i++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
				       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		}
	}

error_drop_packet:

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ena_netpoll(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	u16 qid;
	/* we suspect that this is good for in-kernel network services that
	 * want to loop incoming skb rx to tx in normal user generated traffic,
	 * most probably we will not get to this
	 */
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
		qid = fallback(dev, skb);

	return qid;
}
static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		pr_err("Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strncpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EPERM)
			pr_warn("Cannot set host attributes\n");
		else
			pr_err("Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		pr_err("Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(adapter->ena_dev);
	if (rc) {
		if (rc == -EPERM)
			netif_warn(adapter, drv, adapter->netdev,
				   "Cannot set host attributes\n");
		else
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot set host attributes\n");
		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(adapter->ena_dev);
}
static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_admin_basic_stats ena_stats;
	int rc;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return NULL;

	rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats);
	if (rc)
		return NULL;

	stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) |
		ena_stats.tx_bytes_low;
	stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) |
		ena_stats.rx_bytes_low;

	stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) |
		ena_stats.rx_pkts_low;
	stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) |
		ena_stats.tx_pkts_low;

	stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) |
		ena_stats.rx_drops_low;

	stats->multicast = 0;
	stats->collisions = 0;

	stats->rx_length_errors = 0;
	stats->rx_crc_errors = 0;
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = 0;
	stats->tx_window_errors = 0;

	stats->rx_errors = 0;
	stats->tx_errors = 0;

	return stats;
}
static const struct net_device_ops ena_netdev_ops = {
	.ndo_open		= ena_open,
	.ndo_stop		= ena_close,
	.ndo_start_xmit		= ena_start_xmit,
	.ndo_select_queue	= ena_select_queue,
	.ndo_get_stats64	= ena_get_stats64,
	.ndo_tx_timeout		= ena_tx_timeout,
	.ndo_change_mtu		= ena_change_mtu,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ena_netpoll,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};
static void ena_device_io_suspend(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, suspend_io_task);
	struct net_device *netdev = adapter->netdev;

	/* ena_napi_disable_all disables only the IO handling.
	 * We are still subject to AENQ keep alive watchdog.
	 */
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.io_suspend++;
	u64_stats_update_end(&adapter->syncp);
	ena_napi_disable_all(adapter);
	netif_tx_lock(netdev);
	netif_device_detach(netdev);
	netif_tx_unlock(netdev);
}
static void ena_device_io_resume(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, resume_io_task);
	struct net_device *netdev = adapter->netdev;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.io_resume++;
	u64_stats_update_end(&adapter->syncp);

	netif_device_attach(netdev);
	ena_napi_enable_all(adapter);
}
static int ena_device_validate_params(struct ena_adapter *adapter,
				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
			      adapter->mac_addr);
	if (!rc) {
		netif_err(adapter, drv, netdev,
			  "Error, mac addresses are different\n");
		return -EINVAL;
	}

	if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
	    (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
		netif_err(adapter, drv, netdev,
			  "Error, device doesn't support enough queues\n");
		return -EINVAL;
	}

	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
		netif_err(adapter, drv, netdev,
			  "Error, device max mtu is smaller than netdev MTU\n");
		return -EINVAL;
	}

	return 0;
}

static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	struct device *dev = &pdev->dev;
	bool readless_supported;
	u32 aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		dev_err(dev, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates whether mmio
	 * reg read is disabled
	 */
	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev);
	if (rc) {
		dev_err(dev, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		dev_err(dev, "device version is too low\n");
		goto err_mmio_read_less;
	}

	dma_width = ena_com_get_dma_width(ena_dev);
	if (dma_width < 0) {
		dev_err(dev, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
		goto err_mmio_read_less;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
			rc);
		goto err_mmio_read_less;
	}

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
	if (rc) {
		dev_err(dev,
			"Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
		goto err_admin_init;
	}

	/* Try to turn on all the available aenq groups */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
		      BIT(ENA_ADMIN_FATAL_ERROR) |
		      BIT(ENA_ADMIN_WARNING) |
		      BIT(ENA_ADMIN_NOTIFICATION) |
		      BIT(ENA_ADMIN_KEEP_ALIVE);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;

	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (rc) {
		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
		goto err_admin_init;
	}

	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	ena_config_host_info(ena_dev);

	return 0;

err_admin_init:
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}

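/* ena_enable_msix_and_set_admin_interrupts - switch the admin queue from
 * polling to interrupt mode once the MSI-X vectors are reserved and the
 * management interrupt handler is registered.
 */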
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
						    int io_vectors)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc;

	rc = ena_enable_msix(adapter, io_vectors);
	if (rc) {
		dev_err(dev, "Can not reserve msix vectors\n");
		return rc;
	}

	ena_setup_mgmnt_intr(adapter);

	rc = ena_request_mgmnt_irq(adapter);
	if (rc) {
		dev_err(dev, "Can not setup management interrupts\n");
		goto err_disable_msix;
	}

	ena_com_set_admin_polling_mode(ena_dev, false);

	ena_com_admin_aenq_enable(ena_dev);

	return 0;

err_disable_msix:
	ena_disable_msix(adapter);

	return rc;
}

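/* ena_fw_reset_device - recover the device from a fatal condition
 *
 * Runs from the reset_task worker: tears the device down (close, device
 * reset, free interrupts, destroy the admin queue) and then runs the
 * init sequence again, bringing the interface back up if it was up
 * before the reset.
 */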
static void ena_fw_reset_device(struct work_struct *work)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, reset_task);
	struct net_device *netdev = adapter->netdev;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct pci_dev *pdev = adapter->pdev;
	bool dev_up, wd_state;
	int rc;

	del_timer_sync(&adapter->timer_service);

	rtnl_lock();

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_com_set_admin_running_state(ena_dev, false);

	/* After calling ena_close the tx queues and the napi
	 * are disabled so no one can interfere or touch the
	 * data structures
	 */
	ena_close(netdev);

	rc = ena_com_dev_reset(ena_dev);
	if (rc) {
		dev_err(&pdev->dev, "Device reset failed\n");
		goto err;
	}

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	/* Finish with the destroy part. Start the init part */

	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "Can not initialize device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc) {
		dev_err(&pdev->dev, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
						      adapter->num_queues);
	if (rc) {
		dev_err(&pdev->dev, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}

	/* If the interface was up before the reset bring it up */
	if (dev_up) {
		rc = ena_up(adapter);
		if (rc) {
			dev_err(&pdev->dev, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	rtnl_unlock();

	dev_err(&pdev->dev, "Device reset completed successfully\n");

	return;
err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_admin_destroy(ena_dev);
err:
	rtnl_unlock();

	dev_err(&pdev->dev,
		"Reset attempt failed. Can not reset the device\n");
}

static void check_for_missing_tx_completions(struct ena_adapter *adapter)
{
	struct ena_tx_buffer *tx_buf;
	unsigned long last_jiffies;
	struct ena_ring *tx_ring;
	u32 missed_tx;
	int i, j, budget;

	/* Make sure the driver doesn't turn the device in other process */
	smp_rmb();

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	budget = ENA_MONITORED_TX_QUEUES;

	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];

		for (j = 0; j < tx_ring->ring_size; j++) {
			tx_buf = &tx_ring->tx_buffer_info[j];
			last_jiffies = tx_buf->last_jiffies;
			if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
				netif_notice(adapter, tx_err, adapter->netdev,
					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
					     tx_ring->qid, j);

				u64_stats_update_begin(&tx_ring->syncp);
				missed_tx = tx_ring->tx_stats.missing_tx_comp++;
				u64_stats_update_end(&tx_ring->syncp);

				/* Clear last jiffies so the lost buffer won't
				 * be counted twice.
				 */
				tx_buf->last_jiffies = 0;

				if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
					netif_err(adapter, tx_err, adapter->netdev,
						  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
						  missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
					set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
				}
			}
		}

		budget--;
		if (!budget)
			break;
	}

	adapter->last_monitored_tx_qid = i % adapter->num_queues;
}

/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	unsigned long keep_alive_expired;

	if (!adapter->wd_state)
		return;

	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies
					   + ENA_DEVICE_KALIVE_TIMEOUT);
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.wd_expired++;
		u64_stats_update_end(&adapter->syncp);
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}

static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
		netif_err(adapter, drv, adapter->netdev,
			  "ENA admin queue is not in running state!\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.admin_q_pause++;
		u64_stats_update_end(&adapter->syncp);
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}

static void ena_update_host_info(struct ena_admin_host_info *host_info,
				 struct net_device *netdev)
{
	host_info->supported_network_features[0] =
		netdev->features & GENMASK_ULL(31, 0);
	host_info->supported_network_features[1] =
		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}

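/* ena_timer_service - once-a-second housekeeping
 *
 * Runs the keep-alive, admin-queue and Tx-completion watchdogs, refreshes
 * the host-info and debug areas, and schedules reset_task when a trigger
 * was raised; otherwise it re-arms itself for the next second.
 */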
static void ena_timer_service(unsigned long data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
	struct ena_admin_host_info *host_info =
		adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_tx_completions(adapter);

	if (debug_area)
		ena_dump_stats_to_buf(adapter, debug_area);

	if (host_info)
		ena_update_host_info(host_info, adapter->netdev);

	if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Trigger reset is on\n");
		ena_dump_stats_to_dmesg(adapter);
		queue_work(ena_wq, &adapter->reset_task);
		return;
	}

	/* Reset the timer */
	mod_timer(&adapter->timer_service, jiffies + HZ);
}

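/* ena_calc_io_queue_num - pick the number of IO queue pairs
 *
 * The result is the minimum of the possible CPUs, the device's SQ and
 * CQ limits and the available MSI-X vectors (one vector is reserved for
 * management).
 */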
static int ena_calc_io_queue_num(struct pci_dev *pdev,
				 struct ena_com_dev *ena_dev,
				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	int io_sq_num, io_queue_num;

	/* In case of LLQ use the llq number in the get feature cmd */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq_num = get_feat_ctx->max_queues.max_llq_num;

		if (io_sq_num == 0) {
			dev_warn(&pdev->dev,
				 "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");

			ena_dev->tx_mem_queue_type =
				ENA_ADMIN_PLACEMENT_POLICY_HOST;
			io_sq_num = get_feat_ctx->max_queues.max_sq_num;
		}
	} else {
		io_sq_num = get_feat_ctx->max_queues.max_sq_num;
	}

	io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES);
	io_queue_num = min_t(int, io_queue_num, io_sq_num);
	io_queue_num = min_t(int, io_queue_num,
			     get_feat_ctx->max_queues.max_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
	io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
	if (unlikely(!io_queue_num)) {
		dev_err(&pdev->dev, "The device doesn't have io queues\n");
		return -EFAULT;
	}

	return io_queue_num;
}

static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	bool has_mem_bar;

	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);

	/* Enable push mode if device supports LLQ */
	if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
	else
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
}

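/* ena_set_dev_offloads - translate the device's offload capability bits
 * into the corresponding NETIF_F_* feature flags on the net_device.
 */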
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
				 struct net_device *netdev)
{
	netdev_features_t dev_features = 0;

	/* Set offload features */
	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		dev_features |= NETIF_F_IP_CSUM;

	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		dev_features |= NETIF_F_IPV6_CSUM;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		dev_features |= NETIF_F_TSO;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
		dev_features |= NETIF_F_TSO6;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
		dev_features |= NETIF_F_TSO_ECN;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	netdev->features =
		dev_features |
		NETIF_F_SG |
		NETIF_F_RXHASH |
		NETIF_F_HIGHDMA;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
}

static void ena_set_conf_feat_params(struct ena_adapter *adapter,
				     struct ena_com_dev_get_features_ctx *feat)
{
	struct net_device *netdev = adapter->netdev;

	/* Copy mac address */
	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	} else {
		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	}

	/* Set offload features */
	ena_set_dev_offloads(feat, netdev);

	adapter->max_mtu = feat->dev_attr.max_mtu;
	netdev->max_mtu = adapter->max_mtu;
	netdev->min_mtu = ENA_MIN_MTU;
}

static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (rc) {
		dev_err(dev, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = ethtool_rxfh_indir_default(i, adapter->num_queues);
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != -EPERM))) {
			dev_err(dev, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != -EPERM))) {
		dev_err(dev, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EPERM))) {
		dev_err(dev, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	int release_bars;

	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	pci_release_selected_regions(pdev, release_bars);
}

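/* ena_calc_queue_size - choose the ring size and max SGL sizes
 *
 * Starts from ENA_DEFAULT_RING_SIZE, clamps it to the device's SQ/CQ
 * (and LLQ) depth limits and rounds down to a power of two.
 */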
static int ena_calc_queue_size(struct pci_dev *pdev,
			       struct ena_com_dev *ena_dev,
			       u16 *max_tx_sgl_size,
			       u16 *max_rx_sgl_size,
			       struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	u32 queue_size = ENA_DEFAULT_RING_SIZE;

	queue_size = min_t(u32, queue_size,
			   get_feat_ctx->max_queues.max_cq_depth);
	queue_size = min_t(u32, queue_size,
			   get_feat_ctx->max_queues.max_sq_depth);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		queue_size = min_t(u32, queue_size,
				   get_feat_ctx->max_queues.max_llq_depth);

	queue_size = rounddown_pow_of_two(queue_size);

	if (unlikely(!queue_size)) {
		dev_err(&pdev->dev, "Invalid queue size\n");
		return -EFAULT;
	}

	*max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
				 get_feat_ctx->max_queues.max_packet_tx_descs);
	*max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
				 get_feat_ctx->max_queues.max_packet_rx_descs);

	return queue_size;
}

/* ena_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ena_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ena_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	static int version_printed;
	struct net_device *netdev;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev = NULL;
	static int adapters_found;
	int io_queue_num, bars, rc;
	int queue_size;
	u16 tx_sgl_size = 0;
	u16 rx_sgl_size = 0;
	bool wd_state;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	if (version_printed++ == 0)
		dev_info(&pdev->dev, "%s", version);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return rc;
	}

	pci_set_master(pdev);

	ena_dev = vzalloc(sizeof(*ena_dev));
	if (!ena_dev) {
		rc = -ENOMEM;
		goto err_disable_device;
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			rc);
		goto err_free_ena_dev;
	}

	ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
				   pci_resource_len(pdev, ENA_REG_BAR));
	if (!ena_dev->reg_bar) {
		dev_err(&pdev->dev, "failed to remap regs bar\n");
		rc = -EFAULT;
		goto err_free_region;
	}

	ena_dev->dmadev = &pdev->dev;

	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "ena device init failed\n");
		if (rc == -ETIME)
			rc = -EPROBE_DEFER;
		goto err_free_region;
	}

	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
					      pci_resource_len(pdev, ENA_MEM_BAR));
		if (!ena_dev->mem_bar) {
			rc = -EFAULT;
			goto err_device_destroy;
		}
	}

	/* initial Tx interrupt delay, Assumes 1 usec granularity.
	 * Updated during device initialization with the real granularity
	 */
	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
	queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
					 &rx_sgl_size, &get_feat_ctx);
	if ((queue_size <= 0) || (io_queue_num <= 0)) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
		 io_queue_num, queue_size);

	/* dev zeroed in init_etherdev */
	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
		rc = -ENOMEM;
		goto err_device_destroy;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->ena_dev = ena_dev;
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	ena_set_conf_feat_params(adapter, &get_feat_ctx);

	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	adapter->tx_ring_size = queue_size;
	adapter->rx_ring_size = queue_size;

	adapter->max_tx_sgl_size = tx_sgl_size;
	adapter->max_rx_sgl_size = rx_sgl_size;

	adapter->num_queues = io_queue_num;
	adapter->last_monitored_tx_qid = 0;

	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
	adapter->wd_state = wd_state;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to query interrupt moderation feature\n");
		goto err_netdev_destroy;
	}

	ena_init_io_rings(adapter);

	netdev->netdev_ops = &ena_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	ena_set_ethtool_ops(netdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	u64_stats_init(&adapter->syncp);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to enable and set the admin interrupts\n");
		goto err_worker_destroy;
	}

	rc = ena_rss_init_default(adapter);
	if (rc && (rc != -EPERM)) {
		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
		goto err_free_msix;
	}

	ena_config_debug_area(adapter);

	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_rss;
	}

	INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend);
	INIT_WORK(&adapter->resume_io_task, ena_device_io_resume);
	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

	adapter->last_keep_alive_jiffies = jiffies;

	setup_timer(&adapter->timer_service, ena_timer_service,
		    (unsigned long)adapter);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr, io_queue_num);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;

err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_worker_destroy:
	ena_com_destroy_interrupt_moderation(ena_dev);
	del_timer(&adapter->timer_service);
	cancel_work_sync(&adapter->suspend_io_task);
	cancel_work_sync(&adapter->resume_io_task);
err_netdev_destroy:
	free_netdev(netdev);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);
	return rc;
}

/*****************************************************************************/
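/* ena_sriov_configure - enable or disable SR-IOV virtual functions
 *
 * Called by the PCI subsystem: a positive @numvfs enables that many VFs,
 * zero disables SR-IOV.
 */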
static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
{
	int rc;

	if (numvfs > 0) {
		rc = pci_enable_sriov(dev, numvfs);
		if (rc != 0) {
			dev_err(&dev->dev,
				"pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
				numvfs, rc);
			return rc;
		}

		return numvfs;
	}

	if (numvfs == 0) {
		pci_disable_sriov(dev);
		return 0;
	}

	return -EINVAL;
}

/*****************************************************************************/
/*****************************************************************************/

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	if (!adapter)
		/* This device didn't load properly and its resources were
		 * already released, nothing to do
		 */
		return;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	unregister_netdev(netdev);
	del_timer_sync(&adapter->timer_service);

	cancel_work_sync(&adapter->reset_task);

	cancel_work_sync(&adapter->suspend_io_task);

	cancel_work_sync(&adapter->resume_io_task);

	ena_com_dev_reset(ena_dev);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	free_netdev(netdev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);

	ena_com_destroy_interrupt_moderation(ena_dev);

	vfree(ena_dev);
}

static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.sriov_configure = ena_sriov_configure,
};

static int __init ena_init(void)
{
	pr_info("%s", version);

	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netdev_dbg(adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}

static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	adapter->last_keep_alive_jiffies = jiffies;
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_SUSPEND:
		/* Suspend just the IO queues.
		 * We deliberately don't suspend admin so the timer and
		 * the keep_alive events should remain.
		 */
		queue_work(ena_wq, &adapter->suspend_io_task);
		break;
	case ENA_ADMIN_RESUME:
		queue_work(ena_wq, &adapter->resume_io_task);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification link state %d\n",
			  aenq_e->aenq_common_desc.syndrom);
	}
}

/* This handler will be called for unknown event groups or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);