/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>
#define DRV_MODULE_VER_MAJOR	1
#define DRV_MODULE_VER_MINOR	0
#define DRV_MODULE_VER_SUBMINOR	0

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	(((q) - 1) / 2)
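/* TX and RX rings share one ena_com IO queue index space: even indexes
 * are TX queues, odd indexes are RX queues. E.g. queue pair 0 maps to
 * IO queues 0 (TX) and 1 (RX), queue pair 1 maps to 2 and 3, and
 * ENA_IO_RXQ_IDX_REV(3) yields queue pair 1 again.
 */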
/* While processing submitted and completed descriptors (rx and tx path
 * respectively) in a loop it is desired to:
 *  - perform batch submissions while populating submission queue
 *  - avoid blocking transmission of other packets during cleanup phase
 * Hence the utilization ratio of 1/8 of a queue size.
 */
#define ENA_RING_DESCS_RATIO(ring_size)	(ring_size / 8)
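/* Example: with ring_size == 1024 the ratio is 128, i.e. RX refill is
 * deferred until at least 128 descriptors are free, and a single TX
 * cleanup burst stops after roughly 128 descriptors.
 */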
#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))

#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
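/* GET_L4_HDR_LEN reads the TCP data-offset field (the upper nibble of
 * data_off) from the header located l2_len + l3_len bytes into the
 * mbuf; note the value is expressed in 32-bit words, not bytes.
 */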
#define ENA_RX_RSS_TABLE_LOG_SIZE	7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ENA_ETH_SS_STATS	0xFF
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
enum ethtool_stringset {
	ETH_SS_TEST = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)
static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
	ENA_STAT_GLOBAL_ENTRY(io_suspend),
	ENA_STAT_GLOBAL_ENTRY(io_resume),
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(interface_up),
	ENA_STAT_GLOBAL_ENTRY(interface_down),
	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(queue_stop),
	ENA_STAT_TX_ENTRY(queue_wakeup),
	ENA_STAT_TX_ENTRY(dma_mapping_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(missing_tx_comp),
	ENA_STAT_TX_ENTRY(bad_req_id),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refil_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(page_alloc_fail),
	ENA_STAT_RX_ENTRY(skb_alloc_fail),
	ENA_STAT_RX_ENTRY(dma_mapping_err),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(small_copy_len_pkt),
};

static const struct ena_stats ena_stats_ena_com_strings[] = {
	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
	ENA_STAT_ENA_COM_ENTRY(out_of_space),
	ENA_STAT_ENA_COM_ENTRY(no_completion),
};
#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON	0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF	0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF	0xEC21
static struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
	{ .device_id = 0 },
};
static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
static void ena_stats_get(struct rte_eth_dev *dev,
			  struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete);
static int ena_queue_restart(struct ena_ring *ring);
static int ena_queue_restart_all(struct rte_eth_dev *dev,
				 enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static void ena_infos_get(__rte_unused struct rte_eth_dev *dev,
			  struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
static struct eth_dev_ops ena_dev_ops = {
	.dev_configure        = ena_dev_configure,
	.dev_infos_get        = ena_infos_get,
	.rx_queue_setup       = ena_rx_queue_setup,
	.tx_queue_setup       = ena_tx_queue_setup,
	.dev_start            = ena_start,
	.link_update          = ena_link_update,
	.stats_get            = ena_stats_get,
	.mtu_set              = ena_mtu_set,
	.rx_queue_release     = ena_rx_queue_release,
	.tx_queue_release     = ena_tx_queue_release,
	.dev_close            = ena_close,
	.reta_update          = ena_rss_reta_update,
	.reta_query           = ena_rss_reta_query,
};
#define NUMA_NO_NODE	SOCKET_ID_ANY

static inline int ena_cpu_to_node(int cpu)
{
	struct rte_config *config = rte_eal_get_configuration();

	if (likely(cpu < RTE_MAX_MEMZONE))
		return config->mem_config->memzone[cpu].socket_id;

	return NUMA_NO_NODE;
}
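/* Translate the protocol type and checksum-error bits reported in the
 * device RX completion context into mbuf offload flags.
 */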
static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		ol_flags |= PKT_TX_TCP_CKSUM;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		ol_flags |= PKT_TX_UDP_CKSUM;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
		ol_flags |= PKT_TX_IPV4;
	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
		ol_flags |= PKT_TX_IPV6;

	if (unlikely(ena_rx_ctx->l4_csum_err))
		ol_flags |= PKT_RX_L4_CKSUM_BAD;
	if (unlikely(ena_rx_ctx->l3_csum_err))
		ol_flags |= PKT_RX_IP_CKSUM_BAD;

	mbuf->ol_flags = ol_flags;
}
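/* Fill the TX context (TSO, L3/L4 checksum offloads and metadata) for a
 * packet, based on its mbuf ol_flags and packet_type.
 */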
static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if (mbuf->ol_flags &
	    (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) {
		/* check if TSO is required */
		if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed */
		switch (mbuf->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_TCP_CKSUM:
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
			break;
		case PKT_TX_UDP_CKSUM:
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
			break;
		default:
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
			break;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;
		/* this param needed only for TSO */
		ena_meta->l3_outer_hdr_len = 0;
		ena_meta->l3_outer_hdr_offset = 0;

		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}
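/* Report driver identification (DPDK version and the driver version
 * defined above) to the device through the host info admin structure.
 */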
static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	snprintf((char *)host_info->kernel_ver_str,
		 sizeof(host_info->kernel_ver_str),
		 "%s", rte_version());
	host_info->os_dist = RTE_VERSION;
	snprintf((char *)host_info->os_dist_str,
		 sizeof(host_info->os_dist_str),
		 "%s", rte_version());
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
static int
ena_get_sset_count(struct rte_eth_dev *dev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	/* Workaround for clang:
	 * touch internal structures to prevent
	 * compiler error
	 */
	ENA_TOUCH(ena_stats_global_strings);
	ENA_TOUCH(ena_stats_tx_strings);
	ENA_TOUCH(ena_stats_rx_strings);
	ENA_TOUCH(ena_stats_ena_com_strings);

	return dev->data->nb_tx_queues *
		(ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) +
		ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS);
	if (ss_count <= 0) {
		RTE_LOG(ERR, PMD, "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		RTE_LOG(ERR, PMD, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -EPERM)
			RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
		else
			RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
		goto err;
	}

	return;

err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}
static void ena_close(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

	adapter->state = ENA_ADAPTER_STATE_STOPPED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);
}
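/* Program the RSS redirection table. Entries arrive as queue-pair ids
 * in groups of RTE_RETA_GROUP_SIZE (64) and are converted with
 * ENA_IO_RXQ_IDX() to device RX queue indexes before being written.
 */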
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int ret, i;
	u16 entry_value;
	int conf_idx;
	int idx;

	if ((reta_size == 0) || (reta_conf == NULL))
		return -EINVAL;

	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
		RTE_LOG(WARNING, PMD,
			"indirection table %d is bigger than supported (%d)\n",
			reta_size, ENA_RX_RSS_TABLE_SIZE);
		return -EINVAL;
	}

	for (i = 0 ; i < reta_size ; i++) {
		/* each reta_conf is for 64 entries.
		 * to support 128 we use 2 conf of 64
		 */
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
			entry_value =
				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
			ret = ena_com_indirect_table_fill_entry(ena_dev,
								i,
								entry_value);
			if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
				RTE_LOG(ERR, PMD,
					"Cannot fill indirect table\n");
				return -ENOTSUP;
			}
		}
	}

	ret = ena_com_indirect_table_set(ena_dev);
	if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
		return -ENOTSUP;
	}

	RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
		__func__, reta_size, adapter->rte_dev->data->port_id);

	return 0;
}
/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int ret;
	int i;
	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
	int reta_conf_idx;
	int reta_idx;

	if (reta_size == 0 || reta_conf == NULL ||
	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
		return -EINVAL;

	ret = ena_com_indirect_table_get(ena_dev, indirect_table);
	if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
		RTE_LOG(ERR, PMD, "cannot get indirect table\n");
		return -ENOTSUP;
	}

	for (i = 0 ; i < reta_size ; i++) {
		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
			reta_conf[reta_conf_idx].reta[reta_idx] =
				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
	}

	return 0;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = i % nb_rx_queues;
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
			RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
		RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
		RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
		goto err_fill_indir;
	}
	RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n",
		adapter->rte_dev->data->port_id);

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}
static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(queues[i]);
}
static void ena_rx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;
	struct ena_adapter *adapter = ring->adapter;
	int ena_qid;

	ena_assert_msg(ring->configured,
		       "API violation - releasing not configured queue");
	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
		       "API violation");

	/* Destroy HW queue */
	ena_qid = ENA_IO_RXQ_IDX(ring->id);
	ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);

	/* Free all bufs */
	ena_rx_queue_release_bufs(ring);

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	ring->configured = 0;

	RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;
	struct ena_adapter *adapter = ring->adapter;
	int ena_qid;

	ena_assert_msg(ring->configured,
		       "API violation. Releasing not configured queue");
	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
		       "API violation");

	/* Destroy HW queue */
	ena_qid = ENA_IO_TXQ_IDX(ring->id);
	ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);

	/* Free all bufs */
	ena_tx_queue_release_bufs(ring);

	/* Free ring resources */
	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;

	ring->configured = 0;

	RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n",
		ring->port_id, ring->id);
}
static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int ring_mask = ring->ring_size - 1;

	while (ring->next_to_clean != ring->next_to_use) {
		struct rte_mbuf *m =
			ring->rx_buffer_info[ring->next_to_clean & ring_mask];

		if (m)
			__rte_mbuf_raw_free(m);

		ring->next_to_clean++;
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int ring_mask = ring->ring_size - 1;

	while (ring->next_to_clean != ring->next_to_use) {
		struct ena_tx_buffer *tx_buf =
			&ring->tx_buffer_info[ring->next_to_clean & ring_mask];

		if (tx_buf->mbuf)
			rte_pktmbuf_free(tx_buf->mbuf);

		ring->next_to_clean++;
	}
}
static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;

	link->link_status = 1;
	link->link_speed = ETH_SPEED_NUM_10G;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}
static int ena_queue_restart_all(struct rte_eth_dev *dev,
				 enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_ring *queues = NULL;
	int i = 0;
	int rc = 0;

	queues = (ring_type == ENA_RING_TYPE_RX) ?
		adapter->rx_ring : adapter->tx_ring;

	for (i = 0; i < adapter->num_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of tx queues\n");
			}

			rc = ena_queue_restart(&queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					     "failed to restart queue %d type(%d)\n",
					     i, ring_type);
				return -1;
			}
		}
	}

	return 0;
}
static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1)
		max_frame_len =
			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}
static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

	if (max_frame_len > adapter->max_mtu) {
		PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len);
		return -1;
	}

	return 0;
}
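/* Derive the ring size: start from ENA_DEFAULT_RING_SIZE, clamp it to
 * the device CQ/SQ (and, for LLQ placement, LLQ) depth limits, then
 * round down to a power of two; e.g. a device limit of 700 yields 512.
 */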
static int
ena_calc_queue_size(struct ena_com_dev *ena_dev,
		    struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	uint32_t queue_size = ENA_DEFAULT_RING_SIZE;

	queue_size = RTE_MIN(queue_size,
			     get_feat_ctx->max_queues.max_cq_depth);
	queue_size = RTE_MIN(queue_size,
			     get_feat_ctx->max_queues.max_sq_depth);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		queue_size = RTE_MIN(queue_size,
				     get_feat_ctx->max_queues.max_llq_depth);

	/* Round down to power of 2 */
	if (!rte_is_power_of_2(queue_size))
		queue_size = rte_align32pow2(queue_size >> 1);

	if (queue_size == 0) {
		PMD_INIT_LOG(ERR, "Invalid queue size\n");
		return -EFAULT;
	}

	return queue_size;
}
static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
}
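/* Basic counters are reported by the device as 32-bit high/low pairs;
 * __MERGE_64B_H_L() recombines each pair into one 64-bit value.
 */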
static void ena_stats_get(struct rte_eth_dev *dev,
			  struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	memset(&ena_stats, 0, sizeof(ena_stats));
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA");
		return;
	}

	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);
	stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high,
					 ena_stats.rx_drops_low);

	/* Driver related stats */
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
}
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
	adapter = (struct ena_adapter *)(dev->data->dev_private);

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device");

	if (mtu > ena_get_mtu_conf(adapter)) {
		RTE_LOG(ERR, PMD,
			"Given MTU (%d) exceeds maximum MTU supported (%d)\n",
			mtu, ena_get_mtu_conf(adapter));
		return -EINVAL;
	}

	rc = ena_com_set_dev_mtu(ena_dev, mtu);
	if (rc)
		RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu);
	else
		RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);

	return rc;
}
static int ena_start(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	int rc = 0;

	if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG ||
	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
		PMD_INIT_LOG(ERR, "API violation");
		return -1;
	}

	rc = ena_check_valid_conf(adapter);
	if (rc)
		return rc;

	rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX);
	if (rc)
		return rc;

	rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX);
	if (rc)
		return rc;

	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
	    ETH_MQ_RX_RSS_FLAG) {
		rc = ena_rss_init_default(adapter);
		if (rc)
			return rc;
	}

	ena_stats_restart(dev);

	adapter->state = ENA_ADAPTER_STATE_RUNNING;

	return 0;
}
static int ena_queue_restart(struct ena_ring *ring)
{
	int rc;

	ena_assert_msg(ring->configured == 1,
		       "Trying to restart unconfigured queue\n");

	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->type == ENA_RING_TYPE_TX)
		return 0;

	rc = ena_populate_rx_queue(ring, ring->ring_size);
	if ((unsigned int)rc != ring->ring_size) {
		PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
		return (-1);
	}

	return 0;
}
static int ena_tx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx,
			      uint16_t nb_desc,
			      __rte_unused unsigned int socket_id,
			      __rte_unused const struct rte_eth_txconf *tx_conf)
{
	struct ena_com_create_io_ctx ctx =
		/* policy set to _HOST just to satisfy icc compiler */
		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
		  ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 };
	struct ena_ring *txq = NULL;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	unsigned int i;
	int ena_qid;
	int rc;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;

	txq = &adapter->tx_ring[queue_idx];

	if (txq->configured) {
		RTE_LOG(CRIT, PMD,
			"API violation. Queue %d is already configured\n",
			queue_idx);
		return -1;
	}

	if (!rte_is_power_of_2(nb_desc)) {
		RTE_LOG(ERR, PMD,
			"Unsupported size of TX queue: %d is not a power of 2.",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->tx_ring_size) {
		RTE_LOG(ERR, PMD,
			"Unsupported size of TX queue (max size: %d)\n",
			adapter->tx_ring_size);
		return -EINVAL;
	}

	ena_qid = ENA_IO_TXQ_IDX(queue_idx);

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.msix_vector = -1; /* admin interrupts not used */
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.queue_size = adapter->tx_ring_size;
	ctx.numa_node = ena_cpu_to_node(queue_idx);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"failed to create io TX queue #%d (qid:%d) rc: %d\n",
			queue_idx, ena_qid, rc);
	}
	txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
	txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &txq->ena_com_io_sq,
				     &txq->ena_com_io_cq);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			queue_idx, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	txq->port_id = dev->data->port_id;
	txq->next_to_clean = 0;
	txq->next_to_use = 0;
	txq->ring_size = nb_desc;

	txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
					  sizeof(struct ena_tx_buffer) *
					  txq->ring_size,
					  RTE_CACHE_LINE_SIZE);
	if (!txq->tx_buffer_info) {
		RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
		return -ENOMEM;
	}

	txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
					 sizeof(u16) * txq->ring_size,
					 RTE_CACHE_LINE_SIZE);
	if (!txq->empty_tx_reqs) {
		RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
		rte_free(txq->tx_buffer_info);
		return -ENOMEM;
	}
	for (i = 0; i < txq->ring_size; i++)
		txq->empty_tx_reqs[i] = i;

	/* Store pointer to this queue in upper layer */
	txq->configured = 1;
	dev->data->tx_queues[queue_idx] = txq;

	return rc;
}
static int ena_rx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx,
			      uint16_t nb_desc,
			      __rte_unused unsigned int socket_id,
			      __rte_unused const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp)
{
	struct ena_com_create_io_ctx ctx =
		/* policy set to _HOST just to satisfy icc compiler */
		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
		  ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 };
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);
	struct ena_ring *rxq = NULL;
	uint16_t ena_qid = 0;
	int rc = 0;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;

	rxq = &adapter->rx_ring[queue_idx];
	if (rxq->configured) {
		RTE_LOG(CRIT, PMD,
			"API violation. Queue %d is already configured\n",
			queue_idx);
		return -1;
	}

	if (!rte_is_power_of_2(nb_desc)) {
		RTE_LOG(ERR, PMD,
			"Unsupported size of RX queue: %d is not a power of 2.",
			nb_desc);
		return -EINVAL;
	}

	if (nb_desc > adapter->rx_ring_size) {
		RTE_LOG(ERR, PMD,
			"Unsupported size of RX queue (max size: %d)\n",
			adapter->rx_ring_size);
		return -EINVAL;
	}

	ena_qid = ENA_IO_RXQ_IDX(queue_idx);

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = -1; /* admin interrupts not used */
	ctx.queue_size = adapter->rx_ring_size;
	ctx.numa_node = ena_cpu_to_node(queue_idx);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc)
		RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
			queue_idx, rc);

	rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
	rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rxq->ena_com_io_sq,
				     &rxq->ena_com_io_cq);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			queue_idx, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	rxq->port_id = dev->data->port_id;
	rxq->next_to_clean = 0;
	rxq->next_to_use = 0;
	rxq->ring_size = nb_desc;
	rxq->mb_pool = mp;

	rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
					  sizeof(struct rte_mbuf *) * nb_desc,
					  RTE_CACHE_LINE_SIZE);
	if (!rxq->rx_buffer_info) {
		RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
		return -ENOMEM;
	}

	/* Store pointer to this queue in upper layer */
	rxq->configured = 1;
	dev->data->rx_queues[queue_idx] = rxq;

	return rc;
}
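/* Allocate up to 'count' mbufs from the queue mempool, post them as RX
 * descriptors and ring the doorbell. The bulk allocation is trimmed so
 * a single call never wraps past the end of the ring; the caller gets
 * back the number of descriptors actually posted.
 */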
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
	unsigned int i;
	int rc;
	uint16_t ring_size = rxq->ring_size;
	uint16_t ring_mask = ring_size - 1;
	uint16_t next_to_use = rxq->next_to_use;
	uint16_t in_use;
	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];

	if (unlikely(!count))
		return 0;

	in_use = rxq->next_to_use - rxq->next_to_clean;
	ena_assert_msg(((in_use + count) <= ring_size), "bad ring state");

	count = RTE_MIN(count,
			(uint16_t)(ring_size - (next_to_use & ring_mask)));

	/* get resources for incoming packets */
	rc = rte_mempool_get_bulk(rxq->mb_pool,
				  (void **)(&mbufs[next_to_use & ring_mask]),
				  count);
	if (unlikely(rc < 0)) {
		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
		return 0;
	}

	for (i = 0; i < count; i++) {
		uint16_t next_to_use_masked = next_to_use & ring_mask;
		struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
		struct ena_com_buf ebuf;

		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
		/* prepare physical address for DMA transaction */
		ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM;
		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
		/* pass resource to device */
		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
						&ebuf, next_to_use_masked);
		if (unlikely(rc)) {
			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
			break;
		}
		next_to_use++;
	}

	/* When we submitted free resources to device... */
	if (i > 0) {
		/* ...let HW know that it can fill buffers with data */
		rte_wmb();
		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);

		rxq->next_to_use = next_to_use;
	}

	return i;
}
static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	int rc;
	bool readless_supported;

	/* Initialize mmio registers */
	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates if mmio reg
	 * read is disabled.
	 */
	readless_supported =
		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
		  & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	/* reset device */
	rc = ena_com_dev_reset(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "cannot reset device\n");
		goto err_mmio_read_less;
	}

	/* check FW version */
	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		RTE_LOG(ERR, PMD, "device version is too low\n");
		goto err_mmio_read_less;
	}

	ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);

	/* ENA device administration layer init */
	rc = ena_com_admin_init(ena_dev, NULL, true);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"cannot initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	ena_config_host_info(ena_dev);

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information.
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	/* Get Device Attributes and features */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"cannot get attribute for ena device rc= %d\n", rc);
		goto err_admin_init;
	}

	return 0;

err_admin_init:
	ena_com_admin_destroy(ena_dev);

err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct ena_adapter *adapter =
		(struct ena_adapter *)(eth_dev->data->dev_private);
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	int queue_size, rc;

	static int adapters_found;

	memset(adapter, 0, sizeof(struct ena_adapter));
	ena_dev = &adapter->ena_dev;

	eth_dev->dev_ops = &ena_dev_ops;
	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
	adapter->rte_eth_dev_data = eth_dev->data;
	adapter->rte_dev = eth_dev;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = eth_dev->pci_dev;
	adapter->pdev = pci_dev;

	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
		     pci_dev->addr.domain,
		     pci_dev->addr.bus,
		     pci_dev->addr.devid,
		     pci_dev->addr.function);

	adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
	adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;

	/* Present ENA_MEM_BAR indicates available LLQ mode.
	 * Use corresponding policy
	 */
	if (adapter->dev_mem_base)
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
	else if (adapter->regs)
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	else
		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
			     ENA_REGS_BAR);

	ena_dev->reg_bar = adapter->regs;
	ena_dev->dmadev = adapter->pdev;

	adapter->id_number = adapters_found;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
		 adapter->id_number);

	/* device specific initialization routine */
	rc = ena_device_init(ena_dev, &get_feat_ctx);
	if (rc) {
		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
		return -1;
	}

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		if (get_feat_ctx.max_queues.max_llq_num == 0) {
			PMD_INIT_LOG(ERR,
				     "Trying to use LLQ but llq_num is 0.\n"
				     "Fall back into regular queues.\n");
			ena_dev->tx_mem_queue_type =
				ENA_ADMIN_PLACEMENT_POLICY_HOST;
			adapter->num_queues =
				get_feat_ctx.max_queues.max_sq_num;
		} else {
			adapter->num_queues =
				get_feat_ctx.max_queues.max_llq_num;
		}
	} else {
		adapter->num_queues = get_feat_ctx.max_queues.max_sq_num;
	}

	queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);
	if ((queue_size <= 0) || (adapter->num_queues <= 0))
		return -EFAULT;

	adapter->tx_ring_size = queue_size;
	adapter->rx_ring_size = queue_size;

	/* prepare ring structures */
	ena_init_rings(adapter);

	ena_config_debug_area(adapter);

	/* Set max MTU for this device */
	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;

	/* Copy MAC address and point DPDK to it */
	eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
	ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
			(struct ether_addr *)adapter->mac_addr);

	adapter->drv_stats = rte_zmalloc("adapter stats",
					 sizeof(*adapter->drv_stats),
					 RTE_CACHE_LINE_SIZE);
	if (!adapter->drv_stats) {
		RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
		return -ENOMEM;
	}

	adapters_found++;
	adapter->state = ENA_ADAPTER_STATE_INIT;

	return 0;
}
static int ena_dev_configure(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter =
		(struct ena_adapter *)(dev->data->dev_private);

	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
		PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n",
			     adapter->state);
		return -1;
	}

	switch (adapter->state) {
	case ENA_ADAPTER_STATE_INIT:
	case ENA_ADAPTER_STATE_STOPPED:
		adapter->state = ENA_ADAPTER_STATE_CONFIG;
		break;
	case ENA_ADAPTER_STATE_CONFIG:
		RTE_LOG(WARNING, PMD,
			"Invalid driver state while trying to configure device\n");
		break;
	default:
		break;
	}

	return 0;
}
static void ena_init_rings(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		struct ena_ring *ring = &adapter->tx_ring[i];

		ring->configured = 0;
		ring->type = ENA_RING_TYPE_TX;
		ring->adapter = adapter;
		ring->id = i;
		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
	}

	for (i = 0; i < adapter->num_queues; i++) {
		struct ena_ring *ring = &adapter->rx_ring[i];

		ring->configured = 0;
		ring->type = ENA_RING_TYPE_RX;
		ring->adapter = adapter;
		ring->id = i;
	}
}
static void ena_infos_get(struct rte_eth_dev *dev,
			  struct rte_eth_dev_info *dev_info)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	struct ena_com_dev_get_features_ctx feat;
	uint32_t rx_feat = 0, tx_feat = 0;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device");
	adapter = (struct ena_adapter *)(dev->data->dev_private);

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device");

	dev_info->speed_capa =
			ETH_LINK_SPEED_1G   |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_5G   |
			ETH_LINK_SPEED_10G  |
			ETH_LINK_SPEED_25G  |
			ETH_LINK_SPEED_40G  |
			ETH_LINK_SPEED_50G  |
			ETH_LINK_SPEED_100G;

	/* Get supported features from HW */
	rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
	if (unlikely(rc)) {
		RTE_LOG(ERR, PMD,
			"Cannot get attribute for ena device rc= %d\n", rc);
		return;
	}

	/* Set Tx & Rx features available for device */
	if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		tx_feat	|= DEV_TX_OFFLOAD_TCP_TSO;

	if (feat.offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
			DEV_TX_OFFLOAD_UDP_CKSUM |
			DEV_TX_OFFLOAD_TCP_CKSUM;

	if (feat.offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM |
			DEV_RX_OFFLOAD_TCP_CKSUM;

	/* Inform framework about available features */
	dev_info->rx_offload_capa = rx_feat;
	dev_info->tx_offload_capa = tx_feat;

	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
	dev_info->max_rx_pktlen = adapter->max_mtu;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_queues = adapter->num_queues;
	dev_info->max_tx_queues = adapter->num_queues;
	dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
}
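/* RX burst: for each completed descriptor chain build an mbuf chain,
 * propagate checksum/protocol flags, and hand the head mbuf to the
 * application. Refilling of free descriptors is batched (see
 * ENA_RING_DESCS_RATIO) to limit doorbells and memory barriers.
 */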
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
	unsigned int ring_size = rx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	uint16_t next_to_clean = rx_ring->next_to_clean;
	uint16_t desc_in_use = 0;
	unsigned int recv_idx = 0;
	struct rte_mbuf *mbuf = NULL;
	struct rte_mbuf *mbuf_head = NULL;
	struct rte_mbuf *mbuf_prev = NULL;
	struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
	unsigned int completed;

	struct ena_com_rx_ctx ena_rx_ctx;
	int rc = 0;

	/* Check adapter state */
	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		RTE_LOG(ALERT, PMD,
			"Trying to receive pkts while device is NOT running\n");
		return 0;
	}

	desc_in_use = rx_ring->next_to_use - next_to_clean;
	if (unlikely(nb_pkts > desc_in_use))
		nb_pkts = desc_in_use;

	for (completed = 0; completed < nb_pkts; completed++) {
		int segments = 0;

		ena_rx_ctx.max_bufs = rx_ring->ring_size;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.descs = 0;
		/* receive packet context */
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc)) {
			RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
			return 0;
		}

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		while (segments < ena_rx_ctx.descs) {
			mbuf = rx_buff_info[next_to_clean & ring_mask];
			mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->refcnt = 1;
			mbuf->next = NULL;
			if (segments == 0) {
				mbuf->nb_segs = ena_rx_ctx.descs;
				mbuf->port = rx_ring->port_id;
				mbuf->pkt_len = 0;
				mbuf_head = mbuf;
			} else {
				/* for multi-segment pkts create mbuf chain */
				mbuf_prev->next = mbuf;
			}
			mbuf_head->pkt_len += mbuf->data_len;

			mbuf_prev = mbuf;
			segments++;
			next_to_clean++;
		}

		/* fill mbuf attributes if any */
		ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
		mbuf_head->hash.rss = (uint32_t)rx_ring->id;

		/* pass to DPDK application head mbuf */
		rx_pkts[recv_idx] = mbuf_head;
		recv_idx++;
	}

	/* Burst refill to save doorbells, memory barriers, const interval */
	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);

	rx_ring->next_to_clean = next_to_clean;

	return recv_idx;
}
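/* TX burst: prepare a TX context per mbuf chain (optionally pushing the
 * first bytes inline for LLQ placement), submit the descriptors, ring
 * the doorbell, then reclaim completed requests and free their mbufs.
 */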
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	uint16_t next_to_use = tx_ring->next_to_use;
	uint16_t next_to_clean = tx_ring->next_to_clean;
	struct rte_mbuf *mbuf;
	unsigned int ring_size = tx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_tx_buffer *tx_info;
	struct ena_com_buf *ebuf;
	uint16_t rc, req_id, total_tx_descs = 0;
	uint16_t sent_idx = 0, empty_tx_reqs;
	int nb_hw_desc;

	/* Check adapter state */
	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		RTE_LOG(ALERT, PMD,
			"Trying to xmit pkts while device is NOT running\n");
		return 0;
	}

	empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
	if (nb_pkts > empty_tx_reqs)
		nb_pkts = empty_tx_reqs;

	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
		mbuf = tx_pkts[sent_idx];

		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
		tx_info = &tx_ring->tx_buffer_info[req_id];
		tx_info->mbuf = mbuf;
		tx_info->num_of_bufs = 0;
		ebuf = tx_info->bufs;

		/* Prepare TX context */
		memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
		memset(&ena_tx_ctx.ena_meta, 0x0,
		       sizeof(struct ena_com_tx_meta));
		ena_tx_ctx.ena_bufs = ebuf;
		ena_tx_ctx.req_id = req_id;
		if (tx_ring->tx_mem_queue_type ==
				ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			/* prepare the push buffer with
			 * virtual address of the data
			 */
			ena_tx_ctx.header_len =
				RTE_MIN(mbuf->data_len,
					tx_ring->tx_max_header_size);
			ena_tx_ctx.push_header =
				(void *)((char *)mbuf->buf_addr +
					 mbuf->data_off);
		} /* there's no else as we take advantage of memset zeroing */

		/* Set TX offloads flags, if applicable */
		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx);

		if (unlikely(mbuf->ol_flags &
			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
			rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);

		rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);

		/* Process first segment taking into
		 * consideration pushed header
		 */
		if (mbuf->data_len > ena_tx_ctx.header_len) {
			ebuf->paddr = mbuf->buf_physaddr +
				      mbuf->data_off +
				      ena_tx_ctx.header_len;
			ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}

		while ((mbuf = mbuf->next) != NULL) {
			ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off;
			ebuf->len = mbuf->data_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}

		ena_tx_ctx.num_bufs = tx_info->num_of_bufs;

		/* Write data to device */
		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
					&ena_tx_ctx, &nb_hw_desc);
		if (unlikely(rc))
			break;

		tx_info->tx_descs = nb_hw_desc;

		next_to_use++;
	}

	/* If there are ready packets to be xmitted... */
	if (sent_idx > 0) {
		/* ...let HW do its best :-) */
		rte_wmb();
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);

		tx_ring->next_to_use = next_to_use;
	}

	/* Clear complete packets */
	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
					  &req_id) >= 0) {
		/* Get Tx info & store how many descs were processed */
		tx_info = &tx_ring->tx_buffer_info[req_id];
		total_tx_descs += tx_info->tx_descs;

		/* Free whole mbuf chain */
		mbuf = tx_info->mbuf;
		rte_pktmbuf_free(mbuf);

		/* Put back descriptor to the ring for reuse */
		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
		next_to_clean++;

		/* If too many descs to clean, leave it for another run */
		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
			break;
	}

	if (total_tx_descs > 0) {
		/* acknowledge completion of sent packets */
		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
		tx_ring->next_to_clean = next_to_clean;
	}

	return sent_idx;
}
static struct eth_driver rte_ena_pmd = {
	.pci_drv = {
		.id_table = pci_id_ena_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_ena_dev_init,
	.dev_private_size = sizeof(struct ena_adapter),
};

RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);