1 | /*- |
2 | * BSD LICENSE | |
3 | * | |
4 | * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates. | |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * | |
11 | * * Redistributions of source code must retain the above copyright | |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * * Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in | |
15 | * the documentation and/or other materials provided with the | |
16 | * distribution. | |
17 | * * Neither the name of copyright holder nor the names of its | |
18 | * contributors may be used to endorse or promote products derived | |
19 | * from this software without specific prior written permission. | |
20 | * | |
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
24 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
25 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
26 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
27 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
28 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
29 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
30 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
31 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
32 | */ | |
33 | ||
34 | #include <rte_ether.h> | |
35 | #include <rte_ethdev.h> | |
36 | #include <rte_tcp.h> | |
37 | #include <rte_atomic.h> | |
38 | #include <rte_dev.h> | |
39 | #include <rte_errno.h> | |
40 | #include <rte_version.h> | |
41 | #include <rte_eal_memconfig.h> | |
42 | ||
43 | #include "ena_ethdev.h" | |
44 | #include "ena_logs.h" | |
45 | #include "ena_platform.h" | |
46 | #include "ena_com.h" | |
47 | #include "ena_eth_com.h" | |
48 | ||
49 | #include <ena_common_defs.h> | |
50 | #include <ena_regs_defs.h> | |
51 | #include <ena_admin_defs.h> | |
52 | #include <ena_eth_io_defs.h> | |
53 | ||
54 | #define DRV_MODULE_VER_MAJOR 1 | |
55 | #define DRV_MODULE_VER_MINOR 0 | |
56 | #define DRV_MODULE_VER_SUBMINOR 0 | |
57 | ||
58 | #define ENA_IO_TXQ_IDX(q) (2 * (q)) | |
59 | #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) | |
60 | /* reverse version of ENA_IO_RXQ_IDX */ | |
61 | #define ENA_IO_RXQ_IDX_REV(q) (((q) - 1) / 2) | |
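/*
 * Note: Tx and Rx rings share the device I/O queue id space in an
 * interleaved layout, e.g. queue pair 0 maps to qids 0 (Tx) and 1 (Rx),
 * pair 1 to qids 2 and 3, and ENA_IO_RXQ_IDX_REV(3) recovers Rx queue 1.
 */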
62 | ||
63 | /* While processing submitted and completed descriptors (rx and tx path | |
64 | * respectively) in a loop it is desirable to: | |
65 | * - perform batch submissions while populating the submission queue | |
66 | * - avoid blocking transmission of other packets during cleanup phase | |
67 | * Hence the utilization ratio of 1/8 of a queue size. | |
68 | */ | |
69 | #define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8) | |
70 | ||
71 | #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) | |
72 | #define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift)) | |
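/*
 * Note: __MERGE_64B_H_L() recombines 64-bit counters that the device reports
 * as separate 32-bit halves (e.g. rx_pkts_high/rx_pkts_low in ena_stats_get()
 * below), and TEST_BIT() checks one bit of the 64-bit RETA group masks.
 */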
73 | ||
74 | #define GET_L4_HDR_LEN(mbuf) \ | |
75 | ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \ | |
76 | mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) | |
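/*
 * Note: GET_L4_HDR_LEN() points at the TCP header located l2_len + l3_len
 * bytes into the mbuf and extracts its data-offset field, i.e. the L4 header
 * length expressed in 32-bit words, which is the value stored in
 * ena_meta->l4_hdr_len for TSO below.
 */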
77 | ||
78 | #define ENA_RX_RSS_TABLE_LOG_SIZE 7 | |
79 | #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) | |
80 | #define ENA_HASH_KEY_SIZE 40 | |
81 | #define ENA_ETH_SS_STATS 0xFF | |
82 | #define ETH_GSTRING_LEN 32 | |
83 | ||
84 | #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) | |
85 | ||
86 | enum ethtool_stringset { | |
87 | ETH_SS_TEST = 0, | |
88 | ETH_SS_STATS, | |
89 | }; | |
90 | ||
91 | struct ena_stats { | |
92 | char name[ETH_GSTRING_LEN]; | |
93 | int stat_offset; | |
94 | }; | |
95 | ||
96 | #define ENA_STAT_ENA_COM_ENTRY(stat) { \ | |
97 | .name = #stat, \ | |
98 | .stat_offset = offsetof(struct ena_com_stats_admin, stat) \ | |
99 | } | |
100 | ||
101 | #define ENA_STAT_ENTRY(stat, stat_type) { \ | |
102 | .name = #stat, \ | |
103 | .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ | |
104 | } | |
105 | ||
106 | #define ENA_STAT_RX_ENTRY(stat) \ | |
107 | ENA_STAT_ENTRY(stat, rx) | |
108 | ||
109 | #define ENA_STAT_TX_ENTRY(stat) \ | |
110 | ENA_STAT_ENTRY(stat, tx) | |
111 | ||
112 | #define ENA_STAT_GLOBAL_ENTRY(stat) \ | |
113 | ENA_STAT_ENTRY(stat, dev) | |
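/*
 * Note: the ENA_STAT_*_ENTRY() helpers pair a stringified field name with its
 * byte offset inside the matching stats structure; e.g.
 * ENA_STAT_TX_ENTRY(doorbells) expands (roughly) to { .name = "doorbells",
 * .stat_offset = offsetof(struct ena_stats_tx, doorbells) }.
 */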
114 | ||
115 | static const struct ena_stats ena_stats_global_strings[] = { | |
116 | ENA_STAT_GLOBAL_ENTRY(tx_timeout), | |
117 | ENA_STAT_GLOBAL_ENTRY(io_suspend), | |
118 | ENA_STAT_GLOBAL_ENTRY(io_resume), | |
119 | ENA_STAT_GLOBAL_ENTRY(wd_expired), | |
120 | ENA_STAT_GLOBAL_ENTRY(interface_up), | |
121 | ENA_STAT_GLOBAL_ENTRY(interface_down), | |
122 | ENA_STAT_GLOBAL_ENTRY(admin_q_pause), | |
123 | }; | |
124 | ||
125 | static const struct ena_stats ena_stats_tx_strings[] = { | |
126 | ENA_STAT_TX_ENTRY(cnt), | |
127 | ENA_STAT_TX_ENTRY(bytes), | |
128 | ENA_STAT_TX_ENTRY(queue_stop), | |
129 | ENA_STAT_TX_ENTRY(queue_wakeup), | |
130 | ENA_STAT_TX_ENTRY(dma_mapping_err), | |
131 | ENA_STAT_TX_ENTRY(linearize), | |
132 | ENA_STAT_TX_ENTRY(linearize_failed), | |
133 | ENA_STAT_TX_ENTRY(tx_poll), | |
134 | ENA_STAT_TX_ENTRY(doorbells), | |
135 | ENA_STAT_TX_ENTRY(prepare_ctx_err), | |
136 | ENA_STAT_TX_ENTRY(missing_tx_comp), | |
137 | ENA_STAT_TX_ENTRY(bad_req_id), | |
138 | }; | |
139 | ||
140 | static const struct ena_stats ena_stats_rx_strings[] = { | |
141 | ENA_STAT_RX_ENTRY(cnt), | |
142 | ENA_STAT_RX_ENTRY(bytes), | |
143 | ENA_STAT_RX_ENTRY(refil_partial), | |
144 | ENA_STAT_RX_ENTRY(bad_csum), | |
145 | ENA_STAT_RX_ENTRY(page_alloc_fail), | |
146 | ENA_STAT_RX_ENTRY(skb_alloc_fail), | |
147 | ENA_STAT_RX_ENTRY(dma_mapping_err), | |
148 | ENA_STAT_RX_ENTRY(bad_desc_num), | |
149 | ENA_STAT_RX_ENTRY(small_copy_len_pkt), | |
150 | }; | |
151 | ||
152 | static const struct ena_stats ena_stats_ena_com_strings[] = { | |
153 | ENA_STAT_ENA_COM_ENTRY(aborted_cmd), | |
154 | ENA_STAT_ENA_COM_ENTRY(submitted_cmd), | |
155 | ENA_STAT_ENA_COM_ENTRY(completed_cmd), | |
156 | ENA_STAT_ENA_COM_ENTRY(out_of_space), | |
157 | ENA_STAT_ENA_COM_ENTRY(no_completion), | |
158 | }; | |
159 | ||
160 | #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) | |
161 | #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) | |
162 | #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) | |
163 | #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) | |
164 | ||
165 | /** Vendor ID used by Amazon devices */ | |
166 | #define PCI_VENDOR_ID_AMAZON 0x1D0F | |
167 | /** Amazon devices */ | |
168 | #define PCI_DEVICE_ID_ENA_VF 0xEC20 | |
169 | #define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 | |
170 | ||
171 | static struct rte_pci_id pci_id_ena_map[] = { | |
172 | { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, | |
173 | { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) }, | |
174 | { .device_id = 0 }, | |
175 | }; | |
176 | ||
177 | static int ena_device_init(struct ena_com_dev *ena_dev, | |
178 | struct ena_com_dev_get_features_ctx *get_feat_ctx); | |
179 | static int ena_dev_configure(struct rte_eth_dev *dev); | |
180 | static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, | |
181 | uint16_t nb_pkts); | |
182 | static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, | |
183 | uint16_t nb_desc, unsigned int socket_id, | |
184 | const struct rte_eth_txconf *tx_conf); | |
185 | static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, | |
186 | uint16_t nb_desc, unsigned int socket_id, | |
187 | const struct rte_eth_rxconf *rx_conf, | |
188 | struct rte_mempool *mp); | |
189 | static uint16_t eth_ena_recv_pkts(void *rx_queue, | |
190 | struct rte_mbuf **rx_pkts, uint16_t nb_pkts); | |
191 | static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); | |
192 | static void ena_init_rings(struct ena_adapter *adapter); | |
193 | static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); | |
194 | static int ena_start(struct rte_eth_dev *dev); | |
195 | static void ena_close(struct rte_eth_dev *dev); | |
196 | static void ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); | |
197 | static void ena_rx_queue_release_all(struct rte_eth_dev *dev); | |
198 | static void ena_tx_queue_release_all(struct rte_eth_dev *dev); | |
199 | static void ena_rx_queue_release(void *queue); | |
200 | static void ena_tx_queue_release(void *queue); | |
201 | static void ena_rx_queue_release_bufs(struct ena_ring *ring); | |
202 | static void ena_tx_queue_release_bufs(struct ena_ring *ring); | |
203 | static int ena_link_update(struct rte_eth_dev *dev, | |
204 | __rte_unused int wait_to_complete); | |
205 | static int ena_queue_restart(struct ena_ring *ring); | |
206 | static int ena_queue_restart_all(struct rte_eth_dev *dev, | |
207 | enum ena_ring_type ring_type); | |
208 | static void ena_stats_restart(struct rte_eth_dev *dev); | |
209 | static void ena_infos_get(__rte_unused struct rte_eth_dev *dev, | |
210 | struct rte_eth_dev_info *dev_info); | |
211 | static int ena_rss_reta_update(struct rte_eth_dev *dev, | |
212 | struct rte_eth_rss_reta_entry64 *reta_conf, | |
213 | uint16_t reta_size); | |
214 | static int ena_rss_reta_query(struct rte_eth_dev *dev, | |
215 | struct rte_eth_rss_reta_entry64 *reta_conf, | |
216 | uint16_t reta_size); | |
217 | static int ena_get_sset_count(struct rte_eth_dev *dev, int sset); | |
218 | ||
219 | static struct eth_dev_ops ena_dev_ops = { | |
220 | .dev_configure = ena_dev_configure, | |
221 | .dev_infos_get = ena_infos_get, | |
222 | .rx_queue_setup = ena_rx_queue_setup, | |
223 | .tx_queue_setup = ena_tx_queue_setup, | |
224 | .dev_start = ena_start, | |
225 | .link_update = ena_link_update, | |
226 | .stats_get = ena_stats_get, | |
227 | .mtu_set = ena_mtu_set, | |
228 | .rx_queue_release = ena_rx_queue_release, | |
229 | .tx_queue_release = ena_tx_queue_release, | |
230 | .dev_close = ena_close, | |
231 | .reta_update = ena_rss_reta_update, | |
232 | .reta_query = ena_rss_reta_query, | |
233 | }; | |
234 | ||
235 | #define NUMA_NO_NODE SOCKET_ID_ANY | |
236 | ||
237 | static inline int ena_cpu_to_node(int cpu) | |
238 | { | |
239 | struct rte_config *config = rte_eal_get_configuration(); | |
240 | ||
241 | if (likely(cpu < RTE_MAX_MEMZONE)) | |
242 | return config->mem_config->memzone[cpu].socket_id; | |
243 | ||
244 | return NUMA_NO_NODE; | |
245 | } | |
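/*
 * Note: this is a best-effort heuristic only; the queue index is used as a
 * memzone index and that memzone's socket_id is taken as the NUMA node, while
 * indices beyond RTE_MAX_MEMZONE fall back to SOCKET_ID_ANY.
 */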
246 | ||
247 | static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, | |
248 | struct ena_com_rx_ctx *ena_rx_ctx) | |
249 | { | |
250 | uint64_t ol_flags = 0; | |
251 | ||
252 | if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) | |
253 | ol_flags |= PKT_TX_TCP_CKSUM; | |
254 | else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP) | |
255 | ol_flags |= PKT_TX_UDP_CKSUM; | |
256 | ||
257 | if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) | |
258 | ol_flags |= PKT_TX_IPV4; | |
259 | else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) | |
260 | ol_flags |= PKT_TX_IPV6; | |
261 | ||
262 | if (unlikely(ena_rx_ctx->l4_csum_err)) | |
263 | ol_flags |= PKT_RX_L4_CKSUM_BAD; | |
264 | if (unlikely(ena_rx_ctx->l3_csum_err)) | |
265 | ol_flags |= PKT_RX_IP_CKSUM_BAD; | |
266 | ||
267 | mbuf->ol_flags = ol_flags; | |
268 | } | |
269 | ||
270 | static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, | |
271 | struct ena_com_tx_ctx *ena_tx_ctx) | |
272 | { | |
273 | struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; | |
274 | ||
275 | if (mbuf->ol_flags & | |
276 | (PKT_TX_L4_MASK | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG)) { | |
277 | /* check if TSO is required */ | |
278 | if (mbuf->ol_flags & PKT_TX_TCP_SEG) { | |
279 | ena_tx_ctx->tso_enable = true; | |
280 | ||
281 | ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf); | |
282 | } | |
283 | ||
284 | /* check if L3 checksum is needed */ | |
285 | if (mbuf->ol_flags & PKT_TX_IP_CKSUM) | |
286 | ena_tx_ctx->l3_csum_enable = true; | |
287 | ||
288 | if (mbuf->ol_flags & PKT_TX_IPV6) { | |
289 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; | |
290 | } else { | |
291 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; | |
292 | ||
293 | /* set don't fragment (DF) flag */ | |
294 | if (mbuf->packet_type & | |
295 | (RTE_PTYPE_L4_NONFRAG | |
296 | | RTE_PTYPE_INNER_L4_NONFRAG)) | |
297 | ena_tx_ctx->df = true; | |
298 | } | |
299 | ||
300 | /* check if L4 checksum is needed */ | |
301 | switch (mbuf->ol_flags & PKT_TX_L4_MASK) { | |
302 | case PKT_TX_TCP_CKSUM: | |
303 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; | |
304 | ena_tx_ctx->l4_csum_enable = true; | |
305 | break; | |
306 | case PKT_TX_UDP_CKSUM: | |
307 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; | |
308 | ena_tx_ctx->l4_csum_enable = true; | |
309 | break; | |
310 | default: | |
311 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN; | |
312 | ena_tx_ctx->l4_csum_enable = false; | |
313 | break; | |
314 | } | |
315 | ||
316 | ena_meta->mss = mbuf->tso_segsz; | |
317 | ena_meta->l3_hdr_len = mbuf->l3_len; | |
318 | ena_meta->l3_hdr_offset = mbuf->l2_len; | |
319 | /* this param needed only for TSO */ | |
320 | ena_meta->l3_outer_hdr_len = 0; | |
321 | ena_meta->l3_outer_hdr_offset = 0; | |
322 | ||
323 | ena_tx_ctx->meta_valid = true; | |
324 | } else { | |
325 | ena_tx_ctx->meta_valid = false; | |
326 | } | |
327 | } | |
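/*
 * Illustrative example (not part of the original sources): for a TSO'd
 * TCP/IPv4 mbuf carrying PKT_TX_TCP_SEG | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM,
 * the context built above ends up with tso_enable plus both checksum offloads
 * set, l3_proto = IPV4, mss taken from mbuf->tso_segsz, and the L3 header
 * length/offset taken from mbuf->l3_len and mbuf->l2_len.
 */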
328 | ||
329 | static void ena_config_host_info(struct ena_com_dev *ena_dev) | |
330 | { | |
331 | struct ena_admin_host_info *host_info; | |
332 | int rc; | |
333 | ||
334 | /* Allocate only the host info */ | |
335 | rc = ena_com_allocate_host_info(ena_dev); | |
336 | if (rc) { | |
337 | RTE_LOG(ERR, PMD, "Cannot allocate host info\n"); | |
338 | return; | |
339 | } | |
340 | ||
341 | host_info = ena_dev->host_attr.host_info; | |
342 | ||
343 | host_info->os_type = ENA_ADMIN_OS_DPDK; | |
344 | host_info->kernel_ver = RTE_VERSION; | |
345 | snprintf((char *)host_info->kernel_ver_str, | |
346 | sizeof(host_info->kernel_ver_str), | |
347 | "%s", rte_version()); | |
348 | host_info->os_dist = RTE_VERSION; | |
349 | snprintf((char *)host_info->os_dist_str, | |
350 | sizeof(host_info->os_dist_str), | |
351 | "%s", rte_version()); | |
352 | host_info->driver_version = | |
353 | (DRV_MODULE_VER_MAJOR) | | |
354 | (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | | |
355 | (DRV_MODULE_VER_SUBMINOR << | |
356 | ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); | |
357 | ||
358 | rc = ena_com_set_host_attributes(ena_dev); | |
359 | if (rc) { | |
360 | if (rc == -EPERM) | |
361 | RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); | |
362 | else | |
363 | RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); | |
364 | ||
365 | goto err; | |
366 | } | |
367 | ||
368 | return; | |
369 | ||
370 | err: | |
371 | ena_com_delete_host_info(ena_dev); | |
372 | } | |
373 | ||
374 | static int | |
375 | ena_get_sset_count(struct rte_eth_dev *dev, int sset) | |
376 | { | |
377 | if (sset != ETH_SS_STATS) | |
378 | return -EOPNOTSUPP; | |
379 | ||
380 | /* Workaround for clang: | |
381 | * touch internal structures to prevent | |
382 | * compiler error | |
383 | */ | |
384 | ENA_TOUCH(ena_stats_global_strings); | |
385 | ENA_TOUCH(ena_stats_tx_strings); | |
386 | ENA_TOUCH(ena_stats_rx_strings); | |
387 | ENA_TOUCH(ena_stats_ena_com_strings); | |
388 | ||
389 | return dev->data->nb_tx_queues * | |
390 | (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + | |
391 | ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; | |
392 | } | |
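/*
 * Note: the count returned above is what sizes the debug area; the per-queue
 * Tx and Rx stat strings are counted once per configured Tx queue, on top of
 * the global and admin-queue (ena_com) stats.
 */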
393 | ||
394 | static void ena_config_debug_area(struct ena_adapter *adapter) | |
395 | { | |
396 | u32 debug_area_size; | |
397 | int rc, ss_count; | |
398 | ||
399 | ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS); | |
400 | if (ss_count <= 0) { | |
401 | RTE_LOG(ERR, PMD, "SS count is zero or negative\n"); | |
402 | return; | |
403 | } | |
404 | ||
405 | /* allocate 32 bytes for each string and 64 bits for each value */ | |
406 | debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; | |
407 | ||
408 | rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); | |
409 | if (rc) { | |
410 | RTE_LOG(ERR, PMD, "Cannot allocate debug area\n"); | |
411 | return; | |
412 | } | |
413 | ||
414 | rc = ena_com_set_host_attributes(&adapter->ena_dev); | |
415 | if (rc) { | |
416 | if (rc == -EPERM) | |
417 | RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); | |
418 | else | |
419 | RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); | |
420 | goto err; | |
421 | } | |
422 | ||
423 | return; | |
424 | err: | |
425 | ena_com_delete_debug_area(&adapter->ena_dev); | |
426 | } | |
427 | ||
428 | static void ena_close(struct rte_eth_dev *dev) | |
429 | { | |
430 | struct ena_adapter *adapter = | |
431 | (struct ena_adapter *)(dev->data->dev_private); | |
432 | ||
433 | adapter->state = ENA_ADAPTER_STATE_STOPPED; | |
434 | ||
435 | ena_rx_queue_release_all(dev); | |
436 | ena_tx_queue_release_all(dev); | |
437 | } | |
438 | ||
439 | static int ena_rss_reta_update(struct rte_eth_dev *dev, | |
440 | struct rte_eth_rss_reta_entry64 *reta_conf, | |
441 | uint16_t reta_size) | |
442 | { | |
443 | struct ena_adapter *adapter = | |
444 | (struct ena_adapter *)(dev->data->dev_private); | |
445 | struct ena_com_dev *ena_dev = &adapter->ena_dev; | |
446 | int ret, i; | |
447 | u16 entry_value; | |
448 | int conf_idx; | |
449 | int idx; | |
450 | ||
451 | if ((reta_size == 0) || (reta_conf == NULL)) | |
452 | return -EINVAL; | |
453 | ||
454 | if (reta_size > ENA_RX_RSS_TABLE_SIZE) { | |
455 | RTE_LOG(WARNING, PMD, | |
456 | "indirection table %d is bigger than supported (%d)\n", | |
457 | reta_size, ENA_RX_RSS_TABLE_SIZE); | |
458 | ret = -EINVAL; | |
459 | goto err; | |
460 | } | |
461 | ||
462 | for (i = 0 ; i < reta_size ; i++) { | |
463 | /* Each reta_conf group covers 64 entries, | |
464 | * so two groups of 64 are used to support the 128-entry table. | |
465 | */ | |
466 | conf_idx = i / RTE_RETA_GROUP_SIZE; | |
467 | idx = i % RTE_RETA_GROUP_SIZE; | |
468 | if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { | |
469 | entry_value = | |
470 | ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); | |
471 | ret = ena_com_indirect_table_fill_entry(ena_dev, | |
472 | i, | |
473 | entry_value); | |
474 | if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { | |
475 | RTE_LOG(ERR, PMD, | |
476 | "Cannot fill indirect table\n"); | |
477 | ret = -ENOTSUP; | |
478 | goto err; | |
479 | } | |
480 | } | |
481 | } | |
482 | ||
483 | ret = ena_com_indirect_table_set(ena_dev); | |
484 | if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { | |
485 | RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); | |
486 | ret = -ENOTSUP; | |
487 | goto err; | |
488 | } | |
489 | ||
490 | RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n", | |
491 | __func__, reta_size, adapter->rte_dev->data->port_id); | |
492 | err: | |
493 | return ret; | |
494 | } | |
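/*
 * Illustrative example (derived from the macros above): a request that sets
 * RETA entry 5 to Rx queue 2 lands in reta_conf[0] at idx 5 and is written to
 * the device as ENA_IO_RXQ_IDX(2) == 5, i.e. the device qid of that Rx ring
 * rather than the DPDK queue number.
 */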
495 | ||
496 | /* Query redirection table. */ | |
497 | static int ena_rss_reta_query(struct rte_eth_dev *dev, | |
498 | struct rte_eth_rss_reta_entry64 *reta_conf, | |
499 | uint16_t reta_size) | |
500 | { | |
501 | struct ena_adapter *adapter = | |
502 | (struct ena_adapter *)(dev->data->dev_private); | |
503 | struct ena_com_dev *ena_dev = &adapter->ena_dev; | |
504 | int ret; | |
505 | int i; | |
506 | u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; | |
507 | int reta_conf_idx; | |
508 | int reta_idx; | |
509 | ||
510 | if (reta_size == 0 || reta_conf == NULL || | |
511 | (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) | |
512 | return -EINVAL; | |
513 | ||
514 | ret = ena_com_indirect_table_get(ena_dev, indirect_table); | |
515 | if (unlikely(ret && (ret != ENA_COM_PERMISSION))) { | |
516 | RTE_LOG(ERR, PMD, "cannot get indirect table\n"); | |
517 | ret = -ENOTSUP; | |
518 | goto err; | |
519 | } | |
520 | ||
521 | for (i = 0 ; i < reta_size ; i++) { | |
522 | reta_conf_idx = i / RTE_RETA_GROUP_SIZE; | |
523 | reta_idx = i % RTE_RETA_GROUP_SIZE; | |
524 | if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) | |
525 | reta_conf[reta_conf_idx].reta[reta_idx] = | |
526 | ENA_IO_RXQ_IDX_REV(indirect_table[i]); | |
527 | } | |
528 | err: | |
529 | return ret; | |
530 | } | |
531 | ||
532 | static int ena_rss_init_default(struct ena_adapter *adapter) | |
533 | { | |
534 | struct ena_com_dev *ena_dev = &adapter->ena_dev; | |
535 | uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; | |
536 | int rc, i; | |
537 | u32 val; | |
538 | ||
539 | rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); | |
540 | if (unlikely(rc)) { | |
541 | RTE_LOG(ERR, PMD, "Cannot init indirect table\n"); | |
542 | goto err_rss_init; | |
543 | } | |
544 | ||
545 | for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { | |
546 | val = i % nb_rx_queues; | |
547 | rc = ena_com_indirect_table_fill_entry(ena_dev, i, | |
548 | ENA_IO_RXQ_IDX(val)); | |
549 | if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { | |
550 | RTE_LOG(ERR, PMD, "Cannot fill indirect table\n"); | |
551 | goto err_fill_indir; | |
552 | } | |
553 | } | |
554 | ||
555 | rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, | |
556 | ENA_HASH_KEY_SIZE, 0xFFFFFFFF); | |
557 | if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { | |
558 | RTE_LOG(INFO, PMD, "Cannot fill hash function\n"); | |
559 | goto err_fill_indir; | |
560 | } | |
561 | ||
562 | rc = ena_com_set_default_hash_ctrl(ena_dev); | |
563 | if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { | |
564 | RTE_LOG(INFO, PMD, "Cannot fill hash control\n"); | |
565 | goto err_fill_indir; | |
566 | } | |
567 | ||
568 | rc = ena_com_indirect_table_set(ena_dev); | |
569 | if (unlikely(rc && (rc != ENA_COM_PERMISSION))) { | |
570 | RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); | |
571 | goto err_fill_indir; | |
572 | } | |
573 | RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n", | |
574 | adapter->rte_dev->data->port_id); | |
575 | ||
576 | return 0; | |
577 | ||
578 | err_fill_indir: | |
579 | ena_com_rss_destroy(ena_dev); | |
580 | err_rss_init: | |
581 | ||
582 | return rc; | |
583 | } | |
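/*
 * Note: the default RSS setup above spreads the 128 indirection entries
 * round-robin over the configured Rx queues and programs a CRC32 hash
 * function; ENA_COM_PERMISSION return codes are tolerated as "not supported
 * by this device" rather than treated as hard failures.
 */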
584 | ||
585 | static void ena_rx_queue_release_all(struct rte_eth_dev *dev) | |
586 | { | |
587 | struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues; | |
588 | int nb_queues = dev->data->nb_rx_queues; | |
589 | int i; | |
590 | ||
591 | for (i = 0; i < nb_queues; i++) | |
592 | ena_rx_queue_release(queues[i]); | |
593 | } | |
594 | ||
595 | static void ena_tx_queue_release_all(struct rte_eth_dev *dev) | |
596 | { | |
597 | struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; | |
598 | int nb_queues = dev->data->nb_tx_queues; | |
599 | int i; | |
600 | ||
601 | for (i = 0; i < nb_queues; i++) | |
602 | ena_tx_queue_release(queues[i]); | |
603 | } | |
604 | ||
605 | static void ena_rx_queue_release(void *queue) | |
606 | { | |
607 | struct ena_ring *ring = (struct ena_ring *)queue; | |
608 | struct ena_adapter *adapter = ring->adapter; | |
609 | int ena_qid; | |
610 | ||
611 | ena_assert_msg(ring->configured, | |
612 | "API violation - releasing not configured queue"); | |
613 | ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, | |
614 | "API violation"); | |
615 | ||
616 | /* Destroy HW queue */ | |
617 | ena_qid = ENA_IO_RXQ_IDX(ring->id); | |
618 | ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid); | |
619 | ||
620 | /* Free all bufs */ | |
621 | ena_rx_queue_release_bufs(ring); | |
622 | ||
623 | /* Free ring resources */ | |
624 | if (ring->rx_buffer_info) | |
625 | rte_free(ring->rx_buffer_info); | |
626 | ring->rx_buffer_info = NULL; | |
627 | ||
628 | ring->configured = 0; | |
629 | ||
630 | RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n", | |
631 | ring->port_id, ring->id); | |
632 | } | |
633 | ||
634 | static void ena_tx_queue_release(void *queue) | |
635 | { | |
636 | struct ena_ring *ring = (struct ena_ring *)queue; | |
637 | struct ena_adapter *adapter = ring->adapter; | |
638 | int ena_qid; | |
639 | ||
640 | ena_assert_msg(ring->configured, | |
641 | "API violation. Releasing not configured queue"); | |
642 | ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, | |
643 | "API violation"); | |
644 | ||
645 | /* Destroy HW queue */ | |
646 | ena_qid = ENA_IO_TXQ_IDX(ring->id); | |
647 | ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid); | |
648 | ||
649 | /* Free all bufs */ | |
650 | ena_tx_queue_release_bufs(ring); | |
651 | ||
652 | /* Free ring resources */ | |
653 | if (ring->tx_buffer_info) | |
654 | rte_free(ring->tx_buffer_info); | |
655 | ||
656 | if (ring->empty_tx_reqs) | |
657 | rte_free(ring->empty_tx_reqs); | |
658 | ||
659 | ring->empty_tx_reqs = NULL; | |
660 | ring->tx_buffer_info = NULL; | |
661 | ||
662 | ring->configured = 0; | |
663 | ||
664 | RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n", | |
665 | ring->port_id, ring->id); | |
666 | } | |
667 | ||
668 | static void ena_rx_queue_release_bufs(struct ena_ring *ring) | |
669 | { | |
670 | unsigned int ring_mask = ring->ring_size - 1; | |
671 | ||
672 | while (ring->next_to_clean != ring->next_to_use) { | |
673 | struct rte_mbuf *m = | |
674 | ring->rx_buffer_info[ring->next_to_clean & ring_mask]; | |
675 | ||
676 | if (m) | |
677 | __rte_mbuf_raw_free(m); | |
678 | ||
679 | ring->next_to_clean++; | |
680 | } | |
681 | } | |
682 | ||
683 | static void ena_tx_queue_release_bufs(struct ena_ring *ring) | |
684 | { | |
685 | unsigned int ring_mask = ring->ring_size - 1; | |
686 | ||
687 | while (ring->next_to_clean != ring->next_to_use) { | |
688 | struct ena_tx_buffer *tx_buf = | |
689 | &ring->tx_buffer_info[ring->next_to_clean & ring_mask]; | |
690 | ||
691 | if (tx_buf->mbuf) | |
692 | rte_pktmbuf_free(tx_buf->mbuf); | |
693 | ||
694 | ring->next_to_clean++; | |
695 | } | |
696 | } | |
697 | ||
698 | static int ena_link_update(struct rte_eth_dev *dev, | |
699 | __rte_unused int wait_to_complete) | |
700 | { | |
701 | struct rte_eth_link *link = &dev->data->dev_link; | |
702 | ||
703 | link->link_status = 1; | |
704 | link->link_speed = ETH_SPEED_NUM_10G; | |
705 | link->link_duplex = ETH_LINK_FULL_DUPLEX; | |
706 | ||
707 | return 0; | |
708 | } | |
709 | ||
710 | static int ena_queue_restart_all(struct rte_eth_dev *dev, | |
711 | enum ena_ring_type ring_type) | |
712 | { | |
713 | struct ena_adapter *adapter = | |
714 | (struct ena_adapter *)(dev->data->dev_private); | |
715 | struct ena_ring *queues = NULL; | |
716 | int i = 0; | |
717 | int rc = 0; | |
718 | ||
719 | queues = (ring_type == ENA_RING_TYPE_RX) ? | |
720 | adapter->rx_ring : adapter->tx_ring; | |
721 | ||
722 | for (i = 0; i < adapter->num_queues; i++) { | |
723 | if (queues[i].configured) { | |
724 | if (ring_type == ENA_RING_TYPE_RX) { | |
725 | ena_assert_msg( | |
726 | dev->data->rx_queues[i] == &queues[i], | |
727 | "Inconsistent state of rx queues\n"); | |
728 | } else { | |
729 | ena_assert_msg( | |
730 | dev->data->tx_queues[i] == &queues[i], | |
731 | "Inconsistent state of tx queues\n"); | |
732 | } | |
733 | ||
734 | rc = ena_queue_restart(&queues[i]); | |
735 | ||
736 | if (rc) { | |
737 | PMD_INIT_LOG(ERR, | |
738 | "failed to restart queue %d type(%d)\n", | |
739 | i, ring_type); | |
740 | return -1; | |
741 | } | |
742 | } | |
743 | } | |
744 | ||
745 | return 0; | |
746 | } | |
747 | ||
748 | static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) | |
749 | { | |
750 | uint32_t max_frame_len = adapter->max_mtu; | |
751 | ||
752 | if (adapter->rte_eth_dev_data->dev_conf.rxmode.jumbo_frame == 1) | |
753 | max_frame_len = | |
754 | adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; | |
755 | ||
756 | return max_frame_len; | |
757 | } | |
758 | ||
759 | static int ena_check_valid_conf(struct ena_adapter *adapter) | |
760 | { | |
761 | uint32_t max_frame_len = ena_get_mtu_conf(adapter); | |
762 | ||
763 | if (max_frame_len > adapter->max_mtu) { | |
764 | PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len); | |
765 | return -1; | |
766 | } | |
767 | ||
768 | return 0; | |
769 | } | |
770 | ||
771 | static int | |
772 | ena_calc_queue_size(struct ena_com_dev *ena_dev, | |
773 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
774 | { | |
775 | uint32_t queue_size = ENA_DEFAULT_RING_SIZE; | |
776 | ||
777 | queue_size = RTE_MIN(queue_size, | |
778 | get_feat_ctx->max_queues.max_cq_depth); | |
779 | queue_size = RTE_MIN(queue_size, | |
780 | get_feat_ctx->max_queues.max_sq_depth); | |
781 | ||
782 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) | |
783 | queue_size = RTE_MIN(queue_size, | |
784 | get_feat_ctx->max_queues.max_llq_depth); | |
785 | ||
786 | /* Round down to power of 2 */ | |
787 | if (!rte_is_power_of_2(queue_size)) | |
788 | queue_size = rte_align32pow2(queue_size >> 1); | |
789 | ||
790 | if (queue_size == 0) { | |
791 | PMD_INIT_LOG(ERR, "Invalid queue size\n"); | |
792 | return -EFAULT; | |
793 | } | |
794 | ||
795 | return queue_size; | |
796 | } | |
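/*
 * Illustrative example (device limits assumed, not taken from any datasheet):
 * if ENA_DEFAULT_RING_SIZE were 1024 and the device reported max SQ/CQ depths
 * of 1000, the minimum is 1000, which is then rounded down to the nearest
 * power of two, giving a queue size of 512.
 */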
797 | ||
798 | static void ena_stats_restart(struct rte_eth_dev *dev) | |
799 | { | |
800 | struct ena_adapter *adapter = | |
801 | (struct ena_adapter *)(dev->data->dev_private); | |
802 | ||
803 | rte_atomic64_init(&adapter->drv_stats->ierrors); | |
804 | rte_atomic64_init(&adapter->drv_stats->oerrors); | |
805 | rte_atomic64_init(&adapter->drv_stats->rx_nombuf); | |
806 | } | |
807 | ||
808 | static void ena_stats_get(struct rte_eth_dev *dev, | |
809 | struct rte_eth_stats *stats) | |
810 | { | |
811 | struct ena_admin_basic_stats ena_stats; | |
812 | struct ena_adapter *adapter = | |
813 | (struct ena_adapter *)(dev->data->dev_private); | |
814 | struct ena_com_dev *ena_dev = &adapter->ena_dev; | |
815 | int rc; | |
816 | ||
817 | if (rte_eal_process_type() != RTE_PROC_PRIMARY) | |
818 | return; | |
819 | ||
820 | memset(&ena_stats, 0, sizeof(ena_stats)); | |
821 | rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); | |
822 | if (unlikely(rc)) { | |
823 | RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA"); | |
824 | return; | |
825 | } | |
826 | ||
827 | /* Set of basic statistics from ENA */ | |
828 | stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, | |
829 | ena_stats.rx_pkts_low); | |
830 | stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, | |
831 | ena_stats.tx_pkts_low); | |
832 | stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, | |
833 | ena_stats.rx_bytes_low); | |
834 | stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, | |
835 | ena_stats.tx_bytes_low); | |
836 | stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high, | |
837 | ena_stats.rx_drops_low); | |
838 | ||
839 | /* Driver related stats */ | |
840 | stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); | |
841 | stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); | |
842 | stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); | |
843 | } | |
844 | ||
845 | static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) | |
846 | { | |
847 | struct ena_adapter *adapter; | |
848 | struct ena_com_dev *ena_dev; | |
849 | int rc = 0; | |
850 | ||
851 | ena_assert_msg(dev->data != NULL, "Uninitialized device"); | |
852 | ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); | |
853 | adapter = (struct ena_adapter *)(dev->data->dev_private); | |
854 | ||
855 | ena_dev = &adapter->ena_dev; | |
856 | ena_assert_msg(ena_dev != NULL, "Uninitialized device"); | |
857 | ||
858 | if (mtu > ena_get_mtu_conf(adapter)) { | |
859 | RTE_LOG(ERR, PMD, | |
860 | "Given MTU (%d) exceeds maximum MTU supported (%d)\n", | |
861 | mtu, ena_get_mtu_conf(adapter)); | |
862 | rc = -EINVAL; | |
863 | goto err; | |
864 | } | |
865 | ||
866 | rc = ena_com_set_dev_mtu(ena_dev, mtu); | |
867 | if (rc) | |
868 | RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu); | |
869 | else | |
870 | RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu); | |
871 | ||
872 | err: | |
873 | return rc; | |
874 | } | |
875 | ||
876 | static int ena_start(struct rte_eth_dev *dev) | |
877 | { | |
878 | struct ena_adapter *adapter = | |
879 | (struct ena_adapter *)(dev->data->dev_private); | |
880 | int rc = 0; | |
881 | ||
882 | if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG || | |
883 | adapter->state == ENA_ADAPTER_STATE_STOPPED)) { | |
884 | PMD_INIT_LOG(ERR, "API violation"); | |
885 | return -1; | |
886 | } | |
887 | ||
888 | rc = ena_check_valid_conf(adapter); | |
889 | if (rc) | |
890 | return rc; | |
891 | ||
892 | rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX); | |
893 | if (rc) | |
894 | return rc; | |
895 | ||
896 | rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX); | |
897 | if (rc) | |
898 | return rc; | |
899 | ||
900 | if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & | |
901 | ETH_MQ_RX_RSS_FLAG) { | |
902 | rc = ena_rss_init_default(adapter); | |
903 | if (rc) | |
904 | return rc; | |
905 | } | |
906 | ||
907 | ena_stats_restart(dev); | |
908 | ||
909 | adapter->state = ENA_ADAPTER_STATE_RUNNING; | |
910 | ||
911 | return 0; | |
912 | } | |
913 | ||
914 | static int ena_queue_restart(struct ena_ring *ring) | |
915 | { | |
916 | int rc; | |
917 | ||
918 | ena_assert_msg(ring->configured == 1, | |
919 | "Trying to restart unconfigured queue\n"); | |
920 | ||
921 | ring->next_to_clean = 0; | |
922 | ring->next_to_use = 0; | |
923 | ||
924 | if (ring->type == ENA_RING_TYPE_TX) | |
925 | return 0; | |
926 | ||
927 | rc = ena_populate_rx_queue(ring, ring->ring_size); | |
928 | if ((unsigned int)rc != ring->ring_size) { | |
929 | PMD_INIT_LOG(ERR, "Failed to populate rx ring!\n"); | |
930 | return (-1); | |
931 | } | |
932 | ||
933 | return 0; | |
934 | } | |
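/*
 * Note: restarting a Tx ring only requires the head/tail indices to be
 * reset, while an Rx ring must additionally be refilled with mbufs for the
 * whole ring; a partial refill is treated as a failure here.
 */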
935 | ||
936 | static int ena_tx_queue_setup(struct rte_eth_dev *dev, | |
937 | uint16_t queue_idx, | |
938 | uint16_t nb_desc, | |
939 | __rte_unused unsigned int socket_id, | |
940 | __rte_unused const struct rte_eth_txconf *tx_conf) | |
941 | { | |
942 | struct ena_com_create_io_ctx ctx = | |
943 | /* policy set to _HOST just to satisfy icc compiler */ | |
944 | { ENA_ADMIN_PLACEMENT_POLICY_HOST, | |
945 | ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 }; | |
946 | struct ena_ring *txq = NULL; | |
947 | struct ena_adapter *adapter = | |
948 | (struct ena_adapter *)(dev->data->dev_private); | |
949 | unsigned int i; | |
950 | int ena_qid; | |
951 | int rc; | |
952 | struct ena_com_dev *ena_dev = &adapter->ena_dev; | |
953 | ||
954 | txq = &adapter->tx_ring[queue_idx]; | |
955 | ||
956 | if (txq->configured) { | |
957 | RTE_LOG(CRIT, PMD, | |
958 | "API violation. Queue %d is already configured\n", | |
959 | queue_idx); | |
960 | return -1; | |
961 | } | |
962 | ||
963 | if (!rte_is_power_of_2(nb_desc)) { | |
964 | RTE_LOG(ERR, PMD, | |
965 | "Unsupported size of RX queue: %d is not a power of 2.", | |
966 | nb_desc); | |
967 | return -EINVAL; | |
968 | } | |
969 | ||
970 | if (nb_desc > adapter->tx_ring_size) { | |
971 | RTE_LOG(ERR, PMD, | |
972 | "Unsupported size of TX queue (max size: %d)\n", | |
973 | adapter->tx_ring_size); | |
974 | return -EINVAL; | |
975 | } | |
976 | ||
977 | ena_qid = ENA_IO_TXQ_IDX(queue_idx); | |
978 | ||
979 | ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; | |
980 | ctx.qid = ena_qid; | |
981 | ctx.msix_vector = -1; /* admin interrupts not used */ | |
982 | ctx.mem_queue_type = ena_dev->tx_mem_queue_type; | |
983 | ctx.queue_size = adapter->tx_ring_size; | |
984 | ctx.numa_node = ena_cpu_to_node(queue_idx); | |
985 | ||
986 | rc = ena_com_create_io_queue(ena_dev, &ctx); | |
987 | if (rc) { | |
988 | RTE_LOG(ERR, PMD, | |
989 | "failed to create io TX queue #%d (qid:%d) rc: %d\n", | |
990 | queue_idx, ena_qid, rc); | |
991 | } | |
992 | txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; | |
993 | txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; | |
994 | ||
995 | rc = ena_com_get_io_handlers(ena_dev, ena_qid, | |
996 | &txq->ena_com_io_sq, | |
997 | &txq->ena_com_io_cq); | |
998 | if (rc) { | |
999 | RTE_LOG(ERR, PMD, | |
1000 | "Failed to get TX queue handlers. TX queue num %d rc: %d\n", | |
1001 | queue_idx, rc); | |
1002 | ena_com_destroy_io_queue(ena_dev, ena_qid); | |
1003 | goto err; | |
1004 | } | |
1005 | ||
1006 | txq->port_id = dev->data->port_id; | |
1007 | txq->next_to_clean = 0; | |
1008 | txq->next_to_use = 0; | |
1009 | txq->ring_size = nb_desc; | |
1010 | ||
1011 | txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", | |
1012 | sizeof(struct ena_tx_buffer) * | |
1013 | txq->ring_size, | |
1014 | RTE_CACHE_LINE_SIZE); | |
1015 | if (!txq->tx_buffer_info) { | |
1016 | RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); | |
1017 | return -ENOMEM; | |
1018 | } | |
1019 | ||
1020 | txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", | |
1021 | sizeof(u16) * txq->ring_size, | |
1022 | RTE_CACHE_LINE_SIZE); | |
1023 | if (!txq->empty_tx_reqs) { | |
1024 | RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); | |
1025 | rte_free(txq->tx_buffer_info); | |
1026 | return -ENOMEM; | |
1027 | } | |
1028 | for (i = 0; i < txq->ring_size; i++) | |
1029 | txq->empty_tx_reqs[i] = i; | |
1030 | ||
1031 | /* Store pointer to this queue in upper layer */ | |
1032 | txq->configured = 1; | |
1033 | dev->data->tx_queues[queue_idx] = txq; | |
1034 | err: | |
1035 | return rc; | |
1036 | } | |
1037 | ||
1038 | static int ena_rx_queue_setup(struct rte_eth_dev *dev, | |
1039 | uint16_t queue_idx, | |
1040 | uint16_t nb_desc, | |
1041 | __rte_unused unsigned int socket_id, | |
1042 | __rte_unused const struct rte_eth_rxconf *rx_conf, | |
1043 | struct rte_mempool *mp) | |
1044 | { | |
1045 | struct ena_com_create_io_ctx ctx = | |
1046 | /* policy set to _HOST just to satisfy icc compiler */ | |
1047 | { ENA_ADMIN_PLACEMENT_POLICY_HOST, | |
1048 | ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 }; | |
1049 | struct ena_adapter *adapter = | |
1050 | (struct ena_adapter *)(dev->data->dev_private); | |
1051 | struct ena_ring *rxq = NULL; | |
1052 | uint16_t ena_qid = 0; | |
1053 | int rc = 0; | |
1054 | struct ena_com_dev *ena_dev = &adapter->ena_dev; | |
1055 | ||
1056 | rxq = &adapter->rx_ring[queue_idx]; | |
1057 | if (rxq->configured) { | |
1058 | RTE_LOG(CRIT, PMD, | |
1059 | "API violation. Queue %d is already configured\n", | |
1060 | queue_idx); | |
1061 | return -1; | |
1062 | } | |
1063 | ||
1064 | if (!rte_is_power_of_2(nb_desc)) { | |
1065 | RTE_LOG(ERR, PMD, | |
1066 | "Unsupported size of TX queue: %d is not a power of 2.", | |
1067 | nb_desc); | |
1068 | return -EINVAL; | |
1069 | } | |
1070 | ||
1071 | if (nb_desc > adapter->rx_ring_size) { | |
1072 | RTE_LOG(ERR, PMD, | |
1073 | "Unsupported size of RX queue (max size: %d)\n", | |
1074 | adapter->rx_ring_size); | |
1075 | return -EINVAL; | |
1076 | } | |
1077 | ||
1078 | ena_qid = ENA_IO_RXQ_IDX(queue_idx); | |
1079 | ||
1080 | ctx.qid = ena_qid; | |
1081 | ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; | |
1082 | ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
1083 | ctx.msix_vector = -1; /* admin interrupts not used */ | |
1084 | ctx.queue_size = adapter->rx_ring_size; | |
1085 | ctx.numa_node = ena_cpu_to_node(queue_idx); | |
1086 | ||
1087 | rc = ena_com_create_io_queue(ena_dev, &ctx); | |
1088 | if (rc) | |
1089 | RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n", | |
1090 | queue_idx, rc); | |
1091 | ||
1092 | rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; | |
1093 | rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; | |
1094 | ||
1095 | rc = ena_com_get_io_handlers(ena_dev, ena_qid, | |
1096 | &rxq->ena_com_io_sq, | |
1097 | &rxq->ena_com_io_cq); | |
1098 | if (rc) { | |
1099 | RTE_LOG(ERR, PMD, | |
1100 | "Failed to get RX queue handlers. RX queue num %d rc: %d\n", | |
1101 | queue_idx, rc); | |
1102 | ena_com_destroy_io_queue(ena_dev, ena_qid); | |
1103 | } | |
1104 | ||
1105 | rxq->port_id = dev->data->port_id; | |
1106 | rxq->next_to_clean = 0; | |
1107 | rxq->next_to_use = 0; | |
1108 | rxq->ring_size = nb_desc; | |
1109 | rxq->mb_pool = mp; | |
1110 | ||
1111 | rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", | |
1112 | sizeof(struct rte_mbuf *) * nb_desc, | |
1113 | RTE_CACHE_LINE_SIZE); | |
1114 | if (!rxq->rx_buffer_info) { | |
1115 | RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n"); | |
1116 | return -ENOMEM; | |
1117 | } | |
1118 | ||
1119 | /* Store pointer to this queue in upper layer */ | |
1120 | rxq->configured = 1; | |
1121 | dev->data->rx_queues[queue_idx] = rxq; | |
1122 | ||
1123 | return rc; | |
1124 | } | |
1125 | ||
1126 | static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) | |
1127 | { | |
1128 | unsigned int i; | |
1129 | int rc; | |
1130 | uint16_t ring_size = rxq->ring_size; | |
1131 | uint16_t ring_mask = ring_size - 1; | |
1132 | uint16_t next_to_use = rxq->next_to_use; | |
1133 | uint16_t in_use; | |
1134 | struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0]; | |
1135 | ||
1136 | if (unlikely(!count)) | |
1137 | return 0; | |
1138 | ||
1139 | in_use = rxq->next_to_use - rxq->next_to_clean; | |
1140 | ena_assert_msg(((in_use + count) <= ring_size), "bad ring state"); | |
1141 | ||
1142 | count = RTE_MIN(count, | |
1143 | (uint16_t)(ring_size - (next_to_use & ring_mask))); | |
1144 | ||
1145 | /* get resources for incoming packets */ | |
1146 | rc = rte_mempool_get_bulk(rxq->mb_pool, | |
1147 | (void **)(&mbufs[next_to_use & ring_mask]), | |
1148 | count); | |
1149 | if (unlikely(rc < 0)) { | |
1150 | rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf); | |
1151 | PMD_RX_LOG(DEBUG, "there are not enough free buffers"); | |
1152 | return 0; | |
1153 | } | |
1154 | ||
1155 | for (i = 0; i < count; i++) { | |
1156 | uint16_t next_to_use_masked = next_to_use & ring_mask; | |
1157 | struct rte_mbuf *mbuf = mbufs[next_to_use_masked]; | |
1158 | struct ena_com_buf ebuf; | |
1159 | ||
1160 | rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]); | |
1161 | /* prepare physical address for DMA transaction */ | |
1162 | ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM; | |
1163 | ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; | |
1164 | /* pass resource to device */ | |
1165 | rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq, | |
1166 | &ebuf, next_to_use_masked); | |
1167 | if (unlikely(rc)) { | |
1168 | RTE_LOG(WARNING, PMD, "failed adding rx desc\n"); | |
1169 | break; | |
1170 | } | |
1171 | next_to_use++; | |
1172 | } | |
1173 | ||
1174 | /* When we submitted free resources to the device... */ | |
1175 | if (i > 0) { | |
1176 | /* ...let HW know that it can fill buffers with data */ | |
1177 | rte_wmb(); | |
1178 | ena_com_write_sq_doorbell(rxq->ena_com_io_sq); | |
1179 | ||
1180 | rxq->next_to_use = next_to_use; | |
1181 | } | |
1182 | ||
1183 | return i; | |
1184 | } | |
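/*
 * Note: the refill above is clamped so that a single call never wraps past
 * the end of rx_buffer_info[], which keeps the rte_mempool_get_bulk() target
 * contiguous; the doorbell is rung once per burst after a write barrier.
 */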
1185 | ||
1186 | static int ena_device_init(struct ena_com_dev *ena_dev, | |
1187 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
1188 | { | |
1189 | int rc; | |
1190 | bool readless_supported; | |
1191 | ||
1192 | /* Initialize mmio registers */ | |
1193 | rc = ena_com_mmio_reg_read_request_init(ena_dev); | |
1194 | if (rc) { | |
1195 | RTE_LOG(ERR, PMD, "failed to init mmio read less\n"); | |
1196 | return rc; | |
1197 | } | |
1198 | ||
1199 | /* The PCIe configuration space revision id indicates whether mmio | |
1200 | * register read is disabled. | |
1201 | */ | |
1202 | readless_supported = | |
1203 | !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id | |
1204 | & ENA_MMIO_DISABLE_REG_READ); | |
1205 | ena_com_set_mmio_read_mode(ena_dev, readless_supported); | |
1206 | ||
1207 | /* reset device */ | |
1208 | rc = ena_com_dev_reset(ena_dev); | |
1209 | if (rc) { | |
1210 | RTE_LOG(ERR, PMD, "cannot reset device\n"); | |
1211 | goto err_mmio_read_less; | |
1212 | } | |
1213 | ||
1214 | /* check FW version */ | |
1215 | rc = ena_com_validate_version(ena_dev); | |
1216 | if (rc) { | |
1217 | RTE_LOG(ERR, PMD, "device version is too low\n"); | |
1218 | goto err_mmio_read_less; | |
1219 | } | |
1220 | ||
1221 | ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev); | |
1222 | ||
1223 | /* ENA device administration layer init */ | |
1224 | rc = ena_com_admin_init(ena_dev, NULL, true); | |
1225 | if (rc) { | |
1226 | RTE_LOG(ERR, PMD, | |
1227 | "cannot initialize ena admin queue with device\n"); | |
1228 | goto err_mmio_read_less; | |
1229 | } | |
1230 | ||
1231 | ena_config_host_info(ena_dev); | |
1232 | ||
1233 | /* To enable the msix interrupts the driver needs to know the number | |
1234 | * of queues. So the driver uses polling mode to retrieve this | |
1235 | * information. | |
1236 | */ | |
1237 | ena_com_set_admin_polling_mode(ena_dev, true); | |
1238 | ||
1239 | /* Get Device Attributes and features */ | |
1240 | rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); | |
1241 | if (rc) { | |
1242 | RTE_LOG(ERR, PMD, | |
1243 | "cannot get attribute for ena device rc= %d\n", rc); | |
1244 | goto err_admin_init; | |
1245 | } | |
1246 | ||
1247 | return 0; | |
1248 | ||
1249 | err_admin_init: | |
1250 | ena_com_admin_destroy(ena_dev); | |
1251 | ||
1252 | err_mmio_read_less: | |
1253 | ena_com_mmio_reg_read_request_destroy(ena_dev); | |
1254 | ||
1255 | return rc; | |
1256 | } | |
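/*
 * Note: the bring-up order above matters; mmio read-less support is set up
 * first so reset and FW version checks can run, then the admin queue is
 * initialized and switched to polling mode (MSI-X is not used here) before
 * host info and the device feature context are exchanged.
 */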
1257 | ||
1258 | static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) | |
1259 | { | |
1260 | struct rte_pci_device *pci_dev; | |
1261 | struct ena_adapter *adapter = | |
1262 | (struct ena_adapter *)(eth_dev->data->dev_private); | |
1263 | struct ena_com_dev *ena_dev = &adapter->ena_dev; | |
1264 | struct ena_com_dev_get_features_ctx get_feat_ctx; | |
1265 | int queue_size, rc; | |
1266 | ||
1267 | static int adapters_found; | |
1268 | ||
1269 | memset(adapter, 0, sizeof(struct ena_adapter)); | |
1270 | ena_dev = &adapter->ena_dev; | |
1271 | ||
1272 | eth_dev->dev_ops = &ena_dev_ops; | |
1273 | eth_dev->rx_pkt_burst = ð_ena_recv_pkts; | |
1274 | eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; | |
1275 | adapter->rte_eth_dev_data = eth_dev->data; | |
1276 | adapter->rte_dev = eth_dev; | |
1277 | ||
1278 | if (rte_eal_process_type() != RTE_PROC_PRIMARY) | |
1279 | return 0; | |
1280 | ||
1281 | pci_dev = eth_dev->pci_dev; | |
1282 | adapter->pdev = pci_dev; | |
1283 | ||
1284 | PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n", | |
1285 | pci_dev->addr.domain, | |
1286 | pci_dev->addr.bus, | |
1287 | pci_dev->addr.devid, | |
1288 | pci_dev->addr.function); | |
1289 | ||
1290 | adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; | |
1291 | adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; | |
1292 | ||
1293 | /* A present ENA_MEM_BAR indicates that LLQ mode is available; | |
1294 | * use the corresponding placement policy. | |
1295 | */ | |
1296 | if (adapter->dev_mem_base) | |
1297 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; | |
1298 | else if (adapter->regs) | |
1299 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
1300 | else | |
1301 | PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n", | |
1302 | ENA_REGS_BAR); | |
1303 | ||
1304 | ena_dev->reg_bar = adapter->regs; | |
1305 | ena_dev->dmadev = adapter->pdev; | |
1306 | ||
1307 | adapter->id_number = adapters_found; | |
1308 | ||
1309 | snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", | |
1310 | adapter->id_number); | |
1311 | ||
1312 | /* device specific initialization routine */ | |
1313 | rc = ena_device_init(ena_dev, &get_feat_ctx); | |
1314 | if (rc) { | |
1315 | PMD_INIT_LOG(CRIT, "Failed to init ENA device\n"); | |
1316 | return -1; | |
1317 | } | |
1318 | ||
1319 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | |
1320 | if (get_feat_ctx.max_queues.max_llq_num == 0) { | |
1321 | PMD_INIT_LOG(ERR, | |
1322 | "Trying to use LLQ but llq_num is 0.\n" | |
1323 | "Fall back into regular queues.\n"); | |
1324 | ena_dev->tx_mem_queue_type = | |
1325 | ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
1326 | adapter->num_queues = | |
1327 | get_feat_ctx.max_queues.max_sq_num; | |
1328 | } else { | |
1329 | adapter->num_queues = | |
1330 | get_feat_ctx.max_queues.max_llq_num; | |
1331 | } | |
1332 | } else { | |
1333 | adapter->num_queues = get_feat_ctx.max_queues.max_sq_num; | |
1334 | } | |
1335 | ||
1336 | queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx); | |
1337 | if ((queue_size <= 0) || (adapter->num_queues <= 0)) | |
1338 | return -EFAULT; | |
1339 | ||
1340 | adapter->tx_ring_size = queue_size; | |
1341 | adapter->rx_ring_size = queue_size; | |
1342 | ||
1343 | /* prepare ring structures */ | |
1344 | ena_init_rings(adapter); | |
1345 | ||
1346 | ena_config_debug_area(adapter); | |
1347 | ||
1348 | /* Set max MTU for this device */ | |
1349 | adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; | |
1350 | ||
1351 | /* Copy MAC address and point DPDK to it */ | |
1352 | eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr; | |
1353 | ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr, | |
1354 | (struct ether_addr *)adapter->mac_addr); | |
1355 | ||
1356 | adapter->drv_stats = rte_zmalloc("adapter stats", | |
1357 | sizeof(*adapter->drv_stats), | |
1358 | RTE_CACHE_LINE_SIZE); | |
1359 | if (!adapter->drv_stats) { | |
1360 | RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n"); | |
1361 | return -ENOMEM; | |
1362 | } | |
1363 | ||
1364 | adapters_found++; | |
1365 | adapter->state = ENA_ADAPTER_STATE_INIT; | |
1366 | ||
1367 | return 0; | |
1368 | } | |
1369 | ||
1370 | static int ena_dev_configure(struct rte_eth_dev *dev) | |
1371 | { | |
1372 | struct ena_adapter *adapter = | |
1373 | (struct ena_adapter *)(dev->data->dev_private); | |
1374 | ||
1375 | if (!(adapter->state == ENA_ADAPTER_STATE_INIT || | |
1376 | adapter->state == ENA_ADAPTER_STATE_STOPPED)) { | |
1377 | PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n", | |
1378 | adapter->state); | |
1379 | return -1; | |
1380 | } | |
1381 | ||
1382 | switch (adapter->state) { | |
1383 | case ENA_ADAPTER_STATE_INIT: | |
1384 | case ENA_ADAPTER_STATE_STOPPED: | |
1385 | adapter->state = ENA_ADAPTER_STATE_CONFIG; | |
1386 | break; | |
1387 | case ENA_ADAPTER_STATE_CONFIG: | |
1388 | RTE_LOG(WARNING, PMD, | |
1389 | "Ivalid driver state while trying to configure device\n"); | |
1390 | break; | |
1391 | default: | |
1392 | break; | |
1393 | } | |
1394 | ||
1395 | return 0; | |
1396 | } | |
1397 | ||
1398 | static void ena_init_rings(struct ena_adapter *adapter) | |
1399 | { | |
1400 | int i; | |
1401 | ||
1402 | for (i = 0; i < adapter->num_queues; i++) { | |
1403 | struct ena_ring *ring = &adapter->tx_ring[i]; | |
1404 | ||
1405 | ring->configured = 0; | |
1406 | ring->type = ENA_RING_TYPE_TX; | |
1407 | ring->adapter = adapter; | |
1408 | ring->id = i; | |
1409 | ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; | |
1410 | ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; | |
1411 | } | |
1412 | ||
1413 | for (i = 0; i < adapter->num_queues; i++) { | |
1414 | struct ena_ring *ring = &adapter->rx_ring[i]; | |
1415 | ||
1416 | ring->configured = 0; | |
1417 | ring->type = ENA_RING_TYPE_RX; | |
1418 | ring->adapter = adapter; | |
1419 | ring->id = i; | |
1420 | } | |
1421 | } | |
1422 | ||
1423 | static void ena_infos_get(struct rte_eth_dev *dev, | |
1424 | struct rte_eth_dev_info *dev_info) | |
1425 | { | |
1426 | struct ena_adapter *adapter; | |
1427 | struct ena_com_dev *ena_dev; | |
1428 | struct ena_com_dev_get_features_ctx feat; | |
1429 | uint32_t rx_feat = 0, tx_feat = 0; | |
1430 | int rc = 0; | |
1431 | ||
1432 | ena_assert_msg(dev->data != NULL, "Uninitialized device"); | |
1433 | ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); | |
1434 | adapter = (struct ena_adapter *)(dev->data->dev_private); | |
1435 | ||
1436 | ena_dev = &adapter->ena_dev; | |
1437 | ena_assert_msg(ena_dev != NULL, "Uninitialized device"); | |
1438 | ||
1439 | dev_info->speed_capa = | |
1440 | ETH_LINK_SPEED_1G | | |
1441 | ETH_LINK_SPEED_2_5G | | |
1442 | ETH_LINK_SPEED_5G | | |
1443 | ETH_LINK_SPEED_10G | | |
1444 | ETH_LINK_SPEED_25G | | |
1445 | ETH_LINK_SPEED_40G | | |
1446 | ETH_LINK_SPEED_50G | | |
1447 | ETH_LINK_SPEED_100G; | |
1448 | ||
1449 | /* Get supported features from HW */ | |
1450 | rc = ena_com_get_dev_attr_feat(ena_dev, &feat); | |
1451 | if (unlikely(rc)) { | |
1452 | RTE_LOG(ERR, PMD, | |
1453 | "Cannot get attribute for ena device rc= %d\n", rc); | |
1454 | return; | |
1455 | } | |
1456 | ||
1457 | /* Set Tx & Rx features available for device */ | |
1458 | if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) | |
1459 | tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; | |
1460 | ||
1461 | if (feat.offload.tx & | |
1462 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) | |
1463 | tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | | |
1464 | DEV_TX_OFFLOAD_UDP_CKSUM | | |
1465 | DEV_TX_OFFLOAD_TCP_CKSUM; | |
1466 | ||
1467 | if (feat.offload.tx & | |
1468 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) | |
1469 | rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | | |
1470 | DEV_RX_OFFLOAD_UDP_CKSUM | | |
1471 | DEV_RX_OFFLOAD_TCP_CKSUM; | |
1472 | ||
1473 | /* Inform framework about available features */ | |
1474 | dev_info->rx_offload_capa = rx_feat; | |
1475 | dev_info->tx_offload_capa = tx_feat; | |
1476 | ||
1477 | dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; | |
1478 | dev_info->max_rx_pktlen = adapter->max_mtu; | |
1479 | dev_info->max_mac_addrs = 1; | |
1480 | ||
1481 | dev_info->max_rx_queues = adapter->num_queues; | |
1482 | dev_info->max_tx_queues = adapter->num_queues; | |
1483 | dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; | |
1484 | } | |
1485 | ||
1486 | static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, | |
1487 | uint16_t nb_pkts) | |
1488 | { | |
1489 | struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); | |
1490 | unsigned int ring_size = rx_ring->ring_size; | |
1491 | unsigned int ring_mask = ring_size - 1; | |
1492 | uint16_t next_to_clean = rx_ring->next_to_clean; | |
1493 | uint16_t desc_in_use = 0; | |
1494 | unsigned int recv_idx = 0; | |
1495 | struct rte_mbuf *mbuf = NULL; | |
1496 | struct rte_mbuf *mbuf_head = NULL; | |
1497 | struct rte_mbuf *mbuf_prev = NULL; | |
1498 | struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info; | |
1499 | unsigned int completed; | |
1500 | ||
1501 | struct ena_com_rx_ctx ena_rx_ctx; | |
1502 | int rc = 0; | |
1503 | ||
1504 | /* Check adapter state */ | |
1505 | if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { | |
1506 | RTE_LOG(ALERT, PMD, | |
1507 | "Trying to receive pkts while device is NOT running\n"); | |
1508 | return 0; | |
1509 | } | |
1510 | ||
1511 | desc_in_use = rx_ring->next_to_use - next_to_clean; | |
1512 | if (unlikely(nb_pkts > desc_in_use)) | |
1513 | nb_pkts = desc_in_use; | |
1514 | ||
1515 | for (completed = 0; completed < nb_pkts; completed++) { | |
1516 | int segments = 0; | |
1517 | ||
1518 | ena_rx_ctx.max_bufs = rx_ring->ring_size; | |
1519 | ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; | |
1520 | ena_rx_ctx.descs = 0; | |
1521 | /* receive packet context */ | |
1522 | rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, | |
1523 | rx_ring->ena_com_io_sq, | |
1524 | &ena_rx_ctx); | |
1525 | if (unlikely(rc)) { | |
1526 | RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc); | |
1527 | return 0; | |
1528 | } | |
1529 | ||
1530 | if (unlikely(ena_rx_ctx.descs == 0)) | |
1531 | break; | |
1532 | ||
1533 | while (segments < ena_rx_ctx.descs) { | |
1534 | mbuf = rx_buff_info[next_to_clean & ring_mask]; | |
1535 | mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len; | |
1536 | mbuf->data_off = RTE_PKTMBUF_HEADROOM; | |
1537 | mbuf->refcnt = 1; | |
1538 | mbuf->next = NULL; | |
1539 | if (segments == 0) { | |
1540 | mbuf->nb_segs = ena_rx_ctx.descs; | |
1541 | mbuf->port = rx_ring->port_id; | |
1542 | mbuf->pkt_len = 0; | |
1543 | mbuf_head = mbuf; | |
1544 | } else { | |
1545 | /* for multi-segment pkts create mbuf chain */ | |
1546 | mbuf_prev->next = mbuf; | |
1547 | } | |
1548 | mbuf_head->pkt_len += mbuf->data_len; | |
1549 | ||
1550 | mbuf_prev = mbuf; | |
1551 | segments++; | |
1552 | next_to_clean++; | |
1553 | } | |
1554 | ||
1555 | /* fill mbuf attributes if any */ | |
1556 | ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx); | |
1557 | mbuf_head->hash.rss = (uint32_t)rx_ring->id; | |
1558 | ||
1559 | /* pass to DPDK application head mbuf */ | |
1560 | rx_pkts[recv_idx] = mbuf_head; | |
1561 | recv_idx++; | |
1562 | } | |
1563 | ||
1564 | /* Burst refill to amortize doorbells and memory barriers over a roughly constant interval */ | |
1565 | if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size)) | |
1566 | ena_populate_rx_queue(rx_ring, ring_size - desc_in_use); | |
1567 | ||
1568 | rx_ring->next_to_clean = next_to_clean; | |
1569 | ||
1570 | return recv_idx; | |
1571 | } | |
1572 | ||
1573 | static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, | |
1574 | uint16_t nb_pkts) | |
1575 | { | |
1576 | struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); | |
1577 | uint16_t next_to_use = tx_ring->next_to_use; | |
1578 | uint16_t next_to_clean = tx_ring->next_to_clean; | |
1579 | struct rte_mbuf *mbuf; | |
1580 | unsigned int ring_size = tx_ring->ring_size; | |
1581 | unsigned int ring_mask = ring_size - 1; | |
1582 | struct ena_com_tx_ctx ena_tx_ctx; | |
1583 | struct ena_tx_buffer *tx_info; | |
1584 | struct ena_com_buf *ebuf; | |
1585 | uint16_t rc, req_id, total_tx_descs = 0; | |
1586 | uint16_t sent_idx = 0, empty_tx_reqs; | |
1587 | int nb_hw_desc; | |
1588 | ||
1589 | /* Check adapter state */ | |
1590 | if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { | |
1591 | RTE_LOG(ALERT, PMD, | |
1592 | "Trying to xmit pkts while device is NOT running\n"); | |
1593 | return 0; | |
1594 | } | |
1595 | ||
1596 | empty_tx_reqs = ring_size - (next_to_use - next_to_clean); | |
1597 | if (nb_pkts > empty_tx_reqs) | |
1598 | nb_pkts = empty_tx_reqs; | |
1599 | ||
1600 | for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { | |
1601 | mbuf = tx_pkts[sent_idx]; | |
1602 | ||
1603 | req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask]; | |
1604 | tx_info = &tx_ring->tx_buffer_info[req_id]; | |
1605 | tx_info->mbuf = mbuf; | |
1606 | tx_info->num_of_bufs = 0; | |
1607 | ebuf = tx_info->bufs; | |
1608 | ||
1609 | /* Prepare TX context */ | |
1610 | memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); | |
1611 | memset(&ena_tx_ctx.ena_meta, 0x0, | |
1612 | sizeof(struct ena_com_tx_meta)); | |
1613 | ena_tx_ctx.ena_bufs = ebuf; | |
1614 | ena_tx_ctx.req_id = req_id; | |
1615 | if (tx_ring->tx_mem_queue_type == | |
1616 | ENA_ADMIN_PLACEMENT_POLICY_DEV) { | |
1617 | /* prepare the push buffer with | |
1618 | * virtual address of the data | |
1619 | */ | |
1620 | ena_tx_ctx.header_len = | |
1621 | RTE_MIN(mbuf->data_len, | |
1622 | tx_ring->tx_max_header_size); | |
1623 | ena_tx_ctx.push_header = | |
1624 | (void *)((char *)mbuf->buf_addr + | |
1625 | mbuf->data_off); | |
1626 | } /* there's no else as we take advantage of memset zeroing */ | |
1627 | ||
1628 | /* Set TX offloads flags, if applicable */ | |
1629 | ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx); | |
1630 | ||
1631 | if (unlikely(mbuf->ol_flags & | |
1632 | (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD))) | |
1633 | rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); | |
1634 | ||
1635 | rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]); | |
1636 | ||
1637 | /* Process first segment taking into | |
1638 | * consideration pushed header | |
1639 | */ | |
1640 | if (mbuf->data_len > ena_tx_ctx.header_len) { | |
1641 | ebuf->paddr = mbuf->buf_physaddr + | |
1642 | mbuf->data_off + | |
1643 | ena_tx_ctx.header_len; | |
1644 | ebuf->len = mbuf->data_len - ena_tx_ctx.header_len; | |
1645 | ebuf++; | |
1646 | tx_info->num_of_bufs++; | |
1647 | } | |
1648 | ||
1649 | while ((mbuf = mbuf->next) != NULL) { | |
1650 | ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off; | |
1651 | ebuf->len = mbuf->data_len; | |
1652 | ebuf++; | |
1653 | tx_info->num_of_bufs++; | |
1654 | } | |
1655 | ||
1656 | ena_tx_ctx.num_bufs = tx_info->num_of_bufs; | |
1657 | ||
1658 | /* Write data to device */ | |
1659 | rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, | |
1660 | &ena_tx_ctx, &nb_hw_desc); | |
1661 | if (unlikely(rc)) | |
1662 | break; | |
1663 | ||
1664 | tx_info->tx_descs = nb_hw_desc; | |
1665 | ||
1666 | next_to_use++; | |
1667 | } | |
1668 | ||
1669 | /* If there are ready packets to be xmitted... */ | |
1670 | if (sent_idx > 0) { | |
1671 | /* ...let HW do its best :-) */ | |
1672 | rte_wmb(); | |
1673 | ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); | |
1674 | ||
1675 | tx_ring->next_to_use = next_to_use; | |
1676 | } | |
1677 | ||
1678 | /* Clear complete packets */ | |
1679 | while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) { | |
1680 | /* Get Tx info & store how many descs were processed */ | |
1681 | tx_info = &tx_ring->tx_buffer_info[req_id]; | |
1682 | total_tx_descs += tx_info->tx_descs; | |
1683 | ||
1684 | /* Free whole mbuf chain */ | |
1685 | mbuf = tx_info->mbuf; | |
1686 | rte_pktmbuf_free(mbuf); | |
1687 | ||
1688 | /* Put back descriptor to the ring for reuse */ | |
1689 | tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id; | |
1690 | next_to_clean++; | |
1691 | ||
1692 | /* If too many descs to clean, leave it for another run */ | |
1693 | if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size))) | |
1694 | break; | |
1695 | } | |
1696 | ||
1697 | if (total_tx_descs > 0) { | |
1698 | /* acknowledge completion of sent packets */ | |
1699 | ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); | |
1700 | tx_ring->next_to_clean = next_to_clean; | |
1701 | } | |
1702 | ||
1703 | return sent_idx; | |
1704 | } | |
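/*
 * Note on eth_ena_xmit_pkts(): transmission is split into two phases; the
 * first loop stages up to nb_pkts descriptors and rings the doorbell once,
 * while the completion loop recycles req_ids back into empty_tx_reqs[] and
 * stops early once ENA_RING_DESCS_RATIO() descriptors have been cleaned, so
 * a single burst call does not spend too long in cleanup.
 */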
1705 | ||
1706 | static struct eth_driver rte_ena_pmd = { | |
1707 | .pci_drv = { | |
1708 | .id_table = pci_id_ena_map, | |
1709 | .drv_flags = RTE_PCI_DRV_NEED_MAPPING, | |
1710 | .probe = rte_eth_dev_pci_probe, | |
1711 | .remove = rte_eth_dev_pci_remove, | |
1712 | }, | |
1713 | .eth_dev_init = eth_ena_dev_init, | |
1714 | .dev_private_size = sizeof(struct ena_adapter), | |
1715 | }; | |
1716 | ||
1717 | RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv); | |
1718 | RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map); |