/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF10 equal stride packed stream receive native datapath implementation */

#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_ef10.h"

#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

/* Tunnels are not supported */
#define SFC_EF10_RX_EV_ENCAP_SUPPORT	0
#include "sfc_ef10_rx_ev.h"

#define sfc_ef10_essb_rx_err(dpq, ...) \
        SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)

#define sfc_ef10_essb_rx_info(dpq, ...) \
        SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)

/*
 * Fake length for RXQ descriptors in equal stride super-buffer mode
 * to make hardware happy.
 */
#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE	32
/**
 * Minimum number of Rx buffers the datapath allows.
 *
 * Each HW Rx descriptor carries many Rx buffers. The number of buffers
 * in one HW Rx descriptor is equal to the size of the contiguous block
 * provided by the Rx buffers memory pool. The contiguous block size
 * depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the rte_mbuf
 * data size specified on memory pool creation. A typical rte_mbuf data
 * size is about 2k, which makes a bit less than 32 buffers per
 * contiguous block with the default bucket size of 64k.
 * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN),
 * this makes about 256 the required minimum. It is doubled in the
 * advertised minimum to allow for at least 2 refill blocks.
 */
#define SFC_EF10_ESSB_RX_DESCS_MIN	512

/**
 * Alignment requirement on the number of Rx buffers.
 *
 * There are no extra requirements on alignment since the actual number
 * of pushed Rx buffers will be a multiple of the contiguous block size,
 * which is unknown beforehand.
 */
#define SFC_EF10_ESSB_RX_DESCS_ALIGN	1

/**
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 */
#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
        ((_nevs) - 1 /* head must not step on tail */ - \
         (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
         1 /* Rx error */ - 1 /* flush */)
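
/*
 * Illustrative only, assuming SFC_EF10_EV_PER_CACHE_LINE is 8 (8-byte
 * events in a 64-byte cache line): a 2048-entry event queue yields a
 * limit of 2048 - 1 - 7 - 1 - 1 = 2038 Rx descriptors/buffers.
 */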

struct sfc_ef10_essb_rx_sw_desc {
        struct rte_mbuf			*first_mbuf;
};

struct sfc_ef10_essb_rxq {
        /* Used on data path */
        unsigned int			flags;
#define SFC_EF10_ESSB_RXQ_STARTED	0x1
#define SFC_EF10_ESSB_RXQ_NOT_RUNNING	0x2
#define SFC_EF10_ESSB_RXQ_EXCEPTION	0x4
        unsigned int			rxq_ptr_mask;
        unsigned int			block_size;
        unsigned int			buf_stride;
        unsigned int			bufs_ptr;
        unsigned int			completed;
        unsigned int			pending_id;
        unsigned int			bufs_pending;
        unsigned int			left_in_completed;
        unsigned int			left_in_pending;
        unsigned int			evq_read_ptr;
        unsigned int			evq_ptr_mask;
        efx_qword_t			*evq_hw_ring;
        struct sfc_ef10_essb_rx_sw_desc	*sw_ring;
        uint16_t			port_id;

        /* Used on refill */
        unsigned int			added;
        unsigned int			max_fill_level;
        unsigned int			refill_threshold;
        struct rte_mempool		*refill_mb_pool;
        efx_qword_t			*rxq_hw_ring;
        volatile void			*doorbell;

        /* Datapath receive queue anchor */
        struct sfc_dp_rxq		dp;
};
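
/*
 * Note on the counters above, as used by the code below: bufs_ptr,
 * bufs_pending, left_in_completed and left_in_pending count individual
 * Rx buffers, whereas completed, pending_id and added count HW Rx
 * descriptors, each of which covers a whole contiguous block of
 * block_size buffers.
 */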

static inline struct sfc_ef10_essb_rxq *
sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
        return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
}

static struct rte_mbuf *
sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
                        struct rte_mbuf *mbuf)
{
        struct rte_mbuf *m;

        m = (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
        MBUF_RAW_ALLOC_CHECK(m);
        return m;
}

static struct rte_mbuf *
sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
                            struct rte_mbuf *mbuf, unsigned int idx)
{
        struct rte_mbuf *m;

        m = (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
        MBUF_RAW_ALLOC_CHECK(m);
        return m;
}
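
/*
 * Both helpers above rely on the mempool objects in a contiguous block
 * being laid out at a fixed stride: buf_stride is derived in qcreate()
 * as header_size + elt_size + trailer_size, i.e. the full per-object
 * footprint, so plain pointer arithmetic lands on the next mbuf.
 */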

static struct rte_mbuf *
sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
{
        const struct sfc_ef10_essb_rx_sw_desc *rxd;

        if (rxq->left_in_completed != 0) {
                rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
                return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
                                rxq->block_size - rxq->left_in_completed);
        } else {
                rxq->completed++;
                rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
                rxq->left_in_completed = rxq->block_size;
                return rxd->first_mbuf;
        }
}
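
/*
 * If the current completed block still has undelivered buffers, the
 * helper above returns the first of them; otherwise it advances
 * 'completed' to the next HW descriptor, resets left_in_completed and
 * returns the first mbuf of the fresh block.
 */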

static void
sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
{
        const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
        unsigned int free_space;
        unsigned int bulks;
        void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
        unsigned int added = rxq->added;

        free_space = rxq->max_fill_level - (added - rxq->completed);

        if (free_space < rxq->refill_threshold)
                return;

        bulks = free_space / RTE_DIM(mbuf_blocks);
        /* refill_threshold guarantees that bulks is positive */
        SFC_ASSERT(bulks > 0);

        do {
                unsigned int id;
                unsigned int i;

                if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
                                mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
                        struct rte_eth_dev_data *dev_data =
                                rte_eth_devices[rxq->port_id].data;

                        /*
                         * It is hardly a safe way to increment counter
                         * from different contexts, but all PMDs do it.
                         */
                        dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
                        /* Return if we have posted nothing yet */
                        if (added == rxq->added)
                                return;
                        /* Push posted */
                        break;
                }

                for (i = 0, id = added & rxq_ptr_mask;
                     i < RTE_DIM(mbuf_blocks);
                     ++i, ++id) {
                        struct rte_mbuf *m = mbuf_blocks[i];
                        struct sfc_ef10_essb_rx_sw_desc *rxd;

                        SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
                        rxd = &rxq->sw_ring[id];
                        rxd->first_mbuf = m;

                        /* RX_KER_BYTE_CNT is ignored by firmware */
                        EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
                                             ESF_DZ_RX_KER_BYTE_CNT,
                                             SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
                                             ESF_DZ_RX_KER_BUF_ADDR,
                                             rte_mbuf_data_iova_default(m));
                }

                added += RTE_DIM(mbuf_blocks);

        } while (--bulks > 0);

        SFC_ASSERT(rxq->added != added);
        rxq->added = added;
        sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
}
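
/*
 * The refill above always posts whole bulks of SFC_EF10_RX_WPTR_ALIGN
 * descriptors, so the write pointer pushed to the doorbell keeps the
 * required alignment. Since qcreate() clamps refill_threshold to at
 * least SFC_EF10_RX_WPTR_ALIGN, free_space >= refill_threshold implies
 * bulks > 0 here.
 */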

static bool
sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
{
        *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];

        if (!sfc_ef10_ev_present(*rx_ev))
                return false;

        if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
                     FSE_AZ_EV_CODE_RX_EV)) {
                /*
                 * Do not move read_ptr to keep the event for exception
                 * handling
                 */
                rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
                sfc_ef10_essb_rx_err(&rxq->dp.dpq,
                                     "RxQ exception at EvQ read ptr %#x",
                                     rxq->evq_read_ptr);
                return false;
        }

        rxq->evq_read_ptr++;
        return true;
}

static void
sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
{
        unsigned int ready;

        ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
                 rxq->bufs_ptr) &
                EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);

        rxq->bufs_ptr += ready;
        rxq->bufs_pending += ready;

        SFC_ASSERT(ready > 0);
        do {
                const struct sfc_ef10_essb_rx_sw_desc *rxd;
                struct rte_mbuf *m;
                unsigned int todo_bufs;
                struct rte_mbuf *m0;

                rxd = &rxq->sw_ring[rxq->pending_id];
                m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
                                rxq->block_size - rxq->left_in_pending);

                if (ready < rxq->left_in_pending) {
                        todo_bufs = ready;
                        ready = 0;
                        rxq->left_in_pending -= todo_bufs;
                } else {
                        todo_bufs = rxq->left_in_pending;
                        ready -= todo_bufs;
                        rxq->left_in_pending = rxq->block_size;
                        if (rxq->pending_id != rxq->rxq_ptr_mask)
                                rxq->pending_id++;
                        else
                                rxq->pending_id = 0;
                }

                SFC_ASSERT(todo_bufs > 0);
                --todo_bufs;

                sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);

                /* Prefetch pseudo-header */
                rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);

                m0 = m;
                while (todo_bufs-- > 0) {
                        m = sfc_ef10_essb_next_mbuf(rxq, m);
                        m->ol_flags = m0->ol_flags;
                        m->packet_type = m0->packet_type;
                        /* Prefetch pseudo-header */
                        rte_prefetch0((uint8_t *)m->buf_addr +
                                      RTE_PKTMBUF_HEADROOM);
                }
        } while (ready > 0);
}
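
/*
 * The 'ready' computation above uses modular arithmetic: the event
 * carries only the low bits of the Rx descriptor pointer (counted in
 * buffers here) in ESF_DZ_RX_DSC_PTR_LBITS, so the difference from the
 * previously seen bufs_ptr, masked to the field width, gives the number
 * of buffers completed since the last event.
 */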

static unsigned int
sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
                             struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        unsigned int n_rx_pkts = 0;
        unsigned int todo_bufs;
        struct rte_mbuf *m;

        while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
                                    rxq->bufs_pending)) > 0) {
                m = sfc_ef10_essb_maybe_next_completed(rxq);

                todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);

                rxq->bufs_pending -= todo_bufs;
                rxq->left_in_completed -= todo_bufs;

                SFC_ASSERT(todo_bufs > 0);
                todo_bufs--;

                do {
                        const efx_qword_t *qwordp;
                        uint16_t pkt_len;

                        /* Buffers to be discarded have 0 in packet type */
                        if (unlikely(m->packet_type == 0)) {
                                rte_mbuf_raw_free(m);
                                goto next_buf;
                        }

                        rx_pkts[n_rx_pkts++] = m;

                        /* Parse pseudo-header */
                        qwordp = (const efx_qword_t *)
                                ((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
                        pkt_len =
                                EFX_QWORD_FIELD(*qwordp,
                                                ES_EZ_ESSB_RX_PREFIX_DATA_LEN);

                        m->data_off = RTE_PKTMBUF_HEADROOM +
                                      ES_EZ_ESSB_RX_PREFIX_LEN;
                        m->port = rxq->port_id;

                        rte_pktmbuf_pkt_len(m) = pkt_len;
                        rte_pktmbuf_data_len(m) = pkt_len;

                        m->ol_flags |=
                                (PKT_RX_RSS_HASH *
                                 !!EFX_TEST_QWORD_BIT(*qwordp,
                                        ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
                                (PKT_RX_FDIR_ID *
                                 !!EFX_TEST_QWORD_BIT(*qwordp,
                                        ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
                                (PKT_RX_FDIR *
                                 !!EFX_TEST_QWORD_BIT(*qwordp,
                                        ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));

                        /* EFX_QWORD_FIELD converts little-endian to CPU */
                        m->hash.rss =
                                EFX_QWORD_FIELD(*qwordp,
                                                ES_EZ_ESSB_RX_PREFIX_HASH);
                        m->hash.fdir.hi =
                                EFX_QWORD_FIELD(*qwordp,
                                                ES_EZ_ESSB_RX_PREFIX_MARK);

next_buf:
                        m = sfc_ef10_essb_next_mbuf(rxq, m);
                } while (todo_bufs-- > 0);
        }

        return n_rx_pkts;
}

static uint16_t
sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
        const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
        uint16_t n_rx_pkts;
        efx_qword_t rx_ev;

        if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
                                   SFC_EF10_ESSB_RXQ_EXCEPTION)))
                return 0;

        n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);

        while (n_rx_pkts != nb_pkts &&
               sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
                /*
                 * DROP_EVENT is internal to the NIC; software should
                 * never see it and, therefore, may ignore it.
                 */

                sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
                n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
                                                          rx_pkts + n_rx_pkts,
                                                          nb_pkts - n_rx_pkts);
        }

        sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
                           evq_old_read_ptr, rxq->evq_read_ptr);

        /* It is not a problem if we refill in the case of exception */
        sfc_ef10_essb_rx_qrefill(rxq);

        return n_rx_pkts;
}
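
/*
 * The burst above proceeds in three steps: drain buffers already
 * pending from previous calls, then poll the event queue and convert
 * freshly completed buffers to packets until nb_pkts is reached, and
 * finally clear the consumed events and refill the Rx ring.
 */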

static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
static unsigned int
sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
        const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
        efx_qword_t rx_ev;

        if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
                                   SFC_EF10_ESSB_RXQ_EXCEPTION)))
                return rxq->bufs_pending;

        while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
                /*
                 * DROP_EVENT is internal to the NIC; software should
                 * never see it and, therefore, may ignore it.
                 */
                sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
        }

        sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
                           evq_old_read_ptr, rxq->evq_read_ptr);

        return rxq->bufs_pending;
}

static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
static int
sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
        unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);

        if (offset < pending)
                return RTE_ETH_RX_DESC_DONE;

        if (offset < (rxq->added - rxq->completed) * rxq->block_size +
                     rxq->left_in_completed - rxq->block_size)
                return RTE_ETH_RX_DESC_AVAIL;

        return RTE_ETH_RX_DESC_UNAVAIL;
}
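
/*
 * In the AVAIL check above, (added - completed) * block_size counts the
 * buffers in all posted blocks, and left_in_completed - block_size then
 * subtracts the part of the current completed block that has already
 * been delivered to the application.
 */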

static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
static void
sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
        /*
         * The number of descriptors just defines the maximum number of
         * pushed descriptors (fill level).
         */
        dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
        dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
}

static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
static int
sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
{
        SFC_ASSERT(pool != NULL);

        if (strcmp(pool, "bucket") == 0)
                return 0;

        return -ENOTSUP;
}
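
/*
 * Only mempool drivers implementing contiguous block dequeue can back
 * this datapath; the check above accepts the "bucket" driver, which is
 * the one known to provide it. An application would typically create
 * the Rx mempool with rte_pktmbuf_pool_create_by_ops(..., "bucket")
 * (creation itself happens outside this file).
 */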

static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
static int
sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
                                struct sfc_dp_rx_hw_limits *limits,
                                struct rte_mempool *mb_pool,
                                unsigned int *rxq_entries,
                                unsigned int *evq_entries,
                                unsigned int *rxq_max_fill_level)
{
        int rc;
        struct rte_mempool_info mp_info;
        unsigned int nb_hw_rx_desc;
        unsigned int max_events;

        rc = rte_mempool_ops_get_info(mb_pool, &mp_info);
        if (rc != 0)
                return -rc;
        if (mp_info.contig_block_size == 0)
                return EINVAL;

        /*
         * Calculate the required number of hardware Rx descriptors, each
         * carrying contig block size Rx buffers.
         * It cannot be less than the Rx write pointer alignment plus 1
         * in order to avoid cases when the ring is guaranteed to be
         * empty.
         */
        nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
                                                 mp_info.contig_block_size),
                                SFC_EF10_RX_WPTR_ALIGN + 1);
        if (nb_hw_rx_desc <= limits->rxq_min_entries) {
                *rxq_entries = limits->rxq_min_entries;
        } else {
                *rxq_entries = rte_align32pow2(nb_hw_rx_desc);
                if (*rxq_entries > limits->rxq_max_entries)
                        return EINVAL;
        }

        max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
                mp_info.contig_block_size +
                (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
                1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;

        *evq_entries = rte_align32pow2(max_events);
        *evq_entries = RTE_MAX(*evq_entries, limits->evq_min_entries);
        *evq_entries = RTE_MIN(*evq_entries, limits->evq_max_entries);

        /*
         * Even the maximum event queue size may be insufficient to handle
         * so many Rx descriptors. If so, the Rx queue fill level is limited.
         */
        *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
                                      SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));
        return 0;
}
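
/*
 * A sketch of the sizing above, illustrative only and assuming both
 * SFC_EF10_RX_WPTR_ALIGN and SFC_EF10_EV_PER_CACHE_LINE are 8: with
 * nb_rx_desc = 2048 and contig_block_size = 32, nb_hw_rx_desc is
 * max(2048 / 32, 8 + 1) = 64 hardware descriptors; max_events is then
 * 64 * 32 + 7 + 1 + 1 + 1 = 2058, rounded up to a 4096-entry event
 * queue (subject to the EvQ limits).
 */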

static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
static int
sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
                         const struct rte_pci_addr *pci_addr, int socket_id,
                         const struct sfc_dp_rx_qcreate_info *info,
                         struct sfc_dp_rxq **dp_rxqp)
{
        struct rte_mempool * const mp = info->refill_mb_pool;
        struct rte_mempool_info mp_info;
        struct sfc_ef10_essb_rxq *rxq;
        int rc;

        rc = rte_mempool_ops_get_info(mp, &mp_info);
        if (rc != 0) {
                /* Positive errno is used in the driver */
                rc = -rc;
                goto fail_get_contig_block_size;
        }

        /* Check if the mempool provides block dequeue */
        rc = EINVAL;
        if (mp_info.contig_block_size == 0)
                goto fail_no_block_dequeue;

        rc = ENOMEM;
        rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                goto fail_rxq_alloc;

        sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

        rc = ENOMEM;
        rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
                                         info->rxq_entries,
                                         sizeof(*rxq->sw_ring),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_ring == NULL)
                goto fail_desc_alloc;

        rxq->block_size = mp_info.contig_block_size;
        rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
        rxq->rxq_ptr_mask = info->rxq_entries - 1;
        rxq->evq_ptr_mask = info->evq_entries - 1;
        rxq->evq_hw_ring = info->evq_hw_ring;
        rxq->port_id = port_id;

        rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
        rxq->refill_threshold =
                RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
                        SFC_EF10_RX_WPTR_ALIGN);
        rxq->refill_mb_pool = mp;
        rxq->rxq_hw_ring = info->rxq_hw_ring;

        rxq->doorbell = (volatile uint8_t *)info->mem_bar +
                        ER_DZ_RX_DESC_UPD_REG_OFST +
                        (info->hw_index << info->vi_window_shift);

        sfc_ef10_essb_rx_info(&rxq->dp.dpq,
                              "block size is %u, buf stride is %u",
                              rxq->block_size, rxq->buf_stride);
        sfc_ef10_essb_rx_info(&rxq->dp.dpq,
                              "max fill level is %u descs (%u bufs), "
                              "refill threshold %u descs (%u bufs)",
                              rxq->max_fill_level,
                              rxq->max_fill_level * rxq->block_size,
                              rxq->refill_threshold,
                              rxq->refill_threshold * rxq->block_size);

        *dp_rxqp = &rxq->dp;
        return 0;

fail_desc_alloc:
        rte_free(rxq);

fail_rxq_alloc:
fail_no_block_dequeue:
fail_get_contig_block_size:
        return rc;
}

static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
static void
sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

        rte_free(rxq->sw_ring);
        rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
static int
sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

        rxq->evq_read_ptr = evq_read_ptr;

        /* Initialize before refill */
        rxq->completed = rxq->pending_id = rxq->added = 0;
        rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
        rxq->bufs_ptr = UINT_MAX;
        rxq->bufs_pending = 0;

        sfc_ef10_essb_rx_qrefill(rxq);

        rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
        rxq->flags &=
                ~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);

        return 0;
}

static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
static void
sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);

        rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;

        *evq_read_ptr = rxq->evq_read_ptr;
}

static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
static bool
sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
        __rte_unused struct sfc_ef10_essb_rxq *rxq;

        rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
        SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);

        /*
         * It is safe to ignore Rx event since we free all mbufs on
         * queue purge anyway.
         */

        return false;
}

static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
static void
sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
        struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
        unsigned int i;
        const struct sfc_ef10_essb_rx_sw_desc *rxd;
        struct rte_mbuf *m;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
                m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
                                rxq->block_size - rxq->left_in_completed);
                while (rxq->left_in_completed > 0) {
                        rte_mbuf_raw_free(m);
                        m = sfc_ef10_essb_next_mbuf(rxq, m);
                        rxq->left_in_completed--;
                }
                rxq->left_in_completed = rxq->block_size;
        }

        rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
}

struct sfc_dp_rx sfc_ef10_essb_rx = {
        .dp = {
                .name		= SFC_KVARG_DATAPATH_EF10_ESSB,
                .type		= SFC_DP_RX,
                .hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10 |
                                  SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
        },
        .features		= SFC_DP_RX_FEAT_FLOW_FLAG |
                                  SFC_DP_RX_FEAT_FLOW_MARK |
                                  SFC_DP_RX_FEAT_CHECKSUM,
        .get_dev_info		= sfc_ef10_essb_rx_get_dev_info,
        .pool_ops_supported	= sfc_ef10_essb_rx_pool_ops_supported,
        .qsize_up_rings		= sfc_ef10_essb_rx_qsize_up_rings,
        .qcreate		= sfc_ef10_essb_rx_qcreate,
        .qdestroy		= sfc_ef10_essb_rx_qdestroy,
        .qstart			= sfc_ef10_essb_rx_qstart,
        .qstop			= sfc_ef10_essb_rx_qstop,
        .qrx_ev			= sfc_ef10_essb_rx_qrx_ev,
        .qpurge			= sfc_ef10_essb_rx_qpurge,
        .supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
        .qdesc_npending		= sfc_ef10_essb_rx_qdesc_npending,
        .qdesc_status		= sfc_ef10_essb_rx_qdesc_status,
        .pkt_burst		= sfc_ef10_essb_recv_pkts,
};