/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <string.h>

#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_kvargs.h>

#include <mc/fsl_dpdmai.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>

#include "rte_pmd_dpaa2_qdma.h"
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"

#define DPAA2_QDMA_NO_PREFETCH "no_prefetch"

/* Dynamic log type identifier */
int dpaa2_qdma_logtype;

uint32_t dpaa2_coherent_no_alloc_cache;
uint32_t dpaa2_coherent_alloc_cache;

/* QDMA device */
static struct qdma_device qdma_dev;

/* QDMA H/W queues list */
TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
static struct qdma_hw_queue_list qdma_queue_list
        = TAILQ_HEAD_INITIALIZER(qdma_queue_list);

/* QDMA Virtual Queues */
static struct qdma_virt_queue *qdma_vqs;

/* QDMA per core data */
static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];

typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,
                                            uint16_t rxq_id,
                                            uint16_t *vq_id,
                                            struct rte_qdma_job **job,
                                            uint16_t nb_jobs);

dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;

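/*
 * Typical call flow for this driver's exported API (a sketch only; error
 * handling is omitted and the configuration and job values below are
 * illustrative, not recommendations):
 *
 *      struct rte_qdma_config cfg = {
 *              .max_hw_queues_per_core = 2,
 *              .mode = RTE_QDMA_MODE_HW,
 *              .max_vqs = 16,
 *              .fle_pool_count = 4096,
 *      };
 *      struct rte_qdma_job job = {
 *              .src = src_iova,
 *              .dest = dst_iova,
 *              .len = 4096,
 *              .flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *      };
 *      int vq_id;
 *
 *      rte_qdma_init();
 *      rte_qdma_configure(&cfg);
 *      rte_qdma_start();
 *      vq_id = rte_qdma_vq_create(rte_lcore_id(), 0);
 *      rte_qdma_vq_enqueue(vq_id, &job);
 *      while (rte_qdma_vq_dequeue(vq_id) == NULL)
 *              ;       /* poll for completion */
 */
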
static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
{
        struct qdma_hw_queue *queue = NULL;

        DPAA2_QDMA_FUNC_TRACE();

        /* Get a free queue from the list */
        TAILQ_FOREACH(queue, &qdma_queue_list, next) {
                if (queue->num_users == 0) {
                        queue->lcore_id = lcore_id;
                        queue->num_users++;
                        break;
                }
        }

        return queue;
}

static void
free_hw_queue(struct qdma_hw_queue *queue)
{
        DPAA2_QDMA_FUNC_TRACE();

        queue->num_users--;
}

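/*
 * Get a H/W queue for a shared (non-exclusive) virtual queue: allocate a
 * fresh H/W queue while the core is below its configured per-core limit,
 * otherwise share the least loaded queue already attached to this core.
 */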
static struct qdma_hw_queue *
get_hw_queue(uint32_t lcore_id)
{
        struct qdma_per_core_info *core_info;
        struct qdma_hw_queue *queue, *temp;
        uint32_t least_num_users;
        int num_hw_queues, i;

        DPAA2_QDMA_FUNC_TRACE();

        core_info = &qdma_core_info[lcore_id];
        num_hw_queues = core_info->num_hw_queues;

        /*
         * Allocate a HW queue if there are fewer queues
         * than the maximum per-core queues configured
         */
        if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
                queue = alloc_hw_queue(lcore_id);
                if (queue) {
                        core_info->hw_queues[num_hw_queues] = queue;
                        core_info->num_hw_queues++;
                        return queue;
                }
        }

        queue = core_info->hw_queues[0];
        /* In case there is no queue associated with the core return NULL */
        if (!queue)
                return NULL;

        /* Fetch the least loaded H/W queue */
        least_num_users = core_info->hw_queues[0]->num_users;
        for (i = 0; i < num_hw_queues; i++) {
                temp = core_info->hw_queues[i];
                if (temp->num_users < least_num_users) {
                        queue = temp;
                        least_num_users = temp->num_users;
                }
        }

        if (queue)
                queue->num_users++;

        return queue;
}

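/*
 * Drop one reference on a H/W queue. The last user frees the queue and
 * compacts it out of the owning core's queue table.
 */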
static void
put_hw_queue(struct qdma_hw_queue *queue)
{
        struct qdma_per_core_info *core_info;
        int lcore_id, num_hw_queues, i;

        DPAA2_QDMA_FUNC_TRACE();

        /*
         * If this is the last user of the queue, free it.
         * Also remove it from the QDMA core info.
         */
        if (queue->num_users == 1) {
                free_hw_queue(queue);

                /* Remove the physical queue from core info */
                lcore_id = queue->lcore_id;
                core_info = &qdma_core_info[lcore_id];
                num_hw_queues = core_info->num_hw_queues;
                for (i = 0; i < num_hw_queues; i++) {
                        if (queue == core_info->hw_queues[i])
                                break;
                }
                for (; i < num_hw_queues - 1; i++)
                        core_info->hw_queues[i] = core_info->hw_queues[i + 1];
                core_info->hw_queues[i] = NULL;
                /* Keep the per-core queue count in sync with the table */
                core_info->num_hw_queues--;
        } else {
                queue->num_users--;
        }
}

int
rte_qdma_init(void)
{
        DPAA2_QDMA_FUNC_TRACE();

        rte_spinlock_init(&qdma_dev.lock);

        return 0;
}

void
rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
{
        DPAA2_QDMA_FUNC_TRACE();

        qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
}

int
rte_qdma_reset(void)
{
        struct qdma_hw_queue *queue;
        int i;

        DPAA2_QDMA_FUNC_TRACE();

        /* In case QDMA device is not in stopped state, return -EBUSY */
        if (qdma_dev.state == 1) {
                DPAA2_QDMA_ERR(
                        "Device is in running state. Stop before reset.");
                return -EBUSY;
        }

        /* In case there are pending jobs on any VQ, return -EBUSY */
        for (i = 0; i < qdma_dev.max_vqs; i++) {
                if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
                    qdma_vqs[i].num_dequeues)) {
                        DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
                        return -EBUSY;
                }
        }

        /* Reset HW queues */
        TAILQ_FOREACH(queue, &qdma_queue_list, next)
                queue->num_users = 0;

        /* Reset and free virtual queues */
        for (i = 0; i < qdma_dev.max_vqs; i++) {
                if (qdma_vqs[i].status_ring)
                        rte_ring_free(qdma_vqs[i].status_ring);
        }
        rte_free(qdma_vqs);
        qdma_vqs = NULL;

        /* Reset per core info */
        memset(&qdma_core_info, 0,
                sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);

        /* Free the FLE pool */
        rte_mempool_free(qdma_dev.fle_pool);

        /* Reset QDMA device structure */
        qdma_dev.mode = RTE_QDMA_MODE_HW;
        qdma_dev.max_hw_queues_per_core = 0;
        qdma_dev.fle_pool = NULL;
        qdma_dev.fle_pool_count = 0;
        qdma_dev.max_vqs = 0;

        return 0;
}

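/*
 * Configure the device: mode, per-core H/W queue budget, number of
 * virtual queues and the FLE pool. Each FLE pool element backs one
 * in-flight job (IO metadata, frame-list entries and SDDs), so
 * fle_pool_count effectively bounds the number of outstanding jobs.
 */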
int
rte_qdma_configure(struct rte_qdma_config *qdma_config)
{
        int ret;
        char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */

        DPAA2_QDMA_FUNC_TRACE();

        /* In case QDMA device is not in stopped state, return -EBUSY */
        if (qdma_dev.state == 1) {
                DPAA2_QDMA_ERR(
                        "Device is in running state. Stop before config.");
                return -EBUSY;
        }

        /* Reset the QDMA device */
        ret = rte_qdma_reset();
        if (ret) {
                DPAA2_QDMA_ERR("Resetting QDMA failed");
                return ret;
        }

        /* Set mode */
        qdma_dev.mode = qdma_config->mode;

        /* Set max HW queue per core */
        if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
                DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
                               MAX_HW_QUEUE_PER_CORE);
                return -EINVAL;
        }
        qdma_dev.max_hw_queues_per_core =
                qdma_config->max_hw_queues_per_core;

        /* Allocate Virtual Queues */
        qdma_vqs = rte_malloc("qdma_virtual_queues",
                        (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
                        RTE_CACHE_LINE_SIZE);
        if (!qdma_vqs) {
                DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
                return -ENOMEM;
        }
        qdma_dev.max_vqs = qdma_config->max_vqs;

        /* Allocate FLE pool; just append PID so that in case of
         * multiprocess, the pools don't collide.
         */
        snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
                 getpid());
        qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
                        qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
                        QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
                        NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
        if (!qdma_dev.fle_pool) {
                DPAA2_QDMA_ERR("qdma_fle_pool create failed");
                rte_free(qdma_vqs);
                qdma_vqs = NULL;
                return -ENOMEM;
        }
        qdma_dev.fle_pool_count = qdma_config->fle_pool_count;

        return 0;
}

int
rte_qdma_start(void)
{
        DPAA2_QDMA_FUNC_TRACE();

        qdma_dev.state = 1;

        return 0;
}

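/*
 * Create a virtual queue on the given lcore. In H/W mode (or with the
 * RTE_QDMA_VQ_EXCLUSIVE_PQ flag) the VQ owns a dedicated H/W queue;
 * otherwise it shares one and uses a status ring to sort out completions
 * belonging to other VQs on the same H/W queue. Returns the VQ id, or a
 * negative errno on failure.
 */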
int
rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
{
        char ring_name[32];
        int i;

        DPAA2_QDMA_FUNC_TRACE();

        rte_spinlock_lock(&qdma_dev.lock);

        /* Get a free Virtual Queue */
        for (i = 0; i < qdma_dev.max_vqs; i++) {
                if (qdma_vqs[i].in_use == 0)
                        break;
        }

        /* Return in case no VQ is free */
        if (i == qdma_dev.max_vqs) {
                rte_spinlock_unlock(&qdma_dev.lock);
                DPAA2_QDMA_ERR("No free virtual queue available");
                return -ENODEV;
        }

        if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
                        (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
                /* Allocate HW queue for a VQ */
                qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
                qdma_vqs[i].exclusive_hw_queue = 1;
        } else {
                /* Allocate a Ring for Virtual Queue in VQ mode */
                snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
                qdma_vqs[i].status_ring = rte_ring_create(ring_name,
                        qdma_dev.fle_pool_count, rte_socket_id(), 0);
                if (!qdma_vqs[i].status_ring) {
                        DPAA2_QDMA_ERR("Status ring creation failed for vq");
                        rte_spinlock_unlock(&qdma_dev.lock);
                        return -rte_errno;
                }

                /* Get a HW queue (shared) for a VQ */
                qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
                qdma_vqs[i].exclusive_hw_queue = 0;
        }

        if (qdma_vqs[i].hw_queue == NULL) {
                DPAA2_QDMA_ERR("No H/W queue available for VQ");
                if (qdma_vqs[i].status_ring)
                        rte_ring_free(qdma_vqs[i].status_ring);
                qdma_vqs[i].status_ring = NULL;
                rte_spinlock_unlock(&qdma_dev.lock);
                return -ENODEV;
        }

        qdma_vqs[i].in_use = 1;
        qdma_vqs[i].lcore_id = lcore_id;
        memset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
        rte_spinlock_unlock(&qdma_dev.lock);

        return i;
}

/* Create a VQ for route-by-port (RBP) transfers */
int
rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
                       struct rte_qdma_rbp *rbp)
{
        int i;

        i = rte_qdma_vq_create(lcore_id, flags);
        if (i < 0)
                return i;

        memcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));

        return i;
}

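/*
 * Lay out one compound frame for a job. The memory following the FLE
 * pointer holds DPAA2_QDMA_MAX_FLE frame-list entries followed by a
 * source/destination descriptor (SDD) pair: the first FLE points at the
 * SDDs, the second at the source buffer and the third at the destination
 * buffer.
 */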
static void
dpaa2_qdma_populate_fle(struct qbman_fle *fle,
                        struct rte_qdma_rbp *rbp,
                        uint64_t src, uint64_t dest,
                        size_t len, uint32_t flags)
{
        struct qdma_sdd *sdd;

        sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
                (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));

        /* first frame list to source descriptor */
        DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
        DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));

        /* source and destination descriptor */
        if (rbp && rbp->enable) {
                /* source */
                sdd->read_cmd.portid = rbp->sportid;
                sdd->rbpcmd_simple.pfid = rbp->spfid;
                sdd->rbpcmd_simple.vfid = rbp->svfid;

                if (rbp->srbp) {
                        sdd->read_cmd.rbp = rbp->srbp;
                        sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
                } else {
                        sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
                }
                sdd++;
                /* destination */
                sdd->write_cmd.portid = rbp->dportid;
                sdd->rbpcmd_simple.pfid = rbp->dpfid;
                sdd->rbpcmd_simple.vfid = rbp->dvfid;

                if (rbp->drbp) {
                        sdd->write_cmd.rbp = rbp->drbp;
                        sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
                } else {
                        sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
                }
        } else {
                sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
                sdd++;
                sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
        }
        fle++;
        /* source frame list to source buffer */
        if (flags & RTE_QDMA_JOB_SRC_PHY) {
                DPAA2_SET_FLE_ADDR(fle, src);
                DPAA2_SET_FLE_BMT(fle);
        } else {
                DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
        }
        DPAA2_SET_FLE_LEN(fle, len);

        fle++;
        /* destination frame list to destination buffer */
        if (flags & RTE_QDMA_JOB_DEST_PHY) {
                DPAA2_SET_FLE_BMT(fle);
                DPAA2_SET_FLE_ADDR(fle, dest);
        } else {
                DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
        }
        DPAA2_SET_FLE_LEN(fle, len);

        /* Final bit: 1, for last frame list */
        DPAA2_SET_FLE_FIN(fle);
}

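/*
 * Build one frame descriptor for a job: take an FLE element from the
 * pool, stash the job pointer and VQ id in the IO metadata that precedes
 * it (recovered again at dequeue time), and point the FD at the compound
 * frame. Returns 0 on success or a negative value if the pool is
 * exhausted.
 */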
static inline int dpdmai_dev_set_fd(struct qbman_fd *fd,
                                        struct rte_qdma_job *job,
                                        struct rte_qdma_rbp *rbp,
                                        uint16_t vq_id)
{
        struct qdma_io_meta *io_meta;
        struct qbman_fle *fle;
        int ret = 0;
        /*
         * Get an FLE/SDD from FLE pool.
         * Note: IO metadata is before the FLE and SDD memory.
         */
        ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
        if (ret) {
                DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
                return ret;
        }

        /* Set the metadata */
        io_meta->cnxt = (size_t)job;
        io_meta->id = vq_id;

        fle = (struct qbman_fle *)(io_meta + 1);

        DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
        DPAA2_SET_FD_COMPOUND_FMT(fd);
        DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);

        /* Populate FLE */
        memset(fle, 0, QDMA_FLE_POOL_SIZE);
        dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
                                job->len, job->flags);

        return 0;
}

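/*
 * Push up to nb_jobs jobs to the DPDMAI Tx queue in EQCR-sized bursts.
 * Returns the number of jobs actually submitted, which may be less than
 * nb_jobs if the FLE pool runs dry.
 */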
static int
dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
                        uint16_t txq_id,
                        uint16_t vq_id,
                        struct rte_qdma_rbp *rbp,
                        struct rte_qdma_job **job,
                        uint16_t nb_jobs)
{
        struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
        struct dpaa2_queue *txq;
        struct qbman_eq_desc eqdesc;
        struct qbman_swp *swp;
        int ret;
        uint32_t num_to_send = 0;
        uint16_t num_tx = 0;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_QDMA_ERR("Failure in affining portal");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        txq = &(dpdmai_dev->tx_queue[txq_id]);

        /* Prepare enqueue descriptor */
        qbman_eq_desc_clear(&eqdesc);
        qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
        qbman_eq_desc_set_no_orp(&eqdesc, 0);
        qbman_eq_desc_set_response(&eqdesc, 0, 0);

        memset(fd, 0, RTE_QDMA_BURST_NB_MAX * sizeof(struct qbman_fd));

        while (nb_jobs > 0) {
                uint32_t loop;

                num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
                        dpaa2_eqcr_size : nb_jobs;

                for (loop = 0; loop < num_to_send; loop++) {
                        ret = dpdmai_dev_set_fd(&fd[loop],
                                                job[num_tx], rbp, vq_id);
                        if (ret < 0) {
                                /* Set nb_jobs to loop, so the outer while
                                 * loop breaks out.
                                 */
                                nb_jobs = loop;
                                break;
                        }

                        num_tx++;
                }

                /* Enqueue the packet to the QBMAN */
                uint32_t enqueue_loop = 0;
                while (enqueue_loop < loop) {
                        enqueue_loop += qbman_swp_enqueue_multiple(swp,
                                                &eqdesc,
                                                &fd[enqueue_loop],
                                                NULL,
                                                loop - enqueue_loop);
                }
                nb_jobs -= loop;
        }
        return num_tx;
}

int
rte_qdma_vq_enqueue_multi(uint16_t vq_id,
                          struct rte_qdma_job **job,
                          uint16_t nb_jobs)
{
        struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
        struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
        struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
        int ret;

        /* Return error in case of wrong lcore_id */
        if (rte_lcore_id() != qdma_vq->lcore_id) {
                DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
                               vq_id);
                return -EINVAL;
        }

        ret = dpdmai_dev_enqueue_multi(dpdmai_dev,
                                       qdma_pq->queue_id,
                                       vq_id,
                                       &qdma_vq->rbp,
                                       job,
                                       nb_jobs);
        if (ret < 0) {
                DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
                return ret;
        }

        qdma_vq->num_enqueues += ret;

        return ret;
}

int
rte_qdma_vq_enqueue(uint16_t vq_id,
                    struct rte_qdma_job *job)
{
        return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
}

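/*
 * Recover the job and originating VQ id from a completed FD, record the
 * completion status reported by hardware in the job, and return the FLE
 * element to the pool.
 */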
static inline uint16_t dpdmai_dev_get_job(const struct qbman_fd *fd,
                                        struct rte_qdma_job **job)
{
        struct qbman_fle *fle;
        struct qdma_io_meta *io_meta;
        uint16_t vqid;
        /*
         * Fetch metadata from FLE. job and vq_id were set
         * in metadata in the enqueue operation.
         */
        fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
        io_meta = (struct qdma_io_meta *)(fle) - 1;

        *job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
        (*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
                         (DPAA2_GET_FD_FRC(fd) & 0xFF);

        vqid = io_meta->id;

        /* Free FLE to the pool */
        rte_mempool_put(qdma_dev.fle_pool, io_meta);

        return vqid;
}

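/*
 * Prefetch-mode dequeue keeps one volatile dequeue (VDQ) command
 * outstanding across calls: the results consumed here were requested by
 * the previous call, and before returning a new pull is issued into the
 * other of the two toggled dq_storage buffers.
 */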
/* Function to receive a QDMA job for a given device and queue */
static int
dpdmai_dev_dequeue_multijob_prefetch(
                        struct dpaa2_dpdmai_dev *dpdmai_dev,
                        uint16_t rxq_id,
                        uint16_t *vq_id,
                        struct rte_qdma_job **job,
                        uint16_t nb_jobs)
{
        struct dpaa2_queue *rxq;
        struct qbman_result *dq_storage, *dq_storage1 = NULL;
        struct qbman_pull_desc pulldesc;
        struct qbman_swp *swp;
        struct queue_storage_info_t *q_storage;
        uint32_t fqid;
        uint8_t status, pending;
        uint8_t num_rx = 0;
        const struct qbman_fd *fd;
        uint16_t vqid;
        int ret, pull_size;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_QDMA_ERR("Failure in affining portal");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
        rxq = &(dpdmai_dev->rx_queue[rxq_id]);
        fqid = rxq->fqid;
        q_storage = rxq->q_storage;

        if (unlikely(!q_storage->active_dqs)) {
                q_storage->toggle = 0;
                dq_storage = q_storage->dq_storage[q_storage->toggle];
                q_storage->last_num_pkts = pull_size;
                qbman_pull_desc_clear(&pulldesc);
                qbman_pull_desc_set_numframes(&pulldesc,
                                              q_storage->last_num_pkts);
                qbman_pull_desc_set_fq(&pulldesc, fqid);
                qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                                (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
                if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
                        while (!qbman_check_command_complete(
                               get_swp_active_dqs(
                               DPAA2_PER_LCORE_DPIO->index)))
                                ;
                        clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
                }
                while (1) {
                        if (qbman_swp_pull(swp, &pulldesc)) {
                                DPAA2_QDMA_DP_WARN(
                                        "VDQ command not issued. QBMAN busy\n");
                                /* Portal was busy, try again */
                                continue;
                        }
                        break;
                }
                q_storage->active_dqs = dq_storage;
                q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
                set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
                                   dq_storage);
        }

        dq_storage = q_storage->active_dqs;
        rte_prefetch0((void *)(size_t)(dq_storage));
        rte_prefetch0((void *)(size_t)(dq_storage + 1));

        /* Prepare next pull descriptor. This will give space for the
         * prefetching done on DQRR entries.
         */
        q_storage->toggle ^= 1;
        dq_storage1 = q_storage->dq_storage[q_storage->toggle];
        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc, pull_size);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
                        (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

        /* Check if the previously issued command is completed. Note that
         * the SWP may also be shared with the Ethernet and SEC drivers.
         */
        while (!qbman_check_command_complete(dq_storage))
                ;
        if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
                clear_swp_active_dqs(q_storage->active_dpio_id);

        pending = 1;

        do {
                /* Loop until the dq_storage is updated with
                 * new token by QBMAN
                 */
                while (!qbman_check_new_result(dq_storage))
                        ;
                rte_prefetch0((void *)((size_t)(dq_storage + 2)));
                /* Check whether the last pull command has expired,
                 * setting the condition for loop termination
                 */
                if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                        pending = 0;
                        /* Check for valid frame. */
                        status = qbman_result_DQ_flags(dq_storage);
                        if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                continue;
                }
                fd = qbman_result_DQ_fd(dq_storage);

                vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
                if (vq_id)
                        vq_id[num_rx] = vqid;

                dq_storage++;
                num_rx++;
        } while (pending);

        if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
                while (!qbman_check_command_complete(
                       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
                        ;
                clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
        }
        /* Issue a volatile dequeue command for the next pull */
        while (1) {
                if (qbman_swp_pull(swp, &pulldesc)) {
                        DPAA2_QDMA_DP_WARN("VDQ command is not issued. "
                                           "QBMAN is busy (2)\n");
                        continue;
                }
                break;
        }

        q_storage->active_dqs = dq_storage1;
        q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
        set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);

        return num_rx;
}

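/*
 * Non-prefetch dequeue (selected with the "no_prefetch=1" devarg): each
 * call issues its own volatile dequeue and waits for it to complete, so
 * no pull command is left outstanding between calls.
 */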
static int
dpdmai_dev_dequeue_multijob_no_prefetch(
                struct dpaa2_dpdmai_dev *dpdmai_dev,
                uint16_t rxq_id,
                uint16_t *vq_id,
                struct rte_qdma_job **job,
                uint16_t nb_jobs)
{
        struct dpaa2_queue *rxq;
        struct qbman_result *dq_storage;
        struct qbman_pull_desc pulldesc;
        struct qbman_swp *swp;
        uint32_t fqid;
        uint8_t status, pending;
        uint8_t num_rx = 0;
        const struct qbman_fd *fd;
        uint16_t vqid;
        int ret, next_pull = nb_jobs, num_pulled = 0;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_QDMA_ERR("Failure in affining portal");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        rxq = &(dpdmai_dev->rx_queue[rxq_id]);
        fqid = rxq->fqid;

        do {
                dq_storage = rxq->q_storage->dq_storage[0];
                /* Prepare dequeue descriptor */
                qbman_pull_desc_clear(&pulldesc);
                qbman_pull_desc_set_fq(&pulldesc, fqid);
                qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

                if (next_pull > dpaa2_dqrr_size) {
                        qbman_pull_desc_set_numframes(&pulldesc,
                                        dpaa2_dqrr_size);
                        next_pull -= dpaa2_dqrr_size;
                } else {
                        qbman_pull_desc_set_numframes(&pulldesc, next_pull);
                        next_pull = 0;
                }

                while (1) {
                        if (qbman_swp_pull(swp, &pulldesc)) {
                                DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
                                /* Portal was busy, try again */
                                continue;
                        }
                        break;
                }

                rte_prefetch0((void *)((size_t)(dq_storage + 1)));
                /* Check if the previously issued command is completed. */
                while (!qbman_check_command_complete(dq_storage))
                        ;

                num_pulled = 0;
                pending = 1;

                do {
                        /* Loop until dq_storage is updated
                         * with new token by QBMAN
                         */
                        while (!qbman_check_new_result(dq_storage))
                                ;
                        rte_prefetch0((void *)((size_t)(dq_storage + 2)));

                        if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                                pending = 0;
                                /* Check for valid frame. */
                                status = qbman_result_DQ_flags(dq_storage);
                                if (unlikely((status &
                                        QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                        continue;
                        }
                        fd = qbman_result_DQ_fd(dq_storage);

                        vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
                        if (vq_id)
                                vq_id[num_rx] = vqid;

                        dq_storage++;
                        num_rx++;
                        num_pulled++;

                } while (pending);
        /* Last VDQ provided all packets and more packets are requested */
        } while (next_pull && num_pulled == dpaa2_dqrr_size);

        return num_rx;
}

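/*
 * Dequeue up to nb_jobs completed jobs for a VQ. An exclusive VQ reads
 * its H/W queue directly. A shared VQ drains the H/W queue into the
 * per-VQ status rings (completions may belong to any VQ sharing the
 * queue) and then returns jobs from its own ring.
 */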
int
rte_qdma_vq_dequeue_multi(uint16_t vq_id,
                          struct rte_qdma_job **job,
                          uint16_t nb_jobs)
{
        struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
        struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
        struct qdma_virt_queue *temp_qdma_vq;
        struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
        int ring_count, ret = 0, i;

        /* Return error in case of wrong lcore_id */
        if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
                DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
                                vq_id);
                return -EINVAL;
        }

        /* Only dequeue when there are pending jobs on VQ */
        if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
                return 0;

        if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
                nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);

        if (qdma_vq->exclusive_hw_queue) {
                /* In case of exclusive queue directly fetch from HW queue */
                ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
                                                  NULL, job, nb_jobs);
                if (ret < 0) {
                        DPAA2_QDMA_ERR(
                                "Dequeue from DPDMAI device failed: %d", ret);
                        return ret;
                }
                qdma_vq->num_dequeues += ret;
        } else {
                uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
                /*
                 * Get the QDMA completed jobs from the software ring.
                 * In case they are not available on the ring poke the HW
                 * to fetch completed jobs from corresponding HW queues
                 */
                ring_count = rte_ring_count(qdma_vq->status_ring);
                if (ring_count < nb_jobs) {
                        /* TODO - How to have right budget */
                        ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
                                        qdma_pq->queue_id,
                                        temp_vq_id, job, nb_jobs);
                        for (i = 0; i < ret; i++) {
                                temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
                                rte_ring_enqueue(temp_qdma_vq->status_ring,
                                        (void *)(job[i]));
                        }
                        ring_count = rte_ring_count(
                                        qdma_vq->status_ring);
                }

                if (ring_count) {
                        /* Dequeue job from the software ring
                         * to provide to the user
                         */
                        ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
                                        (void **)job, ring_count, NULL);
                        if (ret)
                                qdma_vq->num_dequeues += ret;
                }
        }

        return ret;
}

struct rte_qdma_job *
rte_qdma_vq_dequeue(uint16_t vq_id)
{
        int ret;
        struct rte_qdma_job *job = NULL;

        ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
        if (ret < 0)
                DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);

        return job;
}

void
rte_qdma_vq_stats(uint16_t vq_id,
                  struct rte_qdma_vq_stats *vq_status)
{
        struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

        if (qdma_vq->in_use) {
                vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
                vq_status->lcore_id = qdma_vq->lcore_id;
                vq_status->num_enqueues = qdma_vq->num_enqueues;
                vq_status->num_dequeues = qdma_vq->num_dequeues;
                vq_status->num_pending_jobs = vq_status->num_enqueues -
                                vq_status->num_dequeues;
        }
}

int
rte_qdma_vq_destroy(uint16_t vq_id)
{
        struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

        DPAA2_QDMA_FUNC_TRACE();

        /* In case there are pending jobs on the VQ, return -EBUSY */
        if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
                return -EBUSY;

        rte_spinlock_lock(&qdma_dev.lock);

        if (qdma_vq->exclusive_hw_queue)
                free_hw_queue(qdma_vq->hw_queue);
        else {
                if (qdma_vq->status_ring)
                        rte_ring_free(qdma_vq->status_ring);

                put_hw_queue(qdma_vq->hw_queue);
        }

        memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

        rte_spinlock_unlock(&qdma_dev.lock);

        return 0;
}

int
rte_qdma_vq_destroy_rbp(uint16_t vq_id)
{
        struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

        DPAA2_QDMA_FUNC_TRACE();

        /* In case there are pending jobs on the VQ, return -EBUSY */
        if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
                return -EBUSY;

        rte_spinlock_lock(&qdma_dev.lock);

        if (qdma_vq->exclusive_hw_queue) {
                free_hw_queue(qdma_vq->hw_queue);
        } else {
                if (qdma_vq->status_ring)
                        rte_ring_free(qdma_vq->status_ring);

                put_hw_queue(qdma_vq->hw_queue);
        }

        memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

        rte_spinlock_unlock(&qdma_dev.lock);

        return 0;
}

void
rte_qdma_stop(void)
{
        DPAA2_QDMA_FUNC_TRACE();

        qdma_dev.state = 0;
}

void
rte_qdma_destroy(void)
{
        DPAA2_QDMA_FUNC_TRACE();

        rte_qdma_reset();
}

static const struct rte_rawdev_ops dpaa2_qdma_ops;

static int
add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
        struct qdma_hw_queue *queue;
        int i;

        DPAA2_QDMA_FUNC_TRACE();

        for (i = 0; i < dpdmai_dev->num_queues; i++) {
                queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
                if (!queue) {
                        DPAA2_QDMA_ERR(
                                "Memory allocation failed for QDMA queue");
                        return -ENOMEM;
                }

                queue->dpdmai_dev = dpdmai_dev;
                queue->queue_id = i;

                TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
                qdma_dev.num_hw_queues++;
        }

        return 0;
}

static void
remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
        struct qdma_hw_queue *queue = NULL;
        struct qdma_hw_queue *tqueue = NULL;

        DPAA2_QDMA_FUNC_TRACE();

        TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
                if (queue->dpdmai_dev == dpdmai_dev) {
                        TAILQ_REMOVE(&qdma_queue_list, queue, next);
                        rte_free(queue);
                        queue = NULL;
                }
        }
}

static int
dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
{
        struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
        int ret, i;

        DPAA2_QDMA_FUNC_TRACE();

        /* Remove HW queues from global list */
        remove_hw_queues_from_list(dpdmai_dev);

        ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                             dpdmai_dev->token);
        if (ret)
                DPAA2_QDMA_ERR("dpdmai disable failed");

        /* Free the DQRR storage of the Rx queues */
        for (i = 0; i < dpdmai_dev->num_queues; i++) {
                struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);

                if (rxq->q_storage) {
                        dpaa2_free_dq_storage(rxq->q_storage);
                        rte_free(rxq->q_storage);
                }
        }

        /* Close the device at underlying layer */
        ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
        if (ret)
                DPAA2_QDMA_ERR("Failure closing dpdmai device");

        return 0;
}

static int
check_devargs_handler(__rte_unused const char *key, const char *value,
                      __rte_unused void *opaque)
{
        if (strcmp(value, "1"))
                return -1;

        return 0;
}

static int
dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
{
        struct rte_kvargs *kvlist;

        if (!devargs)
                return 0;

        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (!kvlist)
                return 0;

        if (!rte_kvargs_count(kvlist, key)) {
                rte_kvargs_free(kvlist);
                return 0;
        }

        if (rte_kvargs_process(kvlist, key,
                               check_devargs_handler, NULL) < 0) {
                rte_kvargs_free(kvlist);
                return 0;
        }
        rte_kvargs_free(kvlist);

        return 1;
}

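/*
 * Device init: the "no_prefetch=1" devarg (parsed below via
 * dpaa2_get_devargs) selects dpdmai_dev_dequeue_multijob_no_prefetch as
 * the dequeue routine; by default the prefetch variant is used.
 */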
static int
dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
{
        struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
        struct dpdmai_rx_queue_cfg rx_queue_cfg;
        struct dpdmai_attr attr;
        struct dpdmai_rx_queue_attr rx_attr;
        struct dpdmai_tx_queue_attr tx_attr;
        int ret, i;

        DPAA2_QDMA_FUNC_TRACE();

        /* Open DPDMAI device */
        dpdmai_dev->dpdmai_id = dpdmai_id;
        dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
        ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                          dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
        if (ret) {
                DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
                return ret;
        }

        /* Get DPDMAI attributes */
        ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                                    dpdmai_dev->token, &attr);
        if (ret) {
                DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
                               ret);
                goto init_err;
        }
        dpdmai_dev->num_queues = attr.num_of_queues;

        /* Set up Rx Queues */
        for (i = 0; i < dpdmai_dev->num_queues; i++) {
                struct dpaa2_queue *rxq;

                memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
                ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
                                          CMD_PRI_LOW,
                                          dpdmai_dev->token,
                                          i, 0, &rx_queue_cfg);
                if (ret) {
                        DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
                                       ret);
                        goto init_err;
                }

                /* Allocate DQ storage for the DPDMAI Rx queues */
                rxq = &(dpdmai_dev->rx_queue[i]);
                rxq->q_storage = rte_malloc("dq_storage",
                                        sizeof(struct queue_storage_info_t),
                                        RTE_CACHE_LINE_SIZE);
                if (!rxq->q_storage) {
                        DPAA2_QDMA_ERR("q_storage allocation failed");
                        ret = -ENOMEM;
                        goto init_err;
                }

                memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
                ret = dpaa2_alloc_dq_storage(rxq->q_storage);
                if (ret) {
                        DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
                        goto init_err;
                }
        }

        /* Get Rx and Tx queues FQIDs */
        for (i = 0; i < dpdmai_dev->num_queues; i++) {
                ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                                          dpdmai_dev->token, i, 0, &rx_attr);
                if (ret) {
                        DPAA2_QDMA_ERR("Reading device failed with err: %d",
                                       ret);
                        goto init_err;
                }
                dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;

                ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                                          dpdmai_dev->token, i, 0, &tx_attr);
                if (ret) {
                        DPAA2_QDMA_ERR("Reading device failed with err: %d",
                                       ret);
                        goto init_err;
                }
                dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
        }

        /* Enable the device */
        ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                            dpdmai_dev->token);
        if (ret) {
                DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
                goto init_err;
        }

        /* Add the HW queue to the global list */
        ret = add_hw_queues_to_list(dpdmai_dev);
        if (ret) {
                DPAA2_QDMA_ERR("Adding H/W queue to list failed");
                goto init_err;
        }

        if (dpaa2_get_devargs(rawdev->device->devargs,
                        DPAA2_QDMA_NO_PREFETCH)) {
                /* If no prefetch is configured. */
                dpdmai_dev_dequeue_multijob =
                                dpdmai_dev_dequeue_multijob_no_prefetch;
                DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
        } else {
                dpdmai_dev_dequeue_multijob =
                        dpdmai_dev_dequeue_multijob_prefetch;
        }

        if (!dpaa2_coherent_no_alloc_cache) {
                if (dpaa2_svr_family == SVR_LX2160A) {
                        dpaa2_coherent_no_alloc_cache =
                                DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
                        dpaa2_coherent_alloc_cache =
                                DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
                } else {
                        dpaa2_coherent_no_alloc_cache =
                                DPAA2_COHERENT_NO_ALLOCATE_CACHE;
                        dpaa2_coherent_alloc_cache =
                                DPAA2_COHERENT_ALLOCATE_CACHE;
                }
        }

        DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");

        return 0;
init_err:
        dpaa2_dpdmai_dev_uninit(rawdev);
        return ret;
}

static int
rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
                     struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_rawdev *rawdev;
        int ret;

        DPAA2_QDMA_FUNC_TRACE();

        rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
                                         sizeof(struct dpaa2_dpdmai_dev),
                                         rte_socket_id());
        if (!rawdev) {
                DPAA2_QDMA_ERR("Unable to allocate rawdevice");
                return -EINVAL;
        }

        dpaa2_dev->rawdev = rawdev;
        rawdev->dev_ops = &dpaa2_qdma_ops;
        rawdev->device = &dpaa2_dev->device;
        rawdev->driver_name = dpaa2_drv->driver.name;

        /* Invoke PMD device initialization function */
        ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
        if (ret) {
                rte_rawdev_pmd_release(rawdev);
                return ret;
        }

        return 0;
}

static int
rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
        int ret;

        DPAA2_QDMA_FUNC_TRACE();

        dpaa2_dpdmai_dev_uninit(rawdev);

        ret = rte_rawdev_pmd_release(rawdev);
        if (ret)
                DPAA2_QDMA_ERR("Device cleanup failed");

        return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
        .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
        .drv_type = DPAA2_QDMA,
        .probe = rte_dpaa2_qdma_probe,
        .remove = rte_dpaa2_qdma_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
        "no_prefetch=<int> ");

RTE_INIT(dpaa2_qdma_init_log)
{
        dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
        if (dpaa2_qdma_logtype >= 0)
                rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
}