/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_kvargs.h>

#include <mc/fsl_dpdmai.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>

#include "rte_pmd_dpaa2_qdma.h"
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"
#define DPAA2_QDMA_NO_PREFETCH "no_prefetch"
/* Dynamic log type identifier */
int dpaa2_qdma_logtype;

uint32_t dpaa2_coherent_no_alloc_cache;
uint32_t dpaa2_coherent_alloc_cache;
static struct qdma_device qdma_dev;
/* QDMA H/W queues list */
TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
static struct qdma_hw_queue_list qdma_queue_list
	= TAILQ_HEAD_INITIALIZER(qdma_queue_list);
/* QDMA Virtual Queues */
static struct qdma_virt_queue *qdma_vqs;
/* QDMA per core data */
static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,
					    uint16_t rxq_id,
					    uint16_t *vq_id,
					    struct rte_qdma_job **job,
					    uint16_t nb_jobs);

dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;

typedef uint16_t (dpdmai_dev_get_job_t)(const struct qbman_fd *fd,
					struct rte_qdma_job **job);
typedef int (dpdmai_dev_set_fd_t)(struct qbman_fd *fd,
				  struct rte_qdma_job *job,
				  struct rte_qdma_rbp *rbp,
				  uint16_t vq_id);

dpdmai_dev_get_job_t *dpdmai_dev_get_job;
dpdmai_dev_set_fd_t *dpdmai_dev_set_fd;
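
/*
 * These hooks are bound at run time: rte_qdma_configure() selects the
 * dpdmai_dev_get_job/dpdmai_dev_set_fd pair based on the configured FD
 * format (ultrashort vs. long), and dpaa2_dpdmai_dev_init() selects
 * dpdmai_dev_dequeue_multijob based on the "no_prefetch" devargs.
 */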
static inline int
qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
			uint32_t len, struct qbman_fd *fd,
			struct rte_qdma_rbp *rbp)
{
	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));

	fd->simple_pci.len_sl = len;

	fd->simple_pci.bmt = 1;
	fd->simple_pci.fmt = 3;
	fd->simple_pci.sl = 1;
	fd->simple_pci.ser = 1;

	fd->simple_pci.sportid = rbp->sportid;	/* pcie 3 */
	fd->simple_pci.srbp = rbp->srbp;
	if (rbp->srbp)
		fd->simple_pci.rdttype = 0;
	else
		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;

	/* dest is pcie memory */
	fd->simple_pci.dportid = rbp->dportid;	/* pcie 3 */
	fd->simple_pci.drbp = rbp->drbp;
	if (rbp->drbp)
		fd->simple_pci.wrttype = 0;
	else
		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;

	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));

	return 0;
}
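
/*
 * Both helpers build "ultrashort" (simple) frame descriptors:
 * qdma_populate_fd_pci() above covers jobs where either leg is routed
 * by port (PCIe), while qdma_populate_fd_ddr() below covers plain
 * memory-to-memory copies; dpdmai_dev_set_fd_us() picks between them
 * based on the RBP flags.
 */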
static inline int
qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
			uint32_t len, struct qbman_fd *fd)
{
	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));

	fd->simple_ddr.len = len;

	fd->simple_ddr.bmt = 1;
	fd->simple_ddr.fmt = 3;
	fd->simple_ddr.sl = 1;
	fd->simple_ddr.ser = 1;
	/**
	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
	 * Coherent copy of cacheable memory,
	 * lookup in downstream cache, no allocate
	 * on miss
	 */
	fd->simple_ddr.rns = 0;
	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
	/**
	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
	 * Coherent write of cacheable memory,
	 * lookup in downstream cache, no allocate on miss
	 */
	fd->simple_ddr.wns = 0;
	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;

	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));

	return 0;
}
static void
dpaa2_qdma_populate_fle(struct qbman_fle *fle,
			struct rte_qdma_rbp *rbp,
			uint64_t src, uint64_t dest,
			size_t len, uint32_t flags)
{
	struct qdma_sdd *sdd;

	sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
		(DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));

	/* first frame list to source descriptor */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));

	/* source and destination descriptor */
	if (rbp && rbp->enable) {
		/* source */
		sdd->read_cmd.portid = rbp->sportid;
		sdd->rbpcmd_simple.pfid = rbp->spfid;
		sdd->rbpcmd_simple.vfid = rbp->svfid;

		if (rbp->srbp) {
			sdd->read_cmd.rbp = rbp->srbp;
			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
		} else {
			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
		}
		sdd++;
		/* destination */
		sdd->write_cmd.portid = rbp->dportid;
		sdd->rbpcmd_simple.pfid = rbp->dpfid;
		sdd->rbpcmd_simple.vfid = rbp->dvfid;

		if (rbp->drbp) {
			sdd->write_cmd.rbp = rbp->drbp;
			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
		} else {
			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
		}
	} else {
		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
		sdd++;
		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
	}
	fle++;
	/* source frame list to source buffer */
	if (flags & RTE_QDMA_JOB_SRC_PHY) {
		DPAA2_SET_FLE_ADDR(fle, src);
		DPAA2_SET_FLE_BMT(fle);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
	}
	DPAA2_SET_FLE_LEN(fle, len);

	fle++;
	/* destination frame list to destination buffer */
	if (flags & RTE_QDMA_JOB_DEST_PHY) {
		DPAA2_SET_FLE_BMT(fle);
		DPAA2_SET_FLE_ADDR(fle, dest);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
	}
	DPAA2_SET_FLE_LEN(fle, len);

	/* Final bit: 1, for last frame list */
	DPAA2_SET_FLE_FIN(fle);
}
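
/*
 * Layout produced above (compound frame table): fle[0] points at the
 * pair of source/destination descriptors (SDDs), fle[1] describes the
 * source buffer and fle[2] the destination buffer, with the FIN bit set
 * on the last entry to terminate the list.
 */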
static inline int dpdmai_dev_set_fd_us(struct qbman_fd *fd,
					struct rte_qdma_job *job,
					struct rte_qdma_rbp *rbp,
					uint16_t vq_id)
{
	size_t iova;
	struct rte_qdma_job **ppjob;
	int ret = 0;

	if (job->src & QDMA_RBP_UPPER_ADDRESS_MASK)
		iova = (size_t)job->dest;
	else
		iova = (size_t)job->src;

	/* Set the metadata */
	job->vq_id = vq_id;
	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
	*ppjob = job;

	if ((rbp->drbp == 1) || (rbp->srbp == 1))
		ret = qdma_populate_fd_pci((phys_addr_t) job->src,
					   (phys_addr_t) job->dest,
					   job->len, fd, rbp);
	else
		ret = qdma_populate_fd_ddr((phys_addr_t) job->src,
					   (phys_addr_t) job->dest,
					   job->len, fd);
	return ret;
}
static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,
					struct rte_qdma_job *job,
					struct rte_qdma_rbp *rbp,
					uint16_t vq_id)
{
	struct rte_qdma_job **ppjob;
	struct qbman_fle *fle;
	int ret = 0;

	/*
	 * Get an FLE/SDD from FLE pool.
	 * Note: IO metadata is before the FLE and SDD memory.
	 */
	ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&ppjob));
	if (ret) {
		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
		return ret;
	}

	/* Set the metadata */
	job->vq_id = vq_id;
	*ppjob = job;

	fle = (struct qbman_fle *)(ppjob + 1);

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);

	/* Populate FLE */
	memset(fle, 0, QDMA_FLE_POOL_SIZE);
	dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
				job->len, job->flags);

	return 0;
}
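
/*
 * Each FLE-pool object is laid out as: the rte_qdma_job pointer
 * (metadata), then DPAA2_QDMA_MAX_FLE frame-list entries, then the SDDs.
 * The job pointer sits immediately before the FLE table whose address is
 * written into the FD, which is how dpdmai_dev_get_job_lf() recovers the
 * job (and its vq_id) from a completed FD.
 */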
static inline uint16_t dpdmai_dev_get_job_us(const struct qbman_fd *fd,
					struct rte_qdma_job **job)
{
	uint16_t vqid;
	size_t iova;
	struct rte_qdma_job **ppjob;

	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
				| (uint64_t)fd->simple_pci.daddr_lo);
	else
		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
				| (uint64_t)fd->simple_pci.saddr_lo);

	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
	*job = (struct rte_qdma_job *)*ppjob;
	(*job)->status = (fd->simple_pci.acc_err << 8) | (fd->simple_pci.error);
	vqid = (*job)->vq_id;

	return vqid;
}
static inline uint16_t dpdmai_dev_get_job_lf(const struct qbman_fd *fd,
					struct rte_qdma_job **job)
{
	struct rte_qdma_job **ppjob;
	uint16_t vqid;

	/*
	 * Fetch metadata from FLE. job and vq_id were set
	 * in metadata in the enqueue operation.
	 */
	ppjob = (struct rte_qdma_job **)
			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	ppjob -= 1;

	*job = (struct rte_qdma_job *)*ppjob;
	(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
			 (DPAA2_GET_FD_FRC(fd) & 0xFF);
	vqid = (*job)->vq_id;

	/* Free FLE to the pool */
	rte_mempool_put(qdma_dev.fle_pool, (void *)ppjob);

	return vqid;
}
static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
{
	struct qdma_hw_queue *queue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	/* Get a free queue from the list */
	TAILQ_FOREACH(queue, &qdma_queue_list, next) {
		if (queue->num_users == 0) {
			queue->lcore_id = lcore_id;
			queue->num_users++;
			break;
		}
	}

	return queue;
}
static void
free_hw_queue(struct qdma_hw_queue *queue)
{
	DPAA2_QDMA_FUNC_TRACE();

	queue->num_users--;
}
static struct qdma_hw_queue *
get_hw_queue(uint32_t lcore_id)
{
	struct qdma_per_core_info *core_info;
	struct qdma_hw_queue *queue, *temp;
	uint32_t least_num_users;
	int num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	core_info = &qdma_core_info[lcore_id];
	num_hw_queues = core_info->num_hw_queues;

	/*
	 * Allocate a HW queue if there are less queues
	 * than maximum per core queues configured
	 */
	if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
		queue = alloc_hw_queue(lcore_id);
		if (queue) {
			core_info->hw_queues[num_hw_queues] = queue;
			core_info->num_hw_queues++;
			return queue;
		}
	}

	queue = core_info->hw_queues[0];
	/* In case there is no queue associated with the core return NULL */
	if (!queue)
		return NULL;

	/* Fetch the least loaded H/W queue */
	least_num_users = core_info->hw_queues[0]->num_users;
	for (i = 0; i < num_hw_queues; i++) {
		temp = core_info->hw_queues[i];
		if (temp->num_users < least_num_users) {
			least_num_users = temp->num_users;
			queue = temp;
		}
	}

	if (queue)
		queue->num_users++;

	return queue;
}
static void
put_hw_queue(struct qdma_hw_queue *queue)
{
	struct qdma_per_core_info *core_info;
	int lcore_id, num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	/*
	 * If this is the last user of the queue free it.
	 * Also remove it from QDMA core info.
	 */
	if (queue->num_users == 1) {
		free_hw_queue(queue);

		/* Remove the physical queue from core info */
		lcore_id = queue->lcore_id;
		core_info = &qdma_core_info[lcore_id];
		num_hw_queues = core_info->num_hw_queues;
		for (i = 0; i < num_hw_queues; i++) {
			if (queue == core_info->hw_queues[i])
				break;
		}
		for (; i < num_hw_queues - 1; i++)
			core_info->hw_queues[i] = core_info->hw_queues[i + 1];
		core_info->hw_queues[i] = NULL;
	} else {
		queue->num_users--;
	}
}
int
rte_qdma_init(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_init(&qdma_dev.lock);

	return 0;
}
void
rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
}
int
rte_qdma_reset(void)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before reset.");
		return -EBUSY;
	}

	/* In case there are pending jobs on any VQ, return -EBUSY */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
		    qdma_vqs[i].num_dequeues)) {
			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
			return -EBUSY;
		}
	}

	/* Reset HW queues */
	TAILQ_FOREACH(queue, &qdma_queue_list, next)
		queue->num_users = 0;

	/* Reset and free virtual queues */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
	}
	if (qdma_vqs)
		rte_free(qdma_vqs);
	qdma_vqs = NULL;

	/* Reset per core info */
	memset(&qdma_core_info, 0,
		sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);

	/* Free the FLE pool */
	if (qdma_dev.fle_pool)
		rte_mempool_free(qdma_dev.fle_pool);

	/* Reset QDMA device structure */
	qdma_dev.mode = RTE_QDMA_MODE_HW;
	qdma_dev.max_hw_queues_per_core = 0;
	qdma_dev.fle_pool = NULL;
	qdma_dev.fle_pool_count = 0;
	qdma_dev.max_vqs = 0;

	return 0;
}
int
rte_qdma_configure(struct rte_qdma_config *qdma_config)
{
	int ret;
	char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before config.");
		return -EBUSY;
	}

	/* Reset the QDMA device */
	ret = rte_qdma_reset();
	if (ret) {
		DPAA2_QDMA_ERR("Resetting QDMA failed");
		return ret;
	}

	/* Set mode */
	qdma_dev.mode = qdma_config->mode;

	/* Set max HW queue per core */
	if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
		DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
			       MAX_HW_QUEUE_PER_CORE);
		return -EINVAL;
	}
	qdma_dev.max_hw_queues_per_core =
		qdma_config->max_hw_queues_per_core;

	/* Allocate Virtual Queues */
	qdma_vqs = rte_malloc("qdma_virtual_queues",
			(sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
			RTE_CACHE_LINE_SIZE);
	if (!qdma_vqs) {
		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
		return -ENOMEM;
	}
	qdma_dev.max_vqs = qdma_config->max_vqs;

	/* Allocate FLE pool; just append PID so that in case of
	 * multiprocess, the pools don't collide.
	 */
	snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
		 getpid());
	qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
			qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
			QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (!qdma_dev.fle_pool) {
		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
		rte_free(qdma_vqs);
		qdma_vqs = NULL;
		return -ENOMEM;
	}
	qdma_dev.fle_pool_count = qdma_config->fle_pool_count;

	if (qdma_config->format == RTE_QDMA_ULTRASHORT_FORMAT) {
		dpdmai_dev_get_job = dpdmai_dev_get_job_us;
		dpdmai_dev_set_fd = dpdmai_dev_set_fd_us;
	} else {
		dpdmai_dev_get_job = dpdmai_dev_get_job_lf;
		dpdmai_dev_set_fd = dpdmai_dev_set_fd_lf;
	}

	return 0;
}
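
/*
 * Illustrative configuration sketch (values are examples only; field
 * names follow the rte_qdma_config members used above):
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 1,
 *		.mode = RTE_QDMA_MODE_HW,
 *		.format = RTE_QDMA_ULTRASHORT_FORMAT,
 *		.max_vqs = 32,
 *		.fle_pool_count = 4096,
 *	};
 *
 *	rte_qdma_init();
 *	if (rte_qdma_configure(&cfg) < 0)
 *		return; // handle error
 *	rte_qdma_start();
 */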
int
rte_qdma_start(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 1;

	return 0;
}
int
rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
{
	char ring_name[32];
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_lock(&qdma_dev.lock);

	/* Get a free Virtual Queue */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use == 0)
			break;
	}

	/* Return in case no VQ is free */
	if (i == qdma_dev.max_vqs) {
		rte_spinlock_unlock(&qdma_dev.lock);
		DPAA2_QDMA_ERR("Unable to get lock on QDMA device");
		return -ENODEV;
	}

	if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
			(flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
		/* Allocate HW queue for a VQ */
		qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 1;
	} else {
		/* Allocate a Ring for Virtual Queue in VQ mode */
		snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
		qdma_vqs[i].status_ring = rte_ring_create(ring_name,
			qdma_dev.fle_pool_count, rte_socket_id(), 0);
		if (!qdma_vqs[i].status_ring) {
			DPAA2_QDMA_ERR("Status ring creation failed for vq");
			rte_spinlock_unlock(&qdma_dev.lock);
			return -ENOMEM;
		}

		/* Get a HW queue (shared) for a VQ */
		qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 0;
	}

	if (qdma_vqs[i].hw_queue == NULL) {
		DPAA2_QDMA_ERR("No H/W queue available for VQ");
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
		qdma_vqs[i].status_ring = NULL;
		rte_spinlock_unlock(&qdma_dev.lock);
		return -ENODEV;
	}

	qdma_vqs[i].in_use = 1;
	qdma_vqs[i].lcore_id = lcore_id;
	memset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
	rte_spinlock_unlock(&qdma_dev.lock);

	return i;
}
/* create vq for route-by-port */
int
rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
			struct rte_qdma_rbp *rbp)
{
	int i;

	i = rte_qdma_vq_create(lcore_id, flags);
	if (i < 0)
		return i;

	memcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));

	return i;
}
static int
dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
			uint16_t txq_id,
			uint16_t vq_id,
			struct rte_qdma_rbp *rbp,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
	struct dpaa2_queue *txq;
	struct qbman_eq_desc eqdesc;
	struct qbman_swp *swp;
	int ret;
	uint32_t num_to_send = 0;
	uint16_t num_tx = 0;
	uint32_t loop;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	txq = &(dpdmai_dev->tx_queue[txq_id]);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);

	memset(fd, 0, RTE_QDMA_BURST_NB_MAX * sizeof(struct qbman_fd));

	while (nb_jobs > 0) {
		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_jobs;

		for (loop = 0; loop < num_to_send; loop++) {
			ret = dpdmai_dev_set_fd(&fd[loop],
						job[num_tx], rbp, vq_id);
			if (ret < 0) {
				/* Set nb_jobs to loop, so outer while loop
				 * breaks out.
				 */
				nb_jobs = loop;
				break;
			}

			num_tx++;
		}

		/* Enqueue the packet to the QBMAN */
		uint32_t enqueue_loop = 0, retry_count = 0;
		while (enqueue_loop < loop) {
			ret = qbman_swp_enqueue_multiple(swp,
						&eqdesc,
						&fd[enqueue_loop],
						NULL,
						loop - enqueue_loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					return num_tx - (loop - enqueue_loop);
			} else {
				enqueue_loop += ret;
				retry_count = 0;
			}
		}
		nb_jobs -= loop;
	}
	return num_tx;
}
int
rte_qdma_vq_enqueue_multi(uint16_t vq_id,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	int ret;

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != qdma_vq->lcore_id) {
		DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
				vq_id);
		return -EINVAL;
	}

	ret = dpdmai_dev_enqueue_multi(dpdmai_dev,
				       qdma_pq->queue_id,
				       vq_id,
				       &qdma_vq->rbp,
				       job,
				       nb_jobs);
	if (ret < 0) {
		DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
		return ret;
	}

	qdma_vq->num_enqueues += ret;

	return ret;
}
int
rte_qdma_vq_enqueue(uint16_t vq_id,
		    struct rte_qdma_job *job)
{
	return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
}
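
/*
 * Illustrative enqueue sketch (field names as used elsewhere in this
 * file; error handling elided; src_iova/dst_iova/length are caller
 * supplied):
 *
 *	struct rte_qdma_job j = {
 *		.src = src_iova,
 *		.dest = dst_iova,
 *		.len = length,
 *		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *	};
 *	int vq = rte_qdma_vq_create(rte_lcore_id(), 0);
 *	rte_qdma_vq_enqueue(vq, &j);
 */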
/* Function to receive a QDMA job for a given device and queue */
static int
dpdmai_dev_dequeue_multijob_prefetch(
			struct dpaa2_dpdmai_dev *dpdmai_dev,
			uint16_t rxq_id,
			uint16_t *vq_id,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp;
	struct queue_storage_info_t *q_storage;
	uint32_t fqid;
	uint8_t status, pending;
	uint8_t num_rx = 0;
	const struct qbman_fd *fd;
	uint16_t vqid;
	int ret, pull_size;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
	fqid = rxq->fqid;
	q_storage = rxq->q_storage;

	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
			while (!qbman_check_command_complete(
				get_swp_active_dqs(
				DPAA2_PER_LCORE_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_QDMA_DP_WARN(
					"VDQ command not issued. QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
		if (vq_id)
			vq_id[num_rx] = vqid;

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
		while (!qbman_check_command_complete(
			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_QDMA_DP_WARN("VDQ command is not issued. "
					"QBMAN is busy (2)\n");
			continue;
		}
		break;
	}

	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);

	return num_rx;
}
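
/*
 * The routine above keeps two DQ storages per queue and toggles between
 * them: while software walks the results of the previous volatile
 * dequeue, the next pull command is already prepared and issued before
 * returning, hiding the QBMAN command latency on the following call.
 */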
static int
dpdmai_dev_dequeue_multijob_no_prefetch(
		struct dpaa2_dpdmai_dev *dpdmai_dev,
		uint16_t rxq_id,
		uint16_t *vq_id,
		struct rte_qdma_job **job,
		uint16_t nb_jobs)
{
	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage;
	struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp;
	uint32_t fqid;
	uint8_t status, pending;
	uint8_t num_rx = 0;
	const struct qbman_fd *fd;
	uint16_t vqid;
	int ret, next_pull = nb_jobs, num_pulled = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
	fqid = rxq->fqid;

	do {
		dq_storage = rxq->q_storage->dq_storage[0];
		/* Prepare dequeue descriptor */
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
					dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_QDMA_DP_WARN(
					"VDQ command not issued. QBMAN busy");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;

		do {
			/* Loop until dq_storage is updated
			 * with new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));

			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

			vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
			if (vq_id)
				vq_id[num_rx] = vqid;

			dq_storage++;
			num_rx++;
			num_pulled++;

		} while (pending);
	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	return num_rx;
}
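
/*
 * Unlike the prefetch variant above, this routine issues each volatile
 * dequeue synchronously and waits for its completion within the same
 * call, so no pull command is left outstanding between invocations.
 * This is the path selected by the "no_prefetch" devargs.
 */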
int
rte_qdma_vq_dequeue_multi(uint16_t vq_id,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct qdma_virt_queue *temp_qdma_vq;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	int ring_count, ret = 0, i;

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
		DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
				vq_id);
		return -EINVAL;
	}

	/* Only dequeue when there are pending jobs on VQ */
	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
		return 0;

	if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
		nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);

	if (qdma_vq->exclusive_hw_queue) {
		/* In case of exclusive queue directly fetch from HW queue */
		ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
						  NULL, job, nb_jobs);
		if (ret < 0) {
			DPAA2_QDMA_ERR(
				"Dequeue from DPDMAI device failed: %d", ret);
			return ret;
		}
		qdma_vq->num_dequeues += ret;
	} else {
		uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
		/*
		 * Get the QDMA completed jobs from the software ring.
		 * In case they are not available on the ring poke the HW
		 * to fetch completed jobs from corresponding HW queues
		 */
		ring_count = rte_ring_count(qdma_vq->status_ring);
		if (ring_count < nb_jobs) {
			/* TODO - How to have right budget */
			ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
					qdma_pq->queue_id,
					temp_vq_id, job, nb_jobs);
			for (i = 0; i < ret; i++) {
				temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
				rte_ring_enqueue(temp_qdma_vq->status_ring,
						 (void *)(job[i]));
			}
			ring_count = rte_ring_count(
					qdma_vq->status_ring);
		}

		if (ring_count) {
			/* Dequeue job from the software ring
			 * to provide to the user
			 */
			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
					(void **)job, ring_count, NULL);
			if (ret)
				qdma_vq->num_dequeues += ret;
		}
	}

	return ret;
}
struct rte_qdma_job *
rte_qdma_vq_dequeue(uint16_t vq_id)
{
	int ret;
	struct rte_qdma_job *job = NULL;

	ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
	if (ret < 0)
		DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);

	return job;
}
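
/*
 * Illustrative completion-polling sketch (assumes jobs were enqueued on
 * the same lcore that created the VQ):
 *
 *	struct rte_qdma_job *done[RTE_QDMA_BURST_NB_MAX];
 *	int i, n;
 *
 *	n = rte_qdma_vq_dequeue_multi(vq, done, RTE_QDMA_BURST_NB_MAX);
 *	for (i = 0; i < n; i++) {
 *		if (done[i]->status)
 *			; // report error
 *	}
 */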
void
rte_qdma_vq_stats(uint16_t vq_id,
		  struct rte_qdma_vq_stats *vq_status)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	if (qdma_vq->in_use) {
		vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
		vq_status->lcore_id = qdma_vq->lcore_id;
		vq_status->num_enqueues = qdma_vq->num_enqueues;
		vq_status->num_dequeues = qdma_vq->num_dequeues;
		vq_status->num_pending_jobs = vq_status->num_enqueues -
				vq_status->num_dequeues;
	}
}
int
rte_qdma_vq_destroy(uint16_t vq_id)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on any VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
		return -EBUSY;

	rte_spinlock_lock(&qdma_dev.lock);

	if (qdma_vq->exclusive_hw_queue) {
		free_hw_queue(qdma_vq->hw_queue);
	} else {
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);
	}

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev.lock);

	return 0;
}
int
rte_qdma_vq_destroy_rbp(uint16_t vq_id)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on any VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
		return -EBUSY;

	rte_spinlock_lock(&qdma_dev.lock);

	if (qdma_vq->exclusive_hw_queue) {
		free_hw_queue(qdma_vq->hw_queue);
	} else {
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);
	}

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev.lock);

	return 0;
}
void
rte_qdma_stop(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 0;
}
void
rte_qdma_destroy(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_qdma_reset();
}
static const struct rte_rawdev_ops dpaa2_qdma_ops;
static int
add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
		if (!queue) {
			DPAA2_QDMA_ERR(
				"Memory allocation failed for QDMA queue");
			return -ENOMEM;
		}

		queue->dpdmai_dev = dpdmai_dev;
		queue->queue_id = i;

		TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
		qdma_dev.num_hw_queues++;
	}

	return 0;
}
static void
remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue = NULL;
	struct qdma_hw_queue *tqueue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
		if (queue->dpdmai_dev == dpdmai_dev) {
			TAILQ_REMOVE(&qdma_queue_list, queue, next);
			rte_free(queue);
		}
	}
}
static int
dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Remove HW queues from global list */
	remove_hw_queues_from_list(dpdmai_dev);

	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			     dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("dpdmai disable failed");

	/* Free up the DQRR storage for Rx */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);

		if (rxq->q_storage) {
			dpaa2_free_dq_storage(rxq->q_storage);
			rte_free(rxq->q_storage);
		}
	}

	/* Close the device at underlying layer */
	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("Failure closing dpdmai device");

	return 0;
}
static int
check_devargs_handler(__rte_unused const char *key, const char *value,
		      __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}
static int
dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
{
	struct rte_kvargs *kvlist;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, key,
			       check_devargs_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
static int
dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpdmai_attr attr;
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_tx_queue_attr tx_attr;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Open DPDMAI device */
	dpdmai_dev->dpdmai_id = dpdmai_id;
	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
		return ret;
	}

	/* Get DPDMAI attributes */
	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
				    dpdmai_dev->token, &attr);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
			       ret);
		goto init_err;
	}
	dpdmai_dev->num_queues = attr.num_of_queues;

	/* Set up Rx Queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq;

		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
					  CMD_PRI_LOW,
					  dpdmai_dev->token,
					  i, 0, &rx_queue_cfg);
		if (ret) {
			DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
				       ret);
			goto init_err;
		}

		/* Allocate DQ storage for the DPDMAI Rx queues */
		rxq = &(dpdmai_dev->rx_queue[i]);
		rxq->q_storage = rte_malloc("dq_storage",
					    sizeof(struct queue_storage_info_t),
					    RTE_CACHE_LINE_SIZE);
		if (!rxq->q_storage) {
			DPAA2_QDMA_ERR("q_storage allocation failed");
			ret = -ENOMEM;
			goto init_err;
		}

		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
		if (ret) {
			DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
			goto init_err;
		}
	}

	/* Get Rx and Tx queues FQID's */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &rx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;

		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &tx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
	}

	/* Enable the device */
	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			    dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
		goto init_err;
	}

	/* Add the HW queue to the global list */
	ret = add_hw_queues_to_list(dpdmai_dev);
	if (ret) {
		DPAA2_QDMA_ERR("Adding H/W queue to list failed");
		goto init_err;
	}

	if (dpaa2_get_devargs(rawdev->device->devargs,
			      DPAA2_QDMA_NO_PREFETCH)) {
		/* If no prefetch is configured. */
		dpdmai_dev_dequeue_multijob =
				dpdmai_dev_dequeue_multijob_no_prefetch;
		DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
	} else {
		dpdmai_dev_dequeue_multijob =
			dpdmai_dev_dequeue_multijob_prefetch;
	}

	if (!dpaa2_coherent_no_alloc_cache) {
		if (dpaa2_svr_family == SVR_LX2160A) {
			dpaa2_coherent_no_alloc_cache =
				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
		} else {
			dpaa2_coherent_no_alloc_cache =
				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_COHERENT_ALLOCATE_CACHE;
		}
	}

	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");

	return 0;
init_err:
	dpaa2_dpdmai_dev_uninit(rawdev);
	return ret;
}
static int
rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
		     struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
					 sizeof(struct dpaa2_dpdmai_dev),
					 rte_socket_id());
	if (!rawdev) {
		DPAA2_QDMA_ERR("Unable to allocate rawdevice");
		return -EINVAL;
	}

	dpaa2_dev->rawdev = rawdev;
	rawdev->dev_ops = &dpaa2_qdma_ops;
	rawdev->device = &dpaa2_dev->device;
	rawdev->driver_name = dpaa2_drv->driver.name;

	/* Invoke PMD device initialization function */
	ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
	if (ret) {
		rte_rawdev_pmd_release(rawdev);
		return ret;
	}

	return 0;
}
static int
rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_dpdmai_dev_uninit(rawdev);

	ret = rte_rawdev_pmd_release(rawdev);
	if (ret)
		DPAA2_QDMA_ERR("Device cleanup failed");

	return 0;
}
static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_QDMA,
	.probe = rte_dpaa2_qdma_probe,
	.remove = rte_dpaa2_qdma_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
	"no_prefetch=<int> ");
RTE_INIT(dpaa2_qdma_init_log)
{
	dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
	if (dpaa2_qdma_logtype >= 0)
		rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
}