/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2019 NXP
 */
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <fslmc_logs.h>
#include <rte_fslmc.h>
#include "dpaa2_hw_pvt.h"
#include "dpaa2_hw_dpio.h"
#include <mc/fsl_dpmng.h>
#define NUM_HOST_CPUS RTE_MAX_LCORE

struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);

struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];

TAILQ_HEAD(dpio_dev_list, dpaa2_dpio_dev);
static struct dpio_dev_list dpio_dev_list
	= TAILQ_HEAD_INITIALIZER(dpio_dev_list); /*!< DPIO device list */
static uint32_t io_space_count;

/* Variable to store DPAA2 platform type */
uint32_t dpaa2_svr_family;
/* Physical core id for lcores running on dpaa2. */
/* DPAA2 only supports a 1 lcore to 1 physical cpu mapping. */
static unsigned int dpaa2_cpu[RTE_MAX_LCORE];

/* Variable to store DPAA2 DQRR size */
uint8_t dpaa2_dqrr_size;
/* Variable to store DPAA2 EQCR size */
uint8_t dpaa2_eqcr_size;

/* Stashing macro defaults for LS208x */
static int dpaa2_core_cluster_base = 0x04;
static int dpaa2_cluster_sz = 2;
/* On the LS208x platform there are four clusters with the following mapping:
 * Cluster 1 (ID = x04) : CPU0, CPU1;
 * Cluster 2 (ID = x05) : CPU2, CPU3;
 * Cluster 3 (ID = x06) : CPU4, CPU5;
 * Cluster 4 (ID = x07) : CPU6, CPU7;
 */
/* On the LS108x platform there are two clusters with the following mapping:
 * Cluster 1 (ID = x02) : CPU0, CPU1, CPU2, CPU3;
 * Cluster 2 (ID = x03) : CPU4, CPU5, CPU6, CPU7;
 */
/* On the LX2160 platform there are eight clusters with the following mapping:
 * Cluster 1 (ID = x00) : CPU0, CPU1;
 * Cluster 2 (ID = x01) : CPU2, CPU3;
 * Cluster 3 (ID = x02) : CPU4, CPU5;
 * Cluster 4 (ID = x03) : CPU6, CPU7;
 * Cluster 5 (ID = x04) : CPU8, CPU9;
 * Cluster 6 (ID = x05) : CPU10, CPU11;
 * Cluster 7 (ID = x06) : CPU12, CPU13;
 * Cluster 8 (ID = x07) : CPU14, CPU15;
 */
static int
dpaa2_core_cluster_sdest(int cpu_id)
{
	int x = cpu_id / dpaa2_cluster_sz;

	return dpaa2_core_cluster_base + x;
}
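/*
 * Worked example using the LS208x defaults above (dpaa2_core_cluster_base =
 * 0x04, dpaa2_cluster_sz = 2): cpu_id 5 gives x = 5 / 2 = 2, so
 * SDEST = 0x04 + 2 = 0x06, i.e. Cluster 3 in the LS208x mapping.
 */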
#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
static void
dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id, int lcoreid)
{
#define STRING_LEN	28
#define COMMAND_LEN	50
	uint32_t cpu_mask = 1;
	int ret;
	size_t len = 0;
	char *temp = NULL, *token = NULL;
	char string[STRING_LEN], command[COMMAND_LEN];
	FILE *file;

	snprintf(string, STRING_LEN, "dpio.%d", dpio_id);
	file = fopen("/proc/interrupts", "r");
	if (!file) {
		DPAA2_BUS_WARN("Failed to open /proc/interrupts file");
		return;
	}
	while (getline(&temp, &len, file) != -1) {
		if ((strstr(temp, string)) != NULL) {
			token = strtok(temp, ":");
			break;
		}
	}

	if (!token) {
		DPAA2_BUS_WARN("Failed to get interrupt id for dpio.%d",
			       dpio_id);
		if (temp)
			free(temp);
		fclose(file);
		return;
	}

	cpu_mask = cpu_mask << dpaa2_cpu[lcoreid];
	snprintf(command, COMMAND_LEN, "echo %X > /proc/irq/%s/smp_affinity",
		 cpu_mask, token);
	ret = system(command);
	if (ret < 0)
		DPAA2_BUS_DEBUG(
			"Failed to affine interrupts on respective core");
	else
		DPAA2_BUS_DEBUG(" %s command is executed", command);

	free(temp);
	fclose(file);
}
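/*
 * Illustrative example of the command built above (the IRQ number is
 * hypothetical): if "dpio.1" appears in /proc/interrupts as IRQ 119 and the
 * lcore maps to physical CPU 3, then cpu_mask = 1 << 3 = 0x8 and the
 * executed command is:
 *
 *	echo 8 > /proc/irq/119/smp_affinity
 */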
static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev, int lcoreid)
{
	struct epoll_event epoll_ev;
	int eventfd, dpio_epoll_fd, ret;
	int threshold = 0x3, timeout = 0xFF;

	dpio_epoll_fd = epoll_create(1);
	ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0);
	if (ret) {
		DPAA2_BUS_ERR("Interrupt registration failed");
		return -1;
	}

	if (getenv("DPAA2_PORTAL_INTR_THRESHOLD"))
		threshold = atoi(getenv("DPAA2_PORTAL_INTR_THRESHOLD"));

	if (getenv("DPAA2_PORTAL_INTR_TIMEOUT"))
		sscanf(getenv("DPAA2_PORTAL_INTR_TIMEOUT"), "%x", &timeout);

	qbman_swp_interrupt_set_trigger(dpio_dev->sw_portal,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(dpio_dev->sw_portal, 0xffffffff);
	qbman_swp_interrupt_set_inhibit(dpio_dev->sw_portal, 0);
	qbman_swp_dqrr_thrshld_write(dpio_dev->sw_portal, threshold);
	qbman_swp_intr_timeout_write(dpio_dev->sw_portal, timeout);

	eventfd = dpio_dev->intr_handle.fd;
	epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
	epoll_ev.data.fd = eventfd;

	ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev);
	if (ret < 0) {
		DPAA2_BUS_ERR("epoll_ctl failed");
		return -1;
	}
	dpio_dev->epoll_fd = dpio_epoll_fd;

	dpaa2_affine_dpio_intr_to_respective_core(dpio_dev->hw_id, lcoreid);

	return 0;
}
#endif
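/*
 * Tuning sketch (values are examples only): the environment variables read
 * above override the default DQRR interrupt threshold (0x3) and timeout
 * (0xFF), e.g.:
 *
 *	export DPAA2_PORTAL_INTR_THRESHOLD=4
 *	export DPAA2_PORTAL_INTR_TIMEOUT=0x20
 *
 * Note that the timeout value is parsed as hexadecimal.
 */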
static int
dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int lcoreid)
{
	int sdest, ret;
	int cpu_id;

	/* Set the Stashing Destination */
	if (lcoreid < 0) {
		lcoreid = rte_get_master_lcore();
		if (lcoreid < 0) {
			DPAA2_BUS_ERR("Getting CPU Index failed");
			return -1;
		}
	}

	cpu_id = dpaa2_cpu[lcoreid];

	/* Set the STASH Destination depending on the current CPU ID.
	 * Valid values of SDEST are 4,5,6,7.
	 */
	sdest = dpaa2_core_cluster_sdest(cpu_id);
	DPAA2_BUS_DEBUG("Portal= %d CPU= %u lcore id =%u SDEST= %d",
			dpio_dev->index, cpu_id, lcoreid, sdest);

	ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
					    dpio_dev->token, sdest);
	if (ret) {
		DPAA2_BUS_ERR("%d ERROR in SDEST", ret);
		return -1;
	}

#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
	if (dpaa2_dpio_intr_init(dpio_dev, lcoreid)) {
		DPAA2_BUS_ERR("Interrupt registration failed for dpio");
		return -1;
	}
#endif

	return 0;
}
static struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int lcoreid)
{
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	int ret;

	/* Get DPIO dev handle from list using index */
	TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) {
		if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
			break;
	}
	if (!dpio_dev)
		return NULL;

	DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu",
			dpio_dev, dpio_dev->index, syscall(SYS_gettid));

	ret = dpaa2_configure_stashing(dpio_dev, lcoreid);
	if (ret)
		DPAA2_BUS_ERR("dpaa2_configure_stashing failed");

	return dpio_dev;
}
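/*
 * Note (inferred from the loop above): rte_atomic16_test_and_set() claims a
 * free portal atomically, so each DPIO device is handed out to at most one
 * caller until its ref_count is released; later callers simply pick the next
 * unclaimed entry in dpio_dev_list.
 */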
int
dpaa2_affine_qbman_swp(void)
{
	unsigned int lcore_id = rte_lcore_id();
	uint64_t tid = syscall(SYS_gettid);

	if (lcore_id == LCORE_ID_ANY)
		lcore_id = rte_get_master_lcore();
	/* if the core id is not supported */
	else if (lcore_id >= RTE_MAX_LCORE)
		return -1;

	if (dpaa2_io_portal[lcore_id].dpio_dev) {
		DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared"
				  " between thread %" PRIu64 " and current "
				  "%" PRIu64 "\n",
				  dpaa2_io_portal[lcore_id].dpio_dev,
				  dpaa2_io_portal[lcore_id].dpio_dev->index,
				  dpaa2_io_portal[lcore_id].net_tid,
				  tid);
		RTE_PER_LCORE(_dpaa2_io).dpio_dev
			= dpaa2_io_portal[lcore_id].dpio_dev;
		rte_atomic16_inc(&dpaa2_io_portal
				 [lcore_id].dpio_dev->ref_count);
		dpaa2_io_portal[lcore_id].net_tid = tid;

		DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - "
				   "%" PRIu64 "\n",
				   dpaa2_io_portal[lcore_id].dpio_dev,
				   dpaa2_io_portal[lcore_id].dpio_dev->index,
				   tid);
		return 0;
	}

	/* Populate the dpaa2_io_portal structure */
	dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp(lcore_id);

	if (dpaa2_io_portal[lcore_id].dpio_dev) {
		RTE_PER_LCORE(_dpaa2_io).dpio_dev
			= dpaa2_io_portal[lcore_id].dpio_dev;
		dpaa2_io_portal[lcore_id].net_tid = tid;
		return 0;
	} else {
		return -1;
	}
}
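/*
 * Usage sketch (hypothetical caller, not part of this file): a PMD data-path
 * thread affines a portal once before its first enqueue/dequeue:
 *
 *	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 *		if (dpaa2_affine_qbman_swp())
 *			return 0;
 *	}
 *
 * After this call the portal is reachable through the per-lcore _dpaa2_io
 * structure for the lifetime of the thread.
 */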
int
dpaa2_affine_qbman_ethrx_swp(void)
{
	unsigned int lcore_id = rte_lcore_id();
	uint64_t tid = syscall(SYS_gettid);

	if (lcore_id == LCORE_ID_ANY)
		lcore_id = rte_get_master_lcore();
	/* if the core id is not supported */
	else if (lcore_id >= RTE_MAX_LCORE)
		return -1;

	if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
		DPAA2_BUS_DP_INFO(
			"DPAA Portal=%p (%d) is being shared between thread"
			" %" PRIu64 " and current %" PRIu64 "\n",
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
			dpaa2_io_portal[lcore_id].sec_tid,
			tid);
		RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
			= dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
		rte_atomic16_inc(&dpaa2_io_portal
				 [lcore_id].ethrx_dpio_dev->ref_count);
		dpaa2_io_portal[lcore_id].sec_tid = tid;

		DPAA2_BUS_DP_DEBUG(
			"Old Portal=%p (%d) affined thread"
			" - %" PRIu64 "\n",
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
			tid);
		return 0;
	}

	/* Populate the dpaa2_io_portal structure */
	dpaa2_io_portal[lcore_id].ethrx_dpio_dev =
		dpaa2_get_qbman_swp(lcore_id);

	if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
		RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
			= dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
		dpaa2_io_portal[lcore_id].sec_tid = tid;
		return 0;
	} else {
		return -1;
	}
}
/* This checks for unsupported lcore mappings as well as getting the physical
 * cpu id for each lcore.
 * One lcore can only map to one cpu, i.e. 1@10-14 is not supported.
 * One cpu can be mapped to more than one lcore.
 */
static int
dpaa2_check_lcore_cpuset(void)
{
	unsigned int lcore_id, i;
	int ret = 0;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		dpaa2_cpu[lcore_id] = 0xffffffff;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		rte_cpuset_t cpuset = rte_lcore_cpuset(lcore_id);

		for (i = 0; i < CPU_SETSIZE; i++) {
			if (!CPU_ISSET(i, &cpuset))
				continue;
			if (i >= RTE_MAX_LCORE) {
				DPAA2_BUS_ERR("ERR:lcore map to core %u (>= %u) not supported",
					      i, RTE_MAX_LCORE);
				ret = -1;
				continue;
			}
			RTE_LOG(DEBUG, EAL, "lcore id = %u cpu=%u\n",
				lcore_id, i);
			if (dpaa2_cpu[lcore_id] != 0xffffffff) {
				DPAA2_BUS_ERR("ERR:lcore map to multi-cpu not supported");
				ret = -1;
				continue;
			}
			dpaa2_cpu[lcore_id] = i;
		}
	}
	return ret;
}
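/*
 * EAL mapping examples (illustrative): "--lcores 0@0,1@1" maps each lcore to
 * a single physical CPU and passes this check, while "--lcores 1@(10-14)"
 * spreads one lcore across several CPUs and is rejected by the multi-cpu
 * check above.
 */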
static int
dpaa2_create_dpio_device(int vdev_fd,
			 struct vfio_device_info *obj_info,
			 int object_id)
{
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
	struct qbman_swp_desc p_des;
	struct dpio_attr attr;
	static int check_lcore_cpuset;
	if (obj_info->num_regions < NUM_DPIO_REGIONS) {
		DPAA2_BUS_ERR("Not sufficient number of DPIO regions");
		return -1;
	}

	dpio_dev = rte_zmalloc(NULL, sizeof(struct dpaa2_dpio_dev),
			       RTE_CACHE_LINE_SIZE);
	if (!dpio_dev) {
		DPAA2_BUS_ERR("Memory allocation failed for DPIO Device");
		return -1;
	}

	dpio_dev->dpio = NULL;
	dpio_dev->hw_id = object_id;
	rte_atomic16_init(&dpio_dev->ref_count);
	/* Using single portal for all devices */
	dpio_dev->mc_portal = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);

	if (!check_lcore_cpuset) {
		check_lcore_cpuset = 1;

		if (dpaa2_check_lcore_cpuset() < 0)
			goto err;
	}
	dpio_dev->dpio = rte_zmalloc(NULL, sizeof(struct fsl_mc_io),
				     RTE_CACHE_LINE_SIZE);
	if (!dpio_dev->dpio) {
		DPAA2_BUS_ERR("Memory allocation failure");
		goto err;
	}

	dpio_dev->dpio->regs = dpio_dev->mc_portal;
	if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
		      &dpio_dev->token)) {
		DPAA2_BUS_ERR("Failed to allocate IO space");
		goto err;
	}

	if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
		DPAA2_BUS_ERR("Failed to reset dpio");
		goto err;
	}

	if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
		DPAA2_BUS_ERR("Failed to enable dpio");
		goto err;
	}

	if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
				dpio_dev->token, &attr)) {
		DPAA2_BUS_ERR("DPIO Get attribute failed");
		goto err;
	}
	/* find the SoC type for the first time */
	if (!dpaa2_svr_family) {
		struct mc_soc_version mc_plat_info = {0};

		if (mc_get_soc_version(dpio_dev->dpio,
				       CMD_PRI_LOW, &mc_plat_info)) {
			DPAA2_BUS_ERR("Unable to get SoC version information");
		} else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
			dpaa2_core_cluster_base = 0x02;
			dpaa2_cluster_sz = 4;
			DPAA2_BUS_DEBUG("LS108x (A53) Platform Detected");
		} else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) {
			dpaa2_core_cluster_base = 0x00;
			dpaa2_cluster_sz = 2;
			DPAA2_BUS_DEBUG("LX2160 Platform Detected");
		}
		dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);

		if (dpaa2_svr_family == SVR_LX2160A) {
			dpaa2_dqrr_size = DPAA2_LX2_DQRR_RING_SIZE;
			dpaa2_eqcr_size = DPAA2_LX2_EQCR_RING_SIZE;
		} else {
			dpaa2_dqrr_size = DPAA2_DQRR_RING_SIZE;
			dpaa2_eqcr_size = DPAA2_EQCR_RING_SIZE;
		}
	}
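	/*
	 * The SVR is masked with 0xffff0000 so that only the SoC family bits
	 * are compared; the low 16 bits (e.g. silicon revision) do not affect
	 * the cluster base, cluster size or ring sizes chosen above.
	 */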
	if (dpaa2_svr_family == SVR_LX2160A)
		reg_info.index = DPAA2_SWP_CENA_MEM_REGION;
	else
		reg_info.index = DPAA2_SWP_CENA_REGION;

	if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
		DPAA2_BUS_ERR("vfio: error getting region info");
		goto err;
	}

	dpio_dev->ce_size = reg_info.size;
	dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
				PROT_WRITE | PROT_READ, MAP_SHARED,
				vdev_fd, reg_info.offset);

	reg_info.index = DPAA2_SWP_CINH_REGION;
	if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
		DPAA2_BUS_ERR("vfio: error getting region info");
		goto err;
	}

	dpio_dev->ci_size = reg_info.size;
	dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
				PROT_WRITE | PROT_READ, MAP_SHARED,
				vdev_fd, reg_info.offset);
	/* Configure & setup SW portal */
	p_des.idx = attr.qbman_portal_id;
	p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
	p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
	p_des.qman_version = attr.qbman_version;
	p_des.eqcr_mode = qman_eqcr_vb_ring;
	p_des.cena_access_mode = qman_cena_fastest_access;

	dpio_dev->sw_portal = qbman_swp_init(&p_des);
	if (dpio_dev->sw_portal == NULL) {
		DPAA2_BUS_ERR("QBMan SW Portal Init failed");
		goto err;
	}

	io_space_count++;
	dpio_dev->index = io_space_count;
	if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
		DPAA2_BUS_ERR("Fail to setup interrupt for %d",
			      dpio_dev->hw_id);
		goto err;
	}

	dpio_dev->eqresp = rte_zmalloc(NULL, MAX_EQ_RESP_ENTRIES *
				       (sizeof(struct qbman_result) +
					sizeof(struct eqresp_metadata)),
				       RTE_CACHE_LINE_SIZE);
	if (!dpio_dev->eqresp) {
		DPAA2_BUS_ERR("Memory allocation failed for eqresp");
		goto err;
	}
	dpio_dev->eqresp_meta = (struct eqresp_metadata *)(dpio_dev->eqresp +
				MAX_EQ_RESP_ENTRIES);
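	/*
	 * Layout note (follows from the allocation above): eqresp is a single
	 * block of MAX_EQ_RESP_ENTRIES qbman_result entries followed by
	 * MAX_EQ_RESP_ENTRIES eqresp_metadata entries, so eqresp_meta points
	 * just past the end of the result array.
	 */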
	TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);

	return 0;
err:
	if (dpio_dev->dpio) {
		dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
		dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
		rte_free(dpio_dev->dpio);
	}

	rte_free(dpio_dev);

	/* For each element in the list, cleanup */
	TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) {
		if (dpio_dev->dpio) {
			dpio_disable(dpio_dev->dpio, CMD_PRI_LOW,
				     dpio_dev->token);
			dpio_close(dpio_dev->dpio, CMD_PRI_LOW,
				   dpio_dev->token);
			rte_free(dpio_dev->dpio);
		}
		rte_free(dpio_dev);
	}

	/* Preventing re-use of the list with old entries */
	TAILQ_INIT(&dpio_dev_list);

	return -1;
}
void
dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
{
	int i = 0;

	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
		if (q_storage->dq_storage[i])
			rte_free(q_storage->dq_storage[i]);
	}
}
int
dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
{
	int i = 0;

	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
		q_storage->dq_storage[i] = rte_malloc(NULL,
			dpaa2_dqrr_size * sizeof(struct qbman_result),
			RTE_CACHE_LINE_SIZE);
		if (!q_storage->dq_storage[i])
			goto fail;
	}
	return 0;
fail:
	while (--i >= 0)
		rte_free(q_storage->dq_storage[i]);

	return -1;
}
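/*
 * Usage sketch (hypothetical caller): queue setup pairs the two helpers as
 *
 *	if (dpaa2_alloc_dq_storage(q_storage))
 *		return -ENOMEM;
 *	...
 *	dpaa2_free_dq_storage(q_storage);
 *
 * Each of the NUM_DQS_PER_QUEUE buffers holds dpaa2_dqrr_size qbman_result
 * entries, matching the DQRR depth selected at probe time.
 */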
uint32_t
dpaa2_free_eq_descriptors(void)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct qbman_result *eqresp;
	struct eqresp_metadata *eqresp_meta;
	struct dpaa2_queue *txq;

	while (dpio_dev->eqresp_ci != dpio_dev->eqresp_pi) {
		eqresp = &dpio_dev->eqresp[dpio_dev->eqresp_ci];
		eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_ci];

		if (!qbman_result_eqresp_rspid(eqresp))
			break;

		if (qbman_result_eqresp_rc(eqresp)) {
			txq = eqresp_meta->dpaa2_q;
			txq->cb_eqresp_free(dpio_dev->eqresp_ci);
		}
		qbman_result_eqresp_set_rspid(eqresp, 0);

		dpio_dev->eqresp_ci + 1 < MAX_EQ_RESP_ENTRIES ?
			dpio_dev->eqresp_ci++ : (dpio_dev->eqresp_ci = 0);
	}

	/* Return one less entry so that PI and CI are never the same, even
	 * when all the EQ responses are in use.
	 */
	if (dpio_dev->eqresp_ci > dpio_dev->eqresp_pi)
		return dpio_dev->eqresp_ci - dpio_dev->eqresp_pi - 1;
	else
		return dpio_dev->eqresp_ci - dpio_dev->eqresp_pi +
			MAX_EQ_RESP_ENTRIES - 1;
}
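/*
 * Worked example (assuming, purely for illustration, MAX_EQ_RESP_ENTRIES is
 * 32): with ci = 5 and pi = 2, 5 - 2 - 1 = 2 entries are reported free; with
 * ci = 2 and pi = 5 after a wrap, 2 - 5 + 32 - 1 = 28. The "- 1" guarantees
 * PI can never catch up to CI, so a completely full ring is never mistaken
 * for an empty one.
 */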
static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
	.dev_type = DPAA2_IO,
	.create = dpaa2_create_dpio_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, rte_dpaa2_dpio_obj);