]>
Commit | Line | Data |
---|---|---|
9f95a23c | 1 | /* SPDX-License-Identifier: BSD-3-Clause |
11fdf7f2 TL |
2 | * |
3 | * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. | |
9f95a23c | 4 | * Copyright 2016-2018 NXP |
11fdf7f2 | 5 | * |
11fdf7f2 TL |
6 | */ |
7 | #include <unistd.h> | |
8 | #include <stdio.h> | |
9 | #include <string.h> | |
10 | #include <stdlib.h> | |
11 | #include <fcntl.h> | |
12 | #include <errno.h> | |
13 | #include <stdarg.h> | |
14 | #include <inttypes.h> | |
15 | #include <signal.h> | |
16 | #include <pthread.h> | |
17 | #include <sys/types.h> | |
18 | #include <sys/queue.h> | |
19 | #include <sys/ioctl.h> | |
20 | #include <sys/stat.h> | |
21 | #include <sys/mman.h> | |
22 | #include <sys/syscall.h> | |
9f95a23c TL |
23 | #include <sys/epoll.h> |
24 | #include<sys/eventfd.h> | |
11fdf7f2 TL |
25 | |
26 | #include <rte_mbuf.h> | |
9f95a23c | 27 | #include <rte_ethdev_driver.h> |
11fdf7f2 TL |
28 | #include <rte_malloc.h> |
29 | #include <rte_memcpy.h> | |
30 | #include <rte_string_fns.h> | |
31 | #include <rte_cycles.h> | |
32 | #include <rte_kvargs.h> | |
33 | #include <rte_dev.h> | |
11fdf7f2 TL |
34 | |
35 | #include <fslmc_logs.h> | |
9f95a23c | 36 | #include <rte_fslmc.h> |
11fdf7f2 TL |
37 | #include "dpaa2_hw_pvt.h" |
38 | #include "dpaa2_hw_dpio.h" | |
9f95a23c | 39 | #include <mc/fsl_dpmng.h> |
11fdf7f2 TL |
40 | |
/* Upper bound on host CPUs tracked; matches EAL's lcore limit */
#define NUM_HOST_CPUS RTE_MAX_LCORE

/* Per-lcore DPIO portal bookkeeping, indexed by lcore id */
struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);

/* One active-DQS slot per software portal (NUM_MAX_SWP entries) */
struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];

TAILQ_HEAD(dpio_dev_list, dpaa2_dpio_dev);
static struct dpio_dev_list dpio_dev_list
	= TAILQ_HEAD_INITIALIZER(dpio_dev_list); /*!< DPIO device list */
/* Number of DPIO devices created so far; doubles as a 1-based index
 * assigned to each new device in dpaa2_create_dpio_device().
 */
static uint32_t io_space_count;

/* Variable to store DPAA2 platform type (SVR masked with 0xffff0000) */
uint32_t dpaa2_svr_family;

/* Physical core id for lcores running on dpaa2. */
/* DPAA2 only support 1 lcore to 1 phy cpu mapping */
static unsigned int dpaa2_cpu[RTE_MAX_LCORE];

/* Variable to store DPAA2 DQRR size */
uint8_t dpaa2_dqrr_size;
/* Variable to store DPAA2 EQCR size */
uint8_t dpaa2_eqcr_size;

/* Stashing parameters; defaults are for LS208x, overridden at probe
 * time for other SoC families (see dpaa2_create_dpio_device()).
 */
static int dpaa2_core_cluster_base = 0x04;
static int dpaa2_cluster_sz = 2;

/* For LS208X platform There are four clusters with following mapping:
 * Cluster 1 (ID = x04) : CPU0, CPU1;
 * Cluster 2 (ID = x05) : CPU2, CPU3;
 * Cluster 3 (ID = x06) : CPU4, CPU5;
 * Cluster 4 (ID = x07) : CPU6, CPU7;
 */
/* For LS108X platform There are two clusters with following mapping:
 * Cluster 1 (ID = x02) : CPU0, CPU1, CPU2, CPU3;
 * Cluster 2 (ID = x03) : CPU4, CPU5, CPU6, CPU7;
 */
/* For LX2160 platform There are four clusters with following mapping:
 * Cluster 1 (ID = x00) : CPU0, CPU1;
 * Cluster 2 (ID = x01) : CPU2, CPU3;
 * Cluster 3 (ID = x02) : CPU4, CPU5;
 * Cluster 4 (ID = x03) : CPU6, CPU7;
 * Cluster 1 (ID = x04) : CPU8, CPU9;
 * Cluster 2 (ID = x05) : CPU10, CP11;
 * Cluster 3 (ID = x06) : CPU12, CPU13;
 * Cluster 4 (ID = x07) : CPU14, CPU15;
 */
9f95a23c | 89 | |
11fdf7f2 TL |
90 | static int |
91 | dpaa2_core_cluster_sdest(int cpu_id) | |
92 | { | |
93 | int x = cpu_id / dpaa2_cluster_sz; | |
94 | ||
11fdf7f2 TL |
95 | return dpaa2_core_cluster_base + x; |
96 | } | |
97 | ||
9f95a23c TL |
98 | #ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV |
99 | static void | |
100 | dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id, int lcoreid) | |
11fdf7f2 | 101 | { |
9f95a23c TL |
102 | #define STRING_LEN 28 |
103 | #define COMMAND_LEN 50 | |
104 | uint32_t cpu_mask = 1; | |
105 | int ret; | |
106 | size_t len = 0; | |
107 | char *temp = NULL, *token = NULL; | |
108 | char string[STRING_LEN], command[COMMAND_LEN]; | |
109 | FILE *file; | |
110 | ||
111 | snprintf(string, STRING_LEN, "dpio.%d", dpio_id); | |
112 | file = fopen("/proc/interrupts", "r"); | |
113 | if (!file) { | |
114 | DPAA2_BUS_WARN("Failed to open /proc/interrupts file"); | |
115 | return; | |
11fdf7f2 | 116 | } |
9f95a23c TL |
117 | while (getline(&temp, &len, file) != -1) { |
118 | if ((strstr(temp, string)) != NULL) { | |
119 | token = strtok(temp, ":"); | |
120 | break; | |
121 | } | |
11fdf7f2 TL |
122 | } |
123 | ||
9f95a23c TL |
124 | if (!token) { |
125 | DPAA2_BUS_WARN("Failed to get interrupt id for dpio.%d", | |
126 | dpio_id); | |
127 | if (temp) | |
128 | free(temp); | |
129 | fclose(file); | |
130 | return; | |
11fdf7f2 TL |
131 | } |
132 | ||
9f95a23c TL |
133 | cpu_mask = cpu_mask << dpaa2_cpu[lcoreid]; |
134 | snprintf(command, COMMAND_LEN, "echo %X > /proc/irq/%s/smp_affinity", | |
135 | cpu_mask, token); | |
136 | ret = system(command); | |
137 | if (ret < 0) | |
138 | DPAA2_BUS_DEBUG( | |
139 | "Failed to affine interrupts on respective core"); | |
140 | else | |
141 | DPAA2_BUS_DEBUG(" %s command is executed", command); | |
142 | ||
143 | free(temp); | |
144 | fclose(file); | |
145 | } | |
11fdf7f2 | 146 | |
9f95a23c TL |
147 | static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev, int lcoreid) |
148 | { | |
149 | struct epoll_event epoll_ev; | |
150 | int eventfd, dpio_epoll_fd, ret; | |
151 | int threshold = 0x3, timeout = 0xFF; | |
152 | ||
153 | dpio_epoll_fd = epoll_create(1); | |
154 | ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0); | |
155 | if (ret) { | |
156 | DPAA2_BUS_ERR("Interrupt registeration failed"); | |
11fdf7f2 TL |
157 | return -1; |
158 | } | |
159 | ||
9f95a23c TL |
160 | if (getenv("DPAA2_PORTAL_INTR_THRESHOLD")) |
161 | threshold = atoi(getenv("DPAA2_PORTAL_INTR_THRESHOLD")); | |
11fdf7f2 | 162 | |
9f95a23c TL |
163 | if (getenv("DPAA2_PORTAL_INTR_TIMEOUT")) |
164 | sscanf(getenv("DPAA2_PORTAL_INTR_TIMEOUT"), "%x", &timeout); | |
11fdf7f2 | 165 | |
9f95a23c TL |
166 | qbman_swp_interrupt_set_trigger(dpio_dev->sw_portal, |
167 | QBMAN_SWP_INTERRUPT_DQRI); | |
168 | qbman_swp_interrupt_clear_status(dpio_dev->sw_portal, 0xffffffff); | |
169 | qbman_swp_interrupt_set_inhibit(dpio_dev->sw_portal, 0); | |
170 | qbman_swp_dqrr_thrshld_write(dpio_dev->sw_portal, threshold); | |
171 | qbman_swp_intr_timeout_write(dpio_dev->sw_portal, timeout); | |
172 | ||
173 | eventfd = dpio_dev->intr_handle.fd; | |
174 | epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET; | |
175 | epoll_ev.data.fd = eventfd; | |
176 | ||
177 | ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev); | |
178 | if (ret < 0) { | |
179 | DPAA2_BUS_ERR("epoll_ctl failed"); | |
11fdf7f2 TL |
180 | return -1; |
181 | } | |
9f95a23c | 182 | dpio_dev->epoll_fd = dpio_epoll_fd; |
11fdf7f2 | 183 | |
9f95a23c | 184 | dpaa2_affine_dpio_intr_to_respective_core(dpio_dev->hw_id, lcoreid); |
11fdf7f2 TL |
185 | |
186 | return 0; | |
187 | } | |
9f95a23c | 188 | #endif |
11fdf7f2 TL |
189 | |
/* Program the QBMan stashing destination (SDEST) of @dpio_dev so that
 * dequeued data is stashed toward the cache cluster of the physical
 * CPU running @lcoreid; when the eventdev PMD is enabled, also set up
 * the portal interrupt for that core.
 *
 * A negative @lcoreid means "use the master lcore".
 *
 * Returns 0 on success, -1 on any failure.
 */
static int
dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int lcoreid)
{
	int sdest, ret;
	int cpu_id;

	/* Set the Stashing Destination */
	if (lcoreid < 0) {
		lcoreid = rte_get_master_lcore();
		if (lcoreid < 0) {
			DPAA2_BUS_ERR("Getting CPU Index failed");
			return -1;
		}
	}

	/* Physical CPU backing this lcore, as recorded by
	 * dpaa2_check_lcore_cpuset().
	 */
	cpu_id = dpaa2_cpu[lcoreid];

	/* Set the STASH Destination depending on Current CPU ID.
	 * The valid SDEST values depend on the platform's cluster
	 * base/size; see the cluster maps near the top of this file.
	 */

	sdest = dpaa2_core_cluster_sdest(cpu_id);
	DPAA2_BUS_DEBUG("Portal= %d  CPU= %u lcore id =%u SDEST= %d",
			dpio_dev->index, cpu_id, lcoreid, sdest);

	ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
					    dpio_dev->token, sdest);
	if (ret) {
		DPAA2_BUS_ERR("%d ERROR in SDEST", ret);
		return -1;
	}

#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
	if (dpaa2_dpio_intr_init(dpio_dev, lcoreid)) {
		DPAA2_BUS_ERR("Interrupt registration failed for dpio");
		return -1;
	}
#endif

	return 0;
}
231 | ||
/* Reserve an unused DPIO portal: walk the global device list and take
 * the first device whose ref_count can be atomically claimed, then
 * configure its stashing for @lcoreid.
 *
 * Returns the reserved device, or NULL if every portal is in use.
 *
 * NOTE(review): when dpaa2_configure_stashing() fails, the error is
 * only logged and the portal is still returned with its ref_count
 * held — confirm this best-effort behavior is intentional.
 */
static struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int lcoreid)
{
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	int ret;

	/* Get DPIO dev handle from list using index */
	TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) {
		if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
			break;
	}
	if (!dpio_dev)
		return NULL;

	DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu",
			dpio_dev, dpio_dev->index, syscall(SYS_gettid));

	ret = dpaa2_configure_stashing(dpio_dev, lcoreid);
	if (ret)
		DPAA2_BUS_ERR("dpaa2_configure_stashing failed");

	return dpio_dev;
}
254 | ||
/* Affine the calling thread to a general-purpose DPIO portal.
 *
 * If this lcore already owns a portal, the portal is shared with the
 * current thread: its ref_count is bumped, the per-lcore TLS pointer
 * is refreshed, and the owning tid is updated. Otherwise a free
 * portal is reserved via dpaa2_get_qbman_swp().
 *
 * A non-EAL thread (LCORE_ID_ANY) uses the master lcore's slot.
 *
 * Returns 0 on success, -1 if the lcore id is out of range or no
 * free portal is available.
 */
int
dpaa2_affine_qbman_swp(void)
{
	unsigned int lcore_id = rte_lcore_id();
	uint64_t tid = syscall(SYS_gettid);

	if (lcore_id == LCORE_ID_ANY)
		lcore_id = rte_get_master_lcore();
	/* if the core id is not supported */
	else if (lcore_id >= RTE_MAX_LCORE)
		return -1;

	if (dpaa2_io_portal[lcore_id].dpio_dev) {
		DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared"
			    " between thread %" PRIu64 " and current "
			    "%" PRIu64 "\n",
			    dpaa2_io_portal[lcore_id].dpio_dev,
			    dpaa2_io_portal[lcore_id].dpio_dev->index,
			    dpaa2_io_portal[lcore_id].net_tid,
			    tid);
		RTE_PER_LCORE(_dpaa2_io).dpio_dev
			= dpaa2_io_portal[lcore_id].dpio_dev;
		rte_atomic16_inc(&dpaa2_io_portal
				 [lcore_id].dpio_dev->ref_count);
		dpaa2_io_portal[lcore_id].net_tid = tid;

		DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - "
			    "%" PRIu64 "\n",
			    dpaa2_io_portal[lcore_id].dpio_dev,
			    dpaa2_io_portal[lcore_id].dpio_dev->index,
			    tid);
		return 0;
	}

	/* Populate the dpaa2_io_portal structure */
	dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp(lcore_id);

	if (dpaa2_io_portal[lcore_id].dpio_dev) {
		RTE_PER_LCORE(_dpaa2_io).dpio_dev
			= dpaa2_io_portal[lcore_id].dpio_dev;
		dpaa2_io_portal[lcore_id].net_tid = tid;

		return 0;
	} else {
		return -1;
	}
}
302 | ||
/* Affine the calling thread to a dedicated ethernet-RX DPIO portal.
 *
 * Mirrors dpaa2_affine_qbman_swp() but manages the separate
 * ethrx_dpio_dev slot (tracked by sec_tid): reuses and shares the
 * lcore's existing ethrx portal when present, otherwise reserves a
 * free one from the global list.
 *
 * A non-EAL thread (LCORE_ID_ANY) uses the master lcore's slot.
 *
 * Returns 0 on success, -1 if the lcore id is out of range or no
 * free portal is available.
 */
int
dpaa2_affine_qbman_ethrx_swp(void)
{
	unsigned int lcore_id = rte_lcore_id();
	uint64_t tid = syscall(SYS_gettid);

	if (lcore_id == LCORE_ID_ANY)
		lcore_id = rte_get_master_lcore();
	/* if the core id is not supported */
	else if (lcore_id >= RTE_MAX_LCORE)
		return -1;

	if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
		DPAA2_BUS_DP_INFO(
			"DPAA Portal=%p (%d) is being shared between thread"
			" %" PRIu64 " and current %" PRIu64 "\n",
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
			dpaa2_io_portal[lcore_id].sec_tid,
			tid);
		RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
			= dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
		rte_atomic16_inc(&dpaa2_io_portal
				 [lcore_id].ethrx_dpio_dev->ref_count);
		dpaa2_io_portal[lcore_id].sec_tid = tid;

		DPAA2_BUS_DP_DEBUG(
			"Old Portal=%p (%d) affined thread"
			" - %" PRIu64 "\n",
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
			tid);
		return 0;
	}

	/* Populate the dpaa2_io_portal structure */
	dpaa2_io_portal[lcore_id].ethrx_dpio_dev =
		dpaa2_get_qbman_swp(lcore_id);

	if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
		RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
			= dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
		dpaa2_io_portal[lcore_id].sec_tid = tid;
		return 0;
	} else {
		return -1;
	}
}
351 | ||
9f95a23c TL |
352 | /* |
353 | * This checks for not supported lcore mappings as well as get the physical | |
354 | * cpuid for the lcore. | |
355 | * one lcore can only map to 1 cpu i.e. 1@10-14 not supported. | |
356 | * one cpu can be mapped to more than one lcores. | |
357 | */ | |
358 | static int | |
359 | dpaa2_check_lcore_cpuset(void) | |
360 | { | |
361 | unsigned int lcore_id, i; | |
362 | int ret = 0; | |
363 | ||
364 | for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) | |
365 | dpaa2_cpu[lcore_id] = 0xffffffff; | |
366 | ||
367 | for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { | |
368 | for (i = 0; i < RTE_MAX_LCORE; i++) { | |
369 | if (CPU_ISSET(i, &lcore_config[lcore_id].cpuset)) { | |
370 | RTE_LOG(DEBUG, EAL, "lcore id = %u cpu=%u\n", | |
371 | lcore_id, i); | |
372 | if (dpaa2_cpu[lcore_id] != 0xffffffff) { | |
373 | DPAA2_BUS_ERR( | |
374 | "ERR:lcore map to multi-cpu not supported"); | |
375 | ret = -1; | |
376 | } else { | |
377 | dpaa2_cpu[lcore_id] = i; | |
378 | } | |
379 | } | |
380 | } | |
381 | } | |
382 | return ret; | |
383 | } | |
384 | ||
385 | static int | |
386 | dpaa2_create_dpio_device(int vdev_fd, | |
11fdf7f2 | 387 | struct vfio_device_info *obj_info, |
9f95a23c | 388 | int object_id) |
11fdf7f2 | 389 | { |
9f95a23c | 390 | struct dpaa2_dpio_dev *dpio_dev = NULL; |
11fdf7f2 | 391 | struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)}; |
9f95a23c TL |
392 | struct qbman_swp_desc p_des; |
393 | struct dpio_attr attr; | |
394 | static int check_lcore_cpuset; | |
11fdf7f2 TL |
395 | |
396 | if (obj_info->num_regions < NUM_DPIO_REGIONS) { | |
9f95a23c | 397 | DPAA2_BUS_ERR("Not sufficient number of DPIO regions"); |
11fdf7f2 TL |
398 | return -1; |
399 | } | |
400 | ||
9f95a23c TL |
401 | dpio_dev = rte_zmalloc(NULL, sizeof(struct dpaa2_dpio_dev), |
402 | RTE_CACHE_LINE_SIZE); | |
11fdf7f2 | 403 | if (!dpio_dev) { |
9f95a23c | 404 | DPAA2_BUS_ERR("Memory allocation failed for DPIO Device"); |
11fdf7f2 TL |
405 | return -1; |
406 | } | |
407 | ||
11fdf7f2 TL |
408 | dpio_dev->dpio = NULL; |
409 | dpio_dev->hw_id = object_id; | |
11fdf7f2 TL |
410 | rte_atomic16_init(&dpio_dev->ref_count); |
411 | /* Using single portal for all devices */ | |
412 | dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX]; | |
413 | ||
9f95a23c TL |
414 | if (!check_lcore_cpuset) { |
415 | check_lcore_cpuset = 1; | |
416 | ||
417 | if (dpaa2_check_lcore_cpuset() < 0) | |
418 | goto err; | |
11fdf7f2 TL |
419 | } |
420 | ||
9f95a23c TL |
421 | dpio_dev->dpio = rte_zmalloc(NULL, sizeof(struct fsl_mc_io), |
422 | RTE_CACHE_LINE_SIZE); | |
423 | if (!dpio_dev->dpio) { | |
424 | DPAA2_BUS_ERR("Memory allocation failure"); | |
425 | goto err; | |
426 | } | |
11fdf7f2 | 427 | |
9f95a23c TL |
428 | dpio_dev->dpio->regs = dpio_dev->mc_portal; |
429 | if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id, | |
430 | &dpio_dev->token)) { | |
431 | DPAA2_BUS_ERR("Failed to allocate IO space"); | |
432 | goto err; | |
11fdf7f2 TL |
433 | } |
434 | ||
9f95a23c TL |
435 | if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) { |
436 | DPAA2_BUS_ERR("Failed to reset dpio"); | |
437 | goto err; | |
438 | } | |
439 | ||
440 | if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) { | |
441 | DPAA2_BUS_ERR("Failed to Enable dpio"); | |
442 | goto err; | |
443 | } | |
444 | ||
445 | if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW, | |
446 | dpio_dev->token, &attr)) { | |
447 | DPAA2_BUS_ERR("DPIO Get attribute failed"); | |
448 | goto err; | |
449 | } | |
450 | ||
451 | /* find the SoC type for the first time */ | |
452 | if (!dpaa2_svr_family) { | |
453 | struct mc_soc_version mc_plat_info = {0}; | |
454 | ||
455 | if (mc_get_soc_version(dpio_dev->dpio, | |
456 | CMD_PRI_LOW, &mc_plat_info)) { | |
457 | DPAA2_BUS_ERR("Unable to get SoC version information"); | |
458 | } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) { | |
459 | dpaa2_core_cluster_base = 0x02; | |
460 | dpaa2_cluster_sz = 4; | |
461 | DPAA2_BUS_DEBUG("LS108x (A53) Platform Detected"); | |
462 | } else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) { | |
463 | dpaa2_core_cluster_base = 0x00; | |
464 | dpaa2_cluster_sz = 2; | |
465 | DPAA2_BUS_DEBUG("LX2160 Platform Detected"); | |
466 | } | |
467 | dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000); | |
468 | ||
469 | if (dpaa2_svr_family == SVR_LX2160A) { | |
470 | dpaa2_dqrr_size = DPAA2_LX2_DQRR_RING_SIZE; | |
471 | dpaa2_eqcr_size = DPAA2_LX2_EQCR_RING_SIZE; | |
472 | } else { | |
473 | dpaa2_dqrr_size = DPAA2_DQRR_RING_SIZE; | |
474 | dpaa2_eqcr_size = DPAA2_EQCR_RING_SIZE; | |
475 | } | |
476 | } | |
477 | ||
478 | if (dpaa2_svr_family == SVR_LX2160A) | |
479 | reg_info.index = DPAA2_SWP_CENA_MEM_REGION; | |
480 | else | |
481 | reg_info.index = DPAA2_SWP_CENA_REGION; | |
482 | ||
483 | if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { | |
484 | DPAA2_BUS_ERR("vfio: error getting region info"); | |
485 | goto err; | |
486 | } | |
487 | ||
488 | dpio_dev->ce_size = reg_info.size; | |
489 | dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size, | |
490 | PROT_WRITE | PROT_READ, MAP_SHARED, | |
491 | vdev_fd, reg_info.offset); | |
492 | ||
493 | reg_info.index = DPAA2_SWP_CINH_REGION; | |
494 | if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { | |
495 | DPAA2_BUS_ERR("vfio: error getting region info"); | |
496 | goto err; | |
11fdf7f2 TL |
497 | } |
498 | ||
11fdf7f2 | 499 | dpio_dev->ci_size = reg_info.size; |
9f95a23c | 500 | dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size, |
11fdf7f2 | 501 | PROT_WRITE | PROT_READ, MAP_SHARED, |
9f95a23c | 502 | vdev_fd, reg_info.offset); |
11fdf7f2 | 503 | |
9f95a23c TL |
504 | /* Configure & setup SW portal */ |
505 | p_des.block = NULL; | |
506 | p_des.idx = attr.qbman_portal_id; | |
507 | p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr); | |
508 | p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr); | |
509 | p_des.irq = -1; | |
510 | p_des.qman_version = attr.qbman_version; | |
511 | p_des.eqcr_mode = qman_eqcr_vb_ring; | |
512 | p_des.cena_access_mode = qman_cena_fastest_access; | |
513 | ||
514 | dpio_dev->sw_portal = qbman_swp_init(&p_des); | |
515 | if (dpio_dev->sw_portal == NULL) { | |
516 | DPAA2_BUS_ERR("QBMan SW Portal Init failed"); | |
517 | goto err; | |
11fdf7f2 TL |
518 | } |
519 | ||
520 | io_space_count++; | |
521 | dpio_dev->index = io_space_count; | |
9f95a23c TL |
522 | |
523 | if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) { | |
524 | DPAA2_BUS_ERR("Fail to setup interrupt for %d", | |
525 | dpio_dev->hw_id); | |
526 | goto err; | |
527 | } | |
528 | ||
529 | dpio_dev->eqresp = rte_zmalloc(NULL, MAX_EQ_RESP_ENTRIES * | |
530 | (sizeof(struct qbman_result) + | |
531 | sizeof(struct eqresp_metadata)), | |
532 | RTE_CACHE_LINE_SIZE); | |
533 | if (!dpio_dev->eqresp) { | |
534 | DPAA2_BUS_ERR("Memory allocation failed for eqresp"); | |
535 | goto err; | |
536 | } | |
537 | dpio_dev->eqresp_meta = (struct eqresp_metadata *)(dpio_dev->eqresp + | |
538 | MAX_EQ_RESP_ENTRIES); | |
539 | ||
540 | ||
541 | TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next); | |
11fdf7f2 TL |
542 | |
543 | return 0; | |
9f95a23c TL |
544 | |
545 | err: | |
546 | if (dpio_dev->dpio) { | |
547 | dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); | |
548 | dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token); | |
549 | rte_free(dpio_dev->dpio); | |
550 | } | |
551 | ||
552 | rte_free(dpio_dev); | |
553 | ||
554 | /* For each element in the list, cleanup */ | |
555 | TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) { | |
556 | if (dpio_dev->dpio) { | |
557 | dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, | |
558 | dpio_dev->token); | |
559 | dpio_close(dpio_dev->dpio, CMD_PRI_LOW, | |
560 | dpio_dev->token); | |
561 | rte_free(dpio_dev->dpio); | |
562 | } | |
563 | rte_free(dpio_dev); | |
564 | } | |
565 | ||
566 | /* Preventing re-use of the list with old entries */ | |
567 | TAILQ_INIT(&dpio_dev_list); | |
568 | ||
569 | return -1; | |
11fdf7f2 TL |
570 | } |
571 | ||
572 | void | |
573 | dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage) | |
574 | { | |
575 | int i = 0; | |
576 | ||
577 | for (i = 0; i < NUM_DQS_PER_QUEUE; i++) { | |
578 | if (q_storage->dq_storage[i]) | |
579 | rte_free(q_storage->dq_storage[i]); | |
580 | } | |
581 | } | |
582 | ||
583 | int | |
584 | dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage) | |
585 | { | |
586 | int i = 0; | |
587 | ||
588 | for (i = 0; i < NUM_DQS_PER_QUEUE; i++) { | |
589 | q_storage->dq_storage[i] = rte_malloc(NULL, | |
9f95a23c | 590 | dpaa2_dqrr_size * sizeof(struct qbman_result), |
11fdf7f2 TL |
591 | RTE_CACHE_LINE_SIZE); |
592 | if (!q_storage->dq_storage[i]) | |
593 | goto fail; | |
594 | } | |
595 | return 0; | |
596 | fail: | |
9f95a23c | 597 | while (--i >= 0) |
11fdf7f2 TL |
598 | rte_free(q_storage->dq_storage[i]); |
599 | ||
600 | return -1; | |
601 | } | |
9f95a23c TL |
602 | |
603 | uint32_t | |
604 | dpaa2_free_eq_descriptors(void) | |
605 | { | |
606 | struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; | |
607 | struct qbman_result *eqresp; | |
608 | struct eqresp_metadata *eqresp_meta; | |
609 | struct dpaa2_queue *txq; | |
610 | ||
611 | while (dpio_dev->eqresp_ci != dpio_dev->eqresp_pi) { | |
612 | eqresp = &dpio_dev->eqresp[dpio_dev->eqresp_ci]; | |
613 | eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_ci]; | |
614 | ||
615 | if (!qbman_result_eqresp_rspid(eqresp)) | |
616 | break; | |
617 | ||
618 | if (qbman_result_eqresp_rc(eqresp)) { | |
619 | txq = eqresp_meta->dpaa2_q; | |
620 | txq->cb_eqresp_free(dpio_dev->eqresp_ci); | |
621 | } | |
622 | qbman_result_eqresp_set_rspid(eqresp, 0); | |
623 | ||
624 | dpio_dev->eqresp_ci + 1 < MAX_EQ_RESP_ENTRIES ? | |
625 | dpio_dev->eqresp_ci++ : (dpio_dev->eqresp_ci = 0); | |
626 | } | |
627 | ||
628 | /* Return 1 less entry so that PI and CI are never same in a | |
629 | * case there all the EQ responses are in use. | |
630 | */ | |
631 | if (dpio_dev->eqresp_ci > dpio_dev->eqresp_pi) | |
632 | return dpio_dev->eqresp_ci - dpio_dev->eqresp_pi - 1; | |
633 | else | |
634 | return dpio_dev->eqresp_ci - dpio_dev->eqresp_pi + | |
635 | MAX_EQ_RESP_ENTRIES - 1; | |
636 | } | |
637 | ||
/* fslmc bus object descriptor: registers dpaa2_create_dpio_device()
 * as the constructor for DPIO objects discovered on the bus.
 */
static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
	.dev_type = DPAA2_IO,
	.create = dpaa2_create_dpio_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, rte_dpaa2_dpio_obj);