]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / spdk / dpdk / drivers / raw / dpaa2_cmdif / dpaa2_cmdif.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 NXP
3 */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8
9 #include <rte_bus_vdev.h>
10 #include <rte_atomic.h>
11 #include <rte_interrupts.h>
12 #include <rte_branch_prediction.h>
13 #include <rte_lcore.h>
14
15 #include <rte_rawdev.h>
16 #include <rte_rawdev_pmd.h>
17
18 #include <portal/dpaa2_hw_pvt.h>
19 #include <portal/dpaa2_hw_dpio.h>
20 #include "dpaa2_cmdif_logs.h"
21 #include "rte_pmd_dpaa2_cmdif.h"
22
23 /* Dynamic log type identifier */
24 int dpaa2_cmdif_logtype;
25
26 /* CMDIF driver name */
27 #define DPAA2_CMDIF_PMD_NAME dpaa2_dpci
28
29 /* CMDIF driver object */
30 static struct rte_vdev_driver dpaa2_cmdif_drv;
31
32 /*
33 * This API provides the DPCI device ID in 'attr_value'.
34 * The device ID shall be passed by GPP to the AIOP using CMDIF commands.
35 */
36 static int
37 dpaa2_cmdif_get_attr(struct rte_rawdev *dev,
38 const char *attr_name,
39 uint64_t *attr_value)
40 {
41 struct dpaa2_dpci_dev *cidev = dev->dev_private;
42
43 DPAA2_CMDIF_FUNC_TRACE();
44
45 RTE_SET_USED(attr_name);
46
47 if (!attr_value) {
48 DPAA2_CMDIF_ERR("Invalid arguments for getting attributes");
49 return -EINVAL;
50 }
51 *attr_value = cidev->dpci_id;
52
53 return 0;
54 }
55
/*
 * Enqueue one buffer (buffers[0]) to the AIOP over the DPCI Tx queue
 * selected by the priority in 'context' (a struct rte_dpaa2_cmdif_context,
 * which also supplies the frame length, FRC and FLC fields).
 * 'count' is unused: exactly one frame is sent per call.
 * Always returns 0, even on portal-affinity failure.
 */
static int
dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
			 struct rte_rawdev_buf **buffers,
			 unsigned int count,
			 rte_rawdev_obj_t context)
{
	struct dpaa2_dpci_dev *cidev = dev->dev_private;
	struct rte_dpaa2_cmdif_context *cmdif_send_cnxt;
	struct dpaa2_queue *txq;
	struct qbman_fd fd;
	struct qbman_eq_desc eqdesc;
	struct qbman_swp *swp;
	int ret;

	DPAA2_CMDIF_FUNC_TRACE();

	RTE_SET_USED(count);

	/* Make sure this lcore has an affined QBMAN software portal. */
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_CMDIF_ERR("Failure in affining portal\n");
			/* NOTE(review): failure is reported as "0 frames
			 * enqueued" rather than a negative error code -
			 * confirm callers expect this convention.
			 */
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	cmdif_send_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
	txq = &(cidev->tx_queue[cmdif_send_cnxt->priority]);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);

	/* Set some of the FD parameters to 0.
	 * For performance reasons do not memset
	 */
	fd.simple.bpid_offset = 0;
	fd.simple.ctrl = 0;

	/* Fill the frame descriptor from the caller-supplied context. */
	DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr));
	DPAA2_SET_FD_LEN(&fd, cmdif_send_cnxt->size);
	DPAA2_SET_FD_FRC(&fd, cmdif_send_cnxt->frc);
	DPAA2_SET_FD_FLC(&fd, cmdif_send_cnxt->flc);

	/* Enqueue a packet to the QBMAN, spinning while the portal is busy;
	 * any other error is logged and the loop exits.
	 */
	do {
		ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
		if (ret < 0 && ret != -EBUSY)
			DPAA2_CMDIF_ERR("Transmit failure with err: %d\n", ret);
	} while (ret == -EBUSY);

	DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n");

	return 0;
}
114
/*
 * Dequeue at most one frame from the DPCI Rx queue selected by the
 * priority in 'context'. On success, buffers[0]->buf_addr is pointed at
 * the received frame payload and the context's size/flc/frc fields are
 * filled from the frame descriptor.
 * 'count' is unused: at most one frame is pulled per call.
 * Returns 1 if a frame was received, 0 otherwise.
 */
static int
dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
			 struct rte_rawdev_buf **buffers,
			 unsigned int count,
			 rte_rawdev_obj_t context)
{
	struct dpaa2_dpci_dev *cidev = dev->dev_private;
	struct rte_dpaa2_cmdif_context *cmdif_rcv_cnxt;
	struct dpaa2_queue *rxq;
	struct qbman_swp *swp;
	struct qbman_result *dq_storage;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	uint8_t status;
	int ret;

	DPAA2_CMDIF_FUNC_TRACE();

	RTE_SET_USED(count);

	/* Make sure this lcore has an affined QBMAN software portal. */
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_CMDIF_ERR("Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
	dq_storage = rxq->q_storage->dq_storage[0];

	/* Prepare a volatile (pull) dequeue command for a single frame,
	 * with results delivered into dq_storage.
	 */
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
	qbman_pull_desc_set_numframes(&pulldesc, 1);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

	/* Issue the pull command, retrying while the portal is busy. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Check if previous issued command is completed. */
	while (!qbman_check_command_complete(dq_storage))
		;
	/* Loop until the dq_storage is updated with new token by QBMAN */
	while (!qbman_result_has_new_result(swp, dq_storage))
		;

	/* Check for valid frame. */
	status = (uint8_t)qbman_result_DQ_flags(dq_storage);
	if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
		DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n");
		return 0;
	}

	fd = qbman_result_DQ_fd(dq_storage);

	/* Hand the frame payload address and FD metadata back to caller. */
	buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR(
		DPAA2_GET_FD_ADDR(fd) + DPAA2_GET_FD_OFFSET(fd));
	cmdif_rcv_cnxt->size = DPAA2_GET_FD_LEN(fd);
	cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd);
	cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd);

	DPAA2_CMDIF_DP_DEBUG("packet received\n");

	return 1;
}
189
/* Raw device operations exposed by this PMD. Only attribute query and
 * single-frame enqueue/dequeue are implemented.
 */
static const struct rte_rawdev_ops dpaa2_cmdif_ops = {
	.attr_get = dpaa2_cmdif_get_attr,
	.enqueue_bufs = dpaa2_cmdif_enqueue_bufs,
	.dequeue_bufs = dpaa2_cmdif_dequeue_bufs,
};
195
/*
 * Create a rawdev named 'name' for the CMDIF PMD on 'socket_id'.
 * In the primary process this also allocates a DPCI device and stores it
 * in dev_private; a secondary process only sets up ops/device pointers.
 * Returns 0 on success, negative errno on failure.
 */
static int
dpaa2_cmdif_create(const char *name,
		   struct rte_vdev_device *vdev,
		   int socket_id)
{
	struct rte_rawdev *rawdev;
	struct dpaa2_dpci_dev *cidev;

	/* Allocate device structure */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct dpaa2_dpci_dev),
					 socket_id);
	if (!rawdev) {
		DPAA2_CMDIF_ERR("Unable to allocate rawdevice");
		return -EINVAL;
	}

	rawdev->dev_ops = &dpaa2_cmdif_ops;
	rawdev->device = &vdev->device;
	rawdev->driver_name = vdev->device.driver->name;

	/* For secondary processes, the primary has done all the work.
	 * NOTE(review): dev_private is not assigned on this path -
	 * presumably rte_rawdev_pmd_allocate() attaches the secondary to
	 * the primary's device; verify against the rawdev PMD API.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cidev = rte_dpaa2_alloc_dpci_dev();
	if (!cidev) {
		DPAA2_CMDIF_ERR("Unable to allocate CI device");
		rte_rawdev_pmd_release(rawdev);
		return -ENODEV;
	}

	rawdev->dev_private = cidev;

	return 0;
}
231
232 static int
233 dpaa2_cmdif_destroy(const char *name)
234 {
235 int ret;
236 struct rte_rawdev *rdev;
237
238 rdev = rte_rawdev_pmd_get_named_dev(name);
239 if (!rdev) {
240 DPAA2_CMDIF_ERR("Invalid device name (%s)", name);
241 return -EINVAL;
242 }
243
244 /* The primary process will only free the DPCI device */
245 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
246 rte_dpaa2_free_dpci_dev(rdev->dev_private);
247
248 ret = rte_rawdev_pmd_release(rdev);
249 if (ret)
250 DPAA2_CMDIF_DEBUG("Device cleanup failed");
251
252 return 0;
253 }
254
/* Probe handler: create a CMDIF rawdev for the given virtual device. */
static int
dpaa2_cmdif_probe(struct rte_vdev_device *vdev)
{
	const char *dev_name = rte_vdev_device_name(vdev);

	DPAA2_CMDIF_INFO("Init %s on NUMA node %d", dev_name,
			 rte_socket_id());

	return dpaa2_cmdif_create(dev_name, vdev, rte_socket_id());
}
269
/* Remove handler: tear down the rawdev created at probe time. */
static int
dpaa2_cmdif_remove(struct rte_vdev_device *vdev)
{
	const char *dev_name = rte_vdev_device_name(vdev);

	DPAA2_CMDIF_INFO("Closing %s on NUMA node %d", dev_name,
			 rte_socket_id());

	return dpaa2_cmdif_destroy(dev_name);
}
284
/* Virtual device driver definition (forward-declared above). */
static struct rte_vdev_driver dpaa2_cmdif_drv = {
	.probe = dpaa2_cmdif_probe,
	.remove = dpaa2_cmdif_remove
};
289
290 RTE_PMD_REGISTER_VDEV(DPAA2_CMDIF_PMD_NAME, dpaa2_cmdif_drv);
291
292 RTE_INIT(dpaa2_cmdif_init_log)
293 {
294 dpaa2_cmdif_logtype = rte_log_register("pmd.raw.dpaa2.cmdif");
295 if (dpaa2_cmdif_logtype >= 0)
296 rte_log_set_level(dpaa2_cmdif_logtype, RTE_LOG_INFO);
297 }