/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_crypto.h"
#include "qat_algs.h"
#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC			4096
#define ADF_MIN_SYM_DESC			128
#define ADF_SYM_TX_RING_DESC_SIZE		128
#define ADF_SYM_RX_RING_DESC_SIZE		32
#define ADF_SYM_TX_QUEUE_STARTOFF		2
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_RX_QUEUE_STARTOFF		10
#define ADF_ARB_REG_SLOT			0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET		0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
	int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

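/*
 * Reserve (or re-use) a DMA-able memzone to back a QAT ring. An existing
 * memzone with the same name is re-used if it is large enough and on the
 * requested socket; otherwise a new one is reserved, aligned to its own
 * size, with a hugepage-size hint taken from the physmem layout.
 */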
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
			int socket_id)
{
	const struct rte_memzone *mz;
	unsigned memzone_flags = 0;
	const struct rte_memseg *ms;

	PMD_INIT_FUNC_TRACE();
	mz = rte_memzone_lookup(queue_name);
	if (mz != 0) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			PMD_DRV_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		PMD_DRV_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %u",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
					queue_name, queue_size, socket_id);
	ms = rte_eal_get_physmem_layout();
	switch (ms[0].hugepage_sz) {
	case(RTE_PGSIZE_2M):
		memzone_flags = RTE_MEMZONE_2MB;
		break;
	case(RTE_PGSIZE_1G):
		memzone_flags = RTE_MEMZONE_1GB;
		break;
	case(RTE_PGSIZE_16M):
		memzone_flags = RTE_MEMZONE_16MB;
		break;
	case(RTE_PGSIZE_16G):
		memzone_flags = RTE_MEMZONE_16GB;
		break;
	default:
		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
	}
#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(queue_name, queue_size,
		socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
		memzone_flags, queue_size);
#endif
}

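/*
 * Set up one symmetric crypto queue pair: validate the descriptor count and
 * qp id, allocate the qp metadata, create the Tx and Rx hardware rings,
 * enable ring arbitration, and populate a mempool of per-descriptor op
 * cookies whose SGL physical addresses are pre-computed here.
 */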
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
	const struct rte_cryptodev_qp_conf *qp_conf,
	int socket_id)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev;
	int ret;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[queue_pair_id] != NULL) {
		ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
		if (ret < 0)
			return ret;
	}

	if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
		(qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
		PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
				qp_conf->nb_descriptors);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[0].addr == NULL) {
		PMD_DRV_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	if (queue_pair_id >=
			(ADF_NUM_SYM_QPS_PER_BUNDLE *
					ADF_NUM_BUNDLES_PER_DEV)) {
		PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
				queue_pair_id);
		return -EINVAL;
	}
	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc("qat PMD qp metadata",
			sizeof(*qp), RTE_CACHE_LINE_SIZE);
	if (qp == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
			qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE);

	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	rte_atomic16_init(&qp->inflights16);

	if (qat_tx_queue_create(dev, &(qp->tx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_INIT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	if (qat_rx_queue_create(dev, &(qp->rx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_DRV_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
		dev->driver->pci_drv.driver.name, dev->data->dev_id,
		queue_pair_id);

	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				sizeof(struct qat_crypto_op_cookie), 64, 0,
				NULL, NULL, NULL, NULL, socket_id,
				0);
	if (!qp->op_cookie_pool) {
		PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
				" op mempool");
		goto create_err;
	}

	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
			return -EFAULT;
		}

		struct qat_crypto_op_cookie *sql_cookie =
				qp->op_cookies[i];

		sql_cookie->qat_sgl_src_phys_addr =
				rte_mempool_virt2phy(qp->op_cookie_pool,
				sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_src);

		sql_cookie->qat_sgl_dst_phys_addr =
				rte_mempool_virt2phy(qp->op_cookie_pool,
				sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_dst);
	}
	dev->data->queue_pairs[queue_pair_id] = qp;
	return 0;

create_err:
	rte_free(qp);
	return -EFAULT;
}

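/*
 * Release a queue pair. Ring memory and op cookies are freed only when no
 * responses are still in flight; otherwise -EAGAIN is returned so the
 * caller can retry after the Rx ring has been drained.
 */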
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct qat_qp *qp =
			(struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
	uint32_t i;

	PMD_INIT_FUNC_TRACE();
	if (qp == NULL) {
		PMD_DRV_LOG(DEBUG, "qp already freed");
		return 0;
	}

	/* Don't free memory if there are still responses to be processed */
	if (rte_atomic16_read(&(qp->inflights16)) == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);

	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);

	rte_free(qp->op_cookies);
	rte_free(qp);
	dev->data->queue_pairs[queue_pair_id] = NULL;
	return 0;
}

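/*
 * The Tx and Rx helpers below map a logical qp_id onto a hardware bundle
 * and ring number; they differ only in the ring start offset within the
 * bundle and in the descriptor size passed to qat_queue_create().
 */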
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t qp_id,
	uint32_t nb_desc, int socket_id)
{
	PMD_INIT_FUNC_TRACE();
	queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
	queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
					ADF_SYM_TX_QUEUE_STARTOFF;
	PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
		nb_desc, qp_id, queue->hw_bundle_number,
		queue->hw_queue_number);

	return qat_queue_create(dev, queue, nb_desc,
				ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

static int qat_rx_queue_create(struct rte_cryptodev *dev,
		struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
		int socket_id)
{
	PMD_INIT_FUNC_TRACE();
	queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
	queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
					ADF_SYM_RX_QUEUE_STARTOFF;

	PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
		nb_desc, qp_id, queue->hw_bundle_number,
		queue->hw_queue_number);
	return qat_queue_create(dev, queue, nb_desc,
				ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

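/*
 * Free the memzone backing a ring, first overwriting the ring memory with
 * the unused pattern (0x7F) before returning it.
 */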
static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		PMD_DRV_LOG(DEBUG, "Invalid queue");
		return;
	}
	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}

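/*
 * Common ring creation: reserve a uniquely named memzone for the ring,
 * check that its physical address is aligned to the ring size, translate
 * the byte size into the CSR ring-size encoding and program the ring base
 * address register.
 */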
static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
		uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
	uint64_t queue_base;
	void *io_addr;
	const struct rte_memzone *qp_mz;
	uint32_t queue_size_bytes = nb_desc*desc_size;
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();
	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
		dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			socket_id);
	if (qp_mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->phys_addr;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
					" 0x%"PRIx64"\n",
					queue->base_phys_addr);
		return -EFAULT;
	}

	if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
			!= 0) {
		PMD_DRV_LOG(ERR, "Invalid num inflights");
		return -EINVAL;
	}

	queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
					ADF_BYTES_TO_MSG_SIZE(desc_size));
	queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
	PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
				" msg_size %u, max_inflights %u modulo %u",
				queue->queue_size, queue_size_bytes,
				nb_desc, desc_size, queue->max_inflights,
				queue->modulo);

	if (queue->max_inflights < 2) {
		PMD_DRV_LOG(ERR, "Invalid num inflights");
		return -EINVAL;
	}
	queue->head = 0;
	queue->tail = 0;
	queue->msg_size = desc_size;

	/*
	 * Write an unused pattern to the queue memory.
	 */
	memset(queue->base_addr, 0x7F, queue_size_bytes);

	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
					queue->queue_size);
	pci_dev = RTE_DEV_TO_PCI(dev->device);

	io_addr = pci_dev->mem_resource[0].addr;

	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_base);
	return 0;
}

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
			uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}

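/*
 * Find the CSR ring-size encoding whose size in bytes exactly matches
 * msg_size * msg_num; if no encoding matches, the requested ring size is
 * not supported by the hardware.
 */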
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
			uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	PMD_INIT_FUNC_TRACE();
	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
	return -EINVAL;
}

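/*
 * Ring arbitration control: adf_queue_arb_enable() sets the ring's bit in
 * the bundle's RINGSRVARBEN CSR so the arbiter services the Tx ring;
 * adf_queue_arb_disable() toggles the same bit back when the qp is torn
 * down.
 */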
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
							txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
							txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value ^= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

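/*
 * Program the ring-config CSRs for both rings of the queue pair: the Tx
 * ring with the plain ring size, the Rx (response) ring additionally with
 * near-full and near-empty watermarks.
 */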
static void adf_configure_queues(struct qat_qp *qp)
{
	uint32_t queue_config;
	struct qat_queue *queue = &qp->tx_q;

	PMD_INIT_FUNC_TRACE();
	queue_config = BUILD_RING_CONFIG(queue->queue_size);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);

	queue = &qp->rx_q;
	queue_config =
			BUILD_RESP_RING_CONFIG(queue->queue_size,
					ADF_RING_NEAR_WATERMARK_512,
					ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);
}