/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_device.h"
#include "qat_qp.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_comp.h"
#include "adf_transport_access_macros.h"


#define ADF_MAX_DESC			4096
#define ADF_MIN_DESC			128

#define ADF_ARB_REG_SLOT		0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET	0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)

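/*
 * Fixed ring-pair assignments per service for each hardware generation.
 * A tx ring and its paired rx ring on the same bundle form one queue
 * pair; on GEN1 the rx ring is tx + 8, on GEN3 it is tx + 4.
 */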
__extension__
const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 0,
			.rx_ring_num = 8,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}, {
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 1,
			.rx_ring_num = 9,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 2,
			.rx_ring_num = 10,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		},
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 3,
			.rx_ring_num = 11,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		{
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 6,
			.rx_ring_num = 14,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}, {
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 7,
			.rx_ring_num = 15,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	}
};

__extension__
const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 0,
			.rx_ring_num = 4,
			.tx_msg_size = 64,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		{
			.service_type = QAT_SERVICE_SYMMETRIC,
			.hw_bundle_num = 0,
			.tx_ring_num = 1,
			.rx_ring_num = 5,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	},
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		{
			.service_type = QAT_SERVICE_COMPRESSION,
			.hw_bundle_num = 0,
			.tx_ring_num = 3,
			.rx_ring_num = 7,
			.tx_msg_size = 128,
			.rx_msg_size = 32,
		}
	}
};

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct qat_pci_device *qat_dev,
	struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);

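/* Count the queue pairs in a hw_data table that provide the given service. */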
int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
		enum qat_service_type service)
{
	int i, count;

	for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
		if (qp_hw_data[i].service_type == service)
			count++;
	return count;
}

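/* Reserve an IOVA-contiguous memzone for a ring, or re-use an existing
 * one of sufficient size on a compatible socket. The zone is aligned to
 * its own size, as the ring base address register requires.
 */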
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
		int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			QAT_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		QAT_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %d",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %d",
			queue_name, queue_size, socket_id);
	return rte_memzone_reserve_aligned(queue_name, queue_size,
		socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}

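/* Create a queue pair: allocate the qp metadata and per-descriptor op
 * cookies, create the tx/rx hardware rings, enable arbitration on the
 * tx ring and populate the cookie mempool.
 */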
int qat_qp_setup(struct qat_pci_device *qat_dev,
		struct qat_qp **qp_addr,
		uint16_t queue_pair_id,
		struct qat_qp_config *qat_qp_conf)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;

	QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
		queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);

	if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
		(qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
		QAT_LOG(ERR, "Can't create qp for %u descriptors",
				qat_qp_conf->nb_descriptors);
		return -EINVAL;
	}

	if (pci_dev->mem_resource[0].addr == NULL) {
		QAT_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("qat PMD qp metadata",
				sizeof(*qp), RTE_CACHE_LINE_SIZE,
				qat_qp_conf->socket_id);
	if (qp == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qat_qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc_socket("qat PMD op cookie pointer",
			qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE, qat_qp_conf->socket_id);
	if (qp->op_cookies == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for cookie");
		rte_free(qp);
		return -ENOMEM;
	}

	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	qp->inflights16 = 0;

	if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
					ADF_RING_DIR_TX) != 0) {
		QAT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
					ADF_RING_DIR_RX) != 0) {
		QAT_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
					&qat_dev->arb_csr_lock);

	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
					"%s%d_cookies_%s_qp%hu",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qat_qp_conf->service_str, queue_pair_id);

	QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				qat_qp_conf->cookie_size, 64, 0,
				NULL, NULL, NULL, NULL,
				qat_dev->pci_dev->device.numa_node,
				0);
	if (!qp->op_cookie_pool) {
		QAT_LOG(ERR, "QAT PMD Cannot create op mempool");
		goto create_err;
	}

	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}
		memset(qp->op_cookies[i], 0, qat_qp_conf->cookie_size);
	}

	qp->qat_dev_gen = qat_dev->qat_dev_gen;
	qp->build_request = qat_qp_conf->build_request;
	qp->service_type = qat_qp_conf->hw->service_type;
	qp->qat_dev = qat_dev;

	QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
			queue_pair_id, op_cookie_pool_name);

	*qp_addr = qp;
	return 0;

create_err:
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	return -EFAULT;
}

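/* Release a queue pair: delete its rings, disable arbitration, return the
 * op cookies to their mempool and free all qp resources. Fails with
 * -EAGAIN while responses are still in flight.
 */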
int qat_qp_release(struct qat_qp **qp_addr)
{
	struct qat_qp *qp = *qp_addr;
	uint32_t i;

	if (qp == NULL) {
		QAT_LOG(DEBUG, "qp already freed");
		return 0;
	}

	QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
				qp->qat_dev->qat_dev_id);

	/* Don't free memory if there are still responses to be processed */
	if (qp->inflights16 == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
					&qp->qat_dev->arb_csr_lock);

	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);

	rte_free(qp->op_cookies);
	rte_free(qp);
	*qp_addr = NULL;
	return 0;
}

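/* Scrub the ring memory with the unused-byte pattern and free its
 * memzone.
 */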
static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		QAT_LOG(DEBUG, "Invalid queue");
		return;
	}
	QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
			queue->hw_queue_number, queue->memz_name);

	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory;
		 * queue_size holds the CSR encoding, so convert to bytes.
		 */
		memset(queue->base_addr, 0x7F,
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(queue->queue_size));
		status = rte_memzone_free(mz);
		if (status != 0)
			QAT_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		QAT_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}

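/* Create one hardware ring of a queue pair: reserve its memzone, verify
 * alignment and size, fill the ring with the empty-message pattern and
 * program the ring base CSR.
 */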
static int
qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
		struct qat_qp_config *qp_conf, uint8_t dir)
{
	uint64_t queue_base;
	void *io_addr;
	const struct rte_memzone *qp_mz;
	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
	int ret = 0;
	uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
	uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);

	queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
	queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);

	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name),
			"%s_%d_%s_%s_%d_%d",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qp_conf->service_str, "qp_mem",
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			qat_dev->pci_dev->device.numa_node);
	if (qp_mz == NULL) {
		QAT_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		QAT_LOG(ERR, "Invalid alignment on queue create "
					"0x%"PRIx64,
					queue->base_phys_addr);
		ret = -EFAULT;
		goto queue_create_err;
	}

	if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
			&(queue->queue_size)) != 0) {
		QAT_LOG(ERR, "Invalid queue size");
		ret = -EINVAL;
		goto queue_create_err;
	}

	queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
					ADF_BYTES_TO_MSG_SIZE(desc_size));
	queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;

	if (queue->max_inflights < 2) {
		QAT_LOG(ERR, "Invalid num inflights");
		ret = -EINVAL;
		goto queue_create_err;
	}
	queue->head = 0;
	queue->tail = 0;
	queue->msg_size = desc_size;

	/*
	 * Write an unused pattern to the queue memory.
	 */
	memset(queue->base_addr, 0x7F, queue_size_bytes);

	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
					queue->queue_size);

	io_addr = pci_dev->mem_resource[0].addr;

	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_base);

	QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
		" nb msgs %u, msg_size %u, max_inflights %u modulo mask %u",
		queue->memz_name,
		queue->queue_size, queue_size_bytes,
		qp_conf->nb_descriptors, desc_size,
		queue->max_inflights, queue->modulo_mask);

	return 0;

queue_create_err:
	rte_memzone_free(qp_mz);
	return ret;
}

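/* The ring base must be naturally aligned to the ring size in bytes. */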
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}

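/* Map a ring size in bytes to the encoding expected by the ring size CSR;
 * only the exact power-of-two sizes the hardware supports are valid.
 */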
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
	return -EINVAL;
}

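/* Set or clear the tx ring's bit in the bundle's arbiter enable CSR.
 * The read-modify-write is protected by the per-device arbiter lock.
 */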
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
					txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
					txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value &= ~(0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

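/* Program the ring config CSRs: no watermarks on the request ring, a
 * near-full watermark on the response ring.
 */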
static void adf_configure_queues(struct qat_qp *qp)
{
	uint32_t queue_config;
	struct qat_queue *queue = &qp->tx_q;

	queue_config = BUILD_RING_CONFIG(queue->queue_size);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);

	queue = &qp->rx_q;
	queue_config =
			BUILD_RESP_RING_CONFIG(queue->queue_size,
					ADF_RING_NEAR_WATERMARK_512,
					ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);
}

static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
{
	return data & modulo_mask;
}

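/* Flush the coalesced tail pointer to the tx ring's tail CSR. */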
static inline void
txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, q->tail);
	q->nb_pending_requests = 0;
	q->csr_tail = q->tail;
}

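/* Mark processed responses as empty again (handling ring wrap-around)
 * and advance the rx ring's head CSR to the software head.
 */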
static inline
void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
	uint32_t old_head, new_head;
	uint32_t max_head;

	old_head = q->csr_head;
	new_head = q->head;
	max_head = qp->nb_descriptors * q->msg_size;

	/* write out free descriptors */
	void *cur_desc = (uint8_t *)q->base_addr + old_head;

	if (new_head < old_head) {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
		memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
	} else {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
	}
	q->nb_processed_responses = 0;
	q->csr_head = new_head;

	/* write current head to CSR */
	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, new_head);
}

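/* Enqueue a burst of ops on the tx ring. Ops that do not fit within the
 * ring's max_inflights budget are dropped from the burst, and the tail
 * CSR write is coalesced across bursts to reduce MMIO traffic.
 */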
uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	tmp_qp->inflights16 += nb_ops;
	overflow = tmp_qp->inflights16 - queue->max_inflights;
	if (overflow > 0) {
		tmp_qp->inflights16 -= overflow;
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = tmp_qp->build_request(*ops, base_addr + tail,
				tmp_qp->op_cookies[tail / queue->msg_size],
				tmp_qp->qat_dev_gen);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/*
			 * This message cannot be enqueued; undo the
			 * inflight count for the ops that will not be sent.
			 */
			tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
		ops++;
		nb_ops_sent++;
	}
kick_tail:
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	queue->nb_pending_requests += nb_ops_sent;
	if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
		queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
		txq_write_tail(tmp_qp, queue);
	}
	return nb_ops_sent;
}

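/* Dequeue a burst of responses from the rx ring, dispatching each one to
 * the response handler for the qp's service. Head CSR writes are
 * coalesced, and the tx tail is kicked if a write was held back.
 */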
uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	struct qat_queue *rx_queue, *tx_queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t head;
	uint32_t resp_counter = 0;
	uint8_t *resp_msg;

	rx_queue = &(tmp_qp->rx_q);
	tx_queue = &(tmp_qp->tx_q);
	head = rx_queue->head;
	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			resp_counter != nb_ops) {

		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
			qat_sym_process_response(ops, resp_msg);
		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
			qat_comp_process_response(ops, resp_msg,
					&tmp_qp->stats.dequeue_err_count);
		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
#ifdef BUILD_QAT_ASYM
			qat_asym_process_response(ops, resp_msg,
				tmp_qp->op_cookies[head / rx_queue->msg_size]);
#endif
		}

		head = adf_modulo(head + rx_queue->msg_size,
				rx_queue->modulo_mask);

		resp_msg = (uint8_t *)rx_queue->base_addr + head;
		ops++;
		resp_counter++;
	}
	if (resp_counter > 0) {
		rx_queue->head = head;
		tmp_qp->stats.dequeued_count += resp_counter;
		rx_queue->nb_processed_responses += resp_counter;
		tmp_qp->inflights16 -= resp_counter;

		if (rx_queue->nb_processed_responses >
						QAT_CSR_HEAD_WRITE_THRESH)
			rxq_free_desc(tmp_qp, rx_queue);
	}
	/* also check if tail needs to be advanced */
	if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
		tx_queue->tail != tx_queue->csr_tail) {
		txq_write_tail(tmp_qp, tx_queue);
	}
	return resp_counter;
}

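/* Weak stub, overridden when the QAT compression PMD is built in. */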
__rte_weak int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
			  uint64_t *dequeue_err_count __rte_unused)
{
	return 0;
}