4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_common.h>
36 #include <rte_malloc.h>
37 #include <rte_memzone.h>
38 #include <rte_cryptodev_pmd.h>
39 #include <rte_atomic.h>
40 #include <rte_prefetch.h>
43 #include "qat_crypto.h"
45 #include "adf_transport_access_macros.h"
/* Ring sizing limits for the symmetric-crypto queue pairs. */
#define ADF_MAX_SYM_DESC		4096
#define ADF_MIN_SYM_DESC		128
#define ADF_SYM_TX_RING_DESC_SIZE	128
#define ADF_SYM_RX_RING_DESC_SIZE	32
#define ADF_SYM_TX_QUEUE_STARTOFF	2
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_RX_QUEUE_STARTOFF	10
#define ADF_ARB_REG_SLOT		0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET	0x19C

/* Write the per-bundle ring-arbitration service-enable CSR. */
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)

/* Forward declarations for the static helpers defined below. */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
	int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
79 static const struct rte_memzone
*
80 queue_dma_zone_reserve(const char *queue_name
, uint32_t queue_size
,
83 const struct rte_memzone
*mz
;
84 unsigned memzone_flags
= 0;
85 const struct rte_memseg
*ms
;
87 PMD_INIT_FUNC_TRACE();
88 mz
= rte_memzone_lookup(queue_name
);
90 if (((size_t)queue_size
<= mz
->len
) &&
91 ((socket_id
== SOCKET_ID_ANY
) ||
92 (socket_id
== mz
->socket_id
))) {
93 PMD_DRV_LOG(DEBUG
, "re-use memzone already "
94 "allocated for %s", queue_name
);
98 PMD_DRV_LOG(ERR
, "Incompatible memzone already "
99 "allocated %s, size %u, socket %d. "
100 "Requested size %u, socket %u",
101 queue_name
, (uint32_t)mz
->len
,
102 mz
->socket_id
, queue_size
, socket_id
);
106 PMD_DRV_LOG(DEBUG
, "Allocate memzone for %s, size %u on socket %u",
107 queue_name
, queue_size
, socket_id
);
108 ms
= rte_eal_get_physmem_layout();
109 switch (ms
[0].hugepage_sz
) {
111 memzone_flags
= RTE_MEMZONE_2MB
;
114 memzone_flags
= RTE_MEMZONE_1GB
;
116 case(RTE_PGSIZE_16M
):
117 memzone_flags
= RTE_MEMZONE_16MB
;
119 case(RTE_PGSIZE_16G
):
120 memzone_flags
= RTE_MEMZONE_16GB
;
123 memzone_flags
= RTE_MEMZONE_SIZE_HINT_ONLY
;
125 #ifdef RTE_LIBRTE_XEN_DOM0
126 return rte_memzone_reserve_bounded(queue_name
, queue_size
,
127 socket_id
, 0, RTE_CACHE_LINE_SIZE
, RTE_PGSIZE_2M
);
129 return rte_memzone_reserve_aligned(queue_name
, queue_size
, socket_id
,
130 memzone_flags
, queue_size
);
134 int qat_crypto_sym_qp_setup(struct rte_cryptodev
*dev
, uint16_t queue_pair_id
,
135 const struct rte_cryptodev_qp_conf
*qp_conf
,
139 struct rte_pci_device
*pci_dev
;
141 char op_cookie_pool_name
[RTE_RING_NAMESIZE
];
144 PMD_INIT_FUNC_TRACE();
146 /* If qp is already in use free ring memory and qp metadata. */
147 if (dev
->data
->queue_pairs
[queue_pair_id
] != NULL
) {
148 ret
= qat_crypto_sym_qp_release(dev
, queue_pair_id
);
153 if ((qp_conf
->nb_descriptors
> ADF_MAX_SYM_DESC
) ||
154 (qp_conf
->nb_descriptors
< ADF_MIN_SYM_DESC
)) {
155 PMD_DRV_LOG(ERR
, "Can't create qp for %u descriptors",
156 qp_conf
->nb_descriptors
);
160 pci_dev
= RTE_DEV_TO_PCI(dev
->device
);
162 if (pci_dev
->mem_resource
[0].addr
== NULL
) {
163 PMD_DRV_LOG(ERR
, "Could not find VF config space "
164 "(UIO driver attached?).");
169 (ADF_NUM_SYM_QPS_PER_BUNDLE
*
170 ADF_NUM_BUNDLES_PER_DEV
)) {
171 PMD_DRV_LOG(ERR
, "qp_id %u invalid for this device",
175 /* Allocate the queue pair data structure. */
176 qp
= rte_zmalloc("qat PMD qp metadata",
177 sizeof(*qp
), RTE_CACHE_LINE_SIZE
);
179 PMD_DRV_LOG(ERR
, "Failed to alloc mem for qp struct");
182 qp
->nb_descriptors
= qp_conf
->nb_descriptors
;
183 qp
->op_cookies
= rte_zmalloc("qat PMD op cookie pointer",
184 qp_conf
->nb_descriptors
* sizeof(*qp
->op_cookies
),
185 RTE_CACHE_LINE_SIZE
);
187 qp
->mmap_bar_addr
= pci_dev
->mem_resource
[0].addr
;
188 rte_atomic16_init(&qp
->inflights16
);
190 if (qat_tx_queue_create(dev
, &(qp
->tx_q
),
191 queue_pair_id
, qp_conf
->nb_descriptors
, socket_id
) != 0) {
192 PMD_INIT_LOG(ERR
, "Tx queue create failed "
193 "queue_pair_id=%u", queue_pair_id
);
197 if (qat_rx_queue_create(dev
, &(qp
->rx_q
),
198 queue_pair_id
, qp_conf
->nb_descriptors
, socket_id
) != 0) {
199 PMD_DRV_LOG(ERR
, "Rx queue create failed "
200 "queue_pair_id=%hu", queue_pair_id
);
201 qat_queue_delete(&(qp
->tx_q
));
205 adf_configure_queues(qp
);
206 adf_queue_arb_enable(&qp
->tx_q
, qp
->mmap_bar_addr
);
207 snprintf(op_cookie_pool_name
, RTE_RING_NAMESIZE
, "%s_qp_op_%d_%hu",
208 dev
->driver
->pci_drv
.driver
.name
, dev
->data
->dev_id
,
211 qp
->op_cookie_pool
= rte_mempool_lookup(op_cookie_pool_name
);
212 if (qp
->op_cookie_pool
== NULL
)
213 qp
->op_cookie_pool
= rte_mempool_create(op_cookie_pool_name
,
215 sizeof(struct qat_crypto_op_cookie
), 64, 0,
216 NULL
, NULL
, NULL
, NULL
, socket_id
,
218 if (!qp
->op_cookie_pool
) {
219 PMD_DRV_LOG(ERR
, "QAT PMD Cannot create"
224 for (i
= 0; i
< qp
->nb_descriptors
; i
++) {
225 if (rte_mempool_get(qp
->op_cookie_pool
, &qp
->op_cookies
[i
])) {
226 PMD_DRV_LOG(ERR
, "QAT PMD Cannot get op_cookie");
230 struct qat_crypto_op_cookie
*sql_cookie
=
233 sql_cookie
->qat_sgl_src_phys_addr
=
234 rte_mempool_virt2phy(qp
->op_cookie_pool
,
236 offsetof(struct qat_crypto_op_cookie
,
239 sql_cookie
->qat_sgl_dst_phys_addr
=
240 rte_mempool_virt2phy(qp
->op_cookie_pool
,
242 offsetof(struct qat_crypto_op_cookie
,
245 dev
->data
->queue_pairs
[queue_pair_id
] = qp
;
253 int qat_crypto_sym_qp_release(struct rte_cryptodev
*dev
, uint16_t queue_pair_id
)
256 (struct qat_qp
*)dev
->data
->queue_pairs
[queue_pair_id
];
259 PMD_INIT_FUNC_TRACE();
261 PMD_DRV_LOG(DEBUG
, "qp already freed");
265 /* Don't free memory if there are still responses to be processed */
266 if (rte_atomic16_read(&(qp
->inflights16
)) == 0) {
267 qat_queue_delete(&(qp
->tx_q
));
268 qat_queue_delete(&(qp
->rx_q
));
273 adf_queue_arb_disable(&(qp
->tx_q
), qp
->mmap_bar_addr
);
275 for (i
= 0; i
< qp
->nb_descriptors
; i
++)
276 rte_mempool_put(qp
->op_cookie_pool
, qp
->op_cookies
[i
]);
278 if (qp
->op_cookie_pool
)
279 rte_mempool_free(qp
->op_cookie_pool
);
281 rte_free(qp
->op_cookies
);
283 dev
->data
->queue_pairs
[queue_pair_id
] = NULL
;
287 static int qat_tx_queue_create(struct rte_cryptodev
*dev
,
288 struct qat_queue
*queue
, uint8_t qp_id
,
289 uint32_t nb_desc
, int socket_id
)
291 PMD_INIT_FUNC_TRACE();
292 queue
->hw_bundle_number
= qp_id
/ADF_NUM_SYM_QPS_PER_BUNDLE
;
293 queue
->hw_queue_number
= (qp_id
%ADF_NUM_SYM_QPS_PER_BUNDLE
) +
294 ADF_SYM_TX_QUEUE_STARTOFF
;
295 PMD_DRV_LOG(DEBUG
, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
296 nb_desc
, qp_id
, queue
->hw_bundle_number
,
297 queue
->hw_queue_number
);
299 return qat_queue_create(dev
, queue
, nb_desc
,
300 ADF_SYM_TX_RING_DESC_SIZE
, socket_id
);
303 static int qat_rx_queue_create(struct rte_cryptodev
*dev
,
304 struct qat_queue
*queue
, uint8_t qp_id
, uint32_t nb_desc
,
307 PMD_INIT_FUNC_TRACE();
308 queue
->hw_bundle_number
= qp_id
/ADF_NUM_SYM_QPS_PER_BUNDLE
;
309 queue
->hw_queue_number
= (qp_id
%ADF_NUM_SYM_QPS_PER_BUNDLE
) +
310 ADF_SYM_RX_QUEUE_STARTOFF
;
312 PMD_DRV_LOG(DEBUG
, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
313 nb_desc
, qp_id
, queue
->hw_bundle_number
,
314 queue
->hw_queue_number
);
315 return qat_queue_create(dev
, queue
, nb_desc
,
316 ADF_SYM_RX_RING_DESC_SIZE
, socket_id
);
319 static void qat_queue_delete(struct qat_queue
*queue
)
321 const struct rte_memzone
*mz
;
325 PMD_DRV_LOG(DEBUG
, "Invalid queue");
328 mz
= rte_memzone_lookup(queue
->memz_name
);
330 /* Write an unused pattern to the queue memory. */
331 memset(queue
->base_addr
, 0x7F, queue
->queue_size
);
332 status
= rte_memzone_free(mz
);
334 PMD_DRV_LOG(ERR
, "Error %d on freeing queue %s",
335 status
, queue
->memz_name
);
337 PMD_DRV_LOG(DEBUG
, "queue %s doesn't exist",
343 qat_queue_create(struct rte_cryptodev
*dev
, struct qat_queue
*queue
,
344 uint32_t nb_desc
, uint8_t desc_size
, int socket_id
)
348 const struct rte_memzone
*qp_mz
;
349 uint32_t queue_size_bytes
= nb_desc
*desc_size
;
350 struct rte_pci_device
*pci_dev
;
352 PMD_INIT_FUNC_TRACE();
353 if (desc_size
> ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE
)) {
354 PMD_DRV_LOG(ERR
, "Invalid descriptor size %d", desc_size
);
359 * Allocate a memzone for the queue - create a unique name.
361 snprintf(queue
->memz_name
, sizeof(queue
->memz_name
), "%s_%s_%d_%d_%d",
362 dev
->driver
->pci_drv
.driver
.name
, "qp_mem", dev
->data
->dev_id
,
363 queue
->hw_bundle_number
, queue
->hw_queue_number
);
364 qp_mz
= queue_dma_zone_reserve(queue
->memz_name
, queue_size_bytes
,
367 PMD_DRV_LOG(ERR
, "Failed to allocate ring memzone");
371 queue
->base_addr
= (char *)qp_mz
->addr
;
372 queue
->base_phys_addr
= qp_mz
->phys_addr
;
373 if (qat_qp_check_queue_alignment(queue
->base_phys_addr
,
375 PMD_DRV_LOG(ERR
, "Invalid alignment on queue create "
377 queue
->base_phys_addr
);
381 if (adf_verify_queue_size(desc_size
, nb_desc
, &(queue
->queue_size
))
383 PMD_DRV_LOG(ERR
, "Invalid num inflights");
387 queue
->max_inflights
= ADF_MAX_INFLIGHTS(queue
->queue_size
,
388 ADF_BYTES_TO_MSG_SIZE(desc_size
));
389 queue
->modulo
= ADF_RING_SIZE_MODULO(queue
->queue_size
);
390 PMD_DRV_LOG(DEBUG
, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
391 " msg_size %u, max_inflights %u modulo %u",
392 queue
->queue_size
, queue_size_bytes
,
393 nb_desc
, desc_size
, queue
->max_inflights
,
396 if (queue
->max_inflights
< 2) {
397 PMD_DRV_LOG(ERR
, "Invalid num inflights");
402 queue
->msg_size
= desc_size
;
405 * Write an unused pattern to the queue memory.
407 memset(queue
->base_addr
, 0x7F, queue_size_bytes
);
409 queue_base
= BUILD_RING_BASE_ADDR(queue
->base_phys_addr
,
411 pci_dev
= RTE_DEV_TO_PCI(dev
->device
);
413 io_addr
= pci_dev
->mem_resource
[0].addr
;
415 WRITE_CSR_RING_BASE(io_addr
, queue
->hw_bundle_number
,
416 queue
->hw_queue_number
, queue_base
);
/*
 * Check that @phys_addr is naturally aligned to @queue_size_bytes
 * (assumed to be a power of two, as enforced by adf_verify_queue_size).
 * Returns 0 when aligned, -EINVAL otherwise.
 */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
429 static int adf_verify_queue_size(uint32_t msg_size
, uint32_t msg_num
,
430 uint32_t *p_queue_size_for_csr
)
432 uint8_t i
= ADF_MIN_RING_SIZE
;
434 PMD_INIT_FUNC_TRACE();
435 for (; i
<= ADF_MAX_RING_SIZE
; i
++)
436 if ((msg_size
* msg_num
) ==
437 (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i
)) {
438 *p_queue_size_for_csr
= i
;
441 PMD_DRV_LOG(ERR
, "Invalid ring size %d", msg_size
* msg_num
);
445 static void adf_queue_arb_enable(struct qat_queue
*txq
, void *base_addr
)
447 uint32_t arb_csr_offset
= ADF_ARB_RINGSRVARBEN_OFFSET
+
449 txq
->hw_bundle_number
);
452 PMD_INIT_FUNC_TRACE();
453 value
= ADF_CSR_RD(base_addr
, arb_csr_offset
);
454 value
|= (0x01 << txq
->hw_queue_number
);
455 ADF_CSR_WR(base_addr
, arb_csr_offset
, value
);
458 static void adf_queue_arb_disable(struct qat_queue
*txq
, void *base_addr
)
460 uint32_t arb_csr_offset
= ADF_ARB_RINGSRVARBEN_OFFSET
+
462 txq
->hw_bundle_number
);
465 PMD_INIT_FUNC_TRACE();
466 value
= ADF_CSR_RD(base_addr
, arb_csr_offset
);
467 value
^= (0x01 << txq
->hw_queue_number
);
468 ADF_CSR_WR(base_addr
, arb_csr_offset
, value
);
471 static void adf_configure_queues(struct qat_qp
*qp
)
473 uint32_t queue_config
;
474 struct qat_queue
*queue
= &qp
->tx_q
;
476 PMD_INIT_FUNC_TRACE();
477 queue_config
= BUILD_RING_CONFIG(queue
->queue_size
);
479 WRITE_CSR_RING_CONFIG(qp
->mmap_bar_addr
, queue
->hw_bundle_number
,
480 queue
->hw_queue_number
, queue_config
);
484 BUILD_RESP_RING_CONFIG(queue
->queue_size
,
485 ADF_RING_NEAR_WATERMARK_512
,
486 ADF_RING_NEAR_WATERMARK_0
);
488 WRITE_CSR_RING_CONFIG(qp
->mmap_bar_addr
, queue
->hw_bundle_number
,
489 queue
->hw_queue_number
, queue_config
);