4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_common.h>
36 #include <rte_malloc.h>
37 #include <rte_memzone.h>
38 #include <rte_cryptodev_pmd.h>
39 #include <rte_atomic.h>
40 #include <rte_prefetch.h>
43 #include "qat_crypto.h"
44 #include "adf_transport_access_macros.h"
/* Bounds on the number of descriptors accepted for a symmetric-crypto qp. */
#define ADF_MAX_SYM_DESC			4096
#define ADF_MIN_SYM_DESC			128
/* Descriptor (message) sizes, in bytes, of the Sym Tx and Rx rings. */
#define ADF_SYM_TX_RING_DESC_SIZE		128
#define ADF_SYM_RX_RING_DESC_SIZE		32
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_TX_QUEUE_STARTOFF		2
/* Offset from bundle start to 1st Sym Rx queue */
#define ADF_SYM_RX_QUEUE_STARTOFF		10
/* Stride between consecutive bundles' arbiter register slots. */
#define ADF_ARB_REG_SLOT			0x1000
/* CSR offset of the ring-service-arbiter enable register. */
#define ADF_ARB_RINGSRVARBEN_OFFSET		0x19C

/* Write 'value' to the arbiter-enable CSR of bundle 'index'. */
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)
/* Forward declarations for the static helpers defined later in this file. */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
	int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
78 static const struct rte_memzone
*
79 queue_dma_zone_reserve(const char *queue_name
, uint32_t queue_size
,
82 const struct rte_memzone
*mz
;
83 unsigned memzone_flags
= 0;
84 const struct rte_memseg
*ms
;
86 PMD_INIT_FUNC_TRACE();
87 mz
= rte_memzone_lookup(queue_name
);
89 if (((size_t)queue_size
<= mz
->len
) &&
90 ((socket_id
== SOCKET_ID_ANY
) ||
91 (socket_id
== mz
->socket_id
))) {
92 PMD_DRV_LOG(DEBUG
, "re-use memzone already "
93 "allocated for %s", queue_name
);
97 PMD_DRV_LOG(ERR
, "Incompatible memzone already "
98 "allocated %s, size %u, socket %d. "
99 "Requested size %u, socket %u",
100 queue_name
, (uint32_t)mz
->len
,
101 mz
->socket_id
, queue_size
, socket_id
);
105 PMD_DRV_LOG(DEBUG
, "Allocate memzone for %s, size %u on socket %u",
106 queue_name
, queue_size
, socket_id
);
107 ms
= rte_eal_get_physmem_layout();
108 switch (ms
[0].hugepage_sz
) {
110 memzone_flags
= RTE_MEMZONE_2MB
;
113 memzone_flags
= RTE_MEMZONE_1GB
;
115 case(RTE_PGSIZE_16M
):
116 memzone_flags
= RTE_MEMZONE_16MB
;
118 case(RTE_PGSIZE_16G
):
119 memzone_flags
= RTE_MEMZONE_16GB
;
122 memzone_flags
= RTE_MEMZONE_SIZE_HINT_ONLY
;
124 #ifdef RTE_LIBRTE_XEN_DOM0
125 return rte_memzone_reserve_bounded(queue_name
, queue_size
,
126 socket_id
, 0, RTE_CACHE_LINE_SIZE
, RTE_PGSIZE_2M
);
128 return rte_memzone_reserve_aligned(queue_name
, queue_size
, socket_id
,
129 memzone_flags
, queue_size
);
133 int qat_crypto_sym_qp_setup(struct rte_cryptodev
*dev
, uint16_t queue_pair_id
,
134 const struct rte_cryptodev_qp_conf
*qp_conf
,
140 PMD_INIT_FUNC_TRACE();
142 /* If qp is already in use free ring memory and qp metadata. */
143 if (dev
->data
->queue_pairs
[queue_pair_id
] != NULL
) {
144 ret
= qat_crypto_sym_qp_release(dev
, queue_pair_id
);
149 if ((qp_conf
->nb_descriptors
> ADF_MAX_SYM_DESC
) ||
150 (qp_conf
->nb_descriptors
< ADF_MIN_SYM_DESC
)) {
151 PMD_DRV_LOG(ERR
, "Can't create qp for %u descriptors",
152 qp_conf
->nb_descriptors
);
156 if (dev
->pci_dev
->mem_resource
[0].addr
== NULL
) {
157 PMD_DRV_LOG(ERR
, "Could not find VF config space "
158 "(UIO driver attached?).");
163 (ADF_NUM_SYM_QPS_PER_BUNDLE
*
164 ADF_NUM_BUNDLES_PER_DEV
)) {
165 PMD_DRV_LOG(ERR
, "qp_id %u invalid for this device",
170 /* Allocate the queue pair data structure. */
171 qp
= rte_zmalloc("qat PMD qp metadata",
172 sizeof(*qp
), RTE_CACHE_LINE_SIZE
);
174 PMD_DRV_LOG(ERR
, "Failed to alloc mem for qp struct");
177 qp
->mmap_bar_addr
= dev
->pci_dev
->mem_resource
[0].addr
;
178 rte_atomic16_init(&qp
->inflights16
);
180 if (qat_tx_queue_create(dev
, &(qp
->tx_q
),
181 queue_pair_id
, qp_conf
->nb_descriptors
, socket_id
) != 0) {
182 PMD_INIT_LOG(ERR
, "Tx queue create failed "
183 "queue_pair_id=%u", queue_pair_id
);
187 if (qat_rx_queue_create(dev
, &(qp
->rx_q
),
188 queue_pair_id
, qp_conf
->nb_descriptors
, socket_id
) != 0) {
189 PMD_DRV_LOG(ERR
, "Rx queue create failed "
190 "queue_pair_id=%hu", queue_pair_id
);
191 qat_queue_delete(&(qp
->tx_q
));
194 adf_configure_queues(qp
);
195 adf_queue_arb_enable(&qp
->tx_q
, qp
->mmap_bar_addr
);
196 dev
->data
->queue_pairs
[queue_pair_id
] = qp
;
204 int qat_crypto_sym_qp_release(struct rte_cryptodev
*dev
, uint16_t queue_pair_id
)
207 (struct qat_qp
*)dev
->data
->queue_pairs
[queue_pair_id
];
209 PMD_INIT_FUNC_TRACE();
211 PMD_DRV_LOG(DEBUG
, "qp already freed");
215 /* Don't free memory if there are still responses to be processed */
216 if (rte_atomic16_read(&(qp
->inflights16
)) == 0) {
217 qat_queue_delete(&(qp
->tx_q
));
218 qat_queue_delete(&(qp
->rx_q
));
223 adf_queue_arb_disable(&(qp
->tx_q
), qp
->mmap_bar_addr
);
225 dev
->data
->queue_pairs
[queue_pair_id
] = NULL
;
229 static int qat_tx_queue_create(struct rte_cryptodev
*dev
,
230 struct qat_queue
*queue
, uint8_t qp_id
,
231 uint32_t nb_desc
, int socket_id
)
233 PMD_INIT_FUNC_TRACE();
234 queue
->hw_bundle_number
= qp_id
/ADF_NUM_SYM_QPS_PER_BUNDLE
;
235 queue
->hw_queue_number
= (qp_id
%ADF_NUM_SYM_QPS_PER_BUNDLE
) +
236 ADF_SYM_TX_QUEUE_STARTOFF
;
237 PMD_DRV_LOG(DEBUG
, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
238 nb_desc
, qp_id
, queue
->hw_bundle_number
,
239 queue
->hw_queue_number
);
241 return qat_queue_create(dev
, queue
, nb_desc
,
242 ADF_SYM_TX_RING_DESC_SIZE
, socket_id
);
245 static int qat_rx_queue_create(struct rte_cryptodev
*dev
,
246 struct qat_queue
*queue
, uint8_t qp_id
, uint32_t nb_desc
,
249 PMD_INIT_FUNC_TRACE();
250 queue
->hw_bundle_number
= qp_id
/ADF_NUM_SYM_QPS_PER_BUNDLE
;
251 queue
->hw_queue_number
= (qp_id
%ADF_NUM_SYM_QPS_PER_BUNDLE
) +
252 ADF_SYM_RX_QUEUE_STARTOFF
;
254 PMD_DRV_LOG(DEBUG
, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
255 nb_desc
, qp_id
, queue
->hw_bundle_number
,
256 queue
->hw_queue_number
);
257 return qat_queue_create(dev
, queue
, nb_desc
,
258 ADF_SYM_RX_RING_DESC_SIZE
, socket_id
);
261 static void qat_queue_delete(struct qat_queue
*queue
)
263 const struct rte_memzone
*mz
;
267 PMD_DRV_LOG(DEBUG
, "Invalid queue");
270 mz
= rte_memzone_lookup(queue
->memz_name
);
272 /* Write an unused pattern to the queue memory. */
273 memset(queue
->base_addr
, 0x7F, queue
->queue_size
);
274 status
= rte_memzone_free(mz
);
276 PMD_DRV_LOG(ERR
, "Error %d on freeing queue %s",
277 status
, queue
->memz_name
);
279 PMD_DRV_LOG(DEBUG
, "queue %s doesn't exist",
285 qat_queue_create(struct rte_cryptodev
*dev
, struct qat_queue
*queue
,
286 uint32_t nb_desc
, uint8_t desc_size
, int socket_id
)
290 const struct rte_memzone
*qp_mz
;
291 uint32_t queue_size_bytes
= nb_desc
*desc_size
;
293 PMD_INIT_FUNC_TRACE();
294 if (desc_size
> ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE
)) {
295 PMD_DRV_LOG(ERR
, "Invalid descriptor size %d", desc_size
);
300 * Allocate a memzone for the queue - create a unique name.
302 snprintf(queue
->memz_name
, sizeof(queue
->memz_name
), "%s_%s_%d_%d_%d",
303 dev
->driver
->pci_drv
.driver
.name
, "qp_mem", dev
->data
->dev_id
,
304 queue
->hw_bundle_number
, queue
->hw_queue_number
);
305 qp_mz
= queue_dma_zone_reserve(queue
->memz_name
, queue_size_bytes
,
308 PMD_DRV_LOG(ERR
, "Failed to allocate ring memzone");
312 queue
->base_addr
= (char *)qp_mz
->addr
;
313 queue
->base_phys_addr
= qp_mz
->phys_addr
;
314 if (qat_qp_check_queue_alignment(queue
->base_phys_addr
,
316 PMD_DRV_LOG(ERR
, "Invalid alignment on queue create "
318 queue
->base_phys_addr
);
322 if (adf_verify_queue_size(desc_size
, nb_desc
, &(queue
->queue_size
))
324 PMD_DRV_LOG(ERR
, "Invalid num inflights");
328 queue
->max_inflights
= ADF_MAX_INFLIGHTS(queue
->queue_size
,
329 ADF_BYTES_TO_MSG_SIZE(desc_size
));
330 queue
->modulo
= ADF_RING_SIZE_MODULO(queue
->queue_size
);
331 PMD_DRV_LOG(DEBUG
, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
332 " msg_size %u, max_inflights %u modulo %u",
333 queue
->queue_size
, queue_size_bytes
,
334 nb_desc
, desc_size
, queue
->max_inflights
,
337 if (queue
->max_inflights
< 2) {
338 PMD_DRV_LOG(ERR
, "Invalid num inflights");
343 queue
->msg_size
= desc_size
;
346 * Write an unused pattern to the queue memory.
348 memset(queue
->base_addr
, 0x7F, queue_size_bytes
);
350 queue_base
= BUILD_RING_BASE_ADDR(queue
->base_phys_addr
,
352 io_addr
= dev
->pci_dev
->mem_resource
[0].addr
;
354 WRITE_CSR_RING_BASE(io_addr
, queue
->hw_bundle_number
,
355 queue
->hw_queue_number
, queue_base
);
/*
 * Verify the ring base is naturally aligned to the ring size (the device
 * requires base % size == 0; size is a power of two, so the low bits of the
 * physical address must be clear).  Returns 0 if aligned, -EINVAL otherwise.
 */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
368 static int adf_verify_queue_size(uint32_t msg_size
, uint32_t msg_num
,
369 uint32_t *p_queue_size_for_csr
)
371 uint8_t i
= ADF_MIN_RING_SIZE
;
373 PMD_INIT_FUNC_TRACE();
374 for (; i
<= ADF_MAX_RING_SIZE
; i
++)
375 if ((msg_size
* msg_num
) ==
376 (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i
)) {
377 *p_queue_size_for_csr
= i
;
380 PMD_DRV_LOG(ERR
, "Invalid ring size %d", msg_size
* msg_num
);
384 static void adf_queue_arb_enable(struct qat_queue
*txq
, void *base_addr
)
386 uint32_t arb_csr_offset
= ADF_ARB_RINGSRVARBEN_OFFSET
+
388 txq
->hw_bundle_number
);
391 PMD_INIT_FUNC_TRACE();
392 value
= ADF_CSR_RD(base_addr
, arb_csr_offset
);
393 value
|= (0x01 << txq
->hw_queue_number
);
394 ADF_CSR_WR(base_addr
, arb_csr_offset
, value
);
397 static void adf_queue_arb_disable(struct qat_queue
*txq
, void *base_addr
)
399 uint32_t arb_csr_offset
= ADF_ARB_RINGSRVARBEN_OFFSET
+
401 txq
->hw_bundle_number
);
404 PMD_INIT_FUNC_TRACE();
405 value
= ADF_CSR_RD(base_addr
, arb_csr_offset
);
406 value
^= (0x01 << txq
->hw_queue_number
);
407 ADF_CSR_WR(base_addr
, arb_csr_offset
, value
);
410 static void adf_configure_queues(struct qat_qp
*qp
)
412 uint32_t queue_config
;
413 struct qat_queue
*queue
= &qp
->tx_q
;
415 PMD_INIT_FUNC_TRACE();
416 queue_config
= BUILD_RING_CONFIG(queue
->queue_size
);
418 WRITE_CSR_RING_CONFIG(qp
->mmap_bar_addr
, queue
->hw_bundle_number
,
419 queue
->hw_queue_number
, queue_config
);
423 BUILD_RESP_RING_CONFIG(queue
->queue_size
,
424 ADF_RING_NEAR_WATERMARK_512
,
425 ADF_RING_NEAR_WATERMARK_0
);
427 WRITE_CSR_RING_CONFIG(qp
->mmap_bar_addr
, queue
->hw_bundle_number
,
428 queue
->hw_queue_number
, queue_config
);