/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <rte_cryptodev_pmd.h>

#include "virtio_cryptodev.h"
#include "virtio_crypto_algs.h"
11 vq_ring_free_chain(struct virtqueue
*vq
, uint16_t desc_idx
)
13 struct vring_desc
*dp
, *dp_tail
;
14 struct vq_desc_extra
*dxp
;
15 uint16_t desc_idx_last
= desc_idx
;
17 dp
= &vq
->vq_ring
.desc
[desc_idx
];
18 dxp
= &vq
->vq_descx
[desc_idx
];
19 vq
->vq_free_cnt
= (uint16_t)(vq
->vq_free_cnt
+ dxp
->ndescs
);
20 if ((dp
->flags
& VRING_DESC_F_INDIRECT
) == 0) {
21 while (dp
->flags
& VRING_DESC_F_NEXT
) {
22 desc_idx_last
= dp
->next
;
23 dp
= &vq
->vq_ring
.desc
[dp
->next
];
29 * We must append the existing free chain, if any, to the end of
30 * newly freed chain. If the virtqueue was completely used, then
31 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
33 if (vq
->vq_desc_tail_idx
== VQ_RING_DESC_CHAIN_END
) {
34 vq
->vq_desc_head_idx
= desc_idx
;
36 dp_tail
= &vq
->vq_ring
.desc
[vq
->vq_desc_tail_idx
];
37 dp_tail
->next
= desc_idx
;
40 vq
->vq_desc_tail_idx
= desc_idx_last
;
41 dp
->next
= VQ_RING_DESC_CHAIN_END
;
45 virtqueue_dequeue_burst_rx(struct virtqueue
*vq
,
46 struct rte_crypto_op
**rx_pkts
, uint16_t num
)
48 struct vring_used_elem
*uep
;
49 struct rte_crypto_op
*cop
;
50 uint16_t used_idx
, desc_idx
;
52 struct virtio_crypto_inhdr
*inhdr
;
53 struct virtio_crypto_op_cookie
*op_cookie
;
55 /* Caller does the check */
56 for (i
= 0; i
< num
; i
++) {
57 used_idx
= (uint16_t)(vq
->vq_used_cons_idx
58 & (vq
->vq_nentries
- 1));
59 uep
= &vq
->vq_ring
.used
->ring
[used_idx
];
60 desc_idx
= (uint16_t)uep
->id
;
61 cop
= (struct rte_crypto_op
*)
62 vq
->vq_descx
[desc_idx
].crypto_op
;
63 if (unlikely(cop
== NULL
)) {
64 VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
66 vq
->vq_used_cons_idx
);
70 op_cookie
= (struct virtio_crypto_op_cookie
*)
71 vq
->vq_descx
[desc_idx
].cookie
;
72 inhdr
= &(op_cookie
->inhdr
);
73 switch (inhdr
->status
) {
74 case VIRTIO_CRYPTO_OK
:
75 cop
->status
= RTE_CRYPTO_OP_STATUS_SUCCESS
;
77 case VIRTIO_CRYPTO_ERR
:
78 cop
->status
= RTE_CRYPTO_OP_STATUS_ERROR
;
79 vq
->packets_received_failed
++;
81 case VIRTIO_CRYPTO_BADMSG
:
82 cop
->status
= RTE_CRYPTO_OP_STATUS_INVALID_ARGS
;
83 vq
->packets_received_failed
++;
85 case VIRTIO_CRYPTO_NOTSUPP
:
86 cop
->status
= RTE_CRYPTO_OP_STATUS_INVALID_ARGS
;
87 vq
->packets_received_failed
++;
89 case VIRTIO_CRYPTO_INVSESS
:
90 cop
->status
= RTE_CRYPTO_OP_STATUS_INVALID_SESSION
;
91 vq
->packets_received_failed
++;
97 vq
->packets_received_total
++;
100 rte_mempool_put(vq
->mpool
, op_cookie
);
102 vq
->vq_used_cons_idx
++;
103 vq_ring_free_chain(vq
, desc_idx
);
104 vq
->vq_descx
[desc_idx
].crypto_op
= NULL
;
111 virtqueue_crypto_sym_pkt_header_arrange(
112 struct rte_crypto_op
*cop
,
113 struct virtio_crypto_op_data_req
*data
,
114 struct virtio_crypto_session
*session
)
116 struct rte_crypto_sym_op
*sym_op
= cop
->sym
;
117 struct virtio_crypto_op_data_req
*req_data
= data
;
118 struct virtio_crypto_op_ctrl_req
*ctrl
= &session
->ctrl
;
119 struct virtio_crypto_sym_create_session_req
*sym_sess_req
=
120 &ctrl
->u
.sym_create_session
;
121 struct virtio_crypto_alg_chain_session_para
*chain_para
=
122 &sym_sess_req
->u
.chain
.para
;
123 struct virtio_crypto_cipher_session_para
*cipher_para
;
125 req_data
->header
.session_id
= session
->session_id
;
127 switch (sym_sess_req
->op_type
) {
128 case VIRTIO_CRYPTO_SYM_OP_CIPHER
:
129 req_data
->u
.sym_req
.op_type
= VIRTIO_CRYPTO_SYM_OP_CIPHER
;
131 cipher_para
= &sym_sess_req
->u
.cipher
.para
;
132 if (cipher_para
->op
== VIRTIO_CRYPTO_OP_ENCRYPT
)
133 req_data
->header
.opcode
= VIRTIO_CRYPTO_CIPHER_ENCRYPT
;
135 req_data
->header
.opcode
= VIRTIO_CRYPTO_CIPHER_DECRYPT
;
137 req_data
->u
.sym_req
.u
.cipher
.para
.iv_len
138 = session
->iv
.length
;
140 req_data
->u
.sym_req
.u
.cipher
.para
.src_data_len
=
141 (sym_op
->cipher
.data
.length
+
142 sym_op
->cipher
.data
.offset
);
143 req_data
->u
.sym_req
.u
.cipher
.para
.dst_data_len
=
144 req_data
->u
.sym_req
.u
.cipher
.para
.src_data_len
;
146 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING
:
147 req_data
->u
.sym_req
.op_type
=
148 VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING
;
150 cipher_para
= &chain_para
->cipher_param
;
151 if (cipher_para
->op
== VIRTIO_CRYPTO_OP_ENCRYPT
)
152 req_data
->header
.opcode
= VIRTIO_CRYPTO_CIPHER_ENCRYPT
;
154 req_data
->header
.opcode
= VIRTIO_CRYPTO_CIPHER_DECRYPT
;
156 req_data
->u
.sym_req
.u
.chain
.para
.iv_len
= session
->iv
.length
;
157 req_data
->u
.sym_req
.u
.chain
.para
.aad_len
= session
->aad
.length
;
159 req_data
->u
.sym_req
.u
.chain
.para
.src_data_len
=
160 (sym_op
->cipher
.data
.length
+
161 sym_op
->cipher
.data
.offset
);
162 req_data
->u
.sym_req
.u
.chain
.para
.dst_data_len
=
163 req_data
->u
.sym_req
.u
.chain
.para
.src_data_len
;
164 req_data
->u
.sym_req
.u
.chain
.para
.cipher_start_src_offset
=
165 sym_op
->cipher
.data
.offset
;
166 req_data
->u
.sym_req
.u
.chain
.para
.len_to_cipher
=
167 sym_op
->cipher
.data
.length
;
168 req_data
->u
.sym_req
.u
.chain
.para
.hash_start_src_offset
=
169 sym_op
->auth
.data
.offset
;
170 req_data
->u
.sym_req
.u
.chain
.para
.len_to_hash
=
171 sym_op
->auth
.data
.length
;
172 req_data
->u
.sym_req
.u
.chain
.para
.aad_len
=
175 if (chain_para
->hash_mode
== VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN
)
176 req_data
->u
.sym_req
.u
.chain
.para
.hash_result_len
=
177 chain_para
->u
.hash_param
.hash_result_len
;
178 if (chain_para
->hash_mode
==
179 VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH
)
180 req_data
->u
.sym_req
.u
.chain
.para
.hash_result_len
=
181 chain_para
->u
.mac_param
.hash_result_len
;
191 virtqueue_crypto_sym_enqueue_xmit(
192 struct virtqueue
*txvq
,
193 struct rte_crypto_op
*cop
)
199 struct vq_desc_extra
*dxp
;
200 struct vring_desc
*start_dp
;
201 struct vring_desc
*desc
;
202 uint64_t indirect_op_data_req_phys_addr
;
203 uint16_t req_data_len
= sizeof(struct virtio_crypto_op_data_req
);
204 uint32_t indirect_vring_addr_offset
= req_data_len
+
205 sizeof(struct virtio_crypto_inhdr
);
206 uint32_t indirect_iv_addr_offset
= indirect_vring_addr_offset
+
207 sizeof(struct vring_desc
) * NUM_ENTRY_VIRTIO_CRYPTO_OP
;
208 struct rte_crypto_sym_op
*sym_op
= cop
->sym
;
209 struct virtio_crypto_session
*session
=
210 (struct virtio_crypto_session
*)get_sym_session_private_data(
211 cop
->sym
->session
, cryptodev_virtio_driver_id
);
212 struct virtio_crypto_op_data_req
*op_data_req
;
213 uint32_t hash_result_len
= 0;
214 struct virtio_crypto_op_cookie
*crypto_op_cookie
;
215 struct virtio_crypto_alg_chain_session_para
*para
;
217 if (unlikely(sym_op
->m_src
->nb_segs
!= 1))
219 if (unlikely(txvq
->vq_free_cnt
== 0))
221 if (unlikely(txvq
->vq_free_cnt
< needed
))
223 head_idx
= txvq
->vq_desc_head_idx
;
224 if (unlikely(head_idx
>= txvq
->vq_nentries
))
226 if (unlikely(session
== NULL
))
229 dxp
= &txvq
->vq_descx
[head_idx
];
231 if (rte_mempool_get(txvq
->mpool
, &dxp
->cookie
)) {
232 VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
235 crypto_op_cookie
= dxp
->cookie
;
236 indirect_op_data_req_phys_addr
=
237 rte_mempool_virt2iova(crypto_op_cookie
);
238 op_data_req
= (struct virtio_crypto_op_data_req
*)crypto_op_cookie
;
240 if (virtqueue_crypto_sym_pkt_header_arrange(cop
, op_data_req
, session
))
243 /* status is initialized to VIRTIO_CRYPTO_ERR */
244 ((struct virtio_crypto_inhdr
*)
245 ((uint8_t *)op_data_req
+ req_data_len
))->status
=
248 /* point to indirect vring entry */
249 desc
= (struct vring_desc
*)
250 ((uint8_t *)op_data_req
+ indirect_vring_addr_offset
);
251 for (idx
= 0; idx
< (NUM_ENTRY_VIRTIO_CRYPTO_OP
- 1); idx
++)
252 desc
[idx
].next
= idx
+ 1;
253 desc
[NUM_ENTRY_VIRTIO_CRYPTO_OP
- 1].next
= VQ_RING_DESC_CHAIN_END
;
257 /* indirect vring: first part, virtio_crypto_op_data_req */
258 desc
[idx
].addr
= indirect_op_data_req_phys_addr
;
259 desc
[idx
].len
= req_data_len
;
260 desc
[idx
++].flags
= VRING_DESC_F_NEXT
;
262 /* indirect vring: iv of cipher */
263 if (session
->iv
.length
) {
265 desc
[idx
].addr
= cop
->phys_addr
+ session
->iv
.offset
;
267 rte_memcpy(crypto_op_cookie
->iv
,
268 rte_crypto_op_ctod_offset(cop
,
269 uint8_t *, session
->iv
.offset
),
271 desc
[idx
].addr
= indirect_op_data_req_phys_addr
+
272 indirect_iv_addr_offset
;
275 desc
[idx
].len
= session
->iv
.length
;
276 desc
[idx
++].flags
= VRING_DESC_F_NEXT
;
279 /* indirect vring: additional auth data */
280 if (session
->aad
.length
) {
281 desc
[idx
].addr
= session
->aad
.phys_addr
;
282 desc
[idx
].len
= session
->aad
.length
;
283 desc
[idx
++].flags
= VRING_DESC_F_NEXT
;
286 /* indirect vring: src data */
287 desc
[idx
].addr
= rte_pktmbuf_mtophys_offset(sym_op
->m_src
, 0);
288 desc
[idx
].len
= (sym_op
->cipher
.data
.offset
289 + sym_op
->cipher
.data
.length
);
290 desc
[idx
++].flags
= VRING_DESC_F_NEXT
;
292 /* indirect vring: dst data */
294 desc
[idx
].addr
= rte_pktmbuf_mtophys_offset(sym_op
->m_dst
, 0);
295 desc
[idx
].len
= (sym_op
->cipher
.data
.offset
296 + sym_op
->cipher
.data
.length
);
298 desc
[idx
].addr
= rte_pktmbuf_mtophys_offset(sym_op
->m_src
, 0);
299 desc
[idx
].len
= (sym_op
->cipher
.data
.offset
300 + sym_op
->cipher
.data
.length
);
302 desc
[idx
++].flags
= VRING_DESC_F_WRITE
| VRING_DESC_F_NEXT
;
304 /* indirect vring: digest result */
305 para
= &(session
->ctrl
.u
.sym_create_session
.u
.chain
.para
);
306 if (para
->hash_mode
== VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN
)
307 hash_result_len
= para
->u
.hash_param
.hash_result_len
;
308 if (para
->hash_mode
== VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH
)
309 hash_result_len
= para
->u
.mac_param
.hash_result_len
;
310 if (hash_result_len
> 0) {
311 desc
[idx
].addr
= sym_op
->auth
.digest
.phys_addr
;
312 desc
[idx
].len
= hash_result_len
;
313 desc
[idx
++].flags
= VRING_DESC_F_WRITE
| VRING_DESC_F_NEXT
;
316 /* indirect vring: last part, status returned */
317 desc
[idx
].addr
= indirect_op_data_req_phys_addr
+ req_data_len
;
318 desc
[idx
].len
= sizeof(struct virtio_crypto_inhdr
);
319 desc
[idx
++].flags
= VRING_DESC_F_WRITE
;
323 /* save the infos to use when receiving packets */
324 dxp
->crypto_op
= (void *)cop
;
325 dxp
->ndescs
= needed
;
327 /* use a single buffer */
328 start_dp
= txvq
->vq_ring
.desc
;
329 start_dp
[head_idx
].addr
= indirect_op_data_req_phys_addr
+
330 indirect_vring_addr_offset
;
331 start_dp
[head_idx
].len
= num_entry
* sizeof(struct vring_desc
);
332 start_dp
[head_idx
].flags
= VRING_DESC_F_INDIRECT
;
334 idx
= start_dp
[head_idx
].next
;
335 txvq
->vq_desc_head_idx
= idx
;
336 if (txvq
->vq_desc_head_idx
== VQ_RING_DESC_CHAIN_END
)
337 txvq
->vq_desc_tail_idx
= idx
;
338 txvq
->vq_free_cnt
= (uint16_t)(txvq
->vq_free_cnt
- needed
);
339 vq_update_avail_ring(txvq
, head_idx
);
345 virtqueue_crypto_enqueue_xmit(struct virtqueue
*txvq
,
346 struct rte_crypto_op
*cop
)
351 case RTE_CRYPTO_OP_TYPE_SYMMETRIC
:
352 ret
= virtqueue_crypto_sym_enqueue_xmit(txvq
, cop
);
355 VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
365 virtio_crypto_vring_start(struct virtqueue
*vq
)
367 struct virtio_crypto_hw
*hw
= vq
->hw
;
368 int i
, size
= vq
->vq_nentries
;
369 struct vring
*vr
= &vq
->vq_ring
;
370 uint8_t *ring_mem
= vq
->vq_ring_virt_mem
;
372 PMD_INIT_FUNC_TRACE();
374 vring_init(vr
, size
, ring_mem
, VIRTIO_PCI_VRING_ALIGN
);
375 vq
->vq_desc_tail_idx
= (uint16_t)(vq
->vq_nentries
- 1);
376 vq
->vq_free_cnt
= vq
->vq_nentries
;
378 /* Chain all the descriptors in the ring with an END */
379 for (i
= 0; i
< size
- 1; i
++)
380 vr
->desc
[i
].next
= (uint16_t)(i
+ 1);
381 vr
->desc
[i
].next
= VQ_RING_DESC_CHAIN_END
;
384 * Disable device(host) interrupting guest
386 virtqueue_disable_intr(vq
);
389 * Set guest physical address of the virtqueue
390 * in VIRTIO_PCI_QUEUE_PFN config register of device
391 * to share with the backend
393 if (VTPCI_OPS(hw
)->setup_queue(hw
, vq
) < 0) {
394 VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
402 virtio_crypto_ctrlq_start(struct rte_cryptodev
*dev
)
404 struct virtio_crypto_hw
*hw
= dev
->data
->dev_private
;
407 virtio_crypto_vring_start(hw
->cvq
);
408 VIRTQUEUE_DUMP((struct virtqueue
*)hw
->cvq
);
413 virtio_crypto_dataq_start(struct rte_cryptodev
*dev
)
417 * - Setup vring structure for data queues
420 struct virtio_crypto_hw
*hw
= dev
->data
->dev_private
;
422 PMD_INIT_FUNC_TRACE();
424 /* Start data vring. */
425 for (i
= 0; i
< hw
->max_dataqueues
; i
++) {
426 virtio_crypto_vring_start(dev
->data
->queue_pairs
[i
]);
427 VIRTQUEUE_DUMP((struct virtqueue
*)dev
->data
->queue_pairs
[i
]);
431 /* vring size of data queue is 1024 */
432 #define VIRTIO_MBUF_BURST_SZ 1024
435 virtio_crypto_pkt_rx_burst(void *tx_queue
, struct rte_crypto_op
**rx_pkts
,
438 struct virtqueue
*txvq
= tx_queue
;
439 uint16_t nb_used
, num
, nb_rx
;
441 nb_used
= VIRTQUEUE_NUSED(txvq
);
445 num
= (uint16_t)(likely(nb_used
<= nb_pkts
) ? nb_used
: nb_pkts
);
446 num
= (uint16_t)(likely(num
<= VIRTIO_MBUF_BURST_SZ
)
447 ? num
: VIRTIO_MBUF_BURST_SZ
);
452 nb_rx
= virtqueue_dequeue_burst_rx(txvq
, rx_pkts
, num
);
453 VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used
, num
);
459 virtio_crypto_pkt_tx_burst(void *tx_queue
, struct rte_crypto_op
**tx_pkts
,
462 struct virtqueue
*txvq
;
466 if (unlikely(nb_pkts
< 1))
468 if (unlikely(tx_queue
== NULL
)) {
469 VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
474 VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts
);
476 for (nb_tx
= 0; nb_tx
< nb_pkts
; nb_tx
++) {
477 struct rte_mbuf
*txm
= tx_pkts
[nb_tx
]->sym
->m_src
;
478 /* nb_segs is always 1 at virtio crypto situation */
479 int need
= txm
->nb_segs
- txvq
->vq_free_cnt
;
482 * Positive value indicates it hasn't enough space in vring
485 if (unlikely(need
> 0)) {
487 * try it again because the receive process may be
490 need
= txm
->nb_segs
- txvq
->vq_free_cnt
;
491 if (unlikely(need
> 0)) {
492 VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
493 "descriptors to transmit");
498 txvq
->packets_sent_total
++;
500 /* Enqueue Packet buffers */
501 error
= virtqueue_crypto_enqueue_xmit(txvq
, tx_pkts
[nb_tx
]);
502 if (unlikely(error
)) {
504 VIRTIO_CRYPTO_TX_LOG_ERR(
505 "virtqueue_enqueue Free count = 0");
506 else if (error
== EMSGSIZE
)
507 VIRTIO_CRYPTO_TX_LOG_ERR(
508 "virtqueue_enqueue Free count < 1");
510 VIRTIO_CRYPTO_TX_LOG_ERR(
511 "virtqueue_enqueue error: %d", error
);
512 txvq
->packets_sent_failed
++;
518 vq_update_avail_idx(txvq
);
520 if (unlikely(virtqueue_kick_prepare(txvq
))) {
521 virtqueue_notify(txvq
);
522 VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");