1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2017-2019 NXP
3 */
4
5 #include <fcntl.h>
6 #include <unistd.h>
7 #include <sched.h>
8 #include <net/if.h>
9
10 #include <rte_byteorder.h>
11 #include <rte_common.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_crypto.h>
14 #include <rte_cryptodev.h>
15 #include <rte_bus_vdev.h>
16 #include <rte_malloc.h>
17 #include <rte_security_driver.h>
18 #include <rte_hexdump.h>
19
20 #include <caam_jr_capabilities.h>
21 #include <caam_jr_config.h>
22 #include <caam_jr_hw_specific.h>
23 #include <caam_jr_pvt.h>
24 #include <caam_jr_desc.h>
25 #include <caam_jr_log.h>
26
27 /* RTA header files */
28 #include <desc/common.h>
29 #include <desc/algo.h>
30 #include <dpaa_of.h>
31 #ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
32 #define CAAM_JR_DBG 1
33 #else
34 #define CAAM_JR_DBG 0
35 #endif
36 #define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
37 static uint8_t cryptodev_driver_id;
38 int caam_jr_logtype;
39
40 /* Lists the states possible for the SEC user space driver. */
41 enum sec_driver_state_e {
42 SEC_DRIVER_STATE_IDLE, /* Driver not initialized */
43 SEC_DRIVER_STATE_STARTED, /* Driver initialized and can be used*/
44 SEC_DRIVER_STATE_RELEASE, /* Driver release is in progress */
45 };
46
47 /* Job rings used for communication with SEC HW */
48 static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
49
50 /* The current state of SEC user space driver */
51 static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
52
53 /* The number of job rings used by SEC user space driver */
54 static int g_job_rings_no;
55 static int g_job_rings_max;
56
57 struct sec_outring_entry {
58 phys_addr_t desc; /* Pointer to completed descriptor */
59 uint32_t status; /* Status for completed descriptor */
60 } __rte_packed;
61
62 /* virtual to physical address conversion when mempool support is available for ctx */
63 static inline phys_addr_t
64 caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
65 {
66 return (size_t)vaddr - ctx->vtop_offset;
67 }
68
69 static inline void
70 caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
71 {
72 	/* the op status has already been set on ctx->op; return the ctx memory to its pool */
73 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
74 }
75
76 static inline struct caam_jr_op_ctx *
77 caam_jr_alloc_ctx(struct caam_jr_session *ses)
78 {
79 struct caam_jr_op_ctx *ctx;
80 int ret;
81
82 ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
83 if (!ctx || ret) {
84 CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
85 return NULL;
86 }
87 	/*
88 	 * Clear the SG memory. There are 16 SG entries of 16 bytes each.
89 	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
90 	 * to clear all the SG entries. caam_jr_alloc_ctx() is called for
91 	 * each packet, and memset() is costlier than dcbz_64().
92 	 */
93 dcbz_64(&ctx->sg[SG_CACHELINE_0]);
94 dcbz_64(&ctx->sg[SG_CACHELINE_1]);
95 dcbz_64(&ctx->sg[SG_CACHELINE_2]);
96 dcbz_64(&ctx->sg[SG_CACHELINE_3]);
97
98 ctx->ctx_pool = ses->ctx_pool;
99 ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
100
101 return ctx;
102 }
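
/*
 * Illustrative sketch, not part of the driver: the vtop_offset computed
 * above lets caam_jr_vtop_ctx() convert any virtual address inside the
 * ctx to its IOVA with a single subtraction, because the mempool object
 * is physically contiguous. Assuming a ctx obtained from this pool:
 *
 *	struct caam_jr_op_ctx *ctx = caam_jr_alloc_ctx(ses);
 *	// vtop_offset == virt(ctx) - iova(ctx), computed once per alloc
 *	phys_addr_t sg_iova = caam_jr_vtop_ctx(ctx, &ctx->sg[0]);
 *	// equivalent to rte_mempool_virt2iova(ctx)
 *	//               + ((size_t)&ctx->sg[0] - (size_t)ctx)
 */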
103
104 static
105 void caam_jr_stats_get(struct rte_cryptodev *dev,
106 struct rte_cryptodev_stats *stats)
107 {
108 struct caam_jr_qp **qp = (struct caam_jr_qp **)
109 dev->data->queue_pairs;
110 int i;
111
112 PMD_INIT_FUNC_TRACE();
113 if (stats == NULL) {
114 CAAM_JR_ERR("Invalid stats ptr NULL");
115 return;
116 }
117 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
118 if (qp[i] == NULL) {
119 CAAM_JR_WARN("Uninitialised queue pair");
120 continue;
121 }
122
123 stats->enqueued_count += qp[i]->tx_pkts;
124 stats->dequeued_count += qp[i]->rx_pkts;
125 stats->enqueue_err_count += qp[i]->tx_errs;
126 stats->dequeue_err_count += qp[i]->rx_errs;
127 CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
128 "\n\tTX Ring Full = %" PRIu64,
129 qp[i]->rx_poll_err,
130 qp[i]->tx_ring_full);
131 }
132 }
133
134 static
135 void caam_jr_stats_reset(struct rte_cryptodev *dev)
136 {
137 int i;
138 struct caam_jr_qp **qp = (struct caam_jr_qp **)
139 (dev->data->queue_pairs);
140
141 PMD_INIT_FUNC_TRACE();
142 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
143 if (qp[i] == NULL) {
144 CAAM_JR_WARN("Uninitialised queue pair");
145 continue;
146 }
147 qp[i]->rx_pkts = 0;
148 qp[i]->rx_errs = 0;
149 qp[i]->rx_poll_err = 0;
150 qp[i]->tx_pkts = 0;
151 qp[i]->tx_errs = 0;
152 qp[i]->tx_ring_full = 0;
153 }
154 }
155
156 static inline int
157 is_cipher_only(struct caam_jr_session *ses)
158 {
159 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
160 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
161 }
162
163 static inline int
164 is_auth_only(struct caam_jr_session *ses)
165 {
166 return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
167 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
168 }
169
170 static inline int
171 is_aead(struct caam_jr_session *ses)
172 {
173 return ((ses->cipher_alg == 0) &&
174 (ses->auth_alg == 0) &&
175 (ses->aead_alg != 0));
176 }
177
178 static inline int
179 is_auth_cipher(struct caam_jr_session *ses)
180 {
181 return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
182 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
183 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
184 }
185
186 static inline int
187 is_proto_ipsec(struct caam_jr_session *ses)
188 {
189 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
190 }
191
192 static inline int
193 is_encode(struct caam_jr_session *ses)
194 {
195 return ses->dir == DIR_ENC;
196 }
197
198 static inline int
199 is_decode(struct caam_jr_session *ses)
200 {
201 return ses->dir == DIR_DEC;
202 }
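
/*
 * Worked example of the predicates above (hypothetical session values,
 * for illustration only): a cipher+auth session is routed differently
 * depending on whether an IPsec security protocol is attached:
 *
 *	struct caam_jr_session s = {
 *		.cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.auth_alg   = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		.proto_alg  = RTE_SECURITY_PROTOCOL_IPSEC,
 *	};
 *	is_auth_cipher(&s);  // 0: IPsec sessions are excluded here
 *	is_proto_ipsec(&s);  // 1: the enqueue path uses build_proto()
 */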
203
204 static inline void
205 caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
206 {
207 switch (ses->auth_alg) {
208 case RTE_CRYPTO_AUTH_NULL:
209 ses->digest_length = 0;
210 break;
211 case RTE_CRYPTO_AUTH_MD5_HMAC:
212 alginfo_a->algtype =
213 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
214 OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
215 alginfo_a->algmode = OP_ALG_AAI_HMAC;
216 break;
217 case RTE_CRYPTO_AUTH_SHA1_HMAC:
218 alginfo_a->algtype =
219 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
220 OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
221 alginfo_a->algmode = OP_ALG_AAI_HMAC;
222 break;
223 case RTE_CRYPTO_AUTH_SHA224_HMAC:
224 alginfo_a->algtype =
225 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
226 OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
227 alginfo_a->algmode = OP_ALG_AAI_HMAC;
228 break;
229 case RTE_CRYPTO_AUTH_SHA256_HMAC:
230 alginfo_a->algtype =
231 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
232 OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
233 alginfo_a->algmode = OP_ALG_AAI_HMAC;
234 break;
235 case RTE_CRYPTO_AUTH_SHA384_HMAC:
236 alginfo_a->algtype =
237 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
238 OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
239 alginfo_a->algmode = OP_ALG_AAI_HMAC;
240 break;
241 case RTE_CRYPTO_AUTH_SHA512_HMAC:
242 alginfo_a->algtype =
243 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
244 OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
245 alginfo_a->algmode = OP_ALG_AAI_HMAC;
246 break;
247 default:
248 CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
249 }
250 }
251
252 static inline void
253 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
254 {
255 switch (ses->cipher_alg) {
256 case RTE_CRYPTO_CIPHER_NULL:
257 break;
258 case RTE_CRYPTO_CIPHER_AES_CBC:
259 alginfo_c->algtype =
260 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
261 OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
262 alginfo_c->algmode = OP_ALG_AAI_CBC;
263 break;
264 case RTE_CRYPTO_CIPHER_3DES_CBC:
265 alginfo_c->algtype =
266 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
267 OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
268 alginfo_c->algmode = OP_ALG_AAI_CBC;
269 break;
270 case RTE_CRYPTO_CIPHER_AES_CTR:
271 alginfo_c->algtype =
272 (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
273 OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
274 alginfo_c->algmode = OP_ALG_AAI_CTR;
275 break;
276 default:
277 CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
278 }
279 }
280
281 static inline void
282 caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
283 {
284 switch (ses->aead_alg) {
285 case RTE_CRYPTO_AEAD_AES_GCM:
286 alginfo->algtype = OP_ALG_ALGSEL_AES;
287 alginfo->algmode = OP_ALG_AAI_GCM;
288 break;
289 default:
290 CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
291 }
292 }
293
294 /* prepare command block of the session */
295 static int
296 caam_jr_prep_cdb(struct caam_jr_session *ses)
297 {
298 struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
299 int32_t shared_desc_len = 0;
300 struct sec_cdb *cdb;
301 int err;
302 #if CAAM_BYTE_ORDER == CORE_BYTE_ORDER
303 int swap = false;
304 #else
305 int swap = true;
306 #endif
307
308 if (ses->cdb)
309 caam_jr_dma_free(ses->cdb);
310
311 cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
312 if (!cdb) {
313 CAAM_JR_ERR("failed to allocate memory for cdb\n");
314 return -1;
315 }
316
317 ses->cdb = cdb;
318
319 memset(cdb, 0, sizeof(struct sec_cdb));
320
321 if (is_cipher_only(ses)) {
322 caam_cipher_alg(ses, &alginfo_c);
323 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
324 CAAM_JR_ERR("not supported cipher alg");
325 rte_free(cdb);
326 return -ENOTSUP;
327 }
328
329 alginfo_c.key = (size_t)ses->cipher_key.data;
330 alginfo_c.keylen = ses->cipher_key.length;
331 alginfo_c.key_enc_flags = 0;
332 alginfo_c.key_type = RTA_DATA_IMM;
333
334 shared_desc_len = cnstr_shdsc_blkcipher(
335 cdb->sh_desc, true,
336 swap, SHR_NEVER, &alginfo_c,
337 ses->iv.length,
338 ses->dir);
339 } else if (is_auth_only(ses)) {
340 caam_auth_alg(ses, &alginfo_a);
341 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
342 CAAM_JR_ERR("not supported auth alg");
343 rte_free(cdb);
344 return -ENOTSUP;
345 }
346
347 alginfo_a.key = (size_t)ses->auth_key.data;
348 alginfo_a.keylen = ses->auth_key.length;
349 alginfo_a.key_enc_flags = 0;
350 alginfo_a.key_type = RTA_DATA_IMM;
351
352 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
353 swap, SHR_NEVER, &alginfo_a,
354 !ses->dir,
355 ses->digest_length);
356 } else if (is_aead(ses)) {
357 caam_aead_alg(ses, &alginfo);
358 if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
359 CAAM_JR_ERR("not supported aead alg");
360 rte_free(cdb);
361 return -ENOTSUP;
362 }
363 alginfo.key = (size_t)ses->aead_key.data;
364 alginfo.keylen = ses->aead_key.length;
365 alginfo.key_enc_flags = 0;
366 alginfo.key_type = RTA_DATA_IMM;
367
368 if (ses->dir == DIR_ENC)
369 shared_desc_len = cnstr_shdsc_gcm_encap(
370 cdb->sh_desc, true, swap,
371 SHR_NEVER, &alginfo,
372 ses->iv.length,
373 ses->digest_length);
374 else
375 shared_desc_len = cnstr_shdsc_gcm_decap(
376 cdb->sh_desc, true, swap,
377 SHR_NEVER, &alginfo,
378 ses->iv.length,
379 ses->digest_length);
380 } else {
381 caam_cipher_alg(ses, &alginfo_c);
382 if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
383 CAAM_JR_ERR("not supported cipher alg");
384 rte_free(cdb);
385 return -ENOTSUP;
386 }
387
388 alginfo_c.key = (size_t)ses->cipher_key.data;
389 alginfo_c.keylen = ses->cipher_key.length;
390 alginfo_c.key_enc_flags = 0;
391 alginfo_c.key_type = RTA_DATA_IMM;
392
393 caam_auth_alg(ses, &alginfo_a);
394 if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
395 CAAM_JR_ERR("not supported auth alg");
396 rte_free(cdb);
397 return -ENOTSUP;
398 }
399
400 alginfo_a.key = (size_t)ses->auth_key.data;
401 alginfo_a.keylen = ses->auth_key.length;
402 alginfo_a.key_enc_flags = 0;
403 alginfo_a.key_type = RTA_DATA_IMM;
404
405 cdb->sh_desc[0] = alginfo_c.keylen;
406 cdb->sh_desc[1] = alginfo_a.keylen;
407 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
408 MIN_JOB_DESC_SIZE,
409 (unsigned int *)cdb->sh_desc,
410 &cdb->sh_desc[2], 2);
411
412 if (err < 0) {
413 CAAM_JR_ERR("Crypto: Incorrect key lengths");
414 rte_free(cdb);
415 return err;
416 }
417 if (cdb->sh_desc[2] & 1)
418 alginfo_c.key_type = RTA_DATA_IMM;
419 else {
420 alginfo_c.key = (size_t)caam_jr_mem_vtop(
421 (void *)(size_t)alginfo_c.key);
422 alginfo_c.key_type = RTA_DATA_PTR;
423 }
424 if (cdb->sh_desc[2] & (1<<1))
425 alginfo_a.key_type = RTA_DATA_IMM;
426 else {
427 alginfo_a.key = (size_t)caam_jr_mem_vtop(
428 (void *)(size_t)alginfo_a.key);
429 alginfo_a.key_type = RTA_DATA_PTR;
430 }
431 cdb->sh_desc[0] = 0;
432 cdb->sh_desc[1] = 0;
433 cdb->sh_desc[2] = 0;
434 if (is_proto_ipsec(ses)) {
435 if (ses->dir == DIR_ENC) {
436 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
437 cdb->sh_desc,
438 true, swap, SHR_SERIAL,
439 &ses->encap_pdb,
440 (uint8_t *)&ses->ip4_hdr,
441 &alginfo_c, &alginfo_a);
442 } else if (ses->dir == DIR_DEC) {
443 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
444 cdb->sh_desc,
445 true, swap, SHR_SERIAL,
446 &ses->decap_pdb,
447 &alginfo_c, &alginfo_a);
448 }
449 } else {
450 			/* Auth_only_len is overwritten in the job descriptor for each job */
451 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
452 true, swap, SHR_SERIAL,
453 &alginfo_c, &alginfo_a,
454 ses->iv.length,
455 ses->digest_length, ses->dir);
456 }
457 }
458
459 if (shared_desc_len < 0) {
460 CAAM_JR_ERR("error in preparing command block");
461 return shared_desc_len;
462 }
463
464 #if CAAM_JR_DBG
465 SEC_DUMP_DESC(cdb->sh_desc);
466 #endif
467
468 cdb->sh_hdr.hi.field.idlen = shared_desc_len;
469
470 return 0;
471 }
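
/*
 * Note on the rta_inline_query() step above (the bit layout is an
 * observation from this code, not a normative description of RTA):
 * sh_desc[0]/sh_desc[1] temporarily hold the cipher and auth key
 * lengths, and sh_desc[2] receives one bit per key telling whether
 * that key still fits inline in the shared descriptor:
 *
 *	if (cdb->sh_desc[2] & (1 << 0))  // cipher key: keep RTA_DATA_IMM
 *	if (cdb->sh_desc[2] & (1 << 1))  // auth key:   keep RTA_DATA_IMM
 *	// a cleared bit downgrades the key to RTA_DATA_PTR, so the
 *	// descriptor stores only the key's physical address
 */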
472
473 /* @brief Poll the HW for already-processed jobs in the JR
474  * and either silently discard the available jobs or notify them to UA
475  * with the indicated error code.
476 *
477 * @param [in,out] job_ring The job ring to poll.
478 * @param [in] do_notify Can be #TRUE or #FALSE. Indicates if
479 * descriptors are to be discarded
480 * or notified to UA with given error_code.
481 * @param [out] notified_descs Number of notified descriptors. Can be NULL
482 * if do_notify is #FALSE
483 */
484 static void
485 hw_flush_job_ring(struct sec_job_ring_t *job_ring,
486 uint32_t do_notify,
487 uint32_t *notified_descs)
488 {
489 int32_t jobs_no_to_discard = 0;
490 int32_t discarded_descs_no = 0;
491
492 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
493 job_ring, job_ring->pidx, job_ring->cidx, do_notify);
494
495 jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
496
497 /* Discard all jobs */
498 CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
499 job_ring, job_ring->pidx, job_ring->cidx,
500 jobs_no_to_discard);
501
502 while (jobs_no_to_discard > discarded_descs_no) {
503 discarded_descs_no++;
504 		/* Now increment the consumer index for the current job ring,
505 		 * AFTER saving the job in a temporary location!
506 		 */
508 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
509 SEC_JOB_RING_SIZE);
510
511 hw_remove_entries(job_ring, 1);
512 }
513
514 if (do_notify == true) {
515 ASSERT(notified_descs != NULL);
516 *notified_descs = discarded_descs_no;
517 }
518 }
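
/*
 * Minimal sketch of the index arithmetic used above, assuming
 * SEC_CIRCULAR_COUNTER() advances an index by one and wraps at the
 * ring size (hypothetical stand-alone equivalent, not the real macro):
 *
 *	static inline uint32_t
 *	ring_advance(uint32_t idx, uint32_t ring_size)
 *	{
 *		return (idx + 1) % ring_size;	// 0, 1, ..., size-1, 0, ...
 *	}
 *
 * The producer index (pidx) moves on enqueue; the consumer index (cidx)
 * moves here on flush and in hw_poll_job_ring() on completion.
 */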
519
520 /* @brief Poll the HW for already processed jobs in the JR
521 * and notify the available jobs to UA.
522 *
523 * @param [in] job_ring The job ring to poll.
524 * @param [in] limit The maximum number of jobs to notify.
525 * If set to negative value, all available jobs are
526 * notified.
527 *
528  * @retval >=0 Number of jobs notified to UA.
529  * @retval -1 on error
530 */
531 static int
532 hw_poll_job_ring(struct sec_job_ring_t *job_ring,
533 struct rte_crypto_op **ops, int32_t limit,
534 struct caam_jr_qp *jr_qp)
535 {
536 int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
537 int32_t number_of_jobs_available = 0;
538 int32_t notified_descs_no = 0;
539 uint32_t sec_error_code = 0;
540 struct job_descriptor *current_desc;
541 phys_addr_t current_desc_addr;
542 phys_addr_t *temp_addr;
543 struct caam_jr_op_ctx *ctx;
544
545 	/* TODO: check that the ops array has enough room */
546 /* check here if any JR error that cannot be written
547 * in the output status word has occurred
548 */
549 if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
550 CAAM_JR_INFO("err received");
551 sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
552 GET_JR_REG(JRINT, job_ring));
553 if (unlikely(sec_error_code)) {
554 hw_job_ring_error_print(job_ring, sec_error_code);
555 return -1;
556 }
557 }
558 /* compute the number of jobs available in the job ring based on the
559 * producer and consumer index values.
560 */
561 number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
562 /* Compute the number of notifications that need to be raised to UA
563 * If limit > total number of done jobs -> notify all done jobs
564 * If limit = 0 -> error
565 * If limit < total number of done jobs -> notify a number
566 * of done jobs equal with limit
567 */
568 jobs_no_to_notify = (limit > number_of_jobs_available) ?
569 number_of_jobs_available : limit;
570 CAAM_JR_DP_DEBUG(
571 "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
572 job_ring, job_ring->pidx, job_ring->cidx,
573 limit, number_of_jobs_available, jobs_no_to_notify);
574
575 rte_smp_rmb();
576
577 while (jobs_no_to_notify > notified_descs_no) {
578 static uint64_t false_alarm;
579 static uint64_t real_poll;
580
581 /* Get job status here */
582 sec_error_code = job_ring->output_ring[job_ring->cidx].status;
583 /* Get completed descriptor */
584 temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
585 current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
586
587 real_poll++;
588 /* todo check if it is false alarm no desc present */
589 if (!current_desc_addr) {
590 false_alarm++;
591 printf("false alarm %" PRIu64 "real %" PRIu64
592 " sec_err =0x%x cidx Index =0%d\n",
593 false_alarm, real_poll,
594 sec_error_code, job_ring->cidx);
595 rte_panic("CAAM JR descriptor NULL");
596 return notified_descs_no;
597 }
598 current_desc = (struct job_descriptor *)
599 caam_jr_dma_ptov(current_desc_addr);
600 /* now increment the consumer index for the current job ring,
601 * AFTER saving job in temporary location!
602 */
603 job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
604 SEC_JOB_RING_SIZE);
605 /* Signal that the job has been processed and the slot is free*/
606 hw_remove_entries(job_ring, 1);
607 /*TODO for multiple ops, packets*/
608 ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
609 if (unlikely(sec_error_code)) {
610 CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
611 job_ring->cidx, sec_error_code);
612 hw_handle_job_ring_error(job_ring, sec_error_code);
613 			/* TODO: map the exact HW error to a crypto op status */
614 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
615 jr_qp->rx_errs++;
616 } else {
617 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
618 #if CAAM_JR_DBG
619 if (ctx->op->sym->m_dst) {
620 rte_hexdump(stdout, "PROCESSED",
621 rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
622 rte_pktmbuf_data_len(ctx->op->sym->m_dst));
623 } else {
624 rte_hexdump(stdout, "PROCESSED",
625 rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
626 rte_pktmbuf_data_len(ctx->op->sym->m_src));
627 }
628 #endif
629 }
630 if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
631 struct ip *ip4_hdr;
632
633 if (ctx->op->sym->m_dst) {
634 /*TODO check for ip header or other*/
635 ip4_hdr = (struct ip *)
636 rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
637 ctx->op->sym->m_dst->pkt_len =
638 rte_be_to_cpu_16(ip4_hdr->ip_len);
639 ctx->op->sym->m_dst->data_len =
640 rte_be_to_cpu_16(ip4_hdr->ip_len);
641 } else {
642 ip4_hdr = (struct ip *)
643 rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
644 ctx->op->sym->m_src->pkt_len =
645 rte_be_to_cpu_16(ip4_hdr->ip_len);
646 ctx->op->sym->m_src->data_len =
647 rte_be_to_cpu_16(ip4_hdr->ip_len);
648 }
649 }
650 *ops = ctx->op;
651 caam_jr_op_ending(ctx);
652 ops++;
653 notified_descs_no++;
654 }
655 return notified_descs_no;
656 }
657
658 static uint16_t
659 caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
660 uint16_t nb_ops)
661 {
662 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
663 struct sec_job_ring_t *ring = jr_qp->ring;
664 int num_rx;
665 int ret;
666
667 CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
668
669 	/* Poll the job ring until either nb_ops jobs have been notified or
670 	 * no more completed jobs are available (nb_ops is unsigned here,
671 	 * so the negative "poll until empty" case cannot occur).
672 	 */
673
674 /* Run hw poll job ring */
675 num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
676 if (num_rx < 0) {
677 CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
678 return 0;
679 }
680
681 CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
682
683 if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
684 if (num_rx < nb_ops) {
685 ret = caam_jr_enable_irqs(ring->irq_fd);
686 SEC_ASSERT(ret == 0, ret,
687 "Failed to enable irqs for job ring %p", ring);
688 }
689 } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
690
691 /* Always enable IRQ generation when in pure IRQ mode */
692 ret = caam_jr_enable_irqs(ring->irq_fd);
693 SEC_ASSERT(ret == 0, ret,
694 "Failed to enable irqs for job ring %p", ring);
695 }
696
697 jr_qp->rx_pkts += num_rx;
698
699 return num_rx;
700 }
701
702 /**
703 * packet looks like:
704 * |<----data_len------->|
705 * |ip_header|ah_header|icv|payload|
706 * ^
707 * |
708 * mbuf->pkt.data
709 */
710 static inline struct caam_jr_op_ctx *
711 build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
712 {
713 struct rte_crypto_sym_op *sym = op->sym;
714 struct rte_mbuf *mbuf = sym->m_src;
715 struct caam_jr_op_ctx *ctx;
716 struct sec4_sg_entry *sg;
717 int length;
718 struct sec_cdb *cdb;
719 uint64_t sdesc_offset;
720 struct sec_job_descriptor_t *jobdescr;
721 uint8_t extra_segs;
722
723 if (is_decode(ses))
724 extra_segs = 2;
725 else
726 extra_segs = 1;
727
728 if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
729 CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
730 MAX_SG_ENTRIES);
731 return NULL;
732 }
733
734 ctx = caam_jr_alloc_ctx(ses);
735 if (!ctx)
736 return NULL;
737
738 ctx->op = op;
739
740 cdb = ses->cdb;
741 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
742
743 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
744
745 SEC_JD_INIT(jobdescr);
746 SEC_JD_SET_SD(jobdescr,
747 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
748 cdb->sh_hdr.hi.field.idlen);
749
750 /* output */
751 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
752 0, ses->digest_length);
753
754 /*input */
755 sg = &ctx->sg[0];
756 length = sym->auth.data.length;
757 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
758 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
759
760 /* Successive segs */
761 mbuf = mbuf->next;
762 while (mbuf) {
763 sg++;
764 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
765 sg->len = cpu_to_caam32(mbuf->data_len);
766 mbuf = mbuf->next;
767 }
768
769 if (is_decode(ses)) {
770 /* digest verification case */
771 sg++;
772 /* hash result or digest, save digest first */
773 rte_memcpy(ctx->digest, sym->auth.digest.data,
774 ses->digest_length);
775 #if CAAM_JR_DBG
776 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
777 #endif
778 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
779 sg->len = cpu_to_caam32(ses->digest_length);
780 length += ses->digest_length;
781 } else {
782 sg->len -= ses->digest_length;
783 }
784
785 /* last element*/
786 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
787
788 SEC_JD_SET_IN_PTR(jobdescr,
789 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
790 /* enabling sg list */
791 (jobdescr)->seq_in.command.word |= 0x01000000;
792
793 return ctx;
794 }
795
796 static inline struct caam_jr_op_ctx *
797 build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
798 {
799 struct rte_crypto_sym_op *sym = op->sym;
800 struct caam_jr_op_ctx *ctx;
801 struct sec4_sg_entry *sg;
802 rte_iova_t start_addr;
803 struct sec_cdb *cdb;
804 uint64_t sdesc_offset;
805 struct sec_job_descriptor_t *jobdescr;
806
807 ctx = caam_jr_alloc_ctx(ses);
808 if (!ctx)
809 return NULL;
810
811 ctx->op = op;
812
813 cdb = ses->cdb;
814 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
815
816 start_addr = rte_pktmbuf_iova(sym->m_src);
817
818 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
819
820 SEC_JD_INIT(jobdescr);
821 SEC_JD_SET_SD(jobdescr,
822 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
823 cdb->sh_hdr.hi.field.idlen);
824
825 /* output */
826 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
827 0, ses->digest_length);
828
829 /*input */
830 if (is_decode(ses)) {
831 sg = &ctx->sg[0];
832 SEC_JD_SET_IN_PTR(jobdescr,
833 (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
834 (sym->auth.data.length + ses->digest_length));
835 /* enabling sg list */
836 (jobdescr)->seq_in.command.word |= 0x01000000;
837
838 /* hash result or digest, save digest first */
839 rte_memcpy(ctx->digest, sym->auth.digest.data,
840 ses->digest_length);
841 sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
842 sg->len = cpu_to_caam32(sym->auth.data.length);
843
844 #if CAAM_JR_DBG
845 rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
846 #endif
847 /* let's check digest by hw */
848 sg++;
849 sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
850 sg->len = cpu_to_caam32(ses->digest_length);
851 /* last element*/
852 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
853 } else {
854 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
855 sym->auth.data.offset, sym->auth.data.length);
856 }
857 return ctx;
858 }
859
860 static inline struct caam_jr_op_ctx *
861 build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
862 {
863 struct rte_crypto_sym_op *sym = op->sym;
864 struct rte_mbuf *mbuf = sym->m_src;
865 struct caam_jr_op_ctx *ctx;
866 struct sec4_sg_entry *sg, *in_sg;
867 int length;
868 struct sec_cdb *cdb;
869 uint64_t sdesc_offset;
870 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
871 ses->iv.offset);
872 struct sec_job_descriptor_t *jobdescr;
873 uint8_t reg_segs;
874
875 if (sym->m_dst) {
876 mbuf = sym->m_dst;
877 reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
878 } else {
879 mbuf = sym->m_src;
880 reg_segs = mbuf->nb_segs * 2 + 2;
881 }
882
883 if (reg_segs > MAX_SG_ENTRIES) {
884 CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
885 MAX_SG_ENTRIES);
886 return NULL;
887 }
888
889 ctx = caam_jr_alloc_ctx(ses);
890 if (!ctx)
891 return NULL;
892
893 ctx->op = op;
894 cdb = ses->cdb;
895 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
896
897 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
898
899 SEC_JD_INIT(jobdescr);
900 SEC_JD_SET_SD(jobdescr,
901 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
902 cdb->sh_hdr.hi.field.idlen);
903
904 #if CAAM_JR_DBG
905 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
906 sym->m_src->data_off, sym->cipher.data.offset,
907 sym->cipher.data.length, ses->iv.length);
908 #endif
909 /* output */
910 if (sym->m_dst)
911 mbuf = sym->m_dst;
912 else
913 mbuf = sym->m_src;
914
915 sg = &ctx->sg[0];
916 length = sym->cipher.data.length;
917
918 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
919 + sym->cipher.data.offset);
920 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
921
922 /* Successive segs */
923 mbuf = mbuf->next;
924 while (mbuf) {
925 sg++;
926 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
927 sg->len = cpu_to_caam32(mbuf->data_len);
928 mbuf = mbuf->next;
929 }
930 /* last element*/
931 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
932
933 SEC_JD_SET_OUT_PTR(jobdescr,
934 (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
935 length);
936 /*enabling sg bit */
937 (jobdescr)->seq_out.command.word |= 0x01000000;
938
939 /*input */
940 sg++;
941 mbuf = sym->m_src;
942 in_sg = sg;
943
944 length = sym->cipher.data.length + ses->iv.length;
945
946 /* IV */
947 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
948 sg->len = cpu_to_caam32(ses->iv.length);
949
950 /* 1st seg */
951 sg++;
952 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
953 + sym->cipher.data.offset);
954 sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
955
956 /* Successive segs */
957 mbuf = mbuf->next;
958 while (mbuf) {
959 sg++;
960 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
961 sg->len = cpu_to_caam32(mbuf->data_len);
962 mbuf = mbuf->next;
963 }
964 /* last element*/
965 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
966
967
968 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
969 length);
970 /*enabling sg bit */
971 (jobdescr)->seq_in.command.word |= 0x01000000;
972
973 return ctx;
974 }
975
976 static inline struct caam_jr_op_ctx *
977 build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
978 {
979 struct rte_crypto_sym_op *sym = op->sym;
980 struct caam_jr_op_ctx *ctx;
981 struct sec4_sg_entry *sg;
982 rte_iova_t src_start_addr, dst_start_addr;
983 struct sec_cdb *cdb;
984 uint64_t sdesc_offset;
985 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
986 ses->iv.offset);
987 struct sec_job_descriptor_t *jobdescr;
988
989 ctx = caam_jr_alloc_ctx(ses);
990 if (!ctx)
991 return NULL;
992
993 ctx->op = op;
994 cdb = ses->cdb;
995 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
996
997 src_start_addr = rte_pktmbuf_iova(sym->m_src);
998 if (sym->m_dst)
999 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1000 else
1001 dst_start_addr = src_start_addr;
1002
1003 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1004
1005 SEC_JD_INIT(jobdescr);
1006 SEC_JD_SET_SD(jobdescr,
1007 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1008 cdb->sh_hdr.hi.field.idlen);
1009
1010 #if CAAM_JR_DBG
1011 CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
1012 sym->m_src->data_off, sym->cipher.data.offset,
1013 sym->cipher.data.length, ses->iv.length);
1014 #endif
1015 /* output */
1016 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
1017 sym->cipher.data.offset,
1018 sym->cipher.data.length + ses->iv.length);
1019
1020 /*input */
1021 sg = &ctx->sg[0];
1022 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
1023 sym->cipher.data.length + ses->iv.length);
1024 /*enabling sg bit */
1025 (jobdescr)->seq_in.command.word |= 0x01000000;
1026
1027 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1028 sg->len = cpu_to_caam32(ses->iv.length);
1029
1030 sg = &ctx->sg[1];
1031 sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
1032 sg->len = cpu_to_caam32(sym->cipher.data.length);
1033 /* last element*/
1034 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1035
1036 return ctx;
1037 }
1038
1039 /* For decapsulation:
1040 * Input:
1041 * +----+----------------+--------------------------------+-----+
1042 * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
1043 * +----+----------------+--------------------------------+-----+
1044 * Output:
1045  * +--------------------------------+
1046  * | Decrypted & authenticated data |
1047  * +--------------------------------+
1048 */
1049
1050 static inline struct caam_jr_op_ctx *
1051 build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
1052 {
1053 struct rte_crypto_sym_op *sym = op->sym;
1054 struct caam_jr_op_ctx *ctx;
1055 struct sec4_sg_entry *sg, *out_sg, *in_sg;
1056 struct rte_mbuf *mbuf;
1057 uint32_t length = 0;
1058 struct sec_cdb *cdb;
1059 uint64_t sdesc_offset;
1060 uint8_t req_segs;
1061 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1062 ses->iv.offset);
1063 struct sec_job_descriptor_t *jobdescr;
1064 uint16_t auth_hdr_len = sym->cipher.data.offset -
1065 sym->auth.data.offset;
1066 uint16_t auth_tail_len = sym->auth.data.length -
1067 sym->cipher.data.length - auth_hdr_len;
1068 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
1069
1070 if (sym->m_dst) {
1071 mbuf = sym->m_dst;
1072 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
1073 } else {
1074 mbuf = sym->m_src;
1075 req_segs = mbuf->nb_segs * 2 + 3;
1076 }
1077
1078 if (req_segs > MAX_SG_ENTRIES) {
1079 CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1080 MAX_SG_ENTRIES);
1081 return NULL;
1082 }
1083
1084 ctx = caam_jr_alloc_ctx(ses);
1085 if (!ctx)
1086 return NULL;
1087
1088 ctx->op = op;
1089 cdb = ses->cdb;
1090 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1091
1092 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1093
1094 SEC_JD_INIT(jobdescr);
1095 SEC_JD_SET_SD(jobdescr,
1096 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1097 cdb->sh_hdr.hi.field.idlen);
1098
1099 /* output */
1100 if (sym->m_dst)
1101 mbuf = sym->m_dst;
1102 else
1103 mbuf = sym->m_src;
1104
1105 out_sg = &ctx->sg[0];
1106 if (is_encode(ses))
1107 length = sym->auth.data.length + ses->digest_length;
1108 else
1109 length = sym->auth.data.length;
1110
1111 sg = &ctx->sg[0];
1112
1113 /* 1st seg */
1114 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1115 + sym->auth.data.offset);
1116 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1117
1118 /* Successive segs */
1119 mbuf = mbuf->next;
1120 while (mbuf) {
1121 sg++;
1122 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1123 sg->len = cpu_to_caam32(mbuf->data_len);
1124 mbuf = mbuf->next;
1125 }
1126
1127 if (is_encode(ses)) {
1128 /* set auth output */
1129 sg++;
1130 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1131 sg->len = cpu_to_caam32(ses->digest_length);
1132 }
1133 /* last element*/
1134 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1135
1136 SEC_JD_SET_OUT_PTR(jobdescr,
1137 (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
1138 /* set sg bit */
1139 (jobdescr)->seq_out.command.word |= 0x01000000;
1140
1141 /* input */
1142 sg++;
1143 mbuf = sym->m_src;
1144 in_sg = sg;
1145 if (is_encode(ses))
1146 length = ses->iv.length + sym->auth.data.length;
1147 else
1148 length = ses->iv.length + sym->auth.data.length
1149 + ses->digest_length;
1150
1151 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1152 sg->len = cpu_to_caam32(ses->iv.length);
1153
1154 sg++;
1155 /* 1st seg */
1156 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
1157 + sym->auth.data.offset);
1158 sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
1159
1160 /* Successive segs */
1161 mbuf = mbuf->next;
1162 while (mbuf) {
1163 sg++;
1164 sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
1165 sg->len = cpu_to_caam32(mbuf->data_len);
1166 mbuf = mbuf->next;
1167 }
1168
1169 if (is_decode(ses)) {
1170 sg++;
1171 rte_memcpy(ctx->digest, sym->auth.digest.data,
1172 ses->digest_length);
1173 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1174 sg->len = cpu_to_caam32(ses->digest_length);
1175 }
1176 /* last element*/
1177 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1178
1179 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
1180 length);
1181 /* set sg bit */
1182 (jobdescr)->seq_in.command.word |= 0x01000000;
1183 	/* Auth_only_len is set to 0 in the shared descriptor and is
1184 	 * overwritten here in the job descriptor, which updates
1185 	 * the DPOVRD register.
1186 	 */
1187 if (auth_only_len)
1188 		/* override the auth-only length via DPOVRD */
1189 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1190
1191 return ctx;
1192 }
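
/*
 * Worked example of the auth_only_len packing above (hypothetical
 * offsets, for illustration only): with auth.data.offset = 0,
 * auth.data.length = 72, cipher.data.offset = 24 and
 * cipher.data.length = 48,
 *
 *	auth_hdr_len  = 24 - 0 = 24;         // authenticated-only header
 *	auth_tail_len = 72 - 48 - 24 = 0;    // authenticated-only tail
 *	auth_only_len = (0 << 16) | 24;      // == 0x00000018
 *
 * and DPOVRD is written as 0x80000000 | 0x18, the top bit enabling the
 * override in the shared descriptor.
 */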
1193
1194 static inline struct caam_jr_op_ctx *
1195 build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
1196 {
1197 struct rte_crypto_sym_op *sym = op->sym;
1198 struct caam_jr_op_ctx *ctx;
1199 struct sec4_sg_entry *sg;
1200 rte_iova_t src_start_addr, dst_start_addr;
1201 uint32_t length = 0;
1202 struct sec_cdb *cdb;
1203 uint64_t sdesc_offset;
1204 uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1205 ses->iv.offset);
1206 struct sec_job_descriptor_t *jobdescr;
1207 uint16_t auth_hdr_len = sym->cipher.data.offset -
1208 sym->auth.data.offset;
1209 uint16_t auth_tail_len = sym->auth.data.length -
1210 sym->cipher.data.length - auth_hdr_len;
1211 uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
1212
1213 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1214 if (sym->m_dst)
1215 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1216 else
1217 dst_start_addr = src_start_addr;
1218
1219 ctx = caam_jr_alloc_ctx(ses);
1220 if (!ctx)
1221 return NULL;
1222
1223 ctx->op = op;
1224 cdb = ses->cdb;
1225 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1226
1227 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1228
1229 SEC_JD_INIT(jobdescr);
1230 SEC_JD_SET_SD(jobdescr,
1231 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1232 cdb->sh_hdr.hi.field.idlen);
1233
1234 /* input */
1235 sg = &ctx->sg[0];
1236 if (is_encode(ses)) {
1237 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1238 sg->len = cpu_to_caam32(ses->iv.length);
1239 length += ses->iv.length;
1240
1241 sg++;
1242 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1243 sg->len = cpu_to_caam32(sym->auth.data.length);
1244 length += sym->auth.data.length;
1245 /* last element*/
1246 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1247 } else {
1248 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
1249 sg->len = cpu_to_caam32(ses->iv.length);
1250 length += ses->iv.length;
1251
1252 sg++;
1253 sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
1254 sg->len = cpu_to_caam32(sym->auth.data.length);
1255 length += sym->auth.data.length;
1256
1257 rte_memcpy(ctx->digest, sym->auth.digest.data,
1258 ses->digest_length);
1259 sg++;
1260 sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
1261 sg->len = cpu_to_caam32(ses->digest_length);
1262 length += ses->digest_length;
1263 /* last element*/
1264 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1265 }
1266
1267 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
1268 length);
1269 /* set sg bit */
1270 (jobdescr)->seq_in.command.word |= 0x01000000;
1271
1272 /* output */
1273 sg = &ctx->sg[6];
1274
1275 sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
1276 sg->len = cpu_to_caam32(sym->cipher.data.length);
1277 length = sym->cipher.data.length;
1278
1279 if (is_encode(ses)) {
1280 /* set auth output */
1281 sg++;
1282 sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
1283 sg->len = cpu_to_caam32(ses->digest_length);
1284 length += ses->digest_length;
1285 }
1286 /* last element*/
1287 sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
1288
1289 SEC_JD_SET_OUT_PTR(jobdescr,
1290 (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
1291 /* set sg bit */
1292 (jobdescr)->seq_out.command.word |= 0x01000000;
1293
1294 	/* Auth_only_len is set to 0 in the shared descriptor and is
1295 	 * overwritten here in the job descriptor, which updates
1296 	 * the DPOVRD register.
1297 	 */
1298 if (auth_only_len)
1299 		/* override the auth-only length via DPOVRD */
1300 (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
1301
1302 return ctx;
1303 }
1304
1305 static inline struct caam_jr_op_ctx *
1306 build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
1307 {
1308 struct rte_crypto_sym_op *sym = op->sym;
1309 struct caam_jr_op_ctx *ctx = NULL;
1310 phys_addr_t src_start_addr, dst_start_addr;
1311 struct sec_cdb *cdb;
1312 uint64_t sdesc_offset;
1313 struct sec_job_descriptor_t *jobdescr;
1314
1315 ctx = caam_jr_alloc_ctx(ses);
1316 if (!ctx)
1317 return NULL;
1318 ctx->op = op;
1319
1320 src_start_addr = rte_pktmbuf_iova(sym->m_src);
1321 if (sym->m_dst)
1322 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1323 else
1324 dst_start_addr = src_start_addr;
1325
1326 cdb = ses->cdb;
1327 sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
1328
1329 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1330
1331 SEC_JD_INIT(jobdescr);
1332 SEC_JD_SET_SD(jobdescr,
1333 (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
1334 cdb->sh_hdr.hi.field.idlen);
1335
1336 /* output */
1337 SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
1338 sym->m_src->buf_len - sym->m_src->data_off);
1339 /* input */
1340 SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
1341 sym->m_src->pkt_len);
1342 sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1343
1344 return ctx;
1345 }
1346
1347 static int
1348 caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
1349 {
1350 struct sec_job_ring_t *ring = qp->ring;
1351 struct caam_jr_session *ses;
1352 struct caam_jr_op_ctx *ctx = NULL;
1353 struct sec_job_descriptor_t *jobdescr __rte_unused;
1354
1355 switch (op->sess_type) {
1356 case RTE_CRYPTO_OP_WITH_SESSION:
1357 ses = (struct caam_jr_session *)
1358 get_sym_session_private_data(op->sym->session,
1359 cryptodev_driver_id);
1360 break;
1361 case RTE_CRYPTO_OP_SECURITY_SESSION:
1362 ses = (struct caam_jr_session *)
1363 get_sec_session_private_data(
1364 op->sym->sec_session);
1365 break;
1366 default:
1367 CAAM_JR_DP_ERR("sessionless crypto op not supported");
1368 qp->tx_errs++;
1369 return -1;
1370 }
1371
1372 if (unlikely(!ses->qp || ses->qp != qp)) {
1373 CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
1374 ses->qp = qp;
1375 caam_jr_prep_cdb(ses);
1376 }
1377
1378 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1379 if (is_auth_cipher(ses))
1380 ctx = build_cipher_auth(op, ses);
1381 else if (is_aead(ses))
1382 goto err1;
1383 else if (is_auth_only(ses))
1384 ctx = build_auth_only(op, ses);
1385 else if (is_cipher_only(ses))
1386 ctx = build_cipher_only(op, ses);
1387 else if (is_proto_ipsec(ses))
1388 ctx = build_proto(op, ses);
1389 } else {
1390 if (is_auth_cipher(ses))
1391 ctx = build_cipher_auth_sg(op, ses);
1392 else if (is_aead(ses))
1393 goto err1;
1394 else if (is_auth_only(ses))
1395 ctx = build_auth_only_sg(op, ses);
1396 else if (is_cipher_only(ses))
1397 ctx = build_cipher_only_sg(op, ses);
1398 }
1399 err1:
1400 if (unlikely(!ctx)) {
1401 qp->tx_errs++;
1402 CAAM_JR_ERR("not supported sec op");
1403 return -1;
1404 }
1405 #if CAAM_JR_DBG
1406 if (is_decode(ses))
1407 rte_hexdump(stdout, "DECODE",
1408 rte_pktmbuf_mtod(op->sym->m_src, void *),
1409 rte_pktmbuf_data_len(op->sym->m_src));
1410 else
1411 rte_hexdump(stdout, "ENCODE",
1412 rte_pktmbuf_mtod(op->sym->m_src, void *),
1413 rte_pktmbuf_data_len(op->sym->m_src));
1414
1415 printf("\n JD before conversion\n");
1416 for (int i = 0; i < 12; i++)
1417 printf("\n 0x%08x", ctx->jobdes.desc[i]);
1418 #endif
1419
1420 CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
1421 ring, ring->pidx, ring->cidx);
1422
1423 /* todo - do we want to retry */
1424 if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
1425 SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
1426 CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
1427 ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
1428 caam_jr_op_ending(ctx);
1429 qp->tx_ring_full++;
1430 return -EBUSY;
1431 }
1432
1433 #if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
1434 jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
1435
1436 jobdescr->deschdr.command.word =
1437 cpu_to_caam32(jobdescr->deschdr.command.word);
1438 jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
1439 jobdescr->seq_out.command.word =
1440 cpu_to_caam32(jobdescr->seq_out.command.word);
1441 jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
1442 jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
1443 jobdescr->seq_in.command.word =
1444 cpu_to_caam32(jobdescr->seq_in.command.word);
1445 jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
1446 jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
1447 jobdescr->load_dpovrd.command.word =
1448 cpu_to_caam32(jobdescr->load_dpovrd.command.word);
1449 jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
1450 #endif
1451
1452 /* Set ptr in input ring to current descriptor */
1453 sec_write_addr(&ring->input_ring[ring->pidx],
1454 (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
1455 rte_smp_wmb();
1456
1457 /* Notify HW that a new job is enqueued */
1458 hw_enqueue_desc_on_job_ring(ring);
1459
1460 /* increment the producer index for the current job ring */
1461 ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
1462
1463 return 0;
1464 }
1465
1466 static uint16_t
1467 caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1468 uint16_t nb_ops)
1469 {
1470 	/* Transmit the frames to the given device and queue pair */
1471 uint32_t loop;
1472 int32_t ret;
1473 struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
1474 uint16_t num_tx = 0;
1475 	/* Prepare each packet to be sent */
1476 for (loop = 0; loop < nb_ops; loop++) {
1477 ret = caam_jr_enqueue_op(ops[loop], jr_qp);
1478 if (!ret)
1479 num_tx++;
1480 }
1481
1482 jr_qp->tx_pkts += num_tx;
1483
1484 return num_tx;
1485 }
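
/*
 * Minimal usage sketch from the application side (dev_id, qp_id and the
 * ops array are hypothetical; error handling omitted). The generic
 * cryptodev burst calls below resolve to caam_jr_enqueue_burst() and
 * caam_jr_dequeue_burst() for this PMD:
 *
 *	struct rte_crypto_op *ops[32];
 *	uint16_t n_enq, n_deq = 0;
 *
 *	n_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, 32);
 *	while (n_deq < n_enq)	// simplistic busy-wait for completions
 *		n_deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						     &ops[n_deq],
 *						     n_enq - n_deq);
 */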
1486
1487 /* Release queue pair */
1488 static int
1489 caam_jr_queue_pair_release(struct rte_cryptodev *dev,
1490 uint16_t qp_id)
1491 {
1492 struct sec_job_ring_t *internals;
1493 struct caam_jr_qp *qp = NULL;
1494
1495 PMD_INIT_FUNC_TRACE();
1496 CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
1497
1498 internals = dev->data->dev_private;
1499 if (qp_id >= internals->max_nb_queue_pairs) {
1500 CAAM_JR_ERR("Max supported qpid %d",
1501 internals->max_nb_queue_pairs);
1502 return -EINVAL;
1503 }
1504
1505 qp = &internals->qps[qp_id];
1506 qp->ring = NULL;
1507 dev->data->queue_pairs[qp_id] = NULL;
1508
1509 return 0;
1510 }
1511
1512 /* Setup a queue pair */
1513 static int
1514 caam_jr_queue_pair_setup(
1515 struct rte_cryptodev *dev, uint16_t qp_id,
1516 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1517 __rte_unused int socket_id)
1518 {
1519 struct sec_job_ring_t *internals;
1520 struct caam_jr_qp *qp = NULL;
1521
1522 PMD_INIT_FUNC_TRACE();
1523 CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1524
1525 internals = dev->data->dev_private;
1526 if (qp_id >= internals->max_nb_queue_pairs) {
1527 CAAM_JR_ERR("Max supported qpid %d",
1528 internals->max_nb_queue_pairs);
1529 return -EINVAL;
1530 }
1531
1532 qp = &internals->qps[qp_id];
1533 qp->ring = internals;
1534 dev->data->queue_pairs[qp_id] = qp;
1535
1536 return 0;
1537 }
1538
1539 /* Returns the size of the caam_jr session structure */
1540 static unsigned int
1541 caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1542 {
1543 PMD_INIT_FUNC_TRACE();
1544
1545 return sizeof(struct caam_jr_session);
1546 }
1547
1548 static int
1549 caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
1550 struct rte_crypto_sym_xform *xform,
1551 struct caam_jr_session *session)
1552 {
1553 session->cipher_alg = xform->cipher.algo;
1554 session->iv.length = xform->cipher.iv.length;
1555 session->iv.offset = xform->cipher.iv.offset;
1556 session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1557 RTE_CACHE_LINE_SIZE);
1558 if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1559 CAAM_JR_ERR("No Memory for cipher key\n");
1560 return -ENOMEM;
1561 }
1562 session->cipher_key.length = xform->cipher.key.length;
1563
1564 memcpy(session->cipher_key.data, xform->cipher.key.data,
1565 xform->cipher.key.length);
1566 session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1567 DIR_ENC : DIR_DEC;
1568
1569 return 0;
1570 }
1571
1572 static int
1573 caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
1574 struct rte_crypto_sym_xform *xform,
1575 struct caam_jr_session *session)
1576 {
1577 session->auth_alg = xform->auth.algo;
1578 session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1579 RTE_CACHE_LINE_SIZE);
1580 if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1581 CAAM_JR_ERR("No Memory for auth key\n");
1582 return -ENOMEM;
1583 }
1584 session->auth_key.length = xform->auth.key.length;
1585 session->digest_length = xform->auth.digest_length;
1586
1587 memcpy(session->auth_key.data, xform->auth.key.data,
1588 xform->auth.key.length);
1589 session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1590 DIR_ENC : DIR_DEC;
1591
1592 return 0;
1593 }
1594
1595 static int
1596 caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
1597 struct rte_crypto_sym_xform *xform,
1598 struct caam_jr_session *session)
1599 {
1600 session->aead_alg = xform->aead.algo;
1601 session->iv.length = xform->aead.iv.length;
1602 session->iv.offset = xform->aead.iv.offset;
1603 session->auth_only_len = xform->aead.aad_length;
1604 session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1605 RTE_CACHE_LINE_SIZE);
1606 if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1607 CAAM_JR_ERR("No Memory for aead key\n");
1608 return -ENOMEM;
1609 }
1610 session->aead_key.length = xform->aead.key.length;
1611 session->digest_length = xform->aead.digest_length;
1612
1613 memcpy(session->aead_key.data, xform->aead.key.data,
1614 xform->aead.key.length);
1615 session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1616 DIR_ENC : DIR_DEC;
1617
1618 return 0;
1619 }
1620
1621 static int
1622 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
1623 struct rte_crypto_sym_xform *xform, void *sess)
1624 {
1625 struct sec_job_ring_t *internals = dev->data->dev_private;
1626 struct caam_jr_session *session = sess;
1627
1628 PMD_INIT_FUNC_TRACE();
1629
1630 if (unlikely(sess == NULL)) {
1631 CAAM_JR_ERR("invalid session struct");
1632 return -EINVAL;
1633 }
1634
1635 /* Default IV length = 0 */
1636 session->iv.length = 0;
1637
1638 /* Cipher Only */
1639 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1640 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1641 caam_jr_cipher_init(dev, xform, session);
1642
1643 /* Authentication Only */
1644 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1645 xform->next == NULL) {
1646 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1647 caam_jr_auth_init(dev, xform, session);
1648
1649 /* Cipher then Authenticate */
1650 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1651 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1652 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1653 caam_jr_cipher_init(dev, xform, session);
1654 caam_jr_auth_init(dev, xform->next, session);
1655 } else {
1656 CAAM_JR_ERR("Not supported: Auth then Cipher");
1657 goto err1;
1658 }
1659
1660 /* Authenticate then Cipher */
1661 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1662 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1663 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1664 caam_jr_auth_init(dev, xform, session);
1665 caam_jr_cipher_init(dev, xform->next, session);
1666 } else {
1667 CAAM_JR_ERR("Not supported: Auth then Cipher");
1668 goto err1;
1669 }
1670
1671 /* AEAD operation for AES-GCM kind of Algorithms */
1672 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1673 xform->next == NULL) {
1674 caam_jr_aead_init(dev, xform, session);
1675
1676 } else {
1677 CAAM_JR_ERR("Invalid crypto type");
1678 return -EINVAL;
1679 }
1680 session->ctx_pool = internals->ctx_pool;
1681
1682 return 0;
1683
1684 err1:
1685 rte_free(session->cipher_key.data);
1686 rte_free(session->auth_key.data);
1687 memset(session, 0, sizeof(struct caam_jr_session));
1688
1689 return -EINVAL;
1690 }
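
/*
 * Example xform chain for the "Cipher then Authenticate" branch above
 * (all values hypothetical, key fields omitted for brevity):
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.digest_length = 20,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xf,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.iv = { .offset = 16, .length = 16 },
 *		},
 *	};
 *	// caam_jr_set_session_parameters(dev, &cipher_xf, session);
 */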
1691
1692 static int
1693 caam_jr_sym_session_configure(struct rte_cryptodev *dev,
1694 struct rte_crypto_sym_xform *xform,
1695 struct rte_cryptodev_sym_session *sess,
1696 struct rte_mempool *mempool)
1697 {
1698 void *sess_private_data;
1699 int ret;
1700
1701 PMD_INIT_FUNC_TRACE();
1702
1703 if (rte_mempool_get(mempool, &sess_private_data)) {
1704 CAAM_JR_ERR("Couldn't get object from session mempool");
1705 return -ENOMEM;
1706 }
1707
1708 memset(sess_private_data, 0, sizeof(struct caam_jr_session));
1709 ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
1710 if (ret != 0) {
1711 CAAM_JR_ERR("failed to configure session parameters");
1712 /* Return session to mempool */
1713 rte_mempool_put(mempool, sess_private_data);
1714 return ret;
1715 }
1716
1717 set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
1718
1719 return 0;
1720 }
1721
1722 /* Clear the memory of session so it doesn't leave key material behind */
1723 static void
1724 caam_jr_sym_session_clear(struct rte_cryptodev *dev,
1725 struct rte_cryptodev_sym_session *sess)
1726 {
1727 uint8_t index = dev->driver_id;
1728 void *sess_priv = get_sym_session_private_data(sess, index);
1729 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1730
1731 PMD_INIT_FUNC_TRACE();
1732
1733 if (sess_priv) {
1734 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1735
1736 rte_free(s->cipher_key.data);
1737 rte_free(s->auth_key.data);
1738 memset(s, 0, sizeof(struct caam_jr_session));
1739 set_sym_session_private_data(sess, index, NULL);
1740 rte_mempool_put(sess_mp, sess_priv);
1741 }
1742 }
1743
1744 static int
1745 caam_jr_set_ipsec_session(struct rte_cryptodev *dev,
1746 struct rte_security_session_conf *conf,
1747 void *sess)
1748 {
1749 struct sec_job_ring_t *internals = dev->data->dev_private;
1750 struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1751 struct rte_crypto_auth_xform *auth_xform;
1752 struct rte_crypto_cipher_xform *cipher_xform;
1753 struct caam_jr_session *session = (struct caam_jr_session *)sess;
1754
1755 PMD_INIT_FUNC_TRACE();
1756
1757 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1758 cipher_xform = &conf->crypto_xform->cipher;
1759 auth_xform = &conf->crypto_xform->next->auth;
1760 } else {
1761 auth_xform = &conf->crypto_xform->auth;
1762 cipher_xform = &conf->crypto_xform->next->cipher;
1763 }
1764 session->proto_alg = conf->protocol;
1765 session->cipher_key.data = rte_zmalloc(NULL,
1766 cipher_xform->key.length,
1767 RTE_CACHE_LINE_SIZE);
1768 if (session->cipher_key.data == NULL &&
1769 cipher_xform->key.length > 0) {
1770 CAAM_JR_ERR("No Memory for cipher key\n");
1771 return -ENOMEM;
1772 }
1773
1774 session->cipher_key.length = cipher_xform->key.length;
1775 session->auth_key.data = rte_zmalloc(NULL,
1776 auth_xform->key.length,
1777 RTE_CACHE_LINE_SIZE);
1778 if (session->auth_key.data == NULL &&
1779 auth_xform->key.length > 0) {
1780 CAAM_JR_ERR("No Memory for auth key\n");
1781 rte_free(session->cipher_key.data);
1782 return -ENOMEM;
1783 }
1784 session->auth_key.length = auth_xform->key.length;
1785 memcpy(session->cipher_key.data, cipher_xform->key.data,
1786 cipher_xform->key.length);
1787 memcpy(session->auth_key.data, auth_xform->key.data,
1788 auth_xform->key.length);
1789
1790 switch (auth_xform->algo) {
1791 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1792 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1793 break;
1794 case RTE_CRYPTO_AUTH_MD5_HMAC:
1795 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1796 break;
1797 case RTE_CRYPTO_AUTH_SHA256_HMAC:
1798 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1799 break;
1800 case RTE_CRYPTO_AUTH_SHA384_HMAC:
1801 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1802 break;
1803 case RTE_CRYPTO_AUTH_SHA512_HMAC:
1804 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1805 break;
1806 case RTE_CRYPTO_AUTH_AES_CMAC:
1807 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1808 break;
1809 case RTE_CRYPTO_AUTH_NULL:
1810 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1811 break;
1812 case RTE_CRYPTO_AUTH_SHA224_HMAC:
1813 case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1814 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1815 case RTE_CRYPTO_AUTH_SHA1:
1816 case RTE_CRYPTO_AUTH_SHA256:
1817 case RTE_CRYPTO_AUTH_SHA512:
1818 case RTE_CRYPTO_AUTH_SHA224:
1819 case RTE_CRYPTO_AUTH_SHA384:
1820 case RTE_CRYPTO_AUTH_MD5:
1821 case RTE_CRYPTO_AUTH_AES_GMAC:
1822 case RTE_CRYPTO_AUTH_KASUMI_F9:
1823 case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1824 case RTE_CRYPTO_AUTH_ZUC_EIA3:
1825 CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
1826 auth_xform->algo);
1827 goto out;
1828 default:
1829 CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
1830 auth_xform->algo);
1831 goto out;
1832 }
1833
1834 switch (cipher_xform->algo) {
1835 case RTE_CRYPTO_CIPHER_AES_CBC:
1836 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1837 break;
1838 case RTE_CRYPTO_CIPHER_3DES_CBC:
1839 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1840 break;
1841 case RTE_CRYPTO_CIPHER_AES_CTR:
1842 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1843 break;
1844 case RTE_CRYPTO_CIPHER_NULL:
1845 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1846 case RTE_CRYPTO_CIPHER_3DES_ECB:
1847 case RTE_CRYPTO_CIPHER_AES_ECB:
1848 case RTE_CRYPTO_CIPHER_KASUMI_F8:
1849 CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
1850 cipher_xform->algo);
1851 goto out;
1852 default:
1853 CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
1854 cipher_xform->algo);
1855 goto out;
1856 }
1857
1858 if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1859 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1860 sizeof(session->ip4_hdr));
1861 session->ip4_hdr.ip_v = IPVERSION;
1862 session->ip4_hdr.ip_hl = 5;
1863 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1864 sizeof(session->ip4_hdr));
1865 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1866 session->ip4_hdr.ip_id = 0;
1867 session->ip4_hdr.ip_off = 0;
1868 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1869 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1870 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1871 : IPPROTO_AH;
1872 session->ip4_hdr.ip_sum = 0;
1873 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
1874 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
1875 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
1876 (void *)&session->ip4_hdr,
1877 sizeof(struct ip));
1878
1879 session->encap_pdb.options =
1880 (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
1881 PDBOPTS_ESP_OIHI_PDB_INL |
1882 PDBOPTS_ESP_IVSRC |
1883 PDBHMO_ESP_ENCAP_DTTL;
1884 if (ipsec_xform->options.esn)
1885 session->encap_pdb.options |= PDBOPTS_ESP_ESN;
1886 session->encap_pdb.spi = ipsec_xform->spi;
1887 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
1888
1889 session->dir = DIR_ENC;
1890 } else if (ipsec_xform->direction ==
1891 RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
1892 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
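/* The upper 16 bits of the decap PDB options word carry the outer
 * header length, here sizeof(struct ip) for an IPv4 outer header.
 */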
1893 session->decap_pdb.options = sizeof(struct ip) << 16;
1894 if (ipsec_xform->options.esn)
1895 session->decap_pdb.options |= PDBOPTS_ESP_ESN;
1896 session->dir = DIR_DEC;
1897 } else
1898 goto out;
1899 session->ctx_pool = internals->ctx_pool;
1900
1901 return 0;
1902 out:
1903 rte_free(session->auth_key.data);
1904 rte_free(session->cipher_key.data);
1905 memset(session, 0, sizeof(struct caam_jr_session));
1906 return -1;
1907 }
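/*
 * Caller-side sketch (illustrative only, not part of the driver): the
 * parsing above expects the crypto transform chain ordered
 * cipher -> auth for egress and auth -> cipher for ingress. A minimal
 * egress configuration, with placeholder keys and the tunnel endpoint
 * addresses omitted for brevity, could look like:
 *
 *	uint8_t cipher_key[16], auth_key[20];
 *	struct rte_crypto_sym_xform auth_x = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		.auth.key = { .data = auth_key, .length = sizeof(auth_key) },
 *	};
 *	struct rte_crypto_sym_xform cipher_x = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_x,
 *		.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.cipher.key = { .data = cipher_key,
 *				.length = sizeof(cipher_key) },
 *	};
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 1,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
 *		},
 *		.crypto_xform = &cipher_x,
 *	};
 */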
1908
1909 static int
1910 caam_jr_security_session_create(void *dev,
1911 struct rte_security_session_conf *conf,
1912 struct rte_security_session *sess,
1913 struct rte_mempool *mempool)
1914 {
1915 void *sess_private_data;
1916 struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
1917 int ret;
1918
1919 if (rte_mempool_get(mempool, &sess_private_data)) {
1920 CAAM_JR_ERR("Couldn't get object from session mempool");
1921 return -ENOMEM;
1922 }
1923
1924 switch (conf->protocol) {
1925 case RTE_SECURITY_PROTOCOL_IPSEC:
1926 ret = caam_jr_set_ipsec_session(cdev, conf,
1927 sess_private_data);
1928 break;
1929 case RTE_SECURITY_PROTOCOL_MACSEC:
1930 /* unsupported; return the object to the session mempool */
1931 rte_mempool_put(mempool, sess_private_data);
1932 return -ENOTSUP;
1933 default:
1934 rte_mempool_put(mempool, sess_private_data);
1935 return -EINVAL;
1936 }
1934 if (ret != 0) {
1935 CAAM_JR_ERR("failed to configure session parameters");
1936 /* Return session to mempool */
1937 rte_mempool_put(mempool, sess_private_data);
1938 return ret;
1939 }
1940
1941 set_sec_session_private_data(sess, sess_private_data);
1942
1943 return ret;
1944 }
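/*
 * Usage sketch (illustrative): applications reach this entry point via
 * the rte_security API; "sess_pool" must be sized for the private data
 * reported by rte_security_session_get_size():
 *
 *	void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sess =
 *		rte_security_session_create(sec_ctx, &conf, sess_pool);
 *	if (sess == NULL)
 *		rte_exit(EXIT_FAILURE, "session creation failed\n");
 */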
1945
1946 /* Clear the memory of session so it doesn't leave key material behind */
1947 static int
1948 caam_jr_security_session_destroy(void *dev __rte_unused,
1949 struct rte_security_session *sess)
1950 {
1951 PMD_INIT_FUNC_TRACE();
1952 void *sess_priv = get_sec_session_private_data(sess);
1953
1954 struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
1955
1956 if (sess_priv) {
1957 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1958
1959 rte_free(s->cipher_key.data);
1960 rte_free(s->auth_key.data);
1961 memset(s, 0, sizeof(struct caam_jr_session));
1962 set_sec_session_private_data(sess, NULL);
1963 rte_mempool_put(sess_mp, sess_priv);
1964 }
1965 return 0;
1966 }
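/*
 * Application-side counterpart (illustrative):
 *
 *	rte_security_session_destroy(sec_ctx, sess);
 */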
1967
1968
1969 static int
1970 caam_jr_dev_configure(struct rte_cryptodev *dev,
1971 struct rte_cryptodev_config *config __rte_unused)
1972 {
1973 char str[20];
1974 struct sec_job_ring_t *internals;
1975
1976 PMD_INIT_FUNC_TRACE();
1977
1978 internals = dev->data->dev_private;
1979 snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
1980 if (!internals->ctx_pool) {
1981 internals->ctx_pool = rte_mempool_create((const char *)str,
1982 CTX_POOL_NUM_BUFS,
1983 sizeof(struct caam_jr_op_ctx),
1984 CTX_POOL_CACHE_SIZE, 0,
1985 NULL, NULL, NULL, NULL,
1986 SOCKET_ID_ANY, 0);
1987 if (!internals->ctx_pool) {
1988 CAAM_JR_ERR("%s create failed\n", str);
1989 return -ENOMEM;
1990 }
1991 } else
1992 CAAM_JR_INFO("mempool already created for dev_id : %d",
1993 dev->data->dev_id);
1994
1995 return 0;
1996 }
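/*
 * Caller-side sketch (illustrative): the ctx_pool above is created on
 * the first rte_cryptodev_configure() call for this device:
 *
 *	struct rte_cryptodev_config cfg = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	rte_cryptodev_configure(dev_id, &cfg);
 */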
1997
1998 static int
1999 caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
2000 {
2001 PMD_INIT_FUNC_TRACE();
2002 return 0;
2003 }
2004
2005 static void
2006 caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
2007 {
2008 PMD_INIT_FUNC_TRACE();
2009 }
2010
2011 static int
2012 caam_jr_dev_close(struct rte_cryptodev *dev)
2013 {
2014 struct sec_job_ring_t *internals;
2015
2016 PMD_INIT_FUNC_TRACE();
2017
2018 if (dev == NULL)
2019 return -EINVAL;
2020
2021 internals = dev->data->dev_private;
2022 rte_mempool_free(internals->ctx_pool);
2023 internals->ctx_pool = NULL;
2024
2025 return 0;
2026 }
2027
2028 static void
2029 caam_jr_dev_infos_get(struct rte_cryptodev *dev,
2030 struct rte_cryptodev_info *info)
2031 {
2032 struct sec_job_ring_t *internals = dev->data->dev_private;
2033
2034 PMD_INIT_FUNC_TRACE();
2035 if (info != NULL) {
2036 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2037 info->feature_flags = dev->feature_flags;
2038 info->capabilities = caam_jr_get_cryptodev_capabilities();
2039 info->sym.max_nb_sessions = internals->max_nb_sessions;
2040 info->driver_id = cryptodev_driver_id;
2041 }
2042 }
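/*
 * Caller-side sketch (illustrative): these fields are consumed through
 * rte_cryptodev_info_get():
 *
 *	struct rte_cryptodev_info info;
 *	rte_cryptodev_info_get(dev_id, &info);
 *	printf("max queue pairs: %u\n", info.max_nb_queue_pairs);
 */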
2043
2044 static struct rte_cryptodev_ops caam_jr_ops = {
2045 .dev_configure = caam_jr_dev_configure,
2046 .dev_start = caam_jr_dev_start,
2047 .dev_stop = caam_jr_dev_stop,
2048 .dev_close = caam_jr_dev_close,
2049 .dev_infos_get = caam_jr_dev_infos_get,
2050 .stats_get = caam_jr_stats_get,
2051 .stats_reset = caam_jr_stats_reset,
2052 .queue_pair_setup = caam_jr_queue_pair_setup,
2053 .queue_pair_release = caam_jr_queue_pair_release,
2054 .sym_session_get_size = caam_jr_sym_session_get_size,
2055 .sym_session_configure = caam_jr_sym_session_configure,
2056 .sym_session_clear = caam_jr_sym_session_clear
2057 };
2058
2059 static struct rte_security_ops caam_jr_security_ops = {
2060 .session_create = caam_jr_security_session_create,
2061 .session_update = NULL,
2062 .session_stats_get = NULL,
2063 .session_destroy = caam_jr_security_session_destroy,
2064 .set_pkt_metadata = NULL,
2065 .capabilities_get = caam_jr_get_security_capabilities
2066 };
2067
2068 /* @brief Flush the job ring of any processed descs.
2069 * The processed descs are silently dropped,
2070 * WITHOUT notifying the UA (user application).
2071 */
2072 static void
2073 close_job_ring(struct sec_job_ring_t *job_ring)
2074 {
2075 if (job_ring->irq_fd != -1) {
2076 /* Producer index is frozen. If consumer index is not equal
2077 * with producer index, then we have descs to flush.
2078 */
2079 while (job_ring->pidx != job_ring->cidx)
2080 hw_flush_job_ring(job_ring, false, NULL);
2081
2082 /* free the uio job ring */
2083 free_job_ring(job_ring->irq_fd);
2084 job_ring->irq_fd = -1;
2085 caam_jr_dma_free(job_ring->input_ring);
2086 caam_jr_dma_free(job_ring->output_ring);
2087 g_job_rings_no--;
2088 }
2089 }
2090
2091 /** @brief Release the software and hardware resources tied to a job ring.
2092 * @param [in] job_ring The job ring
2093 *
2094 * @retval 0 for success
2095 * @retval -1 for error
2096 */
2097 static int
2098 shutdown_job_ring(struct sec_job_ring_t *job_ring)
2099 {
2100 int ret = 0;
2101
2102 PMD_INIT_FUNC_TRACE();
2103 ASSERT(job_ring != NULL);
2104 ret = hw_shutdown_job_ring(job_ring);
2105 SEC_ASSERT(ret == 0, ret,
2106 "Failed to shutdown hardware job ring %p",
2107 job_ring);
2108
2109 if (job_ring->coalescing_en)
2110 hw_job_ring_disable_coalescing(job_ring);
2111
2112 if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
2113 ret = caam_jr_disable_irqs(job_ring->irq_fd);
2114 SEC_ASSERT(ret == 0, ret,
2115 "Failed to disable irqs for job ring %p",
2116 job_ring);
2117 }
2118
2119 return ret;
2120 }
2121
2122 /*
2123 * @brief Release the resources used by the SEC user space driver.
2124 *
2125 * Reset and release SEC's job rings indicated by the User Application at
2126 * init_job_ring() and free any memory allocated internally.
2127 * Call once during application tear down.
2128 *
2129 * @note In case there are any descriptors in-flight (descriptors received by
2130 * SEC driver for processing and for which no response was yet provided to UA),
2131 * the descriptors are discarded without any notifications to User Application.
2132 *
2133 * @retval 0 (SEC_SUCCESS) for a successful execution
2134 * @retval -ENODEV if the device pointer is NULL
2135 */
2136 static int
2137 caam_jr_dev_uninit(struct rte_cryptodev *dev)
2138 {
2139 struct sec_job_ring_t *internals;
2140
2141 PMD_INIT_FUNC_TRACE();
2142 if (dev == NULL)
2143 return -ENODEV;
2144
2145 internals = dev->data->dev_private;
2146 rte_free(dev->security_ctx);
2147
2148 /* If any descriptors are in flight, poll and wait
2149 * until all descriptors are received and silently discarded.
2150 */
2151 if (internals) {
2152 shutdown_job_ring(internals);
2153 close_job_ring(internals);
2154 rte_mempool_free(internals->ctx_pool);
2155 }
2156
2157 CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
2158
2159 /* last caam jr instance */
2160 if (g_job_rings_no == 0)
2161 g_driver_state = SEC_DRIVER_STATE_IDLE;
2162
2163 return SEC_SUCCESS;
2164 }
2165
2166 /* @brief Initialize the software and hardware resources tied to a job ring.
2167 *
2168 * The notification model (jr_mode), the NAPI mode and the IRQ
2169 * coalescing parameters are currently hardcoded below: the driver
2170 * runs with #SEC_NOTIFICATION_TYPE_POLL and coalescing disabled.
2171 * The other defined models are #SEC_NOTIFICATION_TYPE_NAPI and
2172 * #SEC_NOTIFICATION_TYPE_IRQ. irq_coalescing_timer bounds the time
2173 * after processing a descriptor before raising an interrupt;
2174 * irq_coalescing_count sets how many descriptors must complete
2175 * before an interrupt is raised.
2176 *
2177 * @param [in] reg_base_addr The job ring base address register
2178 * @param [in] irq_id The job ring interrupt identification number
2179 * @retval job ring handle for successful job ring configuration
2180 * @retval NULL on error
2181 */
2187 static void *
2188 init_job_ring(void *reg_base_addr, int irq_id)
2189 {
2190 struct sec_job_ring_t *job_ring = NULL;
2191 int i, ret = 0;
2192 int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
2193 int napi_mode = 0;
2194 int irq_coalescing_timer = 0;
2195 int irq_coalescing_count = 0;
2196
2197 for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
2198 if (g_job_rings[i].irq_fd == -1) {
2199 job_ring = &g_job_rings[i];
2200 g_job_rings_no++;
2201 break;
2202 }
2203 }
2204 if (job_ring == NULL) {
2205 CAAM_JR_ERR("No free job ring\n");
2206 return NULL;
2207 }
2208
2209 job_ring->register_base_addr = reg_base_addr;
2210 job_ring->jr_mode = jr_mode;
2211 job_ring->napi_mode = 0;
2212 job_ring->irq_fd = irq_id;
2213
2214 /* Allocate mem for input and output ring */
2215
2216 /* Allocate memory for input ring */
2217 job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2218 SEC_DMA_MEM_INPUT_RING_SIZE);
2219 if (job_ring->input_ring == NULL) {
2220 CAAM_JR_ERR("Memory allocation failed for input ring");
2221 return NULL;
2222 }
2223 memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
2224
2225 /* Allocate memory for output ring */
2226 job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
2227 SEC_DMA_MEM_OUTPUT_RING_SIZE);
2228 if (job_ring->output_ring == NULL) {
2229 CAAM_JR_ERR("Memory allocation failed for output ring");
2230 caam_jr_dma_free(job_ring->input_ring);
2231 return NULL;
2232 }
2233 memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
2225
2226 /* Reset job ring in SEC hw and configure job ring registers */
2227 ret = hw_reset_job_ring(job_ring);
2228 if (ret != 0) {
2229 CAAM_JR_ERR("Failed to reset hardware job ring");
2230 goto cleanup;
2231 }
2232
2233 if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
2234 /* When SEC US driver works in NAPI mode, the UA can select
2235 * if the driver starts with IRQs on or off.
2236 */
2237 if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
2238 CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p",
2239 job_ring);
2240 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2241 if (ret != 0) {
2242 CAAM_JR_ERR("Failed to enable irqs for job ring");
2243 goto cleanup;
2244 }
2245 }
2246 } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
2247 /* When SEC US driver works in pure interrupt mode,
2248 * IRQ's are always enabled.
2249 */
2250 CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
2251 job_ring);
2252 ret = caam_jr_enable_irqs(job_ring->irq_fd);
2253 if (ret != 0) {
2254 CAAM_JR_ERR("Failed to enable irqs for job ring");
2255 goto cleanup;
2256 }
2257 }
2258 if (irq_coalescing_timer || irq_coalescing_count) {
2259 hw_job_ring_set_coalescing_param(job_ring,
2260 irq_coalescing_timer,
2261 irq_coalescing_count);
2262
2263 hw_job_ring_enable_coalescing(job_ring);
2264 job_ring->coalescing_en = 1;
2265 }
2266
2267 job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
2268 job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
2269 job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
2270
2271 return job_ring;
2272 cleanup:
2273 caam_jr_dma_free(job_ring->output_ring);
2274 caam_jr_dma_free(job_ring->input_ring);
2275 return NULL;
2276 }
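/*
 * Note: jr_mode is fixed to SEC_NOTIFICATION_TYPE_POLL above, so
 * completions are reaped by polling the data path rather than by
 * interrupt. Application-side sketch (illustrative):
 *
 *	struct rte_crypto_op *ops[32];
 *	uint16_t nb = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, 32);
 */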
2277
2278
2279 static int
2280 caam_jr_dev_init(const char *name,
2281 struct rte_vdev_device *vdev,
2282 struct rte_cryptodev_pmd_init_params *init_params)
2283 {
2284 struct rte_cryptodev *dev;
2285 struct rte_security_ctx *security_instance;
2286 struct uio_job_ring *job_ring;
2287 char str[RTE_CRYPTODEV_NAME_MAX_LEN];
2288
2289 PMD_INIT_FUNC_TRACE();
2290
2291 /* Validate driver state */
2292 if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
2293 g_job_rings_max = sec_configure();
2294 if (!g_job_rings_max) {
2295 CAAM_JR_ERR("No job ring detected on UIO !!!!");
2296 return -1;
2297 }
2298 /* Update driver state */
2299 g_driver_state = SEC_DRIVER_STATE_STARTED;
2300 }
2301
2302 if (g_job_rings_no >= g_job_rings_max) {
2303 CAAM_JR_ERR("No more job rings available max=%d!!!!",
2304 g_job_rings_max);
2305 return -1;
2306 }
2307
2308 job_ring = config_job_ring();
2309 if (job_ring == NULL) {
2310 CAAM_JR_ERR("failed to create job ring");
2311 goto init_error;
2312 }
2313
2314 snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
2315
2316 dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
2317 if (dev == NULL) {
2318 CAAM_JR_ERR("failed to create cryptodev vdev");
2319 goto cleanup;
2320 }
2321 /* TODO: free this during teardown */
2322 dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
2323 job_ring->uio_fd);
2324
2325 if (!dev->data->dev_private) {
2326 CAAM_JR_ERR("Ring memory allocation failed\n");
2327 goto cleanup2;
2328 }
2329
2330 dev->driver_id = cryptodev_driver_id;
2331 dev->dev_ops = &caam_jr_ops;
2332
2333 /* register rx/tx burst functions for data path */
2334 dev->dequeue_burst = caam_jr_dequeue_burst;
2335 dev->enqueue_burst = caam_jr_enqueue_burst;
2336 dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2337 RTE_CRYPTODEV_FF_HW_ACCELERATED |
2338 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2339 RTE_CRYPTODEV_FF_SECURITY |
2340 RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2341 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2342 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2343 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2344 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2345
2346 /* For secondary processes, don't initialise any further, as the
2347 * primary process has already done this work.
2348 */
2350 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2351 CAAM_JR_WARN("Device already init by primary process");
2352 return 0;
2353 }
2354
2355 /* TODO: free this during teardown */
2356 security_instance = rte_malloc("caam_jr",
2357 sizeof(struct rte_security_ctx), 0);
2358 if (security_instance == NULL) {
2359 CAAM_JR_ERR("memory allocation failed\n");
2360 //todo error handling.
2361 goto cleanup2;
2362 }
2363
2364 security_instance->device = (void *)dev;
2365 security_instance->ops = &caam_jr_security_ops;
2366 security_instance->sess_cnt = 0;
2367 dev->security_ctx = security_instance;
2368
2369 RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
2370
2371 return 0;
2372
2373 cleanup2:
2374 caam_jr_dev_uninit(dev);
2375 rte_cryptodev_pmd_release_device(dev);
2376 cleanup:
2377 free_job_ring(job_ring->uio_fd);
2378 init_error:
2379 CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
2380 init_params->name);
2381
2382 return -ENXIO;
2383 }
2384
2385 /** Initialise CAAM JR crypto device */
2386 static int
2387 cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
2388 {
2389 struct rte_cryptodev_pmd_init_params init_params = {
2390 "",
2391 sizeof(struct sec_job_ring_t),
2392 rte_socket_id(),
2393 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
2394 };
2395 const char *name;
2396 const char *input_args;
2397
2398 name = rte_vdev_device_name(vdev);
2399 if (name == NULL)
2400 return -EINVAL;
2401
2402 input_args = rte_vdev_device_args(vdev);
2403 rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
2404
2405 /* If the SEC era is not configured, read it from the device tree */
2406 if (!rta_get_sec_era()) {
2407 const struct device_node *caam_node;
2408
2409 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2410 const uint32_t *prop = of_get_property(caam_node,
2411 "fsl,sec-era",
2412 NULL);
2413 if (prop) {
2414 rta_set_sec_era(
2415 INTL_SEC_ERA(cpu_to_caam32(*prop)));
2416 break;
2417 }
2418 }
2419 }
2420 #ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
2421 if (rta_get_sec_era() > RTA_SEC_ERA_8) {
2422 RTE_LOG(ERR, PMD,
2423 "CAAM is compiled in BE mode for device with sec era > 8???\n");
2424 return -EINVAL;
2425 }
2426 #endif
2427
2428 return caam_jr_dev_init(name, vdev, &init_params);
2429 }
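/*
 * Usage sketch (illustrative): the PMD is typically instantiated from
 * the EAL command line with the vdev parameters registered below:
 *
 *	./dpdk-app -l 0-1 \
 *		--vdev="crypto_caam_jr,max_nb_queue_pairs=1,socket_id=0"
 */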
2430
2431 /** Uninitialise CAAM JR crypto device */
2432 static int
2433 cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
2434 {
2435 struct rte_cryptodev *cryptodev;
2436 const char *name;
2437
2438 name = rte_vdev_device_name(vdev);
2439 if (name == NULL)
2440 return -EINVAL;
2441
2442 cryptodev = rte_cryptodev_pmd_get_named_dev(name);
2443 if (cryptodev == NULL)
2444 return -ENODEV;
2445
2446 caam_jr_dev_uninit(cryptodev);
2447
2448 return rte_cryptodev_pmd_destroy(cryptodev);
2449 }
2450
2451 static void
2452 sec_job_rings_init(void)
2453 {
2454 int i;
2455
2456 for (i = 0; i < MAX_SEC_JOB_RINGS; i++)
2457 g_job_rings[i].irq_fd = -1;
2458 }
2459
2460 static struct rte_vdev_driver cryptodev_caam_jr_drv = {
2461 .probe = cryptodev_caam_jr_probe,
2462 .remove = cryptodev_caam_jr_remove
2463 };
2464
2465 static struct cryptodev_driver caam_jr_crypto_drv;
2466
2467 RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
2468 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
2469 "max_nb_queue_pairs=<int>"
2470 "socket_id=<int>");
2471 RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
2472 cryptodev_driver_id);
2473
2474 RTE_INIT(caam_jr_init)
2475 {
2476 sec_uio_job_rings_init();
2477 sec_job_rings_init();
2478 }
2479
2480 RTE_INIT(caam_jr_init_log)
2481 {
2482 caam_jr_logtype = rte_log_register("pmd.crypto.caam");
2483 if (caam_jr_logtype >= 0)
2484 rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);
2485 }