/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2015-2019 Intel Corporation
 */

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */
#include <openssl/evp.h>	/* Needed for bpi runt block processing */

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "qat_logs.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha1InitialState[] = {
	0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba,
	0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0};

/* SHA 224 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha224InitialState[] = {
	0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd,
	0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58,
	0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha256InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
	0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
	0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha384InitialState[] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
	0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
	0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
	0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
	0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
	0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static const uint8_t sha512InitialState[] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
	0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
	0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
	0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
	0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
	0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79};

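/*
 * The arrays above are the big-endian byte encodings of the initial hash
 * values H(0) from FIPS 180-2. For plain-hash (MODE0, non-HMAC) sessions
 * they are copied verbatim into state1 of the content descriptor so the
 * hardware starts from the standard initial state.
 */
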
/** Frees a cipher context previously created by bpi_cipher_ctx_init()
 * Depends on openssl libcrypto
 */
static void
bpi_cipher_ctx_free(void *bpi_ctx)
{
	if (bpi_ctx != NULL)
		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
}

/** Creates an AES or DES cipher context in ECB mode
 * Depends on openssl libcrypto
 */
static int
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
		enum rte_crypto_cipher_operation direction __rte_unused,
		const uint8_t *key, uint16_t key_length, void **ctx)
{
	const EVP_CIPHER *algo = NULL;
	int ret;
	*ctx = EVP_CIPHER_CTX_new();

	if (*ctx == NULL) {
		ret = -ENOMEM;
		goto ctx_init_err;
	}

	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
		algo = EVP_des_ecb();
	else
		if (key_length == ICP_QAT_HW_AES_128_KEY_SZ)
			algo = EVP_aes_128_ecb();
		else
			algo = EVP_aes_256_ecb();

	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
	if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
		ret = -EINVAL;
		goto ctx_init_err;
	}

	return 0;

ctx_init_err:
	if (*ctx != NULL)
		EVP_CIPHER_CTX_free(*ctx);
	return ret;
}
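
/*
 * Note on DOCSIS BPI runt blocks: the hardware handles the full blocks of a
 * DOCSIS BPI operation in CBC mode, and the ECB context created above is
 * (roughly) used only for the trailing partial "runt" block. CFB-style, the
 * previous ciphertext block (or the IV) is ECB-encrypted with this context
 * and XORed with the runt bytes, which is why the context is always
 * initialised for encryption regardless of the session direction.
 */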

static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
		struct qat_sym_dev_private *internals)
{
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			continue;

		if (capability->sym.cipher.algo == algo)
			return 1;
	}
	return 0;
}

static int
qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
		struct qat_sym_dev_private *internals)
{
	int i = 0;
	const struct rte_cryptodev_capabilities *capability;

	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
			continue;

		if (capability->sym.auth.algo == algo)
			return 1;
	}
	return 0;
}

void
qat_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;

	if (sess_priv) {
		if (s->bpi_ctx)
			bpi_cipher_ctx_free(s->bpi_ctx);
		memset(s, 0, qat_sym_session_get_private_size(dev));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

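/*
 * In outline, qat_get_cmd_id() maps an xform chain onto a firmware LA
 * command id:
 *   CIPHER only      -> ICP_QAT_FW_LA_CMD_CIPHER
 *   AUTH only        -> ICP_QAT_FW_LA_CMD_AUTH
 *   CIPHER then AUTH -> ICP_QAT_FW_LA_CMD_CIPHER_HASH
 *   AUTH then CIPHER -> ICP_QAT_FW_LA_CMD_HASH_CIPHER
 *   AEAD             -> one of the two combined commands, chosen by
 *                       algorithm and direction (see the comment below)
 * Any other chain is rejected with -1.
 */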
static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	/* AEAD */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		/* AES-GCM and AES-CCM work in opposite orders: GCM
		 * encrypts first and then generates the hash, whereas
		 * AES-CCM generates the hash first and then encrypts.
		 * The same relation applies to decryption.
		 */
		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			else
				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
		else
			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			else
				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	}

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}

static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

int
qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct qat_sym_session *session)
{
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	int ret;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	session->cipher_iv.offset = cipher_xform->iv.offset;
	session->cipher_iv.length = cipher_xform->iv.length;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_sym_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_cipher_alg = ICP_QAT_HW_CIPHER_ALGO_NULL;
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid KASUMI cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_sym_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid 3DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create DES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid DES cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		ret = bpi_cipher_ctx_init(
					cipher_xform->algo,
					cipher_xform->op,
					cipher_xform->key.data,
					cipher_xform->key.length,
					&session->bpi_ctx);
		if (ret != 0) {
			QAT_LOG(ERR, "failed to create AES BPI ctx");
			goto error_out;
		}
		if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		if (!qat_is_cipher_alg_supported(
			cipher_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_cipher_algorithm_strings
					[cipher_xform->algo]);
			ret = -ENOTSUP;
			goto error_out;
		}
		if (qat_sym_validate_zuc_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid ZUC cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_XTS:
		if ((cipher_xform->key.length/2) == ICP_QAT_HW_AES_192_KEY_SZ) {
			QAT_LOG(ERR, "AES-XTS-192 not supported");
			ret = -EINVAL;
			goto error_out;
		}
		if (qat_sym_validate_aes_key((cipher_xform->key.length/2),
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES-XTS cipher key size");
			ret = -EINVAL;
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_XTS_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
		QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		ret = -ENOTSUP;
		goto error_out;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
				cipher_xform->algo);
		ret = -EINVAL;
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_sym_session_aead_create_cd_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length)) {
		ret = -EINVAL;
		goto error_out;
	}

	return 0;

error_out:
	if (session->bpi_ctx) {
		bpi_cipher_ctx_free(session->bpi_ctx);
		session->bpi_ctx = NULL;
	}
	return ret;
}

int
qat_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		QAT_LOG(ERR,
			"Crypto QAT PMD: failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
		sess_private_data);

	return 0;
}

static void
qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
		uint8_t hash_flag)
{
	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
			session->fw_req.cd_ctrl.content_desc_ctrl_lw;

	/* Set the Use Extended Protocol Flags bit in LW 1 */
	QAT_FIELD_SET(header->comn_req_flags,
			QAT_COMN_EXT_FLAGS_USED,
			QAT_COMN_EXT_FLAGS_BITPOS,
			QAT_COMN_EXT_FLAGS_MASK);

	/* Set Hash Flags in LW 28 */
	cd_ctrl->hash_flags |= hash_flag;

	/* Set proto flags in LW 1 */
	switch (session->qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_SNOW_3G_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	default:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
				header->serv_specif_flags, 0);
		break;
	}
}

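/*
 * "Mixed" sessions pair a wireless hash (SNOW 3G UIA2 / ZUC EIA3) with a
 * different cipher algorithm, or a wireless cipher with AES-CMAC or NULL
 * auth. These need the extended-protocol hash flags set by the function
 * above, and require at least QAT_GEN2 when the device advertises
 * QAT_SYM_CAP_MIXED_CRYPTO, QAT_GEN3 otherwise.
 */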
static void
qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
		struct qat_sym_session *session)
{
	const struct qat_sym_dev_private *qat_private = dev->data->dev_private;
	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;

	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
			session->qat_cipher_alg !=
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session,
			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
	} else if ((session->aes_cmac ||
			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
			(session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			session->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
		session->min_qat_dev_gen = min_dev_gen;
		qat_sym_session_set_ext_hash_flags(session, 0);
	}
}

int
qat_sym_session_set_parameters(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_sym_session *session = session_private;
	int ret;
	int qat_cmd_id;

	/* Set context descriptor physical address */
	session->cd_paddr = rte_mempool_virt2iova(session) +
			offsetof(struct qat_sym_session, cd);

	session->min_qat_dev_gen = QAT_GEN1;

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		QAT_LOG(ERR, "Unsupported xform chain requested");
		return -ENOTSUP;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		ret = qat_sym_session_configure_cipher(dev, xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		ret = qat_sym_session_configure_auth(dev, xform, session);
		if (ret < 0)
			return ret;
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			/* Special handling of mixed hash+cipher algorithms */
			qat_sym_session_handle_mixed(dev, session);
		}
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			ret = qat_sym_session_configure_aead(dev, xform,
					session);
			if (ret < 0)
				return ret;
		} else {
			ret = qat_sym_session_configure_auth(dev,
					xform, session);
			if (ret < 0)
				return ret;
			ret = qat_sym_session_configure_cipher(dev,
					xform, session);
			if (ret < 0)
				return ret;
			/* Special handling of mixed hash+cipher algorithms */
			qat_sym_session_handle_mixed(dev, session);
		}
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		QAT_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		return -ENOTSUP;
	}

	return 0;
}

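/*
 * Single-pass GCM: on GEN3 devices, AES-GCM with the 12-byte IV that GCM
 * favours can run as one pass through the cipher slice instead of a chained
 * cipher+hash, with the AAD and digest sizes carried in the cipher request
 * parameters. The function below sets this up when the conditions hold and
 * leaves the session untouched otherwise.
 */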
static int
qat_sym_session_handle_single_pass(struct qat_sym_dev_private *internals,
		struct qat_sym_session *session,
		struct rte_crypto_aead_xform *aead_xform)
{
	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;

	if (qat_dev_gen == QAT_GEN3 &&
			aead_xform->iv.length == QAT_AES_GCM_SPC_IV_SIZE) {
		/* Use faster Single-Pass GCM */
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
				(void *) &session->fw_req.serv_specif_rqpars;

		session->is_single_pass = 1;
		session->min_qat_dev_gen = QAT_GEN3;
		session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
		session->cipher_iv.offset = aead_xform->iv.offset;
		session->cipher_iv.length = aead_xform->iv.length;
		if (qat_sym_session_aead_create_cd_cipher(session,
				aead_xform->key.data, aead_xform->key.length))
			return -EINVAL;
		session->aad_len = aead_xform->aad_length;
		session->digest_length = aead_xform->digest_length;
		if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
			ICP_QAT_FW_LA_RET_AUTH_SET(
				session->fw_req.comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_RET_AUTH_RES);
		} else {
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
			ICP_QAT_FW_LA_CMP_AUTH_SET(
				session->fw_req.comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_CMP_AUTH_RES);
		}
		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
				session->fw_req.comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
		ICP_QAT_FW_LA_PROTO_SET(
				session->fw_req.comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
				session->fw_req.comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		session->fw_req.comn_hdr.service_cmd_id =
				ICP_QAT_FW_LA_CMD_CIPHER;
		session->cd.cipher.cipher_config.val =
				ICP_QAT_HW_CIPHER_CONFIG_BUILD(
					ICP_QAT_HW_CIPHER_AEAD_MODE,
					session->qat_cipher_alg,
					ICP_QAT_HW_CIPHER_NO_CONVERT,
					session->qat_dir);
		QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
				aead_xform->digest_length,
				QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
				QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
		session->cd.cipher.cipher_config.reserved =
				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
					aead_xform->aad_length);
		cipher_param->spc_aad_sz = aead_xform->aad_length;
		cipher_param->spc_auth_res_sz = aead_xform->digest_length;
	}
	return 0;
}

int
qat_sym_session_configure_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
	struct qat_sym_dev_private *internals = dev->data->dev_private;
	const uint8_t *key_data = auth_xform->key.data;
	uint8_t key_length = auth_xform->key.length;
	session->aes_cmac = 0;

	session->auth_iv.offset = auth_xform->iv.offset;
	session->auth_iv.length = auth_xform->iv.length;
	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA224:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA256:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA384:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA512:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		session->auth_mode = ICP_QAT_HW_AUTH_MODE0;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		session->aes_cmac = 1;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		if (qat_sym_validate_aes_key(auth_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		if (session->auth_iv.length == 0)
			session->auth_iv.length = AES_GCM_J0_LEN;

		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
			QAT_LOG(ERR, "%s not supported on this device",
				rte_crypto_auth_algorithm_strings
				[auth_xform->algo]);
			return -ENOTSUP;
		}
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
		break;
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		return -ENOTSUP;
	default:
		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		return -EINVAL;
	}

	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
			/*
			 * The cipher descriptor content must be created
			 * first, then the authentication descriptor
			 */

			if (qat_sym_session_aead_create_cd_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;

			if (qat_sym_session_aead_create_cd_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;
		} else {
			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
			/*
			 * The authentication descriptor content must be
			 * created first, then the cipher descriptor
			 */

			if (qat_sym_session_aead_create_cd_auth(session,
						key_data,
						key_length,
						0,
						auth_xform->digest_length,
						auth_xform->op))
				return -EINVAL;

			if (qat_sym_session_aead_create_cd_cipher(session,
						auth_xform->key.data,
						auth_xform->key.length))
				return -EINVAL;
		}
		/* Restore to authentication only */
		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	} else {
		if (qat_sym_session_aead_create_cd_auth(session,
				key_data,
				key_length,
				0,
				auth_xform->digest_length,
				auth_xform->op))
			return -EINVAL;
	}

	session->digest_length = auth_xform->digest_length;
	return 0;
}

int
qat_sym_session_configure_aead(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_sym_session *session)
{
	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
	enum rte_crypto_auth_operation crypto_operation;

	/*
	 * Store AEAD IV parameters as cipher IV,
	 * to avoid unnecessary memory usage
	 */
	session->cipher_iv.offset = xform->aead.iv.offset;
	session->cipher_iv.length = xform->aead.iv.length;

	session->auth_mode = ICP_QAT_HW_AUTH_MODE1;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		if (session->cipher_iv.length == 0)
			session->cipher_iv.length = AES_GCM_J0_LEN;

		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		if (qat_sym_validate_aes_key(aead_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			QAT_LOG(ERR, "Invalid AES key size");
			return -EINVAL;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
		break;
	default:
		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
				aead_xform->algo);
		return -EINVAL;
	}

	session->is_single_pass = 0;
	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
		/* Use faster Single-Pass GCM if possible */
		int res = qat_sym_session_handle_single_pass(
				dev->data->dev_private, session, aead_xform);
		if (res < 0)
			return res;
		if (session->is_single_pass)
			return 0;
	}

	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
			aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		/*
		 * The cipher descriptor content must be created first,
		 * then the authentication descriptor
		 */
		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;

		if (qat_sym_session_aead_create_cd_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;

		if (qat_sym_session_aead_create_cd_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;
	} else {
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
		/*
		 * The authentication descriptor content must be created
		 * first, then the cipher descriptor
		 */

		crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
			RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;

		if (qat_sym_session_aead_create_cd_auth(session,
					aead_xform->key.data,
					aead_xform->key.length,
					aead_xform->aad_length,
					aead_xform->digest_length,
					crypto_operation))
			return -EINVAL;

		if (qat_sym_session_aead_create_cd_cipher(session,
					aead_xform->key.data,
					aead_xform->key.length))
			return -EINVAL;
	}

	session->digest_length = aead_xform->digest_length;
	return 0;
}

unsigned int qat_sym_session_get_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
}

/* returns block size in bytes per cipher algo */
int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
{
	switch (qat_cipher_alg) {
	case ICP_QAT_HW_CIPHER_ALGO_DES:
		return ICP_QAT_HW_DES_BLK_SZ;
	case ICP_QAT_HW_CIPHER_ALGO_3DES:
		return ICP_QAT_HW_3DES_BLK_SZ;
	case ICP_QAT_HW_CIPHER_ALGO_AES128:
	case ICP_QAT_HW_CIPHER_ALGO_AES192:
	case ICP_QAT_HW_CIPHER_ALGO_AES256:
		return ICP_QAT_HW_AES_BLK_SZ;
	default:
		QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
		return -EFAULT;
	};
	return -EFAULT;
}

/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	default:
		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	};
	return -EFAULT;
}

/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return ICP_QAT_HW_SHA224_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return ICP_QAT_HW_SHA384_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return ICP_QAT_HW_MD5_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	};
	return -EFAULT;
}

/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return ICP_QAT_HW_AES_BLK_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	};
	return -EFAULT;
}

static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX ctx;

	if (!SHA1_Init(&ctx))
		return -EFAULT;
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA224_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA384_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
	MD5_CTX ctx;

	if (!MD5_Init(&ctx))
		return -EFAULT;
	MD5_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);

	return 0;
}

static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3

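/*
 * All-zero 16-byte block: for AES-CMAC (NIST SP 800-38B) the subkey chain
 * starts from L = AES-K(0^128), so this constant is the block that
 * qat_sym_do_precomputes() encrypts to obtain k0.
 */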
static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];

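/*
 * CMAC subkey derivation per NIST SP 800-38B: derived = base << 1 (treating
 * the block as a 128-bit big-endian value) and, if the MSB of base was set,
 * the last byte is XORed with the constant Rb (QAT_AES_CMAC_CONST_RB, 0x87
 * for 128-bit blocks). For example, a base of 0x80 00 .. 00 derives to
 * 0x00 .. 00 87.
 */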
static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
{
	int i;

	derived[0] = base[0] << 1;
	for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ; i++) {
		derived[i] = base[i] << 1;
		derived[i - 1] |= base[i] >> 7;
	}

	if (base[0] & 0x80)
		derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
}

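/*
 * Precompute hardware hash state. For HMAC modes, state1 and state2 are
 * filled with one compression-function application each (via the
 * partial_hash_* helpers above): state1 = H(key XOR ipad) and
 * state2 = H(key XOR opad), so the device can resume the inner and outer
 * hashes without seeing the key again. For AES-XCBC/CMAC the K1..K3 subkeys
 * are derived instead, and for GCM/GMAC the hash key H = AES-K(0^128) is
 * computed.
 */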
static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len,
				uint8_t aes_cmac)
{
	int block_size;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i;

	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {

		/* CMAC */
		if (aes_cmac) {
			AES_KEY enc_key;
			uint8_t *in = NULL;
			uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
			uint8_t *k1, *k2;

			auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;

			in = rte_zmalloc("AES CMAC K1",
					 ICP_QAT_HW_AES_128_KEY_SZ, 16);

			if (in == NULL) {
				QAT_LOG(ERR, "Failed to alloc memory");
				return -ENOMEM;
			}

			rte_memcpy(in, AES_CMAC_SEED,
				   ICP_QAT_HW_AES_128_KEY_SZ);
			rte_memcpy(p_state_buf, auth_key, auth_keylen);

			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
				&enc_key) != 0) {
				rte_free(in);
				return -EFAULT;
			}

			AES_encrypt(in, k0, &enc_key);

			k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
			k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;

			aes_cmac_key_derive(k0, k1);
			aes_cmac_key_derive(k1, k2);

			memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
			rte_free(in);
			return 0;
		} else {
			static uint8_t qat_aes_xcbc_key_seed[
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
				0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
				0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
				0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			};

			uint8_t *in = NULL;
			uint8_t *out = p_state_buf;
			int x;
			AES_KEY enc_key;

			in = rte_zmalloc("working mem for key",
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
			if (in == NULL) {
				QAT_LOG(ERR, "Failed to alloc memory");
				return -ENOMEM;
			}

			rte_memcpy(in, qat_aes_xcbc_key_seed,
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
			for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
				if (AES_set_encrypt_key(auth_key,
							auth_keylen << 3,
							&enc_key) != 0) {
					rte_free(in -
					  (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
					memset(out -
					   (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					  0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
					return -EFAULT;
				}
				AES_encrypt(in, out, &enc_key);
				in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
				out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			}
			*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
			rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
			return 0;
		}

	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		AES_KEY enc_key;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
		if (in == NULL) {
			QAT_LOG(ERR, "Failed to alloc memory");
			return -ENOMEM;
		}

		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
			&enc_key) != 0) {
			rte_free(in);	/* don't leak the working buffer on error */
			return -EFAULT;
		}
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		rte_free(in);
		return 0;
	}

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size < 0)
		return block_size;
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
		return -EFAULT;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		QAT_LOG(ERR, "ipad precompute failed");
		return -EFAULT;
	}

	/*
	 * State length is a multiple of 8, so it may be larger than the
	 * digest. Put the partial hash of opad state_len bytes after state1.
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		QAT_LOG(ERR, "opad precompute failed");
		return -EFAULT;
	}

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	return 0;
}

static void
qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
					enum qat_sym_proto_flag proto_flags)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);

	switch (proto_flags) {
	case QAT_CRYPTO_PROTO_FLAG_NONE:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_CCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_GCM:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_GCM_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_SNOW_3G_PROTO);
		break;
	case QAT_CRYPTO_PROTO_FLAG_ZUC:
		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
			ICP_QAT_FW_LA_ZUC_3G_PROTO);
		break;
	}

	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}

/*
 * SNOW 3G and ZUC must never use this function; they set their protocol
 * flags directly in both the cipher and auth parts of the content
 * descriptor building functions.
 */
static enum qat_sym_proto_flag
qat_get_crypto_proto_flag(uint16_t flags)
{
	int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
	enum qat_sym_proto_flag qat_proto_flag =
			QAT_CRYPTO_PROTO_FLAG_NONE;

	switch (proto) {
	case ICP_QAT_FW_LA_GCM_PROTO:
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
		break;
	case ICP_QAT_FW_LA_CCM_PROTO:
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
		break;
	}

	return qat_proto_flag;
}

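/*
 * Build the cipher half of the content descriptor: a cipher config word
 * (mode, algorithm, direction, key-convert setting) followed by the key
 * material, padded to the size the hardware expects (short 3DES keys are
 * replicated below, and KASUMI gets a second key copy XORed with the F8
 * modifier). key_convert asks the firmware to transform the key first
 * where the algorithm or direction requires it.
 */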
int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
						const uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	enum qat_sym_proto_flag qat_proto_flag =
		QAT_CRYPTO_PROTO_FLAG_NONE;
	uint32_t total_key_size;
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex = 0;
	uint32_t *temp_key = NULL;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		QAT_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR streaming ciphers are a special case: decrypt is the
		 * same operation as encrypt, so override the direction set
		 * previously
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
		|| cdesc->qat_cipher_alg ==
			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	} else if (cdesc->qat_cipher_alg ==
		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
		cdesc->min_qat_dev_gen = QAT_GEN2;
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		qat_proto_flag =
			qat_get_crypto_proto_flag(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_sym_session_init_common_hdr(header, qat_proto_flag);

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
	cipher->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (total_key_size > cipherkeylen) {
		uint32_t padding_size = total_key_size-cipherkeylen;
		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
			/* K3 not provided, so use K3 = K1 */
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		} else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
			/* K2 and K3 not provided, so use K2 = K3 = K1 */
1633 memcpy(cdesc->cd_cur_ptr, cipherkey,
1634 cipherkeylen);
1635 memcpy(cdesc->cd_cur_ptr+cipherkeylen,
1636 cipherkey, cipherkeylen);
1637 } else
1638 memset(cdesc->cd_cur_ptr, 0, padding_size);
1639
1640 cdesc->cd_cur_ptr += padding_size;
1641 }
1642 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
1643 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
1644
1645 return 0;
1646 }
1647
1648 int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
1649 const uint8_t *authkey,
1650 uint32_t authkeylen,
1651 uint32_t aad_length,
1652 uint32_t digestsize,
1653 unsigned int operation)
1654 {
1655 struct icp_qat_hw_auth_setup *hash;
1656 struct icp_qat_hw_cipher_algo_blk *cipherconfig;
1657 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
1658 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
1659 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
1660 void *ptr = &req_tmpl->cd_ctrl;
1661 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
1662 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
1663 struct icp_qat_fw_la_auth_req_params *auth_param =
1664 (struct icp_qat_fw_la_auth_req_params *)
1665 ((char *)&req_tmpl->serv_specif_rqpars +
1666 ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
1667 uint16_t state1_size = 0, state2_size = 0, cd_extra_size = 0;
1668 uint16_t hash_offset, cd_size;
1669 uint32_t *aad_len = NULL;
1670 uint32_t wordIndex = 0;
1671 uint32_t *pTempKey;
1672 enum qat_sym_proto_flag qat_proto_flag =
1673 QAT_CRYPTO_PROTO_FLAG_NONE;
1674
1675 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1676 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1677 ICP_QAT_FW_SLICE_AUTH);
1678 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1679 ICP_QAT_FW_SLICE_DRAM_WR);
1680 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1681 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
1682 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
1683 ICP_QAT_FW_SLICE_AUTH);
1684 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
1685 ICP_QAT_FW_SLICE_CIPHER);
1686 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
1687 ICP_QAT_FW_SLICE_CIPHER);
1688 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
1689 ICP_QAT_FW_SLICE_DRAM_WR);
1690 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
1691 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1692 QAT_LOG(ERR, "Invalid param, must be a hash command.");
1693 return -EFAULT;
1694 }
1695
1696 if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
1697 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1698 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
1699 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1700 ICP_QAT_FW_LA_CMP_AUTH_RES);
1701 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
1702 } else {
1703 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
1704 ICP_QAT_FW_LA_RET_AUTH_RES);
1705 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
1706 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
1707 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
1708 }
1709
1710 /*
1711 * Setup the inner hash config
1712 */
1713 hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
1714 hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
1715 hash->auth_config.reserved = 0;
1716 hash->auth_config.config =
1717 ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode,
1718 cdesc->qat_hash_alg, digestsize);
1719
1720 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0
1721 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
1722 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
1723 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
1724 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
1725 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC
1726 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL
1727 )
1728 hash->auth_counter.counter = 0;
1729 else {
1730 int block_size = qat_hash_get_block_size(cdesc->qat_hash_alg);
1731
1732 if (block_size < 0)
1733 return block_size;
1734 hash->auth_counter.counter = rte_bswap32(block_size);
1735 }
1736
1737 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
1738
1739 /*
1740 * cd_cur_ptr now points at the state1 information.
1741 */
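	/*
	 * Editor's note (derived from how cd_cur_ptr advances in this
	 * function): starting at hash_offset, the auth portion of the
	 * content descriptor is laid out as
	 *
	 *   [icp_qat_hw_auth_setup][state1][state2][algorithm extras]
	 *
	 * state1/state2 sizes are algorithm-specific (set per case below);
	 * the extras area (cd_extra_size) holds, e.g., the embedded SNOW 3G
	 * cipher config or the ZUC IV space.
	 */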
1742 switch (cdesc->qat_hash_alg) {
1743 case ICP_QAT_HW_AUTH_ALGO_SHA1:
1744 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1745 /* Plain SHA-1 */
1746 rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
1747 sizeof(sha1InitialState));
1748 state1_size = qat_hash_get_state1_size(
1749 cdesc->qat_hash_alg);
1750 break;
1751 }
1752 /* SHA-1 HMAC */
1753 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
1754 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1755 cdesc->aes_cmac)) {
1756 QAT_LOG(ERR, "(SHA)precompute failed");
1757 return -EFAULT;
1758 }
1759 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
1760 break;
1761 case ICP_QAT_HW_AUTH_ALGO_SHA224:
1762 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1763 /* Plain SHA-224 */
1764 rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
1765 sizeof(sha224InitialState));
1766 state1_size = qat_hash_get_state1_size(
1767 cdesc->qat_hash_alg);
1768 break;
1769 }
1770 /* SHA-224 HMAC */
1771 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
1772 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1773 cdesc->aes_cmac)) {
1774 QAT_LOG(ERR, "(SHA)precompute failed");
1775 return -EFAULT;
1776 }
1777 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
1778 break;
1779 case ICP_QAT_HW_AUTH_ALGO_SHA256:
1780 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1781 /* Plain SHA-256 */
1782 rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
1783 sizeof(sha256InitialState));
1784 state1_size = qat_hash_get_state1_size(
1785 cdesc->qat_hash_alg);
1786 break;
1787 }
1788 /* SHA-256 HMAC */
1789 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
1790 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1791 cdesc->aes_cmac)) {
1792 QAT_LOG(ERR, "(SHA)precompute failed");
1793 return -EFAULT;
1794 }
1795 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
1796 break;
1797 case ICP_QAT_HW_AUTH_ALGO_SHA384:
1798 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1799 /* Plain SHA-384 */
1800 rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
1801 sizeof(sha384InitialState));
1802 state1_size = qat_hash_get_state1_size(
1803 cdesc->qat_hash_alg);
1804 break;
1805 }
1806 /* SHA-384 HMAC */
1807 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
1808 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1809 cdesc->aes_cmac)) {
1810 QAT_LOG(ERR, "(SHA)precompute failed");
1811 return -EFAULT;
1812 }
1813 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
1814 break;
1815 case ICP_QAT_HW_AUTH_ALGO_SHA512:
1816 if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
1817 /* Plain SHA-512 */
1818 rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
1819 sizeof(sha512InitialState));
1820 state1_size = qat_hash_get_state1_size(
1821 cdesc->qat_hash_alg);
1822 break;
1823 }
1824 /* SHA-512 HMAC */
1825 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
1826 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1827 cdesc->aes_cmac)) {
1828 QAT_LOG(ERR, "(SHA)precompute failed");
1829 return -EFAULT;
1830 }
1831 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
1832 break;
1833 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
1834 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
1835
1836 if (cdesc->aes_cmac)
1837 memset(cdesc->cd_cur_ptr, 0, state1_size);
1838 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
1839 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
1840 &state2_size, cdesc->aes_cmac)) {
1841 			if (cdesc->aes_cmac)
1842 				QAT_LOG(ERR, "(CMAC)precompute failed");
1843 			else
1844 				QAT_LOG(ERR, "(XCBC)precompute failed");
1845 return -EFAULT;
1846 }
1847 break;
1848 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
1849 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
1850 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
1851 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
1852 if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
1853 authkeylen, cdesc->cd_cur_ptr + state1_size,
1854 &state2_size, cdesc->aes_cmac)) {
1855 QAT_LOG(ERR, "(GCM)precompute failed");
1856 return -EFAULT;
1857 }
1858 		/*
1859 		 * Write the 32-bit AAD length into bytes 16-19 of state2 in
1860 		 * big-endian format; the field is 8 bytes, only 4 are written.
1861 		 */
1862 auth_param->u2.aad_sz =
1863 RTE_ALIGN_CEIL(aad_length, 16);
1864 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
1865
1866 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
1867 ICP_QAT_HW_GALOIS_128_STATE1_SZ +
1868 ICP_QAT_HW_GALOIS_H_SZ);
1869 *aad_len = rte_bswap32(aad_length);
1870 cdesc->aad_len = aad_length;
1871 break;
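
	/*
	 * Editor's note on the GALOIS case above: aad_sz is the AAD length
	 * rounded up to a 16-byte multiple, and hash_state_sz expresses the
	 * same value in 8-byte quadwords. For example, aad_length = 20
	 * gives aad_sz = 32 and hash_state_sz = 4.
	 */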
1872 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
1873 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
1874 state1_size = qat_hash_get_state1_size(
1875 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
1876 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
1877 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1878
1879 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
1880 (cdesc->cd_cur_ptr + state1_size + state2_size);
1881 cipherconfig->cipher_config.val =
1882 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
1883 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
1884 ICP_QAT_HW_CIPHER_KEY_CONVERT,
1885 ICP_QAT_HW_CIPHER_ENCRYPT);
1886 memcpy(cipherconfig->key, authkey, authkeylen);
1887 memset(cipherconfig->key + authkeylen,
1888 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
1889 cd_extra_size += sizeof(struct icp_qat_hw_cipher_config) +
1890 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
1891 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
1892 break;
1893 case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
1894 hash->auth_config.config =
1895 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
1896 cdesc->qat_hash_alg, digestsize);
1897 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
1898 state1_size = qat_hash_get_state1_size(
1899 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
1900 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
1901 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
1902 + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
1903
1904 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1905 cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
1906 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
1907 cdesc->min_qat_dev_gen = QAT_GEN2;
1908
1909 break;
1910 case ICP_QAT_HW_AUTH_ALGO_MD5:
1911 if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
1912 authkeylen, cdesc->cd_cur_ptr, &state1_size,
1913 cdesc->aes_cmac)) {
1914 QAT_LOG(ERR, "(MD5)precompute failed");
1915 return -EFAULT;
1916 }
1917 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
1918 break;
1919 case ICP_QAT_HW_AUTH_ALGO_NULL:
1920 state1_size = qat_hash_get_state1_size(
1921 ICP_QAT_HW_AUTH_ALGO_NULL);
1922 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
1923 break;
1924 case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
1925 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
1926 state1_size = qat_hash_get_state1_size(
1927 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
1928 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
1929 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
1930
1931 if (aad_length > 0) {
1932 aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
1933 ICP_QAT_HW_CCM_AAD_LEN_INFO;
1934 auth_param->u2.aad_sz =
1935 RTE_ALIGN_CEIL(aad_length,
1936 ICP_QAT_HW_CCM_AAD_ALIGNMENT);
1937 } else {
1938 auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
1939 }
1940 cdesc->aad_len = aad_length;
1941 hash->auth_counter.counter = 0;
1942
1943 hash_cd_ctrl->outer_prefix_sz = digestsize;
1944 auth_param->hash_state_sz = digestsize;
1945
1946 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1947 break;
1948 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
1949 state1_size = qat_hash_get_state1_size(
1950 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
1951 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
1952 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
1953 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
1954 + authkeylen);
1955 /*
1956 * The Inner Hash Initial State2 block must contain IK
1957 * (Initialisation Key), followed by IK XOR-ed with KM
1958 * (Key Modifier): IK||(IK^KM).
1959 */
1960 /* write the auth key */
1961 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
1962 /* initialise temp key with auth key */
1963 memcpy(pTempKey, authkey, authkeylen);
1964 		/* XOR the key with the KASUMI F9 key modifier, one 32-bit word at a time */
1965 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
1966 pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
1967 break;
1968 default:
1969 QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
1970 return -EFAULT;
1971 }
1972
1973 /* Request template setup */
1974 qat_sym_session_init_common_hdr(header, qat_proto_flag);
1975 header->service_cmd_id = cdesc->qat_cmd;
1976
1977 /* Auth CD config setup */
1978 hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
1979 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
1980 hash_cd_ctrl->inner_res_sz = digestsize;
1981 hash_cd_ctrl->final_sz = digestsize;
1982 hash_cd_ctrl->inner_state1_sz = state1_size;
1983 auth_param->auth_res_sz = digestsize;
1984
1985 hash_cd_ctrl->inner_state2_sz = state2_size;
1986 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
1987 ((sizeof(struct icp_qat_hw_auth_setup) +
1988 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
1989 >> 3);
1990
1991 cdesc->cd_cur_ptr += state1_size + state2_size + cd_extra_size;
1992 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
1993
1994 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
1995 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
1996
1997 return 0;
1998 }
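
/*
 * Editor's note -- hypothetical usage sketch of the function above, not
 * driver code. It assumes a zero-initialised session whose cd_paddr
 * already holds the physical address of cd; the choice of
 * ICP_QAT_HW_AUTH_MODE1 as the HMAC mode and the 32-byte SHA-256 digest
 * size are assumptions for illustration.
 */
static int
qat_sha256_hmac_verify_cd_sketch(struct qat_sym_session *sess,
		const uint8_t *key, uint32_t keylen)
{
	sess->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
	sess->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
	sess->auth_mode = ICP_QAT_HW_AUTH_MODE1;	/* HMAC (assumed) */

	/* No AAD for plain HMAC; the verify direction makes the firmware
	 * compare the digest rather than return it. */
	return qat_sym_session_aead_create_cd_auth(sess, key, keylen,
			0 /* aad_length */, 32 /* digestsize */,
			RTE_CRYPTO_AUTH_OP_VERIFY);
}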
1999
2000 int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2001 {
2002 switch (key_len) {
2003 case ICP_QAT_HW_AES_128_KEY_SZ:
2004 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2005 break;
2006 case ICP_QAT_HW_AES_192_KEY_SZ:
2007 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
2008 break;
2009 case ICP_QAT_HW_AES_256_KEY_SZ:
2010 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2011 break;
2012 default:
2013 return -EINVAL;
2014 }
2015 return 0;
2016 }
2017
2018 int qat_sym_validate_aes_docsisbpi_key(int key_len,
2019 enum icp_qat_hw_cipher_algo *alg)
2020 {
2021 switch (key_len) {
2022 case ICP_QAT_HW_AES_128_KEY_SZ:
2023 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
2024 break;
2025 case ICP_QAT_HW_AES_256_KEY_SZ:
2026 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
2027 break;
2028 default:
2029 return -EINVAL;
2030 }
2031 return 0;
2032 }
2033
2034 int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2035 {
2036 switch (key_len) {
2037 case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
2038 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
2039 break;
2040 default:
2041 return -EINVAL;
2042 }
2043 return 0;
2044 }
2045
2046 int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2047 {
2048 switch (key_len) {
2049 case ICP_QAT_HW_KASUMI_KEY_SZ:
2050 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
2051 break;
2052 default:
2053 return -EINVAL;
2054 }
2055 return 0;
2056 }
2057
2058 int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2059 {
2060 switch (key_len) {
2061 case ICP_QAT_HW_DES_KEY_SZ:
2062 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
2063 break;
2064 default:
2065 return -EINVAL;
2066 }
2067 return 0;
2068 }
2069
2070 int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2071 {
2072 switch (key_len) {
2073 case QAT_3DES_KEY_SZ_OPT1:
2074 case QAT_3DES_KEY_SZ_OPT2:
2075 case QAT_3DES_KEY_SZ_OPT3:
2076 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
2077 break;
2078 default:
2079 return -EINVAL;
2080 }
2081 return 0;
2082 }
2083
2084 int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
2085 {
2086 switch (key_len) {
2087 case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
2088 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
2089 break;
2090 default:
2091 return -EINVAL;
2092 }
2093 return 0;
2094 }
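
/*
 * Editor's note -- illustrative sketch, not driver code. The
 * qat_sym_validate_*_key() helpers above share one shape: map a key
 * length to a hardware algorithm enum, or return -EINVAL. A
 * hypothetical caller could dispatch on the rte_crypto cipher
 * algorithm like this; the mapping chosen here is an assumption for
 * illustration.
 */
static int
qat_pick_hw_cipher_alg_sketch(enum rte_crypto_cipher_algorithm algo,
		int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		return qat_sym_validate_aes_key(key_len, alg);
	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
		return qat_sym_validate_aes_docsisbpi_key(key_len, alg);
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		return qat_sym_validate_snow3g_key(key_len, alg);
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		return qat_sym_validate_kasumi_key(key_len, alg);
	case RTE_CRYPTO_CIPHER_DES_CBC:
		return qat_sym_validate_des_key(key_len, alg);
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		return qat_sym_validate_3des_key(key_len, alg);
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		return qat_sym_validate_zuc_key(key_len, alg);
	default:
		return -EINVAL;
	}
}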