/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 * Copyright(c) 2015-2016 Intel Corporation.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 * qat-linux@intel.com
 *
 * BSD LICENSE
 * Copyright(c) 2015-2016 Intel Corporation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */

/*
 * Returns the size in bytes of the state1 field in cd_ctrl for the given
 * hash algorithm: the digest size rounded up to the nearest quadword.
 */
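/*
 * For example, SHA-1's 20-byte digest rounds up to a 24-byte state1 field
 * (assuming QAT_HW_DEFAULT_ALIGNMENT is the usual 8-byte quadword).
 */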
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return ICP_QAT_HW_SHA224_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return ICP_QAT_HW_SHA384_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return ICP_QAT_HW_MD5_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

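/*
 * The partial_hash_* helpers below each run exactly one pass of the
 * underlying compression function (SHA*_Transform/MD5_Transform) over a
 * single input block and copy out the raw internal state, with no length
 * padding or finalisation. This "partial hash" state is what the hardware
 * expects as precomputed HMAC state1/state2 material.
 */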
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX ctx;

	if (!SHA1_Init(&ctx))
		return -EFAULT;
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA224_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA384_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
	MD5_CTX ctx;

	if (!MD5_Init(&ctx))
		return -EFAULT;
	MD5_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);

	return 0;
}

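/*
 * OpenSSL keeps the SHA state words as native integers (little-endian on
 * the platforms this driver targets), while the hardware expects them
 * big-endian; hence the rte_bswap32/64 loops below for the SHA variants.
 * MD5 state is little-endian by specification, so it is copied unchanged.
 */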
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	PMD_INIT_FUNC_TRACE();
	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest) + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest) + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest) + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest) + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest) + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}

#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3

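/*
 * qat_alg_do_precomputes() builds the precomputed key material the hardware
 * needs for the selected auth algorithm:
 *  - AES-XCBC-MAC: the derived keys K1, K2 and K3, obtained by encrypting
 *    the constant blocks 0x01..01, 0x02..02 and 0x03..03 with the auth key
 *    (per RFC 3566);
 *  - GCM/GMAC: the GHASH key H = AES_K(0^128), plus zeroed len_a/ctr0 fields;
 *  - HMAC algorithms: state1 = inner partial hash of (key XOR ipad) and
 *    state2 = outer partial hash of (key XOR opad).
 */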
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len)
{
	int block_size;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i;

	PMD_INIT_FUNC_TRACE();
	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
		static uint8_t qat_aes_xcbc_key_seed[
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
		};

		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		int x;
		AES_KEY enc_key;

		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
		if (in == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate key working mem");
			return -ENOMEM;
		}
		rte_memcpy(in, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
				&enc_key) != 0) {
				rte_free(in -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
				memset(out -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
				return -EFAULT;
			}
			AES_encrypt(in, out, &enc_key);
			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		}
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
		return 0;
	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		AES_KEY enc_key;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
		if (in == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate key working mem");
			return -ENOMEM;
		}
		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
			&enc_key) != 0) {
			rte_free(in);
			return -EFAULT;
		}
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		rte_free(in);
		return 0;
	}

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size <= 0)
		return -EFAULT;
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
		return -EFAULT;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "ipad precompute failed");
		return -EFAULT;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1.
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "opad precompute failed");
		return -EFAULT;
	}

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	return 0;
}

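/*
 * Populate the parts of the request header common to all symmetric-crypto
 * requests: flat (non-SGL) buffer pointers, a 64-bit content-descriptor
 * address, a 16-byte IV field, no partial-packet processing and no state
 * updates.
 */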
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
			uint16_t proto)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				proto);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

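/*
 * Build the cipher half of the content descriptor: write the cipher config
 * word and the (possibly padded or modifier-XORed) key into the descriptor,
 * and record the key size, state size and config offset in the cipher
 * cd_ctrl header.
 */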
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint32_t total_key_size;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex = 0;
	uint32_t *temp_key = NULL;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR-mode stream ciphers are a special case: decrypt is the
		 * same operation as encrypt, so override the direction set
		 * previously.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
			(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_alg_init_common_hdr(header, proto);

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;

	cipher->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
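		/*
		 * (The f8 key modifier KM is the byte 0x55 repeated, so each
		 * 32-bit word is XORed with 0x55555555, per the 3GPP spec.)
		 */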
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
				wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (total_key_size > cipherkeylen) {
		uint32_t padding_size = total_key_size - cipherkeylen;

		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
			/* K3 not provided, so use K1 = K3 */
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		else
			memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
	}
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}

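/*
 * Build the auth half of the content descriptor: write the hash config word
 * and the precomputed state1/state2 material for the selected algorithm,
 * then fill in the auth cd_ctrl header and per-request auth parameters.
 */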
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
						uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t add_auth_data_length,
						uint32_t digestsize,
						unsigned int operation)
{
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
	uint16_t state1_size = 0, state2_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;
	uint32_t wordIndex = 0;
	uint32_t *pTempKey;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
		return -EFAULT;
	}

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CMP_AUTH_RES);
		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
	} else {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
	}

	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
				cdesc->qat_hash_alg, digestsize);

	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
		hash->auth_counter.counter = 0;
	else
		hash->auth_counter.counter = rte_bswap32(
				qat_hash_get_block_size(cdesc->qat_hash_alg));

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		proto = ICP_QAT_FW_LA_GCM_PROTO;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes.
		 */
		auth_param->u2.aad_sz =
			RTE_ALIGN_CEIL(add_auth_data_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
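		/*
		 * (hash_state_sz, like the other >> 3 conversions in this
		 * file, is expressed in 8-byte quadwords.)
		 */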

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(add_auth_data_length);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
				ICP_QAT_HW_CIPHER_KEY_CONVERT,
				ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->key, authkey, authkeylen);
		memset(cipherconfig->key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		break;
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
				+ authkeylen);
		/*
		 * The Inner Hash Initial State2 block must contain IK
		 * (Initialisation Key), followed by IK XOR-ed with KM
		 * (Key Modifier): IK||(IK^KM).
		 */
		/* write the auth key */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		/* initialise temp key with auth key */
		memcpy(pTempKey, authkey, authkeylen);
		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	/* Request template setup */
	qat_alg_init_common_hdr(header, proto);
	header->service_cmd_id = cdesc->qat_cmd;

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	hash_cd_ctrl->inner_state2_sz = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
			>> 3);

	cdesc->cd_cur_ptr += state1_size + state2_size;
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}

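/*
 * Key-size validation helpers: each maps a caller-supplied key length to the
 * matching hardware cipher-algorithm enum, returning -EINVAL for unsupported
 * lengths. A hypothetical caller might do:
 *
 *	enum icp_qat_hw_cipher_algo alg;
 *
 *	if (qat_alg_validate_aes_key(key_length, &alg) != 0)
 *		return -EINVAL;		(unsupported AES key size)
 */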
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case ICP_QAT_HW_AES_192_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case ICP_QAT_HW_AES_256_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_KASUMI_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case QAT_3DES_KEY_SZ_OPT1:
	case QAT_3DES_KEY_SZ_OPT2:
		*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}