1 /*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
4
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 Contact Information:
17 qat-linux@intel.com
18
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
24
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
34
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/internal/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <linux/dma-mapping.h>
57 #include "adf_accel_devices.h"
58 #include "adf_transport.h"
59 #include "adf_common_drv.h"
60 #include "qat_crypto.h"
61 #include "icp_qat_hw.h"
62 #include "icp_qat_fw.h"
63 #include "icp_qat_fw_la.h"
64
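/*
 * Cipher configuration words for the content descriptors below: both
 * select AES in CBC mode, and the decrypt variant additionally requests
 * a hardware key conversion (presumably to derive the AES decryption
 * key schedule) instead of using the key as supplied.
 */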
65 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
66 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
67 ICP_QAT_HW_CIPHER_NO_CONVERT, \
68 ICP_QAT_HW_CIPHER_ENCRYPT)
69
70 #define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
71 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
72 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
73 ICP_QAT_HW_CIPHER_DECRYPT)
74
75 static DEFINE_MUTEX(algs_lock);
76 static unsigned int active_devs;
77
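/*
 * Flat buffer descriptors passed to the firmware in place of kernel
 * scatterlists: each qat_alg_buf carries the DMA address and length of
 * one mapped segment, and qat_alg_buf_list counts the entries.
 */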
78 struct qat_alg_buf {
79 uint32_t len;
80 uint32_t resrvd;
81 uint64_t addr;
82 } __packed;
83
84 struct qat_alg_buf_list {
85 uint64_t resrvd;
86 uint32_t num_bufs;
87 uint32_t num_mapped_bufs;
88 struct qat_alg_buf bufers[];
89 } __packed __aligned(64);
90
91 /* Common content descriptor */
92 struct qat_alg_cd {
93 union {
94 struct qat_enc { /* Encrypt content desc */
95 struct icp_qat_hw_cipher_algo_blk cipher;
96 struct icp_qat_hw_auth_algo_blk hash;
97 } qat_enc_cd;
98         struct qat_dec { /* Decrypt content desc */
99 struct icp_qat_hw_auth_algo_blk hash;
100 struct icp_qat_hw_cipher_algo_blk cipher;
101 } qat_dec_cd;
102 };
103 } __aligned(64);
104
105 struct qat_alg_aead_ctx {
106 struct qat_alg_cd *enc_cd;
107 struct qat_alg_cd *dec_cd;
108 dma_addr_t enc_cd_paddr;
109 dma_addr_t dec_cd_paddr;
110 struct icp_qat_fw_la_bulk_req enc_fw_req;
111 struct icp_qat_fw_la_bulk_req dec_fw_req;
112 struct crypto_shash *hash_tfm;
113 enum icp_qat_hw_auth_algo qat_hash_alg;
114 struct qat_crypto_instance *inst;
115 };
116
117 struct qat_alg_ablkcipher_ctx {
118 struct icp_qat_hw_cipher_algo_blk *enc_cd;
119 struct icp_qat_hw_cipher_algo_blk *dec_cd;
120 dma_addr_t enc_cd_paddr;
121 dma_addr_t dec_cd_paddr;
122 struct icp_qat_fw_la_bulk_req enc_fw_req;
123 struct icp_qat_fw_la_bulk_req dec_fw_req;
124 struct qat_crypto_instance *inst;
125 struct crypto_tfm *tfm;
126 spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
127 };
128
129 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
130 {
131 switch (qat_hash_alg) {
132 case ICP_QAT_HW_AUTH_ALGO_SHA1:
133 return ICP_QAT_HW_SHA1_STATE1_SZ;
134 case ICP_QAT_HW_AUTH_ALGO_SHA256:
135 return ICP_QAT_HW_SHA256_STATE1_SZ;
136 case ICP_QAT_HW_AUTH_ALGO_SHA512:
137 return ICP_QAT_HW_SHA512_STATE1_SZ;
138 default:
139 return -EFAULT;
140         }
142 }
143
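/*
 * Precompute the inner and outer HMAC states for the hardware: the auth
 * key (digested first if it exceeds one block) is XORed with the usual
 * 0x36/0x5c pads, each partial hash is computed with the software shash
 * tfm, and the exported state words are stored big-endian into
 * hash->sha.state1 at the offsets the auth setup block expects.
 */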
144 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
145 struct qat_alg_aead_ctx *ctx,
146 const uint8_t *auth_key,
147 unsigned int auth_keylen)
148 {
149 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
150 struct sha1_state sha1;
151 struct sha256_state sha256;
152 struct sha512_state sha512;
153 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
154 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
155 char ipad[block_size];
156 char opad[block_size];
157 __be32 *hash_state_out;
158 __be64 *hash512_state_out;
159 int i, offset;
160
161 memset(ipad, 0, block_size);
162 memset(opad, 0, block_size);
163 shash->tfm = ctx->hash_tfm;
164 shash->flags = 0x0;
165
166 if (auth_keylen > block_size) {
167 int ret = crypto_shash_digest(shash, auth_key,
168 auth_keylen, ipad);
169 if (ret)
170 return ret;
171
172 memcpy(opad, ipad, digest_size);
173 } else {
174 memcpy(ipad, auth_key, auth_keylen);
175 memcpy(opad, auth_key, auth_keylen);
176 }
177
178 for (i = 0; i < block_size; i++) {
179 char *ipad_ptr = ipad + i;
180 char *opad_ptr = opad + i;
181 *ipad_ptr ^= 0x36;
182 *opad_ptr ^= 0x5C;
183 }
184
185 if (crypto_shash_init(shash))
186 return -EFAULT;
187
188 if (crypto_shash_update(shash, ipad, block_size))
189 return -EFAULT;
190
191 hash_state_out = (__be32 *)hash->sha.state1;
192 hash512_state_out = (__be64 *)hash_state_out;
193
194 switch (ctx->qat_hash_alg) {
195 case ICP_QAT_HW_AUTH_ALGO_SHA1:
196 if (crypto_shash_export(shash, &sha1))
197 return -EFAULT;
198 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
199 *hash_state_out = cpu_to_be32(*(sha1.state + i));
200 break;
201 case ICP_QAT_HW_AUTH_ALGO_SHA256:
202 if (crypto_shash_export(shash, &sha256))
203 return -EFAULT;
204 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
205 *hash_state_out = cpu_to_be32(*(sha256.state + i));
206 break;
207 case ICP_QAT_HW_AUTH_ALGO_SHA512:
208 if (crypto_shash_export(shash, &sha512))
209 return -EFAULT;
210 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
211 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
212 break;
213 default:
214 return -EFAULT;
215 }
216
217 if (crypto_shash_init(shash))
218 return -EFAULT;
219
220 if (crypto_shash_update(shash, opad, block_size))
221 return -EFAULT;
222
223 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
224 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
225 hash512_state_out = (__be64 *)hash_state_out;
226
227 switch (ctx->qat_hash_alg) {
228 case ICP_QAT_HW_AUTH_ALGO_SHA1:
229 if (crypto_shash_export(shash, &sha1))
230 return -EFAULT;
231 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
232 *hash_state_out = cpu_to_be32(*(sha1.state + i));
233 break;
234 case ICP_QAT_HW_AUTH_ALGO_SHA256:
235 if (crypto_shash_export(shash, &sha256))
236 return -EFAULT;
237 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
238 *hash_state_out = cpu_to_be32(*(sha256.state + i));
239 break;
240 case ICP_QAT_HW_AUTH_ALGO_SHA512:
241 if (crypto_shash_export(shash, &sha512))
242 return -EFAULT;
243 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
244 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
245 break;
246 default:
247 return -EFAULT;
248 }
249 memzero_explicit(ipad, block_size);
250 memzero_explicit(opad, block_size);
251 return 0;
252 }
253
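/* Fill in the LA request header fields that are common to all sessions. */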
254 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
255 {
256 header->hdr_flags =
257 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
258 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
259 header->comn_req_flags =
260 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
261 QAT_COMN_PTR_TYPE_SGL);
262 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
263 ICP_QAT_FW_LA_PARTIAL_NONE);
264 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
265 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
266 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
267 ICP_QAT_FW_LA_NO_PROTO);
268 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
269 ICP_QAT_FW_LA_NO_UPDATE_STATE);
270 }
271
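/*
 * Build the encrypt content descriptor and request template: the cipher
 * block comes first with the hash setup behind it, and the slices are
 * chained CIPHER -> AUTH (encrypt-then-MAC), with the computed digest
 * returned alongside the data.
 */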
272 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
273 int alg,
274 struct crypto_authenc_keys *keys)
275 {
276 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
277 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
278 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
279 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
280 struct icp_qat_hw_auth_algo_blk *hash =
281 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
282 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
283 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
284 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
285 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
286 void *ptr = &req_tmpl->cd_ctrl;
287 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
288 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
289
290 /* CD setup */
291 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
292 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
293 hash->sha.inner_setup.auth_config.config =
294 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
295 ctx->qat_hash_alg, digestsize);
296 hash->sha.inner_setup.auth_counter.counter =
297 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
298
299 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
300 return -EFAULT;
301
302 /* Request setup */
303 qat_alg_init_common_hdr(header);
304 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
305 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
306 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
307 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
308 ICP_QAT_FW_LA_RET_AUTH_RES);
309 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
310 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
311 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
312 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
313
314 /* Cipher CD config setup */
315 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
316 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
317 cipher_cd_ctrl->cipher_cfg_offset = 0;
318 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
319 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
320 /* Auth CD config setup */
321 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
322 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
323 hash_cd_ctrl->inner_res_sz = digestsize;
324 hash_cd_ctrl->final_sz = digestsize;
325
326 switch (ctx->qat_hash_alg) {
327 case ICP_QAT_HW_AUTH_ALGO_SHA1:
328 hash_cd_ctrl->inner_state1_sz =
329 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
330 hash_cd_ctrl->inner_state2_sz =
331 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
332 break;
333 case ICP_QAT_HW_AUTH_ALGO_SHA256:
334 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
335 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
336 break;
337 case ICP_QAT_HW_AUTH_ALGO_SHA512:
338 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
339 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
340 break;
341 default:
342 break;
343 }
344 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
345 ((sizeof(struct icp_qat_hw_auth_setup) +
346 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
347 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
348 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
349 return 0;
350 }
351
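/*
 * Build the decrypt content descriptor and request template: here the
 * hash setup precedes the cipher block, the slices are chained
 * AUTH -> CIPHER, and the hardware compares the computed digest against
 * the one carried in the request instead of returning it.
 */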
352 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
353 int alg,
354 struct crypto_authenc_keys *keys)
355 {
356 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
357 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
358 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
359 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
360 struct icp_qat_hw_cipher_algo_blk *cipher =
361 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
362 sizeof(struct icp_qat_hw_auth_setup) +
363 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
364 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
365 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
366 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
367 void *ptr = &req_tmpl->cd_ctrl;
368 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
369 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
370 struct icp_qat_fw_la_auth_req_params *auth_param =
371 (struct icp_qat_fw_la_auth_req_params *)
372 ((char *)&req_tmpl->serv_specif_rqpars +
373 sizeof(struct icp_qat_fw_la_cipher_req_params));
374
375 /* CD setup */
376 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
377 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
378 hash->sha.inner_setup.auth_config.config =
379 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
380 ctx->qat_hash_alg,
381 digestsize);
382 hash->sha.inner_setup.auth_counter.counter =
383 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
384
385 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
386 return -EFAULT;
387
388 /* Request setup */
389 qat_alg_init_common_hdr(header);
390 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
391 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
392 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
393 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
394 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
395 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
396 ICP_QAT_FW_LA_CMP_AUTH_RES);
397 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
398 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
399
400 /* Cipher CD config setup */
401 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
402 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
403 cipher_cd_ctrl->cipher_cfg_offset =
404 (sizeof(struct icp_qat_hw_auth_setup) +
405 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
406 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
407 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
408
409 /* Auth CD config setup */
410 hash_cd_ctrl->hash_cfg_offset = 0;
411 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
412 hash_cd_ctrl->inner_res_sz = digestsize;
413 hash_cd_ctrl->final_sz = digestsize;
414
415 switch (ctx->qat_hash_alg) {
416 case ICP_QAT_HW_AUTH_ALGO_SHA1:
417 hash_cd_ctrl->inner_state1_sz =
418 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
419 hash_cd_ctrl->inner_state2_sz =
420 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
421 break;
422 case ICP_QAT_HW_AUTH_ALGO_SHA256:
423 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
424 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
425 break;
426 case ICP_QAT_HW_AUTH_ALGO_SHA512:
427 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
428 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
429 break;
430 default:
431 break;
432 }
433
434 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
435 ((sizeof(struct icp_qat_hw_auth_setup) +
436 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
437 auth_param->auth_res_sz = digestsize;
438 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
439 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
440 return 0;
441 }
442
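/*
 * Common setup for the cipher-only content descriptor and request
 * template shared by the ablkcipher encrypt and decrypt paths.
 */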
443 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
444 struct icp_qat_fw_la_bulk_req *req,
445 struct icp_qat_hw_cipher_algo_blk *cd,
446 const uint8_t *key, unsigned int keylen)
447 {
448 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
449 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
450 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
451
452 memcpy(cd->aes.key, key, keylen);
453 qat_alg_init_common_hdr(header);
454 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
455 cd_pars->u.s.content_desc_params_sz =
456 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
457 /* Cipher CD config setup */
458 cd_ctrl->cipher_key_sz = keylen >> 3;
459 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
460 cd_ctrl->cipher_cfg_offset = 0;
461 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
462 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
463 }
464
465 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
466 int alg, const uint8_t *key,
467 unsigned int keylen)
468 {
469 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
470 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
471 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
472
473 qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
474 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
475 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
476 }
477
478 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
479 int alg, const uint8_t *key,
480 unsigned int keylen)
481 {
482 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
483 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
484 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
485
486 qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
487 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
488 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
489 }
490
491 static int qat_alg_validate_key(int key_len, int *alg)
492 {
493 switch (key_len) {
494 case AES_KEYSIZE_128:
495 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
496 break;
497 case AES_KEYSIZE_192:
498 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
499 break;
500 case AES_KEYSIZE_256:
501 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
502 break;
503 default:
504 return -EINVAL;
505 }
506 return 0;
507 }
508
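/*
 * Split the authenc() key blob into its cipher and auth keys and
 * program both the encrypt and decrypt sessions from them.
 */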
509 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm,
510 const uint8_t *key, unsigned int keylen)
511 {
512 struct crypto_authenc_keys keys;
513 int alg;
514
515 if (crypto_authenc_extractkeys(&keys, key, keylen))
516 goto bad_key;
517
518 if (qat_alg_validate_key(keys.enckeylen, &alg))
519 goto bad_key;
520
521 if (qat_alg_aead_init_enc_session(tfm, alg, &keys))
522 goto error;
523
524 if (qat_alg_aead_init_dec_session(tfm, alg, &keys))
525 goto error;
526
527 return 0;
528 bad_key:
529 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
530 return -EINVAL;
531 error:
532 return -EFAULT;
533 }
534
535 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
536 const uint8_t *key,
537 unsigned int keylen)
538 {
539 int alg;
540
541 if (qat_alg_validate_key(keylen, &alg))
542 goto bad_key;
543
544 qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
545 qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
546 return 0;
547 bad_key:
548 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
549 return -EINVAL;
550 }
551
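/*
 * On first use, allocate DMA-coherent encrypt/decrypt content
 * descriptors from the chosen crypto instance; on rekey, just clear the
 * existing descriptors and request templates before rebuilding them.
 */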
552 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
553 unsigned int keylen)
554 {
555 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
556 struct device *dev;
557
558 if (ctx->enc_cd) {
559 /* rekeying */
560 dev = &GET_DEV(ctx->inst->accel_dev);
561 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
562 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
563 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
564 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
565 } else {
566 /* new key */
567 int node = get_current_node();
568 struct qat_crypto_instance *inst =
569 qat_crypto_get_instance_node(node);
570                 if (!inst)
571                         return -EINVAL;
573
574 dev = &GET_DEV(inst->accel_dev);
575 ctx->inst = inst;
576 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
577 &ctx->enc_cd_paddr,
578 GFP_ATOMIC);
579                 if (!ctx->enc_cd)
580                         return -ENOMEM;
582 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
583 &ctx->dec_cd_paddr,
584 GFP_ATOMIC);
585                 if (!ctx->dec_cd)
586                         goto out_free_enc;
588 }
589 if (qat_alg_aead_init_sessions(tfm, key, keylen))
590 goto out_free_all;
591
592 return 0;
593
594 out_free_all:
595 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
596 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
597 ctx->dec_cd, ctx->dec_cd_paddr);
598 ctx->dec_cd = NULL;
599 out_free_enc:
600 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
601 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
602 ctx->enc_cd, ctx->enc_cd_paddr);
603 ctx->enc_cd = NULL;
604 return -ENOMEM;
605 }
606
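/* Unmap and free the buffer lists built by qat_alg_sgl_to_bufl(). */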
607 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
608 struct qat_crypto_request *qat_req)
609 {
610 struct device *dev = &GET_DEV(inst->accel_dev);
611 struct qat_alg_buf_list *bl = qat_req->buf.bl;
612 struct qat_alg_buf_list *blout = qat_req->buf.blout;
613 dma_addr_t blp = qat_req->buf.blp;
614 dma_addr_t blpout = qat_req->buf.bloutp;
615 size_t sz = qat_req->buf.sz;
616 size_t sz_out = qat_req->buf.sz_out;
617 int i;
618
619 for (i = 0; i < bl->num_bufs; i++)
620 dma_unmap_single(dev, bl->bufers[i].addr,
621 bl->bufers[i].len, DMA_BIDIRECTIONAL);
622
623 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
624 kfree(bl);
625 if (blp != blpout) {
626                 /* For an out-of-place operation, DMA-unmap only the data buffers */
627 int bufless = blout->num_bufs - blout->num_mapped_bufs;
628
629 for (i = bufless; i < blout->num_bufs; i++) {
630 dma_unmap_single(dev, blout->bufers[i].addr,
631 blout->bufers[i].len,
632 DMA_BIDIRECTIONAL);
633 }
634 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
635 kfree(blout);
636 }
637 }
638
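/*
 * DMA-map the source (and, for out-of-place requests, the destination)
 * scatterlist and describe it in a flat qat_alg_buf_list the firmware
 * can walk.  Zero-length entries are skipped.
 */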
639 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
640 struct scatterlist *sgl,
641 struct scatterlist *sglout,
642 struct qat_crypto_request *qat_req)
643 {
644 struct device *dev = &GET_DEV(inst->accel_dev);
645 int i, sg_nctr = 0;
646 int n = sg_nents(sgl);
647 struct qat_alg_buf_list *bufl;
648 struct qat_alg_buf_list *buflout = NULL;
649 dma_addr_t blp;
650 dma_addr_t bloutp = 0;
651 struct scatterlist *sg;
652 size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
653 ((1 + n) * sizeof(struct qat_alg_buf));
654
655 if (unlikely(!n))
656 return -EINVAL;
657
658 bufl = kzalloc_node(sz, GFP_ATOMIC,
659 dev_to_node(&GET_DEV(inst->accel_dev)));
660 if (unlikely(!bufl))
661 return -ENOMEM;
662
663 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
664 if (unlikely(dma_mapping_error(dev, blp)))
665 goto err;
666
667 for_each_sg(sgl, sg, n, i) {
668 int y = sg_nctr;
669
670 if (!sg->length)
671 continue;
672
673 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
674 sg->length,
675 DMA_BIDIRECTIONAL);
676 bufl->bufers[y].len = sg->length;
677 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
678 goto err;
679 sg_nctr++;
680 }
681 bufl->num_bufs = sg_nctr;
682 qat_req->buf.bl = bufl;
683 qat_req->buf.blp = blp;
684 qat_req->buf.sz = sz;
685 /* Handle out of place operation */
686 if (sgl != sglout) {
687 struct qat_alg_buf *bufers;
688
689 n = sg_nents(sglout);
690 sz_out = sizeof(struct qat_alg_buf_list) +
691 ((1 + n) * sizeof(struct qat_alg_buf));
692 sg_nctr = 0;
693 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
694 dev_to_node(&GET_DEV(inst->accel_dev)));
695 if (unlikely(!buflout))
696 goto err;
697 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
698 if (unlikely(dma_mapping_error(dev, bloutp)))
699 goto err;
700 bufers = buflout->bufers;
701 for_each_sg(sglout, sg, n, i) {
702 int y = sg_nctr;
703
704 if (!sg->length)
705 continue;
706
707 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
708 sg->length,
709 DMA_BIDIRECTIONAL);
710 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
711 goto err;
712 bufers[y].len = sg->length;
713 sg_nctr++;
714 }
715 buflout->num_bufs = sg_nctr;
716 buflout->num_mapped_bufs = sg_nctr;
717 qat_req->buf.blout = buflout;
718 qat_req->buf.bloutp = bloutp;
719 qat_req->buf.sz_out = sz_out;
720 } else {
721 /* Otherwise set the src and dst to the same address */
722 qat_req->buf.bloutp = qat_req->buf.blp;
723 qat_req->buf.sz_out = 0;
724 }
725 return 0;
726 err:
727 dev_err(dev, "Failed to map buf for dma\n");
728 sg_nctr = 0;
729 for (i = 0; i < n; i++)
730 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
731 dma_unmap_single(dev, bufl->bufers[i].addr,
732 bufl->bufers[i].len,
733 DMA_BIDIRECTIONAL);
734
735 if (!dma_mapping_error(dev, blp))
736 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
737 kfree(bufl);
738 if (sgl != sglout && buflout) {
739 n = sg_nents(sglout);
740 for (i = 0; i < n; i++)
741 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
742 dma_unmap_single(dev, buflout->bufers[i].addr,
743 buflout->bufers[i].len,
744 DMA_BIDIRECTIONAL);
745 if (!dma_mapping_error(dev, bloutp))
746 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
747 kfree(buflout);
748 }
749 return -ENOMEM;
750 }
751
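/*
 * Completion callbacks: unmap the request buffers and translate the
 * firmware status for the crypto API (-EBADMSG for a failed AEAD
 * verification, -EINVAL for a failed cipher operation).
 */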
752 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
753 struct qat_crypto_request *qat_req)
754 {
755 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
756 struct qat_crypto_instance *inst = ctx->inst;
757 struct aead_request *areq = qat_req->aead_req;
758         uint8_t stat_field = qat_resp->comn_resp.comn_status;
759         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
760
761 qat_alg_free_bufl(inst, qat_req);
762 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
763 res = -EBADMSG;
764 areq->base.complete(&areq->base, res);
765 }
766
767 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
768 struct qat_crypto_request *qat_req)
769 {
770 struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
771 struct qat_crypto_instance *inst = ctx->inst;
772 struct ablkcipher_request *areq = qat_req->ablkcipher_req;
773         uint8_t stat_field = qat_resp->comn_resp.comn_status;
774         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
775
776 qat_alg_free_bufl(inst, qat_req);
777 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
778 res = -EINVAL;
779 areq->base.complete(&areq->base, res);
780 }
781
782 void qat_alg_callback(void *resp)
783 {
784 struct icp_qat_fw_la_resp *qat_resp = resp;
785 struct qat_crypto_request *qat_req =
786 (void *)(__force long)qat_resp->opaque_data;
787
788 qat_req->cb(qat_resp, qat_req);
789 }
790
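/*
 * AEAD decrypt: clone the per-context request template, point it at the
 * mapped buffers, and set the cipher region to exclude the trailing
 * digest while the auth region covers the associated data plus the
 * ciphertext.  Submission is retried a few times if the ring is full.
 */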
791 static int qat_alg_aead_dec(struct aead_request *areq)
792 {
793 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
794 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
795 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
796 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
797 struct icp_qat_fw_la_cipher_req_params *cipher_param;
798 struct icp_qat_fw_la_auth_req_params *auth_param;
799 struct icp_qat_fw_la_bulk_req *msg;
800         int digest_size = crypto_aead_authsize(aead_tfm);
801 int ret, ctr = 0;
802
803 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
804 if (unlikely(ret))
805 return ret;
806
807 msg = &qat_req->req;
808 *msg = ctx->dec_fw_req;
809 qat_req->aead_ctx = ctx;
810 qat_req->aead_req = areq;
811 qat_req->cb = qat_aead_alg_callback;
812 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
813 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
814 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
815 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
816         cipher_param->cipher_length = areq->cryptlen - digest_size;
817 cipher_param->cipher_offset = areq->assoclen;
818 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
819 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
820 auth_param->auth_off = 0;
821 auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
822 do {
823 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
824 } while (ret == -EAGAIN && ctr++ < 10);
825
826 if (ret == -EAGAIN) {
827 qat_alg_free_bufl(ctx->inst, qat_req);
828 return -EBUSY;
829 }
830 return -EINPROGRESS;
831 }
832
833 static int qat_alg_aead_enc(struct aead_request *areq)
834 {
835 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
836 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
837 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
838 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
839 struct icp_qat_fw_la_cipher_req_params *cipher_param;
840 struct icp_qat_fw_la_auth_req_params *auth_param;
841 struct icp_qat_fw_la_bulk_req *msg;
842 uint8_t *iv = areq->iv;
843 int ret, ctr = 0;
844
845 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
846 if (unlikely(ret))
847 return ret;
848
849 msg = &qat_req->req;
850 *msg = ctx->enc_fw_req;
851 qat_req->aead_ctx = ctx;
852 qat_req->aead_req = areq;
853 qat_req->cb = qat_aead_alg_callback;
854 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
855 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
856 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
857 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
858 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
859
860 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
861 cipher_param->cipher_length = areq->cryptlen;
862 cipher_param->cipher_offset = areq->assoclen;
863
864 auth_param->auth_off = 0;
865 auth_param->auth_len = areq->assoclen + areq->cryptlen;
866
867 do {
868 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
869 } while (ret == -EAGAIN && ctr++ < 10);
870
871 if (ret == -EAGAIN) {
872 qat_alg_free_bufl(ctx->inst, qat_req);
873 return -EBUSY;
874 }
875 return -EINPROGRESS;
876 }
877
878 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
879 const uint8_t *key,
880 unsigned int keylen)
881 {
882 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
883 struct device *dev;
884
885 spin_lock(&ctx->lock);
886 if (ctx->enc_cd) {
887 /* rekeying */
888 dev = &GET_DEV(ctx->inst->accel_dev);
889 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
890 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
891 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
892 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
893 } else {
894 /* new key */
895 int node = get_current_node();
896 struct qat_crypto_instance *inst =
897 qat_crypto_get_instance_node(node);
898 if (!inst) {
899 spin_unlock(&ctx->lock);
900 return -EINVAL;
901 }
902
903 dev = &GET_DEV(inst->accel_dev);
904 ctx->inst = inst;
905 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
906 &ctx->enc_cd_paddr,
907 GFP_ATOMIC);
908 if (!ctx->enc_cd) {
909 spin_unlock(&ctx->lock);
910 return -ENOMEM;
911 }
912 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
913 &ctx->dec_cd_paddr,
914 GFP_ATOMIC);
915 if (!ctx->dec_cd) {
916 spin_unlock(&ctx->lock);
917 goto out_free_enc;
918 }
919 }
920 spin_unlock(&ctx->lock);
921 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
922 goto out_free_all;
923
924 return 0;
925
926 out_free_all:
927 memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
928 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
929 ctx->dec_cd, ctx->dec_cd_paddr);
930 ctx->dec_cd = NULL;
931 out_free_enc:
932 memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
933 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
934 ctx->enc_cd, ctx->enc_cd_paddr);
935 ctx->enc_cd = NULL;
936 return -ENOMEM;
937 }
938
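/*
 * ablkcipher encrypt/decrypt: clone the prebuilt request template,
 * point it at the mapped buffers and the IV, and submit with the same
 * bounded retry as the AEAD paths.
 */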
939 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
940 {
941 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
942 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
943 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
944 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
945 struct icp_qat_fw_la_cipher_req_params *cipher_param;
946 struct icp_qat_fw_la_bulk_req *msg;
947 int ret, ctr = 0;
948
949 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
950 if (unlikely(ret))
951 return ret;
952
953 msg = &qat_req->req;
954 *msg = ctx->enc_fw_req;
955 qat_req->ablkcipher_ctx = ctx;
956 qat_req->ablkcipher_req = req;
957 qat_req->cb = qat_ablkcipher_alg_callback;
958 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
959 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
960 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
961 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
962 cipher_param->cipher_length = req->nbytes;
963 cipher_param->cipher_offset = 0;
964 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
965 do {
966 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
967 } while (ret == -EAGAIN && ctr++ < 10);
968
969 if (ret == -EAGAIN) {
970 qat_alg_free_bufl(ctx->inst, qat_req);
971 return -EBUSY;
972 }
973 return -EINPROGRESS;
974 }
975
976 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
977 {
978 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
979 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
980 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
981 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
982 struct icp_qat_fw_la_cipher_req_params *cipher_param;
983 struct icp_qat_fw_la_bulk_req *msg;
984 int ret, ctr = 0;
985
986 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
987 if (unlikely(ret))
988 return ret;
989
990 msg = &qat_req->req;
991 *msg = ctx->dec_fw_req;
992 qat_req->ablkcipher_ctx = ctx;
993 qat_req->ablkcipher_req = req;
994 qat_req->cb = qat_ablkcipher_alg_callback;
995 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
996 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
997 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
998 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
999 cipher_param->cipher_length = req->nbytes;
1000 cipher_param->cipher_offset = 0;
1001 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1002 do {
1003 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1004 } while (ret == -EAGAIN && ctr++ < 10);
1005
1006 if (ret == -EAGAIN) {
1007 qat_alg_free_bufl(ctx->inst, qat_req);
1008 return -EBUSY;
1009 }
1010 return -EINPROGRESS;
1011 }
1012
1013 static int qat_alg_aead_init(struct crypto_aead *tfm,
1014 enum icp_qat_hw_auth_algo hash,
1015 const char *hash_name)
1016 {
1017 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1018
1019 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1020 if (IS_ERR(ctx->hash_tfm))
1021 return PTR_ERR(ctx->hash_tfm);
1022 ctx->qat_hash_alg = hash;
1023 crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1024 sizeof(struct qat_crypto_request));
1025 return 0;
1026 }
1027
1028 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1029 {
1030 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1031 }
1032
1033 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1034 {
1035 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1036 }
1037
1038 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1039 {
1040 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1041 }
1042
1043 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1044 {
1045 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1046 struct qat_crypto_instance *inst = ctx->inst;
1047 struct device *dev;
1048
1049 crypto_free_shash(ctx->hash_tfm);
1050
1051 if (!inst)
1052 return;
1053
1054 dev = &GET_DEV(inst->accel_dev);
1055 if (ctx->enc_cd) {
1056 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1057 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1058 ctx->enc_cd, ctx->enc_cd_paddr);
1059 }
1060 if (ctx->dec_cd) {
1061 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1062 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1063 ctx->dec_cd, ctx->dec_cd_paddr);
1064 }
1065 qat_crypto_put_instance(inst);
1066 }
1067
1068 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1069 {
1070 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1071
1072 spin_lock_init(&ctx->lock);
1073 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
1074 sizeof(struct qat_crypto_request);
1075 ctx->tfm = tfm;
1076 return 0;
1077 }
1078
1079 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1080 {
1081 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1082 struct qat_crypto_instance *inst = ctx->inst;
1083 struct device *dev;
1084
1085 if (!inst)
1086 return;
1087
1088 dev = &GET_DEV(inst->accel_dev);
1089 if (ctx->enc_cd) {
1090 memset(ctx->enc_cd, 0,
1091 sizeof(struct icp_qat_hw_cipher_algo_blk));
1092 dma_free_coherent(dev,
1093 sizeof(struct icp_qat_hw_cipher_algo_blk),
1094 ctx->enc_cd, ctx->enc_cd_paddr);
1095 }
1096 if (ctx->dec_cd) {
1097 memset(ctx->dec_cd, 0,
1098 sizeof(struct icp_qat_hw_cipher_algo_blk));
1099 dma_free_coherent(dev,
1100 sizeof(struct icp_qat_hw_cipher_algo_blk),
1101 ctx->dec_cd, ctx->dec_cd_paddr);
1102 }
1103 qat_crypto_put_instance(inst);
1104 }
1105
1106
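/*
 * Algorithm tables registered with the crypto API: three authenc()
 * AEAD variants (HMAC-SHA1/SHA256/SHA512 over AES-CBC) and a plain
 * cbc(aes) ablkcipher, all asynchronous, at priority 4001.
 */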
1107 static struct aead_alg qat_aeads[] = { {
1108 .base = {
1109 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1110 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1111 .cra_priority = 4001,
1112 .cra_flags = CRYPTO_ALG_ASYNC,
1113 .cra_blocksize = AES_BLOCK_SIZE,
1114 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1115 .cra_module = THIS_MODULE,
1116 },
1117 .init = qat_alg_aead_sha1_init,
1118 .exit = qat_alg_aead_exit,
1119 .setkey = qat_alg_aead_setkey,
1120 .decrypt = qat_alg_aead_dec,
1121 .encrypt = qat_alg_aead_enc,
1122 .ivsize = AES_BLOCK_SIZE,
1123 .maxauthsize = SHA1_DIGEST_SIZE,
1124 }, {
1125 .base = {
1126 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1127 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1128 .cra_priority = 4001,
1129 .cra_flags = CRYPTO_ALG_ASYNC,
1130 .cra_blocksize = AES_BLOCK_SIZE,
1131 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1132 .cra_module = THIS_MODULE,
1133 },
1134 .init = qat_alg_aead_sha256_init,
1135 .exit = qat_alg_aead_exit,
1136 .setkey = qat_alg_aead_setkey,
1137 .decrypt = qat_alg_aead_dec,
1138 .encrypt = qat_alg_aead_enc,
1139 .ivsize = AES_BLOCK_SIZE,
1140 .maxauthsize = SHA256_DIGEST_SIZE,
1141 }, {
1142 .base = {
1143 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1144 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1145 .cra_priority = 4001,
1146 .cra_flags = CRYPTO_ALG_ASYNC,
1147 .cra_blocksize = AES_BLOCK_SIZE,
1148 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1149 .cra_module = THIS_MODULE,
1150 },
1151 .init = qat_alg_aead_sha512_init,
1152 .exit = qat_alg_aead_exit,
1153 .setkey = qat_alg_aead_setkey,
1154 .decrypt = qat_alg_aead_dec,
1155 .encrypt = qat_alg_aead_enc,
1156 .ivsize = AES_BLOCK_SIZE,
1157 .maxauthsize = SHA512_DIGEST_SIZE,
1158 } };
1159
1160 static struct crypto_alg qat_algs[] = { {
1161 .cra_name = "cbc(aes)",
1162 .cra_driver_name = "qat_aes_cbc",
1163 .cra_priority = 4001,
1164 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1165 .cra_blocksize = AES_BLOCK_SIZE,
1166 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1167 .cra_alignmask = 0,
1168 .cra_type = &crypto_ablkcipher_type,
1169 .cra_module = THIS_MODULE,
1170 .cra_init = qat_alg_ablkcipher_init,
1171 .cra_exit = qat_alg_ablkcipher_exit,
1172 .cra_u = {
1173 .ablkcipher = {
1174 .setkey = qat_alg_ablkcipher_setkey,
1175 .decrypt = qat_alg_ablkcipher_decrypt,
1176 .encrypt = qat_alg_ablkcipher_encrypt,
1177 .min_keysize = AES_MIN_KEY_SIZE,
1178 .max_keysize = AES_MAX_KEY_SIZE,
1179 .ivsize = AES_BLOCK_SIZE,
1180 },
1181 },
1182 } };
1183
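/*
 * Registration is reference counted: only the first accelerator device
 * registers the algorithms and only the last one to go away unregisters
 * them, serialized by algs_lock.
 */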
1184 int qat_algs_register(void)
1185 {
1186 int ret = 0, i;
1187
1188 mutex_lock(&algs_lock);
1189 if (++active_devs != 1)
1190 goto unlock;
1191
1192 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1193 qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1194
1195 ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1196 if (ret)
1197 goto unlock;
1198
1199 for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
1200 qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
1201
1202 ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1203 if (ret)
1204 goto unreg_algs;
1205
1206 unlock:
1207 mutex_unlock(&algs_lock);
1208 return ret;
1209
1210 unreg_algs:
1211 crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1212 goto unlock;
1213 }
1214
1215 int qat_algs_unregister(void)
1216 {
1217 mutex_lock(&algs_lock);
1218 if (--active_devs != 0)
1219 goto unlock;
1220
1221 crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1222 crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1223
1224 unlock:
1225 mutex_unlock(&algs_lock);
1226 return 0;
1227 }
1228
1229 int qat_algs_init(void)
1230 {
1231 return 0;
1232 }
1233
1234 void qat_algs_exit(void)
1235 {
1236 }