drivers/crypto/qat/qat_common/qat_algs.c
1 /*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
4
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 Contact Information:
17 qat-linux@intel.com
18
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
24
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
34
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/internal/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
65
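/*
 * Cipher configuration words for AES-CBC. The encrypt descriptor uses the
 * key as supplied; the decrypt descriptor additionally asks the hardware to
 * convert the key for the decrypt direction.
 */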
66 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
67 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
68 ICP_QAT_HW_CIPHER_NO_CONVERT, \
69 ICP_QAT_HW_CIPHER_ENCRYPT)
70
71 #define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
72 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 ICP_QAT_HW_CIPHER_DECRYPT)
75
76 static atomic_t active_dev;
77
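/*
 * Flat buffer list in the layout consumed by the firmware: a 64-byte
 * aligned header followed by an array of {len, addr} descriptors, each
 * pointing at a DMA-mapped buffer.
 */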
78 struct qat_alg_buf {
79 uint32_t len;
80 uint32_t resrvd;
81 uint64_t addr;
82 } __packed;
83
84 struct qat_alg_buf_list {
85 uint64_t resrvd;
86 uint32_t num_bufs;
87 uint32_t num_mapped_bufs;
88 struct qat_alg_buf bufers[];
89 } __packed __aligned(64);
90
91 /* Common content descriptor */
92 struct qat_alg_cd {
93 union {
94 struct qat_enc { /* Encrypt content desc */
95 struct icp_qat_hw_cipher_algo_blk cipher;
96 struct icp_qat_hw_auth_algo_blk hash;
97 } qat_enc_cd;
98 struct qat_dec { /* Decrypt content desc */
99 struct icp_qat_hw_auth_algo_blk hash;
100 struct icp_qat_hw_cipher_algo_blk cipher;
101 } qat_dec_cd;
102 };
103 } __aligned(64);
104
105 struct qat_alg_aead_ctx {
106 struct qat_alg_cd *enc_cd;
107 struct qat_alg_cd *dec_cd;
108 dma_addr_t enc_cd_paddr;
109 dma_addr_t dec_cd_paddr;
110 struct icp_qat_fw_la_bulk_req enc_fw_req;
111 struct icp_qat_fw_la_bulk_req dec_fw_req;
112 struct crypto_shash *hash_tfm;
113 enum icp_qat_hw_auth_algo qat_hash_alg;
114 struct qat_crypto_instance *inst;
115 struct crypto_tfm *tfm;
116 uint8_t salt[AES_BLOCK_SIZE];
117 spinlock_t lock; /* protects qat_alg_aead_ctx struct */
118 };
119
120 struct qat_alg_ablkcipher_ctx {
121 struct icp_qat_hw_cipher_algo_blk *enc_cd;
122 struct icp_qat_hw_cipher_algo_blk *dec_cd;
123 dma_addr_t enc_cd_paddr;
124 dma_addr_t dec_cd_paddr;
125 struct icp_qat_fw_la_bulk_req enc_fw_req;
126 struct icp_qat_fw_la_bulk_req dec_fw_req;
127 struct qat_crypto_instance *inst;
128 struct crypto_tfm *tfm;
129 spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
130 };
131
132 static int get_current_node(void)
133 {
134 return cpu_data(current_thread_info()->cpu).phys_proc_id;
135 }
136
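/*
 * Size of the hardware hash state (state1) for the given auth algorithm,
 * used to locate where the second (outer/opad) precomputed state starts
 * inside the content descriptor.
 */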
137 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
138 {
139 switch (qat_hash_alg) {
140 case ICP_QAT_HW_AUTH_ALGO_SHA1:
141 return ICP_QAT_HW_SHA1_STATE1_SZ;
142 case ICP_QAT_HW_AUTH_ALGO_SHA256:
143 return ICP_QAT_HW_SHA256_STATE1_SZ;
144 case ICP_QAT_HW_AUTH_ALGO_SHA512:
145 return ICP_QAT_HW_SHA512_STATE1_SZ;
146 default:
147 return -EFAULT;
148 }
149 return -EFAULT;
150 }
151
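/*
 * HMAC precomputes: hash the ipad- and opad-XORed key blocks with the
 * software shash and export the partial states (big-endian) into state1
 * and state2 of the auth block, so the firmware works from the derived
 * states rather than the raw auth key.
 */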
152 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
153 struct qat_alg_aead_ctx *ctx,
154 const uint8_t *auth_key,
155 unsigned int auth_keylen)
156 {
157 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
158 struct sha1_state sha1;
159 struct sha256_state sha256;
160 struct sha512_state sha512;
161 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
162 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
163 char ipad[block_size];
164 char opad[block_size];
165 __be32 *hash_state_out;
166 __be64 *hash512_state_out;
167 int i, offset;
168
169 memset(ipad, 0, block_size);
170 memset(opad, 0, block_size);
171 shash->tfm = ctx->hash_tfm;
172 shash->flags = 0x0;
173
174 if (auth_keylen > block_size) {
175 int ret = crypto_shash_digest(shash, auth_key,
176 auth_keylen, ipad);
177 if (ret)
178 return ret;
179
180 memcpy(opad, ipad, digest_size);
181 } else {
182 memcpy(ipad, auth_key, auth_keylen);
183 memcpy(opad, auth_key, auth_keylen);
184 }
185
186 for (i = 0; i < block_size; i++) {
187 char *ipad_ptr = ipad + i;
188 char *opad_ptr = opad + i;
189 *ipad_ptr ^= 0x36;
190 *opad_ptr ^= 0x5C;
191 }
192
193 if (crypto_shash_init(shash))
194 return -EFAULT;
195
196 if (crypto_shash_update(shash, ipad, block_size))
197 return -EFAULT;
198
199 hash_state_out = (__be32 *)hash->sha.state1;
200 hash512_state_out = (__be64 *)hash_state_out;
201
202 switch (ctx->qat_hash_alg) {
203 case ICP_QAT_HW_AUTH_ALGO_SHA1:
204 if (crypto_shash_export(shash, &sha1))
205 return -EFAULT;
206 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
207 *hash_state_out = cpu_to_be32(*(sha1.state + i));
208 break;
209 case ICP_QAT_HW_AUTH_ALGO_SHA256:
210 if (crypto_shash_export(shash, &sha256))
211 return -EFAULT;
212 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
213 *hash_state_out = cpu_to_be32(*(sha256.state + i));
214 break;
215 case ICP_QAT_HW_AUTH_ALGO_SHA512:
216 if (crypto_shash_export(shash, &sha512))
217 return -EFAULT;
218 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
219 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
220 break;
221 default:
222 return -EFAULT;
223 }
224
225 if (crypto_shash_init(shash))
226 return -EFAULT;
227
228 if (crypto_shash_update(shash, opad, block_size))
229 return -EFAULT;
230
231 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
232 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
233 hash512_state_out = (__be64 *)hash_state_out;
234
235 switch (ctx->qat_hash_alg) {
236 case ICP_QAT_HW_AUTH_ALGO_SHA1:
237 if (crypto_shash_export(shash, &sha1))
238 return -EFAULT;
239 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
240 *hash_state_out = cpu_to_be32(*(sha1.state + i));
241 break;
242 case ICP_QAT_HW_AUTH_ALGO_SHA256:
243 if (crypto_shash_export(shash, &sha256))
244 return -EFAULT;
245 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
246 *hash_state_out = cpu_to_be32(*(sha256.state + i));
247 break;
248 case ICP_QAT_HW_AUTH_ALGO_SHA512:
249 if (crypto_shash_export(shash, &sha512))
250 return -EFAULT;
251 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
252 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
253 break;
254 default:
255 return -EFAULT;
256 }
257 memzero_explicit(ipad, block_size);
258 memzero_explicit(opad, block_size);
259 return 0;
260 }
261
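/* Fill the request header fields common to all lookaside (LA) requests. */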
262 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
263 {
264 header->hdr_flags =
265 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
266 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
267 header->comn_req_flags =
268 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
269 QAT_COMN_PTR_TYPE_SGL);
270 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
271 ICP_QAT_FW_LA_PARTIAL_NONE);
272 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
273 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
274 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
275 ICP_QAT_FW_LA_NO_PROTO);
276 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
277 ICP_QAT_FW_LA_NO_UPDATE_STATE);
278 }
279
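/*
 * Build the encrypt session: the content descriptor holds the cipher
 * config and key followed by the auth setup and precomputes, and the
 * request template chains the cipher slice into the auth slice
 * (encrypt-then-MAC) with the digest returned in the data buffer.
 */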
280 static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
281 int alg,
282 struct crypto_authenc_keys *keys)
283 {
284 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
285 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
286 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
287 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
288 struct icp_qat_hw_auth_algo_blk *hash =
289 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
290 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
291 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
292 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
293 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
294 void *ptr = &req_tmpl->cd_ctrl;
295 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
296 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
297
298 /* CD setup */
299 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
300 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
301 hash->sha.inner_setup.auth_config.config =
302 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
303 ctx->qat_hash_alg, digestsize);
304 hash->sha.inner_setup.auth_counter.counter =
305 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
306
307 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
308 return -EFAULT;
309
310 /* Request setup */
311 qat_alg_init_common_hdr(header);
312 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
313 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
314 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
315 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
316 ICP_QAT_FW_LA_RET_AUTH_RES);
317 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
318 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
319 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
320 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
321
322 /* Cipher CD config setup */
323 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
324 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
325 cipher_cd_ctrl->cipher_cfg_offset = 0;
326 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
327 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
328 /* Auth CD config setup */
329 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
330 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
331 hash_cd_ctrl->inner_res_sz = digestsize;
332 hash_cd_ctrl->final_sz = digestsize;
333
334 switch (ctx->qat_hash_alg) {
335 case ICP_QAT_HW_AUTH_ALGO_SHA1:
336 hash_cd_ctrl->inner_state1_sz =
337 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
338 hash_cd_ctrl->inner_state2_sz =
339 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
340 break;
341 case ICP_QAT_HW_AUTH_ALGO_SHA256:
342 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
343 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
344 break;
345 case ICP_QAT_HW_AUTH_ALGO_SHA512:
346 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
347 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
348 break;
349 default:
350 break;
351 }
352 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
353 ((sizeof(struct icp_qat_hw_auth_setup) +
354 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
355 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
356 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
357 return 0;
358 }
359
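/*
 * Build the decrypt session: here the content descriptor is laid out
 * hash-first, cipher second, and the request runs the auth slice before
 * the cipher slice, telling the firmware to compare the computed ICV
 * against the one in the buffer rather than returning it.
 */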
360 static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
361 int alg,
362 struct crypto_authenc_keys *keys)
363 {
364 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
365 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
366 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
367 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
368 struct icp_qat_hw_cipher_algo_blk *cipher =
369 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
370 sizeof(struct icp_qat_hw_auth_setup) +
371 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
372 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
373 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
374 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
375 void *ptr = &req_tmpl->cd_ctrl;
376 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
377 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
378 struct icp_qat_fw_la_auth_req_params *auth_param =
379 (struct icp_qat_fw_la_auth_req_params *)
380 ((char *)&req_tmpl->serv_specif_rqpars +
381 sizeof(struct icp_qat_fw_la_cipher_req_params));
382
383 /* CD setup */
384 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
385 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
386 hash->sha.inner_setup.auth_config.config =
387 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
388 ctx->qat_hash_alg,
389 digestsize);
390 hash->sha.inner_setup.auth_counter.counter =
391 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
392
393 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
394 return -EFAULT;
395
396 /* Request setup */
397 qat_alg_init_common_hdr(header);
398 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
399 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
400 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
401 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
402 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
403 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
404 ICP_QAT_FW_LA_CMP_AUTH_RES);
405 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
406 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
407
408 /* Cipher CD config setup */
409 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
410 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
411 cipher_cd_ctrl->cipher_cfg_offset =
412 (sizeof(struct icp_qat_hw_auth_setup) +
413 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
414 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
415 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
416
417 /* Auth CD config setup */
418 hash_cd_ctrl->hash_cfg_offset = 0;
419 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
420 hash_cd_ctrl->inner_res_sz = digestsize;
421 hash_cd_ctrl->final_sz = digestsize;
422
423 switch (ctx->qat_hash_alg) {
424 case ICP_QAT_HW_AUTH_ALGO_SHA1:
425 hash_cd_ctrl->inner_state1_sz =
426 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
427 hash_cd_ctrl->inner_state2_sz =
428 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
429 break;
430 case ICP_QAT_HW_AUTH_ALGO_SHA256:
431 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
432 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
433 break;
434 case ICP_QAT_HW_AUTH_ALGO_SHA512:
435 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
436 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
437 break;
438 default:
439 break;
440 }
441
442 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
443 ((sizeof(struct icp_qat_hw_auth_setup) +
444 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
445 auth_param->auth_res_sz = digestsize;
446 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
447 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
448 return 0;
449 }
450
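/*
 * Common part of the cipher-only (ablkcipher) templates: copy the key
 * into the content descriptor and set up a single cipher slice that
 * writes its result straight back to DRAM.
 */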
451 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
452 struct icp_qat_fw_la_bulk_req *req,
453 struct icp_qat_hw_cipher_algo_blk *cd,
454 const uint8_t *key, unsigned int keylen)
455 {
456 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
457 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
458 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
459
460 memcpy(cd->aes.key, key, keylen);
461 qat_alg_init_common_hdr(header);
462 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
463 cd_pars->u.s.content_desc_params_sz =
464 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
465 /* Cipher CD config setup */
466 cd_ctrl->cipher_key_sz = keylen >> 3;
467 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
468 cd_ctrl->cipher_cfg_offset = 0;
469 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
470 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
471 }
472
473 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
474 int alg, const uint8_t *key,
475 unsigned int keylen)
476 {
477 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
478 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
479 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
480
481 qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
482 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
483 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
484 }
485
486 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
487 int alg, const uint8_t *key,
488 unsigned int keylen)
489 {
490 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
491 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
492 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
493
494 qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
495 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
496 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
497 }
498
499 static int qat_alg_validate_key(int key_len, int *alg)
500 {
501 switch (key_len) {
502 case AES_KEYSIZE_128:
503 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
504 break;
505 case AES_KEYSIZE_192:
506 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
507 break;
508 case AES_KEYSIZE_256:
509 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
510 break;
511 default:
512 return -EINVAL;
513 }
514 return 0;
515 }
516
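/*
 * Split the authenc() key into cipher and auth parts, generate the salt
 * used by givencrypt and build both the encrypt and decrypt sessions.
 */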
517 static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
518 const uint8_t *key, unsigned int keylen)
519 {
520 struct crypto_authenc_keys keys;
521 int alg;
522
523 if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
524 return -EFAULT;
525
526 if (crypto_authenc_extractkeys(&keys, key, keylen))
527 goto bad_key;
528
529 if (qat_alg_validate_key(keys.enckeylen, &alg))
530 goto bad_key;
531
532 if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
533 goto error;
534
535 if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
536 goto error;
537
538 return 0;
539 bad_key:
540 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
541 return -EINVAL;
542 error:
543 return -EFAULT;
544 }
545
546 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
547 const uint8_t *key,
548 unsigned int keylen)
549 {
550 int alg;
551
552 if (qat_alg_validate_key(keylen, &alg))
553 goto bad_key;
554
555 qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
556 qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
557 return 0;
558 bad_key:
559 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
560 return -EINVAL;
561 }
562
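/*
 * The DMA-coherent content descriptors are allocated on the first
 * setkey; a rekey just clears and rebuilds the existing descriptors and
 * request templates.
 */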
563 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
564 unsigned int keylen)
565 {
566 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
567 struct device *dev;
568
569 spin_lock(&ctx->lock);
570 if (ctx->enc_cd) {
571 /* rekeying */
572 dev = &GET_DEV(ctx->inst->accel_dev);
573 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
574 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
575 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
576 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
577 } else {
578 /* new key */
579 int node = get_current_node();
580 struct qat_crypto_instance *inst =
581 qat_crypto_get_instance_node(node);
582 if (!inst) {
583 spin_unlock(&ctx->lock);
584 return -EINVAL;
585 }
586
587 dev = &GET_DEV(inst->accel_dev);
588 ctx->inst = inst;
589 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
590 &ctx->enc_cd_paddr,
591 GFP_ATOMIC);
592 if (!ctx->enc_cd) {
593 spin_unlock(&ctx->lock);
594 return -ENOMEM;
595 }
596 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
597 &ctx->dec_cd_paddr,
598 GFP_ATOMIC);
599 if (!ctx->dec_cd) {
600 spin_unlock(&ctx->lock);
601 goto out_free_enc;
602 }
603 }
604 spin_unlock(&ctx->lock);
605 if (qat_alg_aead_init_sessions(ctx, key, keylen))
606 goto out_free_all;
607
608 return 0;
609
610 out_free_all:
611 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
612 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
613 ctx->dec_cd, ctx->dec_cd_paddr);
614 ctx->dec_cd = NULL;
615 out_free_enc:
616 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
617 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
618 ctx->enc_cd, ctx->enc_cd_paddr);
619 ctx->enc_cd = NULL;
620 return -ENOMEM;
621 }
622
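/*
 * Unmap and free the buffer lists built by qat_alg_sgl_to_bufl(). For
 * out-of-place requests only the data buffers of the output list are
 * unmapped here, since the assoc and iv mappings are shared with the
 * input list.
 */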
623 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
624 struct qat_crypto_request *qat_req)
625 {
626 struct device *dev = &GET_DEV(inst->accel_dev);
627 struct qat_alg_buf_list *bl = qat_req->buf.bl;
628 struct qat_alg_buf_list *blout = qat_req->buf.blout;
629 dma_addr_t blp = qat_req->buf.blp;
630 dma_addr_t blpout = qat_req->buf.bloutp;
631 size_t sz = qat_req->buf.sz;
632 size_t sz_out = qat_req->buf.sz_out;
633 int i;
634
635 for (i = 0; i < bl->num_bufs; i++)
636 dma_unmap_single(dev, bl->bufers[i].addr,
637 bl->bufers[i].len, DMA_BIDIRECTIONAL);
638
639 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
640 kfree(bl);
641 if (blp != blpout) {
642 /* For out-of-place operations, unmap only the data buffers */
643 int bufless = blout->num_bufs - blout->num_mapped_bufs;
644
645 for (i = bufless; i < blout->num_bufs; i++) {
646 dma_unmap_single(dev, blout->bufers[i].addr,
647 blout->bufers[i].len,
648 DMA_BIDIRECTIONAL);
649 }
650 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
651 kfree(blout);
652 }
653 }
654
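/*
 * Flatten the assoc scatterlist, the iv and the source scatterlist into
 * a firmware buffer list and DMA map every buffer. For out-of-place
 * operations a second list is built for the destination, reusing the
 * assoc and iv mappings from the source list.
 */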
655 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
656 struct scatterlist *assoc, int assoclen,
657 struct scatterlist *sgl,
658 struct scatterlist *sglout, uint8_t *iv,
659 uint8_t ivlen,
660 struct qat_crypto_request *qat_req)
661 {
662 struct device *dev = &GET_DEV(inst->accel_dev);
663 int i, bufs = 0, sg_nctr = 0;
664 int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
665 struct qat_alg_buf_list *bufl;
666 struct qat_alg_buf_list *buflout = NULL;
667 dma_addr_t blp;
668 dma_addr_t bloutp = 0;
669 struct scatterlist *sg;
670 size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
671 ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
672
673 if (unlikely(!n))
674 return -EINVAL;
675
676 bufl = kzalloc_node(sz, GFP_ATOMIC,
677 dev_to_node(&GET_DEV(inst->accel_dev)));
678 if (unlikely(!bufl))
679 return -ENOMEM;
680
681 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
682 if (unlikely(dma_mapping_error(dev, blp)))
683 goto err;
684
685 for_each_sg(assoc, sg, assoc_n, i) {
686 if (!sg->length)
687 continue;
688
689 if (!(assoclen > 0))
690 break;
691
692 bufl->bufers[bufs].addr =
693 dma_map_single(dev, sg_virt(sg),
694 min_t(int, assoclen, sg->length),
695 DMA_BIDIRECTIONAL);
696 bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
697 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
698 goto err;
699 bufs++;
700 assoclen -= sg->length;
701 }
702
703 if (ivlen) {
704 bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
705 DMA_BIDIRECTIONAL);
706 bufl->bufers[bufs].len = ivlen;
707 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
708 goto err;
709 bufs++;
710 }
711
712 for_each_sg(sgl, sg, n, i) {
713 int y = sg_nctr + bufs;
714
715 if (!sg->length)
716 continue;
717
718 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
719 sg->length,
720 DMA_BIDIRECTIONAL);
721 bufl->bufers[y].len = sg->length;
722 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
723 goto err;
724 sg_nctr++;
725 }
726 bufl->num_bufs = sg_nctr + bufs;
727 qat_req->buf.bl = bufl;
728 qat_req->buf.blp = blp;
729 qat_req->buf.sz = sz;
730 /* Handle out of place operation */
731 if (sgl != sglout) {
732 struct qat_alg_buf *bufers;
733
734 n = sg_nents(sglout);
735 sz_out = sizeof(struct qat_alg_buf_list) +
736 ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
737 sg_nctr = 0;
738 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
739 dev_to_node(&GET_DEV(inst->accel_dev)));
740 if (unlikely(!buflout))
741 goto err;
742 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
743 if (unlikely(dma_mapping_error(dev, bloutp)))
744 goto err;
745 bufers = buflout->bufers;
746 /* For out-of-place operations, map only the data and
747 * reuse the assoc and iv mappings */
748 for (i = 0; i < bufs; i++) {
749 bufers[i].len = bufl->bufers[i].len;
750 bufers[i].addr = bufl->bufers[i].addr;
751 }
752 for_each_sg(sglout, sg, n, i) {
753 int y = sg_nctr + bufs;
754
755 if (!sg->length)
756 continue;
757
758 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
759 sg->length,
760 DMA_BIDIRECTIONAL);
761 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
762 goto err;
763 bufers[y].len = sg->length;
764 sg_nctr++;
765 }
766 buflout->num_bufs = sg_nctr + bufs;
767 buflout->num_mapped_bufs = sg_nctr;
768 qat_req->buf.blout = buflout;
769 qat_req->buf.bloutp = bloutp;
770 qat_req->buf.sz_out = sz_out;
771 } else {
772 /* Otherwise set the src and dst to the same address */
773 qat_req->buf.bloutp = qat_req->buf.blp;
774 qat_req->buf.sz_out = 0;
775 }
776 return 0;
777 err:
778 dev_err(dev, "Failed to map buf for dma\n");
779 sg_nctr = 0;
780 for (i = 0; i < n + bufs; i++)
781 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
782 dma_unmap_single(dev, bufl->bufers[i].addr,
783 bufl->bufers[i].len,
784 DMA_BIDIRECTIONAL);
785
786 if (!dma_mapping_error(dev, blp))
787 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
788 kfree(bufl);
789 if (sgl != sglout && buflout) {
790 n = sg_nents(sglout);
791 for (i = bufs; i < n + bufs; i++)
792 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
793 dma_unmap_single(dev, buflout->bufers[i].addr,
794 buflout->bufers[i].len,
795 DMA_BIDIRECTIONAL);
796 if (!dma_mapping_error(dev, bloutp))
797 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
798 kfree(buflout);
799 }
800 return -ENOMEM;
801 }
802
803 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
804 struct qat_crypto_request *qat_req)
805 {
806 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
807 struct qat_crypto_instance *inst = ctx->inst;
808 struct aead_request *areq = qat_req->aead_req;
809 uint8_t stat_field = qat_resp->comn_resp.comn_status;
810 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
811
812 qat_alg_free_bufl(inst, qat_req);
813 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
814 res = -EBADMSG;
815 areq->base.complete(&areq->base, res);
816 }
817
818 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
819 struct qat_crypto_request *qat_req)
820 {
821 struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
822 struct qat_crypto_instance *inst = ctx->inst;
823 struct ablkcipher_request *areq = qat_req->ablkcipher_req;
824 uint8_t stat_field = qat_resp->comn_resp.comn_status;
825 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
826
827 qat_alg_free_bufl(inst, qat_req);
828 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
829 res = -EINVAL;
830 areq->base.complete(&areq->base, res);
831 }
832
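/*
 * Response ring callback: opaque_data carries the originating request
 * back from the firmware, and the per-request callback completes the
 * crypto API request.
 */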
833 void qat_alg_callback(void *resp)
834 {
835 struct icp_qat_fw_la_resp *qat_resp = resp;
836 struct qat_crypto_request *qat_req =
837 (void *)(__force long)qat_resp->opaque_data;
838
839 qat_req->cb(qat_resp, qat_req);
840 }
841
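/*
 * AEAD decrypt: the firmware sees assoc || iv || ciphertext, so the
 * cipher starts after assoclen + iv and covers cryptlen minus the
 * digest, while the auth covers everything from offset 0 up to the end
 * of the ciphertext. The send is retried a few times on -EAGAIN before
 * giving up with -EBUSY.
 */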
842 static int qat_alg_aead_dec(struct aead_request *areq)
843 {
844 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
845 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
846 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
847 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
848 struct icp_qat_fw_la_cipher_req_params *cipher_param;
849 struct icp_qat_fw_la_auth_req_params *auth_param;
850 struct icp_qat_fw_la_bulk_req *msg;
851 int digst_size = crypto_aead_crt(aead_tfm)->authsize;
852 int ret, ctr = 0;
853
854 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
855 areq->src, areq->dst, areq->iv,
856 AES_BLOCK_SIZE, qat_req);
857 if (unlikely(ret))
858 return ret;
859
860 msg = &qat_req->req;
861 *msg = ctx->dec_fw_req;
862 qat_req->aead_ctx = ctx;
863 qat_req->aead_req = areq;
864 qat_req->cb = qat_aead_alg_callback;
865 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
866 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
867 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
868 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
869 cipher_param->cipher_length = areq->cryptlen - digst_size;
870 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
871 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
872 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
873 auth_param->auth_off = 0;
874 auth_param->auth_len = areq->assoclen +
875 cipher_param->cipher_length + AES_BLOCK_SIZE;
876 do {
877 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
878 } while (ret == -EAGAIN && ctr++ < 10);
879
880 if (ret == -EAGAIN) {
881 qat_alg_free_bufl(ctx->inst, qat_req);
882 return -EBUSY;
883 }
884 return -EINPROGRESS;
885 }
886
887 static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
888 int enc_iv)
889 {
890 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
891 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
892 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
893 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
894 struct icp_qat_fw_la_cipher_req_params *cipher_param;
895 struct icp_qat_fw_la_auth_req_params *auth_param;
896 struct icp_qat_fw_la_bulk_req *msg;
897 int ret, ctr = 0;
898
899 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
900 areq->src, areq->dst, iv, AES_BLOCK_SIZE,
901 qat_req);
902 if (unlikely(ret))
903 return ret;
904
905 msg = &qat_req->req;
906 *msg = ctx->enc_fw_req;
907 qat_req->aead_ctx = ctx;
908 qat_req->aead_req = areq;
909 qat_req->cb = qat_aead_alg_callback;
910 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
911 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
912 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
913 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
914 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
915
916 if (enc_iv) {
917 cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
918 cipher_param->cipher_offset = areq->assoclen;
919 } else {
920 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
921 cipher_param->cipher_length = areq->cryptlen;
922 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
923 }
924 auth_param->auth_off = 0;
925 auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
926
927 do {
928 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
929 } while (ret == -EAGAIN && ctr++ < 10);
930
931 if (ret == -EAGAIN) {
932 qat_alg_free_bufl(ctx->inst, qat_req);
933 return -EBUSY;
934 }
935 return -EINPROGRESS;
936 }
937
938 static int qat_alg_aead_enc(struct aead_request *areq)
939 {
940 return qat_alg_aead_enc_internal(areq, areq->iv, 0);
941 }
942
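/*
 * givencrypt: the generated IV is the per-tfm random salt with the
 * request sequence number stamped into its last eight bytes; the IV
 * block itself is then encrypted along with the payload.
 */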
943 static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
944 {
945 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
946 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
947 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
948 __be64 seq;
949
950 memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
951 seq = cpu_to_be64(req->seq);
952 memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
953 &seq, sizeof(uint64_t));
954 return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
955 }
956
957 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
958 const uint8_t *key,
959 unsigned int keylen)
960 {
961 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
962 struct device *dev;
963
964 spin_lock(&ctx->lock);
965 if (ctx->enc_cd) {
966 /* rekeying */
967 dev = &GET_DEV(ctx->inst->accel_dev);
968 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
969 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
970 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
971 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
972 } else {
973 /* new key */
974 int node = get_current_node();
975 struct qat_crypto_instance *inst =
976 qat_crypto_get_instance_node(node);
977 if (!inst) {
978 spin_unlock(&ctx->lock);
979 return -EINVAL;
980 }
981
982 dev = &GET_DEV(inst->accel_dev);
983 ctx->inst = inst;
984 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
985 &ctx->enc_cd_paddr,
986 GFP_ATOMIC);
987 if (!ctx->enc_cd) {
988 spin_unlock(&ctx->lock);
989 return -ENOMEM;
990 }
991 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
992 &ctx->dec_cd_paddr,
993 GFP_ATOMIC);
994 if (!ctx->dec_cd) {
995 spin_unlock(&ctx->lock);
996 goto out_free_enc;
997 }
998 }
999 spin_unlock(&ctx->lock);
1000 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
1001 goto out_free_all;
1002
1003 return 0;
1004
1005 out_free_all:
1006 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
1007 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
1008 ctx->dec_cd, ctx->dec_cd_paddr);
1009 ctx->dec_cd = NULL;
1010 out_free_enc:
1011 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1012 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
1013 ctx->enc_cd, ctx->enc_cd_paddr);
1014 ctx->enc_cd = NULL;
1015 return -ENOMEM;
1016 }
1017
1018 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
1019 {
1020 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1021 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1022 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1023 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1024 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1025 struct icp_qat_fw_la_bulk_req *msg;
1026 int ret, ctr = 0;
1027
1028 ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
1029 NULL, 0, qat_req);
1030 if (unlikely(ret))
1031 return ret;
1032
1033 msg = &qat_req->req;
1034 *msg = ctx->enc_fw_req;
1035 qat_req->ablkcipher_ctx = ctx;
1036 qat_req->ablkcipher_req = req;
1037 qat_req->cb = qat_ablkcipher_alg_callback;
1038 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1039 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1040 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1041 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1042 cipher_param->cipher_length = req->nbytes;
1043 cipher_param->cipher_offset = 0;
1044 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1045 do {
1046 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1047 } while (ret == -EAGAIN && ctr++ < 10);
1048
1049 if (ret == -EAGAIN) {
1050 qat_alg_free_bufl(ctx->inst, qat_req);
1051 return -EBUSY;
1052 }
1053 return -EINPROGRESS;
1054 }
1055
1056 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1057 {
1058 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1059 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1060 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1061 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1062 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1063 struct icp_qat_fw_la_bulk_req *msg;
1064 int ret, ctr = 0;
1065
1066 ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
1067 NULL, 0, qat_req);
1068 if (unlikely(ret))
1069 return ret;
1070
1071 msg = &qat_req->req;
1072 *msg = ctx->dec_fw_req;
1073 qat_req->ablkcipher_ctx = ctx;
1074 qat_req->ablkcipher_req = req;
1075 qat_req->cb = qat_ablkcipher_alg_callback;
1076 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1077 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1078 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1079 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1080 cipher_param->cipher_length = req->nbytes;
1081 cipher_param->cipher_offset = 0;
1082 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1083 do {
1084 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1085 } while (ret == -EAGAIN && ctr++ < 10);
1086
1087 if (ret == -EAGAIN) {
1088 qat_alg_free_bufl(ctx->inst, qat_req);
1089 return -EBUSY;
1090 }
1091 return -EINPROGRESS;
1092 }
1093
1094 static int qat_alg_aead_init(struct crypto_tfm *tfm,
1095 enum icp_qat_hw_auth_algo hash,
1096 const char *hash_name)
1097 {
1098 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1099
1100 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1101 if (IS_ERR(ctx->hash_tfm))
1102 return -EFAULT;
1103 spin_lock_init(&ctx->lock);
1104 ctx->qat_hash_alg = hash;
1105 crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
1106 sizeof(struct aead_request) +
1107 sizeof(struct qat_crypto_request));
1108 ctx->tfm = tfm;
1109 return 0;
1110 }
1111
1112 static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
1113 {
1114 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1115 }
1116
1117 static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
1118 {
1119 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1120 }
1121
1122 static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
1123 {
1124 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1125 }
1126
1127 static void qat_alg_aead_exit(struct crypto_tfm *tfm)
1128 {
1129 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1130 struct qat_crypto_instance *inst = ctx->inst;
1131 struct device *dev;
1132
1133 if (!IS_ERR(ctx->hash_tfm))
1134 crypto_free_shash(ctx->hash_tfm);
1135
1136 if (!inst)
1137 return;
1138
1139 dev = &GET_DEV(inst->accel_dev);
1140 if (ctx->enc_cd) {
1141 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1142 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1143 ctx->enc_cd, ctx->enc_cd_paddr);
1144 }
1145 if (ctx->dec_cd) {
1146 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1147 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1148 ctx->dec_cd, ctx->dec_cd_paddr);
1149 }
1150 qat_crypto_put_instance(inst);
1151 }
1152
1153 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1154 {
1155 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1156
1157 spin_lock_init(&ctx->lock);
1158 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
1159 sizeof(struct qat_crypto_request);
1160 ctx->tfm = tfm;
1161 return 0;
1162 }
1163
1164 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1165 {
1166 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1167 struct qat_crypto_instance *inst = ctx->inst;
1168 struct device *dev;
1169
1170 if (!inst)
1171 return;
1172
1173 dev = &GET_DEV(inst->accel_dev);
1174 if (ctx->enc_cd) {
1175 memset(ctx->enc_cd, 0,
1176 sizeof(struct icp_qat_hw_cipher_algo_blk));
1177 dma_free_coherent(dev,
1178 sizeof(struct icp_qat_hw_cipher_algo_blk),
1179 ctx->enc_cd, ctx->enc_cd_paddr);
1180 }
1181 if (ctx->dec_cd) {
1182 memset(ctx->dec_cd, 0,
1183 sizeof(struct icp_qat_hw_cipher_algo_blk));
1184 dma_free_coherent(dev,
1185 sizeof(struct icp_qat_hw_cipher_algo_blk),
1186 ctx->dec_cd, ctx->dec_cd_paddr);
1187 }
1188 qat_crypto_put_instance(inst);
1189 }
1190
1191 static struct crypto_alg qat_algs[] = { {
1192 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1193 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1194 .cra_priority = 4001,
1195 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1196 .cra_blocksize = AES_BLOCK_SIZE,
1197 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1198 .cra_alignmask = 0,
1199 .cra_type = &crypto_aead_type,
1200 .cra_module = THIS_MODULE,
1201 .cra_init = qat_alg_aead_sha1_init,
1202 .cra_exit = qat_alg_aead_exit,
1203 .cra_u = {
1204 .aead = {
1205 .setkey = qat_alg_aead_setkey,
1206 .decrypt = qat_alg_aead_dec,
1207 .encrypt = qat_alg_aead_enc,
1208 .givencrypt = qat_alg_aead_genivenc,
1209 .ivsize = AES_BLOCK_SIZE,
1210 .maxauthsize = SHA1_DIGEST_SIZE,
1211 },
1212 },
1213 }, {
1214 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1215 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1216 .cra_priority = 4001,
1217 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1218 .cra_blocksize = AES_BLOCK_SIZE,
1219 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1220 .cra_alignmask = 0,
1221 .cra_type = &crypto_aead_type,
1222 .cra_module = THIS_MODULE,
1223 .cra_init = qat_alg_aead_sha256_init,
1224 .cra_exit = qat_alg_aead_exit,
1225 .cra_u = {
1226 .aead = {
1227 .setkey = qat_alg_aead_setkey,
1228 .decrypt = qat_alg_aead_dec,
1229 .encrypt = qat_alg_aead_enc,
1230 .givencrypt = qat_alg_aead_genivenc,
1231 .ivsize = AES_BLOCK_SIZE,
1232 .maxauthsize = SHA256_DIGEST_SIZE,
1233 },
1234 },
1235 }, {
1236 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1237 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1238 .cra_priority = 4001,
1239 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1240 .cra_blocksize = AES_BLOCK_SIZE,
1241 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1242 .cra_alignmask = 0,
1243 .cra_type = &crypto_aead_type,
1244 .cra_module = THIS_MODULE,
1245 .cra_init = qat_alg_aead_sha512_init,
1246 .cra_exit = qat_alg_aead_exit,
1247 .cra_u = {
1248 .aead = {
1249 .setkey = qat_alg_aead_setkey,
1250 .decrypt = qat_alg_aead_dec,
1251 .encrypt = qat_alg_aead_enc,
1252 .givencrypt = qat_alg_aead_genivenc,
1253 .ivsize = AES_BLOCK_SIZE,
1254 .maxauthsize = SHA512_DIGEST_SIZE,
1255 },
1256 },
1257 }, {
1258 .cra_name = "cbc(aes)",
1259 .cra_driver_name = "qat_aes_cbc",
1260 .cra_priority = 4001,
1261 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1262 .cra_blocksize = AES_BLOCK_SIZE,
1263 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1264 .cra_alignmask = 0,
1265 .cra_type = &crypto_ablkcipher_type,
1266 .cra_module = THIS_MODULE,
1267 .cra_init = qat_alg_ablkcipher_init,
1268 .cra_exit = qat_alg_ablkcipher_exit,
1269 .cra_u = {
1270 .ablkcipher = {
1271 .setkey = qat_alg_ablkcipher_setkey,
1272 .decrypt = qat_alg_ablkcipher_decrypt,
1273 .encrypt = qat_alg_ablkcipher_encrypt,
1274 .min_keysize = AES_MIN_KEY_SIZE,
1275 .max_keysize = AES_MAX_KEY_SIZE,
1276 .ivsize = AES_BLOCK_SIZE,
1277 },
1278 },
1279 } };
1280
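/*
 * active_dev counts accelerator devices with crypto enabled: the
 * algorithms are registered when the first device comes up and
 * unregistered again when the last one goes away.
 */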
1281 int qat_algs_register(void)
1282 {
1283 if (atomic_add_return(1, &active_dev) == 1) {
1284 int i;
1285
1286 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1287 qat_algs[i].cra_flags =
1288 (qat_algs[i].cra_type == &crypto_aead_type) ?
1289 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
1290 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1291
1292 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1293 }
1294 return 0;
1295 }
1296
1297 int qat_algs_unregister(void)
1298 {
1299 if (atomic_sub_return(1, &active_dev) == 0)
1300 return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1301 return 0;
1302 }
1303
1304 int qat_algs_init(void)
1305 {
1306 atomic_set(&active_dev, 0);
1307 crypto_get_default_rng();
1308 return 0;
1309 }
1310
1311 void qat_algs_exit(void)
1312 {
1313 crypto_put_default_rng();
1314 }