2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/internal/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
/* Build the HW cipher-config word for AES-CBC encryption (no key convert). */
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

/* Build the HW cipher-config word for AES-CBC decryption (key convert
 * requested so HW derives the decryption round keys). */
#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)
76 static atomic_t active_dev
;
84 struct qat_alg_buf_list
{
87 uint32_t num_mapped_bufs
;
88 struct qat_alg_buf bufers
[];
89 } __packed
__aligned(64);
91 /* Common content descriptor */
94 struct qat_enc
{ /* Encrypt content desc */
95 struct icp_qat_hw_cipher_algo_blk cipher
;
96 struct icp_qat_hw_auth_algo_blk hash
;
98 struct qat_dec
{ /* Decrytp content desc */
99 struct icp_qat_hw_auth_algo_blk hash
;
100 struct icp_qat_hw_cipher_algo_blk cipher
;
105 struct qat_alg_aead_ctx
{
106 struct qat_alg_cd
*enc_cd
;
107 struct qat_alg_cd
*dec_cd
;
108 dma_addr_t enc_cd_paddr
;
109 dma_addr_t dec_cd_paddr
;
110 struct icp_qat_fw_la_bulk_req enc_fw_req
;
111 struct icp_qat_fw_la_bulk_req dec_fw_req
;
112 struct crypto_shash
*hash_tfm
;
113 enum icp_qat_hw_auth_algo qat_hash_alg
;
114 struct qat_crypto_instance
*inst
;
115 struct crypto_tfm
*tfm
;
116 uint8_t salt
[AES_BLOCK_SIZE
];
117 spinlock_t lock
; /* protects qat_alg_aead_ctx struct */
120 struct qat_alg_ablkcipher_ctx
{
121 struct icp_qat_hw_cipher_algo_blk
*enc_cd
;
122 struct icp_qat_hw_cipher_algo_blk
*dec_cd
;
123 dma_addr_t enc_cd_paddr
;
124 dma_addr_t dec_cd_paddr
;
125 struct icp_qat_fw_la_bulk_req enc_fw_req
;
126 struct icp_qat_fw_la_bulk_req dec_fw_req
;
127 struct qat_crypto_instance
*inst
;
128 struct crypto_tfm
*tfm
;
129 spinlock_t lock
; /* protects qat_alg_ablkcipher_ctx struct */
132 static int get_current_node(void)
134 return cpu_data(current_thread_info()->cpu
).phys_proc_id
;
137 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg
)
139 switch (qat_hash_alg
) {
140 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
141 return ICP_QAT_HW_SHA1_STATE1_SZ
;
142 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
143 return ICP_QAT_HW_SHA256_STATE1_SZ
;
144 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
145 return ICP_QAT_HW_SHA512_STATE1_SZ
;
152 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk
*hash
,
153 struct qat_alg_aead_ctx
*ctx
,
154 const uint8_t *auth_key
,
155 unsigned int auth_keylen
)
157 SHASH_DESC_ON_STACK(shash
, ctx
->hash_tfm
);
158 struct sha1_state sha1
;
159 struct sha256_state sha256
;
160 struct sha512_state sha512
;
161 int block_size
= crypto_shash_blocksize(ctx
->hash_tfm
);
162 int digest_size
= crypto_shash_digestsize(ctx
->hash_tfm
);
163 char ipad
[block_size
];
164 char opad
[block_size
];
165 __be32
*hash_state_out
;
166 __be64
*hash512_state_out
;
169 memset(ipad
, 0, block_size
);
170 memset(opad
, 0, block_size
);
171 shash
->tfm
= ctx
->hash_tfm
;
174 if (auth_keylen
> block_size
) {
175 int ret
= crypto_shash_digest(shash
, auth_key
,
180 memcpy(opad
, ipad
, digest_size
);
182 memcpy(ipad
, auth_key
, auth_keylen
);
183 memcpy(opad
, auth_key
, auth_keylen
);
186 for (i
= 0; i
< block_size
; i
++) {
187 char *ipad_ptr
= ipad
+ i
;
188 char *opad_ptr
= opad
+ i
;
193 if (crypto_shash_init(shash
))
196 if (crypto_shash_update(shash
, ipad
, block_size
))
199 hash_state_out
= (__be32
*)hash
->sha
.state1
;
200 hash512_state_out
= (__be64
*)hash_state_out
;
202 switch (ctx
->qat_hash_alg
) {
203 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
204 if (crypto_shash_export(shash
, &sha1
))
206 for (i
= 0; i
< digest_size
>> 2; i
++, hash_state_out
++)
207 *hash_state_out
= cpu_to_be32(*(sha1
.state
+ i
));
209 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
210 if (crypto_shash_export(shash
, &sha256
))
212 for (i
= 0; i
< digest_size
>> 2; i
++, hash_state_out
++)
213 *hash_state_out
= cpu_to_be32(*(sha256
.state
+ i
));
215 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
216 if (crypto_shash_export(shash
, &sha512
))
218 for (i
= 0; i
< digest_size
>> 3; i
++, hash512_state_out
++)
219 *hash512_state_out
= cpu_to_be64(*(sha512
.state
+ i
));
225 if (crypto_shash_init(shash
))
228 if (crypto_shash_update(shash
, opad
, block_size
))
231 offset
= round_up(qat_get_inter_state_size(ctx
->qat_hash_alg
), 8);
232 hash_state_out
= (__be32
*)(hash
->sha
.state1
+ offset
);
233 hash512_state_out
= (__be64
*)hash_state_out
;
235 switch (ctx
->qat_hash_alg
) {
236 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
237 if (crypto_shash_export(shash
, &sha1
))
239 for (i
= 0; i
< digest_size
>> 2; i
++, hash_state_out
++)
240 *hash_state_out
= cpu_to_be32(*(sha1
.state
+ i
));
242 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
243 if (crypto_shash_export(shash
, &sha256
))
245 for (i
= 0; i
< digest_size
>> 2; i
++, hash_state_out
++)
246 *hash_state_out
= cpu_to_be32(*(sha256
.state
+ i
));
248 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
249 if (crypto_shash_export(shash
, &sha512
))
251 for (i
= 0; i
< digest_size
>> 3; i
++, hash512_state_out
++)
252 *hash512_state_out
= cpu_to_be64(*(sha512
.state
+ i
));
257 memzero_explicit(ipad
, block_size
);
258 memzero_explicit(opad
, block_size
);
262 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr
*header
)
265 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET
);
266 header
->service_type
= ICP_QAT_FW_COMN_REQ_CPM_FW_LA
;
267 header
->comn_req_flags
=
268 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR
,
269 QAT_COMN_PTR_TYPE_SGL
);
270 ICP_QAT_FW_LA_PARTIAL_SET(header
->serv_specif_flags
,
271 ICP_QAT_FW_LA_PARTIAL_NONE
);
272 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header
->serv_specif_flags
,
273 ICP_QAT_FW_CIPH_IV_16BYTE_DATA
);
274 ICP_QAT_FW_LA_PROTO_SET(header
->serv_specif_flags
,
275 ICP_QAT_FW_LA_NO_PROTO
);
276 ICP_QAT_FW_LA_UPDATE_STATE_SET(header
->serv_specif_flags
,
277 ICP_QAT_FW_LA_NO_UPDATE_STATE
);
280 static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx
*ctx
,
282 struct crypto_authenc_keys
*keys
)
284 struct crypto_aead
*aead_tfm
= __crypto_aead_cast(ctx
->tfm
);
285 unsigned int digestsize
= crypto_aead_crt(aead_tfm
)->authsize
;
286 struct qat_enc
*enc_ctx
= &ctx
->enc_cd
->qat_enc_cd
;
287 struct icp_qat_hw_cipher_algo_blk
*cipher
= &enc_ctx
->cipher
;
288 struct icp_qat_hw_auth_algo_blk
*hash
=
289 (struct icp_qat_hw_auth_algo_blk
*)((char *)enc_ctx
+
290 sizeof(struct icp_qat_hw_auth_setup
) + keys
->enckeylen
);
291 struct icp_qat_fw_la_bulk_req
*req_tmpl
= &ctx
->enc_fw_req
;
292 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req_tmpl
->cd_pars
;
293 struct icp_qat_fw_comn_req_hdr
*header
= &req_tmpl
->comn_hdr
;
294 void *ptr
= &req_tmpl
->cd_ctrl
;
295 struct icp_qat_fw_cipher_cd_ctrl_hdr
*cipher_cd_ctrl
= ptr
;
296 struct icp_qat_fw_auth_cd_ctrl_hdr
*hash_cd_ctrl
= ptr
;
299 cipher
->aes
.cipher_config
.val
= QAT_AES_HW_CONFIG_CBC_ENC(alg
);
300 memcpy(cipher
->aes
.key
, keys
->enckey
, keys
->enckeylen
);
301 hash
->sha
.inner_setup
.auth_config
.config
=
302 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1
,
303 ctx
->qat_hash_alg
, digestsize
);
304 hash
->sha
.inner_setup
.auth_counter
.counter
=
305 cpu_to_be32(crypto_shash_blocksize(ctx
->hash_tfm
));
307 if (qat_alg_do_precomputes(hash
, ctx
, keys
->authkey
, keys
->authkeylen
))
311 qat_alg_init_common_hdr(header
);
312 header
->service_cmd_id
= ICP_QAT_FW_LA_CMD_CIPHER_HASH
;
313 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header
->serv_specif_flags
,
314 ICP_QAT_FW_LA_DIGEST_IN_BUFFER
);
315 ICP_QAT_FW_LA_RET_AUTH_SET(header
->serv_specif_flags
,
316 ICP_QAT_FW_LA_RET_AUTH_RES
);
317 ICP_QAT_FW_LA_CMP_AUTH_SET(header
->serv_specif_flags
,
318 ICP_QAT_FW_LA_NO_CMP_AUTH_RES
);
319 cd_pars
->u
.s
.content_desc_addr
= ctx
->enc_cd_paddr
;
320 cd_pars
->u
.s
.content_desc_params_sz
= sizeof(struct qat_alg_cd
) >> 3;
322 /* Cipher CD config setup */
323 cipher_cd_ctrl
->cipher_key_sz
= keys
->enckeylen
>> 3;
324 cipher_cd_ctrl
->cipher_state_sz
= AES_BLOCK_SIZE
>> 3;
325 cipher_cd_ctrl
->cipher_cfg_offset
= 0;
326 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl
, ICP_QAT_FW_SLICE_CIPHER
);
327 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl
, ICP_QAT_FW_SLICE_AUTH
);
328 /* Auth CD config setup */
329 hash_cd_ctrl
->hash_cfg_offset
= ((char *)hash
- (char *)cipher
) >> 3;
330 hash_cd_ctrl
->hash_flags
= ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED
;
331 hash_cd_ctrl
->inner_res_sz
= digestsize
;
332 hash_cd_ctrl
->final_sz
= digestsize
;
334 switch (ctx
->qat_hash_alg
) {
335 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
336 hash_cd_ctrl
->inner_state1_sz
=
337 round_up(ICP_QAT_HW_SHA1_STATE1_SZ
, 8);
338 hash_cd_ctrl
->inner_state2_sz
=
339 round_up(ICP_QAT_HW_SHA1_STATE2_SZ
, 8);
341 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
342 hash_cd_ctrl
->inner_state1_sz
= ICP_QAT_HW_SHA256_STATE1_SZ
;
343 hash_cd_ctrl
->inner_state2_sz
= ICP_QAT_HW_SHA256_STATE2_SZ
;
345 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
346 hash_cd_ctrl
->inner_state1_sz
= ICP_QAT_HW_SHA512_STATE1_SZ
;
347 hash_cd_ctrl
->inner_state2_sz
= ICP_QAT_HW_SHA512_STATE2_SZ
;
352 hash_cd_ctrl
->inner_state2_offset
= hash_cd_ctrl
->hash_cfg_offset
+
353 ((sizeof(struct icp_qat_hw_auth_setup
) +
354 round_up(hash_cd_ctrl
->inner_state1_sz
, 8)) >> 3);
355 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl
, ICP_QAT_FW_SLICE_AUTH
);
356 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl
, ICP_QAT_FW_SLICE_DRAM_WR
);
360 static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx
*ctx
,
362 struct crypto_authenc_keys
*keys
)
364 struct crypto_aead
*aead_tfm
= __crypto_aead_cast(ctx
->tfm
);
365 unsigned int digestsize
= crypto_aead_crt(aead_tfm
)->authsize
;
366 struct qat_dec
*dec_ctx
= &ctx
->dec_cd
->qat_dec_cd
;
367 struct icp_qat_hw_auth_algo_blk
*hash
= &dec_ctx
->hash
;
368 struct icp_qat_hw_cipher_algo_blk
*cipher
=
369 (struct icp_qat_hw_cipher_algo_blk
*)((char *)dec_ctx
+
370 sizeof(struct icp_qat_hw_auth_setup
) +
371 roundup(crypto_shash_digestsize(ctx
->hash_tfm
), 8) * 2);
372 struct icp_qat_fw_la_bulk_req
*req_tmpl
= &ctx
->dec_fw_req
;
373 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req_tmpl
->cd_pars
;
374 struct icp_qat_fw_comn_req_hdr
*header
= &req_tmpl
->comn_hdr
;
375 void *ptr
= &req_tmpl
->cd_ctrl
;
376 struct icp_qat_fw_cipher_cd_ctrl_hdr
*cipher_cd_ctrl
= ptr
;
377 struct icp_qat_fw_auth_cd_ctrl_hdr
*hash_cd_ctrl
= ptr
;
378 struct icp_qat_fw_la_auth_req_params
*auth_param
=
379 (struct icp_qat_fw_la_auth_req_params
*)
380 ((char *)&req_tmpl
->serv_specif_rqpars
+
381 sizeof(struct icp_qat_fw_la_cipher_req_params
));
384 cipher
->aes
.cipher_config
.val
= QAT_AES_HW_CONFIG_CBC_DEC(alg
);
385 memcpy(cipher
->aes
.key
, keys
->enckey
, keys
->enckeylen
);
386 hash
->sha
.inner_setup
.auth_config
.config
=
387 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1
,
390 hash
->sha
.inner_setup
.auth_counter
.counter
=
391 cpu_to_be32(crypto_shash_blocksize(ctx
->hash_tfm
));
393 if (qat_alg_do_precomputes(hash
, ctx
, keys
->authkey
, keys
->authkeylen
))
397 qat_alg_init_common_hdr(header
);
398 header
->service_cmd_id
= ICP_QAT_FW_LA_CMD_HASH_CIPHER
;
399 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header
->serv_specif_flags
,
400 ICP_QAT_FW_LA_DIGEST_IN_BUFFER
);
401 ICP_QAT_FW_LA_RET_AUTH_SET(header
->serv_specif_flags
,
402 ICP_QAT_FW_LA_NO_RET_AUTH_RES
);
403 ICP_QAT_FW_LA_CMP_AUTH_SET(header
->serv_specif_flags
,
404 ICP_QAT_FW_LA_CMP_AUTH_RES
);
405 cd_pars
->u
.s
.content_desc_addr
= ctx
->dec_cd_paddr
;
406 cd_pars
->u
.s
.content_desc_params_sz
= sizeof(struct qat_alg_cd
) >> 3;
408 /* Cipher CD config setup */
409 cipher_cd_ctrl
->cipher_key_sz
= keys
->enckeylen
>> 3;
410 cipher_cd_ctrl
->cipher_state_sz
= AES_BLOCK_SIZE
>> 3;
411 cipher_cd_ctrl
->cipher_cfg_offset
=
412 (sizeof(struct icp_qat_hw_auth_setup
) +
413 roundup(crypto_shash_digestsize(ctx
->hash_tfm
), 8) * 2) >> 3;
414 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl
, ICP_QAT_FW_SLICE_CIPHER
);
415 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl
, ICP_QAT_FW_SLICE_DRAM_WR
);
417 /* Auth CD config setup */
418 hash_cd_ctrl
->hash_cfg_offset
= 0;
419 hash_cd_ctrl
->hash_flags
= ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED
;
420 hash_cd_ctrl
->inner_res_sz
= digestsize
;
421 hash_cd_ctrl
->final_sz
= digestsize
;
423 switch (ctx
->qat_hash_alg
) {
424 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
425 hash_cd_ctrl
->inner_state1_sz
=
426 round_up(ICP_QAT_HW_SHA1_STATE1_SZ
, 8);
427 hash_cd_ctrl
->inner_state2_sz
=
428 round_up(ICP_QAT_HW_SHA1_STATE2_SZ
, 8);
430 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
431 hash_cd_ctrl
->inner_state1_sz
= ICP_QAT_HW_SHA256_STATE1_SZ
;
432 hash_cd_ctrl
->inner_state2_sz
= ICP_QAT_HW_SHA256_STATE2_SZ
;
434 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
435 hash_cd_ctrl
->inner_state1_sz
= ICP_QAT_HW_SHA512_STATE1_SZ
;
436 hash_cd_ctrl
->inner_state2_sz
= ICP_QAT_HW_SHA512_STATE2_SZ
;
442 hash_cd_ctrl
->inner_state2_offset
= hash_cd_ctrl
->hash_cfg_offset
+
443 ((sizeof(struct icp_qat_hw_auth_setup
) +
444 round_up(hash_cd_ctrl
->inner_state1_sz
, 8)) >> 3);
445 auth_param
->auth_res_sz
= digestsize
;
446 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl
, ICP_QAT_FW_SLICE_AUTH
);
447 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl
, ICP_QAT_FW_SLICE_CIPHER
);
451 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx
*ctx
,
452 struct icp_qat_fw_la_bulk_req
*req
,
453 struct icp_qat_hw_cipher_algo_blk
*cd
,
454 const uint8_t *key
, unsigned int keylen
)
456 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req
->cd_pars
;
457 struct icp_qat_fw_comn_req_hdr
*header
= &req
->comn_hdr
;
458 struct icp_qat_fw_cipher_cd_ctrl_hdr
*cd_ctrl
= (void *)&req
->cd_ctrl
;
460 memcpy(cd
->aes
.key
, key
, keylen
);
461 qat_alg_init_common_hdr(header
);
462 header
->service_cmd_id
= ICP_QAT_FW_LA_CMD_CIPHER
;
463 cd_pars
->u
.s
.content_desc_params_sz
=
464 sizeof(struct icp_qat_hw_cipher_algo_blk
) >> 3;
465 /* Cipher CD config setup */
466 cd_ctrl
->cipher_key_sz
= keylen
>> 3;
467 cd_ctrl
->cipher_state_sz
= AES_BLOCK_SIZE
>> 3;
468 cd_ctrl
->cipher_cfg_offset
= 0;
469 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl
, ICP_QAT_FW_SLICE_CIPHER
);
470 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl
, ICP_QAT_FW_SLICE_DRAM_WR
);
473 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx
*ctx
,
474 int alg
, const uint8_t *key
,
477 struct icp_qat_hw_cipher_algo_blk
*enc_cd
= ctx
->enc_cd
;
478 struct icp_qat_fw_la_bulk_req
*req
= &ctx
->enc_fw_req
;
479 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req
->cd_pars
;
481 qat_alg_ablkcipher_init_com(ctx
, req
, enc_cd
, key
, keylen
);
482 cd_pars
->u
.s
.content_desc_addr
= ctx
->enc_cd_paddr
;
483 enc_cd
->aes
.cipher_config
.val
= QAT_AES_HW_CONFIG_CBC_ENC(alg
);
486 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx
*ctx
,
487 int alg
, const uint8_t *key
,
490 struct icp_qat_hw_cipher_algo_blk
*dec_cd
= ctx
->dec_cd
;
491 struct icp_qat_fw_la_bulk_req
*req
= &ctx
->dec_fw_req
;
492 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req
->cd_pars
;
494 qat_alg_ablkcipher_init_com(ctx
, req
, dec_cd
, key
, keylen
);
495 cd_pars
->u
.s
.content_desc_addr
= ctx
->dec_cd_paddr
;
496 dec_cd
->aes
.cipher_config
.val
= QAT_AES_HW_CONFIG_CBC_DEC(alg
);
499 static int qat_alg_validate_key(int key_len
, int *alg
)
502 case AES_KEYSIZE_128
:
503 *alg
= ICP_QAT_HW_CIPHER_ALGO_AES128
;
505 case AES_KEYSIZE_192
:
506 *alg
= ICP_QAT_HW_CIPHER_ALGO_AES192
;
508 case AES_KEYSIZE_256
:
509 *alg
= ICP_QAT_HW_CIPHER_ALGO_AES256
;
517 static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx
*ctx
,
518 const uint8_t *key
, unsigned int keylen
)
520 struct crypto_authenc_keys keys
;
523 if (crypto_rng_get_bytes(crypto_default_rng
, ctx
->salt
, AES_BLOCK_SIZE
))
526 if (crypto_authenc_extractkeys(&keys
, key
, keylen
))
529 if (qat_alg_validate_key(keys
.enckeylen
, &alg
))
532 if (qat_alg_aead_init_enc_session(ctx
, alg
, &keys
))
535 if (qat_alg_aead_init_dec_session(ctx
, alg
, &keys
))
540 crypto_tfm_set_flags(ctx
->tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
546 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx
*ctx
,
552 if (qat_alg_validate_key(keylen
, &alg
))
555 qat_alg_ablkcipher_init_enc(ctx
, alg
, key
, keylen
);
556 qat_alg_ablkcipher_init_dec(ctx
, alg
, key
, keylen
);
559 crypto_tfm_set_flags(ctx
->tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
563 static int qat_alg_aead_setkey(struct crypto_aead
*tfm
, const uint8_t *key
,
566 struct qat_alg_aead_ctx
*ctx
= crypto_aead_ctx(tfm
);
569 spin_lock(&ctx
->lock
);
572 dev
= &GET_DEV(ctx
->inst
->accel_dev
);
573 memset(ctx
->enc_cd
, 0, sizeof(*ctx
->enc_cd
));
574 memset(ctx
->dec_cd
, 0, sizeof(*ctx
->dec_cd
));
575 memset(&ctx
->enc_fw_req
, 0, sizeof(ctx
->enc_fw_req
));
576 memset(&ctx
->dec_fw_req
, 0, sizeof(ctx
->dec_fw_req
));
579 int node
= get_current_node();
580 struct qat_crypto_instance
*inst
=
581 qat_crypto_get_instance_node(node
);
583 spin_unlock(&ctx
->lock
);
587 dev
= &GET_DEV(inst
->accel_dev
);
589 ctx
->enc_cd
= dma_zalloc_coherent(dev
, sizeof(*ctx
->enc_cd
),
593 spin_unlock(&ctx
->lock
);
596 ctx
->dec_cd
= dma_zalloc_coherent(dev
, sizeof(*ctx
->dec_cd
),
600 spin_unlock(&ctx
->lock
);
604 spin_unlock(&ctx
->lock
);
605 if (qat_alg_aead_init_sessions(ctx
, key
, keylen
))
611 memset(ctx
->dec_cd
, 0, sizeof(struct qat_alg_cd
));
612 dma_free_coherent(dev
, sizeof(struct qat_alg_cd
),
613 ctx
->dec_cd
, ctx
->dec_cd_paddr
);
616 memset(ctx
->enc_cd
, 0, sizeof(struct qat_alg_cd
));
617 dma_free_coherent(dev
, sizeof(struct qat_alg_cd
),
618 ctx
->enc_cd
, ctx
->enc_cd_paddr
);
623 static void qat_alg_free_bufl(struct qat_crypto_instance
*inst
,
624 struct qat_crypto_request
*qat_req
)
626 struct device
*dev
= &GET_DEV(inst
->accel_dev
);
627 struct qat_alg_buf_list
*bl
= qat_req
->buf
.bl
;
628 struct qat_alg_buf_list
*blout
= qat_req
->buf
.blout
;
629 dma_addr_t blp
= qat_req
->buf
.blp
;
630 dma_addr_t blpout
= qat_req
->buf
.bloutp
;
631 size_t sz
= qat_req
->buf
.sz
;
632 size_t sz_out
= qat_req
->buf
.sz_out
;
635 for (i
= 0; i
< bl
->num_bufs
; i
++)
636 dma_unmap_single(dev
, bl
->bufers
[i
].addr
,
637 bl
->bufers
[i
].len
, DMA_BIDIRECTIONAL
);
639 dma_unmap_single(dev
, blp
, sz
, DMA_TO_DEVICE
);
642 /* If out of place operation dma unmap only data */
643 int bufless
= blout
->num_bufs
- blout
->num_mapped_bufs
;
645 for (i
= bufless
; i
< blout
->num_bufs
; i
++) {
646 dma_unmap_single(dev
, blout
->bufers
[i
].addr
,
647 blout
->bufers
[i
].len
,
650 dma_unmap_single(dev
, blpout
, sz_out
, DMA_TO_DEVICE
);
655 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance
*inst
,
656 struct scatterlist
*assoc
, int assoclen
,
657 struct scatterlist
*sgl
,
658 struct scatterlist
*sglout
, uint8_t *iv
,
660 struct qat_crypto_request
*qat_req
)
662 struct device
*dev
= &GET_DEV(inst
->accel_dev
);
663 int i
, bufs
= 0, sg_nctr
= 0;
664 int n
= sg_nents(sgl
), assoc_n
= sg_nents(assoc
);
665 struct qat_alg_buf_list
*bufl
;
666 struct qat_alg_buf_list
*buflout
= NULL
;
668 dma_addr_t bloutp
= 0;
669 struct scatterlist
*sg
;
670 size_t sz_out
, sz
= sizeof(struct qat_alg_buf_list
) +
671 ((1 + n
+ assoc_n
) * sizeof(struct qat_alg_buf
));
676 bufl
= kzalloc_node(sz
, GFP_ATOMIC
,
677 dev_to_node(&GET_DEV(inst
->accel_dev
)));
681 blp
= dma_map_single(dev
, bufl
, sz
, DMA_TO_DEVICE
);
682 if (unlikely(dma_mapping_error(dev
, blp
)))
685 for_each_sg(assoc
, sg
, assoc_n
, i
) {
692 bufl
->bufers
[bufs
].addr
=
693 dma_map_single(dev
, sg_virt(sg
),
694 min_t(int, assoclen
, sg
->length
),
696 bufl
->bufers
[bufs
].len
= min_t(int, assoclen
, sg
->length
);
697 if (unlikely(dma_mapping_error(dev
, bufl
->bufers
[bufs
].addr
)))
700 assoclen
-= sg
->length
;
704 bufl
->bufers
[bufs
].addr
= dma_map_single(dev
, iv
, ivlen
,
706 bufl
->bufers
[bufs
].len
= ivlen
;
707 if (unlikely(dma_mapping_error(dev
, bufl
->bufers
[bufs
].addr
)))
712 for_each_sg(sgl
, sg
, n
, i
) {
713 int y
= sg_nctr
+ bufs
;
718 bufl
->bufers
[y
].addr
= dma_map_single(dev
, sg_virt(sg
),
721 bufl
->bufers
[y
].len
= sg
->length
;
722 if (unlikely(dma_mapping_error(dev
, bufl
->bufers
[y
].addr
)))
726 bufl
->num_bufs
= sg_nctr
+ bufs
;
727 qat_req
->buf
.bl
= bufl
;
728 qat_req
->buf
.blp
= blp
;
729 qat_req
->buf
.sz
= sz
;
730 /* Handle out of place operation */
732 struct qat_alg_buf
*bufers
;
734 n
= sg_nents(sglout
);
735 sz_out
= sizeof(struct qat_alg_buf_list
) +
736 ((1 + n
+ assoc_n
) * sizeof(struct qat_alg_buf
));
738 buflout
= kzalloc_node(sz_out
, GFP_ATOMIC
,
739 dev_to_node(&GET_DEV(inst
->accel_dev
)));
740 if (unlikely(!buflout
))
742 bloutp
= dma_map_single(dev
, buflout
, sz_out
, DMA_TO_DEVICE
);
743 if (unlikely(dma_mapping_error(dev
, bloutp
)))
745 bufers
= buflout
->bufers
;
746 /* For out of place operation dma map only data and
747 * reuse assoc mapping and iv */
748 for (i
= 0; i
< bufs
; i
++) {
749 bufers
[i
].len
= bufl
->bufers
[i
].len
;
750 bufers
[i
].addr
= bufl
->bufers
[i
].addr
;
752 for_each_sg(sglout
, sg
, n
, i
) {
753 int y
= sg_nctr
+ bufs
;
758 bufers
[y
].addr
= dma_map_single(dev
, sg_virt(sg
),
761 if (unlikely(dma_mapping_error(dev
, bufers
[y
].addr
)))
763 bufers
[y
].len
= sg
->length
;
766 buflout
->num_bufs
= sg_nctr
+ bufs
;
767 buflout
->num_mapped_bufs
= sg_nctr
;
768 qat_req
->buf
.blout
= buflout
;
769 qat_req
->buf
.bloutp
= bloutp
;
770 qat_req
->buf
.sz_out
= sz_out
;
772 /* Otherwise set the src and dst to the same address */
773 qat_req
->buf
.bloutp
= qat_req
->buf
.blp
;
774 qat_req
->buf
.sz_out
= 0;
778 dev_err(dev
, "Failed to map buf for dma\n");
780 for (i
= 0; i
< n
+ bufs
; i
++)
781 if (!dma_mapping_error(dev
, bufl
->bufers
[i
].addr
))
782 dma_unmap_single(dev
, bufl
->bufers
[i
].addr
,
786 if (!dma_mapping_error(dev
, blp
))
787 dma_unmap_single(dev
, blp
, sz
, DMA_TO_DEVICE
);
789 if (sgl
!= sglout
&& buflout
) {
790 n
= sg_nents(sglout
);
791 for (i
= bufs
; i
< n
+ bufs
; i
++)
792 if (!dma_mapping_error(dev
, buflout
->bufers
[i
].addr
))
793 dma_unmap_single(dev
, buflout
->bufers
[i
].addr
,
794 buflout
->bufers
[i
].len
,
796 if (!dma_mapping_error(dev
, bloutp
))
797 dma_unmap_single(dev
, bloutp
, sz_out
, DMA_TO_DEVICE
);
803 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp
*qat_resp
,
804 struct qat_crypto_request
*qat_req
)
806 struct qat_alg_aead_ctx
*ctx
= qat_req
->aead_ctx
;
807 struct qat_crypto_instance
*inst
= ctx
->inst
;
808 struct aead_request
*areq
= qat_req
->aead_req
;
809 uint8_t stat_filed
= qat_resp
->comn_resp
.comn_status
;
810 int res
= 0, qat_res
= ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed
);
812 qat_alg_free_bufl(inst
, qat_req
);
813 if (unlikely(qat_res
!= ICP_QAT_FW_COMN_STATUS_FLAG_OK
))
815 areq
->base
.complete(&areq
->base
, res
);
818 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp
*qat_resp
,
819 struct qat_crypto_request
*qat_req
)
821 struct qat_alg_ablkcipher_ctx
*ctx
= qat_req
->ablkcipher_ctx
;
822 struct qat_crypto_instance
*inst
= ctx
->inst
;
823 struct ablkcipher_request
*areq
= qat_req
->ablkcipher_req
;
824 uint8_t stat_filed
= qat_resp
->comn_resp
.comn_status
;
825 int res
= 0, qat_res
= ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed
);
827 qat_alg_free_bufl(inst
, qat_req
);
828 if (unlikely(qat_res
!= ICP_QAT_FW_COMN_STATUS_FLAG_OK
))
830 areq
->base
.complete(&areq
->base
, res
);
833 void qat_alg_callback(void *resp
)
835 struct icp_qat_fw_la_resp
*qat_resp
= resp
;
836 struct qat_crypto_request
*qat_req
=
837 (void *)(__force
long)qat_resp
->opaque_data
;
839 qat_req
->cb(qat_resp
, qat_req
);
842 static int qat_alg_aead_dec(struct aead_request
*areq
)
844 struct crypto_aead
*aead_tfm
= crypto_aead_reqtfm(areq
);
845 struct crypto_tfm
*tfm
= crypto_aead_tfm(aead_tfm
);
846 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
847 struct qat_crypto_request
*qat_req
= aead_request_ctx(areq
);
848 struct icp_qat_fw_la_cipher_req_params
*cipher_param
;
849 struct icp_qat_fw_la_auth_req_params
*auth_param
;
850 struct icp_qat_fw_la_bulk_req
*msg
;
851 int digst_size
= crypto_aead_crt(aead_tfm
)->authsize
;
854 ret
= qat_alg_sgl_to_bufl(ctx
->inst
, areq
->assoc
, areq
->assoclen
,
855 areq
->src
, areq
->dst
, areq
->iv
,
856 AES_BLOCK_SIZE
, qat_req
);
861 *msg
= ctx
->dec_fw_req
;
862 qat_req
->aead_ctx
= ctx
;
863 qat_req
->aead_req
= areq
;
864 qat_req
->cb
= qat_aead_alg_callback
;
865 qat_req
->req
.comn_mid
.opaque_data
= (uint64_t)(__force
long)qat_req
;
866 qat_req
->req
.comn_mid
.src_data_addr
= qat_req
->buf
.blp
;
867 qat_req
->req
.comn_mid
.dest_data_addr
= qat_req
->buf
.bloutp
;
868 cipher_param
= (void *)&qat_req
->req
.serv_specif_rqpars
;
869 cipher_param
->cipher_length
= areq
->cryptlen
- digst_size
;
870 cipher_param
->cipher_offset
= areq
->assoclen
+ AES_BLOCK_SIZE
;
871 memcpy(cipher_param
->u
.cipher_IV_array
, areq
->iv
, AES_BLOCK_SIZE
);
872 auth_param
= (void *)((uint8_t *)cipher_param
+ sizeof(*cipher_param
));
873 auth_param
->auth_off
= 0;
874 auth_param
->auth_len
= areq
->assoclen
+
875 cipher_param
->cipher_length
+ AES_BLOCK_SIZE
;
877 ret
= adf_send_message(ctx
->inst
->sym_tx
, (uint32_t *)msg
);
878 } while (ret
== -EAGAIN
&& ctr
++ < 10);
880 if (ret
== -EAGAIN
) {
881 qat_alg_free_bufl(ctx
->inst
, qat_req
);
887 static int qat_alg_aead_enc_internal(struct aead_request
*areq
, uint8_t *iv
,
890 struct crypto_aead
*aead_tfm
= crypto_aead_reqtfm(areq
);
891 struct crypto_tfm
*tfm
= crypto_aead_tfm(aead_tfm
);
892 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
893 struct qat_crypto_request
*qat_req
= aead_request_ctx(areq
);
894 struct icp_qat_fw_la_cipher_req_params
*cipher_param
;
895 struct icp_qat_fw_la_auth_req_params
*auth_param
;
896 struct icp_qat_fw_la_bulk_req
*msg
;
899 ret
= qat_alg_sgl_to_bufl(ctx
->inst
, areq
->assoc
, areq
->assoclen
,
900 areq
->src
, areq
->dst
, iv
, AES_BLOCK_SIZE
,
906 *msg
= ctx
->enc_fw_req
;
907 qat_req
->aead_ctx
= ctx
;
908 qat_req
->aead_req
= areq
;
909 qat_req
->cb
= qat_aead_alg_callback
;
910 qat_req
->req
.comn_mid
.opaque_data
= (uint64_t)(__force
long)qat_req
;
911 qat_req
->req
.comn_mid
.src_data_addr
= qat_req
->buf
.blp
;
912 qat_req
->req
.comn_mid
.dest_data_addr
= qat_req
->buf
.bloutp
;
913 cipher_param
= (void *)&qat_req
->req
.serv_specif_rqpars
;
914 auth_param
= (void *)((uint8_t *)cipher_param
+ sizeof(*cipher_param
));
917 cipher_param
->cipher_length
= areq
->cryptlen
+ AES_BLOCK_SIZE
;
918 cipher_param
->cipher_offset
= areq
->assoclen
;
920 memcpy(cipher_param
->u
.cipher_IV_array
, iv
, AES_BLOCK_SIZE
);
921 cipher_param
->cipher_length
= areq
->cryptlen
;
922 cipher_param
->cipher_offset
= areq
->assoclen
+ AES_BLOCK_SIZE
;
924 auth_param
->auth_off
= 0;
925 auth_param
->auth_len
= areq
->assoclen
+ areq
->cryptlen
+ AES_BLOCK_SIZE
;
928 ret
= adf_send_message(ctx
->inst
->sym_tx
, (uint32_t *)msg
);
929 } while (ret
== -EAGAIN
&& ctr
++ < 10);
931 if (ret
== -EAGAIN
) {
932 qat_alg_free_bufl(ctx
->inst
, qat_req
);
938 static int qat_alg_aead_enc(struct aead_request
*areq
)
940 return qat_alg_aead_enc_internal(areq
, areq
->iv
, 0);
943 static int qat_alg_aead_genivenc(struct aead_givcrypt_request
*req
)
945 struct crypto_aead
*aead_tfm
= crypto_aead_reqtfm(&req
->areq
);
946 struct crypto_tfm
*tfm
= crypto_aead_tfm(aead_tfm
);
947 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
950 memcpy(req
->giv
, ctx
->salt
, AES_BLOCK_SIZE
);
951 seq
= cpu_to_be64(req
->seq
);
952 memcpy(req
->giv
+ AES_BLOCK_SIZE
- sizeof(uint64_t),
953 &seq
, sizeof(uint64_t));
954 return qat_alg_aead_enc_internal(&req
->areq
, req
->giv
, 1);
957 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher
*tfm
,
961 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
964 spin_lock(&ctx
->lock
);
967 dev
= &GET_DEV(ctx
->inst
->accel_dev
);
968 memset(ctx
->enc_cd
, 0, sizeof(*ctx
->enc_cd
));
969 memset(ctx
->dec_cd
, 0, sizeof(*ctx
->dec_cd
));
970 memset(&ctx
->enc_fw_req
, 0, sizeof(ctx
->enc_fw_req
));
971 memset(&ctx
->dec_fw_req
, 0, sizeof(ctx
->dec_fw_req
));
974 int node
= get_current_node();
975 struct qat_crypto_instance
*inst
=
976 qat_crypto_get_instance_node(node
);
978 spin_unlock(&ctx
->lock
);
982 dev
= &GET_DEV(inst
->accel_dev
);
984 ctx
->enc_cd
= dma_zalloc_coherent(dev
, sizeof(*ctx
->enc_cd
),
988 spin_unlock(&ctx
->lock
);
991 ctx
->dec_cd
= dma_zalloc_coherent(dev
, sizeof(*ctx
->dec_cd
),
995 spin_unlock(&ctx
->lock
);
999 spin_unlock(&ctx
->lock
);
1000 if (qat_alg_ablkcipher_init_sessions(ctx
, key
, keylen
))
1006 memset(ctx
->dec_cd
, 0, sizeof(*ctx
->enc_cd
));
1007 dma_free_coherent(dev
, sizeof(*ctx
->enc_cd
),
1008 ctx
->dec_cd
, ctx
->dec_cd_paddr
);
1011 memset(ctx
->enc_cd
, 0, sizeof(*ctx
->dec_cd
));
1012 dma_free_coherent(dev
, sizeof(*ctx
->dec_cd
),
1013 ctx
->enc_cd
, ctx
->enc_cd_paddr
);
1018 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request
*req
)
1020 struct crypto_ablkcipher
*atfm
= crypto_ablkcipher_reqtfm(req
);
1021 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(atfm
);
1022 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1023 struct qat_crypto_request
*qat_req
= ablkcipher_request_ctx(req
);
1024 struct icp_qat_fw_la_cipher_req_params
*cipher_param
;
1025 struct icp_qat_fw_la_bulk_req
*msg
;
1028 ret
= qat_alg_sgl_to_bufl(ctx
->inst
, NULL
, 0, req
->src
, req
->dst
,
1033 msg
= &qat_req
->req
;
1034 *msg
= ctx
->enc_fw_req
;
1035 qat_req
->ablkcipher_ctx
= ctx
;
1036 qat_req
->ablkcipher_req
= req
;
1037 qat_req
->cb
= qat_ablkcipher_alg_callback
;
1038 qat_req
->req
.comn_mid
.opaque_data
= (uint64_t)(__force
long)qat_req
;
1039 qat_req
->req
.comn_mid
.src_data_addr
= qat_req
->buf
.blp
;
1040 qat_req
->req
.comn_mid
.dest_data_addr
= qat_req
->buf
.bloutp
;
1041 cipher_param
= (void *)&qat_req
->req
.serv_specif_rqpars
;
1042 cipher_param
->cipher_length
= req
->nbytes
;
1043 cipher_param
->cipher_offset
= 0;
1044 memcpy(cipher_param
->u
.cipher_IV_array
, req
->info
, AES_BLOCK_SIZE
);
1046 ret
= adf_send_message(ctx
->inst
->sym_tx
, (uint32_t *)msg
);
1047 } while (ret
== -EAGAIN
&& ctr
++ < 10);
1049 if (ret
== -EAGAIN
) {
1050 qat_alg_free_bufl(ctx
->inst
, qat_req
);
1053 return -EINPROGRESS
;
1056 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request
*req
)
1058 struct crypto_ablkcipher
*atfm
= crypto_ablkcipher_reqtfm(req
);
1059 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(atfm
);
1060 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1061 struct qat_crypto_request
*qat_req
= ablkcipher_request_ctx(req
);
1062 struct icp_qat_fw_la_cipher_req_params
*cipher_param
;
1063 struct icp_qat_fw_la_bulk_req
*msg
;
1066 ret
= qat_alg_sgl_to_bufl(ctx
->inst
, NULL
, 0, req
->src
, req
->dst
,
1071 msg
= &qat_req
->req
;
1072 *msg
= ctx
->dec_fw_req
;
1073 qat_req
->ablkcipher_ctx
= ctx
;
1074 qat_req
->ablkcipher_req
= req
;
1075 qat_req
->cb
= qat_ablkcipher_alg_callback
;
1076 qat_req
->req
.comn_mid
.opaque_data
= (uint64_t)(__force
long)qat_req
;
1077 qat_req
->req
.comn_mid
.src_data_addr
= qat_req
->buf
.blp
;
1078 qat_req
->req
.comn_mid
.dest_data_addr
= qat_req
->buf
.bloutp
;
1079 cipher_param
= (void *)&qat_req
->req
.serv_specif_rqpars
;
1080 cipher_param
->cipher_length
= req
->nbytes
;
1081 cipher_param
->cipher_offset
= 0;
1082 memcpy(cipher_param
->u
.cipher_IV_array
, req
->info
, AES_BLOCK_SIZE
);
1084 ret
= adf_send_message(ctx
->inst
->sym_tx
, (uint32_t *)msg
);
1085 } while (ret
== -EAGAIN
&& ctr
++ < 10);
1087 if (ret
== -EAGAIN
) {
1088 qat_alg_free_bufl(ctx
->inst
, qat_req
);
1091 return -EINPROGRESS
;
1094 static int qat_alg_aead_init(struct crypto_tfm
*tfm
,
1095 enum icp_qat_hw_auth_algo hash
,
1096 const char *hash_name
)
1098 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1100 ctx
->hash_tfm
= crypto_alloc_shash(hash_name
, 0, 0);
1101 if (IS_ERR(ctx
->hash_tfm
))
1103 spin_lock_init(&ctx
->lock
);
1104 ctx
->qat_hash_alg
= hash
;
1105 crypto_aead_set_reqsize(__crypto_aead_cast(tfm
),
1106 sizeof(struct aead_request
) +
1107 sizeof(struct qat_crypto_request
));
1112 static int qat_alg_aead_sha1_init(struct crypto_tfm
*tfm
)
1114 return qat_alg_aead_init(tfm
, ICP_QAT_HW_AUTH_ALGO_SHA1
, "sha1");
1117 static int qat_alg_aead_sha256_init(struct crypto_tfm
*tfm
)
1119 return qat_alg_aead_init(tfm
, ICP_QAT_HW_AUTH_ALGO_SHA256
, "sha256");
1122 static int qat_alg_aead_sha512_init(struct crypto_tfm
*tfm
)
1124 return qat_alg_aead_init(tfm
, ICP_QAT_HW_AUTH_ALGO_SHA512
, "sha512");
1127 static void qat_alg_aead_exit(struct crypto_tfm
*tfm
)
1129 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1130 struct qat_crypto_instance
*inst
= ctx
->inst
;
1133 if (!IS_ERR(ctx
->hash_tfm
))
1134 crypto_free_shash(ctx
->hash_tfm
);
1139 dev
= &GET_DEV(inst
->accel_dev
);
1141 memset(ctx
->enc_cd
, 0, sizeof(struct qat_alg_cd
));
1142 dma_free_coherent(dev
, sizeof(struct qat_alg_cd
),
1143 ctx
->enc_cd
, ctx
->enc_cd_paddr
);
1146 memset(ctx
->dec_cd
, 0, sizeof(struct qat_alg_cd
));
1147 dma_free_coherent(dev
, sizeof(struct qat_alg_cd
),
1148 ctx
->dec_cd
, ctx
->dec_cd_paddr
);
1150 qat_crypto_put_instance(inst
);
1153 static int qat_alg_ablkcipher_init(struct crypto_tfm
*tfm
)
1155 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1157 spin_lock_init(&ctx
->lock
);
1158 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct ablkcipher_request
) +
1159 sizeof(struct qat_crypto_request
);
1164 static void qat_alg_ablkcipher_exit(struct crypto_tfm
*tfm
)
1166 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1167 struct qat_crypto_instance
*inst
= ctx
->inst
;
1173 dev
= &GET_DEV(inst
->accel_dev
);
1175 memset(ctx
->enc_cd
, 0,
1176 sizeof(struct icp_qat_hw_cipher_algo_blk
));
1177 dma_free_coherent(dev
,
1178 sizeof(struct icp_qat_hw_cipher_algo_blk
),
1179 ctx
->enc_cd
, ctx
->enc_cd_paddr
);
1182 memset(ctx
->dec_cd
, 0,
1183 sizeof(struct icp_qat_hw_cipher_algo_blk
));
1184 dma_free_coherent(dev
,
1185 sizeof(struct icp_qat_hw_cipher_algo_blk
),
1186 ctx
->dec_cd
, ctx
->dec_cd_paddr
);
1188 qat_crypto_put_instance(inst
);
1191 static struct crypto_alg qat_algs
[] = { {
1192 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
1193 .cra_driver_name
= "qat_aes_cbc_hmac_sha1",
1194 .cra_priority
= 4001,
1195 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1196 .cra_blocksize
= AES_BLOCK_SIZE
,
1197 .cra_ctxsize
= sizeof(struct qat_alg_aead_ctx
),
1199 .cra_type
= &crypto_aead_type
,
1200 .cra_module
= THIS_MODULE
,
1201 .cra_init
= qat_alg_aead_sha1_init
,
1202 .cra_exit
= qat_alg_aead_exit
,
1205 .setkey
= qat_alg_aead_setkey
,
1206 .decrypt
= qat_alg_aead_dec
,
1207 .encrypt
= qat_alg_aead_enc
,
1208 .givencrypt
= qat_alg_aead_genivenc
,
1209 .ivsize
= AES_BLOCK_SIZE
,
1210 .maxauthsize
= SHA1_DIGEST_SIZE
,
1214 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
1215 .cra_driver_name
= "qat_aes_cbc_hmac_sha256",
1216 .cra_priority
= 4001,
1217 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1218 .cra_blocksize
= AES_BLOCK_SIZE
,
1219 .cra_ctxsize
= sizeof(struct qat_alg_aead_ctx
),
1221 .cra_type
= &crypto_aead_type
,
1222 .cra_module
= THIS_MODULE
,
1223 .cra_init
= qat_alg_aead_sha256_init
,
1224 .cra_exit
= qat_alg_aead_exit
,
1227 .setkey
= qat_alg_aead_setkey
,
1228 .decrypt
= qat_alg_aead_dec
,
1229 .encrypt
= qat_alg_aead_enc
,
1230 .givencrypt
= qat_alg_aead_genivenc
,
1231 .ivsize
= AES_BLOCK_SIZE
,
1232 .maxauthsize
= SHA256_DIGEST_SIZE
,
1236 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
1237 .cra_driver_name
= "qat_aes_cbc_hmac_sha512",
1238 .cra_priority
= 4001,
1239 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1240 .cra_blocksize
= AES_BLOCK_SIZE
,
1241 .cra_ctxsize
= sizeof(struct qat_alg_aead_ctx
),
1243 .cra_type
= &crypto_aead_type
,
1244 .cra_module
= THIS_MODULE
,
1245 .cra_init
= qat_alg_aead_sha512_init
,
1246 .cra_exit
= qat_alg_aead_exit
,
1249 .setkey
= qat_alg_aead_setkey
,
1250 .decrypt
= qat_alg_aead_dec
,
1251 .encrypt
= qat_alg_aead_enc
,
1252 .givencrypt
= qat_alg_aead_genivenc
,
1253 .ivsize
= AES_BLOCK_SIZE
,
1254 .maxauthsize
= SHA512_DIGEST_SIZE
,
1258 .cra_name
= "cbc(aes)",
1259 .cra_driver_name
= "qat_aes_cbc",
1260 .cra_priority
= 4001,
1261 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1262 .cra_blocksize
= AES_BLOCK_SIZE
,
1263 .cra_ctxsize
= sizeof(struct qat_alg_ablkcipher_ctx
),
1265 .cra_type
= &crypto_ablkcipher_type
,
1266 .cra_module
= THIS_MODULE
,
1267 .cra_init
= qat_alg_ablkcipher_init
,
1268 .cra_exit
= qat_alg_ablkcipher_exit
,
1271 .setkey
= qat_alg_ablkcipher_setkey
,
1272 .decrypt
= qat_alg_ablkcipher_decrypt
,
1273 .encrypt
= qat_alg_ablkcipher_encrypt
,
1274 .min_keysize
= AES_MIN_KEY_SIZE
,
1275 .max_keysize
= AES_MAX_KEY_SIZE
,
1276 .ivsize
= AES_BLOCK_SIZE
,
1281 int qat_algs_register(void)
1283 if (atomic_add_return(1, &active_dev
) == 1) {
1286 for (i
= 0; i
< ARRAY_SIZE(qat_algs
); i
++)
1287 qat_algs
[i
].cra_flags
=
1288 (qat_algs
[i
].cra_type
== &crypto_aead_type
) ?
1289 CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
:
1290 CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
;
1292 return crypto_register_algs(qat_algs
, ARRAY_SIZE(qat_algs
));
1297 int qat_algs_unregister(void)
1299 if (atomic_sub_return(1, &active_dev
) == 0)
1300 return crypto_unregister_algs(qat_algs
, ARRAY_SIZE(qat_algs
));
1304 int qat_algs_init(void)
1306 atomic_set(&active_dev
, 0);
1307 crypto_get_default_rng();
1311 void qat_algs_exit(void)
1313 crypto_put_default_rng();