/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
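/*
 * Cipher-config words for the two AES-CBC directions.  The decrypt side
 * flags the key for conversion (ICP_QAT_HW_CIPHER_KEY_CONVERT) so the
 * hardware can derive the AES decryption key schedule from the supplied
 * encryption key; the encrypt side uses the key as-is.
 */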
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

/* One DMA address/length entry in the flat buffer table the firmware walks */
struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
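/*
 * A session's content descriptor tells the accelerator how to chain its
 * cipher and auth slices.  Encrypt sessions run cipher-then-hash and
 * decrypt sessions hash-then-cipher, so the two layouts below are mirror
 * images of each other.
 */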
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
};
struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}
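/*
 * Standard HMAC precomputation: hash the key XORed with the ipad/opad
 * constants once at setkey time, export the partial digests, and store
 * them byte-swapped in the content descriptor so the auth slice can
 * resume from the midstate on every request instead of rehashing the key.
 */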
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		/* Keys longer than one block are hashed down first */
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ipad);
		if (ret)
			return ret;

		memcpy(opad, ipad, digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
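/*
 * Encrypt sessions lay the content descriptor out as cipher block followed
 * by auth block and chain the slices cipher -> auth -> DRAM, so the digest
 * is computed over the ciphertext and appended in a single pass.
 */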
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
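/*
 * Decrypt sessions mirror the layout (auth block first, cipher second) and
 * chain auth -> cipher, so the hardware verifies the incoming digest
 * (ICP_QAT_FW_LA_CMP_AUTH_RES) over the ciphertext before decrypting it.
 */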
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}
static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}
static int qat_alg_validate_key(int key_len, int *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm,
				      const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}
static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const uint8_t *key,
					    unsigned int keylen)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst)
			return -EINVAL;

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd)
			return -ENOMEM;
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd)
			goto out_free_enc;
	}
	if (qat_alg_aead_init_sessions(tfm, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}
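/*
 * Translate the kernel scatterlists for source and destination into the
 * flat qat_alg_buf_list tables the firmware walks.  In-place requests
 * reuse the source table for the destination; out-of-place requests get
 * a second, independently mapped table.
 */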
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err:
	dev_err(dev, "Failed to map buf for dma\n");
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = 0; i < n; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
						 DMA_BIDIRECTIONAL);
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}
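/*
 * Completion side: the transport layer hands each firmware response to
 * qat_alg_callback(), which recovers the request from the opaque data and
 * dispatches to the per-request callback stored in qat_req->cb.  The
 * callbacks unmap the buffers and complete the crypto API request.
 */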
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}
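/*
 * On decrypt, areq->cryptlen covers the ciphertext plus the appended
 * digest, so the cipher runs over cryptlen - digestsize bytes while the
 * auth slice checks assoclen + that length.  On encrypt both lengths
 * cover the full cryptlen.  Sends are retried up to ten times when the
 * ring is full (-EAGAIN).
 */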
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const uint8_t *key,
				     unsigned int keylen)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
				     sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
					sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
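/*
 * Priority 4001 places these implementations well above the generic
 * software versions, so the crypto API selects the accelerator whenever a
 * QAT device is present.
 */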
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };
static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
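/*
 * Usage sketch (illustrative, not part of this driver): a kernel consumer
 * reaches these algorithms through the generic crypto API, e.g.
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		crypto_aead_setkey(tfm, key, keylen);
 *
 * where key/keylen are the caller's authenc-encoded key material; setkey
 * ends up in qat_alg_aead_setkey() above.
 */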
int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags =
			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}
int qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
	return 0;
}
int qat_algs_init(void)
{
	return 0;
}

void qat_algs_exit(void)
{
}