/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
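/*
 * Hardware cipher-config words for AES-CBC.  On the decrypt side the
 * KEY_CONVERT flag asks the engine to derive the decryption key
 * schedule from the supplied encryption key.
 */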
#define QAT_AES_HW_CONFIG_ENC(alg) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;
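/*
 * Flat buffer descriptors consumed by the firmware in place of kernel
 * scatterlists; qat_alg_sgl_to_bufl() below fills them in.
 */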
struct qat_alg_buf {
        uint32_t len;
        uint32_t resrvd;
        uint64_t addr;
} __packed;

struct qat_alg_buf_list {
        uint64_t resrvd;
        uint32_t num_bufs;
        uint32_t num_mapped_bufs;
        struct qat_alg_buf bufers[];
} __packed __aligned(64);
/* Common content descriptor */
struct qat_alg_cd {
        union {
                struct qat_enc { /* Encrypt content desc */
                        struct icp_qat_hw_cipher_algo_blk cipher;
                        struct icp_qat_hw_auth_algo_blk hash;
                } qat_enc_cd;
                struct qat_dec { /* Decrypt content desc */
                        struct icp_qat_hw_auth_algo_blk hash;
                        struct icp_qat_hw_cipher_algo_blk cipher;
                } qat_dec_cd;
        };
} __aligned(64);

#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

struct qat_auth_state {
        uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);
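/*
 * Per-tfm session state: DMA-coherent content descriptors for each
 * direction plus pre-built firmware request templates that the hot
 * path only has to copy and patch per request.
 */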
struct qat_alg_session_ctx {
        struct qat_alg_cd *enc_cd;
        dma_addr_t enc_cd_paddr;
        struct qat_alg_cd *dec_cd;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
        struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
        struct qat_crypto_instance *inst;
        struct crypto_tfm *tfm;
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        uint8_t salt[AES_BLOCK_SIZE];
        spinlock_t lock; /* protects qat_alg_session_ctx struct */
};
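/* Physical package of the current CPU, used to pick a NUMA-local instance. */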
static int get_current_node(void)
{
        return cpu_data(current_thread_info()->cpu).phys_proc_id;
}
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                return -EFAULT;
        }
}
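/*
 * HMAC precompute: hash one block of key ^ ipad and one of key ^ opad
 * with the software shash, export the midstates and store them
 * byte-swapped in the content descriptor, so the accelerator can finish
 * the HMAC without ever seeing the raw authentication key.
 */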
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                  struct qat_alg_session_ctx *ctx,
                                  const uint8_t *auth_key,
                                  unsigned int auth_keylen)
{
        struct qat_auth_state auth_state;
        SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
        struct sha1_state sha1;
        struct sha256_state sha256;
        struct sha512_state sha512;
        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
        uint8_t *ipad = auth_state.data;
        uint8_t *opad = ipad + block_size;
        __be32 *hash_state_out;
        __be64 *hash512_state_out;
        int i, offset;

        memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
        shash->tfm = ctx->hash_tfm;
        shash->flags = 0x0;

        if (auth_keylen > block_size) {
                char buff[SHA512_BLOCK_SIZE];
                int ret = crypto_shash_digest(shash, auth_key,
                                              auth_keylen, buff);
                if (ret)
                        return ret;

                memcpy(ipad, buff, digest_size);
                memcpy(opad, buff, digest_size);
                memset(ipad + digest_size, 0, block_size - digest_size);
                memset(opad + digest_size, 0, block_size - digest_size);
        } else {
                memcpy(ipad, auth_key, auth_keylen);
                memcpy(opad, auth_key, auth_keylen);
                memset(ipad + auth_keylen, 0, block_size - auth_keylen);
                memset(opad + auth_keylen, 0, block_size - auth_keylen);
        }

        for (i = 0; i < block_size; i++) {
                char *ipad_ptr = ipad + i;
                char *opad_ptr = opad + i;
                *ipad_ptr ^= 0x36;
                *opad_ptr ^= 0x5C;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ipad, block_size))
                return -EFAULT;

        hash_state_out = (__be32 *)hash->sha.state1;
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, opad, block_size))
                return -EFAULT;

        offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }
        return 0;
}
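/* Header fields shared by the encrypt and decrypt request templates. */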
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_SGL);
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
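/*
 * Encrypt sessions use CIPHER_HASH ordering: the cipher slice encrypts
 * first and the auth slice then MACs the result, matching authenc()'s
 * encrypt-then-MAC construction.
 */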
static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
                                    int alg, struct crypto_authenc_keys *keys)
{
        struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
        unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg, digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
        return 0;
}
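/*
 * Decrypt sessions use the mirror HASH_CIPHER ordering: the digest is
 * verified over the ciphertext before the cipher slice decrypts it.
 */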
static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
                                    int alg, struct crypto_authenc_keys *keys)
{
        struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
        unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
                sizeof(struct icp_qat_hw_auth_setup) +
                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg,
                                             digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset =
                (sizeof(struct icp_qat_hw_auth_setup) +
                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = 0;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }

        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        auth_param->auth_res_sz = digestsize;
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        return 0;
}
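/*
 * Split the authenc() key blob into its auth and cipher parts and
 * program both the encrypt and decrypt sessions.
 */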
static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
                                 const uint8_t *key, unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        int alg;

        if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
                return -EFAULT;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;

        switch (keys.enckeylen) {
        case AES_KEYSIZE_128:
                alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                break;
        case AES_KEYSIZE_192:
                alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                break;
        case AES_KEYSIZE_256:
                alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                break;
        default:
                goto bad_key;
        }

        if (qat_alg_init_enc_session(ctx, alg, &keys))
                goto error;

        if (qat_alg_init_dec_session(ctx, alg, &keys))
                goto error;

        return 0;
bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
error:
        return -EFAULT;
}
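/*
 * setkey either rekeys an existing session in place or, on first use,
 * binds the tfm to a NUMA-local crypto instance and allocates the
 * DMA-coherent content descriptors.
 */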
static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
                          unsigned int keylen)
{
        struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev;

        spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
                memset(&ctx->enc_fw_req_tmpl, 0,
                       sizeof(struct icp_qat_fw_la_bulk_req));
                memset(&ctx->dec_fw_req_tmpl, 0,
                       sizeof(struct icp_qat_fw_la_bulk_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst) {
                        spin_unlock(&ctx->lock);
                        return -EINVAL;
                }

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev,
                                                  sizeof(struct qat_alg_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd) {
                        spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->dec_cd = dma_zalloc_coherent(dev,
                                                  sizeof(struct qat_alg_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd) {
                        spin_unlock(&ctx->lock);
                        goto out_free_enc;
                }
        }
        spin_unlock(&ctx->lock);
        if (qat_alg_init_sessions(ctx, key, keylen))
                goto out_free_all;

        return 0;

out_free_all:
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}
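/*
 * Release the DMA mappings created by qat_alg_sgl_to_bufl().  For an
 * out-of-place request the assoc and iv entries of the output list
 * alias the input mappings, so only its data buffers are unmapped.
 */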
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
                              struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_alg_buf_list *bl = qat_req->buf.bl;
        struct qat_alg_buf_list *blout = qat_req->buf.blout;
        dma_addr_t blp = qat_req->buf.blp;
        dma_addr_t blpout = qat_req->buf.bloutp;
        size_t sz = qat_req->buf.sz;
        int i, bufs = bl->num_bufs;

        for (i = 0; i < bl->num_bufs; i++)
                dma_unmap_single(dev, bl->bufers[i].addr,
                                 bl->bufers[i].len, DMA_BIDIRECTIONAL);

        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bl);
        if (blp != blpout) {
                /* For an out-of-place operation, unmap only the data
                 * buffers of the output list */
                int bufless = bufs - blout->num_mapped_bufs;

                for (i = bufless; i < bufs; i++) {
                        dma_unmap_single(dev, blout->bufers[i].addr,
                                         blout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
                dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
                kfree(blout);
        }
}
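/*
 * Flatten the assoc scatterlist, the IV and the data scatterlist into
 * the firmware's qat_alg_buf_list format, DMA-mapping each segment.
 * The resulting list is laid out as assoc buffers, then the IV, then
 * the data buffers.
 */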
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                               struct scatterlist *assoc,
                               struct scatterlist *sgl,
                               struct scatterlist *sglout, uint8_t *iv,
                               uint8_t ivlen,
                               struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz = sizeof(struct qat_alg_buf_list) +
                        ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

        if (unlikely(!n))
                return -EINVAL;

        bufl = kmalloc_node(sz, GFP_ATOMIC,
                            dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;

        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err;

        for_each_sg(assoc, sg, assoc_n, i) {
                if (!sg->length)
                        continue;
                bufl->bufers[bufs].addr = dma_map_single(dev,
                                                         sg_virt(sg),
                                                         sg->length,
                                                         DMA_BIDIRECTIONAL);
                bufl->bufers[bufs].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
                        goto err;
                bufs++;
        }
        bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
                                                 DMA_BIDIRECTIONAL);
        bufl->bufers[bufs].len = ivlen;
        if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
                goto err;
        bufs++;

        for_each_sg(sgl, sg, n, i) {
                int y = i + bufs;

                bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                      sg->length,
                                                      DMA_BIDIRECTIONAL);
                bufl->bufers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
                        goto err;
        }
        bufl->num_bufs = n + bufs;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
        /* Handle out of place operation */
        if (sgl != sglout) {
                struct qat_alg_buf *bufers;

                buflout = kmalloc_node(sz, GFP_ATOMIC,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err;
                bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err;
                bufers = buflout->bufers;
                /* For an out-of-place operation, dma map only the data
                 * buffers and reuse the assoc and iv mappings */
                for (i = 0; i < bufs; i++) {
                        bufers[i].len = bufl->bufers[i].len;
                        bufers[i].addr = bufl->bufers[i].addr;
                }
                for_each_sg(sglout, sg, n, i) {
                        int y = i + bufs;

                        bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                        sg->length,
                                                        DMA_BIDIRECTIONAL);
                        buflout->bufers[y].len = sg->length;
                        if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
                                goto err;
                }
                buflout->num_bufs = n + bufs;
                buflout->num_mapped_bufs = n;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
        } else {
                /* Otherwise set the src and dst to the same address */
                qat_req->buf.bloutp = qat_req->buf.blp;
        }
        return 0;
err:
        dev_err(dev, "Failed to map buf for dma\n");
        for_each_sg(sgl, sg, n + bufs, i) {
                if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
        }
        if (!dma_mapping_error(dev, blp))
                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);
        if (sgl != sglout && buflout) {
                for_each_sg(sglout, sg, n, i) {
                        int y = i + bufs;

                        if (!dma_mapping_error(dev, buflout->bufers[y].addr))
                                dma_unmap_single(dev, buflout->bufers[y].addr,
                                                 buflout->bufers[y].len,
                                                 DMA_BIDIRECTIONAL);
                }
                if (!dma_mapping_error(dev, bloutp))
                        dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
                kfree(buflout);
        }
        return -ENOMEM;
}
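/* Response-ring callback, invoked for every completed firmware request. */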
void qat_alg_callback(void *resp)
{
        struct icp_qat_fw_la_resp *qat_resp = resp;
        struct qat_crypto_request *qat_req =
                        (void *)(__force long)qat_resp->opaque_data;
        struct qat_alg_session_ctx *ctx = qat_req->ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct aead_request *areq = qat_req->areq;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EBADMSG;
        areq->base.complete(&areq->base, res);
}
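/*
 * The buffer list is laid out as assoc || IV || payload, so the cipher
 * region starts at assoclen + AES_BLOCK_SIZE and the auth region covers
 * everything except the trailing digest.
 */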
static int qat_alg_dec(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int digst_size = crypto_aead_crt(aead_tfm)->authsize;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
                                  areq->iv, AES_BLOCK_SIZE, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req_tmpl;
        qat_req->ctx = ctx;
        qat_req->areq = areq;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = areq->cryptlen - digst_size;
        cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen +
                                cipher_param->cipher_length + AES_BLOCK_SIZE;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}
static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
                                int enc_iv)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
                                  iv, AES_BLOCK_SIZE, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req_tmpl;
        qat_req->ctx = ctx;
        qat_req->areq = areq;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

        if (enc_iv) {
                cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
                cipher_param->cipher_offset = areq->assoclen;
        } else {
                memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
                cipher_param->cipher_length = areq->cryptlen;
                cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
        }
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_enc(struct aead_request *areq)
{
        return qat_alg_enc_internal(areq, areq->iv, 0);
}
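/*
 * givencrypt: build the IV from the per-session random salt, stamp the
 * request sequence number into its last eight bytes, and encrypt the IV
 * together with the payload (enc_iv == 1).
 */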
static int qat_alg_genivenc(struct aead_givcrypt_request *req)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
        __be64 seq;

        memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
        seq = cpu_to_be64(req->seq);
        memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
               &seq, sizeof(uint64_t));
        return qat_alg_enc_internal(&req->areq, req->giv, 1);
}
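/*
 * Common tfm init: allocate the software shash used for the HMAC
 * precompute and record the matching QAT hash algorithm ID.
 */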
static int qat_alg_init(struct crypto_tfm *tfm,
                        enum icp_qat_hw_auth_algo hash, const char *hash_name)
{
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);

        memset(ctx, '\0', sizeof(*ctx));
        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(ctx->hash_tfm))
                return -EFAULT;
        spin_lock_init(&ctx->lock);
        ctx->qat_hash_alg = hash;
        tfm->crt_aead.reqsize = sizeof(struct aead_request) +
                                sizeof(struct qat_crypto_request);
        ctx->tfm = tfm;
        return 0;
}

static int qat_alg_sha1_init(struct crypto_tfm *tfm)
{
        return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_sha256_init(struct crypto_tfm *tfm)
{
        return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_sha512_init(struct crypto_tfm *tfm)
{
        return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_exit(struct crypto_tfm *tfm)
{
        struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        if (!IS_ERR(ctx->hash_tfm))
                crypto_free_shash(ctx->hash_tfm);

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd)
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        if (ctx->dec_cd)
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        qat_crypto_put_instance(inst);
}
static struct crypto_alg qat_algs[] = { {
        .cra_name = "authenc(hmac(sha1),cbc(aes))",
        .cra_driver_name = "qat_aes_cbc_hmac_sha1",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_sha1_init,
        .cra_exit = qat_alg_exit,
        .cra_u = {
                .aead = {
                        .setkey = qat_alg_setkey,
                        .decrypt = qat_alg_dec,
                        .encrypt = qat_alg_enc,
                        .givencrypt = qat_alg_genivenc,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
        },
}, {
        .cra_name = "authenc(hmac(sha256),cbc(aes))",
        .cra_driver_name = "qat_aes_cbc_hmac_sha256",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_sha256_init,
        .cra_exit = qat_alg_exit,
        .cra_u = {
                .aead = {
                        .setkey = qat_alg_setkey,
                        .decrypt = qat_alg_dec,
                        .encrypt = qat_alg_enc,
                        .givencrypt = qat_alg_genivenc,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
        },
}, {
        .cra_name = "authenc(hmac(sha512),cbc(aes))",
        .cra_driver_name = "qat_aes_cbc_hmac_sha512",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_sha512_init,
        .cra_exit = qat_alg_exit,
        .cra_u = {
                .aead = {
                        .setkey = qat_alg_setkey,
                        .decrypt = qat_alg_dec,
                        .encrypt = qat_alg_enc,
                        .givencrypt = qat_alg_genivenc,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
        },
} };
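/*
 * active_dev counts accelerators using these algs; register with the
 * crypto API on the first one and unregister when the last goes away.
 */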
int qat_algs_register(void)
{
        if (atomic_add_return(1, &active_dev) == 1) {
                int i;

                for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
                        qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
                                                CRYPTO_ALG_ASYNC;
                return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        }
        return 0;
}

int qat_algs_unregister(void)
{
        if (atomic_sub_return(1, &active_dev) == 0)
                return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
        return 0;
}

int qat_algs_init(void)
{
        atomic_set(&active_dev, 0);
        crypto_get_default_rng();
        return 0;
}

void qat_algs_exit(void)
{
        crypto_put_default_rng();
}