/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 *                     ---------------      ---------------
 *                     | JobDesc #1  |----->|  ShareDesc  |
 *                     | *(packet 1) |      |  (hashKey)  |
 *                     ---------------      | (operation) |
 *                                          ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 *                     ---------------      ---------------
 *                     | JobDesc #2  |----->|  ShareDesc  |
 *                     | *(packet 2) |  |-->|  (hashKey)  |
 *                     ---------------  |   | (operation) |
 *                           .          |   | (load ctx2) |
 *                           .          |   ---------------
 *                     ---------------  |
 *                     | JobDesc #3  |--|
 *                     | *(packet 3) |  |
 *                     ---------------  |
 *                           .          |
 *                           .          |
 *                     ---------------  |
 *                     | JobDesc #4  |--|
 *                     | *(packet 4) |
 *                     ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
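
/*
 * Illustrative sketch, not part of the driver proper: with the helpers from
 * desc_constr.h that the routines below use, a job descriptor of the shape
 * shown above is put together roughly like this (names such as sh_desc_dma,
 * src_dma, dst_dma and the lengths are placeholders and assumed to be
 * DMA-mapped already):
 *
 *	init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, nbytes, 0);
 *	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
 *
 * The ahash_* request handlers further down follow this same pattern, adding
 * S/G tables and context loads as needed.
 */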

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg,
				      bool chained)
{
	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data; even when it
 * does not, a previously used buffer may still need to be unmapped.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the hash key if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	bool chained;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
				     DMA_TO_DEVICE, edesc->chained);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   chained);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index, chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes, &chained);
	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
			   chained);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;
	edesc->chained = chained;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
				       &chained);
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1, chained);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	bool chained = false;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes, &chained);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->chained = chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
			   chained);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	bool chained = false;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
				     &chained);
		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
				   DMA_TO_DEVICE, chained);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->chained = chained;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->dst_dma = 0;

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				return -ENOMEM;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
1513 | } else { | |
1514 | ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, | |
1515 | DMA_TO_DEVICE); | |
1516 | kfree(edesc); | |
1517 | } | |
1518 | } else if (*next_buflen) { | |
1519 | state->update = ahash_update_no_ctx; | |
1520 | state->finup = ahash_finup_no_ctx; | |
1521 | state->final = ahash_final_no_ctx; | |
307fd543 CS |
1522 | scatterwalk_map_and_copy(next_buf, req->src, 0, |
1523 | req->nbytes, 0); | |
045e3678 YK |
1524 | } |
1525 | #ifdef DEBUG | |
514df281 | 1526 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", |
045e3678 YK |
1527 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, |
1528 | *next_buflen, 1); | |
1529 | #endif | |
1530 | ||
1531 | return ret; | |
1532 | } | |
1533 | ||
1534 | static int ahash_finup_first(struct ahash_request *req) | |
1535 | { | |
1536 | return ahash_digest(req); | |
1537 | } | |
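/*
 * Editor's note: ahash_init() below resets the per-request state and
 * installs ahash_update_first/ahash_finup_first (plus ahash_final_no_ctx
 * for a final with no data at all), so finup or final on a fresh state
 * degenerates to a one-shot digest, as ahash_finup_first() above shows.
 * After the first update, the handlers are swapped for either the *_ctx
 * variants (running context held in the CAAM) or the *_no_ctx variants
 * (all data still in the software buffer); ahash_update(), ahash_finup()
 * and ahash_final() simply dispatch through these function pointers.
 */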
1538 | ||
1539 | static int ahash_init(struct ahash_request *req) | |
1540 | { | |
1541 | struct caam_hash_state *state = ahash_request_ctx(req); | |
1542 | ||
1543 | state->update = ahash_update_first; | |
1544 | state->finup = ahash_finup_first; | |
1545 | state->final = ahash_final_no_ctx; | |
1546 | ||
1547 | state->current_buf = 0; | |
de0e35ec | 1548 | state->buf_dma = 0; |
6fd4b156 SC |
1549 | state->buflen_0 = 0; |
1550 | state->buflen_1 = 0; | |
045e3678 YK |
1551 | |
1552 | return 0; | |
1553 | } | |
1554 | ||
1555 | static int ahash_update(struct ahash_request *req) | |
1556 | { | |
1557 | struct caam_hash_state *state = ahash_request_ctx(req); | |
1558 | ||
1559 | return state->update(req); | |
1560 | } | |
1561 | ||
1562 | static int ahash_finup(struct ahash_request *req) | |
1563 | { | |
1564 | struct caam_hash_state *state = ahash_request_ctx(req); | |
1565 | ||
1566 | return state->finup(req); | |
1567 | } | |
1568 | ||
1569 | static int ahash_final(struct ahash_request *req) | |
1570 | { | |
1571 | struct caam_hash_state *state = ahash_request_ctx(req); | |
1572 | ||
1573 | return state->final(req); | |
1574 | } | |
1575 | ||
1576 | static int ahash_export(struct ahash_request *req, void *out) | |
1577 | { | |
1578 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
1579 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | |
1580 | struct caam_hash_state *state = ahash_request_ctx(req); | |
1581 | ||
1582 | memcpy(out, ctx, sizeof(struct caam_hash_ctx)); | |
1583 | memcpy(out + sizeof(struct caam_hash_ctx), state, | |
1584 | sizeof(struct caam_hash_state)); | |
1585 | return 0; | |
1586 | } | |
1587 | ||
1588 | static int ahash_import(struct ahash_request *req, const void *in) | |
1589 | { | |
1590 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | |
1591 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | |
1592 | struct caam_hash_state *state = ahash_request_ctx(req); | |
1593 | ||
1594 | memcpy(ctx, in, sizeof(struct caam_hash_ctx)); | |
1595 | memcpy(state, in + sizeof(struct caam_hash_ctx), | |
1596 | sizeof(struct caam_hash_state)); | |
1597 | return 0; | |
1598 | } | |
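/*
 * Editor's note: export/import above simply memcpy the whole transform
 * context followed by the request state, so the exported blob includes
 * device-specific data such as the DMA addresses of the shared descriptors
 * and is much larger than the algorithm's nominal state.  Later revisions
 * of this driver were changed to export only the running message-digest
 * state and the buffered bytes.
 */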
1599 | ||
1600 | struct caam_hash_template { | |
1601 | char name[CRYPTO_MAX_ALG_NAME]; | |
1602 | char driver_name[CRYPTO_MAX_ALG_NAME]; | |
b0e09bae YK |
1603 | char hmac_name[CRYPTO_MAX_ALG_NAME]; |
1604 | char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; | |
045e3678 YK |
1605 | unsigned int blocksize; |
1606 | struct ahash_alg template_ahash; | |
1607 | u32 alg_type; | |
1608 | u32 alg_op; | |
1609 | }; | |
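/*
 * Editor's note: one caam_hash_template describes one underlying digest.
 * name/driver_name are used for the unkeyed registration and
 * hmac_name/hmac_driver_name for the keyed one, template_ahash holds the
 * crypto API entry points shared by both, and alg_type/alg_op are the
 * OPERATION selectors for the plain hash and its HMAC flavour
 * respectively.
 */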
1610 | ||
1611 | /* ahash algorithm templates */ | |
1612 | static struct caam_hash_template driver_hash[] = { | |
1613 | { | |
b0e09bae YK |
1614 | .name = "sha1", |
1615 | .driver_name = "sha1-caam", | |
1616 | .hmac_name = "hmac(sha1)", | |
1617 | .hmac_driver_name = "hmac-sha1-caam", | |
045e3678 YK |
1618 | .blocksize = SHA1_BLOCK_SIZE, |
1619 | .template_ahash = { | |
1620 | .init = ahash_init, | |
1621 | .update = ahash_update, | |
1622 | .final = ahash_final, | |
1623 | .finup = ahash_finup, | |
1624 | .digest = ahash_digest, | |
1625 | .export = ahash_export, | |
1626 | .import = ahash_import, | |
1627 | .setkey = ahash_setkey, | |
1628 | .halg = { | |
1629 | .digestsize = SHA1_DIGEST_SIZE, | |
1630 | }, | |
1631 | }, | |
1632 | .alg_type = OP_ALG_ALGSEL_SHA1, | |
1633 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | |
1634 | }, { | |
b0e09bae YK |
1635 | .name = "sha224", |
1636 | .driver_name = "sha224-caam", | |
1637 | .hmac_name = "hmac(sha224)", | |
1638 | .hmac_driver_name = "hmac-sha224-caam", | |
045e3678 YK |
1639 | .blocksize = SHA224_BLOCK_SIZE, |
1640 | .template_ahash = { | |
1641 | .init = ahash_init, | |
1642 | .update = ahash_update, | |
1643 | .final = ahash_final, | |
1644 | .finup = ahash_finup, | |
1645 | .digest = ahash_digest, | |
1646 | .export = ahash_export, | |
1647 | .import = ahash_import, | |
1648 | .setkey = ahash_setkey, | |
1649 | .halg = { | |
1650 | .digestsize = SHA224_DIGEST_SIZE, | |
1651 | }, | |
1652 | }, | |
1653 | .alg_type = OP_ALG_ALGSEL_SHA224, | |
1654 | .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, | |
1655 | }, { | |
b0e09bae YK |
1656 | .name = "sha256", |
1657 | .driver_name = "sha256-caam", | |
1658 | .hmac_name = "hmac(sha256)", | |
1659 | .hmac_driver_name = "hmac-sha256-caam", | |
045e3678 YK |
1660 | .blocksize = SHA256_BLOCK_SIZE, |
1661 | .template_ahash = { | |
1662 | .init = ahash_init, | |
1663 | .update = ahash_update, | |
1664 | .final = ahash_final, | |
1665 | .finup = ahash_finup, | |
1666 | .digest = ahash_digest, | |
1667 | .export = ahash_export, | |
1668 | .import = ahash_import, | |
1669 | .setkey = ahash_setkey, | |
1670 | .halg = { | |
1671 | .digestsize = SHA256_DIGEST_SIZE, | |
1672 | }, | |
1673 | }, | |
1674 | .alg_type = OP_ALG_ALGSEL_SHA256, | |
1675 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | |
1676 | }, { | |
b0e09bae YK |
1677 | .name = "sha384", |
1678 | .driver_name = "sha384-caam", | |
1679 | .hmac_name = "hmac(sha384)", | |
1680 | .hmac_driver_name = "hmac-sha384-caam", | |
045e3678 YK |
1681 | .blocksize = SHA384_BLOCK_SIZE, |
1682 | .template_ahash = { | |
1683 | .init = ahash_init, | |
1684 | .update = ahash_update, | |
1685 | .final = ahash_final, | |
1686 | .finup = ahash_finup, | |
1687 | .digest = ahash_digest, | |
1688 | .export = ahash_export, | |
1689 | .import = ahash_import, | |
1690 | .setkey = ahash_setkey, | |
1691 | .halg = { | |
1692 | .digestsize = SHA384_DIGEST_SIZE, | |
1693 | }, | |
1694 | }, | |
1695 | .alg_type = OP_ALG_ALGSEL_SHA384, | |
1696 | .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, | |
1697 | }, { | |
b0e09bae YK |
1698 | .name = "sha512", |
1699 | .driver_name = "sha512-caam", | |
1700 | .hmac_name = "hmac(sha512)", | |
1701 | .hmac_driver_name = "hmac-sha512-caam", | |
045e3678 YK |
1702 | .blocksize = SHA512_BLOCK_SIZE, |
1703 | .template_ahash = { | |
1704 | .init = ahash_init, | |
1705 | .update = ahash_update, | |
1706 | .final = ahash_final, | |
1707 | .finup = ahash_finup, | |
1708 | .digest = ahash_digest, | |
1709 | .export = ahash_export, | |
1710 | .import = ahash_import, | |
1711 | .setkey = ahash_setkey, | |
1712 | .halg = { | |
1713 | .digestsize = SHA512_DIGEST_SIZE, | |
1714 | }, | |
1715 | }, | |
1716 | .alg_type = OP_ALG_ALGSEL_SHA512, | |
1717 | .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, | |
1718 | }, { | |
b0e09bae YK |
1719 | .name = "md5", |
1720 | .driver_name = "md5-caam", | |
1721 | .hmac_name = "hmac(md5)", | |
1722 | .hmac_driver_name = "hmac-md5-caam", | |
045e3678 YK |
1723 | .blocksize = MD5_BLOCK_WORDS * 4, |
1724 | .template_ahash = { | |
1725 | .init = ahash_init, | |
1726 | .update = ahash_update, | |
1727 | .final = ahash_final, | |
1728 | .finup = ahash_finup, | |
1729 | .digest = ahash_digest, | |
1730 | .export = ahash_export, | |
1731 | .import = ahash_import, | |
1732 | .setkey = ahash_setkey, | |
1733 | .halg = { | |
1734 | .digestsize = MD5_DIGEST_SIZE, | |
1735 | }, | |
1736 | }, | |
1737 | .alg_type = OP_ALG_ALGSEL_MD5, | |
1738 | .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, | |
1739 | }, | |
1740 | }; | |
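/*
 * Editor's note: the table above covers MD5 plus the SHA-1/SHA-2 family,
 * and caam_algapi_hash_init() below registers every entry twice, once as
 * the keyed hmac(<hash>) algorithm and once as the plain unkeyed hash.
 */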
1741 | ||
1742 | struct caam_hash_alg { | |
1743 | struct list_head entry; | |
045e3678 YK |
1744 | int alg_type; |
1745 | int alg_op; | |
1746 | struct ahash_alg ahash_alg; | |
1747 | }; | |
1748 | ||
1749 | static int caam_hash_cra_init(struct crypto_tfm *tfm) | |
1750 | { | |
1751 | struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); | |
1752 | struct crypto_alg *base = tfm->__crt_alg; | |
1753 | struct hash_alg_common *halg = | |
1754 | container_of(base, struct hash_alg_common, base); | |
1755 | struct ahash_alg *alg = | |
1756 | container_of(halg, struct ahash_alg, halg); | |
1757 | struct caam_hash_alg *caam_hash = | |
1758 | container_of(alg, struct caam_hash_alg, ahash_alg); | |
1759 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); | |
045e3678 YK |
1760 | /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ |
1761 | static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, | |
1762 | HASH_MSG_LEN + SHA1_DIGEST_SIZE, | |
1763 | HASH_MSG_LEN + 32, | |
1764 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, | |
1765 | HASH_MSG_LEN + 64, | |
1766 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; | |
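/*
 * Editor's note: the 32 and 64 literals above are the SHA-224 and SHA-384
 * entries; their running (internal) state is the full SHA-256/SHA-512
 * state, which is wider than the truncated digest they output, hence the
 * explicit byte counts instead of SHA224_DIGEST_SIZE/SHA384_DIGEST_SIZE.
 */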
045e3678 YK |
1767 | int ret = 0; |
1768 | ||
1769 | /* | |
cfc6f11b | 1770 | * Get a Job ring from Job Ring driver to ensure in-order |
045e3678 YK |
1771 | * crypto request processing per tfm |
1772 | */ | |
cfc6f11b RG |
1773 | ctx->jrdev = caam_jr_alloc(); |
1774 | if (IS_ERR(ctx->jrdev)) { | |
1775 | pr_err("Job Ring Device allocation for transform failed\n"); | |
1776 | return PTR_ERR(ctx->jrdev); | |
1777 | } | |
045e3678 YK |
1778 | /* copy descriptor header template value */ |
1779 | ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; | |
1780 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; | |
1781 | ||
1782 | ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | |
1783 | OP_ALG_ALGSEL_SHIFT]; | |
1784 | ||
1785 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
1786 | sizeof(struct caam_hash_state)); | |
1787 | ||
1788 | ret = ahash_set_sh_desc(ahash); | |
1789 | ||
1790 | return ret; | |
1791 | } | |
1792 | ||
1793 | static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |
1794 | { | |
1795 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); | |
1796 | ||
1797 | if (ctx->sh_desc_update_dma && | |
1798 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) | |
1799 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, | |
1800 | desc_bytes(ctx->sh_desc_update), | |
1801 | DMA_TO_DEVICE); | |
1802 | if (ctx->sh_desc_update_first_dma && | |
1803 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) | |
1804 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, | |
1805 | desc_bytes(ctx->sh_desc_update_first), | |
1806 | DMA_TO_DEVICE); | |
1807 | if (ctx->sh_desc_fin_dma && | |
1808 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) | |
1809 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, | |
1810 | desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE); | |
1811 | if (ctx->sh_desc_digest_dma && | |
1812 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) | |
1813 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, | |
1814 | desc_bytes(ctx->sh_desc_digest), | |
1815 | DMA_TO_DEVICE); | |
1816 | if (ctx->sh_desc_finup_dma && | |
1817 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) | |
1818 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, | |
1819 | desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); | |
cfc6f11b RG |
1820 | |
1821 | caam_jr_free(ctx->jrdev); | |
045e3678 YK |
1822 | } |
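/*
 * Editor's note: cra_exit only unmaps the shared descriptors whose DMA
 * handles are set and valid, presumably because cra_init() and
 * ahash_set_sh_desc() can fail partway through, and then returns the job
 * ring that was reserved for this transform.
 */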
1823 | ||
1824 | static void __exit caam_algapi_hash_exit(void) | |
1825 | { | |
045e3678 YK |
1826 | struct caam_hash_alg *t_alg, *n; |
1827 | ||
cfc6f11b | 1828 | if (!hash_list.next) |
045e3678 YK |
1829 | return; |
1830 | ||
cfc6f11b | 1831 | list_for_each_entry_safe(t_alg, n, &hash_list, entry) { |
045e3678 YK |
1832 | crypto_unregister_ahash(&t_alg->ahash_alg); |
1833 | list_del(&t_alg->entry); | |
1834 | kfree(t_alg); | |
1835 | } | |
1836 | } | |
1837 | ||
1838 | static struct caam_hash_alg * | |
cfc6f11b | 1839 | caam_hash_alloc(struct caam_hash_template *template, |
b0e09bae | 1840 | bool keyed) |
045e3678 YK |
1841 | { |
1842 | struct caam_hash_alg *t_alg; | |
1843 | struct ahash_alg *halg; | |
1844 | struct crypto_alg *alg; | |
1845 | ||
1846 | t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); | |
1847 | if (!t_alg) { | |
cfc6f11b | 1848 | pr_err("failed to allocate t_alg\n"); |
045e3678 YK |
1849 | return ERR_PTR(-ENOMEM); |
1850 | } | |
1851 | ||
1852 | t_alg->ahash_alg = template->template_ahash; | |
1853 | halg = &t_alg->ahash_alg; | |
1854 | alg = &halg->halg.base; | |
1855 | ||
b0e09bae YK |
1856 | if (keyed) { |
1857 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1858 | template->hmac_name); | |
1859 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1860 | template->hmac_driver_name); | |
1861 | } else { | |
1862 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1863 | template->name); | |
1864 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1865 | template->driver_name); | |
1866 | } | |
045e3678 YK |
1867 | alg->cra_module = THIS_MODULE; |
1868 | alg->cra_init = caam_hash_cra_init; | |
1869 | alg->cra_exit = caam_hash_cra_exit; | |
1870 | alg->cra_ctxsize = sizeof(struct caam_hash_ctx); | |
1871 | alg->cra_priority = CAAM_CRA_PRIORITY; | |
1872 | alg->cra_blocksize = template->blocksize; | |
1873 | alg->cra_alignmask = 0; | |
1874 | alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH; | |
1875 | alg->cra_type = &crypto_ahash_type; | |
1876 | ||
1877 | t_alg->alg_type = template->alg_type; | |
1878 | t_alg->alg_op = template->alg_op; | |
045e3678 YK |
1879 | |
1880 | return t_alg; | |
1881 | } | |
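/*
 * Editor's note: caam_hash_alloc() clones the ahash template, picks the
 * hmac or plain name pair depending on 'keyed', and fills in the cra_*
 * fields common to every CAAM hash (module, init/exit hooks, context size,
 * CAAM_CRA_PRIORITY, async ahash type); the caller frees the returned
 * structure if registration fails.
 */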
1882 | ||
1883 | static int __init caam_algapi_hash_init(void) | |
1884 | { | |
35af6403 RG |
1885 | struct device_node *dev_node; |
1886 | struct platform_device *pdev; | |
1887 | struct device *ctrldev; | |
1888 | void *priv; | |
045e3678 YK |
1889 | int i = 0, err = 0; |
1890 | ||
35af6403 RG |
1891 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
1892 | if (!dev_node) { | |
1893 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | |
1894 | if (!dev_node) | |
1895 | return -ENODEV; | |
1896 | } | |
1897 | ||
1898 | pdev = of_find_device_by_node(dev_node); | |
1899 | if (!pdev) { | |
1900 | of_node_put(dev_node); | |
1901 | return -ENODEV; | |
1902 | } | |
1903 | ||
1904 | ctrldev = &pdev->dev; | |
1905 | priv = dev_get_drvdata(ctrldev); | |
1906 | of_node_put(dev_node); | |
1907 | ||
1908 | /* | |
1909 | * If priv is NULL, it's probably because the caam driver wasn't | |
1910 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | |
1911 | */ | |
1912 | if (!priv) | |
1913 | return -ENODEV; | |
1914 | ||
cfc6f11b | 1915 | INIT_LIST_HEAD(&hash_list); |
045e3678 YK |
1916 | |
1917 | /* register crypto algorithms the device supports */ | |
1918 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { | |
1919 | /* TODO: check if h/w supports alg */ | |
1920 | struct caam_hash_alg *t_alg; | |
1921 | ||
b0e09bae | 1922 | /* register hmac version */ |
cfc6f11b | 1923 | t_alg = caam_hash_alloc(&driver_hash[i], true); |
b0e09bae YK |
1924 | if (IS_ERR(t_alg)) { |
1925 | err = PTR_ERR(t_alg); | |
cfc6f11b RG |
1926 | pr_warn("%s alg allocation failed\n", |
1927 | driver_hash[i].driver_name); | |
b0e09bae YK |
1928 | continue; |
1929 | } | |
1930 | ||
1931 | err = crypto_register_ahash(&t_alg->ahash_alg); | |
1932 | if (err) { | |
cfc6f11b | 1933 | pr_warn("%s alg registration failed\n", |
b0e09bae YK |
1934 | t_alg->ahash_alg.halg.base.cra_driver_name); |
1935 | kfree(t_alg); | |
1936 | } else | |
cfc6f11b | 1937 | list_add_tail(&t_alg->entry, &hash_list); |
b0e09bae YK |
1938 | |
1939 | /* register unkeyed version */ | |
cfc6f11b | 1940 | t_alg = caam_hash_alloc(&driver_hash[i], false); |
045e3678 YK |
1941 | if (IS_ERR(t_alg)) { |
1942 | err = PTR_ERR(t_alg); | |
cfc6f11b RG |
1943 | pr_warn("%s alg allocation failed\n", |
1944 | driver_hash[i].driver_name); | |
045e3678 YK |
1945 | continue; |
1946 | } | |
1947 | ||
1948 | err = crypto_register_ahash(&t_alg->ahash_alg); | |
1949 | if (err) { | |
cfc6f11b | 1950 | pr_warn("%s alg registration failed\n", |
045e3678 YK |
1951 | t_alg->ahash_alg.halg.base.cra_driver_name); |
1952 | kfree(t_alg); | |
1953 | } else | |
cfc6f11b | 1954 | list_add_tail(&t_alg->entry, &hash_list); |
045e3678 YK |
1955 | } |
1956 | ||
1957 | return err; | |
1958 | } | |
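/*
 * Editor's note: registration failures above are only logged and the loop
 * moves on, so err ends up holding the status of the last attempted
 * registration; a failure on the final entry therefore makes module init
 * return an error even though earlier algorithms remain registered.
 */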
1959 | ||
1960 | module_init(caam_algapi_hash_init); | |
1961 | module_exit(caam_algapi_hash_exit); | |
1962 | ||
1963 | MODULE_LICENSE("GPL"); | |
1964 | MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); | |
1965 | MODULE_AUTHOR("Freescale Semiconductor - NMG"); |
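/*
 * Editor's note (illustrative sketch, not part of this file): kernel code
 * reaches these algorithms through the generic ahash API, either by
 * algorithm name ("sha256", "hmac(sha256)", where the CAAM implementation
 * wins on priority when present) or explicitly by driver name such as
 * "sha256-caam".  Roughly, with data, my_done_cb and my_cb_ctx supplied by
 * the caller and error handling omitted:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_cb_ctx);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	err = crypto_ahash_digest(req);
 *	(a return value of -EINPROGRESS means the request was queued and
 *	 my_done_cb will be called on completion)
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */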