1 | /* |
2 | * caam - Freescale FSL CAAM support for crypto API | |
3 | * | |
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | |
5 | * | |
6 | * Based on talitos crypto API driver. | |
7 | * | |
8 | * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008): | |
9 | * | |
10 | * --------------- --------------- | |
11 | * | JobDesc #1 |-------------------->| ShareDesc | | |
12 | * | *(packet 1) | | (PDB) | | |
13 | * --------------- |------------->| (hashKey) | | |
14 | * . | | (cipherKey) | | |
15 | * . | |-------->| (operation) | | |
16 | * --------------- | | --------------- | |
17 | * | JobDesc #2 |------| | | |
18 | * | *(packet 2) | | | |
19 | * --------------- | | |
20 | * . | | |
21 | * . | | |
22 | * --------------- | | |
23 | * | JobDesc #3 |------------ | |
24 | * | *(packet 3) | | |
25 | * --------------- | |
26 | * | |
27 | * The SharedDesc never changes for a connection unless rekeyed, but | |
28 | * each packet will likely be in a different place. So all we need | |
29 | * to know to process the packet is where the input is, where the | |
30 | * output goes, and what context we want to process with. Context is | |
31 | * in the SharedDesc, packet references in the JobDesc. | |
32 | * | |
33 | * So, a job desc looks like: | |
34 | * | |
35 | * --------------------- | |
36 | * | Header | | |
37 | * | ShareDesc Pointer | | |
38 | * | SEQ_OUT_PTR | | |
39 | * | (output buffer) | | |
40 | * | SEQ_IN_PTR | | |
41 | * | (input buffer) | | |
42 | * | LOAD (to DECO) | | |
43 | * --------------------- | |
44 | */ | |
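
/*
 * As a rough sketch (using the desc_constr.h helpers this driver relies on),
 * the layout above corresponds to a construction sequence along these lines;
 * aead_authenc_encrypt() and ipsec_esp() below build the real descriptors:
 *
 *	init_job_desc_shared(desc, ctx->shared_desc_phys,
 *			     desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_load(desc, iv_dma, ivsize,
 *		    LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
 */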
45 | ||
46 | #include "compat.h" | |
47 | ||
48 | #include "regs.h" | |
49 | #include "intern.h" | |
50 | #include "desc_constr.h" | |
51 | #include "jr.h" | |
52 | #include "error.h" | |
53 | ||
54 | /* | |
55 | * crypto alg | |
56 | */ | |
57 | #define CAAM_CRA_PRIORITY 3000 | |
58 | /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ | |
59 | #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ | |
60 | SHA512_DIGEST_SIZE * 2) | |
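/* e.g. AES-256 (32 bytes) + a SHA-512 split key (2 * 64 bytes) = 160 bytes */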
61 | /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | |
62 | #define CAAM_MAX_IV_LENGTH 16 | |
63 | ||
64 | #ifdef DEBUG | |
65 | /* for print_hex_dumps with line references */ | |
66 | #define xstr(s) str(s) | |
67 | #define str(s) #s | |
68 | #define debug(format, arg...) printk(format, arg) | |
69 | #else | |
70 | #define debug(format, arg...) | |
71 | #endif | |
72 | ||
73 | /* | |
74 | * per-session context | |
75 | */ | |
76 | struct caam_ctx { | |
77 | struct device *jrdev; | |
78 | u32 *sh_desc; | |
79 | dma_addr_t shared_desc_phys; | |
80 | u32 class1_alg_type; | |
81 | u32 class2_alg_type; | |
82 | u32 alg_op; | |
83 | u8 *key; | |
84 | dma_addr_t key_phys; | |
85 | unsigned int enckeylen; |
86 | unsigned int authkeylen; | |
87 | unsigned int split_key_len; | |
88 | unsigned int split_key_pad_len; | |
89 | unsigned int authsize; | |
90 | }; | |
91 | ||
92 | static int aead_authenc_setauthsize(struct crypto_aead *authenc, | |
93 | unsigned int authsize) | |
94 | { | |
95 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); | |
96 | ||
97 | ctx->authsize = authsize; | |
98 | ||
99 | return 0; | |
100 | } | |
101 | ||
102 | struct split_key_result { | |
103 | struct completion completion; | |
104 | int err; | |
105 | }; | |
106 | ||
107 | static void split_key_done(struct device *dev, u32 *desc, u32 err, | |
108 | void *context) | |
109 | { | |
110 | struct split_key_result *res = context; | |
111 | ||
112 | #ifdef DEBUG | |
113 | dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | |
114 | #endif | |
115 | if (err) { | |
116 | char tmp[CAAM_ERROR_STR_MAX];
117 | |
118 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | |
119 | } | |
120 | ||
121 | res->err = err; | |
122 | ||
123 | complete(&res->completion); | |
124 | } | |
125 | ||
126 | /* | |
127 | get a split ipad/opad key | |
128 | ||
129 | Split key generation----------------------------------------------- | |
130 | ||
131 | [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 | |
132 | [01] 0x04000014 key: class2->keyreg len=20 | |
133 | @0xffe01000 | |
134 | [03] 0x84410014 operation: cls2-op sha1 hmac init dec | |
135 | [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm | |
136 | [05] 0xa4000001 jump: class2 local all ->1 [06] | |
137 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 | |
138 | @0xffe04000 | |
139 | */ | |
140 | static int gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
141 | { | |
142 | struct device *jrdev = ctx->jrdev; | |
143 | u32 *desc; | |
144 | struct split_key_result result; | |
145 | dma_addr_t dma_addr_in, dma_addr_out; | |
146 | int ret = 0; | |
147 | ||
148 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | |
149 | ||
150 | init_job_desc(desc, 0); | |
151 | ||
152 | dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen, | |
153 | DMA_TO_DEVICE); | |
154 | if (dma_mapping_error(jrdev, dma_addr_in)) { | |
155 | dev_err(jrdev, "unable to map key input memory\n"); | |
156 | kfree(desc); | |
157 | return -ENOMEM; | |
158 | } | |
159 | append_key(desc, dma_addr_in, authkeylen, CLASS_2 | | |
160 | KEY_DEST_CLASS_REG); | |
161 | ||
162 | /* Sets MDHA up into an HMAC-INIT */ | |
163 | append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT | | |
164 | OP_ALG_AS_INIT); | |
165 | ||
166 | /* | |
167 | * do a FIFO_LOAD of zero, this will trigger the internal key expansion | |
168 | * into both pads inside MDHA
169 | */ | |
170 | append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | | |
171 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); | |
172 | ||
173 | /* | |
174 | * FIFO_STORE with the explicit split-key content store | |
175 | * (0x26 output type) | |
176 | */ | |
177 | dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, | |
178 | DMA_FROM_DEVICE); | |
179 | if (dma_mapping_error(jrdev, dma_addr_out)) { | |
180 | dev_err(jrdev, "unable to map key output memory\n"); | |
181 | kfree(desc); | |
182 | return -ENOMEM; | |
183 | } | |
184 | append_fifo_store(desc, dma_addr_out, ctx->split_key_len, | |
185 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | |
186 | ||
187 | #ifdef DEBUG | |
188 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | |
189 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1); | |
190 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | |
191 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | |
192 | #endif | |
193 | ||
194 | result.err = 0; | |
195 | init_completion(&result.completion); | |
196 | ||
197 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | |
198 | if (!ret) { | |
199 | /* in progress */ | |
200 | wait_for_completion_interruptible(&result.completion); | |
201 | ret = result.err; | |
202 | #ifdef DEBUG | |
203 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | |
204 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | |
205 | ctx->split_key_pad_len, 1); | |
206 | #endif | |
207 | } | |
208 | ||
209 | dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len, | |
210 | DMA_FROM_DEVICE); | |
211 | dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE); | |
212 | ||
213 | kfree(desc); | |
214 | ||
215 | return ret; | |
216 | } | |
217 | ||
218 | static int build_sh_desc_ipsec(struct caam_ctx *ctx) | |
219 | { | |
220 | struct device *jrdev = ctx->jrdev; | |
221 | u32 *sh_desc; | |
222 | u32 *jump_cmd; | |
223 | ||
224 | /* build shared descriptor for this session */ | |
225 | sh_desc = kmalloc(CAAM_CMD_SZ * 4 + ctx->split_key_pad_len + | |
226 | ctx->enckeylen, GFP_DMA | GFP_KERNEL); | |
227 | if (!sh_desc) { | |
228 | dev_err(jrdev, "could not allocate shared descriptor\n"); | |
229 | return -ENOMEM; | |
230 | } | |
231 | ||
232 | init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL); | |
233 | ||
234 | jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL | | |
235 | JUMP_COND_SHRD | JUMP_COND_SELF); | |
236 | ||
237 | /* process keys, starting with class 2/authentication */ | |
238 | append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len, | |
239 | ctx->split_key_len, | |
240 | CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | |
241 | ||
242 | append_key_as_imm(sh_desc, (void *)ctx->key + ctx->split_key_pad_len, | |
243 | ctx->enckeylen, ctx->enckeylen, | |
244 | CLASS_1 | KEY_DEST_CLASS_REG); | |
245 | ||
246 | /* update jump cmd now that we are at the jump target */ | |
247 | set_jump_tgt_here(sh_desc, jump_cmd); | |
248 | ||
249 | ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc, | |
250 | desc_bytes(sh_desc), | |
251 | DMA_TO_DEVICE); | |
252 | if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) { | |
253 | dev_err(jrdev, "unable to map shared descriptor\n"); | |
254 | kfree(sh_desc); | |
255 | return -ENOMEM; | |
256 | } | |
257 | ||
258 | ctx->sh_desc = sh_desc; | |
259 | ||
260 | return 0; | |
261 | } | |
262 | ||
263 | static int aead_authenc_setkey(struct crypto_aead *aead, | |
264 | const u8 *key, unsigned int keylen) | |
265 | { | |
266 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | |
267 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | |
268 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
269 | struct device *jrdev = ctx->jrdev; | |
270 | struct rtattr *rta = (void *)key; | |
271 | struct crypto_authenc_key_param *param; | |
272 | unsigned int authkeylen; | |
273 | unsigned int enckeylen; | |
274 | int ret = 0; | |
275 | ||
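/*
 * The authenc key blob starts with an rtattr whose payload is a
 * struct crypto_authenc_key_param holding the encryption key length
 * (big endian); the authentication key follows the attribute and the
 * encryption key comes last.
 */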
276 | param = RTA_DATA(rta); | |
277 | enckeylen = be32_to_cpu(param->enckeylen); | |
278 | ||
279 | key += RTA_ALIGN(rta->rta_len); | |
280 | keylen -= RTA_ALIGN(rta->rta_len); | |
281 | ||
282 | if (keylen < enckeylen) | |
283 | goto badkey; | |
284 | ||
285 | authkeylen = keylen - enckeylen; | |
286 | ||
287 | if (keylen > CAAM_MAX_KEY_SIZE) | |
288 | goto badkey; | |
289 | ||
290 | /* Pick class 2 key length from algorithm submask */ | |
291 | ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | |
292 | OP_ALG_ALGSEL_SHIFT] * 2; | |
293 | ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); | |
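/*
 * Example: for hmac(sha1) the submask selects mdpadlen[1] = 20, so
 * split_key_len = 40 (ipad and opad states back to back) and
 * split_key_pad_len rounds up to 48 - consistent with the
 * "mdsplit-jdk len=40" store in the split key descriptor dump above.
 */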
294 | ||
295 | #ifdef DEBUG | |
296 | printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", | |
297 | keylen, enckeylen, authkeylen); | |
298 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", | |
299 | ctx->split_key_len, ctx->split_key_pad_len); | |
300 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | |
301 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | |
302 | #endif | |
303 | ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen, | |
304 | GFP_KERNEL | GFP_DMA); | |
305 | if (!ctx->key) { | |
306 | dev_err(jrdev, "could not allocate key output memory\n"); | |
307 | return -ENOMEM; | |
308 | } | |
309 | ||
310 | ret = gen_split_key(ctx, key, authkeylen); | |
311 | if (ret) { | |
312 | kfree(ctx->key); | |
313 | goto badkey; | |
314 | } | |
315 | ||
316 | /* append the encryption key after the auth split key */
317 | memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen); | |
318 | ||
319 | ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + | |
320 | enckeylen, DMA_TO_DEVICE); | |
321 | if (dma_mapping_error(jrdev, ctx->key_phys)) { | |
322 | dev_err(jrdev, "unable to map key i/o memory\n"); | |
323 | kfree(ctx->key); | |
324 | return -ENOMEM; | |
325 | } | |
326 | #ifdef DEBUG | |
327 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | |
328 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | |
329 | ctx->split_key_pad_len + enckeylen, 1); | |
330 | #endif | |
331 | ||
332 | ctx->enckeylen = enckeylen; |
333 | ctx->authkeylen = authkeylen; | |
334 | ||
335 | ret = build_sh_desc_ipsec(ctx); | |
336 | if (ret) { | |
337 | dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len + | |
338 | enckeylen, DMA_TO_DEVICE); | |
339 | kfree(ctx->key); | |
340 | } | |
341 | ||
342 | return ret; | |
343 | badkey: | |
344 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
345 | return -EINVAL; | |
346 | } | |
347 | ||
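/*
 * h/w scatter/gather (link table) entry: a 64-bit bus address, a 32-bit
 * length whose bit 30 is the Final flag (set by sg_to_link_tbl() below),
 * a buffer pool id and a 16-bit offset into the buffer.
 */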
348 | struct link_tbl_entry { | |
349 | u64 ptr; | |
350 | u32 len; | |
351 | u8 reserved; | |
352 | u8 buf_pool_id; | |
353 | u16 offset; | |
354 | }; | |
355 | ||
356 | /* | |
357 | * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor | |
358 | * @src_nents: number of segments in input scatterlist | |
359 | * @dst_nents: number of segments in output scatterlist | |
360 | * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist | |
361 | * @link_tbl_bytes: length of dma mapped link_tbl space
362 | * @link_tbl_dma: bus physical mapped address of h/w link table
363 | * @link_tbl: pointer to the h/w link table, stored after the job descriptor
364 | * @hw_desc: the h/w job descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE), followed by any referenced link tables
365 | */ | |
366 | struct ipsec_esp_edesc { | |
367 | int assoc_nents; | |
368 | int src_nents; | |
369 | int dst_nents; | |
370 | int link_tbl_bytes; | |
371 | dma_addr_t link_tbl_dma; | |
372 | struct link_tbl_entry *link_tbl; | |
373 | u32 hw_desc[0]; | |
374 | }; | |
375 | ||
376 | static void ipsec_esp_unmap(struct device *dev, | |
377 | struct ipsec_esp_edesc *edesc, | |
378 | struct aead_request *areq) | |
379 | { | |
380 | dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE); | |
381 | ||
382 | if (unlikely(areq->dst != areq->src)) { | |
383 | dma_unmap_sg(dev, areq->src, edesc->src_nents, | |
384 | DMA_TO_DEVICE); | |
385 | dma_unmap_sg(dev, areq->dst, edesc->dst_nents, | |
386 | DMA_FROM_DEVICE); | |
387 | } else { | |
388 | dma_unmap_sg(dev, areq->src, edesc->src_nents, | |
389 | DMA_BIDIRECTIONAL); | |
390 | } | |
391 | ||
392 | if (edesc->link_tbl_bytes) | |
393 | dma_unmap_single(dev, edesc->link_tbl_dma, | |
394 | edesc->link_tbl_bytes, | |
395 | DMA_TO_DEVICE); | |
396 | } | |
397 | ||
398 | /* | |
399 | * ipsec_esp descriptor callbacks | |
400 | */ | |
401 | static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |
402 | void *context) | |
403 | { | |
404 | struct aead_request *areq = context; | |
405 | struct ipsec_esp_edesc *edesc; | |
406 | #ifdef DEBUG | |
407 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
408 | int ivsize = crypto_aead_ivsize(aead); | |
409 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
410 | ||
411 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | |
412 | #endif | |
413 | edesc = (struct ipsec_esp_edesc *)((char *)desc - | |
414 | offsetof(struct ipsec_esp_edesc, hw_desc)); | |
415 | ||
416 | if (err) { | |
417 | char tmp[CAAM_ERROR_STR_MAX];
418 |
419 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); |
420 | } | |
421 | ||
422 | ipsec_esp_unmap(jrdev, edesc, areq); | |
423 | ||
424 | #ifdef DEBUG | |
425 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | |
426 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), | |
427 | areq->assoclen , 1); | |
428 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | |
429 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, | |
430 | edesc->src_nents ? 100 : ivsize, 1); | |
431 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | |
432 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), | |
433 | edesc->src_nents ? 100 : areq->cryptlen + | |
434 | ctx->authsize + 4, 1); | |
435 | #endif | |
436 | ||
437 | kfree(edesc); | |
438 | ||
439 | aead_request_complete(areq, err); | |
440 | } | |
441 | ||
442 | static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |
443 | void *context) | |
444 | { | |
445 | struct aead_request *areq = context; | |
446 | struct ipsec_esp_edesc *edesc; | |
447 | #ifdef DEBUG | |
448 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
449 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
450 | ||
451 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | |
452 | #endif | |
453 | edesc = (struct ipsec_esp_edesc *)((char *)desc - | |
454 | offsetof(struct ipsec_esp_edesc, hw_desc)); | |
455 | ||
456 | if (err) { | |
457 | char tmp[CAAM_ERROR_STR_MAX];
458 | |
459 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | |
460 | } | |
461 | ||
462 | ipsec_esp_unmap(jrdev, edesc, areq); | |
463 | ||
464 | /* | |
465 | * verify hw auth check passed else return -EBADMSG | |
466 | */ | |
467 | if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) | |
468 | err = -EBADMSG; | |
469 | ||
470 | #ifdef DEBUG | |
471 | print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ", | |
472 | DUMP_PREFIX_ADDRESS, 16, 4, | |
473 | ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)), | |
474 | sizeof(struct iphdr) + areq->assoclen + | |
475 | ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) + | |
476 | ctx->authsize + 36, 1); | |
477 | if (!err && edesc->link_tbl_bytes) { | |
478 | struct scatterlist *sg = sg_last(areq->src, edesc->src_nents); | |
479 | print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ", | |
480 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg), | |
481 | sg->length + ctx->authsize + 16, 1); | |
482 | } | |
483 | #endif | |
484 | kfree(edesc); | |
485 | ||
486 | aead_request_complete(areq, err); | |
487 | } | |
488 | ||
489 | /* | |
490 | * convert scatterlist to h/w link table format | |
491 | * scatterlist must have been previously dma mapped | |
492 | */ | |
493 | static void sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |
494 | struct link_tbl_entry *link_tbl_ptr, u32 offset) | |
495 | { | |
496 | while (sg_count) { | |
497 | link_tbl_ptr->ptr = sg_dma_address(sg); | |
498 | link_tbl_ptr->len = sg_dma_len(sg); | |
499 | link_tbl_ptr->reserved = 0; | |
500 | link_tbl_ptr->buf_pool_id = 0; | |
501 | link_tbl_ptr->offset = offset; | |
502 | link_tbl_ptr++; | |
503 | sg = sg_next(sg); | |
504 | sg_count--; | |
505 | } | |
506 | ||
507 | /* set Final bit (marks end of link table) */ | |
508 | link_tbl_ptr--; | |
509 | link_tbl_ptr->len |= 0x40000000; | |
510 | } | |
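
/*
 * ipsec_esp() below lays the assoc, src and (when different) dst chunks out
 * back to back in edesc->link_tbl; each chunk gets its own Final bit, so the
 * h/w sees three independent link tables sharing one DMA mapping.
 */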
511 | ||
512 | /* | |
513 | * fill in and submit ipsec_esp job descriptor | |
514 | */ | |
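/*
 * The single-pass descriptor appended here does, in order: the class2 (auth)
 * OPERATION; a FIFO LOAD of the associated data; a MOVE of the IV from the
 * class1 context into the class2 input FIFO; the class1 (cipher) OPERATION;
 * a SEQ IN PTR and SEQ FIFO LOAD covering the payload; a SEQ OUT PTR and
 * SEQ FIFO STORE for the result; and finally a store (encrypt) or check
 * (decrypt) of the ICV.
 */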
515 | static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq, | |
516 | u32 encrypt, | |
517 | void (*callback) (struct device *dev, u32 *desc, | |
518 | u32 err, void *context)) | |
519 | { | |
520 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
521 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
522 | struct device *jrdev = ctx->jrdev; | |
523 | u32 *desc = edesc->hw_desc, options; | |
524 | int ret, sg_count, assoc_sg_count; | |
525 | int ivsize = crypto_aead_ivsize(aead); | |
526 | int authsize = ctx->authsize; | |
527 | dma_addr_t ptr, dst_dma, src_dma; | |
528 | #ifdef DEBUG | |
529 | u32 *sh_desc = ctx->sh_desc; | |
530 | ||
531 | debug("assoclen %d cryptlen %d authsize %d\n", | |
532 | areq->assoclen, areq->cryptlen, authsize); | |
533 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | |
534 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), | |
535 | areq->assoclen , 1); | |
536 | print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ", | |
537 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, | |
538 | edesc->src_nents ? 100 : ivsize, 1); | |
539 | print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ", | |
540 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), | |
541 | edesc->src_nents ? 100 : areq->cryptlen + authsize, 1); | |
542 | print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ", | |
543 | DUMP_PREFIX_ADDRESS, 16, 4, sh_desc, | |
544 | desc_bytes(sh_desc), 1); | |
545 | #endif | |
546 | assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1, | |
547 | DMA_TO_DEVICE); | |
548 | if (areq->src == areq->dst) | |
549 | sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, | |
550 | DMA_BIDIRECTIONAL); | |
551 | else | |
552 | sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1, | |
553 | DMA_TO_DEVICE); | |
554 | ||
555 | /* start auth operation */ | |
556 | append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL | | |
557 | (encrypt ? : OP_ALG_ICV_ON)); | |
558 | ||
559 | /* Load FIFO with data for Class 2 CHA */ | |
560 | options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG; | |
561 | if (!edesc->assoc_nents) { | |
562 | ptr = sg_dma_address(areq->assoc); | |
563 | } else { | |
564 | sg_to_link_tbl(areq->assoc, edesc->assoc_nents, | |
565 | edesc->link_tbl, 0); | |
566 | ptr = edesc->link_tbl_dma; | |
567 | options |= LDST_SGF; | |
568 | } | |
569 | append_fifo_load(desc, ptr, areq->assoclen, options); | |
570 | ||
571 | /* copy iv from cipher/class1 input context to class2 infifo */ | |
572 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize); | |
573 | ||
574 | if (!encrypt) { |
575 | u32 *jump_cmd, *uncond_jump_cmd; | |
576 | ||
577 | /* JUMP if shared */ | |
578 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); | |
579 | ||
580 | /* start class 1 (cipher) operation, non-shared version */ | |
581 | append_operation(desc, ctx->class1_alg_type | | |
582 | OP_ALG_AS_INITFINAL); | |
583 | ||
584 | uncond_jump_cmd = append_jump(desc, 0); | |
585 | ||
586 | set_jump_tgt_here(desc, jump_cmd); | |
587 | ||
588 | /* start class 1 (cipher) operation, shared version */ | |
589 | append_operation(desc, ctx->class1_alg_type | | |
590 | OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK); | |
591 | set_jump_tgt_here(desc, uncond_jump_cmd); | |
592 | } else | |
593 | append_operation(desc, ctx->class1_alg_type | | |
594 | OP_ALG_AS_INITFINAL | encrypt); | |
595 | |
596 | /* load payload & instruct class2 to snoop class1 if encrypting */
597 | options = 0; | |
598 | if (!edesc->src_nents) { | |
599 | src_dma = sg_dma_address(areq->src); | |
600 | } else { | |
601 | sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl + | |
602 | edesc->assoc_nents, 0); | |
603 | src_dma = edesc->link_tbl_dma + edesc->assoc_nents * | |
604 | sizeof(struct link_tbl_entry); | |
605 | options |= LDST_SGF; | |
606 | } | |
607 | append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options); | |
608 | append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH | | |
609 | FIFOLD_TYPE_LASTBOTH | | |
610 | (encrypt ? FIFOLD_TYPE_MSG1OUT2 | |
611 | : FIFOLD_TYPE_MSG)); | |
612 | ||
613 | /* specify destination */ | |
614 | if (areq->src == areq->dst) { | |
615 | dst_dma = src_dma; | |
616 | } else { | |
617 | sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1, | |
618 | DMA_FROM_DEVICE); | |
619 | if (!edesc->dst_nents) { | |
620 | dst_dma = sg_dma_address(areq->dst); | |
621 | options = 0; | |
622 | } else { | |
623 | sg_to_link_tbl(areq->dst, edesc->dst_nents, | |
624 | edesc->link_tbl + edesc->assoc_nents + | |
625 | edesc->src_nents, 0); | |
626 | dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents + | |
627 | edesc->src_nents) * | |
628 | sizeof(struct link_tbl_entry); | |
629 | options = LDST_SGF; | |
630 | } | |
631 | } | |
632 | append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options); | |
633 | append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA); | |
634 | ||
635 | /* ICV */ | |
636 | if (encrypt) | |
637 | append_seq_store(desc, authsize, LDST_CLASS_2_CCB | | |
638 | LDST_SRCDST_BYTE_CONTEXT); | |
639 | else | |
640 | append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 | | |
641 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | |
642 | ||
643 | #ifdef DEBUG | |
644 | debug("job_desc_len %d\n", desc_len(desc)); | |
645 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | |
646 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1); | |
647 | print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ", | |
648 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl, | |
649 | edesc->link_tbl_bytes, 1); | |
650 | #endif | |
651 | ||
652 | ret = caam_jr_enqueue(jrdev, desc, callback, areq); | |
653 | if (!ret) | |
654 | ret = -EINPROGRESS; | |
655 | else { | |
656 | ipsec_esp_unmap(jrdev, edesc, areq); | |
657 | kfree(edesc); | |
658 | } | |
659 | ||
660 | return ret; | |
661 | } | |
662 | ||
663 | /* | |
664 | * derive number of elements in scatterlist | |
665 | */ | |
666 | static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) | |
667 | { | |
668 | struct scatterlist *sg = sg_list; | |
669 | int sg_nents = 0; | |
670 | ||
671 | *chained = 0; | |
672 | while (nbytes > 0) { | |
673 | sg_nents++; | |
674 | nbytes -= sg->length; | |
675 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | |
676 | *chained = 1; | |
677 | sg = scatterwalk_sg_next(sg); | |
678 | } | |
679 | ||
680 | return sg_nents; | |
681 | } | |
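
/*
 * Note: callers below normalize a count of 1 to 0, meaning "no link table
 * needed, use the single segment's dma address directly".
 */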
682 | ||
683 | /* | |
684 | * allocate and map the ipsec_esp extended descriptor | |
685 | */ | |
686 | static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | |
687 | int desc_bytes) | |
688 | { | |
689 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
690 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
691 | struct device *jrdev = ctx->jrdev; | |
692 | gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | |
693 | GFP_ATOMIC; | |
694 | int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes; | |
695 | struct ipsec_esp_edesc *edesc; | |
696 | ||
697 | assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained); | |
698 | BUG_ON(chained); | |
699 | if (likely(assoc_nents == 1)) | |
700 | assoc_nents = 0; | |
701 | ||
702 | src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize, | |
703 | &chained); | |
704 | BUG_ON(chained); | |
705 | if (src_nents == 1) | |
706 | src_nents = 0; | |
707 | ||
708 | if (unlikely(areq->dst != areq->src)) { | |
709 | dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize, | |
710 | &chained); | |
711 | BUG_ON(chained); | |
712 | if (dst_nents == 1) | |
713 | dst_nents = 0; | |
714 | } | |
715 | ||
716 | link_tbl_bytes = (assoc_nents + src_nents + dst_nents) * | |
717 | sizeof(struct link_tbl_entry); | |
718 | debug("link_tbl_bytes %d\n", link_tbl_bytes); | |
719 | ||
720 | /* allocate space for base edesc and hw desc commands, link tables */ | |
721 | edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes + | |
722 | link_tbl_bytes, GFP_DMA | flags); | |
723 | if (!edesc) { | |
724 | dev_err(jrdev, "could not allocate extended descriptor\n"); | |
725 | return ERR_PTR(-ENOMEM); | |
726 | } | |
727 | ||
728 | edesc->assoc_nents = assoc_nents; | |
729 | edesc->src_nents = src_nents; | |
730 | edesc->dst_nents = dst_nents; | |
731 | edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) + | |
732 | desc_bytes; | |
733 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | |
734 | link_tbl_bytes, DMA_TO_DEVICE); | |
735 | edesc->link_tbl_bytes = link_tbl_bytes; | |
736 | ||
737 | return edesc; | |
738 | } | |
739 | ||
740 | static int aead_authenc_encrypt(struct aead_request *areq) | |
741 | { | |
742 | struct ipsec_esp_edesc *edesc; | |
743 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
744 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
745 | struct device *jrdev = ctx->jrdev; | |
746 | int ivsize = crypto_aead_ivsize(aead); | |
747 | u32 *desc; | |
748 | dma_addr_t iv_dma; | |
749 | ||
750 | /* allocate extended descriptor */ | |
751 | edesc = ipsec_esp_edesc_alloc(areq, 21 * sizeof(u32)); | |
752 | if (IS_ERR(edesc)) | |
753 | return PTR_ERR(edesc); | |
754 | ||
755 | desc = edesc->hw_desc; | |
756 | ||
757 | /* insert shared descriptor pointer */ | |
758 | init_job_desc_shared(desc, ctx->shared_desc_phys, | |
759 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | |
760 | ||
761 | iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE); | |
762 | /* check dma error */ | |
763 | ||
764 | append_load(desc, iv_dma, ivsize, | |
765 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | |
766 | ||
767 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | |
768 | } | |
769 | ||
770 | static int aead_authenc_decrypt(struct aead_request *req) | |
771 | { | |
772 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | |
773 | int ivsize = crypto_aead_ivsize(aead); | |
774 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
775 | struct device *jrdev = ctx->jrdev; | |
776 | struct ipsec_esp_edesc *edesc; | |
777 | u32 *desc; | |
778 | dma_addr_t iv_dma; | |
779 | ||
780 | req->cryptlen -= ctx->authsize; | |
781 | ||
782 | /* allocate extended descriptor */ | |
783 | edesc = ipsec_esp_edesc_alloc(req, 24 * sizeof(u32));
784 | if (IS_ERR(edesc)) |
785 | return PTR_ERR(edesc); | |
786 | ||
787 | desc = edesc->hw_desc; | |
788 | ||
789 | /* insert shared descriptor pointer */ | |
790 | init_job_desc_shared(desc, ctx->shared_desc_phys, | |
791 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | |
792 | ||
793 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); | |
794 | /* check dma error */ | |
795 | ||
796 | append_load(desc, iv_dma, ivsize, | |
797 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | |
798 | ||
799 | return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done); | |
800 | } | |
801 | ||
802 | static int aead_authenc_givencrypt(struct aead_givcrypt_request *req) | |
803 | { | |
804 | struct aead_request *areq = &req->areq; | |
805 | struct ipsec_esp_edesc *edesc; | |
806 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
807 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
808 | struct device *jrdev = ctx->jrdev; | |
809 | int ivsize = crypto_aead_ivsize(aead); | |
810 | dma_addr_t iv_dma; | |
811 | u32 *desc; | |
812 | ||
813 | iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE); | |
814 | ||
815 | debug("%s: giv %p\n", __func__, req->giv); | |
816 | ||
817 | /* allocate extended descriptor */ | |
818 | edesc = ipsec_esp_edesc_alloc(areq, 27 * sizeof(u32)); | |
819 | if (IS_ERR(edesc)) | |
820 | return PTR_ERR(edesc); | |
821 | ||
822 | desc = edesc->hw_desc; | |
823 | ||
824 | /* insert shared descriptor pointer */ | |
825 | init_job_desc_shared(desc, ctx->shared_desc_phys, | |
826 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | |
827 | ||
828 | /* | |
829 | * LOAD IMM Info FIFO | |
830 | * to DECO, Last, Padding, Random, Message, 16 bytes | |
831 | */ | |
832 | append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 | | |
833 | NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | | |
834 | NFIFOENTRY_PTYPE_RND | ivsize, | |
835 | LDST_SRCDST_WORD_INFO_FIFO); | |
836 | ||
837 | /* | |
838 | * disable info FIFO entries since the above serves as the entry;
839 | * this way, the MOVE command won't generate an entry.
840 | * Note that this isn't required in more recent versions of | |
841 | * SEC as a MOVE that doesn't do info FIFO entries is available. | |
842 | */ | |
843 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | |
844 | ||
845 | /* MOVE DECO Alignment -> C1 Context 16 bytes */ | |
846 | append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize);
847 | |
848 | /* re-enable info fifo entries */ | |
849 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | |
850 | ||
851 | /* MOVE C1 Context -> OFIFO 16 bytes */ | |
852 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize);
853 | |
854 | append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA); | |
855 | ||
856 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | |
857 | } | |
858 | ||
859 | struct caam_alg_template { | |
860 | char name[CRYPTO_MAX_ALG_NAME]; | |
861 | char driver_name[CRYPTO_MAX_ALG_NAME]; | |
862 | unsigned int blocksize; | |
863 | struct aead_alg aead; | |
864 | u32 class1_alg_type; | |
865 | u32 class2_alg_type; | |
866 | u32 alg_op; | |
867 | }; | |
868 | ||
869 | static struct caam_alg_template driver_algs[] = { | |
870 | /* single-pass ipsec_esp descriptor */ | |
871 | { | |
872 | .name = "authenc(hmac(sha1),cbc(aes))", | |
873 | .driver_name = "authenc-hmac-sha1-cbc-aes-caam", | |
874 | .blocksize = AES_BLOCK_SIZE, | |
875 | .aead = { | |
876 | .setkey = aead_authenc_setkey, | |
877 | .setauthsize = aead_authenc_setauthsize, | |
878 | .encrypt = aead_authenc_encrypt, | |
879 | .decrypt = aead_authenc_decrypt, | |
880 | .givencrypt = aead_authenc_givencrypt, | |
881 | .geniv = "<built-in>", | |
882 | .ivsize = AES_BLOCK_SIZE, | |
883 | .maxauthsize = SHA1_DIGEST_SIZE, | |
884 | }, | |
885 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | |
886 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | |
887 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | |
888 | }, | |
889 | { | |
890 | .name = "authenc(hmac(sha256),cbc(aes))", | |
891 | .driver_name = "authenc-hmac-sha256-cbc-aes-caam", | |
892 | .blocksize = AES_BLOCK_SIZE, | |
893 | .aead = { | |
894 | .setkey = aead_authenc_setkey, | |
895 | .setauthsize = aead_authenc_setauthsize, | |
896 | .encrypt = aead_authenc_encrypt, | |
897 | .decrypt = aead_authenc_decrypt, | |
898 | .givencrypt = aead_authenc_givencrypt, | |
899 | .geniv = "<built-in>", | |
900 | .ivsize = AES_BLOCK_SIZE, | |
901 | .maxauthsize = SHA256_DIGEST_SIZE, | |
902 | }, | |
903 | .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, | |
904 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | |
905 | OP_ALG_AAI_HMAC_PRECOMP, | |
906 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | |
907 | }, | |
908 | { | |
909 | .name = "authenc(hmac(sha1),cbc(des3_ede))", | |
910 | .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam", | |
911 | .blocksize = DES3_EDE_BLOCK_SIZE, | |
912 | .aead = { | |
913 | .setkey = aead_authenc_setkey, | |
914 | .setauthsize = aead_authenc_setauthsize, | |
915 | .encrypt = aead_authenc_encrypt, | |
916 | .decrypt = aead_authenc_decrypt, | |
917 | .givencrypt = aead_authenc_givencrypt, | |
918 | .geniv = "<built-in>", | |
919 | .ivsize = DES3_EDE_BLOCK_SIZE, | |
920 | .maxauthsize = SHA1_DIGEST_SIZE, | |
921 | }, | |
922 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | |
923 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | |
924 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | |
925 | }, | |
926 | { | |
927 | .name = "authenc(hmac(sha256),cbc(des3_ede))", | |
928 | .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam", | |
929 | .blocksize = DES3_EDE_BLOCK_SIZE, | |
930 | .aead = { | |
931 | .setkey = aead_authenc_setkey, | |
932 | .setauthsize = aead_authenc_setauthsize, | |
933 | .encrypt = aead_authenc_encrypt, | |
934 | .decrypt = aead_authenc_decrypt, | |
935 | .givencrypt = aead_authenc_givencrypt, | |
936 | .geniv = "<built-in>", | |
937 | .ivsize = DES3_EDE_BLOCK_SIZE, | |
938 | .maxauthsize = SHA256_DIGEST_SIZE, | |
939 | }, | |
940 | .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, | |
941 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | |
942 | OP_ALG_AAI_HMAC_PRECOMP, | |
943 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | |
944 | }, | |
945 | { | |
946 | .name = "authenc(hmac(sha1),cbc(des))", | |
947 | .driver_name = "authenc-hmac-sha1-cbc-des-caam", | |
948 | .blocksize = DES_BLOCK_SIZE, | |
949 | .aead = { | |
950 | .setkey = aead_authenc_setkey, | |
951 | .setauthsize = aead_authenc_setauthsize, | |
952 | .encrypt = aead_authenc_encrypt, | |
953 | .decrypt = aead_authenc_decrypt, | |
954 | .givencrypt = aead_authenc_givencrypt, | |
955 | .geniv = "<built-in>", | |
956 | .ivsize = DES_BLOCK_SIZE, | |
957 | .maxauthsize = SHA1_DIGEST_SIZE, | |
958 | }, | |
959 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | |
960 | .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, | |
961 | .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, | |
962 | }, | |
963 | { | |
964 | .name = "authenc(hmac(sha256),cbc(des))", | |
965 | .driver_name = "authenc-hmac-sha256-cbc-des-caam", | |
966 | .blocksize = DES_BLOCK_SIZE, | |
967 | .aead = { | |
968 | .setkey = aead_authenc_setkey, | |
969 | .setauthsize = aead_authenc_setauthsize, | |
970 | .encrypt = aead_authenc_encrypt, | |
971 | .decrypt = aead_authenc_decrypt, | |
972 | .givencrypt = aead_authenc_givencrypt, | |
973 | .geniv = "<built-in>", | |
974 | .ivsize = DES_BLOCK_SIZE, | |
975 | .maxauthsize = SHA256_DIGEST_SIZE, | |
976 | }, | |
977 | .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, | |
978 | .class2_alg_type = OP_ALG_ALGSEL_SHA256 | | |
979 | OP_ALG_AAI_HMAC_PRECOMP, | |
980 | .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, | |
981 | }, | |
982 | }; | |
983 | ||
984 | struct caam_crypto_alg { | |
985 | struct list_head entry; | |
986 | struct device *ctrldev; | |
987 | int class1_alg_type; | |
988 | int class2_alg_type; | |
989 | int alg_op; | |
990 | struct crypto_alg crypto_alg; | |
991 | }; | |
992 | ||
993 | static int caam_cra_init(struct crypto_tfm *tfm) | |
994 | { | |
995 | struct crypto_alg *alg = tfm->__crt_alg; | |
996 | struct caam_crypto_alg *caam_alg = | |
997 | container_of(alg, struct caam_crypto_alg, crypto_alg); | |
998 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | |
999 | struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev); | |
1000 | int tgt_jr = atomic_inc_return(&priv->tfm_count); | |
1001 | ||
1002 | /* | |
1003 | * distribute tfms across job rings to ensure in-order | |
1004 | * crypto request processing per tfm | |
1005 | */ | |
1006 | ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi]; | |
1007 | ||
1008 | /* copy descriptor header template value */ | |
1009 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; | |
1010 | ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; | |
1011 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; | |
1012 | ||
1013 | return 0; | |
1014 | } | |
1015 | ||
1016 | static void caam_cra_exit(struct crypto_tfm *tfm) | |
1017 | { | |
1018 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | |
1019 | ||
1020 | if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys)) | |
1021 | dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys, | |
1022 | desc_bytes(ctx->sh_desc), DMA_TO_DEVICE); | |
1023 | kfree(ctx->sh_desc); | |
1024 | } | |
1025 | ||
1026 | static void __exit caam_algapi_exit(void) | |
1027 | { | |
1028 | ||
1029 | struct device_node *dev_node; | |
1030 | struct platform_device *pdev; | |
1031 | struct device *ctrldev; | |
1032 | struct caam_drv_private *priv; | |
1033 | struct caam_crypto_alg *t_alg, *n; | |
1034 | int i, err; | |
1035 | ||
1036 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1037 | if (!dev_node) |
1038 | return; | |
1039 | ||
1040 | pdev = of_find_device_by_node(dev_node); | |
1041 | if (!pdev) | |
1042 | return; | |
1043 | ||
1044 | ctrldev = &pdev->dev; | |
1045 | of_node_put(dev_node); | |
1046 | priv = dev_get_drvdata(ctrldev); | |
1047 | ||
1048 | if (!priv->alg_list.next) | |
1049 | return; | |
1050 | ||
1051 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { | |
1052 | crypto_unregister_alg(&t_alg->crypto_alg); | |
1053 | list_del(&t_alg->entry); | |
1054 | kfree(t_alg); | |
1055 | } | |
1056 | ||
1057 | for (i = 0; i < priv->total_jobrs; i++) { | |
1058 | err = caam_jr_deregister(priv->algapi_jr[i]); | |
1059 | if (err < 0) | |
1060 | break; | |
1061 | } | |
1062 | kfree(priv->algapi_jr); | |
1063 | } | |
1064 | ||
1065 | static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | |
1066 | struct caam_alg_template | |
1067 | *template) | |
1068 | { | |
1069 | struct caam_crypto_alg *t_alg; | |
1070 | struct crypto_alg *alg; | |
1071 | ||
1072 | t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); | |
1073 | if (!t_alg) { | |
1074 | dev_err(ctrldev, "failed to allocate t_alg\n"); | |
1075 | return ERR_PTR(-ENOMEM); | |
1076 | } | |
1077 | ||
1078 | alg = &t_alg->crypto_alg; | |
1079 | ||
1080 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); | |
1081 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1082 | template->driver_name); | |
1083 | alg->cra_module = THIS_MODULE; | |
1084 | alg->cra_init = caam_cra_init; | |
1085 | alg->cra_exit = caam_cra_exit; | |
1086 | alg->cra_priority = CAAM_CRA_PRIORITY; | |
1087 | alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | |
1088 | alg->cra_blocksize = template->blocksize; | |
1089 | alg->cra_alignmask = 0; | |
1090 | alg->cra_type = &crypto_aead_type; | |
1091 | alg->cra_ctxsize = sizeof(struct caam_ctx); | |
1092 | alg->cra_u.aead = template->aead; | |
1093 | ||
1094 | t_alg->class1_alg_type = template->class1_alg_type; | |
1095 | t_alg->class2_alg_type = template->class2_alg_type; | |
1096 | t_alg->alg_op = template->alg_op; | |
1097 | t_alg->ctrldev = ctrldev; | |
1098 | ||
1099 | return t_alg; | |
1100 | } | |
1101 | ||
1102 | static int __init caam_algapi_init(void) | |
1103 | { | |
1104 | struct device_node *dev_node; | |
1105 | struct platform_device *pdev; | |
1106 | struct device *ctrldev, **jrdev; | |
1107 | struct caam_drv_private *priv; | |
1108 | int i = 0, err = 0; | |
1109 | ||
1110 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
1111 | if (!dev_node) |
1112 | return -ENODEV; | |
1113 | ||
1114 | pdev = of_find_device_by_node(dev_node); | |
1115 | if (!pdev) | |
1116 | return -ENODEV; | |
1117 | ||
1118 | ctrldev = &pdev->dev; | |
1119 | priv = dev_get_drvdata(ctrldev); | |
1120 | of_node_put(dev_node); | |
1121 | ||
1122 | INIT_LIST_HEAD(&priv->alg_list); | |
1123 | ||
1124 | jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL); | |
1125 | if (!jrdev) | |
1126 | return -ENOMEM; | |
1127 | ||
1128 | for (i = 0; i < priv->total_jobrs; i++) { | |
1129 | err = caam_jr_register(ctrldev, &jrdev[i]); | |
1130 | if (err < 0) | |
1131 | break; | |
1132 | } | |
1133 | if (err < 0 && i == 0) { | |
1134 | dev_err(ctrldev, "algapi error in job ring registration: %d\n", | |
1135 | err); | |
1136 | kfree(jrdev);
1137 | return err; |
1138 | } | |
1139 | ||
1140 | priv->num_jrs_for_algapi = i; | |
1141 | priv->algapi_jr = jrdev; | |
1142 | atomic_set(&priv->tfm_count, -1); | |
1143 | ||
1144 | /* register crypto algorithms the device supports */ | |
1145 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | |
1146 | /* TODO: check if h/w supports alg */ | |
1147 | struct caam_crypto_alg *t_alg; | |
1148 | ||
1149 | t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); | |
1150 | if (IS_ERR(t_alg)) { | |
1151 | err = PTR_ERR(t_alg); | |
1152 | dev_warn(ctrldev, "%s alg allocation failed\n", | |
1153 | driver_algs[i].driver_name);
1154 | continue; |
1155 | } | |
1156 | ||
1157 | err = crypto_register_alg(&t_alg->crypto_alg); | |
1158 | if (err) { | |
1159 | dev_warn(ctrldev, "%s alg registration failed\n", | |
1160 | t_alg->crypto_alg.cra_driver_name); | |
1161 | kfree(t_alg); | |
1162 | } else { | |
1163 | list_add_tail(&t_alg->entry, &priv->alg_list); | |
1164 | dev_info(ctrldev, "%s\n", | |
1165 | t_alg->crypto_alg.cra_driver_name); | |
1166 | } | |
1167 | } | |
1168 | ||
1169 | return err; | |
1170 | } | |
1171 | ||
1172 | module_init(caam_algapi_init); | |
1173 | module_exit(caam_algapi_exit); | |
1174 | ||
1175 | MODULE_LICENSE("GPL"); | |
1176 | MODULE_DESCRIPTION("FSL CAAM support for crypto API"); | |
1177 | MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); |