/*
 * (extraction residue removed: git-blame table header, commit 8e8ec596)
 */
1 | /* |
2 | * caam - Freescale FSL CAAM support for crypto API | |
3 | * | |
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | |
5 | * | |
6 | * Based on talitos crypto API driver. | |
7 | * | |
8 | * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008): | |
9 | * | |
10 | * --------------- --------------- | |
11 | * | JobDesc #1 |-------------------->| ShareDesc | | |
12 | * | *(packet 1) | | (PDB) | | |
13 | * --------------- |------------->| (hashKey) | | |
14 | * . | | (cipherKey) | | |
15 | * . | |-------->| (operation) | | |
16 | * --------------- | | --------------- | |
17 | * | JobDesc #2 |------| | | |
18 | * | *(packet 2) | | | |
19 | * --------------- | | |
20 | * . | | |
21 | * . | | |
22 | * --------------- | | |
23 | * | JobDesc #3 |------------ | |
24 | * | *(packet 3) | | |
25 | * --------------- | |
26 | * | |
27 | * The SharedDesc never changes for a connection unless rekeyed, but | |
28 | * each packet will likely be in a different place. So all we need | |
29 | * to know to process the packet is where the input is, where the | |
30 | * output goes, and what context we want to process with. Context is | |
31 | * in the SharedDesc, packet references in the JobDesc. | |
32 | * | |
33 | * So, a job desc looks like: | |
34 | * | |
35 | * --------------------- | |
36 | * | Header | | |
37 | * | ShareDesc Pointer | | |
38 | * | SEQ_OUT_PTR | | |
39 | * | (output buffer) | | |
40 | * | SEQ_IN_PTR | | |
41 | * | (input buffer) | | |
42 | * | LOAD (to DECO) | | |
43 | * --------------------- | |
44 | */ | |
45 | ||
46 | #include "compat.h" | |
47 | ||
48 | #include "regs.h" | |
49 | #include "intern.h" | |
50 | #include "desc_constr.h" | |
51 | #include "jr.h" | |
52 | #include "error.h" | |
53 | ||
/*
 * crypto alg
 */
/* registered above generic s/w implementations so CAAM is preferred */
#define CAAM_CRA_PRIORITY 3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
			   SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH 16
63 | ||
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
/*
 * "## arg" swallows the preceding comma when debug() is invoked with a
 * bare format string; the original expansion "printk(format, arg)" did
 * not compile for zero-vararg call sites.
 */
#define debug(format, arg...) printk(format, ## arg)
#else
#define debug(format, arg...)
#endif
72 | ||
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;		/* job ring device servicing this tfm */
	u32 *sh_desc;			/* shared descriptor (cpu address) */
	dma_addr_t shared_desc_phys;	/* shared descriptor (bus address) */
	u32 class1_alg_type;		/* cipher (class 1) OP_ALG_* selector */
	u32 class2_alg_type;		/* auth (class 2) OP_ALG_* selector */
	u32 alg_op;			/* op used for split-key generation */
	u8 *key;			/* split auth key + raw enc key (cpu) */
	dma_addr_t key_phys;		/* same buffer, dma mapped */
	unsigned int keylen;		/* total key material length */
	unsigned int enckeylen;		/* cipher key length */
	unsigned int authkeylen;	/* raw (unsplit) auth key length */
	unsigned int split_key_len;	/* ipad/opad split key length */
	unsigned int split_key_pad_len;	/* split key len, 16-byte aligned */
	unsigned int authsize;		/* ICV length set via setauthsize */
};
92 | ||
93 | static int aead_authenc_setauthsize(struct crypto_aead *authenc, | |
94 | unsigned int authsize) | |
95 | { | |
96 | struct caam_ctx *ctx = crypto_aead_ctx(authenc); | |
97 | ||
98 | ctx->authsize = authsize; | |
99 | ||
100 | return 0; | |
101 | } | |
102 | ||
/* status/err pair handed to split_key_done() by the job-ring callback */
struct split_key_result {
	struct completion completion;	/* signalled when the job finishes */
	int err;			/* h/w status word from the job ring */
};
107 | ||
108 | static void split_key_done(struct device *dev, u32 *desc, u32 err, | |
109 | void *context) | |
110 | { | |
111 | struct split_key_result *res = context; | |
112 | ||
113 | #ifdef DEBUG | |
114 | dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | |
115 | #endif | |
116 | if (err) { | |
117 | char tmp[256]; | |
118 | ||
119 | dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | |
120 | } | |
121 | ||
122 | res->err = err; | |
123 | ||
124 | complete(&res->completion); | |
125 | } | |
126 | ||
127 | /* | |
128 | get a split ipad/opad key | |
129 | ||
130 | Split key generation----------------------------------------------- | |
131 | ||
132 | [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 | |
133 | [01] 0x04000014 key: class2->keyreg len=20 | |
134 | @0xffe01000 | |
135 | [03] 0x84410014 operation: cls2-op sha1 hmac init dec | |
136 | [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm | |
137 | [05] 0xa4000001 jump: class2 local all ->1 [06] | |
138 | [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 | |
139 | @0xffe04000 | |
140 | */ | |
141 | static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen) | |
142 | { | |
143 | struct device *jrdev = ctx->jrdev; | |
144 | u32 *desc; | |
145 | struct split_key_result result; | |
146 | dma_addr_t dma_addr_in, dma_addr_out; | |
147 | int ret = 0; | |
148 | ||
149 | desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); | |
150 | ||
151 | init_job_desc(desc, 0); | |
152 | ||
153 | dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen, | |
154 | DMA_TO_DEVICE); | |
155 | if (dma_mapping_error(jrdev, dma_addr_in)) { | |
156 | dev_err(jrdev, "unable to map key input memory\n"); | |
157 | kfree(desc); | |
158 | return -ENOMEM; | |
159 | } | |
160 | append_key(desc, dma_addr_in, authkeylen, CLASS_2 | | |
161 | KEY_DEST_CLASS_REG); | |
162 | ||
163 | /* Sets MDHA up into an HMAC-INIT */ | |
164 | append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT | | |
165 | OP_ALG_AS_INIT); | |
166 | ||
167 | /* | |
168 | * do a FIFO_LOAD of zero, this will trigger the internal key expansion | |
169 | into both pads inside MDHA | |
170 | */ | |
171 | append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | | |
172 | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); | |
173 | ||
174 | /* | |
175 | * FIFO_STORE with the explicit split-key content store | |
176 | * (0x26 output type) | |
177 | */ | |
178 | dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, | |
179 | DMA_FROM_DEVICE); | |
180 | if (dma_mapping_error(jrdev, dma_addr_out)) { | |
181 | dev_err(jrdev, "unable to map key output memory\n"); | |
182 | kfree(desc); | |
183 | return -ENOMEM; | |
184 | } | |
185 | append_fifo_store(desc, dma_addr_out, ctx->split_key_len, | |
186 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | |
187 | ||
188 | #ifdef DEBUG | |
189 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | |
190 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1); | |
191 | print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ", | |
192 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | |
193 | #endif | |
194 | ||
195 | result.err = 0; | |
196 | init_completion(&result.completion); | |
197 | ||
198 | ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); | |
199 | if (!ret) { | |
200 | /* in progress */ | |
201 | wait_for_completion_interruptible(&result.completion); | |
202 | ret = result.err; | |
203 | #ifdef DEBUG | |
204 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | |
205 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | |
206 | ctx->split_key_pad_len, 1); | |
207 | #endif | |
208 | } | |
209 | ||
210 | dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len, | |
211 | DMA_FROM_DEVICE); | |
212 | dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE); | |
213 | ||
214 | kfree(desc); | |
215 | ||
216 | return ret; | |
217 | } | |
218 | ||
219 | static int build_sh_desc_ipsec(struct caam_ctx *ctx) | |
220 | { | |
221 | struct device *jrdev = ctx->jrdev; | |
222 | u32 *sh_desc; | |
223 | u32 *jump_cmd; | |
224 | ||
225 | /* build shared descriptor for this session */ | |
226 | sh_desc = kmalloc(CAAM_CMD_SZ * 4 + ctx->split_key_pad_len + | |
227 | ctx->enckeylen, GFP_DMA | GFP_KERNEL); | |
228 | if (!sh_desc) { | |
229 | dev_err(jrdev, "could not allocate shared descriptor\n"); | |
230 | return -ENOMEM; | |
231 | } | |
232 | ||
233 | init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL); | |
234 | ||
235 | jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL | | |
236 | JUMP_COND_SHRD | JUMP_COND_SELF); | |
237 | ||
238 | /* process keys, starting with class 2/authentication */ | |
239 | append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len, | |
240 | ctx->split_key_len, | |
241 | CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); | |
242 | ||
243 | append_key_as_imm(sh_desc, (void *)ctx->key + ctx->split_key_pad_len, | |
244 | ctx->enckeylen, ctx->enckeylen, | |
245 | CLASS_1 | KEY_DEST_CLASS_REG); | |
246 | ||
247 | /* update jump cmd now that we are at the jump target */ | |
248 | set_jump_tgt_here(sh_desc, jump_cmd); | |
249 | ||
250 | ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc, | |
251 | desc_bytes(sh_desc), | |
252 | DMA_TO_DEVICE); | |
253 | if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) { | |
254 | dev_err(jrdev, "unable to map shared descriptor\n"); | |
255 | kfree(sh_desc); | |
256 | return -ENOMEM; | |
257 | } | |
258 | ||
259 | ctx->sh_desc = sh_desc; | |
260 | ||
261 | return 0; | |
262 | } | |
263 | ||
264 | static int aead_authenc_setkey(struct crypto_aead *aead, | |
265 | const u8 *key, unsigned int keylen) | |
266 | { | |
267 | /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ | |
268 | static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; | |
269 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
270 | struct device *jrdev = ctx->jrdev; | |
271 | struct rtattr *rta = (void *)key; | |
272 | struct crypto_authenc_key_param *param; | |
273 | unsigned int authkeylen; | |
274 | unsigned int enckeylen; | |
275 | int ret = 0; | |
276 | ||
277 | param = RTA_DATA(rta); | |
278 | enckeylen = be32_to_cpu(param->enckeylen); | |
279 | ||
280 | key += RTA_ALIGN(rta->rta_len); | |
281 | keylen -= RTA_ALIGN(rta->rta_len); | |
282 | ||
283 | if (keylen < enckeylen) | |
284 | goto badkey; | |
285 | ||
286 | authkeylen = keylen - enckeylen; | |
287 | ||
288 | if (keylen > CAAM_MAX_KEY_SIZE) | |
289 | goto badkey; | |
290 | ||
291 | /* Pick class 2 key length from algorithm submask */ | |
292 | ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> | |
293 | OP_ALG_ALGSEL_SHIFT] * 2; | |
294 | ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); | |
295 | ||
296 | #ifdef DEBUG | |
297 | printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", | |
298 | keylen, enckeylen, authkeylen); | |
299 | printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", | |
300 | ctx->split_key_len, ctx->split_key_pad_len); | |
301 | print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ", | |
302 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | |
303 | #endif | |
304 | ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen, | |
305 | GFP_KERNEL | GFP_DMA); | |
306 | if (!ctx->key) { | |
307 | dev_err(jrdev, "could not allocate key output memory\n"); | |
308 | return -ENOMEM; | |
309 | } | |
310 | ||
311 | ret = gen_split_key(ctx, key, authkeylen); | |
312 | if (ret) { | |
313 | kfree(ctx->key); | |
314 | goto badkey; | |
315 | } | |
316 | ||
317 | /* postpend encryption key to auth split key */ | |
318 | memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen); | |
319 | ||
320 | ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len + | |
321 | enckeylen, DMA_TO_DEVICE); | |
322 | if (dma_mapping_error(jrdev, ctx->key_phys)) { | |
323 | dev_err(jrdev, "unable to map key i/o memory\n"); | |
324 | kfree(ctx->key); | |
325 | return -ENOMEM; | |
326 | } | |
327 | #ifdef DEBUG | |
328 | print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ", | |
329 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | |
330 | ctx->split_key_pad_len + enckeylen, 1); | |
331 | #endif | |
332 | ||
333 | ctx->keylen = keylen; | |
334 | ctx->enckeylen = enckeylen; | |
335 | ctx->authkeylen = authkeylen; | |
336 | ||
337 | ret = build_sh_desc_ipsec(ctx); | |
338 | if (ret) { | |
339 | dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len + | |
340 | enckeylen, DMA_TO_DEVICE); | |
341 | kfree(ctx->key); | |
342 | } | |
343 | ||
344 | return ret; | |
345 | badkey: | |
346 | crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
347 | return -EINVAL; | |
348 | } | |
349 | ||
/* h/w link table (scatter/gather) entry layout consumed by CAAM DMA */
struct link_tbl_entry {
	u64 ptr;		/* segment bus address */
	u32 len;		/* segment length; bit 30 marks final entry */
	u8 reserved;
	u8 buf_pool_id;
	u16 offset;		/* starting offset within the segment */
};
357 | ||
/*
 * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @link_tbl: cpu address of the h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ipsec_esp_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};
377 | ||
378 | static void ipsec_esp_unmap(struct device *dev, | |
379 | struct ipsec_esp_edesc *edesc, | |
380 | struct aead_request *areq) | |
381 | { | |
382 | dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE); | |
383 | ||
384 | if (unlikely(areq->dst != areq->src)) { | |
385 | dma_unmap_sg(dev, areq->src, edesc->src_nents, | |
386 | DMA_TO_DEVICE); | |
387 | dma_unmap_sg(dev, areq->dst, edesc->dst_nents, | |
388 | DMA_FROM_DEVICE); | |
389 | } else { | |
390 | dma_unmap_sg(dev, areq->src, edesc->src_nents, | |
391 | DMA_BIDIRECTIONAL); | |
392 | } | |
393 | ||
394 | if (edesc->link_tbl_bytes) | |
395 | dma_unmap_single(dev, edesc->link_tbl_dma, | |
396 | edesc->link_tbl_bytes, | |
397 | DMA_TO_DEVICE); | |
398 | } | |
399 | ||
400 | /* | |
401 | * ipsec_esp descriptor callbacks | |
402 | */ | |
403 | static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |
404 | void *context) | |
405 | { | |
406 | struct aead_request *areq = context; | |
407 | struct ipsec_esp_edesc *edesc; | |
408 | #ifdef DEBUG | |
409 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
410 | int ivsize = crypto_aead_ivsize(aead); | |
411 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
412 | ||
413 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | |
414 | #endif | |
415 | edesc = (struct ipsec_esp_edesc *)((char *)desc - | |
416 | offsetof(struct ipsec_esp_edesc, hw_desc)); | |
417 | ||
418 | if (err) { | |
419 | char tmp[256]; | |
420 | ||
421 | dev_err(jrdev, "%s\n", caam_jr_strstatus(tmp, err)); | |
422 | dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err)); | |
423 | } | |
424 | ||
425 | ipsec_esp_unmap(jrdev, edesc, areq); | |
426 | ||
427 | #ifdef DEBUG | |
428 | print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ", | |
429 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc), | |
430 | areq->assoclen , 1); | |
431 | print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ", | |
432 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize, | |
433 | edesc->src_nents ? 100 : ivsize, 1); | |
434 | print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ", | |
435 | DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src), | |
436 | edesc->src_nents ? 100 : areq->cryptlen + | |
437 | ctx->authsize + 4, 1); | |
438 | #endif | |
439 | ||
440 | kfree(edesc); | |
441 | ||
442 | aead_request_complete(areq, err); | |
443 | } | |
444 | ||
/*
 * Job-ring completion callback for decrypt: unmap DMA, translate a h/w
 * ICV-check failure into -EBADMSG, free the edesc and complete the
 * request.
 */
static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct aead_request *areq = context;
	struct ipsec_esp_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
	/* hw_desc is embedded in the edesc; step back to the container */
	edesc = (struct ipsec_esp_edesc *)((char *)desc -
		 offsetof(struct ipsec_esp_edesc, hw_desc));

	if (err) {
		char tmp[256];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ipsec_esp_unmap(jrdev, edesc, areq);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 * NOTE(review): this only masks/compares the CCB error-id bits; it
	 * does not first confirm the status word is a CCB-class error —
	 * TODO confirm other status classes cannot alias this pattern.
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + areq->assoclen +
		       ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->link_tbl_bytes) {
		struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif
	kfree(edesc);

	aead_request_complete(areq, err);
}
491 | ||
492 | /* | |
493 | * convert scatterlist to h/w link table format | |
494 | * scatterlist must have been previously dma mapped | |
495 | */ | |
496 | static void sg_to_link_tbl(struct scatterlist *sg, int sg_count, | |
497 | struct link_tbl_entry *link_tbl_ptr, u32 offset) | |
498 | { | |
499 | while (sg_count) { | |
500 | link_tbl_ptr->ptr = sg_dma_address(sg); | |
501 | link_tbl_ptr->len = sg_dma_len(sg); | |
502 | link_tbl_ptr->reserved = 0; | |
503 | link_tbl_ptr->buf_pool_id = 0; | |
504 | link_tbl_ptr->offset = offset; | |
505 | link_tbl_ptr++; | |
506 | sg = sg_next(sg); | |
507 | sg_count--; | |
508 | } | |
509 | ||
510 | /* set Final bit (marks end of link table) */ | |
511 | link_tbl_ptr--; | |
512 | link_tbl_ptr->len |= 0x40000000; | |
513 | } | |
514 | ||
/*
 * fill in and submit ipsec_esp job descriptor
 *
 * The caller has already started the descriptor (shared-descriptor
 * pointer + IV load); this appends the class 1/class 2 operations, the
 * data pointers (direct or via link table) and ICV handling, then
 * enqueues to the job ring.  Returns -EINPROGRESS on successful
 * submission; @callback runs on completion.
 */
static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
		     u32 encrypt,
		     void (*callback) (struct device *dev, u32 *desc,
				       u32 err, void *context))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = edesc->hw_desc, options;
	int ret, sg_count, assoc_sg_count;
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	dma_addr_t ptr, dst_dma, src_dma;
#ifdef DEBUG
	u32 *sh_desc = ctx->sh_desc;

	debug("assoclen %d cryptlen %d authsize %d\n",
	      areq->assoclen, areq->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
		       areq->assoclen , 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
		       edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif
	/*
	 * dma map assoc and payload; in-place requests get one
	 * bidirectional mapping.
	 * NOTE(review): the dma_map_sg() return values are never checked —
	 * a failed mapping would go undetected here; verify upstream.
	 */
	assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
				    DMA_TO_DEVICE);
	if (areq->src == areq->dst)
		sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
				      DMA_BIDIRECTIONAL);
	else
		sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
				      DMA_TO_DEVICE);

	/*
	 * start auth operation; "encrypt" doubles as the OP_ALG direction
	 * bit, so when it is 0 (decrypt) OP_ALG_ICV_ON is or-ed in to have
	 * the hardware compare the ICV
	 */
	append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
			 (encrypt ? : OP_ALG_ICV_ON));

	/* Load FIFO with data for Class 2 CHA */
	options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
	if (!edesc->assoc_nents) {
		/* single segment: point at it directly */
		ptr = sg_dma_address(areq->assoc);
	} else {
		/* multi-segment: build a link table at the table's start */
		sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
			       edesc->link_tbl, 0);
		ptr = edesc->link_tbl_dma;
		options |= LDST_SGF;
	}
	append_fifo_load(desc, ptr, areq->assoclen, options);

	/* copy iv from cipher/class1 input context to class2 infifo */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);

	/* start class 1 (cipher) operation */
	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
			 encrypt);

	/* load payload & instruct to class2 to snoop class 1 if encrypting */
	options = 0;
	if (!edesc->src_nents) {
		src_dma = sg_dma_address(areq->src);
	} else {
		/* src link table entries follow the assoc entries */
		sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
			       edesc->assoc_nents, 0);
		src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
			  sizeof(struct link_tbl_entry);
		options |= LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
	append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
			     FIFOLD_TYPE_LASTBOTH |
			     (encrypt ? FIFOLD_TYPE_MSG1OUT2
				      : FIFOLD_TYPE_MSG));

	/* specify destination */
	if (areq->src == areq->dst) {
		dst_dma = src_dma;
	} else {
		/* NOTE(review): sg_count result also unchecked here */
		sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(areq->dst);
			options = 0;
		} else {
			/* dst entries follow assoc + src in the link table */
			sg_to_link_tbl(areq->dst, edesc->dst_nents,
				       edesc->link_tbl + edesc->assoc_nents +
				       edesc->src_nents, 0);
			dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
				  edesc->src_nents) *
				  sizeof(struct link_tbl_entry);
			options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
	append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);

	/* ICV: store it when encrypting, load-and-compare when decrypting */
	if (encrypt)
		append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
				 LDST_SRCDST_BYTE_CONTEXT);
	else
		append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
				     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

#ifdef DEBUG
	debug("job_desc_len %d\n", desc_len(desc));
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1);
	print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
		       edesc->link_tbl_bytes, 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, callback, areq);
	if (!ret)
		ret = -EINPROGRESS;
	else {
		/* enqueue failed: tear down mappings and free immediately */
		ipsec_esp_unmap(jrdev, edesc, areq);
		kfree(edesc);
	}

	return ret;
}
647 | ||
648 | /* | |
649 | * derive number of elements in scatterlist | |
650 | */ | |
651 | static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained) | |
652 | { | |
653 | struct scatterlist *sg = sg_list; | |
654 | int sg_nents = 0; | |
655 | ||
656 | *chained = 0; | |
657 | while (nbytes > 0) { | |
658 | sg_nents++; | |
659 | nbytes -= sg->length; | |
660 | if (!sg_is_last(sg) && (sg + 1)->length == 0) | |
661 | *chained = 1; | |
662 | sg = scatterwalk_sg_next(sg); | |
663 | } | |
664 | ||
665 | return sg_nents; | |
666 | } | |
667 | ||
668 | /* | |
669 | * allocate and map the ipsec_esp extended descriptor | |
670 | */ | |
671 | static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq, | |
672 | int desc_bytes) | |
673 | { | |
674 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
675 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
676 | struct device *jrdev = ctx->jrdev; | |
677 | gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | |
678 | GFP_ATOMIC; | |
679 | int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes; | |
680 | struct ipsec_esp_edesc *edesc; | |
681 | ||
682 | assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained); | |
683 | BUG_ON(chained); | |
684 | if (likely(assoc_nents == 1)) | |
685 | assoc_nents = 0; | |
686 | ||
687 | src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize, | |
688 | &chained); | |
689 | BUG_ON(chained); | |
690 | if (src_nents == 1) | |
691 | src_nents = 0; | |
692 | ||
693 | if (unlikely(areq->dst != areq->src)) { | |
694 | dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize, | |
695 | &chained); | |
696 | BUG_ON(chained); | |
697 | if (dst_nents == 1) | |
698 | dst_nents = 0; | |
699 | } | |
700 | ||
701 | link_tbl_bytes = (assoc_nents + src_nents + dst_nents) * | |
702 | sizeof(struct link_tbl_entry); | |
703 | debug("link_tbl_bytes %d\n", link_tbl_bytes); | |
704 | ||
705 | /* allocate space for base edesc and hw desc commands, link tables */ | |
706 | edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes + | |
707 | link_tbl_bytes, GFP_DMA | flags); | |
708 | if (!edesc) { | |
709 | dev_err(jrdev, "could not allocate extended descriptor\n"); | |
710 | return ERR_PTR(-ENOMEM); | |
711 | } | |
712 | ||
713 | edesc->assoc_nents = assoc_nents; | |
714 | edesc->src_nents = src_nents; | |
715 | edesc->dst_nents = dst_nents; | |
716 | edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) + | |
717 | desc_bytes; | |
718 | edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl, | |
719 | link_tbl_bytes, DMA_TO_DEVICE); | |
720 | edesc->link_tbl_bytes = link_tbl_bytes; | |
721 | ||
722 | return edesc; | |
723 | } | |
724 | ||
725 | static int aead_authenc_encrypt(struct aead_request *areq) | |
726 | { | |
727 | struct ipsec_esp_edesc *edesc; | |
728 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
729 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
730 | struct device *jrdev = ctx->jrdev; | |
731 | int ivsize = crypto_aead_ivsize(aead); | |
732 | u32 *desc; | |
733 | dma_addr_t iv_dma; | |
734 | ||
735 | /* allocate extended descriptor */ | |
736 | edesc = ipsec_esp_edesc_alloc(areq, 21 * sizeof(u32)); | |
737 | if (IS_ERR(edesc)) | |
738 | return PTR_ERR(edesc); | |
739 | ||
740 | desc = edesc->hw_desc; | |
741 | ||
742 | /* insert shared descriptor pointer */ | |
743 | init_job_desc_shared(desc, ctx->shared_desc_phys, | |
744 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | |
745 | ||
746 | iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE); | |
747 | /* check dma error */ | |
748 | ||
749 | append_load(desc, iv_dma, ivsize, | |
750 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | |
751 | ||
752 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | |
753 | } | |
754 | ||
755 | static int aead_authenc_decrypt(struct aead_request *req) | |
756 | { | |
757 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | |
758 | int ivsize = crypto_aead_ivsize(aead); | |
759 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
760 | struct device *jrdev = ctx->jrdev; | |
761 | struct ipsec_esp_edesc *edesc; | |
762 | u32 *desc; | |
763 | dma_addr_t iv_dma; | |
764 | ||
765 | req->cryptlen -= ctx->authsize; | |
766 | ||
767 | /* allocate extended descriptor */ | |
768 | edesc = ipsec_esp_edesc_alloc(req, 21 * sizeof(u32)); | |
769 | if (IS_ERR(edesc)) | |
770 | return PTR_ERR(edesc); | |
771 | ||
772 | desc = edesc->hw_desc; | |
773 | ||
774 | /* insert shared descriptor pointer */ | |
775 | init_job_desc_shared(desc, ctx->shared_desc_phys, | |
776 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | |
777 | ||
778 | iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE); | |
779 | /* check dma error */ | |
780 | ||
781 | append_load(desc, iv_dma, ivsize, | |
782 | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); | |
783 | ||
784 | return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done); | |
785 | } | |
786 | ||
787 | static int aead_authenc_givencrypt(struct aead_givcrypt_request *req) | |
788 | { | |
789 | struct aead_request *areq = &req->areq; | |
790 | struct ipsec_esp_edesc *edesc; | |
791 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | |
792 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | |
793 | struct device *jrdev = ctx->jrdev; | |
794 | int ivsize = crypto_aead_ivsize(aead); | |
795 | dma_addr_t iv_dma; | |
796 | u32 *desc; | |
797 | ||
798 | iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE); | |
799 | ||
800 | debug("%s: giv %p\n", __func__, req->giv); | |
801 | ||
802 | /* allocate extended descriptor */ | |
803 | edesc = ipsec_esp_edesc_alloc(areq, 27 * sizeof(u32)); | |
804 | if (IS_ERR(edesc)) | |
805 | return PTR_ERR(edesc); | |
806 | ||
807 | desc = edesc->hw_desc; | |
808 | ||
809 | /* insert shared descriptor pointer */ | |
810 | init_job_desc_shared(desc, ctx->shared_desc_phys, | |
811 | desc_len(ctx->sh_desc), HDR_SHARE_DEFER); | |
812 | ||
813 | /* | |
814 | * LOAD IMM Info FIFO | |
815 | * to DECO, Last, Padding, Random, Message, 16 bytes | |
816 | */ | |
817 | append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 | | |
818 | NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | | |
819 | NFIFOENTRY_PTYPE_RND | ivsize, | |
820 | LDST_SRCDST_WORD_INFO_FIFO); | |
821 | ||
822 | /* | |
823 | * disable info fifo entries since the above serves as the entry | |
824 | * this way, the MOVE command won't generate an entry. | |
825 | * Note that this isn't required in more recent versions of | |
826 | * SEC as a MOVE that doesn't do info FIFO entries is available. | |
827 | */ | |
828 | append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); | |
829 | ||
830 | /* MOVE DECO Alignment -> C1 Context 16 bytes */ | |
831 | append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO | | |
832 | MOVE_DEST_CLASS1CTX | ivsize); | |
833 | ||
834 | /* re-enable info fifo entries */ | |
835 | append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); | |
836 | ||
837 | /* MOVE C1 Context -> OFIFO 16 bytes */ | |
838 | append_move(desc, MOVE_WAITCOMP | MOVE_SRC_CLASS1CTX | | |
839 | MOVE_DEST_OUTFIFO | ivsize); | |
840 | ||
841 | append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA); | |
842 | ||
843 | return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done); | |
844 | } | |
845 | ||
/* template from which each driver_algs[] registration entry is built */
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];		/* generic algorithm name */
	char driver_name[CRYPTO_MAX_ALG_NAME];	/* caam driver-specific name */
	unsigned int blocksize;			/* cipher block size */
	struct aead_alg aead;			/* aead ops for this template */
	u32 class1_alg_type;			/* cipher OP_ALG_* selector */
	u32 class2_alg_type;			/* auth OP_ALG_* selector */
	u32 alg_op;				/* split-key generation op */
};
855 | ||
/*
 * Table of AEAD algorithms this driver registers: HMAC-SHA1/SHA256
 * authentication paired with AES/3DES/DES CBC encryption, all backed by
 * the single-pass ipsec_esp shared descriptor. Walked at module init;
 * one caam_crypto_alg is allocated and registered per entry.
 */
static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.aead = {
			.setkey = aead_authenc_setkey,
			.setauthsize = aead_authenc_setauthsize,
			.encrypt = aead_authenc_encrypt,
			.decrypt = aead_authenc_decrypt,
			.givencrypt = aead_authenc_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
};
970 | ||
/*
 * Driver-private wrapper around a registered crypto_alg: keeps the CAAM
 * OPERATION bits from the template and a link on the controller's
 * alg_list so caam_algapi_exit() can unregister and free it.
 */
struct caam_crypto_alg {
	struct list_head entry;		/* node on caam_drv_private alg_list */
	struct device *ctrldev;		/* owning CAAM controller device */
	int class1_alg_type;		/* cipher OPERATION bits (template copy) */
	int class2_alg_type;		/* auth OPERATION bits (template copy) */
	int alg_op;			/* plain-HMAC OPERATION bits (template copy) */
	struct crypto_alg crypto_alg;	/* the alg registered with the crypto API */
};
979 | ||
980 | static int caam_cra_init(struct crypto_tfm *tfm) | |
981 | { | |
982 | struct crypto_alg *alg = tfm->__crt_alg; | |
983 | struct caam_crypto_alg *caam_alg = | |
984 | container_of(alg, struct caam_crypto_alg, crypto_alg); | |
985 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | |
986 | struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev); | |
987 | int tgt_jr = atomic_inc_return(&priv->tfm_count); | |
988 | ||
989 | /* | |
990 | * distribute tfms across job rings to ensure in-order | |
991 | * crypto request processing per tfm | |
992 | */ | |
993 | ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi]; | |
994 | ||
995 | /* copy descriptor header template value */ | |
996 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; | |
997 | ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; | |
998 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; | |
999 | ||
1000 | return 0; | |
1001 | } | |
1002 | ||
1003 | static void caam_cra_exit(struct crypto_tfm *tfm) | |
1004 | { | |
1005 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | |
1006 | ||
1007 | if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys)) | |
1008 | dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys, | |
1009 | desc_bytes(ctx->sh_desc), DMA_TO_DEVICE); | |
1010 | kfree(ctx->sh_desc); | |
1011 | } | |
1012 | ||
1013 | static void __exit caam_algapi_exit(void) | |
1014 | { | |
1015 | ||
1016 | struct device_node *dev_node; | |
1017 | struct platform_device *pdev; | |
1018 | struct device *ctrldev; | |
1019 | struct caam_drv_private *priv; | |
1020 | struct caam_crypto_alg *t_alg, *n; | |
1021 | int i, err; | |
1022 | ||
54e198d4 | 1023 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
8e8ec596 KP |
1024 | if (!dev_node) |
1025 | return; | |
1026 | ||
1027 | pdev = of_find_device_by_node(dev_node); | |
1028 | if (!pdev) | |
1029 | return; | |
1030 | ||
1031 | ctrldev = &pdev->dev; | |
1032 | of_node_put(dev_node); | |
1033 | priv = dev_get_drvdata(ctrldev); | |
1034 | ||
1035 | if (!priv->alg_list.next) | |
1036 | return; | |
1037 | ||
1038 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { | |
1039 | crypto_unregister_alg(&t_alg->crypto_alg); | |
1040 | list_del(&t_alg->entry); | |
1041 | kfree(t_alg); | |
1042 | } | |
1043 | ||
1044 | for (i = 0; i < priv->total_jobrs; i++) { | |
1045 | err = caam_jr_deregister(priv->algapi_jr[i]); | |
1046 | if (err < 0) | |
1047 | break; | |
1048 | } | |
1049 | kfree(priv->algapi_jr); | |
1050 | } | |
1051 | ||
1052 | static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | |
1053 | struct caam_alg_template | |
1054 | *template) | |
1055 | { | |
1056 | struct caam_crypto_alg *t_alg; | |
1057 | struct crypto_alg *alg; | |
1058 | ||
1059 | t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); | |
1060 | if (!t_alg) { | |
1061 | dev_err(ctrldev, "failed to allocate t_alg\n"); | |
1062 | return ERR_PTR(-ENOMEM); | |
1063 | } | |
1064 | ||
1065 | alg = &t_alg->crypto_alg; | |
1066 | ||
1067 | snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); | |
1068 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | |
1069 | template->driver_name); | |
1070 | alg->cra_module = THIS_MODULE; | |
1071 | alg->cra_init = caam_cra_init; | |
1072 | alg->cra_exit = caam_cra_exit; | |
1073 | alg->cra_priority = CAAM_CRA_PRIORITY; | |
1074 | alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; | |
1075 | alg->cra_blocksize = template->blocksize; | |
1076 | alg->cra_alignmask = 0; | |
1077 | alg->cra_type = &crypto_aead_type; | |
1078 | alg->cra_ctxsize = sizeof(struct caam_ctx); | |
1079 | alg->cra_u.aead = template->aead; | |
1080 | ||
1081 | t_alg->class1_alg_type = template->class1_alg_type; | |
1082 | t_alg->class2_alg_type = template->class2_alg_type; | |
1083 | t_alg->alg_op = template->alg_op; | |
1084 | t_alg->ctrldev = ctrldev; | |
1085 | ||
1086 | return t_alg; | |
1087 | } | |
1088 | ||
1089 | static int __init caam_algapi_init(void) | |
1090 | { | |
1091 | struct device_node *dev_node; | |
1092 | struct platform_device *pdev; | |
1093 | struct device *ctrldev, **jrdev; | |
1094 | struct caam_drv_private *priv; | |
1095 | int i = 0, err = 0; | |
1096 | ||
54e198d4 | 1097 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); |
8e8ec596 KP |
1098 | if (!dev_node) |
1099 | return -ENODEV; | |
1100 | ||
1101 | pdev = of_find_device_by_node(dev_node); | |
1102 | if (!pdev) | |
1103 | return -ENODEV; | |
1104 | ||
1105 | ctrldev = &pdev->dev; | |
1106 | priv = dev_get_drvdata(ctrldev); | |
1107 | of_node_put(dev_node); | |
1108 | ||
1109 | INIT_LIST_HEAD(&priv->alg_list); | |
1110 | ||
1111 | jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL); | |
1112 | if (!jrdev) | |
1113 | return -ENOMEM; | |
1114 | ||
1115 | for (i = 0; i < priv->total_jobrs; i++) { | |
1116 | err = caam_jr_register(ctrldev, &jrdev[i]); | |
1117 | if (err < 0) | |
1118 | break; | |
1119 | } | |
1120 | if (err < 0 && i == 0) { | |
1121 | dev_err(ctrldev, "algapi error in job ring registration: %d\n", | |
1122 | err); | |
b3b7f055 | 1123 | kfree(jrdev); |
8e8ec596 KP |
1124 | return err; |
1125 | } | |
1126 | ||
1127 | priv->num_jrs_for_algapi = i; | |
1128 | priv->algapi_jr = jrdev; | |
1129 | atomic_set(&priv->tfm_count, -1); | |
1130 | ||
1131 | /* register crypto algorithms the device supports */ | |
1132 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | |
1133 | /* TODO: check if h/w supports alg */ | |
1134 | struct caam_crypto_alg *t_alg; | |
1135 | ||
1136 | t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); | |
1137 | if (IS_ERR(t_alg)) { | |
1138 | err = PTR_ERR(t_alg); | |
1139 | dev_warn(ctrldev, "%s alg allocation failed\n", | |
cdc712d8 | 1140 | driver_algs[i].driver_name); |
8e8ec596 KP |
1141 | continue; |
1142 | } | |
1143 | ||
1144 | err = crypto_register_alg(&t_alg->crypto_alg); | |
1145 | if (err) { | |
1146 | dev_warn(ctrldev, "%s alg registration failed\n", | |
1147 | t_alg->crypto_alg.cra_driver_name); | |
1148 | kfree(t_alg); | |
1149 | } else { | |
1150 | list_add_tail(&t_alg->entry, &priv->alg_list); | |
1151 | dev_info(ctrldev, "%s\n", | |
1152 | t_alg->crypto_alg.cra_driver_name); | |
1153 | } | |
1154 | } | |
1155 | ||
1156 | return err; | |
1157 | } | |
1158 | ||
/* module entry/exit hooks and standard module metadata */
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");