/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

enum {
        BLKCIPHER_WALK_PHYS = 1 << 0,   /* caller wants page/offset pairs */
        BLKCIPHER_WALK_SLOW = 1 << 1,   /* block bounced through walk->buffer */
        BLKCIPHER_WALK_COPY = 1 << 2,   /* data bounced through walk->page */
        BLKCIPHER_WALK_DIFF = 1 << 3,   /* src and dst mapped separately */
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
        return max(start, end_page);
}
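
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): with
 * start ending in 0xff8 and len == 16, start + len - 1 lands on the
 * next page, so end_page is that page's base address and is returned,
 * pushing the spot up so the block sits entirely within one page.
 * When the block already fits in start's page, end_page <= start and
 * start is returned unchanged.
 */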
72 | ||
73 | static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk, | |
74 | unsigned int bsize) | |
75 | { | |
76 | u8 *addr; | |
77 | ||
78 | addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); | |
79 | addr = blkcipher_get_spot(addr, bsize); | |
80 | scatterwalk_copychunks(addr, &walk->out, bsize, 1); | |
81 | return bsize; | |
82 | } | |
83 | ||
84 | static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, | |
85 | unsigned int n) | |
86 | { | |
87 | if (walk->flags & BLKCIPHER_WALK_COPY) { | |
88 | blkcipher_map_dst(walk); | |
89 | memcpy(walk->dst.virt.addr, walk->page, n); | |
90 | blkcipher_unmap_dst(walk); | |
91 | } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) { | |
92 | if (walk->flags & BLKCIPHER_WALK_DIFF) | |
93 | blkcipher_unmap_dst(walk); | |
94 | blkcipher_unmap_src(walk); | |
95 | } | |
96 | ||
97 | scatterwalk_advance(&walk->in, n); | |
98 | scatterwalk_advance(&walk->out, n); | |
99 | ||
100 | return n; | |
101 | } | |
102 | ||
103 | int blkcipher_walk_done(struct blkcipher_desc *desc, | |
104 | struct blkcipher_walk *walk, int err) | |
105 | { | |
106 | unsigned int nbytes = 0; | |
107 | ||
108 | if (likely(err >= 0)) { | |
109 | unsigned int n = walk->nbytes - err; | |
110 | ||
111 | if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) | |
112 | n = blkcipher_done_fast(walk, n); | |
113 | else if (WARN_ON(err)) { | |
114 | err = -EINVAL; | |
115 | goto err; | |
116 | } else | |
117 | n = blkcipher_done_slow(walk, n); | |
118 | ||
119 | nbytes = walk->total - n; | |
120 | err = 0; | |
121 | } | |
122 | ||
123 | scatterwalk_done(&walk->in, 0, nbytes); | |
124 | scatterwalk_done(&walk->out, 1, nbytes); | |
125 | ||
126 | err: | |
127 | walk->total = nbytes; | |
128 | walk->nbytes = nbytes; | |
129 | ||
130 | if (nbytes) { | |
131 | crypto_yield(desc->flags); | |
132 | return blkcipher_walk_next(desc, walk); | |
133 | } | |
134 | ||
135 | if (walk->iv != desc->info) | |
136 | memcpy(desc->info, walk->iv, walk->ivsize); | |
137 | if (walk->buffer != walk->page) | |
138 | kfree(walk->buffer); | |
139 | if (walk->page) | |
140 | free_page((unsigned long)walk->page); | |
141 | ||
142 | return err; | |
143 | } | |
144 | EXPORT_SYMBOL_GPL(blkcipher_walk_done); | |
145 | ||
146 | static inline int blkcipher_next_slow(struct blkcipher_desc *desc, | |
147 | struct blkcipher_walk *walk, | |
148 | unsigned int bsize, | |
149 | unsigned int alignmask) | |
150 | { | |
151 | unsigned int n; | |
152 | unsigned aligned_bsize = ALIGN(bsize, alignmask + 1); | |
153 | ||
154 | if (walk->buffer) | |
155 | goto ok; | |
156 | ||
157 | walk->buffer = walk->page; | |
158 | if (walk->buffer) | |
159 | goto ok; | |
160 | ||
161 | n = aligned_bsize * 3 - (alignmask + 1) + | |
162 | (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); | |
163 | walk->buffer = kmalloc(n, GFP_ATOMIC); | |
164 | if (!walk->buffer) | |
165 | return blkcipher_walk_done(desc, walk, -ENOMEM); | |
166 | ||
167 | ok: | |
168 | walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer, | |
169 | alignmask + 1); | |
170 | walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize); | |
171 | walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + | |
172 | aligned_bsize, bsize); | |
173 | ||
174 | scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); | |
175 | ||
176 | walk->nbytes = bsize; | |
177 | walk->flags |= BLKCIPHER_WALK_SLOW; | |
178 | ||
179 | return 0; | |
180 | } | |
181 | ||
182 | static inline int blkcipher_next_copy(struct blkcipher_walk *walk) | |
183 | { | |
184 | u8 *tmp = walk->page; | |
185 | ||
186 | blkcipher_map_src(walk); | |
187 | memcpy(tmp, walk->src.virt.addr, walk->nbytes); | |
188 | blkcipher_unmap_src(walk); | |
189 | ||
190 | walk->src.virt.addr = tmp; | |
191 | walk->dst.virt.addr = tmp; | |
192 | ||
193 | return 0; | |
194 | } | |
195 | ||
196 | static inline int blkcipher_next_fast(struct blkcipher_desc *desc, | |
197 | struct blkcipher_walk *walk) | |
198 | { | |
199 | unsigned long diff; | |
200 | ||
201 | walk->src.phys.page = scatterwalk_page(&walk->in); | |
202 | walk->src.phys.offset = offset_in_page(walk->in.offset); | |
203 | walk->dst.phys.page = scatterwalk_page(&walk->out); | |
204 | walk->dst.phys.offset = offset_in_page(walk->out.offset); | |
205 | ||
206 | if (walk->flags & BLKCIPHER_WALK_PHYS) | |
207 | return 0; | |
208 | ||
209 | diff = walk->src.phys.offset - walk->dst.phys.offset; | |
210 | diff |= walk->src.virt.page - walk->dst.virt.page; | |
211 | ||
212 | blkcipher_map_src(walk); | |
213 | walk->dst.virt.addr = walk->src.virt.addr; | |
214 | ||
215 | if (diff) { | |
216 | walk->flags |= BLKCIPHER_WALK_DIFF; | |
217 | blkcipher_map_dst(walk); | |
218 | } | |
219 | ||
220 | return 0; | |
221 | } | |
222 | ||
223 | static int blkcipher_walk_next(struct blkcipher_desc *desc, | |
224 | struct blkcipher_walk *walk) | |
225 | { | |
226 | unsigned int bsize; | |
227 | unsigned int n; | |
228 | int err; | |
229 | ||
230 | n = walk->total; | |
231 | if (unlikely(n < walk->cipher_blocksize)) { | |
232 | desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; | |
233 | return blkcipher_walk_done(desc, walk, -EINVAL); | |
234 | } | |
235 | ||
236 | walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | | |
237 | BLKCIPHER_WALK_DIFF); | |
238 | if (!scatterwalk_aligned(&walk->in, walk->alignmask) || | |
239 | !scatterwalk_aligned(&walk->out, walk->alignmask)) { | |
240 | walk->flags |= BLKCIPHER_WALK_COPY; | |
241 | if (!walk->page) { | |
242 | walk->page = (void *)__get_free_page(GFP_ATOMIC); | |
243 | if (!walk->page) | |
244 | n = 0; | |
245 | } | |
246 | } | |
247 | ||
248 | bsize = min(walk->walk_blocksize, n); | |
249 | n = scatterwalk_clamp(&walk->in, n); | |
250 | n = scatterwalk_clamp(&walk->out, n); | |
251 | ||
252 | if (unlikely(n < bsize)) { | |
253 | err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask); | |
254 | goto set_phys_lowmem; | |
255 | } | |
256 | ||
257 | walk->nbytes = n; | |
258 | if (walk->flags & BLKCIPHER_WALK_COPY) { | |
259 | err = blkcipher_next_copy(walk); | |
260 | goto set_phys_lowmem; | |
261 | } | |
262 | ||
263 | return blkcipher_next_fast(desc, walk); | |
264 | ||
265 | set_phys_lowmem: | |
266 | if (walk->flags & BLKCIPHER_WALK_PHYS) { | |
267 | walk->src.phys.page = virt_to_page(walk->src.virt.addr); | |
268 | walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); | |
269 | walk->src.phys.offset &= PAGE_SIZE - 1; | |
270 | walk->dst.phys.offset &= PAGE_SIZE - 1; | |
271 | } | |
272 | return err; | |
273 | } | |
274 | ||
275 | static inline int blkcipher_copy_iv(struct blkcipher_walk *walk) | |
276 | { | |
277 | unsigned bs = walk->walk_blocksize; | |
278 | unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1); | |
279 | unsigned int size = aligned_bs * 2 + | |
280 | walk->ivsize + max(aligned_bs, walk->ivsize) - | |
281 | (walk->alignmask + 1); | |
282 | u8 *iv; | |
283 | ||
284 | size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1); | |
285 | walk->buffer = kmalloc(size, GFP_ATOMIC); | |
286 | if (!walk->buffer) | |
287 | return -ENOMEM; | |
288 | ||
289 | iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); | |
290 | iv = blkcipher_get_spot(iv, bs) + aligned_bs; | |
291 | iv = blkcipher_get_spot(iv, bs) + aligned_bs; | |
292 | iv = blkcipher_get_spot(iv, walk->ivsize); | |
293 | ||
294 | walk->iv = memcpy(iv, walk->iv, walk->ivsize); | |
295 | return 0; | |
296 | } | |
297 | ||
298 | int blkcipher_walk_virt(struct blkcipher_desc *desc, | |
299 | struct blkcipher_walk *walk) | |
300 | { | |
301 | walk->flags &= ~BLKCIPHER_WALK_PHYS; | |
302 | walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm); | |
303 | walk->cipher_blocksize = walk->walk_blocksize; | |
304 | walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); | |
305 | walk->alignmask = crypto_blkcipher_alignmask(desc->tfm); | |
306 | return blkcipher_walk_first(desc, walk); | |
307 | } | |
308 | EXPORT_SYMBOL_GPL(blkcipher_walk_virt); | |
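
/*
 * Typical use of the walker inside an alg->encrypt(desc, dst, src,
 * nbytes) implementation (an illustrative sketch, not code from this
 * file; "encrypt_segment" stands for a hypothetical mode-specific
 * helper that processes walk.nbytes bytes and returns how many bytes
 * it left over):
 *
 *      struct blkcipher_walk walk;
 *      int err;
 *
 *      blkcipher_walk_init(&walk, dst, src, nbytes);
 *      err = blkcipher_walk_virt(desc, &walk);
 *
 *      while (walk.nbytes) {
 *              unsigned int left = encrypt_segment(desc, &walk);
 *              err = blkcipher_walk_done(desc, &walk, left);
 *      }
 *
 *      return err;
 */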
309 | ||
310 | int blkcipher_walk_phys(struct blkcipher_desc *desc, | |
311 | struct blkcipher_walk *walk) | |
312 | { | |
313 | walk->flags |= BLKCIPHER_WALK_PHYS; | |
314 | walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm); | |
315 | walk->cipher_blocksize = walk->walk_blocksize; | |
316 | walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); | |
317 | walk->alignmask = crypto_blkcipher_alignmask(desc->tfm); | |
318 | return blkcipher_walk_first(desc, walk); | |
319 | } | |
320 | EXPORT_SYMBOL_GPL(blkcipher_walk_phys); | |
321 | ||
322 | static int blkcipher_walk_first(struct blkcipher_desc *desc, | |
323 | struct blkcipher_walk *walk) | |
324 | { | |
325 | if (WARN_ON_ONCE(in_irq())) | |
326 | return -EDEADLK; | |
327 | ||
328 | walk->nbytes = walk->total; | |
329 | if (unlikely(!walk->total)) | |
330 | return 0; | |
331 | ||
332 | walk->buffer = NULL; | |
333 | walk->iv = desc->info; | |
334 | if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { | |
335 | int err = blkcipher_copy_iv(walk); | |
336 | if (err) | |
337 | return err; | |
338 | } | |
339 | ||
340 | scatterwalk_start(&walk->in, walk->in.sg); | |
341 | scatterwalk_start(&walk->out, walk->out.sg); | |
342 | walk->page = NULL; | |
343 | ||
344 | return blkcipher_walk_next(desc, walk); | |
345 | } | |
346 | ||
347 | int blkcipher_walk_virt_block(struct blkcipher_desc *desc, | |
348 | struct blkcipher_walk *walk, | |
349 | unsigned int blocksize) | |
350 | { | |
351 | walk->flags &= ~BLKCIPHER_WALK_PHYS; | |
352 | walk->walk_blocksize = blocksize; | |
353 | walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm); | |
354 | walk->ivsize = crypto_blkcipher_ivsize(desc->tfm); | |
355 | walk->alignmask = crypto_blkcipher_alignmask(desc->tfm); | |
356 | return blkcipher_walk_first(desc, walk); | |
357 | } | |
358 | EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block); | |
359 | ||
360 | static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, | |
361 | unsigned int keylen) | |
362 | { | |
363 | struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher; | |
364 | unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); | |
365 | int ret; | |
366 | u8 *buffer, *alignbuffer; | |
367 | unsigned long absize; | |
368 | ||
369 | absize = keylen + alignmask; | |
370 | buffer = kmalloc(absize, GFP_ATOMIC); | |
371 | if (!buffer) | |
372 | return -ENOMEM; | |
373 | ||
374 | alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); | |
375 | memcpy(alignbuffer, key, keylen); | |
376 | ret = cipher->setkey(tfm, alignbuffer, keylen); | |
377 | memset(alignbuffer, 0, keylen); | |
378 | kfree(buffer); | |
379 | return ret; | |
380 | } | |
381 | ||
382 | static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) | |
383 | { | |
384 | struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher; | |
385 | unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); | |
386 | ||
387 | if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { | |
388 | tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | |
389 | return -EINVAL; | |
390 | } | |
391 | ||
392 | if ((unsigned long)key & alignmask) | |
393 | return setkey_unaligned(tfm, key, keylen); | |
394 | ||
395 | return cipher->setkey(tfm, key, keylen); | |
396 | } | |
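
/*
 * Example of the alignment handling above (illustrative): with an
 * alignmask of 15, a 16-byte key at an odd address is copied into a
 * kmalloc'd buffer of keylen + 15 bytes, alignbuffer being the first
 * 16-byte-aligned address inside it.  The key material is zeroed
 * before the buffer is freed.
 */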
397 | ||
398 | static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |
399 | unsigned int keylen) | |
400 | { | |
401 | return setkey(crypto_ablkcipher_tfm(tfm), key, keylen); | |
402 | } | |
403 | ||
404 | static int async_encrypt(struct ablkcipher_request *req) | |
405 | { | |
406 | struct crypto_tfm *tfm = req->base.tfm; | |
407 | struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; | |
408 | struct blkcipher_desc desc = { | |
409 | .tfm = __crypto_blkcipher_cast(tfm), | |
410 | .info = req->info, | |
411 | .flags = req->base.flags, | |
412 | }; | |
413 | ||
414 | ||
415 | return alg->encrypt(&desc, req->dst, req->src, req->nbytes); | |
416 | } | |
417 | ||
418 | static int async_decrypt(struct ablkcipher_request *req) | |
419 | { | |
420 | struct crypto_tfm *tfm = req->base.tfm; | |
421 | struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; | |
422 | struct blkcipher_desc desc = { | |
423 | .tfm = __crypto_blkcipher_cast(tfm), | |
424 | .info = req->info, | |
425 | .flags = req->base.flags, | |
426 | }; | |
427 | ||
428 | return alg->decrypt(&desc, req->dst, req->src, req->nbytes); | |
429 | } | |
430 | ||
431 | static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type, | |
432 | u32 mask) | |
433 | { | |
434 | struct blkcipher_alg *cipher = &alg->cra_blkcipher; | |
435 | unsigned int len = alg->cra_ctxsize; | |
436 | ||
437 | if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK && | |
438 | cipher->ivsize) { | |
439 | len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); | |
440 | len += cipher->ivsize; | |
441 | } | |
442 | ||
443 | return len; | |
444 | } | |
445 | ||
446 | static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm) | |
447 | { | |
448 | struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; | |
449 | struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; | |
450 | ||
451 | crt->setkey = async_setkey; | |
452 | crt->encrypt = async_encrypt; | |
453 | crt->decrypt = async_decrypt; | |
454 | if (!alg->ivsize) { | |
455 | crt->givencrypt = skcipher_null_givencrypt; | |
456 | crt->givdecrypt = skcipher_null_givdecrypt; | |
457 | } | |
458 | crt->base = __crypto_ablkcipher_cast(tfm); | |
459 | crt->ivsize = alg->ivsize; | |
460 | ||
461 | return 0; | |
462 | } | |
463 | ||
464 | static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm) | |
465 | { | |
466 | struct blkcipher_tfm *crt = &tfm->crt_blkcipher; | |
467 | struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; | |
468 | unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1; | |
469 | unsigned long addr; | |
470 | ||
471 | crt->setkey = setkey; | |
472 | crt->encrypt = alg->encrypt; | |
473 | crt->decrypt = alg->decrypt; | |
474 | ||
475 | addr = (unsigned long)crypto_tfm_ctx(tfm); | |
476 | addr = ALIGN(addr, align); | |
477 | addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align); | |
478 | crt->iv = (void *)addr; | |
479 | ||
480 | return 0; | |
481 | } | |
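
/*
 * Resulting layout for a synchronous tfm (illustrative):
 *
 *      crypto_tfm_ctx(tfm)
 *      |<- cra_ctxsize, rounded up to the alignmask ->|<- ivsize ->|
 *                          context                          IV
 *
 * which matches the extra room reserved by crypto_blkcipher_ctxsize().
 */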
482 | ||
483 | static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) | |
484 | { | |
485 | struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; | |
486 | ||
487 | if (alg->ivsize > PAGE_SIZE / 8) | |
488 | return -EINVAL; | |
489 | ||
490 | if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK) | |
491 | return crypto_init_blkcipher_ops_sync(tfm); | |
492 | else | |
493 | return crypto_init_blkcipher_ops_async(tfm); | |
494 | } | |
495 | ||
496 | #ifdef CONFIG_NET | |
497 | static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) | |
498 | { | |
499 | struct crypto_report_blkcipher rblkcipher; | |
500 | ||
501 | strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type)); | |
502 | strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>", | |
503 | sizeof(rblkcipher.geniv)); | |
504 | ||
505 | rblkcipher.blocksize = alg->cra_blocksize; | |
506 | rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize; | |
507 | rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize; | |
508 | rblkcipher.ivsize = alg->cra_blkcipher.ivsize; | |
509 | ||
510 | if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, | |
511 | sizeof(struct crypto_report_blkcipher), &rblkcipher)) | |
512 | goto nla_put_failure; | |
513 | return 0; | |
514 | ||
515 | nla_put_failure: | |
516 | return -EMSGSIZE; | |
517 | } | |
518 | #else | |
519 | static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) | |
520 | { | |
521 | return -ENOSYS; | |
522 | } | |
523 | #endif | |
524 | ||
525 | static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) | |
526 | __attribute__ ((unused)); | |
527 | static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) | |
528 | { | |
529 | seq_printf(m, "type : blkcipher\n"); | |
530 | seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); | |
531 | seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize); | |
532 | seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize); | |
533 | seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize); | |
534 | seq_printf(m, "geniv : %s\n", alg->cra_blkcipher.geniv ?: | |
535 | "<default>"); | |
536 | } | |
537 | ||
538 | const struct crypto_type crypto_blkcipher_type = { | |
539 | .ctxsize = crypto_blkcipher_ctxsize, | |
540 | .init = crypto_init_blkcipher_ops, | |
541 | #ifdef CONFIG_PROC_FS | |
542 | .show = crypto_blkcipher_show, | |
543 | #endif | |
544 | .report = crypto_blkcipher_report, | |
545 | }; | |
546 | EXPORT_SYMBOL_GPL(crypto_blkcipher_type); | |
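
/*
 * How a driver typically hooks into this type (an illustrative sketch;
 * the my_* names and the sizes are hypothetical, not part of this file):
 *
 *      static struct crypto_alg my_alg = {
 *              .cra_name               = "cbc(my-cipher)",
 *              .cra_driver_name        = "my-cbc",
 *              .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
 *              .cra_blocksize          = 16,
 *              .cra_ctxsize            = sizeof(struct my_ctx),
 *              .cra_alignmask          = 15,
 *              .cra_type               = &crypto_blkcipher_type,
 *              .cra_module             = THIS_MODULE,
 *              .cra_u = {
 *                      .blkcipher = {
 *                              .min_keysize    = 16,
 *                              .max_keysize    = 32,
 *                              .ivsize         = 16,
 *                              .setkey         = my_setkey,
 *                              .encrypt        = my_encrypt,
 *                              .decrypt        = my_decrypt,
 *                      },
 *              },
 *      };
 *
 * registered with crypto_register_alg(&my_alg).
 */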
547 | ||
548 | static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn, | |
549 | const char *name, u32 type, u32 mask) | |
550 | { | |
551 | struct crypto_alg *alg; | |
552 | int err; | |
553 | ||
554 | type = crypto_skcipher_type(type); | |
555 | mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV; | |
556 | ||
557 | alg = crypto_alg_mod_lookup(name, type, mask); | |
558 | if (IS_ERR(alg)) | |
559 | return PTR_ERR(alg); | |
560 | ||
561 | err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask); | |
562 | crypto_mod_put(alg); | |
563 | return err; | |
564 | } | |
565 | ||
566 | struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl, | |
567 | struct rtattr **tb, u32 type, | |
568 | u32 mask) | |
569 | { | |
570 | struct { | |
571 | int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, | |
572 | unsigned int keylen); | |
573 | int (*encrypt)(struct ablkcipher_request *req); | |
574 | int (*decrypt)(struct ablkcipher_request *req); | |
575 | ||
576 | unsigned int min_keysize; | |
577 | unsigned int max_keysize; | |
578 | unsigned int ivsize; | |
579 | ||
580 | const char *geniv; | |
581 | } balg; | |
582 | const char *name; | |
583 | struct crypto_skcipher_spawn *spawn; | |
584 | struct crypto_attr_type *algt; | |
585 | struct crypto_instance *inst; | |
586 | struct crypto_alg *alg; | |
587 | int err; | |
588 | ||
589 | algt = crypto_get_attr_type(tb); | |
590 | if (IS_ERR(algt)) | |
591 | return ERR_CAST(algt); | |
592 | ||
593 | if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) & | |
594 | algt->mask) | |
595 | return ERR_PTR(-EINVAL); | |
596 | ||
597 | name = crypto_attr_alg_name(tb[1]); | |
598 | if (IS_ERR(name)) | |
599 | return ERR_CAST(name); | |
600 | ||
601 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | |
602 | if (!inst) | |
603 | return ERR_PTR(-ENOMEM); | |
604 | ||
605 | spawn = crypto_instance_ctx(inst); | |
606 | ||
607 | /* Ignore async algorithms if necessary. */ | |
608 | mask |= crypto_requires_sync(algt->type, algt->mask); | |
609 | ||
610 | crypto_set_skcipher_spawn(spawn, inst); | |
611 | err = crypto_grab_nivcipher(spawn, name, type, mask); | |
612 | if (err) | |
613 | goto err_free_inst; | |
614 | ||
615 | alg = crypto_skcipher_spawn_alg(spawn); | |
616 | ||
617 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == | |
618 | CRYPTO_ALG_TYPE_BLKCIPHER) { | |
619 | balg.ivsize = alg->cra_blkcipher.ivsize; | |
620 | balg.min_keysize = alg->cra_blkcipher.min_keysize; | |
621 | balg.max_keysize = alg->cra_blkcipher.max_keysize; | |
622 | ||
623 | balg.setkey = async_setkey; | |
624 | balg.encrypt = async_encrypt; | |
625 | balg.decrypt = async_decrypt; | |
626 | ||
627 | balg.geniv = alg->cra_blkcipher.geniv; | |
628 | } else { | |
629 | balg.ivsize = alg->cra_ablkcipher.ivsize; | |
630 | balg.min_keysize = alg->cra_ablkcipher.min_keysize; | |
631 | balg.max_keysize = alg->cra_ablkcipher.max_keysize; | |
632 | ||
633 | balg.setkey = alg->cra_ablkcipher.setkey; | |
634 | balg.encrypt = alg->cra_ablkcipher.encrypt; | |
635 | balg.decrypt = alg->cra_ablkcipher.decrypt; | |
636 | ||
637 | balg.geniv = alg->cra_ablkcipher.geniv; | |
638 | } | |
639 | ||
640 | err = -EINVAL; | |
641 | if (!balg.ivsize) | |
642 | goto err_drop_alg; | |
643 | ||
644 | /* | |
645 | * This is only true if we're constructing an algorithm with its | |
646 | * default IV generator. For the default generator we elide the | |
647 | * template name and double-check the IV generator. | |
648 | */ | |
649 | if (algt->mask & CRYPTO_ALG_GENIV) { | |
650 | if (!balg.geniv) | |
651 | balg.geniv = crypto_default_geniv(alg); | |
652 | err = -EAGAIN; | |
653 | if (strcmp(tmpl->name, balg.geniv)) | |
654 | goto err_drop_alg; | |
655 | ||
656 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | |
657 | memcpy(inst->alg.cra_driver_name, alg->cra_driver_name, | |
658 | CRYPTO_MAX_ALG_NAME); | |
659 | } else { | |
660 | err = -ENAMETOOLONG; | |
661 | if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, | |
662 | "%s(%s)", tmpl->name, alg->cra_name) >= | |
663 | CRYPTO_MAX_ALG_NAME) | |
664 | goto err_drop_alg; | |
665 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | |
666 | "%s(%s)", tmpl->name, alg->cra_driver_name) >= | |
667 | CRYPTO_MAX_ALG_NAME) | |
668 | goto err_drop_alg; | |
669 | } | |
670 | ||
671 | inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV; | |
672 | inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC; | |
673 | inst->alg.cra_priority = alg->cra_priority; | |
674 | inst->alg.cra_blocksize = alg->cra_blocksize; | |
675 | inst->alg.cra_alignmask = alg->cra_alignmask; | |
676 | inst->alg.cra_type = &crypto_givcipher_type; | |
677 | ||
678 | inst->alg.cra_ablkcipher.ivsize = balg.ivsize; | |
679 | inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize; | |
680 | inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize; | |
681 | inst->alg.cra_ablkcipher.geniv = balg.geniv; | |
682 | ||
683 | inst->alg.cra_ablkcipher.setkey = balg.setkey; | |
684 | inst->alg.cra_ablkcipher.encrypt = balg.encrypt; | |
685 | inst->alg.cra_ablkcipher.decrypt = balg.decrypt; | |
686 | ||
687 | out: | |
688 | return inst; | |
689 | ||
690 | err_drop_alg: | |
691 | crypto_drop_skcipher(spawn); | |
692 | err_free_inst: | |
693 | kfree(inst); | |
694 | inst = ERR_PTR(err); | |
695 | goto out; | |
696 | } | |
697 | EXPORT_SYMBOL_GPL(skcipher_geniv_alloc); | |
698 | ||
699 | void skcipher_geniv_free(struct crypto_instance *inst) | |
700 | { | |
701 | crypto_drop_skcipher(crypto_instance_ctx(inst)); | |
702 | kfree(inst); | |
703 | } | |
704 | EXPORT_SYMBOL_GPL(skcipher_geniv_free); | |
705 | ||
706 | int skcipher_geniv_init(struct crypto_tfm *tfm) | |
707 | { | |
708 | struct crypto_instance *inst = (void *)tfm->__crt_alg; | |
709 | struct crypto_ablkcipher *cipher; | |
710 | ||
711 | cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst)); | |
712 | if (IS_ERR(cipher)) | |
713 | return PTR_ERR(cipher); | |
714 | ||
715 | tfm->crt_ablkcipher.base = cipher; | |
716 | tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher); | |
717 | ||
718 | return 0; | |
719 | } | |
720 | EXPORT_SYMBOL_GPL(skcipher_geniv_init); | |
721 | ||
722 | void skcipher_geniv_exit(struct crypto_tfm *tfm) | |
723 | { | |
724 | crypto_free_ablkcipher(tfm->crt_ablkcipher.base); | |
725 | } | |
726 | EXPORT_SYMBOL_GPL(skcipher_geniv_exit); | |
727 | ||
728 | MODULE_LICENSE("GPL"); | |
729 | MODULE_DESCRIPTION("Generic block chaining cipher type"); |