/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"

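/*
 * Walk state flags, as used below: PHYS means the caller wants
 * page/offset pairs rather than mapped virtual addresses; SLOW means
 * the current block is bounced through an aligned buffer because less
 * than one block is contiguously available; COPY means data is staged
 * through walk->page to fix up misalignment; DIFF means source and
 * destination had to be mapped separately.
 */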
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}

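/*
 * Return a pointer to a region of len bytes that does not cross a page
 * boundary: if [start, start + len) would straddle a page, snap forward
 * to the start of the next page; otherwise use start unchanged.
 */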
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	if (offset_in_page(start + len) < len)
		return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
	return start;
}

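/*
 * Slow-path completion: the cipher just processed one block inside the
 * aligned bounce buffer; copy that block out to the destination
 * scatterlist and report one block consumed.
 */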
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

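/*
 * Fast-path completion: n arrives as the number of bytes the caller
 * left unprocessed, so convert it to the number of bytes consumed.
 * If the data was staged through walk->page (COPY), write the result
 * back to the destination; otherwise just drop the kmaps.  Finally
 * advance both scatterlist walks past the consumed bytes.
 */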
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	n = walk->nbytes - n;

	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		blkcipher_unmap_src(walk);
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

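/*
 * Finish one step of a walk.  On success, err is the number of bytes
 * the caller left unprocessed this round; on failure it is a negative
 * errno.  A typical caller, sketched here for a virtual-address walk
 * (compare the users in cbc.c and ecb.c), looks roughly like:
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while ((nbytes = walk.nbytes)) {
 *		... process whole blocks at walk.src.virt.addr into
 *		    walk.dst.virt.addr, leaving nbytes % bsize bytes ...
 *		err = blkcipher_walk_done(desc, &walk, nbytes % bsize);
 *	}
 *	return err;
 */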
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
		unsigned int n;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, err);
		else
			n = blkcipher_done_slow(tfm, walk, bsize);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

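/*
 * Slow path: less than one block of input is contiguously available,
 * so stage a single block through an aligned bounce buffer.  The
 * buffer is sized so that both the destination and the source slot
 * land on non-page-crossing spots after alignment.
 */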
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
						 bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

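/*
 * Copy path: the addresses are usable but misaligned for this cipher,
 * so bounce the source data through the page-aligned walk->page and
 * let the cipher work in place on that copy.
 */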
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

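/*
 * Fast path: record page/offset pairs for both sides.  For a virtual
 * walk, map the source and reuse that mapping for in-place operation;
 * only when source and destination actually differ (DIFF) is the
 * destination mapped separately.
 */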
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

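/*
 * Advance the walk by one step: take the slow path when less than one
 * block is contiguously reachable, the copy path when either side is
 * misaligned for this cipher, and the fast path otherwise.
 */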
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize = crypto_blkcipher_blocksize(tfm);
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < bsize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

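/*
 * The user-supplied IV is not aligned for this cipher, so make an
 * aligned copy.  The copy is placed in walk->buffer after two
 * block-sized, non-page-crossing slots: the same buffer may later be
 * reused by blkcipher_next_slow(), which claims those first two slots
 * for its bounce blocks.
 */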
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

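/*
 * Entry points: a virtual walk hands the caller kmapped addresses in
 * walk->src.virt.addr and walk->dst.virt.addr, while a physical walk
 * only fills in the page/offset pairs and leaves mapping to the caller.
 */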
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

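/*
 * The key is not aligned for this cipher: bounce it through an aligned
 * heap buffer, pass the aligned copy to the real setkey and wipe the
 * copy before freeing it.
 */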
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	/* Wipe only the keylen bytes actually written; wiping absize
	 * bytes from alignbuffer would run past the allocation. */
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

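/*
 * Adapters that expose a synchronous blkcipher through the ablkcipher
 * interface: each request is executed inline on a stack descriptor and
 * has completed by the time these helpers return.
 */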
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

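/*
 * Context size.  A blkcipher algorithm is inherently synchronous, so
 * the XOR below flips the ASYNC bit of the requested type; (type & mask)
 * then tests true exactly when the caller masked on CRYPTO_ALG_ASYNC
 * and asked for the synchronous interface.  Only in that case is extra,
 * alignment-padded room reserved after the context for the IV; async
 * users carry the IV in the request (req->info) instead.
 */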
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if ((type & mask) && cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	/* The IV area reserved by crypto_blkcipher_ctxsize() sits right
	 * after the aligned tfm context. */
	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

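/*
 * Pick the interface to expose, using the same ASYNC-bit XOR trick as
 * crypto_blkcipher_ctxsize(): synchronous ops when the caller asked
 * for them, the ablkcipher adapters otherwise.  Oversized IVs (more
 * than PAGE_SIZE / 8) are rejected up front.
 */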
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if (type & mask)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");