/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

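/*
 * Illustrative sketch of how a cipher implementation typically drives the
 * walk API (not taken from this file; the processing step in the loop body
 * is a placeholder):
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		... encrypt/decrypt walk.nbytes bytes from
 *		    walk.src.virt.addr into walk.dst.virt.addr ...
 *		err = skcipher_walk_done(&walk, 0);
 *	}
 *	return err;
 */
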
/* Internal state flags for an skcipher walk. */
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,	/* walk returns physical pages (async) */
	SKCIPHER_WALK_SLOW = 1 << 1,	/* current step uses a bounce buffer */
	SKCIPHER_WALK_COPY = 1 << 2,	/* copy through a temporary page */
	SKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst are mapped separately */
	SKCIPHER_WALK_SLEEP = 1 << 4,	/* sleeping allocations are allowed */
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
}

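/*
 * skcipher_walk_done() - finish one step of a walk
 * @walk: the walk being stepped
 * @err: bytes left unprocessed in this step (>= 0), or a negative error
 *
 * Unmaps or flushes the buffers of the current step, advances the
 * scatterlist cursors and either sets up the next step or, once the walk
 * is complete (or has failed), releases any temporary resources.
 */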
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			goto finish;
		}
		skcipher_done_slow(walk, n);
		goto already_advanced;
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
already_advanced:
	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

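/*
 * Flush all writes queued during an async (physical) walk back to the
 * destination scatterlist and free the queued buffers.  Called once the
 * asynchronous cipher operation has completed.
 */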
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

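/*
 * Set up the next step of the walk.  The fast path maps src/dst directly;
 * misaligned data is copied through a temporary page; and chunks smaller
 * than one step fall back to the slow path's bounce buffer.
 */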
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_next);

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;
	walk->nbytes = walk->total;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

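/*
 * skcipher_walk_virt() - start a walk over the request's scatterlists with
 * the data mapped into the kernel's virtual address space.  With @atomic
 * set the walk may be used in atomic context and will never sleep.
 */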
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

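/*
 * The AEAD variants walk only the crypto region: the associated data is
 * skipped in both scatterlists before the walk begins, and for decryption
 * the authentication tag is excluded from the total.
 */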
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

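/*
 * Compatibility glue: algorithms still registered through the legacy
 * blkcipher and ablkcipher interfaces are wrapped below so that they can
 * be used through the skcipher API.
 */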
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	return 0;
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return skcipher_setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

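/*
 * Illustrative sketch of caller-side usage (not part of this file's logic),
 * assuming the "cbc(aes)" algorithm is available:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	... build a struct skcipher_request and call
 *	    crypto_skcipher_encrypt()/crypto_skcipher_decrypt() ...
 *	crypto_free_skcipher(tfm);
 */
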
int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");