/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

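/*
 * Walk state flags:
 *
 * BLKCIPHER_WALK_PHYS: the caller asked for physical addresses
 *	(blkcipher_walk_phys()); src/dst are reported as page + offset
 *	and are never mapped here.
 * BLKCIPHER_WALK_SLOW: the current chunk is bounced through an aligned
 *	temporary buffer because a full block is not contiguously
 *	accessible in the scatterlists.
 * BLKCIPHER_WALK_COPY: the chunk is copied through a spare page to
 *	satisfy the transform's alignment mask.
 * BLKCIPHER_WALK_DIFF: source and destination differ, so both mappings
 *	are held at once.
 */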
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

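/*
 * Map or unmap the walk's current source/destination position via the
 * scatterwalk API.
 */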
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
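
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for
 * start == base + 4090 and len == 16, start + len - 1 lands in the
 * following page, so end_page == base + 4096 and the spot returned is
 * the next page boundary.  If [start, start + len) fits within one
 * page, end_page <= start and start itself is returned.
 */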

static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

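/*
 * Finish the current chunk of a walk.  @err is either a negative error
 * code or the number of bytes of walk->nbytes left unprocessed (it must
 * be zero for slow-path chunks, which are consumed whole).  Writes back
 * and unmaps the current chunk, then either sets up the next chunk in
 * walk->nbytes or, once the walk completes, copies the IV back to
 * desc->info, frees any bounce buffers and returns the final status.
 */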
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

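/*
 * Slow path: gather the next full block into an aligned bounce buffer,
 * reusing walk->buffer or the spare page when available and allocating
 * otherwise.  blkcipher_done_slow() copies the result back out.
 */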
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

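/*
 * Copy path: bounce the chunk through the preallocated page so that the
 * cipher sees data aligned to its mask; the operation then runs in place
 * on the copy and blkcipher_done_fast() writes it back.
 */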
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

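/*
 * Fast path: operate directly on the scatterlist pages.  For virtual
 * walks the source is mapped and, when the operation is in place, the
 * mapping is shared with the destination; otherwise BLKCIPHER_WALK_DIFF
 * is set and the destination is mapped separately.
 */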
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

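/*
 * Set up the next chunk of the walk: reject trailing data shorter than
 * the cipher block size, clear the per-chunk flags, and choose the slow,
 * copy or fast path depending on alignment and on how many contiguous
 * bytes the scatterlists can provide.  For physical walks, the bounce
 * buffer addresses are translated back to page + offset form.
 */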
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

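/*
 * The IV passed in via desc->info does not satisfy the transform's
 * alignment mask, so carve an aligned copy out of a fresh buffer.  The
 * allocation is sized so that it can double as the slow-path scratch
 * space, and blkcipher_walk_done() copies the IV back on completion.
 */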
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

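/*
 * Start a walk that yields virtual addresses, using the transform's own
 * block size, IV size and alignment mask.
 */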
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

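/*
 * Typical pattern for driving the walker (an illustrative sketch, not
 * code from this file; "crypt_blocks" stands in for the algorithm's
 * block-processing routine and "bsize" for its block size):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		crypt_blocks(walk.dst.virt.addr, walk.src.virt.addr,
 *			     walk.nbytes & ~(bsize - 1));
 *		err = blkcipher_walk_done(desc, &walk,
 *					  walk.nbytes & (bsize - 1));
 *	}
 *	return err;
 */
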
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

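/*
 * Common entry point for all walk variants: refuse to run in hard-IRQ
 * context, pick up the IV from desc->info (bouncing it to an aligned
 * buffer if needed) and prime both scatterwalks before handing over to
 * blkcipher_walk_next().
 */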
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

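/*
 * Like blkcipher_walk_virt(), but walks in a caller-chosen chunk size
 * while still rejecting inputs shorter than the cipher's own block size.
 */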
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

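/*
 * Variant for AEAD implementations built on this walker: the block size,
 * IV size and alignment mask come from the AEAD transform, while the
 * walk chunk size is still chosen by the caller.
 */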
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

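/*
 * The key is not aligned to the transform's mask: bounce it through an
 * aligned heap buffer and wipe the copy before freeing it.
 */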
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

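/*
 * Validate the key length against the algorithm's declared limits,
 * flagging CRYPTO_TFM_RES_BAD_KEY_LEN on failure, then dispatch to the
 * aligned or unaligned setkey path.
 */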
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

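/*
 * When instantiated as a synchronous blkcipher, reserve room for the IV
 * after the algorithm context, aligned to the alignment mask; async
 * users carry the IV in the request instead.
 */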
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

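/*
 * Instantiate the transform: a request for the plain blkcipher type gets
 * the synchronous ops, anything else the ablkcipher wrapper.  IVs larger
 * than PAGE_SIZE / 8 are rejected as a sanity limit.
 */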
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");