/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

#define LRW_BUFFER_SIZE 128u

struct priv {
	struct crypto_skcipher *child;
	struct lrw_table_ctx table;
};

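/* Per-request context.  The tweak values T for up to
 * LRW_BUFFER_SIZE / sizeof(be128) = 8 blocks are stashed in buf[] on
 * each pass; init_crypt() allocates a larger heap buffer into ext when
 * the request does not fit. */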
struct rctx {
	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];

	be128 t;

	be128 *ext;

	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	unsigned int left;

	struct skcipher_request subreq;
};

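/* Set bit number @bit of the 128-bit big-endian block @b, where bit 0
 * is the least significant bit of the final byte.  The XOR translates
 * that numbering into the native bit order expected by __set_bit(). */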
static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
	be128 tmp = { 0 };
	int i;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table: mulinc[i] is the GF(2^128)
	 * product (2^(i+1) - 1) * Key2.  Incrementing the block index I
	 * flips exactly its i trailing one bits plus the next zero bit,
	 * so T = I * Key2 can be updated with a single XOR of mulinc[i]
	 * instead of a full multiplication. */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);

void lrw_free_table(struct lrw_table_ctx *ctx)
{
	if (ctx->table)
		gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

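/* The supplied key is the child cipher's key with the 16-byte tweak
 * key (Key2) appended; only the leading part is passed down to the
 * child transform. */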
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	return lrw_init_table(&ctx->table, tweak);
}

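/* Increment the 128-bit big-endian block index held in @iv, carrying
 * from the low 64-bit half (b) into the high half (a) on overflow. */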
static inline void inc(be128 *iv)
{
	be64_add_cpu(&iv->b, 1);
	if (!iv->b)
		be64_add_cpu(&iv->a, 1);
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
	int x;
	__be32 *p = (__be32 *) block;

	for (p += 3, x = 0; x < 128; p--, x += 32) {
		u32 val = be32_to_cpup(p);

		if (!~val)
			continue;

		return x + ffz(val);
	}

	/*
	 * If we get here, then x == 128 and the counter is being
	 * incremented from all ones to all zeros.  Return index 127,
	 * i.e. the one corresponding to key2*{ 1,...,1 }, rather than
	 * x, which would read past the end of the mulinc[] table.
	 */
	return 127;
}

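/* Each chunk of the request is handled in three steps: pre_crypt()
 * computes the per-block tweaks T, saves them in buf[] and XORs them
 * into the destination (PP = P xor T); the child cipher then encrypts
 * or decrypts the destination in place (CC = E(PP)); finally
 * post_crypt() replays the saved tweaks over the result (C = CC xor T)
 * and advances the scatterlists to the next chunk. */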
static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;

	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}

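/* First pass over a chunk: record T for every block and compute
 * PP = P xor T in place in the destination, then point the subrequest
 * at the destination for the child cipher to process. */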
static int pre_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	be128 *iv;
	bool more;
	int err;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, tfm);

	cryptlen = subreq->cryptlen;
	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, req->iv);

	err = skcipher_walk_virt(&w, subreq, false);
	iv = w.iv;

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			be128_xor(wdst++, &rctx->t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&rctx->t, &rctx->t,
				  &ctx->table.mulinc[get_index128(iv)]);
			inc(iv);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}

static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = LRW_BUFFER_SIZE;
	if (req->cryptlen > LRW_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table.table);

	return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	kfree(rctx->ext);
}

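/* Drive the chunked pipeline until the request is exhausted.  If the
 * child transform runs asynchronously (-EINPROGRESS, or -EBUSY with a
 * backlogged request) we return immediately and the completion
 * callback resumes the loop. */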
static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);
	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

	skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);
	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

	skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}

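/* Legacy synchronous entry point for callers that still drive LRW
 * through the old blkcipher walk (e.g. accelerated glue code); the
 * skcipher template above is the preferred interface. */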
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct lrw_crypt_req *req)
{
	const unsigned int bsize = LRW_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct lrw_table_ctx *ctx = req->table_ctx;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *iv, *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(walk.nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)walk.iv;
	t_buf[0] = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&t_buf[0], ctx->table);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				/* T <- I*Key2, using the optimization
				 * discussed in the specification */
				be128_xor(&t_buf[i], t,
					  &ctx->mulinc[get_index128(iv)]);
				inc(iv);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	lrw_free_table(&ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn, ecb_name, 0,
					   crypto_requires_sync(algt->type,
								algt->mask));
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_drop_spawn;

		if (ecb_name[len - 1] != ')')
			goto err_drop_spawn;

		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			/* don't leak inst and the spawn on overflow */
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	}

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
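
/*
 * Usage sketch (a minimal, illustrative example; not part of the
 * original module).  It shows how a kernel caller might drive the
 * "lrw(aes)" instance through the generic skcipher API.  The helper
 * name, the buffer sizes and the synchronous crypto_wait_req()
 * plumbing are assumptions for the example, not requirements of this
 * file.  The key layout is the AES key followed by the 16-byte tweak
 * key (Key2), and the IV carries the 16-byte big-endian block index I.
 */
static int __maybe_unused lrw_usage_example(void)
{
	u8 key[32 + LRW_BLOCK_SIZE] = { 0 };	/* AES-256 key || Key2 */
	u8 iv[LRW_BLOCK_SIZE] = { 0 };		/* block index I */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *data;
	int err;

	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	/* scatterlists must not point at the stack, so use the heap */
	data = kzalloc(64, GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_data;
	}

	sg_init_one(&sg, data, 64);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					   CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

	/* encrypt 64 bytes in place, waiting for async completion */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_data:
	kfree(data);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}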