/* LRW: as defined by Cyril Guyot in
 *	http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

#define LRW_BUFFER_SIZE 128u

struct priv {
	struct crypto_skcipher *child;
	struct lrw_table_ctx table;
};

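/* Per-request state: buf (or the larger ext allocation, when available)
 * caches the tweak values T for the blocks of the chunk currently in
 * flight, so that post_crypt() can XOR them back into the child cipher's
 * output. */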
struct rctx {
	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];

	be128 t;

	be128 *ext;

	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	unsigned int left;

	struct skcipher_request subreq;
};

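/* Set bit @bit of the 128-bit big-endian value at @b; the XOR below
 * converts the bit's position within the 128-bit integer into the native
 * word/bit index expected by __set_bit() on both endiannesses. */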
static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}

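/* Precompute the multiplication tables for Key2 (the tweak key): table is
 * gf128mul's 64k lookup table for multiplication by Key2, and mulinc[i]
 * ends up holding (x^0 + ... + x^i) * Key2 -- the amount by which T
 * changes when an increment of the block index I flips bits 0..i (see
 * get_index128() and its use in pre_crypt()). */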
int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
	be128 tmp = { 0 };
	int i;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);

void lrw_free_table(struct lrw_table_ctx *ctx)
{
	if (ctx->table)
		gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

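/* The last LRW_BLOCK_SIZE bytes of the supplied key are the tweak key
 * (Key2); the rest is handed to the child cipher as Key1. */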
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	return lrw_init_table(&ctx->table, tweak);
}

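/* 128-bit increment of the big-endian block index I; b is the low half. */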
static inline void inc(be128 *iv)
{
	be64_add_cpu(&iv->b, 1);
	if (!iv->b)
		be64_add_cpu(&iv->a, 1);
}

/* this returns the number of consecutive 1 bits starting
 * from the right, e.g. get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
	int x;
	__be32 *p = (__be32 *) block;

	for (p += 3, x = 0; x < 128; p--, x += 32) {
		u32 val = be32_to_cpup(p);

		if (!~val)
			continue;

		return x + ffz(val);
	}

	return x;
}

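/* Second whitening pass: XOR the cached tweak values back into the child
 * cipher's output (C = CC xor T), then, if more data remains, advance
 * rctx->dst past the bytes processed so far. */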
static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			be128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;

	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}

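/* First whitening pass: compute PP = P xor T for up to one buffer's worth
 * of blocks, caching each T and updating it incrementally via the
 * precomputed mulinc[] table, then point the subrequest at the child
 * cipher for the actual encryption/decryption. */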
static int pre_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	be128 *iv;
	bool more;
	int err;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, tfm);

	cryptlen = subreq->cryptlen;
	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, req->iv);

	err = skcipher_walk_virt(&w, subreq, false);
	iv = w.iv;

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			be128_xor(wdst++, &rctx->t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&rctx->t, &rctx->t,
				  &ctx->table.mulinc[get_index128(iv)]);
			inc(iv);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}

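/* Set up the subrequest and the first tweak T = I*Key2; try to allocate a
 * larger tweak cache (up to a page) so that bigger chunks can be handled
 * per pass, falling back to the fixed in-request buffer on failure. */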
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = LRW_BUFFER_SIZE;
	if (req->cryptlen > LRW_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table.table);

	return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	kfree(rctx->ext);
}

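/* Drive the request in chunks: whiten, run the child cipher, un-whiten,
 * until everything is processed or the child has gone asynchronous. */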
static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}

static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (rctx->left)
		return;

out:
	skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}

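/* Bulk LRW helper for outside users of the legacy blkcipher interface
 * (e.g. accelerated implementations): the caller supplies a table context
 * precomputed with lrw_init_table(), a tweak buffer of req->tbuflen bytes
 * and a callback that encrypts/decrypts in place. */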
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct lrw_crypt_req *req)
{
	const unsigned int bsize = LRW_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct lrw_table_ctx *ctx = req->table_ctx;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	be128 *iv, *src, *dst, *t;
	be128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(walk.nbytes / bsize, max_blks);
	src = (be128 *)walk.src.virt.addr;
	dst = (be128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	iv = (be128 *)walk.iv;
	t_buf[0] = *iv;

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&t_buf[0], ctx->table);

	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				/* T <- I*Key2, using the optimization
				 * discussed in the specification */
				be128_xor(&t_buf[i], t,
					  &ctx->mulinc[get_index128(iv)]);
				inc(iv);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				be128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				be128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (be128 *)walk.src.virt.addr;
		dst = (be128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);

static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct rctx));

	return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	lrw_free_table(&ctx->table);
	crypto_free_skcipher(ctx->child);
}

static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

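/* Template constructor: falls back from grabbing "x" to "ecb(x)" when "x"
 * itself is not available as an skcipher, and normalizes the resulting
 * cra_name back to "lrw(x)" (see the name mangling below). */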
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn, ecb_name, 0,
					   crypto_requires_sync(algt->type,
								algt->mask));
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_drop_spawn;

		if (ecb_name[len - 1] != ')')
			goto err_drop_spawn;

		ecb_name[len - 1] = 0;

		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_drop_spawn;
	}

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");