/* XTS: as defined in IEEE1619/D16
 * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 * (sector sizes which are not a multiple of 16 bytes are,
 * however currently unsupported)
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
f1c131b4 HX |
16 | #include <crypto/internal/skcipher.h> |
17 | #include <crypto/scatterwalk.h> | |
f19f5111 RS |
18 | #include <linux/err.h> |
19 | #include <linux/init.h> | |
20 | #include <linux/kernel.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/scatterlist.h> | |
23 | #include <linux/slab.h> | |
24 | ||
ce004556 | 25 | #include <crypto/xts.h> |
f19f5111 RS |
26 | #include <crypto/b128ops.h> |
27 | #include <crypto/gf128mul.h> | |
28 | ||
f1c131b4 HX |
29 | #define XTS_BUFFER_SIZE 128u |
30 | ||
/* Per-transform context: "child" is the skcipher doing the bulk data
 * encryption (keyed with Key1), "tweak" is a single-block cipher used
 * only to encrypt the IV into the initial tweak value (keyed with Key2).
 */
struct priv {
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
};
35 | ||
f1c131b4 HX |
36 | struct xts_instance_ctx { |
37 | struct crypto_skcipher_spawn spawn; | |
38 | char name[CRYPTO_MAX_ALG_NAME]; | |
39 | }; | |
40 | ||
41 | struct rctx { | |
42 | be128 buf[XTS_BUFFER_SIZE / sizeof(be128)]; | |
43 | ||
44 | be128 t; | |
45 | ||
46 | be128 *ext; | |
47 | ||
48 | struct scatterlist srcbuf[2]; | |
49 | struct scatterlist dstbuf[2]; | |
50 | struct scatterlist *src; | |
51 | struct scatterlist *dst; | |
52 | ||
53 | unsigned int left; | |
54 | ||
55 | struct skcipher_request subreq; | |
56 | }; | |
57 | ||
58 | static int setkey(struct crypto_skcipher *parent, const u8 *key, | |
f19f5111 RS |
59 | unsigned int keylen) |
60 | { | |
f1c131b4 HX |
61 | struct priv *ctx = crypto_skcipher_ctx(parent); |
62 | struct crypto_skcipher *child; | |
63 | struct crypto_cipher *tweak; | |
f19f5111 RS |
64 | int err; |
65 | ||
f1c131b4 | 66 | err = xts_verify_key(parent, key, keylen); |
28856a9e SM |
67 | if (err) |
68 | return err; | |
f19f5111 | 69 | |
f1c131b4 HX |
70 | keylen /= 2; |
71 | ||
25985edc | 72 | /* we need two cipher instances: one to compute the initial 'tweak' |
f19f5111 RS |
73 | * by encrypting the IV (usually the 'plain' iv) and the other |
74 | * one to encrypt and decrypt the data */ | |
75 | ||
76 | /* tweak cipher, uses Key2 i.e. the second half of *key */ | |
f1c131b4 HX |
77 | tweak = ctx->tweak; |
78 | crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK); | |
79 | crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) & | |
f19f5111 | 80 | CRYPTO_TFM_REQ_MASK); |
f1c131b4 HX |
81 | err = crypto_cipher_setkey(tweak, key + keylen, keylen); |
82 | crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) & | |
83 | CRYPTO_TFM_RES_MASK); | |
f19f5111 RS |
84 | if (err) |
85 | return err; | |
86 | ||
f1c131b4 | 87 | /* data cipher, uses Key1 i.e. the first half of *key */ |
f19f5111 | 88 | child = ctx->child; |
f1c131b4 HX |
89 | crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
90 | crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & | |
91 | CRYPTO_TFM_REQ_MASK); | |
92 | err = crypto_skcipher_setkey(child, key, keylen); | |
93 | crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & | |
94 | CRYPTO_TFM_RES_MASK); | |
f19f5111 | 95 | |
f1c131b4 HX |
96 | return err; |
97 | } | |
f19f5111 | 98 | |
f1c131b4 HX |
99 | static int post_crypt(struct skcipher_request *req) |
100 | { | |
101 | struct rctx *rctx = skcipher_request_ctx(req); | |
102 | be128 *buf = rctx->ext ?: rctx->buf; | |
103 | struct skcipher_request *subreq; | |
104 | const int bs = XTS_BLOCK_SIZE; | |
105 | struct skcipher_walk w; | |
106 | struct scatterlist *sg; | |
107 | unsigned offset; | |
108 | int err; | |
f19f5111 | 109 | |
f1c131b4 HX |
110 | subreq = &rctx->subreq; |
111 | err = skcipher_walk_virt(&w, subreq, false); | |
f19f5111 | 112 | |
f1c131b4 HX |
113 | while (w.nbytes) { |
114 | unsigned int avail = w.nbytes; | |
115 | be128 *wdst; | |
f19f5111 | 116 | |
f1c131b4 HX |
117 | wdst = w.dst.virt.addr; |
118 | ||
119 | do { | |
120 | be128_xor(wdst, buf++, wdst); | |
121 | wdst++; | |
122 | } while ((avail -= bs) >= bs); | |
123 | ||
124 | err = skcipher_walk_done(&w, avail); | |
125 | } | |
126 | ||
127 | rctx->left -= subreq->cryptlen; | |
128 | ||
129 | if (err || !rctx->left) | |
130 | goto out; | |
131 | ||
132 | rctx->dst = rctx->dstbuf; | |
133 | ||
134 | scatterwalk_done(&w.out, 0, 1); | |
135 | sg = w.out.sg; | |
136 | offset = w.out.offset; | |
137 | ||
138 | if (rctx->dst != sg) { | |
139 | rctx->dst[0] = *sg; | |
140 | sg_unmark_end(rctx->dst); | |
141 | scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2); | |
142 | } | |
143 | rctx->dst[0].length -= offset - sg->offset; | |
144 | rctx->dst[0].offset = offset; | |
145 | ||
146 | out: | |
147 | return err; | |
f19f5111 RS |
148 | } |
149 | ||
f1c131b4 | 150 | static int pre_crypt(struct skcipher_request *req) |
f19f5111 | 151 | { |
f1c131b4 HX |
152 | struct rctx *rctx = skcipher_request_ctx(req); |
153 | be128 *buf = rctx->ext ?: rctx->buf; | |
154 | struct skcipher_request *subreq; | |
f9d2691f | 155 | const int bs = XTS_BLOCK_SIZE; |
f1c131b4 HX |
156 | struct skcipher_walk w; |
157 | struct scatterlist *sg; | |
158 | unsigned cryptlen; | |
159 | unsigned offset; | |
160 | bool more; | |
161 | int err; | |
f19f5111 | 162 | |
f1c131b4 HX |
163 | subreq = &rctx->subreq; |
164 | cryptlen = subreq->cryptlen; | |
f19f5111 | 165 | |
f1c131b4 HX |
166 | more = rctx->left > cryptlen; |
167 | if (!more) | |
168 | cryptlen = rctx->left; | |
f19f5111 | 169 | |
f1c131b4 HX |
170 | skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, |
171 | cryptlen, NULL); | |
f19f5111 | 172 | |
f1c131b4 | 173 | err = skcipher_walk_virt(&w, subreq, false); |
f19f5111 | 174 | |
f1c131b4 HX |
175 | while (w.nbytes) { |
176 | unsigned int avail = w.nbytes; | |
177 | be128 *wsrc; | |
178 | be128 *wdst; | |
f19f5111 | 179 | |
f1c131b4 HX |
180 | wsrc = w.src.virt.addr; |
181 | wdst = w.dst.virt.addr; | |
f19f5111 | 182 | |
f1c131b4 HX |
183 | do { |
184 | *buf++ = rctx->t; | |
185 | be128_xor(wdst++, &rctx->t, wsrc++); | |
186 | gf128mul_x_ble(&rctx->t, &rctx->t); | |
f19f5111 RS |
187 | } while ((avail -= bs) >= bs); |
188 | ||
f1c131b4 HX |
189 | err = skcipher_walk_done(&w, avail); |
190 | } | |
191 | ||
192 | skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, | |
193 | cryptlen, NULL); | |
f19f5111 | 194 | |
f1c131b4 HX |
195 | if (err || !more) |
196 | goto out; | |
f19f5111 | 197 | |
f1c131b4 HX |
198 | rctx->src = rctx->srcbuf; |
199 | ||
200 | scatterwalk_done(&w.in, 0, 1); | |
201 | sg = w.in.sg; | |
202 | offset = w.in.offset; | |
203 | ||
204 | if (rctx->src != sg) { | |
205 | rctx->src[0] = *sg; | |
206 | sg_unmark_end(rctx->src); | |
207 | scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2); | |
f19f5111 | 208 | } |
f1c131b4 HX |
209 | rctx->src[0].length -= offset - sg->offset; |
210 | rctx->src[0].offset = offset; | |
f19f5111 | 211 | |
f1c131b4 | 212 | out: |
f19f5111 RS |
213 | return err; |
214 | } | |
215 | ||
f1c131b4 | 216 | static int init_crypt(struct skcipher_request *req, crypto_completion_t done) |
f19f5111 | 217 | { |
f1c131b4 HX |
218 | struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); |
219 | struct rctx *rctx = skcipher_request_ctx(req); | |
220 | struct skcipher_request *subreq; | |
221 | gfp_t gfp; | |
222 | ||
223 | subreq = &rctx->subreq; | |
224 | skcipher_request_set_tfm(subreq, ctx->child); | |
225 | skcipher_request_set_callback(subreq, req->base.flags, done, req); | |
226 | ||
227 | gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | |
228 | GFP_ATOMIC; | |
229 | rctx->ext = NULL; | |
230 | ||
231 | subreq->cryptlen = XTS_BUFFER_SIZE; | |
232 | if (req->cryptlen > XTS_BUFFER_SIZE) { | |
9df0eb18 EB |
233 | unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE); |
234 | ||
235 | rctx->ext = kmalloc(n, gfp); | |
236 | if (rctx->ext) | |
237 | subreq->cryptlen = n; | |
f1c131b4 HX |
238 | } |
239 | ||
240 | rctx->src = req->src; | |
241 | rctx->dst = req->dst; | |
242 | rctx->left = req->cryptlen; | |
f19f5111 | 243 | |
f1c131b4 HX |
244 | /* calculate first value of T */ |
245 | crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv); | |
246 | ||
247 | return 0; | |
f19f5111 RS |
248 | } |
249 | ||
f1c131b4 | 250 | static void exit_crypt(struct skcipher_request *req) |
f19f5111 | 251 | { |
f1c131b4 HX |
252 | struct rctx *rctx = skcipher_request_ctx(req); |
253 | ||
254 | rctx->left = 0; | |
f19f5111 | 255 | |
f1c131b4 HX |
256 | if (rctx->ext) |
257 | kzfree(rctx->ext); | |
258 | } | |
259 | ||
260 | static int do_encrypt(struct skcipher_request *req, int err) | |
261 | { | |
262 | struct rctx *rctx = skcipher_request_ctx(req); | |
263 | struct skcipher_request *subreq; | |
264 | ||
265 | subreq = &rctx->subreq; | |
266 | ||
267 | while (!err && rctx->left) { | |
268 | err = pre_crypt(req) ?: | |
269 | crypto_skcipher_encrypt(subreq) ?: | |
270 | post_crypt(req); | |
271 | ||
272 | if (err == -EINPROGRESS || | |
273 | (err == -EBUSY && | |
274 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | |
275 | return err; | |
276 | } | |
277 | ||
278 | exit_crypt(req); | |
279 | return err; | |
280 | } | |
281 | ||
282 | static void encrypt_done(struct crypto_async_request *areq, int err) | |
283 | { | |
284 | struct skcipher_request *req = areq->data; | |
285 | struct skcipher_request *subreq; | |
286 | struct rctx *rctx; | |
287 | ||
288 | rctx = skcipher_request_ctx(req); | |
289 | subreq = &rctx->subreq; | |
290 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | |
291 | ||
292 | err = do_encrypt(req, err ?: post_crypt(req)); | |
293 | if (rctx->left) | |
294 | return; | |
295 | ||
296 | skcipher_request_complete(req, err); | |
297 | } | |
298 | ||
299 | static int encrypt(struct skcipher_request *req) | |
300 | { | |
301 | return do_encrypt(req, init_crypt(req, encrypt_done)); | |
302 | } | |
303 | ||
304 | static int do_decrypt(struct skcipher_request *req, int err) | |
305 | { | |
306 | struct rctx *rctx = skcipher_request_ctx(req); | |
307 | struct skcipher_request *subreq; | |
308 | ||
309 | subreq = &rctx->subreq; | |
310 | ||
311 | while (!err && rctx->left) { | |
312 | err = pre_crypt(req) ?: | |
313 | crypto_skcipher_decrypt(subreq) ?: | |
314 | post_crypt(req); | |
315 | ||
316 | if (err == -EINPROGRESS || | |
317 | (err == -EBUSY && | |
318 | req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | |
319 | return err; | |
320 | } | |
321 | ||
322 | exit_crypt(req); | |
323 | return err; | |
324 | } | |
325 | ||
326 | static void decrypt_done(struct crypto_async_request *areq, int err) | |
327 | { | |
328 | struct skcipher_request *req = areq->data; | |
329 | struct skcipher_request *subreq; | |
330 | struct rctx *rctx; | |
331 | ||
332 | rctx = skcipher_request_ctx(req); | |
333 | subreq = &rctx->subreq; | |
334 | subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; | |
335 | ||
336 | err = do_decrypt(req, err ?: post_crypt(req)); | |
337 | if (rctx->left) | |
338 | return; | |
339 | ||
340 | skcipher_request_complete(req, err); | |
341 | } | |
342 | ||
343 | static int decrypt(struct skcipher_request *req) | |
344 | { | |
345 | return do_decrypt(req, init_crypt(req, decrypt_done)); | |
f19f5111 RS |
346 | } |
347 | ||
ce004556 JK |
348 | int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, |
349 | struct scatterlist *ssrc, unsigned int nbytes, | |
350 | struct xts_crypt_req *req) | |
351 | { | |
352 | const unsigned int bsize = XTS_BLOCK_SIZE; | |
353 | const unsigned int max_blks = req->tbuflen / bsize; | |
354 | struct blkcipher_walk walk; | |
355 | unsigned int nblocks; | |
356 | be128 *src, *dst, *t; | |
357 | be128 *t_buf = req->tbuf; | |
358 | int err, i; | |
359 | ||
360 | BUG_ON(max_blks < 1); | |
361 | ||
362 | blkcipher_walk_init(&walk, sdst, ssrc, nbytes); | |
363 | ||
364 | err = blkcipher_walk_virt(desc, &walk); | |
365 | nbytes = walk.nbytes; | |
366 | if (!nbytes) | |
367 | return err; | |
368 | ||
369 | nblocks = min(nbytes / bsize, max_blks); | |
370 | src = (be128 *)walk.src.virt.addr; | |
371 | dst = (be128 *)walk.dst.virt.addr; | |
372 | ||
373 | /* calculate first value of T */ | |
374 | req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv); | |
375 | ||
376 | i = 0; | |
377 | goto first; | |
378 | ||
379 | for (;;) { | |
380 | do { | |
381 | for (i = 0; i < nblocks; i++) { | |
382 | gf128mul_x_ble(&t_buf[i], t); | |
383 | first: | |
384 | t = &t_buf[i]; | |
385 | ||
386 | /* PP <- T xor P */ | |
387 | be128_xor(dst + i, t, src + i); | |
388 | } | |
389 | ||
390 | /* CC <- E(Key2,PP) */ | |
391 | req->crypt_fn(req->crypt_ctx, (u8 *)dst, | |
392 | nblocks * bsize); | |
393 | ||
394 | /* C <- T xor CC */ | |
395 | for (i = 0; i < nblocks; i++) | |
396 | be128_xor(dst + i, dst + i, &t_buf[i]); | |
397 | ||
398 | src += nblocks; | |
399 | dst += nblocks; | |
400 | nbytes -= nblocks * bsize; | |
401 | nblocks = min(nbytes / bsize, max_blks); | |
402 | } while (nblocks > 0); | |
403 | ||
404 | *(be128 *)walk.iv = *t; | |
405 | ||
406 | err = blkcipher_walk_done(desc, &walk, nbytes); | |
407 | nbytes = walk.nbytes; | |
408 | if (!nbytes) | |
409 | break; | |
410 | ||
411 | nblocks = min(nbytes / bsize, max_blks); | |
412 | src = (be128 *)walk.src.virt.addr; | |
413 | dst = (be128 *)walk.dst.virt.addr; | |
414 | } | |
415 | ||
416 | return err; | |
417 | } | |
418 | EXPORT_SYMBOL_GPL(xts_crypt); | |
419 | ||
f1c131b4 | 420 | static int init_tfm(struct crypto_skcipher *tfm) |
f19f5111 | 421 | { |
f1c131b4 HX |
422 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); |
423 | struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst); | |
424 | struct priv *ctx = crypto_skcipher_ctx(tfm); | |
425 | struct crypto_skcipher *child; | |
426 | struct crypto_cipher *tweak; | |
f19f5111 | 427 | |
f1c131b4 HX |
428 | child = crypto_spawn_skcipher(&ictx->spawn); |
429 | if (IS_ERR(child)) | |
430 | return PTR_ERR(child); | |
f19f5111 | 431 | |
f1c131b4 | 432 | ctx->child = child; |
f19f5111 | 433 | |
f1c131b4 HX |
434 | tweak = crypto_alloc_cipher(ictx->name, 0, 0); |
435 | if (IS_ERR(tweak)) { | |
436 | crypto_free_skcipher(ctx->child); | |
437 | return PTR_ERR(tweak); | |
f19f5111 RS |
438 | } |
439 | ||
f1c131b4 HX |
440 | ctx->tweak = tweak; |
441 | ||
442 | crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) + | |
443 | sizeof(struct rctx)); | |
f19f5111 RS |
444 | |
445 | return 0; | |
446 | } | |
447 | ||
f1c131b4 | 448 | static void exit_tfm(struct crypto_skcipher *tfm) |
f19f5111 | 449 | { |
f1c131b4 HX |
450 | struct priv *ctx = crypto_skcipher_ctx(tfm); |
451 | ||
452 | crypto_free_skcipher(ctx->child); | |
f19f5111 RS |
453 | crypto_free_cipher(ctx->tweak); |
454 | } | |
455 | ||
/* Instance destructor: drop the child spawn and free the instance. */
static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}
461 | ||
462 | static int create(struct crypto_template *tmpl, struct rtattr **tb) | |
f19f5111 | 463 | { |
f1c131b4 HX |
464 | struct skcipher_instance *inst; |
465 | struct crypto_attr_type *algt; | |
466 | struct xts_instance_ctx *ctx; | |
467 | struct skcipher_alg *alg; | |
468 | const char *cipher_name; | |
89027579 | 469 | u32 mask; |
f19f5111 RS |
470 | int err; |
471 | ||
f1c131b4 HX |
472 | algt = crypto_get_attr_type(tb); |
473 | if (IS_ERR(algt)) | |
474 | return PTR_ERR(algt); | |
475 | ||
476 | if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) | |
477 | return -EINVAL; | |
478 | ||
479 | cipher_name = crypto_attr_alg_name(tb[1]); | |
480 | if (IS_ERR(cipher_name)) | |
481 | return PTR_ERR(cipher_name); | |
482 | ||
483 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | |
484 | if (!inst) | |
485 | return -ENOMEM; | |
486 | ||
487 | ctx = skcipher_instance_ctx(inst); | |
488 | ||
489 | crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); | |
89027579 HX |
490 | |
491 | mask = crypto_requires_off(algt->type, algt->mask, | |
492 | CRYPTO_ALG_NEED_FALLBACK | | |
493 | CRYPTO_ALG_ASYNC); | |
494 | ||
495 | err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask); | |
f1c131b4 HX |
496 | if (err == -ENOENT) { |
497 | err = -ENAMETOOLONG; | |
498 | if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", | |
499 | cipher_name) >= CRYPTO_MAX_ALG_NAME) | |
500 | goto err_free_inst; | |
501 | ||
89027579 | 502 | err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask); |
f1c131b4 HX |
503 | } |
504 | ||
f19f5111 | 505 | if (err) |
f1c131b4 | 506 | goto err_free_inst; |
f19f5111 | 507 | |
f1c131b4 | 508 | alg = crypto_skcipher_spawn_alg(&ctx->spawn); |
f19f5111 | 509 | |
f1c131b4 HX |
510 | err = -EINVAL; |
511 | if (alg->base.cra_blocksize != XTS_BLOCK_SIZE) | |
512 | goto err_drop_spawn; | |
f19f5111 | 513 | |
f1c131b4 HX |
514 | if (crypto_skcipher_alg_ivsize(alg)) |
515 | goto err_drop_spawn; | |
f19f5111 | 516 | |
f1c131b4 HX |
517 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts", |
518 | &alg->base); | |
519 | if (err) | |
520 | goto err_drop_spawn; | |
f19f5111 | 521 | |
f1c131b4 HX |
522 | err = -EINVAL; |
523 | cipher_name = alg->base.cra_name; | |
f19f5111 | 524 | |
f1c131b4 HX |
525 | /* Alas we screwed up the naming so we have to mangle the |
526 | * cipher name. | |
527 | */ | |
528 | if (!strncmp(cipher_name, "ecb(", 4)) { | |
529 | unsigned len; | |
f19f5111 | 530 | |
f1c131b4 HX |
531 | len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name)); |
532 | if (len < 2 || len >= sizeof(ctx->name)) | |
533 | goto err_drop_spawn; | |
f19f5111 | 534 | |
f1c131b4 HX |
535 | if (ctx->name[len - 1] != ')') |
536 | goto err_drop_spawn; | |
f19f5111 | 537 | |
f1c131b4 | 538 | ctx->name[len - 1] = 0; |
f19f5111 | 539 | |
f1c131b4 HX |
540 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
541 | "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) | |
542 | return -ENAMETOOLONG; | |
543 | } else | |
544 | goto err_drop_spawn; | |
f19f5111 | 545 | |
f1c131b4 HX |
546 | inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; |
547 | inst->alg.base.cra_priority = alg->base.cra_priority; | |
548 | inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE; | |
549 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask | | |
550 | (__alignof__(u64) - 1); | |
551 | ||
552 | inst->alg.ivsize = XTS_BLOCK_SIZE; | |
553 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2; | |
554 | inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2; | |
555 | ||
556 | inst->alg.base.cra_ctxsize = sizeof(struct priv); | |
557 | ||
558 | inst->alg.init = init_tfm; | |
559 | inst->alg.exit = exit_tfm; | |
560 | ||
561 | inst->alg.setkey = setkey; | |
562 | inst->alg.encrypt = encrypt; | |
563 | inst->alg.decrypt = decrypt; | |
564 | ||
565 | inst->free = free; | |
566 | ||
567 | err = skcipher_register_instance(tmpl, inst); | |
568 | if (err) | |
569 | goto err_drop_spawn; | |
570 | ||
571 | out: | |
572 | return err; | |
573 | ||
574 | err_drop_spawn: | |
575 | crypto_drop_skcipher(&ctx->spawn); | |
576 | err_free_inst: | |
f19f5111 | 577 | kfree(inst); |
f1c131b4 | 578 | goto out; |
f19f5111 RS |
579 | } |
580 | ||
581 | static struct crypto_template crypto_tmpl = { | |
582 | .name = "xts", | |
f1c131b4 | 583 | .create = create, |
f19f5111 RS |
584 | .module = THIS_MODULE, |
585 | }; | |
586 | ||
587 | static int __init crypto_module_init(void) | |
588 | { | |
589 | return crypto_register_template(&crypto_tmpl); | |
590 | } | |
591 | ||
592 | static void __exit crypto_module_exit(void) | |
593 | { | |
594 | crypto_unregister_template(&crypto_tmpl); | |
595 | } | |
596 | ||
597 | module_init(crypto_module_init); | |
598 | module_exit(crypto_module_exit); | |
599 | ||
600 | MODULE_LICENSE("GPL"); | |
601 | MODULE_DESCRIPTION("XTS block cipher mode"); | |
4943ba16 | 602 | MODULE_ALIAS_CRYPTO("xts"); |