/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior (sebastian@breakpoint.cc) SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include "crypt_s390.h"

#define AES_KEYLEN_128		1
#define AES_KEYLEN_192		2
#define AES_KEYLEN_256		4

static u8 *ctrblk;
static char keylen_flag;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	long enc;
	long dec;
	int key_len;
	union {
		struct crypto_blkcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

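/*
 * Parameter block for the PCC (perform cryptographic computation)
 * instruction, used by xts_aes_crypt() to derive the initial XTS tweak
 * from the tweak key and the IV.
 */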
struct pcc_param {
	u8 key[32];
	u8 tweak[16];
	u8 block[16];
	u8 bit[16];
	u8 xts[16];
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	long enc;
	long dec;
	int key_len;
	struct crypto_blkcipher *fallback;
};

/*
 * Check if the key_len is supported by the HW.
 * Returns 0 if it is, a positive number if it is not and the software
 * fallback is required, or a negative number if the key size is not valid.
 */
static int need_fallback(unsigned int key_len)
{
	switch (key_len) {
	case 16:
		if (!(keylen_flag & AES_KEYLEN_128))
			return 1;
		break;
	case 24:
		if (!(keylen_flag & AES_KEYLEN_192))
			return 1;
		break;
	case 32:
		if (!(keylen_flag & AES_KEYLEN_256))
			return 1;
		break;
	default:
		return -1;
		break;
	}
	return 0;
}

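/*
 * Key setting for the software fallback cipher: forward this tfm's
 * request flags to the fallback tfm and mirror any result flags back,
 * so callers see consistent error reporting.
 */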
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

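/*
 * Set the key for the plain cipher: store it in the context for the
 * hardware path, or hand it to the software fallback when the key
 * length is not supported by the machine.
 */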
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int ret;

	ret = need_fallback(key_len);
	if (ret < 0) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	sctx->key_len = key_len;
	if (!ret) {
		memcpy(sctx->key, in_key, key_len);
		return 0;
	}

	return setkey_fallback_cip(tfm, in_key, key_len);
}

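/*
 * Single-block encryption: use the KM (cipher message) instruction for
 * the key sizes the hardware supports, otherwise go through the
 * software fallback cipher.
 */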
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(need_fallback(sctx->key_len))) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}

	switch (sctx->key_len) {
	case 16:
		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 24:
		crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	case 32:
		crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
			      AES_BLOCK_SIZE);
		break;
	}
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-s390",
	.cra_priority = CRYPT_S390_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt,
		}
	}
};

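/*
 * Block-cipher fallback helpers: key setting mirrors the request and
 * result flags between this tfm and the fallback tfm, while the
 * encrypt/decrypt wrappers temporarily swap desc->tfm so the generic
 * implementation runs on the caller's request.
 */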
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = sctx->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KM_AES_128_ENCRYPT;
		sctx->dec = KM_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KM_AES_192_ENCRYPT;
		sctx->dec = KM_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KM_AES_256_ENCRYPT;
		sctx->dec = KM_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

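/*
 * ECB: walk the scatterlists and feed each run of complete AES blocks
 * to the KM instruction in one go; any partial remainder is handed
 * back to blkcipher_walk_done().
 */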
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
		struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes;

	while ((nbytes = walk->nbytes)) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_km(func, param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(sctx->fallback.blk);
	sctx->fallback.blk = NULL;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-s390",
	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ecb_aes_set_key,
			.encrypt = ecb_aes_encrypt,
			.decrypt = ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	ret = need_fallback(key_len);
	if (ret > 0) {
		sctx->key_len = key_len;
		return setkey_fallback_blk(tfm, in_key, key_len);
	}

	switch (key_len) {
	case 16:
		sctx->enc = KMC_AES_128_ENCRYPT;
		sctx->dec = KMC_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KMC_AES_192_ENCRYPT;
		sctx->dec = KMC_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KMC_AES_256_ENCRYPT;
		sctx->dec = KMC_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

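/*
 * CBC: the KMC instruction takes a parameter block holding both the
 * chaining value (IV) and the key; the updated chaining value is
 * copied back into walk->iv once the walk is complete.
 */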
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
		struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (!nbytes)
		goto out;

	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	do {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;

		ret = crypt_s390_kmc(func, &param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);

out:
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->enc, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(need_fallback(sctx->key_len)))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, sctx->dec, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-s390",
	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = cbc_aes_set_key,
			.encrypt = cbc_aes_encrypt,
			.decrypt = cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct crypto_blkcipher *tfm;
	unsigned int ret;

	tfm = desc->tfm;
	desc->tfm = xts_ctx->fallback;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct crypto_blkcipher *tfm;
	unsigned int ret;

	tfm = desc->tfm;
	desc->tfm = xts_ctx->fallback;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}

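/*
 * XTS uses a double-length key: the first half is the data-encryption
 * key handed to KM, the second half is the tweak key used by PCC.
 * Only the 32- and 64-byte key sizes map to hardware functions; a
 * 48-byte key is routed to the software fallback.
 */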
static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	switch (key_len) {
	case 32:
		xts_ctx->enc = KM_XTS_128_ENCRYPT;
		xts_ctx->dec = KM_XTS_128_DECRYPT;
		memcpy(xts_ctx->key + 16, in_key, 16);
		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
		break;
	case 48:
		xts_ctx->enc = 0;
		xts_ctx->dec = 0;
		xts_fallback_setkey(tfm, in_key, key_len);
		break;
	case 64:
		xts_ctx->enc = KM_XTS_256_ENCRYPT;
		xts_ctx->dec = KM_XTS_256_DECRYPT;
		memcpy(xts_ctx->key, in_key, 32);
		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
		break;
	default:
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	xts_ctx->key_len = key_len;
	return 0;
}

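/*
 * XTS: run PCC on the tweak key and the IV first to compute the
 * initial XTS tweak, then pass the data key and that tweak to KM,
 * processing only complete blocks per iteration of the walk.
 */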
static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
		struct s390_xts_ctx *xts_ctx,
		struct blkcipher_walk *walk)
{
	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
	unsigned int n;
	u8 *in, *out;
	struct pcc_param pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (!nbytes)
		goto out;

	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
	if (ret < 0)
		return -EIO;

	memcpy(xts_param.key, xts_ctx->key, 32);
	memcpy(xts_param.init, pcc_param.xts, 16);
	do {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;

		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
out:
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(xts_ctx->key_len == 48))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(xts_ctx->key_len == 48))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(xts_ctx->fallback);
	xts_ctx->fallback = NULL;
}

static struct crypto_alg xts_aes_alg = {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-s390",
	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = xts_fallback_init,
	.cra_exit = xts_fallback_exit,
	.cra_u = {
		.blkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = xts_aes_set_key,
			.encrypt = xts_aes_encrypt,
			.decrypt = xts_aes_decrypt,
		}
	}
};

static int xts_aes_alg_reg;

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	switch (key_len) {
	case 16:
		sctx->enc = KMCTR_AES_128_ENCRYPT;
		sctx->dec = KMCTR_AES_128_DECRYPT;
		break;
	case 24:
		sctx->enc = KMCTR_AES_192_ENCRYPT;
		sctx->dec = KMCTR_AES_192_DECRYPT;
		break;
	case 32:
		sctx->enc = KMCTR_AES_256_ENCRYPT;
		sctx->dec = KMCTR_AES_256_DECRYPT;
		break;
	}

	return aes_set_key(tfm, in_key, key_len);
}

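/*
 * CTR: the shared ctrblk page is filled with consecutive counter
 * values so that KMCTR can process up to PAGE_SIZE bytes per call;
 * a final partial block is encrypted into a stack buffer and only
 * nbytes of it are copied out.
 */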
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
		struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	unsigned int i, n, nbytes;
	u8 buf[AES_BLOCK_SIZE];
	u8 *out, *in;

	if (!walk->nbytes)
		return ret;

	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= AES_BLOCK_SIZE) {
			/* only use complete blocks, max. PAGE_SIZE */
			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
				nbytes & ~(AES_BLOCK_SIZE - 1);
			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
			}
			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
			if (ret < 0 || ret != n)
				return -EIO;
			if (n > AES_BLOCK_SIZE)
				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(ctrblk, AES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
				       AES_BLOCK_SIZE, ctrblk);
		if (ret < 0 || ret != AES_BLOCK_SIZE)
			return -EIO;
		memcpy(out, buf, nbytes);
		crypto_inc(ctrblk, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}
	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-s390",
	.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ctr_aes_set_key,
			.encrypt = ctr_aes_encrypt,
			.decrypt = ctr_aes_decrypt,
		}
	}
};

static int ctr_aes_alg_reg;

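/*
 * Module init: probe which CPACF functions the machine provides,
 * register the basic cipher plus the ECB/CBC modes, and register XTS
 * and CTR only when the corresponding MSA4 functions are available.
 * CTR mode additionally needs one page for the counter block list.
 */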
static int __init aes_s390_init(void)
{
	int ret;

	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_128;
	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_192;
	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)
		return -EOPNOTSUPP;

	/* z9 109 and z9 BC/EC only support 128 bit key length */
	if (keylen_flag == AES_KEYLEN_128)
		pr_info("AES hardware acceleration is only available for"
			" 128-bit keys\n");

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
		ret = crypto_register_alg(&xts_aes_alg);
		if (ret)
			goto xts_aes_err;
		xts_aes_alg_reg = 1;
	}

	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto ctr_aes_err;
		}
		ret = crypto_register_alg(&ctr_aes_alg);
		if (ret) {
			free_page((unsigned long) ctrblk);
			goto ctr_aes_err;
		}
		ctr_aes_alg_reg = 1;
	}

out:
	return ret;

ctr_aes_err:
	crypto_unregister_alg(&xts_aes_alg);
xts_aes_err:
	crypto_unregister_alg(&cbc_aes_alg);
cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	goto out;
}

static void __exit aes_s390_fini(void)
{
	if (ctr_aes_alg_reg) {
		crypto_unregister_alg(&ctr_aes_alg);
		free_page((unsigned long) ctrblk);
	}
	if (xts_aes_alg_reg)
		crypto_unregister_alg(&xts_aes_alg);
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");