/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

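/*
 * Page of pre-computed counter blocks shared by all CTR mode users of
 * this module; access is serialized by ctrblk_lock.  The cpacf masks
 * below cache which KM, KMC, KMCTR and KMA function codes the machine
 * provides (filled in by the cpacf_query() calls in aes_s390_init()).
 */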
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

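/*
 * Key the software fallback cipher, forwarding the request flags to it
 * and copying the resulting CRYPTO_TFM_RES_* flags back so that error
 * reporting matches the non-accelerated path.
 */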
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

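/*
 * Process the walk in runs of complete AES blocks; each run is handed
 * to the KM instruction in a single invocation.
 */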
static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
						   CRYPTO_ALG_ASYNC |
						   CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-s390",
	.cra_priority		= 400,	/* combo: aes + ecb */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ecb_aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

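/*
 * KMC takes a parameter block consisting of the chaining value (IV)
 * followed by the key and updates the chaining value in place, so the
 * final IV is copied back into the walk after the loop.
 */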
static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	ret = blkcipher_walk_virt(desc, walk);
	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-s390",
	.cra_priority		= 400,	/* combo: aes + cbc */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= cbc_aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
						     CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_check_key(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return xts_fallback_setkey(tfm, in_key, key_len);

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

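/*
 * The PCC instruction pre-computes the initial XTS tweak from the
 * second subkey and the IV; the result (pcc_param.xts) is then passed
 * to KM together with the first subkey.  The 0x10 offset selects the
 * proper parameter block slot for 128-bit versus 256-bit subkeys.
 */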
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	ret = blkcipher_walk_virt(desc, walk);
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-s390",
	.cra_priority		= 400,	/* combo: aes + xts */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_xts_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= xts_fallback_init,
	.cra_exit		= xts_fallback_exit,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aes_set_key,
			.encrypt	= xts_aes_encrypt,
			.decrypt	= xts_aes_decrypt,
		}
	}
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

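/*
 * Fill the counter page with consecutive counter values, starting from
 * iv.  Returns how many bytes worth of counter blocks were prepared
 * (complete blocks only, at most PAGE_SIZE).
 */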
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

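/*
 * Use the shared ctrblk page only if it can be locked without
 * contention; otherwise fall back to one KMCTR invocation per block
 * with walk->iv as the counter.
 */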
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    walk->dst.virt.addr, walk->src.virt.addr,
			    n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    buf, walk->src.virt.addr,
			    AES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-s390",
	.cra_priority		= 400,	/* combo: aes + ctr */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ctr_aes_set_key,
			.encrypt	= ctr_aes_encrypt,
			.decrypt	= ctr_aes_decrypt,
		}
	}
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

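/*
 * The gcm_sg_walk helpers present the request scatterlists to the KMA
 * instruction as contiguous chunks.  Data that is not contiguous
 * enough is staged through the AES_BLOCK_SIZE bounce buffer gw->buf.
 */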
static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			      unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

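/*
 * Make at least minbytesneeded bytes (<= AES_BLOCK_SIZE) available at
 * gw->ptr, either directly in the mapped scatterlist page or gathered
 * into gw->buf.  Returns the number of bytes available, 0 when the
 * walk is exhausted.
 */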
static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	/* minbytesneeded <= AES_BLOCK_SIZE */
	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	if (!gw->walk_bytes) {
		scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		gw->walk_bytes_remain -= n;
		scatterwalk_unmap(&gw->walk);
		scatterwalk_advance(&gw->walk, n);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);

		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}

		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
		if (!gw->walk_bytes) {
			scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
			gw->walk_bytes = scatterwalk_clamp(&gw->walk,
							   gw->walk_bytes_remain);
		}
		gw->walk_ptr = scatterwalk_map(&gw->walk);
	}

out:
	return gw->nbytes;
}

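/*
 * Account for bytesdone bytes consumed by the caller: shift any
 * unconsumed remainder to the front of the bounce buffer, or advance
 * the scatterlist walk accordingly.
 */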
static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int n;

	if (gw->ptr == NULL)
		return;

	if (gw->ptr == gw->buf) {
		n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else {
		gw->walk_bytes_remain -= bytesdone;
		scatterwalk_unmap(&gw->walk);
		scatterwalk_advance(&gw->walk, bytesdone);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	}
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_sg_walk_start(&gw_in, req->src, len);
	gcm_sg_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
		gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

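/*
 * Bookkeeping for the successfully registered crypto_algs so that
 * aes_s390_fini() can unregister exactly those, both on module exit
 * and when a partially completed init has to be rolled back.
 */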
static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
	int ret;

	ret = crypto_register_alg(alg);
	if (!ret)
		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	while (aes_s390_algs_num--)
		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	crypto_unregister_aead(&gcm_aes_aead);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = aes_s390_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		ret = aes_s390_register_alg(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_alg(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_alg(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_alg(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");