// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

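/*
 * The masks above are filled once at module init by cpacf_query() and
 * consulted via cpacf_test_func() in the setkey handlers to decide
 * between the CPACF instruction and the software fallback.
 */
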
struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

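/*
 * Scatter-gather walk helper for gcm_aes_crypt(): hands out pointers
 * to contiguous runs of at least the requested number of bytes, going
 * through the AES_BLOCK_SIZE bounce buffer 'buf' whenever a single
 * scatterlist entry cannot satisfy the request on its own.
 */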
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				   CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

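/*
 * aes_set_key() and the ecb/cbc/ctr/xts setkey handlers below follow
 * the same pattern: map the key length to a CPACF function code, test
 * at setkey time whether the machine provides it, and otherwise route
 * the tfm to the software fallback.
 */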
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_ASYNC |
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
						       CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
						   CRYPTO_ALG_ASYNC |
						   CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-s390",
	.cra_priority		= 401,	/* combo: aes + ecb + 1 */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ecb_aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

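/*
 * The KMC parameter block carries the chaining value (IV) followed by
 * the key; the instruction updates the chaining value in place, which
 * is why it is copied back to walk->iv after the loop below.
 */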
static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	ret = blkcipher_walk_virt(desc, walk);
	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-s390",
	.cra_priority		= 402,	/* ecb-aes-s390 + 1 */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= cbc_aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
						     CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_check_key(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return xts_fallback_setkey(tfm, in_key, key_len);

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

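/*
 * For the XTS function codes the CPACF parameter block for 128-bit
 * keys is 16 bytes shorter than for 256-bit keys, so an offset of 16
 * (key_len & 0x10) skips the unused first half of the 32-byte key
 * fields below; the offset is 0 in the 256-bit case. cpacf_pcc()
 * derives the initial tweak from pcc_key and the IV; its output
 * (pcc_param.xts) seeds xts_param.init for the KM loop.
 */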
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	ret = blkcipher_walk_virt(desc, walk);
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-s390",
	.cra_priority		= 402,	/* ecb-aes-s390 + 1 */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_xts_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= xts_fallback_init,
	.cra_exit		= xts_fallback_exit,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aes_set_key,
			.encrypt	= xts_aes_encrypt,
			.decrypt	= xts_aes_decrypt,
		}
	}
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

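/*
 * Fill the page at 'ctrptr' with a list of successive counter values,
 * starting with *iv, so that one KMCTR invocation can process up to
 * PAGE_SIZE bytes at once; returns the number of bytes covered.
 */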
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

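/*
 * The page-sized ctrblk buffer is shared by all ctr-aes-s390 requests,
 * so it is taken with spin_trylock(): if the lock is contended, the
 * loop below simply processes one block at a time, using walk->iv
 * directly as the counter.
 */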
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    walk->dst.virt.addr, walk->src.virt.addr,
			    n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    buf, walk->src.virt.addr,
			    AES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-s390",
	.cra_priority		= 402,	/* ecb-aes-s390 + 1 */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ctr_aes_set_key,
			.encrypt	= ctr_aes_encrypt,
			.decrypt	= ctr_aes_decrypt,
		}
	}
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

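/* Only tag lengths of 32, 64 and 96-128 bits are accepted for GCM */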
static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(&gw->walk);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

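/*
 * Mark 'bytesdone' input bytes as consumed: data handed out from the
 * bounce buffer keeps its unprocessed tail for the next round, data
 * handed out straight from the scatterlist just advances the walk.
 */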
static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;

		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

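/*
 * Each KMA invocation in the loop below handles an aad chunk, a
 * plain-/ciphertext chunk, or both; CPACF_KMA_LAAD and CPACF_KMA_LPC
 * are or-ed into the function code once the final aad respectively
 * plain-/ciphertext bytes are included.
 */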
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
	int ret;

	ret = crypto_register_alg(alg);
	if (!ret)
		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	while (aes_s390_algs_num--)
		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = aes_s390_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		ret = aes_s390_register_alg(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_alg(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_alg(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_alg(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");