/*
 * s390 implementation of the AES Cipher Algorithm.
 *
 * Copyright IBM Corp. 2005, 2017
 * Author(s): Jan Glauber (jang@de.ibm.com)
 *	      Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *	      Patrick Steuer <patrick.steuer@de.ibm.com>
 *	      Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;
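/*
 * Note: the cpacf_mask_t bitmaps above are filled once at module init via
 * cpacf_query() and consulted with cpacf_test_func() in every setkey; a key
 * size whose function code is not installed leaves fc zero in the context,
 * which routes all later operations to the software fallback tfm.
 */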
struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};
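/*
 * The gcm_sg_walk state above cursors through a scatterlist; buf gathers
 * partial blocks so that the caller always sees a contiguous chunk of at
 * least the requested size (see gcm_sg_walk_go() below).
 */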
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
			   CRYPTO_TFM_RES_MASK);

	return ret;
}
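/*
 * The ternary ladders in the setkey functions below map the AES key length
 * (16/24/32 bytes) to a CPACF function code; fc == 0 covers both an
 * unsupported length and a missing CPU facility, and selects the fallback.
 */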
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_ASYNC |
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}
static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u = {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}
static int fallback_blk_dec(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}
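/*
 * The fallback helpers run the request on an on-stack skcipher request to
 * avoid an allocation; skcipher_request_zero() wipes the on-stack request
 * after use, since it may hold key-derived state.
 */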
static int fallback_blk_enc(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);
	return ret;
}
static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
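/*
 * ecb_aes_crypt() below hands the walk's virtually mapped chunks straight
 * to the KM instruction: n is rounded down to a whole number of AES blocks
 * and blkcipher_walk_done() carries any remainder into the next iteration.
 */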
static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}

	return ret;
}
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, 0, &walk);
}
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
						   CRYPTO_ALG_ASYNC |
						   CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.blk);
}
static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-s390",
	.cra_priority		= 400,	/* combo: aes + ecb */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ecb_aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};
static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
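/*
 * Unlike KM, the KMC instruction chains internally: its parameter block
 * holds the key and the running IV, which is copied back to walk->iv after
 * the loop so the IV visible to the API stays correct across calls.
 */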
static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	ret = blkcipher_walk_virt(desc, walk);
	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
	return ret;
}
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-s390",
	.cra_priority		= 400,	/* combo: aes + cbc */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= cbc_aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};
static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
						     CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}
static int xts_fallback_decrypt(struct blkcipher_desc *desc,
				struct scatterlist *dst, struct scatterlist *src,
				unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
				struct scatterlist *dst, struct scatterlist *src,
				unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return ret;
}
static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_check_key(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return xts_fallback_setkey(tfm, in_key, key_len);

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}
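/*
 * For XTS the PCC instruction first derives the initial tweak from pcc_key
 * and the IV; the result (pcc_param.xts) seeds the KM parameter block, so
 * the data loop below issues plain KM-XTS operations only.
 */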
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	ret = blkcipher_walk_virt(desc, walk);
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}

	return ret;
}
static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}
static struct crypto_alg xts_aes_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-s390",
	.cra_priority		= 400,	/* combo: aes + xts */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_xts_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= xts_fallback_init,
	.cra_exit		= xts_fallback_exit,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aes_set_key,
			.encrypt	= xts_aes_encrypt,
			.decrypt	= xts_aes_decrypt,
		}
	}
};
static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}
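/*
 * ctr_aes_crypt() tries to take ctrblk, a shared page pre-filled with
 * consecutive counter values by __ctrblk_init(), so that KMCTR can process
 * many blocks per invocation; if the trylock fails it falls back to one
 * block at a time using walk->iv directly.
 */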
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    walk->dst.virt.addr, walk->src.virt.addr,
			    n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    buf, walk->src.virt.addr,
			    AES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}
static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg ctr_aes_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-s390",
	.cra_priority		= 400,	/* combo: aes + ctr */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ctr_aes_set_key,
			.encrypt	= ctr_aes_encrypt,
			.decrypt	= ctr_aes_decrypt,
		}
	}
};
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}
static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			      unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}
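/*
 * gcm_sg_walk_go() yields a pointer/length pair of at least minbytesneeded
 * bytes (at most AES_BLOCK_SIZE): directly into the mapped scatterlist
 * page when possible, otherwise assembled in gw->buf across entries.
 */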
static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	/* minbytesneeded <= AES_BLOCK_SIZE */
	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	if (!gw->walk_bytes) {
		scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		gw->walk_bytes_remain -= n;
		scatterwalk_unmap(gw->walk_ptr);
		scatterwalk_advance(&gw->walk, n);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);

		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}

		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
		if (!gw->walk_bytes) {
			scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
			gw->walk_bytes = scatterwalk_clamp(&gw->walk,
							   gw->walk_bytes_remain);
		}
		gw->walk_ptr = scatterwalk_map(&gw->walk);
	}

out:
	return gw->nbytes;
}
static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int n;

	if (gw->ptr == NULL)
		return;

	if (gw->ptr == gw->buf) {
		n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else {
		gw->walk_bytes_remain -= bytesdone;
		scatterwalk_unmap(gw->walk_ptr);
		scatterwalk_advance(&gw->walk, bytesdone);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	}
}
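/*
 * gcm_aes_crypt() drives the KMA instruction chunk by chunk: CPACF_KMA_LAAD
 * is set once the last AAD byte has been passed and CPACF_KMA_LPC once the
 * last text byte has been passed, after which the hardware finalizes the
 * authentication tag in param.t.
 */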
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_sg_walk_start(&gw_in, req->src, len);
	gcm_sg_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
		gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}
static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}
static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base = {
		.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};
static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
	int ret;

	ret = crypto_register_alg(alg);
	if (!ret)
		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
	return ret;
}
static void aes_s390_fini(void)
{
	while (aes_s390_algs_num--)
		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	crypto_unregister_aead(&gcm_aes_aead);
}
static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = aes_s390_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		ret = aes_s390_register_alg(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_alg(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_alg(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_alg(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}
module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");