s390/crypto: add s390 platform specific aes gcm support.
author Harald Freudenberger <freude@linux.vnet.ibm.com>
Mon, 18 Sep 2017 10:48:09 +0000 (12:48 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Fri, 29 Sep 2017 13:51:25 +0000 (15:51 +0200)
This patch introduces gcm(aes) support into the aes_s390 kernel module.

Signed-off-by: Patrick Steuer <patrick.steuer@de.ibm.com>
Signed-off-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/crypto/aes_s390.c
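
For context, and not part of the patch itself: once gcm-aes-s390 is registered, kernel code reaches it through the generic AEAD API. The sketch below is a minimal, hypothetical caller; the name gcm_aes_demo, the all-zero key and IV, and the 8-byte-AAD/16-byte-payload split are illustrative assumptions, not taken from this commit. The 12-byte IV matches the ivsize the driver advertises (GHASH_BLOCK_SIZE - sizeof(u32)).

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative only: encrypt 16 bytes with 8 bytes of AAD, in place. */
static int gcm_aes_demo(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	u8 key[16] = { 0 };	/* placeholder 128-bit key */
	u8 iv[12] = { 0 };	/* gcm(aes) takes a 96-bit IV */
	u8 *buf;		/* aad(8) || plaintext(16) || tag(16) */
	int ret;

	/* masking out CRYPTO_ALG_ASYNC requests a synchronous transform */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(8 + 16 + 16, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	ret = crypto_aead_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_buf;
	ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_buf;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_buf;
	}

	sg_init_one(&sg, buf, 8 + 16 + 16);
	aead_request_set_ad(req, 8);			/* leading AAD bytes */
	aead_request_set_crypt(req, &sg, &sg, 16, iv);
	ret = crypto_aead_encrypt(req);	/* tag lands behind the ciphertext */

	aead_request_free(req);
out_buf:
	kfree(buf);
out_tfm:
	crypto_free_aead(tfm);
	return ret;
}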

index 591cbdf615af046d4e0662241b03c3b113280333..b48e20dd94e96a52f845782c2dd134b015794004 100644
@@ -4,9 +4,11 @@
  * s390 implementation of the AES Cipher Algorithm.
  *
  * s390 Version:
- *   Copyright IBM Corp. 2005, 2007
+ *   Copyright IBM Corp. 2005, 2017
  *   Author(s): Jan Glauber (jang@de.ibm.com)
 *             Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
+ *             Patrick Steuer <patrick.steuer@de.ibm.com>
+ *             Harald Freudenberger <freude@de.ibm.com>
  *
  * Derived from "crypto/aes_generic.c"
  *
 
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
+#include <crypto/ghash.h>
+#include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/fips.h>
+#include <linux/string.h>
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
 static u8 *ctrblk;
 static DEFINE_SPINLOCK(ctrblk_lock);
 
-static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
+                   kma_functions;
 
 struct s390_aes_ctx {
        u8 key[AES_MAX_KEY_SIZE];
@@ -55,6 +62,17 @@ struct s390_xts_ctx {
        struct crypto_skcipher *fallback;
 };
 
+struct gcm_sg_walk {
+       struct scatter_walk walk;
+       unsigned int walk_bytes;
+       u8 *walk_ptr;
+       unsigned int walk_bytes_remain;
+       u8 buf[AES_BLOCK_SIZE];
+       unsigned int buf_bytes;
+       u8 *ptr;
+       unsigned int nbytes;
+};
+
 static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                unsigned int key_len)
 {
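
The gcm_sg_walk structure added above drives a scatterlist walk that always hands its caller at least a requested minimum of contiguous bytes, copying through the block-sized bounce buffer buf only when a scatterlist boundary would otherwise split an AES block. A rough user-space analogue of the technique (all names invented for this sketch; the kernel version additionally defers advancing the zero-copy path to its done() step):

#include <stddef.h>
#include <string.h>

#define BLOCK_SIZE 16

struct frag {			/* stand-in for one scatterlist entry */
	const unsigned char *p;
	size_t len;
};

struct frag_walk {
	const struct frag *frags;
	size_t nfrags;
	size_t idx, off;		/* current fragment and offset into it */
	unsigned char buf[BLOCK_SIZE];	/* bounce buffer for split blocks */
	size_t buf_bytes;		/* bytes gathered in buf so far */
};

/*
 * Return a pointer to at least min_needed contiguous bytes and report in
 * *avail how many are usable, or NULL when the input is exhausted.
 * Precondition (as in the kernel code): min_needed <= BLOCK_SIZE.
 */
static const unsigned char *frag_walk_next(struct frag_walk *w,
					   size_t min_needed, size_t *avail)
{
	while (w->buf_bytes < min_needed) {
		const struct frag *f;
		size_t n;

		if (w->idx == w->nfrags)
			return NULL;
		f = &w->frags[w->idx];
		if (w->buf_bytes == 0 && f->len - w->off >= min_needed) {
			*avail = f->len - w->off;	/* zero-copy case */
			return f->p + w->off;
		}
		n = f->len - w->off;			/* gather into buf */
		if (n > sizeof(w->buf) - w->buf_bytes)
			n = sizeof(w->buf) - w->buf_bytes;
		memcpy(w->buf + w->buf_bytes, f->p + w->off, n);
		w->buf_bytes += n;
		w->off += n;
		if (w->off == f->len) {			/* fragment done */
			w->idx++;
			w->off = 0;
		}
	}
	*avail = w->buf_bytes;
	return w->buf;
}

gcm_sg_walk_go() in the hunk below implements the same two cases: a pointer straight into the mapped scatterlist page when enough bytes are contiguous there, and the bounce buffer otherwise.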
@@ -771,6 +789,267 @@ static struct crypto_alg ctr_aes_alg = {
        }
 };
 
+static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
+                         unsigned int keylen)
+{
+       struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+       switch (keylen) {
+       case AES_KEYSIZE_128:
+               ctx->fc = CPACF_KMA_GCM_AES_128;
+               break;
+       case AES_KEYSIZE_192:
+               ctx->fc = CPACF_KMA_GCM_AES_192;
+               break;
+       case AES_KEYSIZE_256:
+               ctx->fc = CPACF_KMA_GCM_AES_256;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       memcpy(ctx->key, key, keylen);
+       ctx->key_len = keylen;
+       return 0;
+}
+
+static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
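+       /* tag lengths as accepted by generic gcm(aes): 32, 64, 96..128 bits */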
+       switch (authsize) {
+       case 4:
+       case 8:
+       case 12:
+       case 13:
+       case 14:
+       case 15:
+       case 16:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+                             unsigned int len)
+{
+       memset(gw, 0, sizeof(*gw));
+       gw->walk_bytes_remain = len;
+       scatterwalk_start(&gw->walk, sg);
+}
+
+static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+{
+       int n;
+
+       /* minbytesneeded <= AES_BLOCK_SIZE */
+       if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
+               gw->ptr = gw->buf;
+               gw->nbytes = gw->buf_bytes;
+               goto out;
+       }
+
+       if (gw->walk_bytes_remain == 0) {
+               gw->ptr = NULL;
+               gw->nbytes = 0;
+               goto out;
+       }
+
+       gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+       if (!gw->walk_bytes) {
+               scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+               gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+                                                  gw->walk_bytes_remain);
+       }
+       gw->walk_ptr = scatterwalk_map(&gw->walk);
+
+       if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
+               gw->ptr = gw->walk_ptr;
+               gw->nbytes = gw->walk_bytes;
+               goto out;
+       }
+
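+       /* a block spans scatterlist entries: gather it in buf first */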
+       while (1) {
+               n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
+               memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
+               gw->buf_bytes += n;
+               gw->walk_bytes_remain -= n;
+               scatterwalk_unmap(gw->walk_ptr);
+               scatterwalk_advance(&gw->walk, n);
+               scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+
+               if (gw->buf_bytes >= minbytesneeded) {
+                       gw->ptr = gw->buf;
+                       gw->nbytes = gw->buf_bytes;
+                       goto out;
+               }
+
+               gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+                                                  gw->walk_bytes_remain);
+               if (!gw->walk_bytes) {
+                       scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+                       gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+                                                       gw->walk_bytes_remain);
+               }
+               gw->walk_ptr = scatterwalk_map(&gw->walk);
+       }
+
+out:
+       return gw->nbytes;
+}
+
+static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
+       int n;
+
+       if (gw->ptr == NULL)
+               return;
+
+       if (gw->ptr == gw->buf) {
+               n = gw->buf_bytes - bytesdone;
+               if (n > 0) {
+                       memmove(gw->buf, gw->buf + bytesdone, n);
+                       gw->buf_bytes = n;
+               } else
+                       gw->buf_bytes = 0;
+       } else {
+               gw->walk_bytes_remain -= bytesdone;
+               scatterwalk_unmap(gw->walk_ptr);
+               scatterwalk_advance(&gw->walk, bytesdone);
+               scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+       }
+}
+
+static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
+{
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
+       unsigned int ivsize = crypto_aead_ivsize(tfm);
+       unsigned int taglen = crypto_aead_authsize(tfm);
+       unsigned int aadlen = req->assoclen;
+       unsigned int pclen = req->cryptlen;
+       int ret = 0;
+
+       unsigned int len, in_bytes, out_bytes,
+                    min_bytes, bytes, aad_bytes, pc_bytes;
+       struct gcm_sg_walk gw_in, gw_out;
+       u8 tag[GHASH_DIGEST_SIZE];
+
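+       /* parameter block as expected by the CPACF KMA-GCM-AES instruction */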
+       struct {
+               u32 _[3];               /* reserved */
+               u32 cv;                 /* Counter Value */
+               u8 t[GHASH_DIGEST_SIZE];/* Tag */
+               u8 h[AES_BLOCK_SIZE];   /* Hash-subkey */
+               u64 taadl;              /* Total AAD Length */
+               u64 tpcl;               /* Total Plain-/Cipher-text Length */
+               u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
+               u8 k[AES_MAX_KEY_SIZE]; /* Key */
+       } param;
+
+       /*
+        * encrypt
+        *   req->src: aad||plaintext
+        *   req->dst: aad||ciphertext||tag
+        * decrypt
+        *   req->src: aad||ciphertext||tag
+        *   req->dst: aad||plaintext, return 0 or -EBADMSG
+        * aad, plaintext and ciphertext may be empty.
+        */
+       if (flags & CPACF_DECRYPT)
+               pclen -= taglen;
+       len = aadlen + pclen;
+
+       memset(&param, 0, sizeof(param));
+       param.cv = 1;
+       param.taadl = aadlen * 8;
+       param.tpcl = pclen * 8;
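+       /* J0 = IV || 0^31 || 1, the 96-bit IV case of NIST SP 800-38D */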
+       memcpy(param.j0, req->iv, ivsize);
+       *(u32 *)(param.j0 + ivsize) = 1;
+       memcpy(param.k, ctx->key, ctx->key_len);
+
+       gcm_sg_walk_start(&gw_in, req->src, len);
+       gcm_sg_walk_start(&gw_out, req->dst, len);
+
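+       /*
+        * Feed AAD first, then the plain-/ciphertext.  Intermediate KMA
+        * calls must receive whole blocks; only the final portion of each
+        * stream may be partial (signalled by CPACF_KMA_LAAD/_LPC).
+        */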
+       do {
+               min_bytes = min_t(unsigned int,
+                                 aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
+               in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
+               out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
+               bytes = min(in_bytes, out_bytes);
+
+               if (aadlen + pclen <= bytes) {
+                       aad_bytes = aadlen;
+                       pc_bytes = pclen;
+                       flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
+               } else {
+                       if (aadlen <= bytes) {
+                               aad_bytes = aadlen;
+                               pc_bytes = (bytes - aadlen) &
+                                          ~(AES_BLOCK_SIZE - 1);
+                               flags |= CPACF_KMA_LAAD;
+                       } else {
+                               aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
+                               pc_bytes = 0;
+                       }
+               }
+
+               if (aad_bytes > 0)
+                       memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
+
+               cpacf_kma(ctx->fc | flags, &param,
+                         gw_out.ptr + aad_bytes,
+                         gw_in.ptr + aad_bytes, pc_bytes,
+                         gw_in.ptr, aad_bytes);
+
+               gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
+               gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
+               aadlen -= aad_bytes;
+               pclen -= pc_bytes;
+       } while (aadlen + pclen > 0);
+
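+       /* tag follows the data: verify it on decrypt, store it on encrypt */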
+       if (flags & CPACF_DECRYPT) {
+               scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
+               if (crypto_memneq(tag, param.t, taglen))
+                       ret = -EBADMSG;
+       } else
+               scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
+
+       memzero_explicit(&param, sizeof(param));
+       return ret;
+}
+
+static int gcm_aes_encrypt(struct aead_request *req)
+{
+       return gcm_aes_crypt(req, CPACF_ENCRYPT);
+}
+
+static int gcm_aes_decrypt(struct aead_request *req)
+{
+       return gcm_aes_crypt(req, CPACF_DECRYPT);
+}
+
+static struct aead_alg gcm_aes_aead = {
+       .setkey                 = gcm_aes_setkey,
+       .setauthsize            = gcm_aes_setauthsize,
+       .encrypt                = gcm_aes_encrypt,
+       .decrypt                = gcm_aes_decrypt,
+
+       .ivsize                 = GHASH_BLOCK_SIZE - sizeof(u32),
+       .maxauthsize            = GHASH_DIGEST_SIZE,
+       .chunksize              = AES_BLOCK_SIZE,
+
+       .base                   = {
+               .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
+               .cra_blocksize          = 1,
+               .cra_ctxsize            = sizeof(struct s390_aes_ctx),
+               .cra_priority           = 900,
+               .cra_name               = "gcm(aes)",
+               .cra_driver_name        = "gcm-aes-s390",
+               .cra_module             = THIS_MODULE,
+       },
+};
+
 static struct crypto_alg *aes_s390_algs_ptr[5];
 static int aes_s390_algs_num;
+static struct aead_alg *aes_s390_aead_alg;
 
@@ -790,16 +1069,19 @@ static void aes_s390_fini(void)
                crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
        if (ctrblk)
                free_page((unsigned long) ctrblk);
+
+       if (aes_s390_aead_alg)
+               crypto_unregister_aead(aes_s390_aead_alg);
 }
 
 static int __init aes_s390_init(void)
 {
        int ret;
 
-       /* Query available functions for KM, KMC and KMCTR */
+       /* Query available functions for KM, KMC, KMCTR and KMA */
        cpacf_query(CPACF_KM, &km_functions);
        cpacf_query(CPACF_KMC, &kmc_functions);
        cpacf_query(CPACF_KMCTR, &kmctr_functions);
+       cpacf_query(CPACF_KMA, &kma_functions);
 
        if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
@@ -840,6 +1122,14 @@ static int __init aes_s390_init(void)
                        goto out_err;
        }
 
+       if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
+           cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
+           cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
+               ret = crypto_register_aead(&gcm_aes_aead);
+               if (ret)
+                       goto out_err;
+               aes_s390_aead_alg = &gcm_aes_aead;
+       }
+
        return 0;
 out_err:
        aes_s390_fini();
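
Whether the KMA path was registered on a given machine can be checked from user space by binding an AF_ALG socket to the cra_driver_name declared above. A small, hedged sketch (assumes the kernel was built with the AF_ALG user-space interface enabled):

#include <linux/if_alg.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "aead",		/* AEAD socket type */
		.salg_name = "gcm-aes-s390",	/* bind to this driver directly */
	};
	int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("bind");	/* fails when the driver is not registered */
	else
		puts("gcm-aes-s390 is available");
	close(fd);
	return 0;
}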