crypto: arm64/sha1-ce - add non-SIMD generic fallback
author    Ard Biesheuvel <ard.biesheuvel@linaro.org>
          Mon, 24 Jul 2017 10:28:08 +0000 (11:28 +0100)
committer Herbert Xu <herbert@gondor.apana.org.au>
          Fri, 4 Aug 2017 01:27:18 +0000 (09:27 +0800)
The arm64 kernel will shortly disallow nested kernel mode NEON, so
add a fallback to scalar C code that can be invoked in that case.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
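
For context, the pattern this patch introduces is a guard in front of every
NEON code path: if kernel-mode NEON cannot be used at this point, dispatch to
the generic scalar implementation instead. A minimal sketch of that shape
(illustrative only; example_update() and generic_scalar_update() are
placeholders, not functions from this patch):

    static int example_update(struct shash_desc *desc, const u8 *data,
                              unsigned int len)
    {
            /*
             * NEON must not be touched from a context where it is
             * unavailable (e.g. hardirq), so fall back to scalar C code.
             */
            if (!may_use_simd())
                    return generic_scalar_update(desc, data, len);

            kernel_neon_begin();            /* claim the NEON unit */
            /* ... SIMD-accelerated block transform ... */
            kernel_neon_end();              /* release the NEON unit */
            return 0;
    }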
arch/arm64/crypto/Kconfig
arch/arm64/crypto/sha1-ce-glue.c

diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 7d75a363e31727bffca45cffc482d6c61d830084..5d5953545dad5b8f3f9c28ef6b9cc8e821c884a9 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -18,8 +18,9 @@ config CRYPTO_SHA512_ARM64
 
 config CRYPTO_SHA1_ARM64_CE
        tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
-       depends on ARM64 && KERNEL_MODE_NEON
+       depends on KERNEL_MODE_NEON
        select CRYPTO_HASH
+       select CRYPTO_SHA1
 
 config CRYPTO_SHA2_ARM64_CE
        tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index ea319c055f5dfbee35a31c68ceb005501a8f26b7..efbeb3e0dcfb048099095a971512d8a9a1f62595 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -1,7 +1,7 @@
 /*
  * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
  *
- * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -9,6 +9,7 @@
  */
 
 #include <asm/neon.h>
+#include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
 #include <crypto/sha.h>
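
The newly included <asm/simd.h> provides may_use_simd(), which reports whether
kernel-mode SIMD (NEON) may be used in the current context. The asm-generic
version amounts to the sketch below (paraphrased; the arm64 implementation can
additionally refuse while the NEON unit is already claimed by a nested user):

    static __must_check inline bool may_use_simd(void)
    {
            /* SIMD register state cannot be used in interrupt context */
            return !in_interrupt();
    }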
@@ -37,8 +38,11 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 {
        struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
+       if (!may_use_simd())
+               return crypto_sha1_update(desc, data, len);
+
        sctx->finalize = 0;
-       kernel_neon_begin_partial(16);
+       kernel_neon_begin();
        sha1_base_do_update(desc, data, len,
                            (sha1_block_fn *)sha1_ce_transform);
        kernel_neon_end();
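
For reference, the sctx->finalize flag set above lives in sha1_ce_state, which
wraps the generic sha1_state and adds the flag consumed by the assembly; its
definition appears earlier in sha1-ce-glue.c and is unchanged by this patch:

    struct sha1_ce_state {
            struct sha1_state       sst;
            u32                     finalize;
    };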
@@ -52,13 +56,16 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
        struct sha1_ce_state *sctx = shash_desc_ctx(desc);
        bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
 
+       if (!may_use_simd())
+               return crypto_sha1_finup(desc, data, len, out);
+
        /*
         * Allow the asm code to perform the finalization if there is no
         * partial data and the input is a round multiple of the block size.
         */
        sctx->finalize = finalize;
 
-       kernel_neon_begin_partial(16);
+       kernel_neon_begin();
        sha1_base_do_update(desc, data, len,
                            (sha1_block_fn *)sha1_ce_transform);
        if (!finalize)
@@ -71,8 +78,11 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
        struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
+       if (!may_use_simd())
+               return crypto_sha1_finup(desc, NULL, 0, out);
+
        sctx->finalize = 0;
-       kernel_neon_begin_partial(16);
+       kernel_neon_begin();
        sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
        kernel_neon_end();
        return sha1_base_finish(desc, out);
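
Callers are unaffected by the fallback: the driver is still reached through
the normal shash API, and the dispatch between NEON and scalar code happens
inside the update/finup/final handlers above. A minimal, illustrative
in-kernel user (hypothetical function name, error handling trimmed) might
look like:

    #include <crypto/hash.h>
    #include <crypto/sha.h>

    static int sha1_digest_example(const u8 *data, unsigned int len,
                                   u8 out[SHA1_DIGEST_SIZE])
    {
            struct crypto_shash *tfm;
            int err;

            /* resolves to sha1-ce when the driver is loaded and preferred */
            tfm = crypto_alloc_shash("sha1", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            {
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    desc->flags = 0;        /* no CRYPTO_TFM_REQ_MAY_SLEEP */
                    err = crypto_shash_digest(desc, data, len, out);
            }

            crypto_free_shash(tfm);
            return err;
    }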