/*
 * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
 *
 * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>

MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");

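/*
 * The core SHA-256 block transforms are implemented in assembly:
 * sha256_block_data_order() uses only general purpose registers and may be
 * called from any context, while sha256_block_neon() uses the NEON register
 * file and must run inside a kernel_neon_begin()/kernel_neon_end() section.
 */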
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
                                        unsigned int num_blks);
EXPORT_SYMBOL(sha256_block_data_order);

asmlinkage void sha256_block_neon(u32 *digest, const void *data,
                                  unsigned int num_blks);

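/*
 * Scalar glue: feed the generic sha256_base helpers with the pure assembly
 * block function. No SIMD context is required, so these helpers can be
 * called unconditionally.
 */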
static int sha256_update(struct shash_desc *desc, const u8 *data,
                         unsigned int len)
{
        return sha256_base_do_update(desc, data, len,
                        (sha256_block_fn *)sha256_block_data_order);
}

static int sha256_finup(struct shash_desc *desc, const u8 *data,
                        unsigned int len, u8 *out)
{
        if (len)
                sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);
        sha256_base_do_finalize(desc,
                                (sha256_block_fn *)sha256_block_data_order);

        return sha256_base_finish(desc, out);
}

static int sha256_final(struct shash_desc *desc, u8 *out)
{
        return sha256_finup(desc, NULL, 0, out);
}

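/*
 * Scalar SHA-224/SHA-256 variants, always registered. The priority of 125
 * places them above the generic C implementation but below the NEON
 * variants registered further down.
 */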
static struct shash_alg algs[] = { {
        .digestsize             = SHA256_DIGEST_SIZE,
        .init                   = sha256_base_init,
        .update                 = sha256_update,
        .final                  = sha256_final,
        .finup                  = sha256_finup,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha256",
        .base.cra_driver_name   = "sha256-arm64",
        .base.cra_priority      = 125,
        .base.cra_blocksize     = SHA256_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
}, {
        .digestsize             = SHA224_DIGEST_SIZE,
        .init                   = sha224_base_init,
        .update                 = sha256_update,
        .final                  = sha256_final,
        .finup                  = sha256_finup,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha224",
        .base.cra_driver_name   = "sha224-arm64",
        .base.cra_priority      = 125,
        .base.cra_blocksize     = SHA224_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
} };

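/*
 * NEON glue: fall back to the scalar block function when the NEON unit may
 * not be used (crypto_simd_usable() returns false), otherwise process the
 * input with sha256_block_neon() inside a kernel_neon_begin()/
 * kernel_neon_end() section.
 */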
static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
                              unsigned int len)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        if (!crypto_simd_usable())
                return sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);

        while (len > 0) {
                unsigned int chunk = len;

                /*
                 * Don't hog the CPU for the entire time it takes to process all
                 * input when running on a preemptible kernel, but process the
                 * data block by block instead.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT) &&
                    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
                        chunk = SHA256_BLOCK_SIZE -
                                sctx->count % SHA256_BLOCK_SIZE;

                kernel_neon_begin();
                sha256_base_do_update(desc, data, chunk,
                                      (sha256_block_fn *)sha256_block_neon);
                kernel_neon_end();
                data += chunk;
                len -= chunk;
        }
        return 0;
}

static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
                             unsigned int len, u8 *out)
{
        if (!crypto_simd_usable()) {
                if (len)
                        sha256_base_do_update(desc, data, len,
                                (sha256_block_fn *)sha256_block_data_order);
                sha256_base_do_finalize(desc,
                                (sha256_block_fn *)sha256_block_data_order);
        } else {
                if (len)
                        sha256_update_neon(desc, data, len);
                kernel_neon_begin();
                sha256_base_do_finalize(desc,
                                (sha256_block_fn *)sha256_block_neon);
                kernel_neon_end();
        }
        return sha256_base_finish(desc, out);
}

static int sha256_final_neon(struct shash_desc *desc, u8 *out)
{
        return sha256_finup_neon(desc, NULL, 0, out);
}

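/*
 * NEON accelerated SHA-224/SHA-256 variants, registered at a higher priority
 * (150) than the scalar ones so the crypto API prefers them whenever the CPU
 * implements ASIMD.
 */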
static struct shash_alg neon_algs[] = { {
        .digestsize             = SHA256_DIGEST_SIZE,
        .init                   = sha256_base_init,
        .update                 = sha256_update_neon,
        .final                  = sha256_final_neon,
        .finup                  = sha256_finup_neon,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha256",
        .base.cra_driver_name   = "sha256-arm64-neon",
        .base.cra_priority      = 150,
        .base.cra_blocksize     = SHA256_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
}, {
        .digestsize             = SHA224_DIGEST_SIZE,
        .init                   = sha224_base_init,
        .update                 = sha256_update_neon,
        .final                  = sha256_final_neon,
        .finup                  = sha256_finup_neon,
        .descsize               = sizeof(struct sha256_state),
        .base.cra_name          = "sha224",
        .base.cra_driver_name   = "sha224-arm64-neon",
        .base.cra_priority      = 150,
        .base.cra_blocksize     = SHA224_BLOCK_SIZE,
        .base.cra_module        = THIS_MODULE,
} };

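/*
 * Always register the scalar algorithms; additionally register the NEON
 * variants if the CPU implements ASIMD. If NEON registration fails, roll
 * back the scalar registration and return the error.
 */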
static int __init sha256_mod_init(void)
{
        int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));

        if (ret)
                return ret;

        if (cpu_have_named_feature(ASIMD)) {
                ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
                if (ret)
                        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
        }
        return ret;
}

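/* Unregister the NEON variants first (if registered), then the scalar ones. */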
static void __exit sha256_mod_fini(void)
{
        if (cpu_have_named_feature(ASIMD))
                crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(sha256_mod_init);
module_exit(sha256_mod_fini);