 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
 * Supplemental SSE3 instructions.
 *
 * This file is based on sha1_generic.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) Mathias Krause <minipli@googlemail.com>
 * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>

asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
				     unsigned int rounds);
#ifdef CONFIG_AS_AVX
asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
				   unsigned int rounds);
#endif

#ifdef CONFIG_AS_AVX2
#define SHA1_AVX2_BLOCK_OPTSIZE	4	/* optimal 4*64 bytes of SHA1 blocks */

asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
				    unsigned int rounds);
#endif

static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int);
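
/* Start a new hash: load the five SHA-1 initial values; buffer and count are zeroed. */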
static int sha1_ssse3_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}
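
/*
 * Core update helper: complete any partially filled block, run the selected
 * assembler transform over all full blocks, and buffer the remainder.
 * Callers must ensure the FPU is usable (see sha1_ssse3_update()).
 */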
static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len, unsigned int partial)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int done = 0;

	sctx->count += len;

	if (partial) {
		done = SHA1_BLOCK_SIZE - partial;
		memcpy(sctx->buffer + partial, data, done);
		sha1_transform_asm(sctx->state, sctx->buffer, 1);
	}

	if (len - done >= SHA1_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;

		sha1_transform_asm(sctx->state, data + done, rounds);
		done += rounds * SHA1_BLOCK_SIZE;
	}

	memcpy(sctx->buffer, data + done, len - done);

	return 0;
}
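
/* .update hook: buffer short input entirely; otherwise process it in block-sized chunks. */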
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
	int res;

	/* Handle the fast case right here */
	if (partial + len < SHA1_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->buffer + partial, data, len);

		return 0;
	}
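
	/*
	 * Only use the SIMD transform when the FPU is usable in this context;
	 * otherwise fall back to the generic C implementation.
	 */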
	if (!irq_fpu_usable()) {
		res = crypto_sha1_update(desc, data, len);
	} else {
		kernel_fpu_begin();
		res = __sha1_ssse3_update(desc, data, len, partial);
		kernel_fpu_end();
	}

	return res;
}

/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
	if (!irq_fpu_usable()) {
		crypto_sha1_update(desc, padding, padlen);
		crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_fpu_begin();
		/* We need to fill a whole block for __sha1_ssse3_update() */
		if (padlen <= 56) {
			sctx->count += padlen;
			memcpy(sctx->buffer + index, padding, padlen);
		} else {
			__sha1_ssse3_update(desc, padding, padlen, index);
		}
		__sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56);
		kernel_fpu_end();
	}

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}
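
/* export/import copy the raw sha1_state so a partial hash can be saved and resumed. */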
static int sha1_ssse3_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_ssse3_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

#ifdef CONFIG_AS_AVX2
static void sha1_apply_transform_avx2(u32 *digest, const char *data,
				      unsigned int rounds)
{
	/* Select the optimal transform based on data block size */
	if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE)
		sha1_transform_avx2(digest, data, rounds);
	else
		sha1_transform_avx(digest, data, rounds);
}
#endif

static struct shash_alg alg = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= sha1_ssse3_init,
	.update		= sha1_ssse3_update,
	.final		= sha1_ssse3_final,
	.export		= sha1_ssse3_export,
	.import		= sha1_ssse3_import,
	.descsize	= sizeof(struct sha1_state),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name	 = "sha1",
		.cra_driver_name = "sha1-ssse3",
		.cra_priority	 = 150,
		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	 = SHA1_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
	}
};
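
/* AVX is only usable if the CPU advertises it and the OS has enabled SSE/YMM state in XCR0. */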
#ifdef CONFIG_AS_AVX
static bool __init avx_usable(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave)
		return false;

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		pr_info("AVX detected but unusable.\n");
		return false;
	}

	return true;
}

#ifdef CONFIG_AS_AVX2
static bool __init avx2_usable(void)
{
	if (avx_usable() && cpu_has_avx2 && boot_cpu_has(X86_FEATURE_BMI1) &&
	    boot_cpu_has(X86_FEATURE_BMI2))
		return true;

	return false;
}
#endif
#endif
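
/*
 * Module init: pick the fastest usable transform, starting with SSSE3 and
 * letting AVX and then AVX2 override it when available.
 */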
static int __init sha1_ssse3_mod_init(void)
{
	char *algo_name;

	/* test for SSSE3 first */
	if (cpu_has_ssse3) {
		sha1_transform_asm = sha1_transform_ssse3;
		algo_name = "SSSE3";
	}

#ifdef CONFIG_AS_AVX
	/* allow AVX to override SSSE3, it's a little faster */
	if (avx_usable()) {
		sha1_transform_asm = sha1_transform_avx;
		algo_name = "AVX";
#ifdef CONFIG_AS_AVX2
		/* allow AVX2 to override AVX, it's a little faster */
		if (avx2_usable()) {
			sha1_transform_asm = sha1_apply_transform_avx2;
			algo_name = "AVX2";
		}
#endif
	}
#endif

	if (sha1_transform_asm) {
		pr_info("Using %s optimized SHA-1 implementation\n", algo_name);
		return crypto_register_shash(&alg);
	}
	pr_info("Neither AVX nor AVX2 nor SSSE3 is available/usable.\n");

	return -ENODEV;
}

static void __exit sha1_ssse3_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_ssse3_mod_init);
module_exit(sha1_ssse3_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS("sha1");