/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

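/*
 * The fallback's shash_desc sits at the start of our descriptor context;
 * padlock_cra_init() grows descsize so the fallback's own state follows
 * it in the same allocation.
 */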
struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

static inline void padlock_output_block(uint32_t *src,
			uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

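/*
 * finup: finish the hash in hardware.  The fallback's partial state is
 * exported, the input is realigned to a block boundary (a small tail is
 * gathered into state.buffer; otherwise the bytes completing the current
 * block are fed to the fallback and the state re-exported), and the
 * intermediate digest is copied into an aligned scratch buffer for the
 * REP XSHA1 instruction, which hashes the remaining bytes and the final
 * padding.  If the total length would overflow an unsigned long, the
 * whole operation is handed back to the fallback.
 */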
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

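/* final() is finup() with no new data; buf is never read since count is 0. */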
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

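/*
 * Same strategy as padlock_sha1_finup(), using the SHA-256 block and
 * digest sizes, struct sha256_state and the REP XSHA256 opcode.
 */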
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

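/*
 * Allocate the software fallback when the transform is created and grow
 * descsize so each descriptor has room for the fallback's state right
 * behind struct padlock_sha_desc.
 */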
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init       = padlock_sha_init,
	.update     = padlock_sha_update,
	.finup      = padlock_sha1_finup,
	.final      = padlock_sha1_final,
	.descsize   = sizeof(struct padlock_sha_desc),
	.base       = {
		.cra_name        = "sha1",
		.cra_driver_name = "sha1-padlock",
		.cra_priority    = PADLOCK_CRA_PRIORITY,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH |
				   CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = SHA1_BLOCK_SIZE,
		.cra_ctxsize     = sizeof(struct padlock_sha_ctx),
		.cra_module      = THIS_MODULE,
		.cra_init        = padlock_cra_init,
		.cra_exit        = padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init       = padlock_sha_init,
	.update     = padlock_sha_update,
	.finup      = padlock_sha256_finup,
	.final      = padlock_sha256_final,
	.descsize   = sizeof(struct padlock_sha_desc),
	.base       = {
		.cra_name        = "sha256",
		.cra_driver_name = "sha256-padlock",
		.cra_priority    = PADLOCK_CRA_PRIORITY,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH |
				   CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = SHA256_BLOCK_SIZE,
		.cra_ctxsize     = sizeof(struct padlock_sha_ctx),
		.cra_module      = THIS_MODULE,
		.cra_init        = padlock_cra_init,
		.cra_exit        = padlock_cra_exit,
	}
};

static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_shash(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_shash(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_shash(&sha1_alg);
	crypto_unregister_shash(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");
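
/*
 * Example (not part of this driver): callers never reference this module
 * directly.  Requesting the generic "sha1" algorithm is enough; when the
 * PadLock Hash Engine is present, PADLOCK_CRA_PRIORITY makes
 * "sha1-padlock" win the selection.  A minimal sketch, assuming process
 * context and hypothetical data/len variables:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
 *	u8 digest[SHA1_DIGEST_SIZE];
 *
 *	if (!IS_ERR(tfm)) {
 *		SHASH_DESC_ON_STACK(desc, tfm);
 *
 *		desc->tfm = tfm;
 *		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *		crypto_shash_digest(desc, data, len, digest);
 *		crypto_free_shash(tfm);
 *	}
 */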