/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

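/*
 * Two sets of SHA1/SHA256 implementations live in this file: the
 * original PadLock ones (e.g. VIA C7), where the hardware can only
 * hash a complete message in one go and a software fallback shash
 * carries the multi-part state, and the VIA Nano ones, where the
 * PadLock Hash Engine can process intermediate blocks directly and
 * needs no fallback.  padlock_init() registers one set or the other
 * based on the CPU model.
 */
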
#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>

struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

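/*
 * For the pre-Nano engine, init/update just drive the software
 * fallback; the hardware is only used in finup/final, once the
 * remainder of the message is known.
 */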
static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_import(&dctx->fallback, in);
}

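/*
 * The engine leaves the digest as little-endian 32-bit words;
 * byte-swap each word to produce the big-endian digest the crypto
 * API expects.
 */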
static inline void padlock_output_block(uint32_t *src,
			uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

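/*
 * Finish the hash in hardware with REP XSHA1.  As set up below, ECX
 * holds the total message length, EAX the number of bytes already
 * hashed, ESI the remaining input, and EDI the aligned result buffer
 * that also carries the intermediate state in on entry.
 */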
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 * PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space =  SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

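/* Finalize with an empty tail: buf only provides a valid pointer,
 * nothing is read from it since count is 0. */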
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

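/*
 * SHA256 twin of the finup path above, using REP XSHA256; only the
 * state type, the block/digest sizes and the opcode differ.
 */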
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 * PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space =  SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

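/*
 * Allocate the software fallback when the tfm is created.  The
 * advertised descsize is grown by the fallback's descsize so that
 * struct padlock_sha_desc has room for the fallback's descriptor
 * context behind the embedded shash_desc.
 */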
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= padlock_sha_init,
	.update		= padlock_sha_update,
	.finup		= padlock_sha1_finup,
	.final		= padlock_sha1_final,
	.export		= padlock_sha_export,
	.import		= padlock_sha_import,
	.descsize	= sizeof(struct padlock_sha_desc),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-padlock",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
		.cra_module		= THIS_MODULE,
		.cra_init		= padlock_cra_init,
		.cra_exit		= padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	= SHA256_DIGEST_SIZE,
	.init		= padlock_sha_init,
	.update		= padlock_sha_update,
	.finup		= padlock_sha256_finup,
	.final		= padlock_sha256_final,
	.export		= padlock_sha_export,
	.import		= padlock_sha_import,
	.descsize	= sizeof(struct padlock_sha_desc),
	.statesize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sha256-padlock",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
		.cra_module		= THIS_MODULE,
		.cra_init		= padlock_cra_init,
		.cra_exit		= padlock_cra_exit,
	}
};

/* Two more shash_alg instances for the hardware multi-part hash
 * engine found on the VIA Nano processor. */
static int padlock_sha1_init_nano(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

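/*
 * Multi-part update for the Nano hash engine.  The asm below drives
 * the engine one run at a time: EAX = -1 requests an intermediate
 * (no final padding) operation and ECX gives the number of 64-byte
 * blocks, with ESI/EDI pointing at the input and at the aligned
 * state/output buffer.
 */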
static int padlock_sha1_update_nano(struct shash_desc *desc,
				    const u8 *data, unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the out buffer to be 128 bytes and 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);

	if ((partial + len) >= SHA1_BLOCK_SIZE) {

		/* Append the bytes in state's buffer to a block to handle */
		if (partial) {
			done = -partial;
			memcpy(sctx->buffer + partial, data,
				done + SHA1_BLOCK_SIZE);
			src = sctx->buffer;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA1_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining bytes of the input data */
		if (len - done >= SHA1_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
	memcpy(sctx->buffer + partial, src, len - done);

	return 0;
}

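/*
 * Finalization is done in software: feed the standard MD padding and
 * the big-endian bit length through the update path, then byte-swap
 * the state into the output digest.
 */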
static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
	padlock_sha1_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);

	return 0;
}

static int padlock_sha256_init_nano(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha256_state){
		.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
			   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 },
	};

	return 0;
}

static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the out buffer to be 128 bytes and 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);

	if ((partial + len) >= SHA256_BLOCK_SIZE) {

		/* Append the bytes in state's buffer to a block to handle */
		if (partial) {
			done = -partial;
			memcpy(sctx->buf + partial, data,
				done + SHA256_BLOCK_SIZE);
			src = sctx->buf;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA256_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining bytes of the input data */
		if (len - done >= SHA256_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA256_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA256_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
	memcpy(sctx->buf + partial, src, len - done);

	return 0;
}

static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *state =
		(struct sha256_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
	padlock_sha256_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);

	return 0;
}

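/*
 * The nano variants keep a bare sha1/sha256_state as their
 * descriptor context, so export/import are flat copies of
 * statesize bytes.
 */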
static int padlock_sha_export_nano(struct shash_desc *desc,
				   void *out)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, statesize);
	return 0;
}

static int padlock_sha_import_nano(struct shash_desc *desc,
				   const void *in)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, statesize);
	return 0;
}

static struct shash_alg sha1_alg_nano = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= padlock_sha1_init_nano,
	.update		= padlock_sha1_update_nano,
	.final		= padlock_sha1_final_nano,
	.export		= padlock_sha_export_nano,
	.import		= padlock_sha_import_nano,
	.descsize	= sizeof(struct sha1_state),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-padlock-nano",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

static struct shash_alg sha256_alg_nano = {
	.digestsize	= SHA256_DIGEST_SIZE,
	.init		= padlock_sha256_init_nano,
	.update		= padlock_sha256_update_nano,
	.final		= padlock_sha256_final_nano,
	.export		= padlock_sha_export_nano,
	.import		= padlock_sha_import_nano,
	.descsize	= sizeof(struct sha256_state),
	.statesize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sha256-padlock-nano",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

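/*
 * Centaur models from 0x0f on are taken to be VIA Nano parts with
 * the enhanced hash engine; older models get the fallback-based
 * implementations.
 */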
static int __init padlock_init(void)
{
	int rc = -ENODEV;
	struct cpuinfo_x86 *c = &cpu_data(0);
	struct shash_alg *sha1;
	struct shash_alg *sha256;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	/* Register the Nano-specific algorithms on a VIA Nano
	 * processor, otherwise the original fallback-based ones. */
	if (c->x86_model < 0x0f) {
		sha1 = &sha1_alg;
		sha256 = &sha256_alg;
	} else {
		sha1 = &sha1_alg_nano;
		sha256 = &sha256_alg_nano;
	}

	rc = crypto_register_shash(sha1);
	if (rc)
		goto out;

	rc = crypto_register_shash(sha256);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(sha1);

out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_model >= 0x0f) {
		crypto_unregister_shash(&sha1_alg_nano);
		crypto_unregister_shash(&sha256_alg_nano);
	} else {
		crypto_unregister_shash(&sha1_alg);
		crypto_unregister_shash(&sha256_alg);
	}
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");
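
/*
 * For reference, a sketch of how other kernel code would reach these
 * algorithms through the generic shash API (error handling elided;
 * the exact helpers may differ between kernel versions):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *
 *	desc->tfm = tfm;
 *	desc->flags = 0;
 *	crypto_shash_digest(desc, data, len, digest);
 *	crypto_free_shash(tfm);
 *
 * With PHE present and this module loaded, "sha1" resolves to
 * "sha1-padlock" or "sha1-padlock-nano" by cra_priority.
 */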