/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * The basic framework for this code came from the reference
 * implementation for MD5.  That implementation is Copyright (C)
 * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
 *
 * License to copy and use this software is granted provided that it
 * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
 * Algorithm" in all material mentioning or referencing this software
 * or this function.
 *
 * License is also granted to make and use derivative works provided
 * that such works are identified as "derived from the RSA Data
 * Security, Inc. MD5 Message-Digest Algorithm" in all material
 * mentioning or referencing the derived work.
 *
 * RSA Data Security, Inc. makes no representations concerning either
 * the merchantability of this software or the suitability of this
 * software for any particular purpose. It is provided "as is"
 * without express or implied warranty of any kind.
 *
 * These notices must be retained in any copies of any part of this
 * documentation and/or software.
 *
 * NOTE: Cleaned-up and optimized version of SHA1, based on the FIPS 180-1
 * standard, available at http://www.itl.nist.gov/fipspubs/fip180-1.htm
 * Not as fast as one would like -- further optimizations are encouraged
 * and appreciated.
 */

#include <sys/zfs_context.h>
#include <sha1/sha1.h>
#include <sha1/sha1_consts.h>

#ifdef _LITTLE_ENDIAN
#include <sys/byteorder.h>
#define	HAVE_HTONL
#endif

#define	_RESTRICT_KYWD

static void Encode(uint8_t *, const uint32_t *, size_t);

#if	defined(__sparc)

#define	SHA1_TRANSFORM(ctx, in) \
	SHA1Transform((ctx)->state[0], (ctx)->state[1], (ctx)->state[2], \
	    (ctx)->state[3], (ctx)->state[4], (ctx), (in))

static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
    SHA1_CTX *, const uint8_t *);

#elif	defined(__amd64)

#define	SHA1_TRANSFORM(ctx, in) sha1_block_data_order((ctx), (in), 1)
#define	SHA1_TRANSFORM_BLOCKS(ctx, in, num) sha1_block_data_order((ctx), \
	    (in), (num))

void sha1_block_data_order(SHA1_CTX *ctx, const void *inpp, size_t num_blocks);

#else

#define	SHA1_TRANSFORM(ctx, in) SHA1Transform((ctx), (in))

static void SHA1Transform(SHA1_CTX *, const uint8_t *);

#endif


static uint8_t PADDING[64] = { 0x80, /* all zeros */ };

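/*
 * The PADDING array above supplies the mandatory 0x80 terminator byte
 * followed by zero bytes; SHA1Final() below decides how many of these
 * bytes to append for a given message length.
 */
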
/*
 * F, G, and H are the basic SHA1 functions.
 */
#define	F(b, c, d)	(((b) & (c)) | ((~b) & (d)))
#define	G(b, c, d)	((b) ^ (c) ^ (d))
#define	H(b, c, d)	(((b) & (c)) | (((b)|(c)) & (d)))

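/*
 * In FIPS 180-1 terms, F() is the "choose" function used in rounds 0-19,
 * G() is the parity (xor) function used in rounds 20-39 and 60-79, and
 * H() is the majority function used in rounds 40-59.
 */
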
/*
 * ROTATE_LEFT rotates x left n bits.
 */

#if	defined(__GNUC__) && defined(_LP64)
static __inline__ uint64_t
ROTATE_LEFT(uint64_t value, uint32_t n)
{
	uint32_t t32;

	t32 = (uint32_t)value;
	return ((t32 << n) | (t32 >> (32 - n)));
}

#else

#define	ROTATE_LEFT(x, n)	\
	(((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))

#endif
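
/*
 * For example, ROTATE_LEFT(0x80000001, 1) == 0x00000003: the high-order
 * bit wraps around into bit 0.  NBBY is the number of bits per byte (8),
 * so the generic macro rotates within the full width of its operand.
 */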


/*
 * SHA1Init()
 *
 * purpose: initializes the sha1 context and begins an sha1 digest operation
 *   input: SHA1_CTX *	: the context to initialize.
 *  output: void
 */

void
SHA1Init(SHA1_CTX *ctx)
{
	ctx->count[0] = ctx->count[1] = 0;

	/*
	 * load magic initialization constants. Tell lint
	 * that these constants are unsigned by using U.
	 */

	ctx->state[0] = 0x67452301U;
	ctx->state[1] = 0xefcdab89U;
	ctx->state[2] = 0x98badcfeU;
	ctx->state[3] = 0x10325476U;
	ctx->state[4] = 0xc3d2e1f0U;
}

void
SHA1Update(SHA1_CTX *ctx, const void *inptr, size_t input_len)
{
	uint32_t i, buf_index, buf_len;
	const uint8_t *input = inptr;
#if defined(__amd64)
	uint32_t block_count;
#endif	/* __amd64 */

	/* check for noop */
	if (input_len == 0)
		return;

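	/*
	 * ctx->count[] holds the 64-bit length of the message in bits,
	 * split across two 32-bit words: count[1] is the low word and
	 * count[0] the high word, so (count[1] >> 3) & 0x3F below is the
	 * byte offset into the current 64-byte block.
	 */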
	/* compute number of bytes mod 64 */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/* update number of bits */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;

	ctx->count[0] += (input_len >> 29);

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0.  if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */

		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			SHA1_TRANSFORM(ctx, ctx->buf_un.buf8);
			i = buf_len;
		}

#if !defined(__amd64)
		for (; i + 63 < input_len; i += 64)
			SHA1_TRANSFORM(ctx, &input[i]);
#else
		block_count = (input_len - i) >> 6;
		if (block_count > 0) {
			SHA1_TRANSFORM_BLOCKS(ctx, &input[i], block_count);
			i += block_count << 6;
		}
#endif	/* !__amd64 */

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}

/*
 * SHA1Final()
 *
 * purpose: ends an sha1 digest operation, finalizing the message digest and
 *          zeroing the context.
 *   input: uchar_t *	: A buffer to store the digest.
 *			: The function actually uses void* because many
 *			: callers pass things other than uchar_t here.
 *          SHA1_CTX *	: the context to finalize, save, and zero
 *  output: void
 */

void
SHA1Final(void *digest, SHA1_CTX *ctx)
{
	uint8_t bitcount_be[sizeof (ctx->count)];
	uint32_t index = (ctx->count[1] >> 3) & 0x3f;

	/* store bit count, big endian */
	Encode(bitcount_be, ctx->count, sizeof (bitcount_be));

	/* pad out to 56 mod 64 */
	SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
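	/*
	 * The expression above always pads with 1 to 64 bytes.  For
	 * example, a 3-byte message has index == 3 and gets 53 bytes of
	 * padding, so the 8-byte length appended below completes a single
	 * 64-byte block; a 60-byte message (index == 60) gets 60 bytes of
	 * padding and the length then spills into a second block.
	 */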

	/* append length (before padding) */
	SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));

	/* store state in digest */
	Encode(digest, ctx->state, sizeof (ctx->state));

	/* zeroize sensitive information */
	bzero(ctx, sizeof (*ctx));
}

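/*
 * Typical usage (an illustrative sketch only; within ZFS these routines
 * are normally driven through the ICP crypto framework rather than called
 * directly, and `data'/`data_len' below are placeholder names):
 *
 *	SHA1_CTX ctx;
 *	uint8_t digest[20];
 *
 *	SHA1Init(&ctx);
 *	SHA1Update(&ctx, data, data_len);
 *	SHA1Final(digest, &ctx);
 *
 * SHA1Update() may be called any number of times; splitting the input
 * across calls produces the same 20-byte digest as a single call, and
 * SHA1Final() zeroes the context when it is done.
 */
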

#if !defined(__amd64)

typedef uint32_t sha1word;

/*
 * sparc optimization:
 *
 * on the sparc, we can load big endian 32-bit data easily.  note that
 * special care must be taken to ensure the address is 32-bit aligned.
 * in the interest of speed, we don't check to make sure, since
 * careful programming can guarantee this for us.
 */

#if	defined(_BIG_ENDIAN)
#define	LOAD_BIG_32(addr)	(*(uint32_t *)(addr))

#elif	defined(HAVE_HTONL)
#define	LOAD_BIG_32(addr) htonl(*((uint32_t *)(addr)))

#else
/* little endian -- will work on big endian, but slowly */
#define	LOAD_BIG_32(addr)	\
	(((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
#endif	/* _BIG_ENDIAN */
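
/*
 * Whichever definition is used, LOAD_BIG_32() interprets the four bytes at
 * `addr' as a big-endian word: e.g. the bytes 0x12 0x34 0x56 0x78 load as
 * 0x12345678 regardless of the host byte order.
 */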

/*
 * SHA1Transform()
 */
#if	defined(W_ARRAY)
#define	W(n) w[n]
#else	/* !defined(W_ARRAY) */
#define	W(n) w_ ## n
#endif	/* !defined(W_ARRAY) */

#if	defined(__sparc)


/*
 * sparc register window optimization:
 *
 * `a', `b', `c', `d', and `e' are passed into SHA1Transform
 * explicitly since it increases the number of registers available to
 * the compiler.  under this scheme, these variables can be held in
 * %i0 - %i4, which leaves more local and out registers available.
 *
 * purpose: sha1 transformation -- updates the digest based on `block'
 *   input: uint32_t : bytes  1 -  4 of the digest
 *          uint32_t : bytes  5 -  8 of the digest
 *          uint32_t : bytes  9 - 12 of the digest
 *          uint32_t : bytes 13 - 16 of the digest
 *          uint32_t : bytes 17 - 20 of the digest
 *          SHA1_CTX * : the context to update
 *          uint8_t [64]: the block to use to update the digest
 *  output: void
 */


void
SHA1Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
    SHA1_CTX *ctx, const uint8_t blk[64])
{
	/*
	 * sparc optimization:
	 *
	 * while it is somewhat counter-intuitive, on sparc, it is
	 * more efficient to place all the constants used in this
	 * function in an array and load the values out of the array
	 * than to manually load the constants.  this is because
	 * setting a register to a 32-bit value takes two ops in most
	 * cases: a `sethi' and an `or', but loading a 32-bit value
	 * from memory only takes one `ld' (or `lduw' on v9).  while
	 * this increases memory usage, the compiler can find enough
	 * other things to do while waiting so that the pipeline does
	 * not stall.  additionally, it is likely that many of these
	 * constants are cached so that later accesses do not even go
	 * out to the bus.
	 *
	 * this array is declared `static' to keep the compiler from
	 * having to bcopy() this array onto the stack frame of
	 * SHA1Transform() each time it is called -- which is
	 * unacceptably expensive.
	 *
	 * the `const' is to ensure that callers are good citizens and
	 * do not try to munge the array.  since these routines are
	 * going to be called from inside multithreaded kernelland,
	 * this is a good safety check. -- `sha1_consts' will end up in
	 * .rodata.
	 *
	 * unfortunately, loading from an array in this manner hurts
	 * performance under Intel.  So, there is a macro,
	 * SHA1_CONST(), used in SHA1Transform(), that either expands to
	 * a reference to this array, or to the actual constant,
	 * depending on what platform this code is compiled for.
	 */


	static const uint32_t sha1_consts[] = {
		SHA1_CONST_0, SHA1_CONST_1, SHA1_CONST_2, SHA1_CONST_3
	};


	/*
	 * general optimization:
	 *
	 * use individual integers instead of using an array.  this is a
	 * win, although the amount it wins by seems to vary quite a bit.
	 */


	uint32_t w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7;
	uint32_t w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;


	/*
	 * sparc optimization:
	 *
	 * if `block' is already aligned on a 4-byte boundary, use
	 * LOAD_BIG_32() directly.  otherwise, bcopy() into a
	 * buffer that *is* aligned on a 4-byte boundary and then do
	 * the LOAD_BIG_32() on that buffer.  benchmarks have shown
	 * that using the bcopy() is better than loading the bytes
	 * individually and doing the endian-swap by hand.
	 *
	 * even though it's quite tempting to do:
	 *
	 *	blk = bcopy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32));
	 *
	 * and only have one set of LOAD_BIG_32()'s, the compiler
	 * *does not* like that, so please resist the urge.
	 */


	if ((uintptr_t)blk & 0x3) {		/* not 4-byte aligned? */
		bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
		w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15);
		w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14);
		w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13);
		w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12);
		w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11);
		w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10);
		w_9 = LOAD_BIG_32(ctx->buf_un.buf32 + 9);
		w_8 = LOAD_BIG_32(ctx->buf_un.buf32 + 8);
		w_7 = LOAD_BIG_32(ctx->buf_un.buf32 + 7);
		w_6 = LOAD_BIG_32(ctx->buf_un.buf32 + 6);
		w_5 = LOAD_BIG_32(ctx->buf_un.buf32 + 5);
		w_4 = LOAD_BIG_32(ctx->buf_un.buf32 + 4);
		w_3 = LOAD_BIG_32(ctx->buf_un.buf32 + 3);
		w_2 = LOAD_BIG_32(ctx->buf_un.buf32 + 2);
		w_1 = LOAD_BIG_32(ctx->buf_un.buf32 + 1);
		w_0 = LOAD_BIG_32(ctx->buf_un.buf32 + 0);
	} else {
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_15 = LOAD_BIG_32(blk + 60);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_14 = LOAD_BIG_32(blk + 56);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_13 = LOAD_BIG_32(blk + 52);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_12 = LOAD_BIG_32(blk + 48);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_11 = LOAD_BIG_32(blk + 44);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_10 = LOAD_BIG_32(blk + 40);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_9 = LOAD_BIG_32(blk + 36);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_8 = LOAD_BIG_32(blk + 32);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_7 = LOAD_BIG_32(blk + 28);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_6 = LOAD_BIG_32(blk + 24);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_5 = LOAD_BIG_32(blk + 20);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_4 = LOAD_BIG_32(blk + 16);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_3 = LOAD_BIG_32(blk + 12);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_2 = LOAD_BIG_32(blk + 8);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_1 = LOAD_BIG_32(blk + 4);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		w_0 = LOAD_BIG_32(blk + 0);
	}
#else	/* !defined(__sparc) */

void /* CSTYLED */
SHA1Transform(SHA1_CTX *ctx, const uint8_t blk[64])
{
	/* CSTYLED */
	sha1word a = ctx->state[0];
	sha1word b = ctx->state[1];
	sha1word c = ctx->state[2];
	sha1word d = ctx->state[3];
	sha1word e = ctx->state[4];

#if	defined(W_ARRAY)
	sha1word w[16];
#else	/* !defined(W_ARRAY) */
	sha1word w_0, w_1, w_2, w_3, w_4, w_5, w_6, w_7;
	sha1word w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
#endif	/* !defined(W_ARRAY) */

	W(0) = LOAD_BIG_32((void *)(blk + 0));
	W(1) = LOAD_BIG_32((void *)(blk + 4));
	W(2) = LOAD_BIG_32((void *)(blk + 8));
	W(3) = LOAD_BIG_32((void *)(blk + 12));
	W(4) = LOAD_BIG_32((void *)(blk + 16));
	W(5) = LOAD_BIG_32((void *)(blk + 20));
	W(6) = LOAD_BIG_32((void *)(blk + 24));
	W(7) = LOAD_BIG_32((void *)(blk + 28));
	W(8) = LOAD_BIG_32((void *)(blk + 32));
	W(9) = LOAD_BIG_32((void *)(blk + 36));
	W(10) = LOAD_BIG_32((void *)(blk + 40));
	W(11) = LOAD_BIG_32((void *)(blk + 44));
	W(12) = LOAD_BIG_32((void *)(blk + 48));
	W(13) = LOAD_BIG_32((void *)(blk + 52));
	W(14) = LOAD_BIG_32((void *)(blk + 56));
	W(15) = LOAD_BIG_32((void *)(blk + 60));

#endif	/* !defined(__sparc) */

	/*
	 * general optimization:
	 *
	 * even though this approach is described in the standard as
	 * being slower algorithmically, it is 30-40% faster than the
	 * "faster" version under SPARC, because this version has more
	 * of the constraints specified at compile-time and uses fewer
	 * variables (and therefore has better register utilization)
	 * than its "speedier" brother.  (i've tried both, trust me)
	 *
	 * for either method given in the spec, there is an "assignment"
	 * phase where the following takes place:
	 *
	 *	tmp = (main_computation);
	 *	e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
	 *
	 * we can make the algorithm go faster by not doing this work,
	 * but just pretending that `d' is now `e', etc.  this works
	 * really well and obviates the need for a temporary variable.
	 * however, we still explicitly perform the rotate action,
	 * since it is cheaper on SPARC to do it once than to have to
	 * do it over and over again.
	 */

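	/*
	 * The 16-word schedule is computed in place: from round 16 on,
	 * W(t mod 16) is replaced by ROTATE_LEFT(W((t-3) mod 16) ^
	 * W((t-8) mod 16) ^ W((t-14) mod 16) ^ W((t-16) mod 16), 1),
	 * which is the FIPS 180-1 message expansion.
	 */
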
	/* round 1 */
	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(0) + SHA1_CONST(0);	/* 0 */
	b = ROTATE_LEFT(b, 30);

	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(1) + SHA1_CONST(0);	/* 1 */
	a = ROTATE_LEFT(a, 30);

	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(2) + SHA1_CONST(0);	/* 2 */
	e = ROTATE_LEFT(e, 30);

	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(3) + SHA1_CONST(0);	/* 3 */
	d = ROTATE_LEFT(d, 30);

	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(4) + SHA1_CONST(0);	/* 4 */
	c = ROTATE_LEFT(c, 30);

	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(5) + SHA1_CONST(0);	/* 5 */
	b = ROTATE_LEFT(b, 30);

	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(6) + SHA1_CONST(0);	/* 6 */
	a = ROTATE_LEFT(a, 30);

	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(7) + SHA1_CONST(0);	/* 7 */
	e = ROTATE_LEFT(e, 30);

	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(8) + SHA1_CONST(0);	/* 8 */
	d = ROTATE_LEFT(d, 30);

	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(9) + SHA1_CONST(0);	/* 9 */
	c = ROTATE_LEFT(c, 30);

	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(10) + SHA1_CONST(0);	/* 10 */
	b = ROTATE_LEFT(b, 30);

	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(11) + SHA1_CONST(0);	/* 11 */
	a = ROTATE_LEFT(a, 30);

	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(12) + SHA1_CONST(0);	/* 12 */
	e = ROTATE_LEFT(e, 30);

	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(13) + SHA1_CONST(0);	/* 13 */
	d = ROTATE_LEFT(d, 30);

	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(14) + SHA1_CONST(0);	/* 14 */
	c = ROTATE_LEFT(c, 30);

	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + W(15) + SHA1_CONST(0);	/* 15 */
	b = ROTATE_LEFT(b, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1);		/* 16 */
	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + W(0) + SHA1_CONST(0);
	a = ROTATE_LEFT(a, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1);		/* 17 */
	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + W(1) + SHA1_CONST(0);
	e = ROTATE_LEFT(e, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1);		/* 18 */
	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + W(2) + SHA1_CONST(0);
	d = ROTATE_LEFT(d, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1);		/* 19 */
	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + W(3) + SHA1_CONST(0);
	c = ROTATE_LEFT(c, 30);

	/* round 2 */
	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1);		/* 20 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(4) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1);		/* 21 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(5) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1);		/* 22 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(6) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1);		/* 23 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(7) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1);		/* 24 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(8) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1);		/* 25 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(9) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1);		/* 26 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(10) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1);		/* 27 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(11) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1);		/* 28 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(12) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1);	/* 29 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(13) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1);		/* 30 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(14) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1);		/* 31 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(15) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1);		/* 32 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(0) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1);		/* 33 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(1) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1);		/* 34 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(2) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1);		/* 35 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(3) + SHA1_CONST(1);
	b = ROTATE_LEFT(b, 30);

	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1);		/* 36 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(4) + SHA1_CONST(1);
	a = ROTATE_LEFT(a, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1);		/* 37 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(5) + SHA1_CONST(1);
	e = ROTATE_LEFT(e, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1);		/* 38 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(6) + SHA1_CONST(1);
	d = ROTATE_LEFT(d, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1);		/* 39 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(7) + SHA1_CONST(1);
	c = ROTATE_LEFT(c, 30);

	/* round 3 */
	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1);		/* 40 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(8) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1);		/* 41 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(9) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1);		/* 42 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(10) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1);		/* 43 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(11) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1);		/* 44 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(12) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1);	/* 45 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(13) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1);		/* 46 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(14) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1);		/* 47 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(15) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1);		/* 48 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(0) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1);		/* 49 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(1) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1);		/* 50 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(2) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1);		/* 51 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(3) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1);		/* 52 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(4) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1);		/* 53 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(5) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1);		/* 54 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(6) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1);		/* 55 */
	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + W(7) + SHA1_CONST(2);
	b = ROTATE_LEFT(b, 30);

	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1);		/* 56 */
	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + W(8) + SHA1_CONST(2);
	a = ROTATE_LEFT(a, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1);		/* 57 */
	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + W(9) + SHA1_CONST(2);
	e = ROTATE_LEFT(e, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1);		/* 58 */
	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + W(10) + SHA1_CONST(2);
	d = ROTATE_LEFT(d, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1);		/* 59 */
	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + W(11) + SHA1_CONST(2);
	c = ROTATE_LEFT(c, 30);

	/* round 4 */
	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1);		/* 60 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(12) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1);	/* 61 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(13) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1);		/* 62 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(14) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1);		/* 63 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(15) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(0) = ROTATE_LEFT((W(13) ^ W(8) ^ W(2) ^ W(0)), 1);		/* 64 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(0) + SHA1_CONST(3);
	c = ROTATE_LEFT(c, 30);

	W(1) = ROTATE_LEFT((W(14) ^ W(9) ^ W(3) ^ W(1)), 1);		/* 65 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(1) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(2) = ROTATE_LEFT((W(15) ^ W(10) ^ W(4) ^ W(2)), 1);		/* 66 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(2) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(3) = ROTATE_LEFT((W(0) ^ W(11) ^ W(5) ^ W(3)), 1);		/* 67 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(3) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(4) = ROTATE_LEFT((W(1) ^ W(12) ^ W(6) ^ W(4)), 1);		/* 68 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(4) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(5) = ROTATE_LEFT((W(2) ^ W(13) ^ W(7) ^ W(5)), 1);		/* 69 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(5) + SHA1_CONST(3);
	c = ROTATE_LEFT(c, 30);

	W(6) = ROTATE_LEFT((W(3) ^ W(14) ^ W(8) ^ W(6)), 1);		/* 70 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(6) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(7) = ROTATE_LEFT((W(4) ^ W(15) ^ W(9) ^ W(7)), 1);		/* 71 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(7) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(8) = ROTATE_LEFT((W(5) ^ W(0) ^ W(10) ^ W(8)), 1);		/* 72 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(8) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(9) = ROTATE_LEFT((W(6) ^ W(1) ^ W(11) ^ W(9)), 1);		/* 73 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(9) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(10) = ROTATE_LEFT((W(7) ^ W(2) ^ W(12) ^ W(10)), 1);		/* 74 */
	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(10) + SHA1_CONST(3);
	c = ROTATE_LEFT(c, 30);

	W(11) = ROTATE_LEFT((W(8) ^ W(3) ^ W(13) ^ W(11)), 1);		/* 75 */
	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + W(11) + SHA1_CONST(3);
	b = ROTATE_LEFT(b, 30);

	W(12) = ROTATE_LEFT((W(9) ^ W(4) ^ W(14) ^ W(12)), 1);		/* 76 */
	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + W(12) + SHA1_CONST(3);
	a = ROTATE_LEFT(a, 30);

	W(13) = ROTATE_LEFT((W(10) ^ W(5) ^ W(15) ^ W(13)), 1);	/* 77 */
	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + W(13) + SHA1_CONST(3);
	e = ROTATE_LEFT(e, 30);

	W(14) = ROTATE_LEFT((W(11) ^ W(6) ^ W(0) ^ W(14)), 1);		/* 78 */
	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + W(14) + SHA1_CONST(3);
	d = ROTATE_LEFT(d, 30);

	W(15) = ROTATE_LEFT((W(12) ^ W(7) ^ W(1) ^ W(15)), 1);		/* 79 */

	ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + W(15) +
	    SHA1_CONST(3);
	ctx->state[1] += b;
	ctx->state[2] += ROTATE_LEFT(c, 30);
	ctx->state[3] += d;
	ctx->state[4] += e;

	/* zeroize sensitive information */
	W(0) = W(1) = W(2) = W(3) = W(4) = W(5) = W(6) = W(7) = W(8) = 0;
	W(9) = W(10) = W(11) = W(12) = W(13) = W(14) = W(15) = 0;
}
#endif	/* !__amd64 */


/*
 * Encode()
 *
 * purpose: to store a list of 32-bit words in big-endian byte order,
 *          regardless of the host byte order
 *   input: uint8_t *	: place to store the big-endian bytes
 *          uint32_t *	: place to get the words to convert from
 *          size_t	: the length of the data in bytes
 *  output: void
 */

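/*
 * For example, encoding the single word 0x67452301 stores the bytes
 * 0x67 0x45 0x23 0x01, in that order.  Both callers pass a length that is
 * a multiple of four bytes (sizeof (ctx->count) and sizeof (ctx->state)),
 * which the loops below rely on.
 */
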
static void
Encode(uint8_t *_RESTRICT_KYWD output, const uint32_t *_RESTRICT_KYWD input,
    size_t len)
{
	size_t i, j;

#if	defined(__sparc)
	if (IS_P2ALIGNED(output, sizeof (uint32_t))) {
		for (i = 0, j = 0; j < len; i++, j += 4) {
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			*((uint32_t *)(output + j)) = input[i];
		}
	} else {
#endif	/* little endian -- will work on big endian, but slowly */

		for (i = 0, j = 0; j < len; i++, j += 4) {
			output[j] = (input[i] >> 24) & 0xff;
			output[j + 1] = (input[i] >> 16) & 0xff;
			output[j + 2] = (input[i] >> 8) & 0xff;
			output[j + 3] = input[i] & 0xff;
		}
#if	defined(__sparc)
	}
#endif
}