/**********************************************************************
  Copyright(c) 2011-2016 Intel Corporation All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/
29 | ||
/*
 * Multi-arch indirection: an arch-specific wrapper may pre-define
 * FINALIZE_FUNCTION (and MH_SHA1_TAIL_FUNCTION) before including this
 * file.  When it does not, compile the generic "base" finalize routine.
 */
#ifndef FINALIZE_FUNCTION
#include <stdlib.h>		// For NULL
#include "mh_sha1_murmur3_x64_128_internal.h"

#define FINALIZE_FUNCTION	mh_sha1_murmur3_x64_128_finalize_base
#define MH_SHA1_TAIL_FUNCTION	mh_sha1_tail_base
// Emit the slver version-stamp symbols only for the base build (see file end).
#define FINALIZE_FUNCTION_SLVER
#endif

// The murmur3 block/tail helpers are shared by every arch variant.
#define MURMUR_BLOCK_FUNCTION	murmur3_x64_128_block
#define MURMUR_TAIL_FUNCTION	murmur3_x64_128_tail
41 | ||
42 | int FINALIZE_FUNCTION(struct mh_sha1_murmur3_x64_128_ctx *ctx, void *mh_sha1_digest, | |
43 | void *murmur3_x64_128_digest) | |
44 | { | |
45 | uint8_t *partial_block_buffer, *murmur_tail_data; | |
46 | uint64_t partial_block_len, total_len; | |
47 | uint32_t(*mh_sha1_segs_digests)[HASH_SEGS]; | |
48 | uint8_t *aligned_frame_buffer; | |
49 | ||
50 | if (ctx == NULL) | |
51 | return MH_SHA1_MURMUR3_CTX_ERROR_NULL; | |
52 | ||
53 | total_len = ctx->total_length; | |
54 | partial_block_len = total_len % MH_SHA1_BLOCK_SIZE; | |
55 | partial_block_buffer = ctx->partial_block_buffer; | |
56 | ||
57 | // Calculate murmur3 firstly | |
58 | // because mh_sha1 will change the partial_block_buffer | |
59 | // ( partial_block_buffer = n murmur3 blocks and 1 murmur3 tail) | |
60 | murmur_tail_data = | |
61 | partial_block_buffer + partial_block_len - partial_block_len % MUR_BLOCK_SIZE; | |
62 | MURMUR_BLOCK_FUNCTION(partial_block_buffer, partial_block_len / MUR_BLOCK_SIZE, | |
63 | ctx->murmur3_x64_128_digest); | |
64 | MURMUR_TAIL_FUNCTION(murmur_tail_data, total_len, ctx->murmur3_x64_128_digest); | |
65 | ||
66 | /* mh_sha1 final */ | |
67 | aligned_frame_buffer = (uint8_t *) ALIGN_64(ctx->frame_buffer); | |
68 | mh_sha1_segs_digests = (uint32_t(*)[HASH_SEGS]) ctx->mh_sha1_interim_digests; | |
69 | ||
70 | MH_SHA1_TAIL_FUNCTION(partial_block_buffer, total_len, mh_sha1_segs_digests, | |
71 | aligned_frame_buffer, ctx->mh_sha1_digest); | |
72 | ||
73 | /* Output the digests of murmur3 and mh_sha1 */ | |
74 | if (mh_sha1_digest != NULL) { | |
75 | ((uint32_t *) mh_sha1_digest)[0] = ctx->mh_sha1_digest[0]; | |
76 | ((uint32_t *) mh_sha1_digest)[1] = ctx->mh_sha1_digest[1]; | |
77 | ((uint32_t *) mh_sha1_digest)[2] = ctx->mh_sha1_digest[2]; | |
78 | ((uint32_t *) mh_sha1_digest)[3] = ctx->mh_sha1_digest[3]; | |
79 | ((uint32_t *) mh_sha1_digest)[4] = ctx->mh_sha1_digest[4]; | |
80 | } | |
81 | ||
82 | if (murmur3_x64_128_digest != NULL) { | |
83 | ((uint32_t *) murmur3_x64_128_digest)[0] = ctx->murmur3_x64_128_digest[0]; | |
84 | ((uint32_t *) murmur3_x64_128_digest)[1] = ctx->murmur3_x64_128_digest[1]; | |
85 | ((uint32_t *) murmur3_x64_128_digest)[2] = ctx->murmur3_x64_128_digest[2]; | |
86 | ((uint32_t *) murmur3_x64_128_digest)[3] = ctx->murmur3_x64_128_digest[3]; | |
87 | } | |
88 | ||
89 | return MH_SHA1_MURMUR3_CTX_ERROR_NONE; | |
90 | } | |
91 | ||
#ifdef FINALIZE_FUNCTION_SLVER
// ISA-L version stamp embedded in the binary: serial number, version, core.
struct slver {
	uint16_t snum;
	uint8_t ver;
	uint8_t core;
};

// Version info
struct slver mh_sha1_murmur3_x64_128_finalize_base_slver_0000025b;
struct slver mh_sha1_murmur3_x64_128_finalize_base_slver = { 0x025b, 0x00, 0x00 };
#endif