1 /**********************************************************************
2 Copyright(c) 2011-2016 Intel Corporation All rights reserved.
4 Redistribution and use in source and binary forms, with or without
5 modification, are permitted provided that the following conditions
7 * Redistributions of source code must retain the above copyright
8 notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above copyright
10 notice, this list of conditions and the following disclaimer in
11 the documentation and/or other materials provided with the
13 * Neither the name of Intel Corporation nor the names of its
14 contributors may be used to endorse or promote products derived
15 from this software without specific prior written permission.
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 **********************************************************************/
30 #include "mh_sha1_murmur3_x64_128_internal.h"
31 #include <stdlib.h> // for NULL
/* murmur3_x64_128 constants */
// Bit counts for the circular (rotate) shifts used in the mix steps
// Additive constants used in the per-block hash-mix step
#define MUR_ADD1 0x52dce729
#define MUR_ADD2 0x38495ab5

// Block multiply constants (murmur3 x64_128 reference c1/c2)
#define MUR_CON1 0x87c37b91114253d5LLU
#define MUR_CON2 0x4cf5ad432745937fLLU

// Finalization-mix multiply constants
#define MUR_FMUL1 0xff51afd7ed558ccdLLU
#define MUR_FMUL2 0xc4ceb9fe1a85ec53LLU
/* murmur3_x64_128 inline helper functions */
/*
 * Mix one 64-bit input lane, murmur3 x64_128 block step:
 * multiply by conA, rotate left by @shift bits, multiply by conB.
 *
 * @data  input lane value
 * @conA  pre-rotate multiply constant
 * @conB  post-rotate multiply constant
 * @shift rotate amount; must be in [1, 63] so (64 - shift) is a valid
 *        shift count (shift of 0 or 64 would be undefined behavior)
 *
 * Returns the mixed lane. Multiplication wraps mod 2^64 (unsigned),
 * which is the behavior the algorithm relies on.
 *
 * NOTE(review): the multiply steps and return were lost in extraction and
 * are reconstructed from the MurmurHash3 x64_128 reference — confirm
 * against the upstream file.
 */
static inline uint64_t blockmix64(uint64_t data, uint64_t conA, uint64_t conB, uint64_t shift)
{
	data *= conA;
	// circular left rotate by 'shift' bits
	data = (data << shift) | (data >> (64 - shift));
	data *= conB;
	return data;
}
60 static inline uint64_t hashmix64(uint64_t hashA
, uint64_t hashB
, uint64_t data
, uint64_t add
,
64 hashA
= (hashA
<< shift
) | (hashA
>> (64 - shift
));
66 hashA
= hashA
* MUR_MUL
+ add
;
70 void murmur3_x64_128_block(const uint8_t * input_data
, uint32_t num_blocks
,
71 uint32_t digests
[MURMUR3_x64_128_DIGEST_WORDS
])
73 uint64_t data1
, data2
;
74 uint64_t *input_qword
= (uint64_t *) input_data
;
75 uint64_t *hash
= (uint64_t *) digests
;
78 while (i
< num_blocks
) {
79 data1
= input_qword
[i
* 2];
80 data2
= input_qword
[i
* 2 + 1];
81 data1
= blockmix64(data1
, MUR_CON1
, MUR_CON2
, MUR_SH1
);
82 data2
= blockmix64(data2
, MUR_CON2
, MUR_CON1
, MUR_SH2
);
83 hash
[0] = hashmix64(hash
[0], hash
[1], data1
, MUR_ADD1
, MUR_SH3
);
84 hash
[1] = hashmix64(hash
[1], hash
[0], data2
, MUR_ADD2
, MUR_SH4
);
91 void murmur3_x64_128_tail(const uint8_t * tail_buffer
, uint32_t total_len
,
92 uint32_t digests
[MURMUR3_x64_128_DIGEST_WORDS
])
94 uint64_t data1
, data2
;
95 uint64_t *hash
= (uint64_t *) digests
;
96 uint64_t tail_len
= total_len
% 16;
97 uint8_t *tail
= (uint8_t *) tail_buffer
;
105 hashU
.hash
[0] = hashU
.hash
[1] = 0;
107 while (tail_len
-- > 0)
108 hashU
.hashB
[tail_len
] = tail
[tail_len
];
110 data1
= hashU
.hash
[0];
111 data2
= hashU
.hash
[1];
113 data1
= blockmix64(data1
, MUR_CON1
, MUR_CON2
, MUR_SH1
);
114 data2
= blockmix64(data2
, MUR_CON2
, MUR_CON1
, MUR_SH2
);
116 hash
[0] ^= total_len
^ data1
;
117 hash
[1] ^= total_len
^ data2
;
122 hash
[0] ^= hash
[0] >> MUR_SH5
;
123 hash
[0] *= MUR_FMUL1
;
124 hash
[0] ^= hash
[0] >> MUR_SH5
;
125 hash
[0] *= MUR_FMUL2
;
126 hash
[0] ^= hash
[0] >> MUR_SH5
;
128 hash
[1] ^= hash
[1] >> MUR_SH5
;
129 hash
[1] *= MUR_FMUL1
;
130 hash
[1] ^= hash
[1] >> MUR_SH5
;
131 hash
[1] *= MUR_FMUL2
;
132 hash
[1] ^= hash
[1] >> MUR_SH5
;