ceph/src/crypto/isa-l/isa-l_crypto/mh_sha1_murmur3_x64_128/murmur3_x64_128_internal.c
/**********************************************************************
  Copyright(c) 2011-2016 Intel Corporation All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/

#include "mh_sha1_murmur3_x64_128_internal.h"
#include <stdlib.h>		// for NULL

/* murmur3_x64_128 constants */
// Rotation amounts (in bits) for the circular left rotates
#define MUR_SH1 31
#define MUR_SH2 33
#define MUR_SH3 27
#define MUR_SH4 31
#define MUR_SH5 33

// Multiplier and additive constants for the hash-state update
#define MUR_MUL 5
#define MUR_ADD1 0x52dce729
#define MUR_ADD2 0x38495ab5

// Block mixing constants
#define MUR_CON1 0x87c37b91114253d5LLU
#define MUR_CON2 0x4cf5ad432745937fLLU

// Finalization mix multipliers
#define MUR_FMUL1 0xff51afd7ed558ccdLLU
#define MUR_FMUL2 0xc4ceb9fe1a85ec53LLU

/* murmur3_x64_128 inline functions */

// Mix one 64-bit input lane: multiply by conA, rotate left by shift bits,
// then multiply by conB.
static inline uint64_t blockmix64(uint64_t data, uint64_t conA, uint64_t conB, uint64_t shift)
{
	data *= conA;
	data = (data << shift) | (data >> (64 - shift));
	data *= conB;
	return data;
}

// Fold a mixed lane into the hash state: XOR in the data, rotate left by
// shift bits, add the other hash lane, then scale by MUR_MUL and add a constant.
static inline uint64_t hashmix64(uint64_t hashA, uint64_t hashB, uint64_t data, uint64_t add,
				 uint64_t shift)
{
	hashA ^= data;
	hashA = (hashA << shift) | (hashA >> (64 - shift));
	hashA += hashB;
	hashA = hashA * MUR_MUL + add;
	return hashA;
}

// Process num_blocks full 16-byte blocks of input_data, updating the
// 128-bit hash state held in digests.
void murmur3_x64_128_block(const uint8_t * input_data, uint32_t num_blocks,
			   uint32_t digests[MURMUR3_x64_128_DIGEST_WORDS])
{
	uint64_t data1, data2;
	const uint64_t *input_qword = (const uint64_t *) input_data;
	uint64_t *hash = (uint64_t *) digests;
	uint32_t i = 0;

	while (i < num_blocks) {
		data1 = input_qword[i * 2];
		data2 = input_qword[i * 2 + 1];
		data1 = blockmix64(data1, MUR_CON1, MUR_CON2, MUR_SH1);
		data2 = blockmix64(data2, MUR_CON2, MUR_CON1, MUR_SH2);
		hash[0] = hashmix64(hash[0], hash[1], data1, MUR_ADD1, MUR_SH3);
		hash[1] = hashmix64(hash[1], hash[0], data2, MUR_ADD2, MUR_SH4);
		i++;
	}

	return;
}

// Absorb the final partial block (total_len % 16 bytes from tail_buffer)
// and apply the finalization mix to the 128-bit hash state in digests.
void murmur3_x64_128_tail(const uint8_t * tail_buffer, uint32_t total_len,
			  uint32_t digests[MURMUR3_x64_128_DIGEST_WORDS])
{
	uint64_t data1, data2;
	uint64_t *hash = (uint64_t *) digests;
	uint64_t tail_len = total_len % 16;
	const uint8_t *tail = (const uint8_t *) tail_buffer;

	union {
		uint64_t hash[2];
		uint8_t hashB[16];
	} hashU;

	// Copy the remaining bytes into a zero-padded 16-byte block
	hashU.hash[0] = hashU.hash[1] = 0;

	while (tail_len-- > 0)
		hashU.hashB[tail_len] = tail[tail_len];

	data1 = hashU.hash[0];
	data2 = hashU.hash[1];

	data1 = blockmix64(data1, MUR_CON1, MUR_CON2, MUR_SH1);
	data2 = blockmix64(data2, MUR_CON2, MUR_CON1, MUR_SH2);

	// Fold in the mixed tail and the total message length
	hash[0] ^= total_len ^ data1;
	hash[1] ^= total_len ^ data2;

	hash[0] += hash[1];
	hash[1] += hash[0];

	// Finalization mix: xorshift/multiply avalanche on each lane
	hash[0] ^= hash[0] >> MUR_SH5;
	hash[0] *= MUR_FMUL1;
	hash[0] ^= hash[0] >> MUR_SH5;
	hash[0] *= MUR_FMUL2;
	hash[0] ^= hash[0] >> MUR_SH5;

	hash[1] ^= hash[1] >> MUR_SH5;
	hash[1] *= MUR_FMUL1;
	hash[1] ^= hash[1] >> MUR_SH5;
	hash[1] *= MUR_FMUL2;
	hash[1] ^= hash[1] >> MUR_SH5;

	hash[0] += hash[1];
	hash[1] += hash[0];

	return;
}
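
/*
 * Illustrative usage sketch (not part of the original ISA-L source): one way
 * the two routines above could be combined to hash a whole buffer. The
 * function name murmur3_x64_128_example and the seeding convention (both
 * 64-bit lanes of the digest initialized to the same seed) are assumptions
 * for illustration, not the library's public API.
 */
static void murmur3_x64_128_example(const uint8_t * data, uint32_t len, uint64_t seed,
				    uint32_t digests[MURMUR3_x64_128_DIGEST_WORDS])
{
	uint64_t *hash = (uint64_t *) digests;
	uint32_t num_blocks = len / 16;	// murmur3_x64_128 works on 16-byte blocks

	hash[0] = seed;
	hash[1] = seed;

	// Full 16-byte blocks first, then the remainder plus finalization
	murmur3_x64_128_block(data, num_blocks, digests);
	murmur3_x64_128_tail(data + num_blocks * 16, len, digests);
}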