/**********************************************************************
  Copyright(c) 2011-2017 Intel Corporation All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/

#include "sha256_mb.h"
#include "memcpy_inline.h"
#include "endian_helper.h"

#ifdef _MSC_VER
# include <intrin.h>
# define inline __inline
#endif

#ifdef HAVE_AS_KNOWS_SHANI

static inline void hash_init_digest(SHA256_WORD_T * digest);
static inline uint32_t hash_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], uint64_t total_len);
static SHA256_HASH_CTX *sha256_ctx_mgr_resubmit(SHA256_HASH_CTX_MGR * mgr,
						SHA256_HASH_CTX * ctx);

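// These are the SHA-NI (Intel SHA extensions) variants of the SHA-256
// multi-buffer context manager; they are built only when the assembler
// understands the SHA instructions (HAVE_AS_KNOWS_SHANI).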
void sha256_ctx_mgr_init_sse_ni(SHA256_HASH_CTX_MGR * mgr)
{
	// Same as the SSE version; there is no SHA-NI-specific manager state.
	sha256_mb_mgr_init_sse(&mgr->mgr);
}

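// Submit a buffer to be hashed under ctx. flags must be HASH_FIRST,
// HASH_UPDATE, HASH_LAST, or HASH_ENTIRE (FIRST | LAST); anything else is
// rejected with HASH_CTX_ERROR_INVALID_FLAGS. The returned context may be
// the caller's, or a different context whose job just completed.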
SHA256_HASH_CTX *sha256_ctx_mgr_submit_sse_ni(SHA256_HASH_CTX_MGR * mgr, SHA256_HASH_CTX * ctx,
					      const void *buffer, uint32_t len,
					      HASH_CTX_FLAG flags)
{
	if (flags & (~HASH_ENTIRE)) {
		// User should not pass anything other than FIRST, UPDATE, or LAST
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		// Cannot submit to a currently processing job.
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		// Cannot update a finished job.
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		// Init digest
		hash_init_digest(ctx->job.result_digest);

		// Reset byte counter
		ctx->total_length = 0;

		// Clear extra blocks
		ctx->partial_block_buffer_length = 0;
	}
	// If we made it here, there were no errors during this call to submit
	ctx->error = HASH_CTX_ERROR_NONE;

	// Store buffer ptr info from user
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	// Store the user's request flags and mark this ctx as currently being processed.
	ctx->status = (flags & HASH_LAST) ?
	    (HASH_CTX_STS) (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
	    HASH_CTX_STS_PROCESSING;

	// Advance byte counter
	ctx->total_length += len;

	// If there is anything currently buffered in the extra blocks, append to it until it contains a whole block.
	// Or if the user's buffer contains less than a whole block, append as much as possible to the extra block.
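	// (Bitwise | is deliberate here: either a nonzero buffered length or
	// a short input selects the buffered path without a second branch.)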
	if ((ctx->partial_block_buffer_length) | (len < SHA256_BLOCK_SIZE)) {
		// Compute how many bytes to copy from user buffer into extra block
		uint32_t copy_len = SHA256_BLOCK_SIZE - ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			// Copy and update relevant pointers and counters
			memcpy_varlen(&ctx->partial_block_buffer
				      [ctx->partial_block_buffer_length], buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}
		// The extra block should never contain more than 1 block here
		assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);

		// If the extra block buffer contains exactly 1 block, it can be hashed.
		if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_submit_sse_ni(&mgr->mgr,
									      &ctx->job);
		}
	}

	return sha256_ctx_mgr_resubmit(mgr, ctx);
}

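// Flush drains the manager: it forces in-flight jobs to completion and
// returns the next finished context, or NULL once no jobs remain.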
SHA256_HASH_CTX *sha256_ctx_mgr_flush_sse_ni(SHA256_HASH_CTX_MGR * mgr)
{
	SHA256_HASH_CTX *ctx;

	while (1) {
		ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_flush_sse_ni(&mgr->mgr);

		// If flush returned 0, there are no more jobs in flight.
		if (!ctx)
			return NULL;

		// If flush returned a job, verify that it is safe to return to the user.
		// If it is not ready, resubmit the job to finish processing.
		ctx = sha256_ctx_mgr_resubmit(mgr, ctx);

		// If sha256_ctx_mgr_resubmit returned a job, it is ready to be returned.
		if (ctx)
			return ctx;

		// Otherwise, all jobs currently being managed by the SHA256_HASH_CTX_MGR still need processing. Loop.
	}
}
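
/*
 * Illustrative usage sketch (excluded from the build): one way a caller
 * might drive this manager for a single one-shot hash. The pattern follows
 * the isa-l unit tests; hash_ctx_init() comes from multi_buffer.h, and the
 * 16-byte aligned allocation is an assumption carried over from those
 * tests, not a contract stated in this file.
 */
#if 0
static void example_sha256_one_shot(const void *msg, uint32_t msg_len)
{
	SHA256_HASH_CTX_MGR *mgr = NULL;
	SHA256_HASH_CTX ctx;

	// Needs <stdlib.h> for posix_memalign()/free(); error handling omitted.
	posix_memalign((void **)&mgr, 16, sizeof(SHA256_HASH_CTX_MGR));
	sha256_ctx_mgr_init_sse_ni(mgr);

	hash_ctx_init(&ctx);	// clear error/status before first use

	// HASH_ENTIRE == HASH_FIRST | HASH_LAST: whole message in one call.
	sha256_ctx_mgr_submit_sse_ni(mgr, &ctx, msg, msg_len, HASH_ENTIRE);

	// Drain until every lane has finished; the digest is then available
	// in ctx.job.result_digest (SHA256_DIGEST_NWORDS words).
	while (sha256_ctx_mgr_flush_sse_ni(mgr) != NULL)
		;

	free(mgr);
}
#endif
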
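// Drive ctx through its remaining work: hash the whole blocks left in the
// user's buffer, then, once HASH_LAST was requested, pad and hash the tail.
// Returns ctx when it is COMPLETE or idle awaiting more input; returns NULL
// while the job is still queued in the manager.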
static SHA256_HASH_CTX *sha256_ctx_mgr_resubmit(SHA256_HASH_CTX_MGR * mgr,
						SHA256_HASH_CTX * ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			ctx->status = HASH_CTX_STS_COMPLETE;	// Clear PROCESSING bit
			return ctx;
		}
		// If the extra blocks are empty, begin hashing what remains in the user's buffer.
		if (ctx->partial_block_buffer_length == 0 && ctx->incoming_buffer_length) {
			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;

			// Only entire blocks can be hashed. Copy remainder to extra blocks buffer.
			uint32_t copy_len = len & (SHA256_BLOCK_SIZE - 1);

			if (copy_len) {
				len -= copy_len;
				memcpy_varlen(ctx->partial_block_buffer,
					      ((const char *)buffer + len), copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			// len should be a multiple of the block size now
			assert((len % SHA256_BLOCK_SIZE) == 0);

			// Set len to the number of blocks to be hashed in the user's buffer
			len >>= SHA256_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_submit_sse_ni(&mgr->mgr,
										      &ctx->job);
				continue;
			}
		}
		// If the extra blocks are not empty, then we are either on the last block(s)
		// or we need more user input before continuing.
		if (ctx->status & HASH_CTX_STS_LAST) {
			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks = hash_pad(buf, ctx->total_length);

			ctx->status =
			    (HASH_CTX_STS) (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;

			ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_submit_sse_ni(&mgr->mgr,
									      &ctx->job);
			continue;
		}

		if (ctx)
			ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}

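// Load the standard SHA-256 initial hash values H0..H7 into the digest.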
static inline void hash_init_digest(SHA256_WORD_T * digest)
{
	static const SHA256_WORD_T hash_initial_digest[SHA256_DIGEST_NWORDS] =
	    { SHA256_INITIAL_DIGEST };
	memcpy_fixedlen(digest, hash_initial_digest, sizeof(hash_initial_digest));
}

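// FIPS 180-4 padding, done in place on the extra-block buffer: append the
// 0x80 marker, zero-fill, and store the total message length in bits as a
// big-endian field in the final SHA256_PADLENGTHFIELD_SIZE bytes. Returns
// how many extra blocks (1 or 2) must still be hashed.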
static inline uint32_t hash_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], uint64_t total_len)
{
	uint32_t i = (uint32_t) (total_len & (SHA256_BLOCK_SIZE - 1));

	memclr_fixedlen(&padblock[i], SHA256_BLOCK_SIZE);
	padblock[i] = 0x80;

	// Move i to the end of either 1st or 2nd extra block depending on length
	i += ((SHA256_BLOCK_SIZE - 1) & (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1))) +
	    1 + SHA256_PADLENGTHFIELD_SIZE;
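	// The expression above rounds the tail (0x80 byte plus length field)
	// up to the next block boundary, so i is now SHA256_BLOCK_SIZE or
	// 2 * SHA256_BLOCK_SIZE. With the 8-byte length field that means one
	// extra block when total_len % SHA256_BLOCK_SIZE <= 55, else two.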

#if SHA256_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = to_be64((uint64_t) total_len << 3);

	return i >> SHA256_LOG2_BLOCK_SIZE;	// Number of extra blocks to hash
}

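// Version info (slver): the hex suffix in each bare declaration mirrors the
// { snum, ver, core } fields of the initialized struct that follows it.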
struct slver {
	uint16_t snum;
	uint8_t ver;
	uint8_t core;
};
struct slver sha256_ctx_mgr_init_sse_ni_slver_070002c7;
struct slver sha256_ctx_mgr_init_sse_ni_slver = { 0x02c7, 0x00, 0x07 };

struct slver sha256_ctx_mgr_submit_sse_ni_slver_070002c8;
struct slver sha256_ctx_mgr_submit_sse_ni_slver = { 0x02c8, 0x00, 0x07 };

struct slver sha256_ctx_mgr_flush_sse_ni_slver_070002c9;
struct slver sha256_ctx_mgr_flush_sse_ni_slver = { 0x02c9, 0x00, 0x07 };

#endif				// HAVE_AS_KNOWS_SHANI