/**********************************************************************
  Copyright(c) 2011-2016 Intel Corporation All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/

#if defined(__clang__)
# pragma clang attribute push (__attribute__((target("avx2"))), apply_to=function)
#elif defined(__ICC)
# pragma intel optimization_parameter target_arch=AVX2
#elif defined(__ICL)
# pragma [intel] optimization_parameter target_arch=AVX2
#elif (__GNUC__ >= 5)
# pragma GCC target("avx2")
#endif
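
/*
 * Note: the pragmas above only request AVX2 code generation. The AVX512
 * kernels (sha256_mb_mgr_*_avx512) are implemented in assembly; this C
 * wrapper itself appears to need AVX2 only for the inlined copy helpers
 * pulled in from memcpy_inline.h.
 */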

#include "sha256_mb.h"
#include "memcpy_inline.h"
#include "endian_helper.h"
#include <assert.h>		// for the assert() sanity checks below

#ifdef _MSC_VER
# include <intrin.h>
# define inline __inline
#endif

#ifdef HAVE_AS_KNOWS_AVX512

static inline void hash_init_digest(SHA256_WORD_T * digest);
static inline uint32_t hash_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], uint64_t total_len);
static SHA256_HASH_CTX *sha256_ctx_mgr_resubmit(SHA256_HASH_CTX_MGR * mgr,
                                                SHA256_HASH_CTX * ctx);

void sha256_ctx_mgr_init_avx512(SHA256_HASH_CTX_MGR * mgr)
{
        sha256_mb_mgr_init_avx512(&mgr->mgr);
}

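/*
 * Submit a buffer to be hashed under the given ctx. flags selects
 * HASH_FIRST / HASH_UPDATE / HASH_LAST (or HASH_ENTIRE for a one-shot
 * hash); any bits outside HASH_ENTIRE are rejected. In the multi-buffer
 * model the return value may be a different, completed ctx than the one
 * submitted, or NULL if no job has completed yet.
 */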
SHA256_HASH_CTX *sha256_ctx_mgr_submit_avx512(SHA256_HASH_CTX_MGR * mgr, SHA256_HASH_CTX * ctx,
                                              const void *buffer, uint32_t len,
                                              HASH_CTX_FLAG flags)
{
        if (flags & (~HASH_ENTIRE)) {
                // User should not pass anything other than FIRST, UPDATE, or LAST
                ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
                return ctx;
        }

        if (ctx->status & HASH_CTX_STS_PROCESSING) {
                // Cannot submit to a currently processing job.
                ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
                return ctx;
        }

        if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
                // Cannot update a finished job.
                ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
                return ctx;
        }

        if (flags & HASH_FIRST) {
                // Init digest
                hash_init_digest(ctx->job.result_digest);

                // Reset byte counter
                ctx->total_length = 0;

                // Clear extra blocks
                ctx->partial_block_buffer_length = 0;
        }
        // If we made it here, there were no errors during this call to submit
        ctx->error = HASH_CTX_ERROR_NONE;

        // Store buffer ptr info from user
        ctx->incoming_buffer = buffer;
        ctx->incoming_buffer_length = len;

        // Store the user's request flags and mark this ctx as currently being processed.
        ctx->status = (flags & HASH_LAST) ?
            (HASH_CTX_STS) (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
            HASH_CTX_STS_PROCESSING;

        // Advance byte counter
        ctx->total_length += len;

        // If there is anything currently buffered in the extra blocks, append to it until it contains a whole block.
        // Or if the user's buffer contains less than a whole block, append as much as possible to the extra block.
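        // (The bitwise OR below acts like || here: both operands are
        // non-negative, and either condition routes the data through the
        // partial-block buffer.)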
        if ((ctx->partial_block_buffer_length) | (len < SHA256_BLOCK_SIZE)) {
                // Compute how many bytes to copy from user buffer into extra block
                uint32_t copy_len = SHA256_BLOCK_SIZE - ctx->partial_block_buffer_length;
                if (len < copy_len)
                        copy_len = len;

                if (copy_len) {
                        // Copy and update relevant pointers and counters
                        memcpy_varlen(&ctx->partial_block_buffer
                                      [ctx->partial_block_buffer_length], buffer, copy_len);

                        ctx->partial_block_buffer_length += copy_len;
                        ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
                        ctx->incoming_buffer_length = len - copy_len;
                }
                // The extra block should never contain more than 1 block here
                assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);

                // If the extra block buffer contains exactly 1 block, it can be hashed.
                if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
                        ctx->partial_block_buffer_length = 0;

                        ctx->job.buffer = ctx->partial_block_buffer;
                        ctx->job.len = 1;
                        ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_submit_avx512(&mgr->mgr,
                                                                              &ctx->job);
                }
        }

        return sha256_ctx_mgr_resubmit(mgr, ctx);
}

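/*
 * Drain the manager: repeatedly flush the multi-buffer manager and finish
 * any job that still has buffered or padding blocks outstanding. Returns
 * one completed ctx per call, or NULL once nothing is in flight.
 */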
SHA256_HASH_CTX *sha256_ctx_mgr_flush_avx512(SHA256_HASH_CTX_MGR * mgr)
{
        SHA256_HASH_CTX *ctx;

        while (1) {
                ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_flush_avx512(&mgr->mgr);

                // If flush returned 0, there are no more jobs in flight.
                if (!ctx)
                        return NULL;

                // If flush returned a job, verify that it is safe to return to the user.
                // If it is not ready, resubmit the job to finish processing.
                ctx = sha256_ctx_mgr_resubmit(mgr, ctx);

                // If sha256_ctx_mgr_resubmit returned a job, it is ready to be returned.
                if (ctx)
                        return ctx;

                // Otherwise, all jobs currently being managed by the SHA256_HASH_CTX_MGR still need processing. Loop.
        }
}
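
/*
 * Illustrative one-shot usage sketch (not part of the library; buf and
 * buf_len stand in for the caller's data). For streaming input, submit the
 * first piece with HASH_FIRST, middle pieces with HASH_UPDATE, and the
 * final piece with HASH_LAST instead of HASH_ENTIRE.
 *
 *	SHA256_HASH_CTX_MGR *mgr = NULL;
 *	SHA256_HASH_CTX ctx;
 *
 *	posix_memalign((void **)&mgr, 16, sizeof(*mgr));  // mgr must be 16-byte aligned
 *	sha256_ctx_mgr_init_avx512(mgr);
 *	hash_ctx_init(&ctx);	// macro from multi_buffer.h
 *	sha256_ctx_mgr_submit_avx512(mgr, &ctx, buf, buf_len, HASH_ENTIRE);
 *	while (sha256_ctx_mgr_flush_avx512(mgr) != NULL)
 *		;	// drain until no jobs remain in flight
 *	// ctx.job.result_digest[] now holds the eight 32-bit digest words.
 */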
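/*
 * Drive a job toward completion: hash any whole blocks still pending in
 * the user's buffer, spill the remainder into the partial-block buffer,
 * and, on HASH_LAST, append the SHA-256 padding and hash the final
 * block(s). Returns the ctx once it is safe to hand back to the user, or
 * NULL while the manager still owes it processing.
 */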
static SHA256_HASH_CTX *sha256_ctx_mgr_resubmit(SHA256_HASH_CTX_MGR * mgr,
                                                SHA256_HASH_CTX * ctx)
{
        while (ctx) {
                if (ctx->status & HASH_CTX_STS_COMPLETE) {
                        ctx->status = HASH_CTX_STS_COMPLETE;	// Clear PROCESSING bit
                        return ctx;
                }
                // If the extra blocks are empty, begin hashing what remains in the user's buffer.
                if (ctx->partial_block_buffer_length == 0 && ctx->incoming_buffer_length) {
                        const void *buffer = ctx->incoming_buffer;
                        uint32_t len = ctx->incoming_buffer_length;

                        // Only entire blocks can be hashed. Copy remainder to extra blocks buffer.
                        uint32_t copy_len = len & (SHA256_BLOCK_SIZE - 1);

                        if (copy_len) {
                                len -= copy_len;
                                memcpy_varlen(ctx->partial_block_buffer,
                                              ((const char *)buffer + len), copy_len);
                                ctx->partial_block_buffer_length = copy_len;
                        }

                        ctx->incoming_buffer_length = 0;

                        // len should be a multiple of the block size now
                        assert((len % SHA256_BLOCK_SIZE) == 0);

                        // Set len to the number of blocks to be hashed in the user's buffer
                        len >>= SHA256_LOG2_BLOCK_SIZE;

                        if (len) {
                                ctx->job.buffer = (uint8_t *) buffer;
                                ctx->job.len = len;
                                ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_submit_avx512(&mgr->mgr,
                                                                                      &ctx->job);
                                continue;
                        }
                }
                // If the extra blocks are not empty, then we are either on the last block(s)
                // or we need more user input before continuing.
                if (ctx->status & HASH_CTX_STS_LAST) {
                        uint8_t *buf = ctx->partial_block_buffer;
                        uint32_t n_extra_blocks = hash_pad(buf, ctx->total_length);

                        ctx->status =
                            (HASH_CTX_STS) (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_COMPLETE);
                        ctx->job.buffer = buf;
                        ctx->job.len = (uint32_t) n_extra_blocks;
                        ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_submit_avx512(&mgr->mgr,
                                                                              &ctx->job);
                        continue;
                }

                if (ctx)
                        ctx->status = HASH_CTX_STS_IDLE;
                return ctx;
        }

        return NULL;
}

static inline void hash_init_digest(SHA256_WORD_T * digest)
{
        static const SHA256_WORD_T hash_initial_digest[SHA256_DIGEST_NWORDS] =
            { SHA256_INITIAL_DIGEST };
        memcpy_fixedlen(digest, hash_initial_digest, sizeof(hash_initial_digest));
}

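/*
 * Append SHA-256 padding to the residual bytes already sitting in padblock:
 * a 0x80 byte, zeros, then the total message length in bits as a big-endian
 * 64-bit field. Returns the number of 64-byte blocks (1 or 2) that remain
 * to be hashed.
 */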
static inline uint32_t hash_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], uint64_t total_len)
{
        uint32_t i = (uint32_t) (total_len & (SHA256_BLOCK_SIZE - 1));

        memclr_fixedlen(&padblock[i], SHA256_BLOCK_SIZE);
        padblock[i] = 0x80;

        // Move i to the end of either 1st or 2nd extra block depending on length
        i += ((SHA256_BLOCK_SIZE - 1) & (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1))) +
            1 + SHA256_PADLENGTHFIELD_SIZE;

#if SHA256_PADLENGTHFIELD_SIZE == 16
        *((uint64_t *) &padblock[i - 16]) = 0;
#endif

        *((uint64_t *) &padblock[i - 8]) = to_be64((uint64_t) total_len << 3);

        return i >> SHA256_LOG2_BLOCK_SIZE;	// Number of extra blocks to hash
}
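
/*
 * Worked example of the padding arithmetic above (SHA256_BLOCK_SIZE = 64,
 * SHA256_PADLENGTHFIELD_SIZE = 8):
 *
 *   total_len = 55: i = 55, and ((64 - 1) & (0 - 64)) = 0, so
 *                   i = 55 + 0 + 9 = 64  -> one extra block
 *                   (55 bytes + 0x80 + 8-byte length fills the block exactly).
 *   total_len = 56: i = 56, and ((64 - 1) & (0 - 65)) = 63, so
 *                   i = 56 + 63 + 9 = 128 -> two extra blocks
 *                   (the length field no longer fits in the first block).
 */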
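/*
 * Version markers consumed by ISA-L's tooling. The hex suffix in each
 * *_slver_... symbol name mirrors the initializer below: core 0x06,
 * version 0x00, and the serial number (0x015a / 0x015b / 0x015c).
 */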
struct slver {
        uint16_t snum;
        uint8_t ver;
        uint8_t core;
};
struct slver sha256_ctx_mgr_init_avx512_slver_0600015a;
struct slver sha256_ctx_mgr_init_avx512_slver = { 0x015a, 0x00, 0x06 };

struct slver sha256_ctx_mgr_submit_avx512_slver_0600015b;
struct slver sha256_ctx_mgr_submit_avx512_slver = { 0x015b, 0x00, 0x06 };

struct slver sha256_ctx_mgr_flush_avx512_slver_0600015c;
struct slver sha256_ctx_mgr_flush_avx512_slver = { 0x015c, 0x00, 0x06 };

#endif // HAVE_AS_KNOWS_AVX512

#if defined(__clang__)
# pragma clang attribute pop
#endif