/**********************************************************************
  Copyright(c) 2011-2017 Intel Corporation All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************/

#if defined(__clang__)
# pragma clang attribute push (__attribute__((target("avx2"))), apply_to=function)
#elif defined(__ICC)
# pragma intel optimization_parameter target_arch=AVX2
#elif defined(__ICL)
# pragma [intel] optimization_parameter target_arch=AVX2
#elif (__GNUC__ >= 5)
# pragma GCC target("avx2")
#endif

#include "sha256_mb.h"
#include "memcpy_inline.h"
#include "endian_helper.h"

#ifdef _MSC_VER
# include <intrin.h>
# define inline __inline
#endif

/**
 * The sha256_ctx_avx512_ni functions aim to utilize Cannon Lake.
 * Since SHA-NI is still slower than the multi-buffer code when all lanes are
 * full, sha256_ctx_mgr_init_avx512_ni and sha256_ctx_mgr_submit_avx512_ni are
 * similar to their avx512 versions.
 * sha256_ctx_mgr_flush_avx512_ni is different: it calls
 * sha256_mb_mgr_flush_avx512_ni, which uses SHA-NI when the number of lanes
 * is below a threshold.
 */
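/*
 * A minimal usage sketch of the context-manager API defined below (not part of
 * the library itself). It assumes the standard isa-l_crypto multi-buffer flow
 * from sha256_mb.h, including hash_ctx_init() and a suitably aligned manager:
 *
 *	SHA256_HASH_CTX_MGR *mgr = NULL;
 *	SHA256_HASH_CTX ctx;
 *	SHA256_HASH_CTX *job;
 *
 *	posix_memalign((void **)&mgr, 16, sizeof(SHA256_HASH_CTX_MGR));
 *	sha256_ctx_mgr_init_avx512_ni(mgr);
 *	hash_ctx_init(&ctx);
 *
 *	// Submit a whole message at once; HASH_FIRST/HASH_UPDATE/HASH_LAST can
 *	// be used instead for streaming input.
 *	job = sha256_ctx_mgr_submit_avx512_ni(mgr, &ctx, msg, msg_len, HASH_ENTIRE);
 *
 *	// Drain jobs still in flight; each non-NULL return is a completed context
 *	// whose digest is in job->job.result_digest[0..SHA256_DIGEST_NWORDS-1].
 *	while ((job = sha256_ctx_mgr_flush_avx512_ni(mgr)) != NULL)
 *		;
 */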
#if defined(HAVE_AS_KNOWS_AVX512) && defined(HAVE_AS_KNOWS_SHANI)

static inline void hash_init_digest(SHA256_WORD_T * digest);
static inline uint32_t hash_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], uint64_t total_len);
static SHA256_HASH_CTX *sha256_ctx_mgr_resubmit(SHA256_HASH_CTX_MGR * mgr,
						SHA256_HASH_CTX * ctx);

void sha256_ctx_mgr_init_avx512_ni(SHA256_HASH_CTX_MGR * mgr)
{
	sha256_mb_mgr_init_avx512(&mgr->mgr);
}

SHA256_HASH_CTX *sha256_ctx_mgr_submit_avx512_ni(SHA256_HASH_CTX_MGR * mgr,
						 SHA256_HASH_CTX * ctx, const void *buffer,
						 uint32_t len, HASH_CTX_FLAG flags)
{
	if (flags & (~HASH_ENTIRE)) {
		// User should not pass anything other than FIRST, UPDATE, or LAST
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		// Cannot submit to a currently processing job.
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		// Cannot update a finished job.
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		// Init digest
		hash_init_digest(ctx->job.result_digest);

		// Reset byte counter
		ctx->total_length = 0;

		// Clear extra blocks
		ctx->partial_block_buffer_length = 0;
	}
	// If we made it here, there were no errors during this call to submit
	ctx->error = HASH_CTX_ERROR_NONE;

	// Store buffer ptr info from user
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	// Store the user's request flags and mark this ctx as currently being processed.
	ctx->status = (flags & HASH_LAST) ?
	    (HASH_CTX_STS) (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
	    HASH_CTX_STS_PROCESSING;

	// Advance byte counter
	ctx->total_length += len;

	// If there is anything currently buffered in the extra blocks, append to it until it contains a whole block.
	// Or if the user's buffer contains less than a whole block, append as much as possible to the extra block.
	if ((ctx->partial_block_buffer_length) | (len < SHA256_BLOCK_SIZE)) {
		// Compute how many bytes to copy from user buffer into extra block
		uint32_t copy_len = SHA256_BLOCK_SIZE - ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			// Copy and update relevant pointers and counters
			memcpy_varlen(&ctx->partial_block_buffer
				      [ctx->partial_block_buffer_length], buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}
		// The extra block should never contain more than 1 block here
		assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);

		// If the extra block buffer contains exactly 1 block, it can be hashed.
		if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_submit_avx512(&mgr->mgr,
									      &ctx->job);
		}
	}

	return sha256_ctx_mgr_resubmit(mgr, ctx);
}

SHA256_HASH_CTX *sha256_ctx_mgr_flush_avx512_ni(SHA256_HASH_CTX_MGR * mgr)
{
	SHA256_HASH_CTX *ctx;

	while (1) {
		ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_flush_avx512_ni(&mgr->mgr);

		// If flush returned 0, there are no more jobs in flight.
		if (!ctx)
			return NULL;

		// If flush returned a job, verify that it is safe to return to the user.
		// If it is not ready, resubmit the job to finish processing.
		ctx = sha256_ctx_mgr_resubmit(mgr, ctx);

		// If sha256_ctx_mgr_resubmit returned a job, it is ready to be returned.
		if (ctx)
			return ctx;

		// Otherwise, all jobs currently being managed by the SHA256_HASH_CTX_MGR still need processing. Loop.
	}
}

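// Resubmit helper: keeps feeding a job back to the multi-buffer manager until it
// either completes (status COMPLETE) or runs out of whole blocks and must wait
// for more user input (status IDLE). Also submits the final padding block(s)
// when HASH_LAST was requested.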
static SHA256_HASH_CTX *sha256_ctx_mgr_resubmit(SHA256_HASH_CTX_MGR * mgr,
						SHA256_HASH_CTX * ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			ctx->status = HASH_CTX_STS_COMPLETE;	// Clear PROCESSING bit
			return ctx;
		}
		// If the extra blocks are empty, begin hashing what remains in the user's buffer.
		if (ctx->partial_block_buffer_length == 0 && ctx->incoming_buffer_length) {
			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;

			// Only entire blocks can be hashed. Copy remainder to extra blocks buffer.
			uint32_t copy_len = len & (SHA256_BLOCK_SIZE - 1);

			if (copy_len) {
				len -= copy_len;
				memcpy_varlen(ctx->partial_block_buffer,
					      ((const char *)buffer + len), copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			// len should be a multiple of the block size now
			assert((len % SHA256_BLOCK_SIZE) == 0);

			// Set len to the number of blocks to be hashed in the user's buffer
			len >>= SHA256_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx =
				    (SHA256_HASH_CTX *) sha256_mb_mgr_submit_avx512(&mgr->mgr,
										    &ctx->job);
				continue;
			}
		}
		// If the extra blocks are not empty, then we are either on the last block(s)
		// or we need more user input before continuing.
		if (ctx->status & HASH_CTX_STS_LAST) {
			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks = hash_pad(buf, ctx->total_length);

			ctx->status =
			    (HASH_CTX_STS) (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (SHA256_HASH_CTX *) sha256_mb_mgr_submit_avx512(&mgr->mgr,
									      &ctx->job);
			continue;
		}

		if (ctx)
			ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}

static inline void hash_init_digest(SHA256_WORD_T * digest)
{
	static const SHA256_WORD_T hash_initial_digest[SHA256_DIGEST_NWORDS] =
	    { SHA256_INITIAL_DIGEST };
	memcpy_fixedlen(digest, hash_initial_digest, sizeof(hash_initial_digest));
}

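// Write SHA-256 padding into the extra-block buffer: a 0x80 byte, zero fill, and
// the total message length in bits as a big-endian 64-bit value at the end of the
// last block. Returns the number of extra blocks (1 or 2) left to be hashed.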
static inline uint32_t hash_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], uint64_t total_len)
{
	uint32_t i = (uint32_t) (total_len & (SHA256_BLOCK_SIZE - 1));

	memclr_fixedlen(&padblock[i], SHA256_BLOCK_SIZE);
	padblock[i] = 0x80;

	// Move i to the end of either 1st or 2nd extra block depending on length
	i += ((SHA256_BLOCK_SIZE - 1) & (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1))) +
	    1 + SHA256_PADLENGTHFIELD_SIZE;

#if SHA256_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) & padblock[i - 16]) = 0;
#endif

	*((uint64_t *) & padblock[i - 8]) = to_be64((uint64_t) total_len << 3);

	return i >> SHA256_LOG2_BLOCK_SIZE;	// Number of extra blocks to hash
}

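// Version info markers ({snum, ver, core}) used by the isa-l tooling; the trailing
// _080002xx in each symbol name encodes the same {core, ver, snum} values.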
struct slver {
	uint16_t snum;
	uint8_t ver;
	uint8_t core;
};
struct slver sha256_ctx_mgr_init_avx512_ni_slver_080002ca;
struct slver sha256_ctx_mgr_init_avx512_ni_slver = { 0x02ca, 0x00, 0x08 };

struct slver sha256_ctx_mgr_submit_avx512_ni_slver_080002cb;
struct slver sha256_ctx_mgr_submit_avx512_ni_slver = { 0x02cb, 0x00, 0x08 };

struct slver sha256_ctx_mgr_flush_avx512_ni_slver_080002cc;
struct slver sha256_ctx_mgr_flush_avx512_ni_slver = { 0x02cc, 0x00, 0x08 };

#endif // HAVE_AS_KNOWS_AVX512 and HAVE_AS_KNOWS_SHANI

#if defined(__clang__)
# pragma clang attribute pop
#endif