]>
Commit | Line | Data |
---|---|---|
34dc7c2f BB |
1 | /* |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 | * or http://www.opensolaris.org/os/licensing. | |
10 | * See the License for the specific language governing permissions | |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | /* | |
9babb374 | 22 | * Copyright 2009 Sun Microsystems, Inc. All rights reserved. |
34dc7c2f | 23 | * Use is subject to license terms. |
cae5b340 AX |
24 | * Copyright (C) 2016 Gvozden Nešković. All rights reserved. |
25 | */ | |
26 | /* | |
27 | * Copyright 2013 Saso Kiselkov. All rights reserved. | |
28 | */ | |
29 | ||
30 | /* | |
31 | * Copyright (c) 2016 by Delphix. All rights reserved. | |
34dc7c2f BB |
32 | */ |
33 | ||
9babb374 BB |
34 | /* |
35 | * Fletcher Checksums | |
36 | * ------------------ | |
37 | * | |
38 | * ZFS's 2nd and 4th order Fletcher checksums are defined by the following | |
39 | * recurrence relations: | |
40 | * | |
41 | * a = a + f | |
42 | * i i-1 i-1 | |
43 | * | |
44 | * b = b + a | |
45 | * i i-1 i | |
46 | * | |
47 | * c = c + b (fletcher-4 only) | |
48 | * i i-1 i | |
49 | * | |
50 | * d = d + c (fletcher-4 only) | |
51 | * i i-1 i | |
52 | * | |
53 | * Where | |
54 | * a_0 = b_0 = c_0 = d_0 = 0 | |
55 | * and | |
56 | * f_0 .. f_(n-1) are the input data. | |
57 | * | |
58 | * Using standard techniques, these translate into the following series: | |
59 | * | |
60 | * __n_ __n_ | |
61 | * \ | \ | | |
62 | * a = > f b = > i * f | |
63 | * n /___| n - i n /___| n - i | |
64 | * i = 1 i = 1 | |
65 | * | |
66 | * | |
67 | * __n_ __n_ | |
68 | * \ | i*(i+1) \ | i*(i+1)*(i+2) | |
69 | * c = > ------- f d = > ------------- f | |
70 | * n /___| 2 n - i n /___| 6 n - i | |
71 | * i = 1 i = 1 | |
72 | * | |
73 | * For fletcher-2, the f_is are 64-bit, and [ab]_i are 64-bit accumulators. | |
74 | * Since the additions are done mod (2^64), errors in the high bits may not | |
75 | * be noticed. For this reason, fletcher-2 is deprecated. | |
76 | * | |
77 | * For fletcher-4, the f_is are 32-bit, and [abcd]_i are 64-bit accumulators. | |
78 | * A conservative estimate of how big the buffer can get before we overflow | |
79 | * can be estimated using f_i = 0xffffffff for all i: | |
80 | * | |
81 | * % bc | |
82 | * f=2^32-1;d=0; for (i = 1; d<2^64; i++) { d += f*i*(i+1)*(i+2)/6 }; (i-1)*4 | |
83 | * 2264 | |
84 | * quit | |
85 | * % | |
86 | * | |
87 | * So blocks of up to 2k will not overflow. Our largest block size is | |
88 | * 128k, which has 32k 4-byte words, so we can compute the largest possible | |
89 | * accumulators, then divide by 2^64 to figure the max amount of overflow: | |
90 | * | |
91 | * % bc | |
92 | * a=b=c=d=0; f=2^32-1; for (i=1; i<=32*1024; i++) { a+=f; b+=a; c+=b; d+=c } | |
93 | * a/2^64;b/2^64;c/2^64;d/2^64 | |
94 | * 0 | |
95 | * 0 | |
96 | * 1365 | |
97 | * 11186858 | |
98 | * quit | |
99 | * % | |
100 | * | |
101 | * So a and b cannot overflow. To make sure each bit of input has some | |
102 | * effect on the contents of c and d, we can look at what the factors of | |
103 | * the coefficients in the equations for c_n and d_n are. The number of 2s | |
104 | * in the factors determines the lowest set bit in the multiplier. Running | |
105 | * through the cases for n*(n+1)/2 reveals that the highest power of 2 is | |
106 | * 2^14, and for n*(n+1)*(n+2)/6 it is 2^15. So while some data may overflow | |
107 | * the 64-bit accumulators, every bit of every f_i effects every accumulator, | |
108 | * even for 128k blocks. | |
109 | * | |
110 | * If we wanted to make a stronger version of fletcher4 (fletcher4c?), | |
111 | * we could do our calculations mod (2^32 - 1) by adding in the carries | |
112 | * periodically, and store the number of carries in the top 32-bits. | |
113 | * | |
114 | * -------------------- | |
115 | * Checksum Performance | |
116 | * -------------------- | |
117 | * | |
118 | * There are two interesting components to checksum performance: cached and | |
119 | * uncached performance. With cached data, fletcher-2 is about four times | |
120 | * faster than fletcher-4. With uncached data, the performance difference is | |
121 | * negligible, since the cost of a cache fill dominates the processing time. | |
122 | * Even though fletcher-4 is slower than fletcher-2, it is still a pretty | |
123 | * efficient pass over the data. | |
124 | * | |
125 | * In normal operation, the data which is being checksummed is in a buffer | |
126 | * which has been filled either by: | |
127 | * | |
128 | * 1. a compression step, which will be mostly cached, or | |
129 | * 2. a bcopy() or copyin(), which will be uncached (because the | |
130 | * copy is cache-bypassing). | |
131 | * | |
132 | * For both cached and uncached data, both fletcher checksums are much faster | |
133 | * than sha-256, and slower than 'off', which doesn't touch the data at all. | |
134 | */ | |
34dc7c2f BB |
135 | |
136 | #include <sys/types.h> | |
137 | #include <sys/sysmacros.h> | |
138 | #include <sys/byteorder.h> | |
139 | #include <sys/spa.h> | |
cae5b340 AX |
140 | #include <sys/zio_checksum.h> |
141 | #include <sys/zfs_context.h> | |
142 | #include <zfs_fletcher.h> | |
143 | ||
144 | #define FLETCHER_MIN_SIMD_SIZE 64 | |
145 | ||
146 | static void fletcher_4_scalar_init(fletcher_4_ctx_t *ctx); | |
147 | static void fletcher_4_scalar_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp); | |
148 | static void fletcher_4_scalar_native(fletcher_4_ctx_t *ctx, | |
149 | const void *buf, uint64_t size); | |
150 | static void fletcher_4_scalar_byteswap(fletcher_4_ctx_t *ctx, | |
151 | const void *buf, uint64_t size); | |
152 | static boolean_t fletcher_4_scalar_valid(void); | |
153 | ||
/* Portable pure-C reference implementation; always usable, never removed. */
static const fletcher_4_ops_t fletcher_4_scalar_ops = {
	.init_native = fletcher_4_scalar_init,
	.fini_native = fletcher_4_scalar_fini,
	.compute_native = fletcher_4_scalar_native,
	.init_byteswap = fletcher_4_scalar_init,
	.fini_byteswap = fletcher_4_scalar_fini,
	.compute_byteswap = fletcher_4_scalar_byteswap,
	.valid = fletcher_4_scalar_valid,
	.name = "scalar"
};

/*
 * Placeholder vtable for the "fastest" selector; its compute/init/fini
 * pointers are filled in at init time (benchmark winner in the kernel,
 * last supported impl in user space — see fletcher_4_init()).
 */
static fletcher_4_ops_t fletcher_4_fastest_impl = {
	.name = "fastest",
	.valid = fletcher_4_scalar_valid
};

/*
 * All implementations compiled into this build.  Runtime availability is
 * decided by each entry's valid() callback in fletcher_4_init().
 */
static const fletcher_4_ops_t *fletcher_4_impls[] = {
	&fletcher_4_scalar_ops,
	&fletcher_4_superscalar_ops,
	&fletcher_4_superscalar4_ops,
#if defined(HAVE_SSE2)
	&fletcher_4_sse2_ops,
#endif
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
	&fletcher_4_ssse3_ops,
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
	&fletcher_4_avx2_ops,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F)
	&fletcher_4_avx512f_ops,
#endif
#if defined(__aarch64__)
	&fletcher_4_aarch64_neon_ops,
#endif
};
34dc7c2f | 190 | |
cae5b340 AX |
191 | /* Hold all supported implementations */ |
192 | static uint32_t fletcher_4_supp_impls_cnt = 0; | |
193 | static fletcher_4_ops_t *fletcher_4_supp_impls[ARRAY_SIZE(fletcher_4_impls)]; | |
194 | ||
195 | /* Select fletcher4 implementation */ | |
196 | #define IMPL_FASTEST (UINT32_MAX) | |
197 | #define IMPL_CYCLE (UINT32_MAX - 1) | |
198 | #define IMPL_SCALAR (0) | |
199 | ||
200 | static uint32_t fletcher_4_impl_chosen = IMPL_FASTEST; | |
201 | ||
202 | #define IMPL_READ(i) (*(volatile uint32_t *) &(i)) | |
203 | ||
204 | static struct fletcher_4_impl_selector { | |
205 | const char *fis_name; | |
206 | uint32_t fis_sel; | |
207 | } fletcher_4_impl_selectors[] = { | |
208 | #if !defined(_KERNEL) | |
209 | { "cycle", IMPL_CYCLE }, | |
210 | #endif | |
211 | { "fastest", IMPL_FASTEST }, | |
212 | { "scalar", IMPL_SCALAR } | |
213 | }; | |
214 | ||
215 | static kstat_t *fletcher_4_kstat; | |
216 | ||
217 | static struct fletcher_4_kstat { | |
218 | uint64_t native; | |
219 | uint64_t byteswap; | |
220 | } fletcher_4_stat_data[ARRAY_SIZE(fletcher_4_impls) + 1]; | |
221 | ||
222 | /* Indicate that benchmark has been completed */ | |
223 | static boolean_t fletcher_4_initialized = B_FALSE; | |
224 | ||
225 | /*ARGSUSED*/ | |
34dc7c2f | 226 | void |
cae5b340 | 227 | fletcher_init(zio_cksum_t *zcp) |
34dc7c2f | 228 | { |
cae5b340 AX |
229 | ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0); |
230 | } | |
231 | ||
232 | int | |
233 | fletcher_2_incremental_native(void *buf, size_t size, void *data) | |
234 | { | |
235 | zio_cksum_t *zcp = data; | |
236 | ||
34dc7c2f BB |
237 | const uint64_t *ip = buf; |
238 | const uint64_t *ipend = ip + (size / sizeof (uint64_t)); | |
239 | uint64_t a0, b0, a1, b1; | |
240 | ||
cae5b340 AX |
241 | a0 = zcp->zc_word[0]; |
242 | a1 = zcp->zc_word[1]; | |
243 | b0 = zcp->zc_word[2]; | |
244 | b1 = zcp->zc_word[3]; | |
245 | ||
246 | for (; ip < ipend; ip += 2) { | |
34dc7c2f BB |
247 | a0 += ip[0]; |
248 | a1 += ip[1]; | |
249 | b0 += a0; | |
250 | b1 += a1; | |
251 | } | |
252 | ||
253 | ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1); | |
cae5b340 | 254 | return (0); |
34dc7c2f BB |
255 | } |
256 | ||
cae5b340 | 257 | /*ARGSUSED*/ |
34dc7c2f | 258 | void |
cae5b340 AX |
259 | fletcher_2_native(const void *buf, uint64_t size, |
260 | const void *ctx_template, zio_cksum_t *zcp) | |
34dc7c2f | 261 | { |
cae5b340 AX |
262 | fletcher_init(zcp); |
263 | (void) fletcher_2_incremental_native((void *) buf, size, zcp); | |
264 | } | |
265 | ||
266 | int | |
267 | fletcher_2_incremental_byteswap(void *buf, size_t size, void *data) | |
268 | { | |
269 | zio_cksum_t *zcp = data; | |
270 | ||
34dc7c2f BB |
271 | const uint64_t *ip = buf; |
272 | const uint64_t *ipend = ip + (size / sizeof (uint64_t)); | |
273 | uint64_t a0, b0, a1, b1; | |
274 | ||
cae5b340 AX |
275 | a0 = zcp->zc_word[0]; |
276 | a1 = zcp->zc_word[1]; | |
277 | b0 = zcp->zc_word[2]; | |
278 | b1 = zcp->zc_word[3]; | |
279 | ||
280 | for (; ip < ipend; ip += 2) { | |
34dc7c2f BB |
281 | a0 += BSWAP_64(ip[0]); |
282 | a1 += BSWAP_64(ip[1]); | |
283 | b0 += a0; | |
284 | b1 += a1; | |
285 | } | |
286 | ||
287 | ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1); | |
cae5b340 | 288 | return (0); |
34dc7c2f BB |
289 | } |
290 | ||
cae5b340 | 291 | /*ARGSUSED*/ |
34dc7c2f | 292 | void |
cae5b340 AX |
293 | fletcher_2_byteswap(const void *buf, uint64_t size, |
294 | const void *ctx_template, zio_cksum_t *zcp) | |
295 | { | |
296 | fletcher_init(zcp); | |
297 | (void) fletcher_2_incremental_byteswap((void *) buf, size, zcp); | |
298 | } | |
299 | ||
300 | static void | |
301 | fletcher_4_scalar_init(fletcher_4_ctx_t *ctx) | |
302 | { | |
303 | ZIO_SET_CHECKSUM(&ctx->scalar, 0, 0, 0, 0); | |
304 | } | |
305 | ||
306 | static void | |
307 | fletcher_4_scalar_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp) | |
308 | { | |
309 | memcpy(zcp, &ctx->scalar, sizeof (zio_cksum_t)); | |
310 | } | |
311 | ||
312 | static void | |
313 | fletcher_4_scalar_native(fletcher_4_ctx_t *ctx, const void *buf, | |
314 | uint64_t size) | |
34dc7c2f BB |
315 | { |
316 | const uint32_t *ip = buf; | |
317 | const uint32_t *ipend = ip + (size / sizeof (uint32_t)); | |
318 | uint64_t a, b, c, d; | |
319 | ||
cae5b340 AX |
320 | a = ctx->scalar.zc_word[0]; |
321 | b = ctx->scalar.zc_word[1]; | |
322 | c = ctx->scalar.zc_word[2]; | |
323 | d = ctx->scalar.zc_word[3]; | |
324 | ||
325 | for (; ip < ipend; ip++) { | |
34dc7c2f BB |
326 | a += ip[0]; |
327 | b += a; | |
328 | c += b; | |
329 | d += c; | |
330 | } | |
331 | ||
cae5b340 | 332 | ZIO_SET_CHECKSUM(&ctx->scalar, a, b, c, d); |
34dc7c2f BB |
333 | } |
334 | ||
cae5b340 AX |
335 | static void |
336 | fletcher_4_scalar_byteswap(fletcher_4_ctx_t *ctx, const void *buf, | |
337 | uint64_t size) | |
34dc7c2f BB |
338 | { |
339 | const uint32_t *ip = buf; | |
340 | const uint32_t *ipend = ip + (size / sizeof (uint32_t)); | |
341 | uint64_t a, b, c, d; | |
342 | ||
cae5b340 AX |
343 | a = ctx->scalar.zc_word[0]; |
344 | b = ctx->scalar.zc_word[1]; | |
345 | c = ctx->scalar.zc_word[2]; | |
346 | d = ctx->scalar.zc_word[3]; | |
347 | ||
348 | for (; ip < ipend; ip++) { | |
34dc7c2f BB |
349 | a += BSWAP_32(ip[0]); |
350 | b += a; | |
351 | c += b; | |
352 | d += c; | |
353 | } | |
354 | ||
cae5b340 AX |
355 | ZIO_SET_CHECKSUM(&ctx->scalar, a, b, c, d); |
356 | } | |
357 | ||
358 | static boolean_t | |
359 | fletcher_4_scalar_valid(void) | |
360 | { | |
361 | return (B_TRUE); | |
34dc7c2f BB |
362 | } |
363 | ||
cae5b340 AX |
/*
 * Select the fletcher-4 implementation by name.
 *
 * Accepts the selector names "fastest", "scalar" and (user space only)
 * "cycle", plus — once fletcher_4_init() has run — the name of any
 * supported implementation.  Trailing whitespace (e.g. the '\n' that
 * arrives via the module-parameter write path) is ignored.
 *
 * Returns 0 on success, -EINVAL for an unknown name.
 */
int
fletcher_4_impl_set(const char *val)
{
	int err = -EINVAL;
	uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
	size_t i, val_len;

	val_len = strlen(val);
	while ((val_len > 0) && !!isspace(val[val_len-1])) /* trim '\n' */
		val_len--;

	/* check mandatory implementations */
	for (i = 0; i < ARRAY_SIZE(fletcher_4_impl_selectors); i++) {
		const char *name = fletcher_4_impl_selectors[i].fis_name;

		if (val_len == strlen(name) &&
		    strncmp(val, name, val_len) == 0) {
			impl = fletcher_4_impl_selectors[i].fis_sel;
			err = 0;
			break;
		}
	}

	/*
	 * Per-implementation names are only meaningful after init has
	 * populated fletcher_4_supp_impls[], hence the initialized check.
	 */
	if (err != 0 && fletcher_4_initialized) {
		/* check all supported implementations */
		for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
			const char *name = fletcher_4_supp_impls[i]->name;

			if (val_len == strlen(name) &&
			    strncmp(val, name, val_len) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		/* publish the new selection before readers can observe it */
		atomic_swap_32(&fletcher_4_impl_chosen, impl);
		membar_producer();
	}

	return (err);
}
408 | ||
/*
 * Resolve the currently selected implementation to a concrete ops vector.
 * Lock-free: reads the selection once (volatile) and indexes the
 * already-published fletcher_4_supp_impls[] table.  The "cycle" selector
 * is test-only and therefore compiled out of kernel builds.
 */
static inline const fletcher_4_ops_t *
fletcher_4_impl_get(void)
{
	fletcher_4_ops_t *ops = NULL;
	const uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(fletcher_4_initialized);
		ops = &fletcher_4_fastest_impl;
		break;
#if !defined(_KERNEL)
	case IMPL_CYCLE: {
		ASSERT(fletcher_4_initialized);
		ASSERT3U(fletcher_4_supp_impls_cnt, >, 0);

		/* rotate through every supported impl on successive calls */
		static uint32_t cycle_count = 0;
		uint32_t idx = (++cycle_count) % fletcher_4_supp_impls_cnt;
		ops = fletcher_4_supp_impls[idx];
	}
	break;
#endif
	default:
		/* a specific implementation index was chosen */
		ASSERT3U(fletcher_4_supp_impls_cnt, >, 0);
		ASSERT3U(impl, <, fletcher_4_supp_impls_cnt);

		ops = fletcher_4_supp_impls[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}
443 | ||
444 | static inline void | |
445 | fletcher_4_native_impl(const void *buf, uint64_t size, zio_cksum_t *zcp) | |
446 | { | |
447 | fletcher_4_ctx_t ctx; | |
448 | const fletcher_4_ops_t *ops = fletcher_4_impl_get(); | |
449 | ||
450 | ops->init_native(&ctx); | |
451 | ops->compute_native(&ctx, buf, size); | |
452 | ops->fini_native(&ctx, zcp); | |
453 | } | |
454 | ||
/*
 * One-shot native-endian fletcher-4.
 *
 * SIMD implementations only handle multiples of FLETCHER_MIN_SIMD_SIZE,
 * so checksum the largest such prefix with the selected implementation
 * and finish the 4-byte-aligned tail with the scalar code, which updates
 * *zcp incrementally in place.  Buffers smaller than one SIMD chunk go
 * straight to the scalar path.
 */
/*ARGSUSED*/
void
fletcher_4_native(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	const uint64_t p2size = P2ALIGN(size, FLETCHER_MIN_SIMD_SIZE);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0 || p2size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

		if (size > 0)
			fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
			    buf, size);
	} else {
		fletcher_4_native_impl(buf, p2size, zcp);

		/* scalar pass over the sub-SIMD-sized tail, if any */
		if (p2size < size)
			fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
			    (char *)buf + p2size, size - p2size);
	}
}
478 | ||
479 | void | |
480 | fletcher_4_native_varsize(const void *buf, uint64_t size, zio_cksum_t *zcp) | |
481 | { | |
482 | ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0); | |
483 | fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size); | |
484 | } | |
34dc7c2f | 485 | |
cae5b340 AX |
486 | static inline void |
487 | fletcher_4_byteswap_impl(const void *buf, uint64_t size, zio_cksum_t *zcp) | |
488 | { | |
489 | fletcher_4_ctx_t ctx; | |
490 | const fletcher_4_ops_t *ops = fletcher_4_impl_get(); | |
491 | ||
492 | ops->init_byteswap(&ctx); | |
493 | ops->compute_byteswap(&ctx, buf, size); | |
494 | ops->fini_byteswap(&ctx, zcp); | |
34dc7c2f BB |
495 | } |
496 | ||
cae5b340 | 497 | /*ARGSUSED*/ |
34dc7c2f | 498 | void |
cae5b340 AX |
499 | fletcher_4_byteswap(const void *buf, uint64_t size, |
500 | const void *ctx_template, zio_cksum_t *zcp) | |
501 | { | |
502 | const uint64_t p2size = P2ALIGN(size, FLETCHER_MIN_SIMD_SIZE); | |
503 | ||
504 | ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t))); | |
505 | ||
506 | if (size == 0 || p2size == 0) { | |
507 | ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0); | |
508 | ||
509 | if (size > 0) | |
510 | fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp, | |
511 | buf, size); | |
512 | } else { | |
513 | fletcher_4_byteswap_impl(buf, p2size, zcp); | |
514 | ||
515 | if (p2size < size) | |
516 | fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp, | |
517 | (char *)buf + p2size, size - p2size); | |
518 | } | |
519 | } | |
520 | ||
521 | /* Incremental Fletcher 4 */ | |
522 | ||
523 | #define ZFS_FLETCHER_4_INC_MAX_SIZE (8ULL << 20) | |
524 | ||
/*
 * Fold the checksum 'nzcp' of a trailing 'size'-byte chunk into the
 * running checksum 'zcp', producing the checksum of the concatenation.
 * From the fletcher-4 series, after appending n = c1 words each old
 * accumulator feeds the higher-order ones with coefficients
 * c1 = n, c2 = n(n+1)/2 and c3 = n(n+1)(n+2)/6.
 */
static inline void
fletcher_4_incremental_combine(zio_cksum_t *zcp, const uint64_t size,
    const zio_cksum_t *nzcp)
{
	const uint64_t c1 = size / sizeof (uint32_t);
	const uint64_t c2 = c1 * (c1 + 1) / 2;
	const uint64_t c3 = c2 * (c1 + 2) / 3;

	/*
	 * Value of 'c3' overflows on buffer sizes close to 16MiB. For that
	 * reason we split incremental fletcher4 computation of large buffers
	 * to steps of (ZFS_FLETCHER_4_INC_MAX_SIZE) size.
	 */
	ASSERT3U(size, <=, ZFS_FLETCHER_4_INC_MAX_SIZE);

	/* update highest-order terms first: they read the old lower terms */
	zcp->zc_word[3] += nzcp->zc_word[3] + c1 * zcp->zc_word[2] +
	    c2 * zcp->zc_word[1] + c3 * zcp->zc_word[0];
	zcp->zc_word[2] += nzcp->zc_word[2] + c1 * zcp->zc_word[1] +
	    c2 * zcp->zc_word[0];
	zcp->zc_word[1] += nzcp->zc_word[1] + c1 * zcp->zc_word[0];
	zcp->zc_word[0] += nzcp->zc_word[0];
}
547 | ||
548 | static inline void | |
549 | fletcher_4_incremental_impl(boolean_t native, const void *buf, uint64_t size, | |
34dc7c2f BB |
550 | zio_cksum_t *zcp) |
551 | { | |
cae5b340 AX |
552 | while (size > 0) { |
553 | zio_cksum_t nzc; | |
554 | uint64_t len = MIN(size, ZFS_FLETCHER_4_INC_MAX_SIZE); | |
34dc7c2f | 555 | |
cae5b340 AX |
556 | if (native) |
557 | fletcher_4_native(buf, len, NULL, &nzc); | |
558 | else | |
559 | fletcher_4_byteswap(buf, len, NULL, &nzc); | |
34dc7c2f | 560 | |
cae5b340 AX |
561 | fletcher_4_incremental_combine(zcp, len, &nzc); |
562 | ||
563 | size -= len; | |
564 | buf += len; | |
34dc7c2f | 565 | } |
cae5b340 | 566 | } |
34dc7c2f | 567 | |
cae5b340 AX |
568 | int |
569 | fletcher_4_incremental_native(void *buf, size_t size, void *data) | |
570 | { | |
571 | zio_cksum_t *zcp = data; | |
572 | /* Use scalar impl to directly update cksum of small blocks */ | |
573 | if (size < SPA_MINBLOCKSIZE) | |
574 | fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size); | |
575 | else | |
576 | fletcher_4_incremental_impl(B_TRUE, buf, size, zcp); | |
577 | return (0); | |
34dc7c2f | 578 | } |
c28b2279 | 579 | |
cae5b340 AX |
580 | int |
581 | fletcher_4_incremental_byteswap(void *buf, size_t size, void *data) | |
582 | { | |
583 | zio_cksum_t *zcp = data; | |
584 | /* Use scalar impl to directly update cksum of small blocks */ | |
585 | if (size < SPA_MINBLOCKSIZE) | |
586 | fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp, buf, size); | |
587 | else | |
588 | fletcher_4_incremental_impl(B_FALSE, buf, size, zcp); | |
589 | return (0); | |
590 | } | |
591 | ||
592 | ||
593 | /* Fletcher 4 kstats */ | |
594 | ||
/*
 * Emit the kstat column headers ("implementation", "native", "byteswap").
 * Every snprintf is bounded by 'size - off' — the space actually
 * remaining — rather than the original first call's bare 'size', keeping
 * the bounding discipline consistent across all three writes.
 */
static int
fletcher_4_kstat_headers(char *buf, size_t size)
{
	ssize_t off = 0;

	off += snprintf(buf + off, size - off, "%-17s", "implementation");
	off += snprintf(buf + off, size - off, "%-15s", "native");
	(void) snprintf(buf + off, size - off, "%-15s\n", "byteswap");

	return (0);
}
606 | ||
/*
 * Format one kstat row.  'data' points either at a per-implementation
 * entry (two bandwidth numbers) or at the sentinel "fastest" entry,
 * whose native/byteswap fields hold winning implementation *indices*
 * rather than bandwidths.
 */
static int
fletcher_4_kstat_data(char *buf, size_t size, void *data)
{
	struct fletcher_4_kstat *fastest_stat =
	    &fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
	struct fletcher_4_kstat *curr_stat = (struct fletcher_4_kstat *)data;
	ssize_t off = 0;

	if (curr_stat == fastest_stat) {
		/* "fastest" row: print the winners' names, not bandwidths */
		off += snprintf(buf + off, size - off, "%-17s", "fastest");
		off += snprintf(buf + off, size - off, "%-15s",
		    fletcher_4_supp_impls[fastest_stat->native]->name);
		off += snprintf(buf + off, size - off, "%-15s\n",
		    fletcher_4_supp_impls[fastest_stat->byteswap]->name);
	} else {
		/* regular row: impl name plus native/byteswap B/s figures */
		ptrdiff_t id = curr_stat - fletcher_4_stat_data;

		off += snprintf(buf + off, size - off, "%-17s",
		    fletcher_4_supp_impls[id]->name);
		off += snprintf(buf + off, size - off, "%-15llu",
		    (u_longlong_t)curr_stat->native);
		off += snprintf(buf + off, size - off, "%-15llu\n",
		    (u_longlong_t)curr_stat->byteswap);
	}

	return (0);
}
634 | ||
635 | static void * | |
636 | fletcher_4_kstat_addr(kstat_t *ksp, loff_t n) | |
637 | { | |
638 | if (n <= fletcher_4_supp_impls_cnt) | |
639 | ksp->ks_private = (void *) (fletcher_4_stat_data + n); | |
640 | else | |
641 | ksp->ks_private = NULL; | |
642 | ||
643 | return (ksp->ks_private); | |
644 | } | |
645 | ||
646 | #define FLETCHER_4_FASTEST_FN_COPY(type, src) \ | |
647 | { \ | |
648 | fletcher_4_fastest_impl.init_ ## type = src->init_ ## type; \ | |
649 | fletcher_4_fastest_impl.fini_ ## type = src->fini_ ## type; \ | |
650 | fletcher_4_fastest_impl.compute_ ## type = src->compute_ ## type; \ | |
651 | } | |
652 | ||
653 | #define FLETCHER_4_BENCH_NS (MSEC2NSEC(50)) /* 50ms */ | |
654 | ||
655 | typedef void fletcher_checksum_func_t(const void *, uint64_t, const void *, | |
656 | zio_cksum_t *); | |
657 | ||
/*
 * Benchmark every supported implementation over 'data' (data_size bytes)
 * in one direction ('native' selects native vs byteswap).  Per-impl
 * throughput is recorded in fletcher_4_stat_data[]; the fastest
 * implementation's index and function pointers are copied into the
 * "fastest" sentinel slot / fletcher_4_fastest_impl.  The caller's
 * implementation selection is restored before returning.
 */
static void
fletcher_4_benchmark_impl(boolean_t native, char *data, uint64_t data_size)
{

	struct fletcher_4_kstat *fastest_stat =
	    &fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
	hrtime_t start;
	uint64_t run_bw, run_time_ns, best_run = 0;
	zio_cksum_t zc;
	uint32_t i, l, sel_save = IMPL_READ(fletcher_4_impl_chosen);


	fletcher_checksum_func_t *fletcher_4_test = native ?
	    fletcher_4_native : fletcher_4_byteswap;

	for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
		struct fletcher_4_kstat *stat = &fletcher_4_stat_data[i];
		uint64_t run_count = 0;

		/* temporary set an implementation */
		fletcher_4_impl_chosen = i;

		/* disable preemption so wall time measures only this loop */
		kpreempt_disable();
		start = gethrtime();
		do {
			/* batch 32 calls per clock read to amortize timing */
			for (l = 0; l < 32; l++, run_count++)
				fletcher_4_test(data, data_size, NULL, &zc);

			run_time_ns = gethrtime() - start;
		} while (run_time_ns < FLETCHER_4_BENCH_NS);
		kpreempt_enable();

		run_bw = data_size * run_count * NANOSEC;
		run_bw /= run_time_ns;	/* B/s */

		if (native)
			stat->native = run_bw;
		else
			stat->byteswap = run_bw;

		if (run_bw > best_run) {
			best_run = run_bw;

			if (native) {
				fastest_stat->native = i;
				FLETCHER_4_FASTEST_FN_COPY(native,
				    fletcher_4_supp_impls[i]);
			} else {
				fastest_stat->byteswap = i;
				FLETCHER_4_FASTEST_FN_COPY(byteswap,
				    fletcher_4_supp_impls[i]);
			}
		}
	}

	/* restore original selection */
	atomic_swap_32(&fletcher_4_impl_chosen, sel_save);
}
716 | ||
/*
 * Module initialization: probe which implementations are usable on this
 * CPU, benchmark them (kernel builds only), select the fastest, and
 * publish the results through the "fletcher_4_bench" kstat.  Must
 * complete before any selector other than "scalar" may be resolved
 * (see the ASSERTs in fletcher_4_impl_get()).
 */
void
fletcher_4_init(void)
{
	static const size_t data_size = 1 << SPA_OLD_MAXBLOCKSHIFT; /* 128kiB */
	fletcher_4_ops_t *curr_impl;
	char *databuf;
	int i, c;

	/* move supported impl into fletcher_4_supp_impls */
	for (i = 0, c = 0; i < ARRAY_SIZE(fletcher_4_impls); i++) {
		curr_impl = (fletcher_4_ops_t *)fletcher_4_impls[i];

		if (curr_impl->valid && curr_impl->valid())
			fletcher_4_supp_impls[c++] = curr_impl;
	}
	membar_producer();	/* complete fletcher_4_supp_impls[] init */
	fletcher_4_supp_impls_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use last implementation as fastest */
	memcpy(&fletcher_4_fastest_impl,
	    fletcher_4_supp_impls[fletcher_4_supp_impls_cnt-1],
	    sizeof (fletcher_4_fastest_impl));
	fletcher_4_fastest_impl.name = "fastest";
	membar_producer();

	fletcher_4_initialized = B_TRUE;
	/* user space returns here: no benchmark, no kstat */
	return;
#endif
	/* Benchmark all supported implementations */
	databuf = vmem_alloc(data_size, KM_SLEEP);
	for (i = 0; i < data_size / sizeof (uint64_t); i++)
		((uint64_t *)databuf)[i] = (uintptr_t)(databuf+i); /* warm-up */

	fletcher_4_benchmark_impl(B_FALSE, databuf, data_size);
	fletcher_4_benchmark_impl(B_TRUE, databuf, data_size);

	vmem_free(databuf, data_size);

	/* install kstats for all implementations */
	fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
	if (fletcher_4_kstat != NULL) {
		fletcher_4_kstat->ks_data = NULL;
		fletcher_4_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(fletcher_4_kstat,
		    fletcher_4_kstat_headers,
		    fletcher_4_kstat_data,
		    fletcher_4_kstat_addr);
		kstat_install(fletcher_4_kstat);
	}

	/* Finish initialization */
	fletcher_4_initialized = B_TRUE;
}
772 | ||
773 | void | |
774 | fletcher_4_fini(void) | |
775 | { | |
776 | if (fletcher_4_kstat != NULL) { | |
777 | kstat_delete(fletcher_4_kstat); | |
778 | fletcher_4_kstat = NULL; | |
779 | } | |
780 | } | |
781 | ||
782 | /* ABD adapters */ | |
783 | ||
784 | static void | |
785 | abd_fletcher_4_init(zio_abd_checksum_data_t *cdp) | |
786 | { | |
787 | const fletcher_4_ops_t *ops = fletcher_4_impl_get(); | |
788 | cdp->acd_private = (void *) ops; | |
789 | ||
790 | if (cdp->acd_byteorder == ZIO_CHECKSUM_NATIVE) | |
791 | ops->init_native(cdp->acd_ctx); | |
792 | else | |
793 | ops->init_byteswap(cdp->acd_ctx); | |
794 | } | |
795 | ||
796 | static void | |
797 | abd_fletcher_4_fini(zio_abd_checksum_data_t *cdp) | |
798 | { | |
799 | fletcher_4_ops_t *ops = (fletcher_4_ops_t *)cdp->acd_private; | |
800 | ||
801 | ASSERT(ops); | |
802 | ||
803 | if (cdp->acd_byteorder == ZIO_CHECKSUM_NATIVE) | |
804 | ops->fini_native(cdp->acd_ctx, cdp->acd_zcp); | |
805 | else | |
806 | ops->fini_byteswap(cdp->acd_ctx, cdp->acd_zcp); | |
807 | } | |
808 | ||
/*
 * Downgrade an in-flight ABD checksum from a SIMD implementation to the
 * scalar one: finalize the SIMD context into the zio checksum, record
 * scalar ops in acd_private (so fini and later iter calls stay scalar),
 * then fold the sub-SIMD-sized remainder in incrementally.
 */
static void
abd_fletcher_4_simd2scalar(boolean_t native, void *data, size_t size,
    zio_abd_checksum_data_t *cdp)
{
	zio_cksum_t *zcp = cdp->acd_zcp;

	ASSERT3U(size, <, FLETCHER_MIN_SIMD_SIZE);

	abd_fletcher_4_fini(cdp);
	cdp->acd_private = (void *)&fletcher_4_scalar_ops;

	if (native)
		fletcher_4_incremental_native(data, size, zcp);
	else
		fletcher_4_incremental_byteswap(data, size, zcp);
}
825 | ||
/*
 * Per-segment ABD iterator callback.  The largest
 * FLETCHER_MIN_SIMD_SIZE-aligned prefix of the segment goes through the
 * currently recorded implementation; any remaining tail (< 64 bytes)
 * forces a permanent switch to the scalar implementation for the rest
 * of this checksum.  Always returns 0 to continue iteration.
 */
static int
abd_fletcher_4_iter(void *data, size_t size, void *private)
{
	zio_abd_checksum_data_t *cdp = (zio_abd_checksum_data_t *)private;
	fletcher_4_ctx_t *ctx = cdp->acd_ctx;
	fletcher_4_ops_t *ops = (fletcher_4_ops_t *)cdp->acd_private;
	boolean_t native = cdp->acd_byteorder == ZIO_CHECKSUM_NATIVE;
	uint64_t asize = P2ALIGN(size, FLETCHER_MIN_SIMD_SIZE);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (asize > 0) {
		if (native)
			ops->compute_native(ctx, data, asize);
		else
			ops->compute_byteswap(ctx, data, asize);

		size -= asize;
		data = (char *)data + asize;
	}

	if (size > 0) {
		ASSERT3U(size, <, FLETCHER_MIN_SIMD_SIZE);
		/* At this point we have to switch to scalar impl */
		abd_fletcher_4_simd2scalar(native, data, size, cdp);
	}

	return (0);
}
855 | ||
/* ABD checksum vtable: fletcher-4 over scattered (ABD) buffers. */
zio_abd_checksum_func_t fletcher_4_abd_ops = {
	.acf_init = abd_fletcher_4_init,
	.acf_fini = abd_fletcher_4_fini,
	.acf_iter = abd_fletcher_4_iter
};
861 | ||
862 | ||
c28b2279 | 863 | #if defined(_KERNEL) && defined(HAVE_SPL) |
cae5b340 AX |
864 | #include <linux/mod_compat.h> |
865 | ||
/*
 * Module-parameter 'show' hook: print every selectable name, bracketing
 * the active one, e.g. "[fastest] scalar superscalar ...".  Returns the
 * number of bytes written.
 */
static int
fletcher_4_param_get(char *buffer, zfs_kernel_param_t *unused)
{
	const uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
	char *fmt;
	int i, cnt = 0;

	/* list fastest */
	fmt = (impl == IMPL_FASTEST) ? "[%s] " : "%s ";
	cnt += sprintf(buffer + cnt, fmt, "fastest");

	/* list all supported implementations */
	for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += sprintf(buffer + cnt, fmt,
		    fletcher_4_supp_impls[i]->name);
	}

	return (cnt);
}
886 | ||
887 | static int | |
888 | fletcher_4_param_set(const char *val, zfs_kernel_param_t *unused) | |
889 | { | |
890 | return (fletcher_4_impl_set(val)); | |
891 | } | |
892 | ||
893 | /* | |
894 | * Choose a fletcher 4 implementation in ZFS. | |
895 | * Users can choose "cycle" to exercise all implementations, but this is | |
896 | * for testing purpose therefore it can only be set in user space. | |
897 | */ | |
898 | module_param_call(zfs_fletcher_4_impl, | |
899 | fletcher_4_param_set, fletcher_4_param_get, NULL, 0644); | |
900 | MODULE_PARM_DESC(zfs_fletcher_4_impl, "Select fletcher 4 implementation."); | |
901 | ||
902 | EXPORT_SYMBOL(fletcher_init); | |
903 | EXPORT_SYMBOL(fletcher_2_incremental_native); | |
904 | EXPORT_SYMBOL(fletcher_2_incremental_byteswap); | |
905 | EXPORT_SYMBOL(fletcher_4_init); | |
906 | EXPORT_SYMBOL(fletcher_4_fini); | |
c28b2279 BB |
907 | EXPORT_SYMBOL(fletcher_2_native); |
908 | EXPORT_SYMBOL(fletcher_2_byteswap); | |
909 | EXPORT_SYMBOL(fletcher_4_native); | |
cae5b340 | 910 | EXPORT_SYMBOL(fletcher_4_native_varsize); |
c28b2279 BB |
911 | EXPORT_SYMBOL(fletcher_4_byteswap); |
912 | EXPORT_SYMBOL(fletcher_4_incremental_native); | |
913 | EXPORT_SYMBOL(fletcher_4_incremental_byteswap); | |
cae5b340 | 914 | EXPORT_SYMBOL(fletcher_4_abd_ops); |
c28b2279 | 915 | #endif |