/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
/*
 * ZFS's 2nd and 4th order Fletcher checksums are defined by the following
 * recurrence relations:
 *
 *	a_i = a_(i-1) + f_(i-1)
 *	b_i = b_(i-1) + a_i
 *	c_i = c_(i-1) + b_i		(fletcher-4 only)
 *	d_i = d_(i-1) + c_i		(fletcher-4 only)
 *
 * Where
 *	a_0 = b_0 = c_0 = d_0 = 0
 * and
 *	f_0 .. f_(n-1) are the input data.
 *
 * Using standard techniques, these translate into the following series:
 *
 *	a_n = SUM[i=1..n] f_(n-i)
 *	b_n = SUM[i=1..n] i * f_(n-i)
 *	c_n = SUM[i=1..n] (i*(i+1)/2) * f_(n-i)
 *	d_n = SUM[i=1..n] (i*(i+1)*(i+2)/6) * f_(n-i)
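 *
 *	For example, expanding the series for n = 3 gives:
 *
 *	a_3 = f_0 + f_1 + f_2
 *	b_3 = 3*f_0 + 2*f_1 + 1*f_2
 *	c_3 = 6*f_0 + 3*f_1 + 1*f_2
 *	d_3 = 10*f_0 + 4*f_1 + 1*f_2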
 *
 * For fletcher-2, the f_is are 64-bit, and [ab]_i are 64-bit accumulators.
 * Since the additions are done mod (2^64), errors in the high bits may not
 * be noticed.  For this reason, fletcher-2 is deprecated.
 *
 * For fletcher-4, the f_is are 32-bit, and [abcd]_i are 64-bit accumulators.
 * A conservative estimate of how big the buffer can get before we overflow
 * can be made by using f_i = 0xffffffff for all i (in bc syntax):
 *
 *	f=2^32-1;d=0; for (i = 1; d<2^64; i++) { d += f*i*(i+1)*(i+2)/6 }; (i-1)*4
 *
 * So blocks of up to 2k will not overflow.  Our largest block size is
 * 128k, which has 32k 4-byte words, so we can compute the largest possible
 * accumulators, then divide by 2^64 to figure the max amount of overflow:
 *
 *	a=b=c=d=0; f=2^32-1; for (i=1; i<=32*1024; i++) { a+=f; b+=a; c+=b; d+=c }
 *	a/2^64;b/2^64;c/2^64;d/2^64
 *
 * So a and b cannot overflow.  To make sure each bit of input has some
 * effect on the contents of c and d, we can look at what the factors of
 * the coefficients in the equations for c_n and d_n are.  The number of 2s
 * in the factors determines the lowest set bit in the multiplier.  Running
 * through the cases for n*(n+1)/2 reveals that the highest power of 2 is
 * 2^14, and for n*(n+1)*(n+2)/6 it is 2^15.  So while some data may overflow
 * the 64-bit accumulators, every bit of every f_i affects every accumulator,
 * even for 128k blocks.
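 *
 *	An illustrative check of those powers of two (a standalone C sketch,
 *	not part of this file) scans the coefficients and tracks the largest
 *	low-order set bit seen:
 *
 *	uint64_t hi_c = 0, hi_d = 0;
 *	for (uint64_t n = 1; n <= 32 * 1024; n++) {
 *		uint64_t c = n * (n + 1) / 2;
 *		uint64_t d = n * (n + 1) * (n + 2) / 6;
 *		if ((c & -c) > hi_c)
 *			hi_c = c & -c;		(ends up as 2^14)
 *		if ((d & -d) > hi_d)
 *			hi_d = d & -d;		(ends up as 2^15)
 *	}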
 *
 * If we wanted to make a stronger version of fletcher4 (fletcher4c?),
 * we could do our calculations mod (2^32 - 1) by adding in the carries
 * periodically, and store the number of carries in the top 32-bits.
 *
 * --------------------
 * Checksum Performance
 * --------------------
 *
 * There are two interesting components to checksum performance: cached and
 * uncached performance.  With cached data, fletcher-2 is about four times
 * faster than fletcher-4.  With uncached data, the performance difference is
 * negligible, since the cost of a cache fill dominates the processing time.
 * Even though fletcher-4 is slower than fletcher-2, it is still a pretty
 * efficient pass over the data.
 *
 * In normal operation, the data being checksummed is in a buffer that has
 * been filled either by:
 *
 *	1. a compression step, which will be mostly cached, or
 *	2. a bcopy() or copyin(), which will be uncached
 *	   (because the copy is cache-bypassing).
 *
 * For both cached and uncached data, both fletcher checksums are much faster
 * than sha-256, and slower than 'off', which doesn't touch the data at all.
 */
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/byteorder.h>
#include <sys/spa.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <zfs_fletcher.h>

#define	FLETCHER_MIN_SIMD_SIZE	64
static void fletcher_4_scalar_init(fletcher_4_ctx_t *ctx);
static void fletcher_4_scalar_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp);
static void fletcher_4_scalar_native(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size);
static void fletcher_4_scalar_byteswap(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size);
static boolean_t fletcher_4_scalar_valid(void);

static const fletcher_4_ops_t fletcher_4_scalar_ops = {
	.init_native = fletcher_4_scalar_init,
	.fini_native = fletcher_4_scalar_fini,
	.compute_native = fletcher_4_scalar_native,
	.init_byteswap = fletcher_4_scalar_init,
	.fini_byteswap = fletcher_4_scalar_fini,
	.compute_byteswap = fletcher_4_scalar_byteswap,
	.valid = fletcher_4_scalar_valid,
	.name = "scalar"
};
static fletcher_4_ops_t fletcher_4_fastest_impl = {
	.name = "fastest",
	.valid = fletcher_4_scalar_valid
};
static const fletcher_4_ops_t *fletcher_4_impls[] = {
	&fletcher_4_scalar_ops,
	&fletcher_4_superscalar_ops,
	&fletcher_4_superscalar4_ops,
#if defined(HAVE_SSE2)
	&fletcher_4_sse2_ops,
#endif
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
	&fletcher_4_ssse3_ops,
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
	&fletcher_4_avx2_ops,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F)
	&fletcher_4_avx512f_ops,
#endif
#if defined(__aarch64__)
	&fletcher_4_aarch64_neon_ops,
#endif
};
/* Hold all supported implementations */
static uint32_t fletcher_4_supp_impls_cnt = 0;
static fletcher_4_ops_t *fletcher_4_supp_impls[ARRAY_SIZE(fletcher_4_impls)];

/* Select fletcher4 implementation */
#define	IMPL_FASTEST	(UINT32_MAX)
#define	IMPL_CYCLE	(UINT32_MAX - 1)
#define	IMPL_SCALAR	(0)

static uint32_t fletcher_4_impl_chosen = IMPL_FASTEST;

#define	IMPL_READ(i)	(*(volatile uint32_t *) &(i))
static struct fletcher_4_impl_selector {
	const char *fis_name;
	uint32_t fis_sel;
} fletcher_4_impl_selectors[] = {
#if !defined(_KERNEL)
	{ "cycle",	IMPL_CYCLE },
#endif
	{ "fastest",	IMPL_FASTEST },
	{ "scalar",	IMPL_SCALAR }
};
static kstat_t *fletcher_4_kstat;

static struct fletcher_4_kstat {
	uint64_t native;
	uint64_t byteswap;
} fletcher_4_stat_data[ARRAY_SIZE(fletcher_4_impls) + 1];

/* Indicate that benchmark has been completed */
static boolean_t fletcher_4_initialized = B_FALSE;
void
fletcher_init(zio_cksum_t *zcp)
{
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}
int
fletcher_2_incremental_native(void *buf, size_t size, void *data)
{
	zio_cksum_t *zcp = data;

	const uint64_t *ip = buf;
	const uint64_t *ipend = ip + (size / sizeof (uint64_t));
	uint64_t a0, b0, a1, b1;

	a0 = zcp->zc_word[0];
	a1 = zcp->zc_word[1];
	b0 = zcp->zc_word[2];
	b1 = zcp->zc_word[3];

	for (; ip < ipend; ip += 2) {
		a0 += ip[0];
		a1 += ip[1];
		b0 += a0;
		b1 += a1;
	}

	ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);
	return (0);
}
void
fletcher_2_native(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	fletcher_init(zcp);
	(void) fletcher_2_incremental_native((void *) buf, size, zcp);
}
int
fletcher_2_incremental_byteswap(void *buf, size_t size, void *data)
{
	zio_cksum_t *zcp = data;

	const uint64_t *ip = buf;
	const uint64_t *ipend = ip + (size / sizeof (uint64_t));
	uint64_t a0, b0, a1, b1;

	a0 = zcp->zc_word[0];
	a1 = zcp->zc_word[1];
	b0 = zcp->zc_word[2];
	b1 = zcp->zc_word[3];

	for (; ip < ipend; ip += 2) {
		a0 += BSWAP_64(ip[0]);
		a1 += BSWAP_64(ip[1]);
		b0 += a0;
		b1 += a1;
	}

	ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);
	return (0);
}
void
fletcher_2_byteswap(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	fletcher_init(zcp);
	(void) fletcher_2_incremental_byteswap((void *) buf, size, zcp);
}
static void
fletcher_4_scalar_init(fletcher_4_ctx_t *ctx)
{
	ZIO_SET_CHECKSUM(&ctx->scalar, 0, 0, 0, 0);
}
static void
fletcher_4_scalar_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
	memcpy(zcp, &ctx->scalar, sizeof (zio_cksum_t));
}
static void
fletcher_4_scalar_native(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
	const uint32_t *ip = buf;
	const uint32_t *ipend = ip + (size / sizeof (uint32_t));
	uint64_t a, b, c, d;

	a = ctx->scalar.zc_word[0];
	b = ctx->scalar.zc_word[1];
	c = ctx->scalar.zc_word[2];
	d = ctx->scalar.zc_word[3];

	for (; ip < ipend; ip++) {
		a += ip[0];
		b += a;
		c += b;
		d += c;
	}

	ZIO_SET_CHECKSUM(&ctx->scalar, a, b, c, d);
}
static void
fletcher_4_scalar_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
	const uint32_t *ip = buf;
	const uint32_t *ipend = ip + (size / sizeof (uint32_t));
	uint64_t a, b, c, d;

	a = ctx->scalar.zc_word[0];
	b = ctx->scalar.zc_word[1];
	c = ctx->scalar.zc_word[2];
	d = ctx->scalar.zc_word[3];

	for (; ip < ipend; ip++) {
		a += BSWAP_32(ip[0]);
		b += a;
		c += b;
		d += c;
	}

	ZIO_SET_CHECKSUM(&ctx->scalar, a, b, c, d);
}
static boolean_t
fletcher_4_scalar_valid(void)
{
	return (B_TRUE);
}
int
fletcher_4_impl_set(const char *val)
{
	int err = -EINVAL;
	uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
	size_t i, val_len;

	val_len = strlen(val);
	while ((val_len > 0) && !!isspace(val[val_len-1])) /* trim '\n' */
		val_len--;

	/* check mandatory implementations */
	for (i = 0; i < ARRAY_SIZE(fletcher_4_impl_selectors); i++) {
		const char *name = fletcher_4_impl_selectors[i].fis_name;

		if (val_len == strlen(name) &&
		    strncmp(val, name, val_len) == 0) {
			impl = fletcher_4_impl_selectors[i].fis_sel;
			err = 0;
			break;
		}
	}

	if (err != 0 && fletcher_4_initialized) {
		/* check all supported implementations */
		for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
			const char *name = fletcher_4_supp_impls[i]->name;

			if (val_len == strlen(name) &&
			    strncmp(val, name, val_len) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		atomic_swap_32(&fletcher_4_impl_chosen, impl);
		membar_producer();
	}

	return (err);
}
static inline const fletcher_4_ops_t *
fletcher_4_impl_get(void)
{
	fletcher_4_ops_t *ops = NULL;
	const uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(fletcher_4_initialized);
		ops = &fletcher_4_fastest_impl;
		break;
#if !defined(_KERNEL)
	case IMPL_CYCLE: {
		ASSERT(fletcher_4_initialized);
		ASSERT3U(fletcher_4_supp_impls_cnt, >, 0);

		static uint32_t cycle_count = 0;
		uint32_t idx = (++cycle_count) % fletcher_4_supp_impls_cnt;
		ops = fletcher_4_supp_impls[idx];
		break;
	}
#endif
	default:
		ASSERT3U(fletcher_4_supp_impls_cnt, >, 0);
		ASSERT3U(impl, <, fletcher_4_supp_impls_cnt);

		ops = fletcher_4_supp_impls[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}
static void
fletcher_4_native_impl(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	fletcher_4_ctx_t ctx;
	const fletcher_4_ops_t *ops = fletcher_4_impl_get();

	ops->init_native(&ctx);
	ops->compute_native(&ctx, buf, size);
	ops->fini_native(&ctx, zcp);
}
void
fletcher_4_native(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	const uint64_t p2size = P2ALIGN(size, FLETCHER_MIN_SIMD_SIZE);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0 || p2size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

		if (size > 0)
			fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
			    buf, size);
	} else {
		fletcher_4_native_impl(buf, p2size, zcp);

		if (p2size < size)
			fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
			    (char *)buf + p2size, size - p2size);
	}
}
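
/*
 * For reference, a typical one-shot use of the function above is simply
 * (illustrative sketch; the buffer length must be a multiple of four bytes,
 * and ctx_template is unused here, so callers pass NULL):
 *
 *	zio_cksum_t zc;
 *	fletcher_4_native(data, size, NULL, &zc);
 */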
void
fletcher_4_native_varsize(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
	fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size);
}
static void
fletcher_4_byteswap_impl(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	fletcher_4_ctx_t ctx;
	const fletcher_4_ops_t *ops = fletcher_4_impl_get();

	ops->init_byteswap(&ctx);
	ops->compute_byteswap(&ctx, buf, size);
	ops->fini_byteswap(&ctx, zcp);
}
void
fletcher_4_byteswap(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	const uint64_t p2size = P2ALIGN(size, FLETCHER_MIN_SIMD_SIZE);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0 || p2size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

		if (size > 0)
			fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
			    buf, size);
	} else {
		fletcher_4_byteswap_impl(buf, p2size, zcp);

		if (p2size < size)
			fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
			    (char *)buf + p2size, size - p2size);
	}
}
/* Incremental Fletcher 4 */

#define	ZFS_FLETCHER_4_INC_MAX_SIZE	(8ULL << 20)
static inline void
fletcher_4_incremental_combine(zio_cksum_t *zcp, const uint64_t size,
    const zio_cksum_t *nzcp)
{
	const uint64_t c1 = size / sizeof (uint32_t);
	const uint64_t c2 = c1 * (c1 + 1) / 2;
	const uint64_t c3 = c2 * (c1 + 2) / 3;

	/*
	 * Value of 'c3' overflows on buffer sizes close to 16MiB. For that
	 * reason we split incremental fletcher4 computation of large buffers
	 * to steps of (ZFS_FLETCHER_4_INC_MAX_SIZE) size.
	 */
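	/*
	 * Rough arithmetic behind that bound: c1 = size / 4, and the
	 * intermediate product c2 * (c1 + 2) grows like c1^3 / 2.  Near
	 * 16MiB, c1 is about 2^22, so that product is around 2^65 and no
	 * longer fits in 64 bits, while 8MiB steps (c1 about 2^21) keep it
	 * comfortably below 2^64.
	 */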
	ASSERT3U(size, <=, ZFS_FLETCHER_4_INC_MAX_SIZE);

	zcp->zc_word[3] += nzcp->zc_word[3] + c1 * zcp->zc_word[2] +
	    c2 * zcp->zc_word[1] + c3 * zcp->zc_word[0];
	zcp->zc_word[2] += nzcp->zc_word[2] + c1 * zcp->zc_word[1] +
	    c2 * zcp->zc_word[0];
	zcp->zc_word[1] += nzcp->zc_word[1] + c1 * zcp->zc_word[0];
	zcp->zc_word[0] += nzcp->zc_word[0];
}
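
/*
 * The updates in fletcher_4_incremental_combine() go from zc_word[3] down
 * to zc_word[0] on purpose: each line must consume the pre-combine values
 * of the lower-order accumulators.  The identity being applied when a block
 * of c1 words with partial sums (a', b', c', d') is appended to a running
 * checksum (a, b, c, d) is:
 *
 *	d += d' + c1*c + c2*b + c3*a
 *	c += c' + c1*b + c2*a
 *	b += b' + c1*a
 *	a += a'
 *
 * As an illustrative sketch (not code from this file), splitting a buffer
 * in half and combining is expected to match a single pass over the whole
 * buffer, provided 'half' is a multiple of four bytes and no larger than
 * ZFS_FLETCHER_4_INC_MAX_SIZE:
 *
 *	zio_cksum_t zc_lo, zc_hi, zc_all;
 *	fletcher_4_native_varsize(buf, half, &zc_lo);
 *	fletcher_4_native_varsize((char *)buf + half, half, &zc_hi);
 *	fletcher_4_incremental_combine(&zc_lo, half, &zc_hi);
 *	fletcher_4_native_varsize(buf, 2 * half, &zc_all);
 *	(zc_lo and zc_all now hold the same checksum)
 */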
static inline void
fletcher_4_incremental_impl(boolean_t native, const void *buf, uint64_t size,
    zio_cksum_t *zcp)
{
	while (size > 0) {
		zio_cksum_t nzc;
		uint64_t len = MIN(size, ZFS_FLETCHER_4_INC_MAX_SIZE);

		if (native)
			fletcher_4_native(buf, len, NULL, &nzc);
		else
			fletcher_4_byteswap(buf, len, NULL, &nzc);

		fletcher_4_incremental_combine(zcp, len, &nzc);

		size -= len;
		buf = (char *)buf + len;
	}
}
int
fletcher_4_incremental_native(void *buf, size_t size, void *data)
{
	zio_cksum_t *zcp = data;

	/* Use scalar impl to directly update cksum of small blocks */
	if (size < SPA_MINBLOCKSIZE)
		fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size);
	else
		fletcher_4_incremental_impl(B_TRUE, buf, size, zcp);
	return (0);
}
int
fletcher_4_incremental_byteswap(void *buf, size_t size, void *data)
{
	zio_cksum_t *zcp = data;

	/* Use scalar impl to directly update cksum of small blocks */
	if (size < SPA_MINBLOCKSIZE)
		fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp, buf, size);
	else
		fletcher_4_incremental_impl(B_FALSE, buf, size, zcp);
	return (0);
}
/* Fletcher 4 kstats */

static int
fletcher_4_kstat_headers(char *buf, size_t size)
{
	ssize_t off = 0;

	off += snprintf(buf + off, size, "%-17s", "implementation");
	off += snprintf(buf + off, size - off, "%-15s", "native");
	(void) snprintf(buf + off, size - off, "%-15s\n", "byteswap");

	return (0);
}
static int
fletcher_4_kstat_data(char *buf, size_t size, void *data)
{
	struct fletcher_4_kstat *fastest_stat =
	    &fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
	struct fletcher_4_kstat *curr_stat = (struct fletcher_4_kstat *)data;
	ssize_t off = 0;

	if (curr_stat == fastest_stat) {
		off += snprintf(buf + off, size - off, "%-17s", "fastest");
		off += snprintf(buf + off, size - off, "%-15s",
		    fletcher_4_supp_impls[fastest_stat->native]->name);
		off += snprintf(buf + off, size - off, "%-15s\n",
		    fletcher_4_supp_impls[fastest_stat->byteswap]->name);
	} else {
		ptrdiff_t id = curr_stat - fletcher_4_stat_data;

		off += snprintf(buf + off, size - off, "%-17s",
		    fletcher_4_supp_impls[id]->name);
		off += snprintf(buf + off, size - off, "%-15llu",
		    (u_longlong_t)curr_stat->native);
		off += snprintf(buf + off, size - off, "%-15llu\n",
		    (u_longlong_t)curr_stat->byteswap);
	}

	return (0);
}
static void *
fletcher_4_kstat_addr(kstat_t *ksp, loff_t n)
{
	if (n <= fletcher_4_supp_impls_cnt)
		ksp->ks_private = (void *) (fletcher_4_stat_data + n);
	else
		ksp->ks_private = NULL;

	return (ksp->ks_private);
}
#define	FLETCHER_4_FASTEST_FN_COPY(type, src)				  \
{									  \
	fletcher_4_fastest_impl.init_ ## type = src->init_ ## type;	  \
	fletcher_4_fastest_impl.fini_ ## type = src->fini_ ## type;	  \
	fletcher_4_fastest_impl.compute_ ## type = src->compute_ ## type;\
}

#define	FLETCHER_4_BENCH_NS	(MSEC2NSEC(50))		/* 50ms */

typedef void fletcher_checksum_func_t(const void *, uint64_t, const void *,
    zio_cksum_t *);
static void
fletcher_4_benchmark_impl(boolean_t native, char *data, uint64_t data_size)
{
	struct fletcher_4_kstat *fastest_stat =
	    &fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
	hrtime_t start;
	uint64_t run_bw, run_time_ns, best_run = 0;
	zio_cksum_t zc;
	uint32_t i, l, sel_save = IMPL_READ(fletcher_4_impl_chosen);

	fletcher_checksum_func_t *fletcher_4_test = native ?
	    fletcher_4_native : fletcher_4_byteswap;

	for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
		struct fletcher_4_kstat *stat = &fletcher_4_stat_data[i];
		uint64_t run_count = 0;

		/* temporarily set an implementation */
		fletcher_4_impl_chosen = i;

		kpreempt_disable();
		start = gethrtime();
		do {
			for (l = 0; l < 32; l++, run_count++)
				fletcher_4_test(data, data_size, NULL, &zc);

			run_time_ns = gethrtime() - start;
		} while (run_time_ns < FLETCHER_4_BENCH_NS);
		kpreempt_enable();

		run_bw = data_size * run_count * NANOSEC;
		run_bw /= run_time_ns;	/* B/s */

		if (native)
			stat->native = run_bw;
		else
			stat->byteswap = run_bw;

		if (run_bw > best_run) {
			best_run = run_bw;

			if (native) {
				fastest_stat->native = i;
				FLETCHER_4_FASTEST_FN_COPY(native,
				    fletcher_4_supp_impls[i]);
			} else {
				fastest_stat->byteswap = i;
				FLETCHER_4_FASTEST_FN_COPY(byteswap,
				    fletcher_4_supp_impls[i]);
			}
		}
	}

	/* restore original selection */
	atomic_swap_32(&fletcher_4_impl_chosen, sel_save);
}
void
fletcher_4_init(void)
{
	static const size_t data_size = 1 << SPA_OLD_MAXBLOCKSHIFT; /* 128kiB */
	fletcher_4_ops_t *curr_impl;
	char *databuf;
	int i, c;

	/* move supported impl into fletcher_4_supp_impls */
	for (i = 0, c = 0; i < ARRAY_SIZE(fletcher_4_impls); i++) {
		curr_impl = (fletcher_4_ops_t *)fletcher_4_impls[i];

		if (curr_impl->valid && curr_impl->valid())
			fletcher_4_supp_impls[c++] = curr_impl;
	}
	membar_producer();	/* complete fletcher_4_supp_impls[] init */
	fletcher_4_supp_impls_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use last implementation as fastest */
	memcpy(&fletcher_4_fastest_impl,
	    fletcher_4_supp_impls[fletcher_4_supp_impls_cnt-1],
	    sizeof (fletcher_4_fastest_impl));
	fletcher_4_fastest_impl.name = "fastest";

	fletcher_4_initialized = B_TRUE;
	return;
#endif

	/* Benchmark all supported implementations */
	databuf = vmem_alloc(data_size, KM_SLEEP);
	for (i = 0; i < data_size / sizeof (uint64_t); i++)
		((uint64_t *)databuf)[i] = (uintptr_t)(databuf+i); /* warm-up */

	fletcher_4_benchmark_impl(B_FALSE, databuf, data_size);
	fletcher_4_benchmark_impl(B_TRUE, databuf, data_size);

	vmem_free(databuf, data_size);

	/* install kstats for all implementations */
	fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
	if (fletcher_4_kstat != NULL) {
		fletcher_4_kstat->ks_data = NULL;
		fletcher_4_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(fletcher_4_kstat,
		    fletcher_4_kstat_headers,
		    fletcher_4_kstat_data,
		    fletcher_4_kstat_addr);
		kstat_install(fletcher_4_kstat);
	}

	/* Finish initialization */
	fletcher_4_initialized = B_TRUE;
}
void
fletcher_4_fini(void)
{
	if (fletcher_4_kstat != NULL) {
		kstat_delete(fletcher_4_kstat);
		fletcher_4_kstat = NULL;
	}
}
static void
abd_fletcher_4_init(zio_abd_checksum_data_t *cdp)
{
	const fletcher_4_ops_t *ops = fletcher_4_impl_get();
	cdp->acd_private = (void *) ops;

	if (cdp->acd_byteorder == ZIO_CHECKSUM_NATIVE)
		ops->init_native(cdp->acd_ctx);
	else
		ops->init_byteswap(cdp->acd_ctx);
}
static void
abd_fletcher_4_fini(zio_abd_checksum_data_t *cdp)
{
	fletcher_4_ops_t *ops = (fletcher_4_ops_t *)cdp->acd_private;

	ASSERT(ops);

	if (cdp->acd_byteorder == ZIO_CHECKSUM_NATIVE)
		ops->fini_native(cdp->acd_ctx, cdp->acd_zcp);
	else
		ops->fini_byteswap(cdp->acd_ctx, cdp->acd_zcp);
}
static void
abd_fletcher_4_simd2scalar(boolean_t native, void *data, size_t size,
    zio_abd_checksum_data_t *cdp)
{
	zio_cksum_t *zcp = cdp->acd_zcp;

	ASSERT3U(size, <, FLETCHER_MIN_SIMD_SIZE);

	abd_fletcher_4_fini(cdp);
	cdp->acd_private = (void *)&fletcher_4_scalar_ops;

	if (native)
		fletcher_4_incremental_native(data, size, zcp);
	else
		fletcher_4_incremental_byteswap(data, size, zcp);
}
static int
abd_fletcher_4_iter(void *data, size_t size, void *private)
{
	zio_abd_checksum_data_t *cdp = (zio_abd_checksum_data_t *)private;
	fletcher_4_ctx_t *ctx = cdp->acd_ctx;
	fletcher_4_ops_t *ops = (fletcher_4_ops_t *)cdp->acd_private;
	boolean_t native = cdp->acd_byteorder == ZIO_CHECKSUM_NATIVE;
	uint64_t asize = P2ALIGN(size, FLETCHER_MIN_SIMD_SIZE);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (asize > 0) {
		if (native)
			ops->compute_native(ctx, data, asize);
		else
			ops->compute_byteswap(ctx, data, asize);

		size -= asize;
		data = (char *)data + asize;
	}

	if (size > 0) {
		ASSERT3U(size, <, FLETCHER_MIN_SIMD_SIZE);
		/* At this point we have to switch to scalar impl */
		abd_fletcher_4_simd2scalar(native, data, size, cdp);
	}

	return (0);
}
zio_abd_checksum_func_t fletcher_4_abd_ops = {
	.acf_init = abd_fletcher_4_init,
	.acf_fini = abd_fletcher_4_fini,
	.acf_iter = abd_fletcher_4_iter
};

#if defined(_KERNEL)

#include <linux/mod_compat.h>
static int
fletcher_4_param_get(char *buffer, zfs_kernel_param_t *unused)
{
	const uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
	char *fmt;
	int i, cnt = 0;

	/* list fastest */
	fmt = (impl == IMPL_FASTEST) ? "[%s] " : "%s ";
	cnt += sprintf(buffer + cnt, fmt, "fastest");

	/* list all supported implementations */
	for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += sprintf(buffer + cnt, fmt,
		    fletcher_4_supp_impls[i]->name);
	}

	return (cnt);
}
static int
fletcher_4_param_set(const char *val, zfs_kernel_param_t *unused)
{
	return (fletcher_4_impl_set(val));
}
/*
 * Choose a fletcher 4 implementation in ZFS.
 * Users can choose "cycle" to exercise all implementations, but this is
 * for testing purposes and therefore can only be set in user space.
 */
module_param_call(zfs_fletcher_4_impl,
    fletcher_4_param_set, fletcher_4_param_get, NULL, 0644);
MODULE_PARM_DESC(zfs_fletcher_4_impl, "Select fletcher 4 implementation.");
EXPORT_SYMBOL(fletcher_init);
EXPORT_SYMBOL(fletcher_2_incremental_native);
EXPORT_SYMBOL(fletcher_2_incremental_byteswap);
EXPORT_SYMBOL(fletcher_4_init);
EXPORT_SYMBOL(fletcher_4_fini);
EXPORT_SYMBOL(fletcher_2_native);
EXPORT_SYMBOL(fletcher_2_byteswap);
EXPORT_SYMBOL(fletcher_4_native);
EXPORT_SYMBOL(fletcher_4_native_varsize);
EXPORT_SYMBOL(fletcher_4_byteswap);
EXPORT_SYMBOL(fletcher_4_incremental_native);
EXPORT_SYMBOL(fletcher_4_incremental_byteswap);
EXPORT_SYMBOL(fletcher_4_abd_ops);

#endif