/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
/*
 * ZFS's 2nd and 4th order Fletcher checksums are defined by the following
 * recurrence relations:
 *
 *	a_i = a_(i-1) + f_(i-1)
 *	b_i = b_(i-1) + a_i
 *	c_i = c_(i-1) + b_i		(fletcher-4 only)
 *	d_i = d_(i-1) + c_i		(fletcher-4 only)
 *
 * Where
 *	a_0 = b_0 = c_0 = d_0 = 0
 * and
 *	f_0 .. f_(n-1) are the input data.
 *
 * Using standard techniques, these translate into the following series:
 *
 *	a_n = SUM[i=1..n] f_(n-i)
 *	b_n = SUM[i=1..n] i * f_(n-i)
 *	c_n = SUM[i=1..n] (i*(i+1)/2) * f_(n-i)
 *	d_n = SUM[i=1..n] (i*(i+1)*(i+2)/6) * f_(n-i)
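 *
 * For example, after n = 3 input words f_0, f_1, f_2 the series above give:
 *
 *	a_3 =    f_0 +   f_1 + f_2
 *	b_3 =  3*f_0 + 2*f_1 + f_2
 *	c_3 =  6*f_0 + 3*f_1 + f_2
 *	d_3 = 10*f_0 + 4*f_1 + f_2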
 *
 * For fletcher-2, the f_i's are 64-bit, and [ab]_i are 64-bit accumulators.
 * Since the additions are done mod (2^64), errors in the high bits may not
 * be noticed. For this reason, fletcher-2 is deprecated.
 *
 * For fletcher-4, the f_i's are 32-bit, and [abcd]_i are 64-bit accumulators.
 * A conservative estimate of how big the buffer can get before we overflow
 * can be made using f_i = 0xffffffff for all i:
 *
 *	f=2^32-1;d=0; for (i = 1; d<2^64; i++) { d += f*i*(i+1)*(i+2)/6 }; (i-1)*4
 *
 * So blocks of up to 2k will not overflow. Our largest block size is
 * 128k, which has 32k 4-byte words, so we can compute the largest possible
 * accumulators, then divide by 2^64 to figure the max amount of overflow:
 *
 *	a=b=c=d=0; f=2^32-1; for (i=1; i<=32*1024; i++) { a+=f; b+=a; c+=b; d+=c }
 *	a/2^64;b/2^64;c/2^64;d/2^64
 *
 * So a and b cannot overflow. To make sure each bit of input has some
 * effect on the contents of c and d, we can examine the factors of the
 * coefficients in the equations for c_n and d_n. The number of 2s in the
 * factors determines the lowest set bit in the multiplier. Running through
 * the cases for n*(n+1)/2 reveals that the highest power of 2 is 2^14,
 * and for n*(n+1)*(n+2)/6 it is 2^15. So while some data may overflow
 * the 64-bit accumulators, every bit of every f_i affects every accumulator,
 * even for 128k blocks.
 *
 * If we wanted to make a stronger version of fletcher4 (fletcher4c?),
 * we could do our calculations mod (2^32 - 1) by adding in the carries
 * periodically, and store the number of carries in the top 32 bits.
 *
 * --------------------
 * Checksum Performance
 * --------------------
 *
 * There are two interesting components to checksum performance: cached and
 * uncached performance. With cached data, fletcher-2 is about four times
 * faster than fletcher-4. With uncached data, the performance difference is
 * negligible, since the cost of a cache fill dominates the processing time.
 * Even though fletcher-4 is slower than fletcher-2, it is still a pretty
 * efficient pass over the data.
 *
 * In normal operation, the data which is being checksummed is in a buffer
 * which has been filled either by:
 *
 *	1. a compression step, which will be mostly cached, or
 *	2. a bcopy() or copyin(), which will be uncached
 *	   (because the copy is cache-bypassing).
 *
 * For both cached and uncached data, both fletcher checksums are much faster
 * than sha-256, and slower than 'off', which doesn't touch the data at all.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/byteorder.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <zfs_fletcher.h>

static void fletcher_4_scalar_init(fletcher_4_ctx_t *ctx);
static void fletcher_4_scalar_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp);
static void fletcher_4_scalar_native(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size);
static void fletcher_4_scalar_byteswap(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size);
static boolean_t fletcher_4_scalar_valid(void);

static const fletcher_4_ops_t fletcher_4_scalar_ops = {
	.init_native = fletcher_4_scalar_init,
	.fini_native = fletcher_4_scalar_fini,
	.compute_native = fletcher_4_scalar_native,
	.init_byteswap = fletcher_4_scalar_init,
	.fini_byteswap = fletcher_4_scalar_fini,
	.compute_byteswap = fletcher_4_scalar_byteswap,
	.valid = fletcher_4_scalar_valid,
};

static fletcher_4_ops_t fletcher_4_fastest_impl = {
	.valid = fletcher_4_scalar_valid,
};

static const fletcher_4_ops_t *fletcher_4_impls[] = {
	&fletcher_4_scalar_ops,
	&fletcher_4_superscalar_ops,
	&fletcher_4_superscalar4_ops,
#if defined(HAVE_SSE2)
	&fletcher_4_sse2_ops,
#endif
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
	&fletcher_4_ssse3_ops,
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
	&fletcher_4_avx2_ops,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F)
	&fletcher_4_avx512f_ops,
#endif
#if defined(__aarch64__)
	&fletcher_4_aarch64_neon_ops,
#endif
};

/* Hold all supported implementations */
static uint32_t fletcher_4_supp_impls_cnt = 0;
static fletcher_4_ops_t *fletcher_4_supp_impls[ARRAY_SIZE(fletcher_4_impls)];

/* Select fletcher4 implementation */
#define	IMPL_FASTEST	(UINT32_MAX)
#define	IMPL_CYCLE	(UINT32_MAX - 1)
#define	IMPL_SCALAR	(0)

static uint32_t fletcher_4_impl_chosen = IMPL_FASTEST;

#define	IMPL_READ(i)	(*(volatile uint32_t *) &(i))

static struct fletcher_4_impl_selector {
	const char *fis_name;
	uint32_t fis_sel;
} fletcher_4_impl_selectors[] = {
#if !defined(_KERNEL)
	{ "cycle",	IMPL_CYCLE },
#endif
	{ "fastest",	IMPL_FASTEST },
	{ "scalar",	IMPL_SCALAR }
};

static kstat_t *fletcher_4_kstat;

static struct fletcher_4_kstat {
	uint64_t native;
	uint64_t byteswap;
} fletcher_4_stat_data[ARRAY_SIZE(fletcher_4_impls) + 1];

/* Indicate that benchmark has been completed */
static boolean_t fletcher_4_initialized = B_FALSE;

void
fletcher_init(zio_cksum_t *zcp)
{
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}
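
/*
 * Incrementally fold a buffer into a running fletcher-2 checksum.  The input
 * is consumed as pairs of 64-bit words, maintaining two interleaved (a, b)
 * accumulator streams (one for even-indexed and one for odd-indexed words)
 * in zcp->zc_word[0..3].
 */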
int
fletcher_2_incremental_native(void *buf, size_t size, void *data)
{
	zio_cksum_t *zcp = data;

	const uint64_t *ip = buf;
	const uint64_t *ipend = ip + (size / sizeof (uint64_t));
	uint64_t a0, b0, a1, b1;

	a0 = zcp->zc_word[0];
	a1 = zcp->zc_word[1];
	b0 = zcp->zc_word[2];
	b1 = zcp->zc_word[3];

	for (; ip < ipend; ip += 2) {
		a0 += ip[0];
		a1 += ip[1];
		b0 += a0;
		b1 += a1;
	}

	ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);

	return (0);
}

void
fletcher_2_native(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	fletcher_init(zcp);
	(void) fletcher_2_incremental_native((void *) buf, size, zcp);
}

int
fletcher_2_incremental_byteswap(void *buf, size_t size, void *data)
{
	zio_cksum_t *zcp = data;

	const uint64_t *ip = buf;
	const uint64_t *ipend = ip + (size / sizeof (uint64_t));
	uint64_t a0, b0, a1, b1;

	a0 = zcp->zc_word[0];
	a1 = zcp->zc_word[1];
	b0 = zcp->zc_word[2];
	b1 = zcp->zc_word[3];

	for (; ip < ipend; ip += 2) {
		a0 += BSWAP_64(ip[0]);
		a1 += BSWAP_64(ip[1]);
		b0 += a0;
		b1 += a1;
	}

	ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);

	return (0);
}

void
fletcher_2_byteswap(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	fletcher_init(zcp);
	(void) fletcher_2_incremental_byteswap((void *) buf, size, zcp);
}

static void
fletcher_4_scalar_init(fletcher_4_ctx_t *ctx)
{
	ZIO_SET_CHECKSUM(&ctx->scalar, 0, 0, 0, 0);
}

static void
fletcher_4_scalar_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
	memcpy(zcp, &ctx->scalar, sizeof (zio_cksum_t));
}
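
/*
 * Portable scalar implementation: walk the buffer one native-endian 32-bit
 * word at a time, applying the a/b/c/d recurrences described in the comment
 * at the top of this file.
 */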
static void
fletcher_4_scalar_native(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
	const uint32_t *ip = buf;
	const uint32_t *ipend = ip + (size / sizeof (uint32_t));
	uint64_t a, b, c, d;

	a = ctx->scalar.zc_word[0];
	b = ctx->scalar.zc_word[1];
	c = ctx->scalar.zc_word[2];
	d = ctx->scalar.zc_word[3];

	for (; ip < ipend; ip++) {
		a += ip[0];
		b += a;
		c += b;
		d += c;
	}

	ZIO_SET_CHECKSUM(&ctx->scalar, a, b, c, d);
}

static void
fletcher_4_scalar_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
	const uint32_t *ip = buf;
	const uint32_t *ipend = ip + (size / sizeof (uint32_t));
	uint64_t a, b, c, d;

	a = ctx->scalar.zc_word[0];
	b = ctx->scalar.zc_word[1];
	c = ctx->scalar.zc_word[2];
	d = ctx->scalar.zc_word[3];

	for (; ip < ipend; ip++) {
		a += BSWAP_32(ip[0]);
		b += a;
		c += b;
		d += c;
	}

	ZIO_SET_CHECKSUM(&ctx->scalar, a, b, c, d);
}

static boolean_t
fletcher_4_scalar_valid(void)
{
	return (B_TRUE);
}
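
/*
 * Select the fletcher 4 implementation by name.  "fastest", "scalar" and
 * (in user space) "cycle" are always accepted; once the benchmark has run,
 * any of the supported SIMD implementations may be named as well.
 */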
int
fletcher_4_impl_set(const char *val)
{
	int err = -EINVAL;
	uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
	size_t i, val_len;

	val_len = strlen(val);
	while ((val_len > 0) && !!isspace(val[val_len - 1])) /* trim '\n' */
		val_len--;

	/* check mandatory implementations */
	for (i = 0; i < ARRAY_SIZE(fletcher_4_impl_selectors); i++) {
		const char *name = fletcher_4_impl_selectors[i].fis_name;

		if (val_len == strlen(name) &&
		    strncmp(val, name, val_len) == 0) {
			impl = fletcher_4_impl_selectors[i].fis_sel;
			err = 0;
			break;
		}
	}

	if (err != 0 && fletcher_4_initialized) {
		/* check all supported implementations */
		for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
			const char *name = fletcher_4_supp_impls[i]->name;

			if (val_len == strlen(name) &&
			    strncmp(val, name, val_len) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0)
		atomic_swap_32(&fletcher_4_impl_chosen, impl);

	return (err);
}

static inline const fletcher_4_ops_t *
fletcher_4_impl_get(void)
{
	fletcher_4_ops_t *ops = NULL;
	const uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(fletcher_4_initialized);
		ops = &fletcher_4_fastest_impl;
		break;
#if !defined(_KERNEL)
	case IMPL_CYCLE: {
		ASSERT(fletcher_4_initialized);
		ASSERT3U(fletcher_4_supp_impls_cnt, >, 0);

		static uint32_t cycle_count = 0;
		uint32_t idx = (++cycle_count) % fletcher_4_supp_impls_cnt;
		ops = fletcher_4_supp_impls[idx];
	}
	break;
#endif
	default:
		ASSERT3U(fletcher_4_supp_impls_cnt, >, 0);
		ASSERT3U(impl, <, fletcher_4_supp_impls_cnt);

		ops = fletcher_4_supp_impls[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}

static void
fletcher_4_native_impl(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	fletcher_4_ctx_t ctx;
	const fletcher_4_ops_t *ops = fletcher_4_impl_get();

	ops->init_native(&ctx);
	ops->compute_native(&ctx, buf, size);
	ops->fini_native(&ctx, zcp);
}
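
/*
 * Top-level native-endian entry point.  The accelerated implementations work
 * on 64-byte multiples, so the buffer is split into a 64-byte aligned prefix
 * handled by the selected implementation and a small tail (and any buffer
 * shorter than 64 bytes) handled by the scalar code.
 */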
void
fletcher_4_native(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	const uint64_t p2size = P2ALIGN(size, 64);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0 || p2size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

		if (size > 0)
			fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
			    buf, size);
	} else {
		fletcher_4_native_impl(buf, p2size, zcp);

		if (p2size < size)
			fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
			    (char *)buf + p2size, size - p2size);
	}
}

void
fletcher_4_native_varsize(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
	fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size);
}

static void
fletcher_4_byteswap_impl(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	fletcher_4_ctx_t ctx;
	const fletcher_4_ops_t *ops = fletcher_4_impl_get();

	ops->init_byteswap(&ctx);
	ops->compute_byteswap(&ctx, buf, size);
	ops->fini_byteswap(&ctx, zcp);
}

void
fletcher_4_byteswap(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
	const uint64_t p2size = P2ALIGN(size, 64);

	ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

	if (size == 0 || p2size == 0) {
		ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

		if (size > 0)
			fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
			    buf, size);
	} else {
		fletcher_4_byteswap_impl(buf, p2size, zcp);

		if (p2size < size)
			fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
			    (char *)buf + p2size, size - p2size);
	}
}

/* Incremental Fletcher 4 */

#define	ZFS_FLETCHER_4_INC_MAX_SIZE	(8ULL << 20)
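
/*
 * Combining two partial fletcher-4 checksums follows from the series at the
 * top of this file: when a block of c1 additional 32-bit words is appended,
 * the old 'a' contributes c1 extra times to b, c1*(c1+1)/2 times to c and
 * c1*(c1+1)*(c1+2)/6 times to d (and similarly b into c/d, and c into d).
 * Those are exactly the c1/c2/c3 multipliers applied below.
 */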
static void
fletcher_4_incremental_combine(zio_cksum_t *zcp, const uint64_t size,
    const zio_cksum_t *nzcp)
{
	const uint64_t c1 = size / sizeof (uint32_t);
	const uint64_t c2 = c1 * (c1 + 1) / 2;
	const uint64_t c3 = c2 * (c1 + 2) / 3;

	/*
	 * The value of 'c3' overflows on buffer sizes close to 16MiB. For
	 * that reason we split incremental fletcher-4 computation of large
	 * buffers into steps of at most ZFS_FLETCHER_4_INC_MAX_SIZE bytes.
	 */
	ASSERT3U(size, <=, ZFS_FLETCHER_4_INC_MAX_SIZE);

	zcp->zc_word[3] += nzcp->zc_word[3] + c1 * zcp->zc_word[2] +
	    c2 * zcp->zc_word[1] + c3 * zcp->zc_word[0];
	zcp->zc_word[2] += nzcp->zc_word[2] + c1 * zcp->zc_word[1] +
	    c2 * zcp->zc_word[0];
	zcp->zc_word[1] += nzcp->zc_word[1] + c1 * zcp->zc_word[0];
	zcp->zc_word[0] += nzcp->zc_word[0];
}

static void
fletcher_4_incremental_impl(boolean_t native, const void *buf, uint64_t size,
    zio_cksum_t *zcp)
{
	while (size > 0) {
		zio_cksum_t nzc;
		uint64_t len = MIN(size, ZFS_FLETCHER_4_INC_MAX_SIZE);

		if (native)
			fletcher_4_native(buf, len, NULL, &nzc);
		else
			fletcher_4_byteswap(buf, len, NULL, &nzc);

		fletcher_4_incremental_combine(zcp, len, &nzc);

		size -= len;
		buf = (char *)buf + len;
	}
}
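
/*
 * Incremental entry points.  These use a callback-style (buf, size, data)
 * signature with 'data' pointing at the running zio_cksum_t; buffers smaller
 * than SPA_MINBLOCKSIZE bypass the split/combine machinery and update the
 * checksum directly with the scalar code.
 */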
int
fletcher_4_incremental_native(void *buf, size_t size, void *data)
{
	zio_cksum_t *zcp = data;

	/* Use scalar impl to directly update cksum of small blocks */
	if (size < SPA_MINBLOCKSIZE)
		fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size);
	else
		fletcher_4_incremental_impl(B_TRUE, buf, size, zcp);

	return (0);
}

int
fletcher_4_incremental_byteswap(void *buf, size_t size, void *data)
{
	zio_cksum_t *zcp = data;

	/* Use scalar impl to directly update cksum of small blocks */
	if (size < SPA_MINBLOCKSIZE)
		fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp, buf, size);
	else
		fletcher_4_incremental_impl(B_FALSE, buf, size, zcp);

	return (0);
}

/* Fletcher 4 kstats */
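
/*
 * The kstat below exposes one row per supported implementation with its
 * measured native and byteswap throughput (bytes/s), plus a final "fastest"
 * row naming which implementation won the benchmark for each byte order.
 */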
static int
fletcher_4_kstat_headers(char *buf, size_t size)
{
	ssize_t off = 0;

	off += snprintf(buf + off, size, "%-17s", "implementation");
	off += snprintf(buf + off, size - off, "%-15s", "native");
	(void) snprintf(buf + off, size - off, "%-15s\n", "byteswap");

	return (0);
}

static int
fletcher_4_kstat_data(char *buf, size_t size, void *data)
{
	struct fletcher_4_kstat *fastest_stat =
	    &fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
	struct fletcher_4_kstat *curr_stat = (struct fletcher_4_kstat *)data;
	ssize_t off = 0;

	if (curr_stat == fastest_stat) {
		off += snprintf(buf + off, size - off, "%-17s", "fastest");
		off += snprintf(buf + off, size - off, "%-15s",
		    fletcher_4_supp_impls[fastest_stat->native]->name);
		off += snprintf(buf + off, size - off, "%-15s\n",
		    fletcher_4_supp_impls[fastest_stat->byteswap]->name);
	} else {
		ptrdiff_t id = curr_stat - fletcher_4_stat_data;

		off += snprintf(buf + off, size - off, "%-17s",
		    fletcher_4_supp_impls[id]->name);
		off += snprintf(buf + off, size - off, "%-15llu",
		    (u_longlong_t)curr_stat->native);
		off += snprintf(buf + off, size - off, "%-15llu\n",
		    (u_longlong_t)curr_stat->byteswap);
	}

	return (0);
}

static void *
fletcher_4_kstat_addr(kstat_t *ksp, loff_t n)
{
	if (n <= fletcher_4_supp_impls_cnt)
		ksp->ks_private = (void *) (fletcher_4_stat_data + n);
	else
		ksp->ks_private = NULL;

	return (ksp->ks_private);
}

#define	FLETCHER_4_FASTEST_FN_COPY(type, src)				\
{									\
	fletcher_4_fastest_impl.init_ ## type = src->init_ ## type;	\
	fletcher_4_fastest_impl.fini_ ## type = src->fini_ ## type;	\
	fletcher_4_fastest_impl.compute_ ## type = src->compute_ ## type; \
}

#define	FLETCHER_4_BENCH_NS	(MSEC2NSEC(50))	/* 50ms */

typedef void fletcher_checksum_func_t(const void *, uint64_t, const void *,
    zio_cksum_t *);
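
/*
 * Benchmark each supported implementation in the requested byte order for
 * roughly FLETCHER_4_BENCH_NS, record its throughput in the kstat table,
 * and copy the function pointers of the fastest one into
 * fletcher_4_fastest_impl.
 */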
static void
fletcher_4_benchmark_impl(boolean_t native, char *data, uint64_t data_size)
{
	struct fletcher_4_kstat *fastest_stat =
	    &fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
	hrtime_t start;
	uint64_t run_bw, run_time_ns, best_run = 0;
	zio_cksum_t zc;
	uint32_t i, l, sel_save = IMPL_READ(fletcher_4_impl_chosen);

	fletcher_checksum_func_t *fletcher_4_test = native ?
	    fletcher_4_native : fletcher_4_byteswap;

	for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
		struct fletcher_4_kstat *stat = &fletcher_4_stat_data[i];
		uint64_t run_count = 0;

		/* temporarily set an implementation */
		fletcher_4_impl_chosen = i;

		start = gethrtime();
		do {
			for (l = 0; l < 32; l++, run_count++)
				fletcher_4_test(data, data_size, NULL, &zc);

			run_time_ns = gethrtime() - start;
		} while (run_time_ns < FLETCHER_4_BENCH_NS);

		run_bw = data_size * run_count * NANOSEC;
		run_bw /= run_time_ns;	/* B/s */

		if (native)
			stat->native = run_bw;
		else
			stat->byteswap = run_bw;

		if (run_bw > best_run) {
			best_run = run_bw;

			if (native) {
				fastest_stat->native = i;
				FLETCHER_4_FASTEST_FN_COPY(native,
				    fletcher_4_supp_impls[i]);
			} else {
				fastest_stat->byteswap = i;
				FLETCHER_4_FASTEST_FN_COPY(byteswap,
				    fletcher_4_supp_impls[i]);
			}
		}
	}

	/* restore original selection */
	atomic_swap_32(&fletcher_4_impl_chosen, sel_save);
}

void
fletcher_4_init(void)
{
	static const size_t data_size = 1 << SPA_OLD_MAXBLOCKSHIFT; /* 128kiB */
	fletcher_4_ops_t *curr_impl;
	char *databuf;
	int i, c;

	/* move supported impl into fletcher_4_supp_impls */
	for (i = 0, c = 0; i < ARRAY_SIZE(fletcher_4_impls); i++) {
		curr_impl = (fletcher_4_ops_t *)fletcher_4_impls[i];

		if (curr_impl->valid && curr_impl->valid())
			fletcher_4_supp_impls[c++] = curr_impl;
	}
	membar_producer();	/* complete fletcher_4_supp_impls[] init */
	fletcher_4_supp_impls_cnt = c;	/* number of supported impl */

#if !defined(_KERNEL)
	/* Skip benchmarking and use last implementation as fastest */
	memcpy(&fletcher_4_fastest_impl,
	    fletcher_4_supp_impls[fletcher_4_supp_impls_cnt - 1],
	    sizeof (fletcher_4_fastest_impl));
	fletcher_4_fastest_impl.name = "fastest";

	fletcher_4_initialized = B_TRUE;
	return;
#endif
	/* Benchmark all supported implementations */
	databuf = vmem_alloc(data_size, KM_SLEEP);
	for (i = 0; i < data_size / sizeof (uint64_t); i++)
		((uint64_t *)databuf)[i] = (uintptr_t)(databuf + i); /* warm-up */

	fletcher_4_benchmark_impl(B_FALSE, databuf, data_size);
	fletcher_4_benchmark_impl(B_TRUE, databuf, data_size);

	vmem_free(databuf, data_size);

	/* install kstats for all implementations */
	fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
	if (fletcher_4_kstat != NULL) {
		fletcher_4_kstat->ks_data = NULL;
		fletcher_4_kstat->ks_ndata = UINT32_MAX;
		kstat_set_raw_ops(fletcher_4_kstat,
		    fletcher_4_kstat_headers,
		    fletcher_4_kstat_data,
		    fletcher_4_kstat_addr);
		kstat_install(fletcher_4_kstat);
	}

	/* Finish initialization */
	fletcher_4_initialized = B_TRUE;
}

void
fletcher_4_fini(void)
{
	if (fletcher_4_kstat != NULL) {
		kstat_delete(fletcher_4_kstat);
		fletcher_4_kstat = NULL;
	}
}

#if defined(_KERNEL) && defined(HAVE_SPL)
#include <linux/mod_compat.h>

static int
fletcher_4_param_get(char *buffer, zfs_kernel_param_t *unused)
{
	const uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
	const char *fmt;
	int i, cnt = 0;

	/* list fastest */
	fmt = (impl == IMPL_FASTEST) ? "[%s] " : "%s ";
	cnt += sprintf(buffer + cnt, fmt, "fastest");

	/* list all supported implementations */
	for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += sprintf(buffer + cnt, fmt,
		    fletcher_4_supp_impls[i]->name);
	}

	return (cnt);
}

static int
fletcher_4_param_set(const char *val, zfs_kernel_param_t *unused)
{
	return (fletcher_4_impl_set(val));
}

/*
 * Choose a fletcher 4 implementation in ZFS.
 * Users can choose "cycle" to exercise all implementations, but this is
 * for testing purposes and therefore can only be set from user space.
 */
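/*
 * For example (the exact sysfs path depends on how the module is packaged),
 * the active implementation can typically be inspected and changed with
 * something like:
 *
 *	# cat /sys/module/zcommon/parameters/zfs_fletcher_4_impl
 *	# echo scalar > /sys/module/zcommon/parameters/zfs_fletcher_4_impl
 */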
module_param_call(zfs_fletcher_4_impl,
    fletcher_4_param_set, fletcher_4_param_get, NULL, 0644);
MODULE_PARM_DESC(zfs_fletcher_4_impl, "Select fletcher 4 implementation.");

EXPORT_SYMBOL(fletcher_init);
EXPORT_SYMBOL(fletcher_2_incremental_native);
EXPORT_SYMBOL(fletcher_2_incremental_byteswap);
EXPORT_SYMBOL(fletcher_4_init);
EXPORT_SYMBOL(fletcher_4_fini);
EXPORT_SYMBOL(fletcher_2_native);
EXPORT_SYMBOL(fletcher_2_byteswap);
EXPORT_SYMBOL(fletcher_4_native);
EXPORT_SYMBOL(fletcher_4_native_varsize);
EXPORT_SYMBOL(fletcher_4_byteswap);
EXPORT_SYMBOL(fletcher_4_incremental_native);
EXPORT_SYMBOL(fletcher_4_incremental_byteswap);
#endif