include/linux/simd_x86.h
Linux 5.0 compat: Disable vector instructions on 5.0+ kernels
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (C) 2016 Gvozden Neskovic <neskovic@compeng.uni-frankfurt.de>.
23 */
24
25 /*
26 * USER API:
27 *
28 * Kernel fpu methods:
29 * kfpu_begin()
30 * kfpu_end()
31 *
32 * SIMD support:
33 *
34  * The following functions should be called to determine whether a CPU
35  * feature is supported. All functions are usable in both kernel and user
36  * space. If a SIMD algorithm uses more than one instruction set,
37  * all relevant feature test functions should be called.
38 *
39 * Supported features:
40 * zfs_sse_available()
41 * zfs_sse2_available()
42 * zfs_sse3_available()
43 * zfs_ssse3_available()
44 * zfs_sse4_1_available()
45 * zfs_sse4_2_available()
46 *
47 * zfs_avx_available()
48 * zfs_avx2_available()
49 *
50 * zfs_bmi1_available()
51 * zfs_bmi2_available()
52 *
53 * zfs_avx512f_available()
54 * zfs_avx512cd_available()
55 * zfs_avx512er_available()
56 * zfs_avx512pf_available()
57 * zfs_avx512bw_available()
58 * zfs_avx512dq_available()
59 * zfs_avx512vl_available()
60 * zfs_avx512ifma_available()
61 * zfs_avx512vbmi_available()
62 *
63  * NOTE(AVX-512VL): If using AVX-512 instructions with 128-bit registers,
64  * also add zfs_avx512vl_available() to the feature check.
65 */
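/*
 * Usage sketch (illustrative only, not part of this header): a SIMD code
 * path should test every relevant feature and wrap the vector section in
 * kfpu_begin()/kfpu_end().  The callee name below is hypothetical.
 *
 *	if (zfs_avx2_available()) {
 *		kfpu_begin();
 *		fletcher_4_avx2(buf, size);
 *		kfpu_end();
 *	}
 */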
66
67 #ifndef _SIMD_X86_H
68 #define _SIMD_X86_H
69
70 #include <sys/isa_defs.h>
71
72 /* only for __x86 */
73 #if defined(__x86)
74
75 #include <sys/types.h>
76
77 #if defined(_KERNEL)
78 #include <asm/cpufeature.h>
79 #else
80 #include <cpuid.h>
81 #endif
82
83 #if defined(_KERNEL)
84 #if defined(HAVE_UNDERSCORE_KERNEL_FPU)
85 #include <asm/fpu/api.h>
86 #include <asm/fpu/internal.h>
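/*
 * The double-underscore FPU calls do not manage preemption themselves,
 * so preemption is disabled and re-enabled explicitly around them.
 */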
87 #define kfpu_begin() \
88 { \
89 preempt_disable(); \
90 __kernel_fpu_begin(); \
91 }
92 #define kfpu_end() \
93 { \
94 __kernel_fpu_end(); \
95 preempt_enable(); \
96 }
97 #elif defined(HAVE_KERNEL_FPU)
98 #include <asm/i387.h>
99 #include <asm/xcr.h>
100 #define kfpu_begin() kernel_fpu_begin()
101 #define kfpu_end() kernel_fpu_end()
102 #else
103 /* Kernel doesn't export any kernel_fpu_* functions */
104 #include <asm/fpu/internal.h> /* For kernel xgetbv() */
105 #define kfpu_begin() panic("This code should never run")
106 #define kfpu_end() panic("This code should never run")
107 #endif /* defined(HAVE_KERNEL_FPU) */
108
109 #else
110 /*
111  * Dummy FPU methods for user space
112 */
113 #define kfpu_begin() do {} while (0)
114 #define kfpu_end() do {} while (0)
115 #endif /* defined(_KERNEL) */
116
117 /*
118  * CPUID feature tests for user space. In kernel space the Linux kernel's
119  * own CPU feature interface (boot_cpu_has()) is used instead.
120 */
121 #if !defined(_KERNEL)
122
123 /*
124 * x86 registers used implicitly by CPUID
125 */
126 typedef enum cpuid_regs {
127 EAX = 0,
128 EBX,
129 ECX,
130 EDX,
131 CPUID_REG_CNT = 4
132 } cpuid_regs_t;
133
134 /*
135 * List of instruction sets identified by CPUID
136 */
137 typedef enum cpuid_inst_sets {
138 SSE = 0,
139 SSE2,
140 SSE3,
141 SSSE3,
142 SSE4_1,
143 SSE4_2,
144 OSXSAVE,
145 AVX,
146 AVX2,
147 BMI1,
148 BMI2,
149 AVX512F,
150 AVX512CD,
151 AVX512DQ,
152 AVX512BW,
153 AVX512IFMA,
154 AVX512VBMI,
155 AVX512PF,
156 AVX512ER,
157 AVX512VL,
158 AES,
159 PCLMULQDQ
160 } cpuid_inst_sets_t;
161
162 /*
163 * Instruction set descriptor.
164 */
165 typedef struct cpuid_feature_desc {
166 uint32_t leaf; /* CPUID leaf */
167 uint32_t subleaf; /* CPUID sub-leaf */
168 uint32_t flag; /* bit mask of the feature */
169 cpuid_regs_t reg; /* which CPUID return register to test */
170 } cpuid_feature_desc_t;
171
172 #define _AVX512F_BIT (1U << 16)
173 #define _AVX512CD_BIT (_AVX512F_BIT | (1U << 28))
174 #define _AVX512DQ_BIT (_AVX512F_BIT | (1U << 17))
175 #define _AVX512BW_BIT (_AVX512F_BIT | (1U << 30))
176 #define _AVX512IFMA_BIT (_AVX512F_BIT | (1U << 21))
177 #define _AVX512VBMI_BIT		(1U << 1) /* AVX512F_BIT is in another register (EBX) */
178 #define _AVX512PF_BIT (_AVX512F_BIT | (1U << 26))
179 #define _AVX512ER_BIT (_AVX512F_BIT | (1U << 27))
180 #define _AVX512VL_BIT		(1U << 31) /* if used, also check the other required AVX-512 features */
181 #define _AES_BIT (1U << 25)
182 #define _PCLMULQDQ_BIT (1U << 1)
183
184 /*
185 * Descriptions of supported instruction sets
186 */
187 static const cpuid_feature_desc_t cpuid_features[] = {
188 [SSE] = {1U, 0U, 1U << 25, EDX },
189 [SSE2] = {1U, 0U, 1U << 26, EDX },
190 [SSE3] = {1U, 0U, 1U << 0, ECX },
191 [SSSE3] = {1U, 0U, 1U << 9, ECX },
192 [SSE4_1] = {1U, 0U, 1U << 19, ECX },
193 [SSE4_2] = {1U, 0U, 1U << 20, ECX },
194 [OSXSAVE] = {1U, 0U, 1U << 27, ECX },
195 [AVX] = {1U, 0U, 1U << 28, ECX },
196 [AVX2] = {7U, 0U, 1U << 5, EBX },
197 [BMI1] = {7U, 0U, 1U << 3, EBX },
198 [BMI2] = {7U, 0U, 1U << 8, EBX },
199 [AVX512F] = {7U, 0U, _AVX512F_BIT, EBX },
200 [AVX512CD] = {7U, 0U, _AVX512CD_BIT, EBX },
201 [AVX512DQ] = {7U, 0U, _AVX512DQ_BIT, EBX },
202 [AVX512BW] = {7U, 0U, _AVX512BW_BIT, EBX },
203 [AVX512IFMA] = {7U, 0U, _AVX512IFMA_BIT, EBX },
204 [AVX512VBMI] = {7U, 0U, _AVX512VBMI_BIT, ECX },
205 [AVX512PF] = {7U, 0U, _AVX512PF_BIT, EBX },
206 [AVX512ER] = {7U, 0U, _AVX512ER_BIT, EBX },
207 	[AVX512VL]	= {7U, 0U,	_AVX512VL_BIT,	EBX	},
208 [AES] = {1U, 0U, _AES_BIT, ECX },
209 [PCLMULQDQ] = {1U, 0U, _PCLMULQDQ_BIT, ECX },
210 };
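/*
 * Example reading of one entry above: [AVX2] = {7U, 0U, 1U << 5, EBX}
 * means "execute CPUID with EAX = 7 and ECX = 0, then test bit 5 of the
 * returned EBX value".
 */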
211
212 /*
213  * Read the XCR0 register (the XSAVE-enabled state mask) with XGETBV.
214  * Only call this function when CPUID reports OSXSAVE support, otherwise
215  * executing XGETBV raises an illegal-instruction fault.
216 */
217 static inline uint64_t
218 xgetbv(uint32_t index)
219 {
220 uint32_t eax, edx;
221 /* xgetbv - instruction byte code */
222 __asm__ __volatile__(".byte 0x0f; .byte 0x01; .byte 0xd0"
223 : "=a" (eax), "=d" (edx)
224 : "c" (index));
225
226 return ((((uint64_t)edx)<<32) | (uint64_t)eax);
227 }
228
229 /*
230 * Check if CPU supports a feature
231 */
232 static inline boolean_t
233 __cpuid_check_feature(const cpuid_feature_desc_t *desc)
234 {
235 uint32_t r[CPUID_REG_CNT];
236
237 if (__get_cpuid_max(0, NULL) >= desc->leaf) {
238 /*
239 		 * __cpuid_count() is needed so that the sub-leaf can be
240 		 * selected, which is required to properly check leaf-7
241 		 * features such as AVX2. It is a macro, so the output
241 		 * registers are assigned directly.
242 */
243 __cpuid_count(desc->leaf, desc->subleaf,
244 r[EAX], r[EBX], r[ECX], r[EDX]);
245 return ((r[desc->reg] & desc->flag) == desc->flag);
246 }
247 return (B_FALSE);
248 }
249
250 #define CPUID_FEATURE_CHECK(name, id) \
251 static inline boolean_t \
252 __cpuid_has_ ## name(void) \
253 { \
254 return (__cpuid_check_feature(&cpuid_features[id])); \
255 }
256
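/*
 * For example, CPUID_FEATURE_CHECK(avx2, AVX2) below generates a
 * __cpuid_has_avx2() helper that simply returns
 * __cpuid_check_feature(&cpuid_features[AVX2]).
 */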
257 /*
258  * Define functions for user-space CPUID feature testing
259 */
260 CPUID_FEATURE_CHECK(sse, SSE);
261 CPUID_FEATURE_CHECK(sse2, SSE2);
262 CPUID_FEATURE_CHECK(sse3, SSE3);
263 CPUID_FEATURE_CHECK(ssse3, SSSE3);
264 CPUID_FEATURE_CHECK(sse4_1, SSE4_1);
265 CPUID_FEATURE_CHECK(sse4_2, SSE4_2);
266 CPUID_FEATURE_CHECK(avx, AVX);
267 CPUID_FEATURE_CHECK(avx2, AVX2);
268 CPUID_FEATURE_CHECK(osxsave, OSXSAVE);
269 CPUID_FEATURE_CHECK(bmi1, BMI1);
270 CPUID_FEATURE_CHECK(bmi2, BMI2);
271 CPUID_FEATURE_CHECK(avx512f, AVX512F);
272 CPUID_FEATURE_CHECK(avx512cd, AVX512CD);
273 CPUID_FEATURE_CHECK(avx512dq, AVX512DQ);
274 CPUID_FEATURE_CHECK(avx512bw, AVX512BW);
275 CPUID_FEATURE_CHECK(avx512ifma, AVX512IFMA);
276 CPUID_FEATURE_CHECK(avx512vbmi, AVX512VBMI);
277 CPUID_FEATURE_CHECK(avx512pf, AVX512PF);
278 CPUID_FEATURE_CHECK(avx512er, AVX512ER);
279 CPUID_FEATURE_CHECK(avx512vl, AVX512VL);
280 CPUID_FEATURE_CHECK(aes, AES);
281 CPUID_FEATURE_CHECK(pclmulqdq, PCLMULQDQ);
282
283 #endif /* !defined(_KERNEL) */
284
285
286 /*
287  * Detect whether the OS has enabled the requested SIMD register state
288 */
289 static inline boolean_t
290 __simd_state_enabled(const uint64_t state)
291 {
292 boolean_t has_osxsave;
293 uint64_t xcr0;
294
295 #if defined(_KERNEL)
296 #if defined(X86_FEATURE_OSXSAVE) && defined(KERNEL_EXPORTS_X86_FPU)
297 has_osxsave = !!boot_cpu_has(X86_FEATURE_OSXSAVE);
298 #else
299 has_osxsave = B_FALSE;
300 #endif
301 #elif !defined(_KERNEL)
302 has_osxsave = __cpuid_has_osxsave();
303 #endif
304
305 if (!has_osxsave)
306 return (B_FALSE);
307
308 xcr0 = xgetbv(0);
309 return ((xcr0 & state) == state);
310 }
311
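/*
 * XCR0 state-component bits tested below: bit 1 covers SSE (XMM) state,
 * bit 2 covers AVX (upper YMM) state, and bits 5-7 (0xE0) cover the
 * AVX-512 opmask and upper ZMM state.
 */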
312 #define _XSTATE_SSE_AVX (0x2 | 0x4)
313 #define _XSTATE_AVX512 (0xE0 | _XSTATE_SSE_AVX)
314
315 #define __ymm_enabled() __simd_state_enabled(_XSTATE_SSE_AVX)
316 #define __zmm_enabled() __simd_state_enabled(_XSTATE_AVX512)
317
318
319 /*
320 * Check if SSE instruction set is available
321 */
322 static inline boolean_t
323 zfs_sse_available(void)
324 {
325 #if defined(_KERNEL)
326 #if defined(KERNEL_EXPORTS_X86_FPU)
327 return (!!boot_cpu_has(X86_FEATURE_XMM));
328 #else
329 return (B_FALSE);
330 #endif
331 #elif !defined(_KERNEL)
332 return (__cpuid_has_sse());
333 #endif
334 }
335
336 /*
337 * Check if SSE2 instruction set is available
338 */
339 static inline boolean_t
340 zfs_sse2_available(void)
341 {
342 #if defined(_KERNEL)
343 #if defined(KERNEL_EXPORTS_X86_FPU)
344 return (!!boot_cpu_has(X86_FEATURE_XMM2));
345 #else
346 return (B_FALSE);
347 #endif
348 #elif !defined(_KERNEL)
349 return (__cpuid_has_sse2());
350 #endif
351 }
352
353 /*
354 * Check if SSE3 instruction set is available
355 */
356 static inline boolean_t
357 zfs_sse3_available(void)
358 {
359 #if defined(_KERNEL)
360 #if defined(KERNEL_EXPORTS_X86_FPU)
361 return (!!boot_cpu_has(X86_FEATURE_XMM3));
362 #else
363 return (B_FALSE);
364 #endif
365 #elif !defined(_KERNEL)
366 return (__cpuid_has_sse3());
367 #endif
368 }
369
370 /*
371 * Check if SSSE3 instruction set is available
372 */
373 static inline boolean_t
374 zfs_ssse3_available(void)
375 {
376 #if defined(_KERNEL)
377 #if defined(KERNEL_EXPORTS_X86_FPU)
378 return (!!boot_cpu_has(X86_FEATURE_SSSE3));
379 #else
380 return (B_FALSE);
381 #endif
382 #elif !defined(_KERNEL)
383 return (__cpuid_has_ssse3());
384 #endif
385 }
386
387 /*
388 * Check if SSE4.1 instruction set is available
389 */
390 static inline boolean_t
391 zfs_sse4_1_available(void)
392 {
393 #if defined(_KERNEL)
394 #if defined(KERNEL_EXPORTS_X86_FPU)
395 return (!!boot_cpu_has(X86_FEATURE_XMM4_1));
396 #else
397 return (B_FALSE);
398 #endif
399 #elif !defined(_KERNEL)
400 return (__cpuid_has_sse4_1());
401 #endif
402 }
403
404 /*
405 * Check if SSE4.2 instruction set is available
406 */
407 static inline boolean_t
408 zfs_sse4_2_available(void)
409 {
410 #if defined(_KERNEL)
411 #if defined(KERNEL_EXPORTS_X86_FPU)
412 return (!!boot_cpu_has(X86_FEATURE_XMM4_2));
413 #else
414 return (B_FALSE);
415 #endif
416 #elif !defined(_KERNEL)
417 return (__cpuid_has_sse4_2());
418 #endif
419 }
420
421 /*
422 * Check if AVX instruction set is available
423 */
424 static inline boolean_t
425 zfs_avx_available(void)
426 {
427 boolean_t has_avx;
428 #if defined(_KERNEL)
429 #if defined(KERNEL_EXPORTS_X86_FPU)
430 has_avx = !!boot_cpu_has(X86_FEATURE_AVX);
431 #else
432 has_avx = B_FALSE;
433 #endif
434 #elif !defined(_KERNEL)
435 has_avx = __cpuid_has_avx();
436 #endif
437
438 return (has_avx && __ymm_enabled());
439 }
440
441 /*
442 * Check if AVX2 instruction set is available
443 */
444 static inline boolean_t
445 zfs_avx2_available(void)
446 {
447 boolean_t has_avx2;
448 #if defined(_KERNEL)
449 #if defined(X86_FEATURE_AVX2) && defined(KERNEL_EXPORTS_X86_FPU)
450 has_avx2 = !!boot_cpu_has(X86_FEATURE_AVX2);
451 #else
452 has_avx2 = B_FALSE;
453 #endif
454 #elif !defined(_KERNEL)
455 has_avx2 = __cpuid_has_avx2();
456 #endif
457
458 return (has_avx2 && __ymm_enabled());
459 }
460
461 /*
462 * Check if BMI1 instruction set is available
463 */
464 static inline boolean_t
465 zfs_bmi1_available(void)
466 {
467 #if defined(_KERNEL)
468 #if defined(X86_FEATURE_BMI1) && defined(KERNEL_EXPORTS_X86_FPU)
469 return (!!boot_cpu_has(X86_FEATURE_BMI1));
470 #else
471 return (B_FALSE);
472 #endif
473 #elif !defined(_KERNEL)
474 return (__cpuid_has_bmi1());
475 #endif
476 }
477
478 /*
479 * Check if BMI2 instruction set is available
480 */
481 static inline boolean_t
482 zfs_bmi2_available(void)
483 {
484 #if defined(_KERNEL)
485 #if defined(X86_FEATURE_BMI2) && defined(KERNEL_EXPORTS_X86_FPU)
486 return (!!boot_cpu_has(X86_FEATURE_BMI2));
487 #else
488 return (B_FALSE);
489 #endif
490 #elif !defined(_KERNEL)
491 return (__cpuid_has_bmi2());
492 #endif
493 }
494
495 /*
496 * Check if AES instruction set is available
497 */
498 static inline boolean_t
499 zfs_aes_available(void)
500 {
501 #if defined(_KERNEL)
502 #if defined(X86_FEATURE_AES) && defined(KERNEL_EXPORTS_X86_FPU)
503 return (!!boot_cpu_has(X86_FEATURE_AES));
504 #else
505 return (B_FALSE);
506 #endif
507 #elif !defined(_KERNEL)
508 return (__cpuid_has_aes());
509 #endif
510 }
511
512 /*
513 * Check if PCLMULQDQ instruction set is available
514 */
515 static inline boolean_t
516 zfs_pclmulqdq_available(void)
517 {
518 #if defined(_KERNEL)
519 #if defined(X86_FEATURE_PCLMULQDQ) && defined(KERNEL_EXPORTS_X86_FPU)
520 return (!!boot_cpu_has(X86_FEATURE_PCLMULQDQ));
521 #else
522 return (B_FALSE);
523 #endif
524 #elif !defined(_KERNEL)
525 return (__cpuid_has_pclmulqdq());
526 #endif
527 }
528
529 /*
530 * AVX-512 family of instruction sets:
531 *
532 * AVX512F Foundation
533 * AVX512CD Conflict Detection Instructions
534 * AVX512ER Exponential and Reciprocal Instructions
535 * AVX512PF Prefetch Instructions
536 *
537 * AVX512BW Byte and Word Instructions
538 * AVX512DQ Double-word and Quadword Instructions
539 * AVX512VL Vector Length Extensions
540 *
541 * AVX512IFMA Integer Fused Multiply Add (Not supported by kernel 4.4)
542 * AVX512VBMI Vector Byte Manipulation Instructions
543 */
544
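/*
 * Combined-check sketch (illustrative only): per the AVX-512VL note in
 * the header comment, code that runs AVX-512 instructions on 128-bit or
 * 256-bit registers must also test zfs_avx512vl_available().  The callee
 * below is hypothetical.
 *
 *	if (zfs_avx512f_available() && zfs_avx512bw_available() &&
 *	    zfs_avx512vl_available()) {
 *		kfpu_begin();
 *		gf_mul_avx512bw_128(dst, src);
 *		kfpu_end();
 *	}
 */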
545
546 /* Check if AVX512F instruction set is available */
547 static inline boolean_t
548 zfs_avx512f_available(void)
549 {
550 boolean_t has_avx512 = B_FALSE;
551
552 #if defined(_KERNEL)
553 #if defined(X86_FEATURE_AVX512F) && defined(KERNEL_EXPORTS_X86_FPU)
554 has_avx512 = !!boot_cpu_has(X86_FEATURE_AVX512F);
555 #else
556 has_avx512 = B_FALSE;
557 #endif
558 #elif !defined(_KERNEL)
559 has_avx512 = __cpuid_has_avx512f();
560 #endif
561
562 return (has_avx512 && __zmm_enabled());
563 }
564
565 /* Check if AVX512CD instruction set is available */
566 static inline boolean_t
567 zfs_avx512cd_available(void)
568 {
569 boolean_t has_avx512 = B_FALSE;
570
571 #if defined(_KERNEL)
572 #if defined(X86_FEATURE_AVX512CD) && defined(KERNEL_EXPORTS_X86_FPU)
573 has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
574 boot_cpu_has(X86_FEATURE_AVX512CD);
575 #else
576 has_avx512 = B_FALSE;
577 #endif
578 #elif !defined(_KERNEL)
579 has_avx512 = __cpuid_has_avx512cd();
580 #endif
581
582 return (has_avx512 && __zmm_enabled());
583 }
584
585 /* Check if AVX512ER instruction set is available */
586 static inline boolean_t
587 zfs_avx512er_available(void)
588 {
589 boolean_t has_avx512 = B_FALSE;
590
591 #if defined(_KERNEL)
592 #if defined(X86_FEATURE_AVX512ER) && defined(KERNEL_EXPORTS_X86_FPU)
593 has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
594 boot_cpu_has(X86_FEATURE_AVX512ER);
595 #else
596 has_avx512 = B_FALSE;
597 #endif
598 #elif !defined(_KERNEL)
599 has_avx512 = __cpuid_has_avx512er();
600 #endif
601
602 return (has_avx512 && __zmm_enabled());
603 }
604
605 /* Check if AVX512PF instruction set is available */
606 static inline boolean_t
607 zfs_avx512pf_available(void)
608 {
609 boolean_t has_avx512 = B_FALSE;
610
611 #if defined(_KERNEL)
612 #if defined(X86_FEATURE_AVX512PF) && defined(KERNEL_EXPORTS_X86_FPU)
613 has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
614 boot_cpu_has(X86_FEATURE_AVX512PF);
615 #else
616 has_avx512 = B_FALSE;
617 #endif
618 #elif !defined(_KERNEL)
619 has_avx512 = __cpuid_has_avx512pf();
620 #endif
621
622 return (has_avx512 && __zmm_enabled());
623 }
624
625 /* Check if AVX512BW instruction set is available */
626 static inline boolean_t
627 zfs_avx512bw_available(void)
628 {
629 boolean_t has_avx512 = B_FALSE;
630
631 #if defined(_KERNEL)
632 #if defined(X86_FEATURE_AVX512BW) && defined(KERNEL_EXPORTS_X86_FPU)
633 has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
634 boot_cpu_has(X86_FEATURE_AVX512BW);
635 #else
636 has_avx512 = B_FALSE;
637 #endif
638 #elif !defined(_KERNEL)
639 has_avx512 = __cpuid_has_avx512bw();
640 #endif
641
642 return (has_avx512 && __zmm_enabled());
643 }
644
645 /* Check if AVX512DQ instruction set is available */
646 static inline boolean_t
647 zfs_avx512dq_available(void)
648 {
649 boolean_t has_avx512 = B_FALSE;
650
651 #if defined(_KERNEL)
652 #if defined(X86_FEATURE_AVX512DQ) && defined(KERNEL_EXPORTS_X86_FPU)
653 has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
654 boot_cpu_has(X86_FEATURE_AVX512DQ);
655 #else
656 has_avx512 = B_FALSE;
657 #endif
658 #elif !defined(_KERNEL)
659 has_avx512 = __cpuid_has_avx512dq();
660 #endif
661
662 return (has_avx512 && __zmm_enabled());
663 }
664
665 /* Check if AVX512VL instruction set is available */
666 static inline boolean_t
667 zfs_avx512vl_available(void)
668 {
669 boolean_t has_avx512 = B_FALSE;
670
671 #if defined(_KERNEL)
672 #if defined(X86_FEATURE_AVX512VL) && defined(KERNEL_EXPORTS_X86_FPU)
673 has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
674 boot_cpu_has(X86_FEATURE_AVX512VL);
675 #else
676 has_avx512 = B_FALSE;
677 #endif
678 #elif !defined(_KERNEL)
679 has_avx512 = __cpuid_has_avx512vl();
680 #endif
681
682 return (has_avx512 && __zmm_enabled());
683 }
684
685 /* Check if AVX512IFMA instruction set is available */
686 static inline boolean_t
687 zfs_avx512ifma_available(void)
688 {
689 boolean_t has_avx512 = B_FALSE;
690
691 #if defined(_KERNEL)
692 #if defined(X86_FEATURE_AVX512IFMA) && defined(KERNEL_EXPORTS_X86_FPU)
693 has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
694 boot_cpu_has(X86_FEATURE_AVX512IFMA);
695 #else
696 has_avx512 = B_FALSE;
697 #endif
698 #elif !defined(_KERNEL)
699 has_avx512 = __cpuid_has_avx512ifma();
700 #endif
701
702 return (has_avx512 && __zmm_enabled());
703 }
704
705 /* Check if AVX512VBMI instruction set is available */
706 static inline boolean_t
707 zfs_avx512vbmi_available(void)
708 {
709 boolean_t has_avx512 = B_FALSE;
710
711 #if defined(_KERNEL)
712 #if defined(X86_FEATURE_AVX512VBMI) && defined(KERNEL_EXPORTS_X86_FPU)
713 has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
714 boot_cpu_has(X86_FEATURE_AVX512VBMI);
715 #else
716 has_avx512 = B_FALSE;
717 #endif
718 #elif !defined(_KERNEL)
719 has_avx512 = __cpuid_has_avx512f() &&
720 __cpuid_has_avx512vbmi();
721 #endif
722
723 return (has_avx512 && __zmm_enabled());
724 }
725
726 #endif /* defined(__x86) */
727
728 #endif /* _SIMD_X86_H */