/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Gvozden Neskovic <neskovic@compeng.uni-frankfurt.de>.
 */

/*
 * USER API:
 *
 * Kernel FPU methods:
 *	kfpu_allowed()
 *	kfpu_begin()
 *	kfpu_end()
 *	kfpu_init()
 *	kfpu_fini()
 *
 * SIMD support:
 *
 * The following functions should be called to determine whether a CPU
 * feature is supported.  All functions are usable in kernel and user space.
 * If a SIMD algorithm uses more than one instruction set, all relevant
 * feature test functions should be called.
 *
 * Supported features:
 *	zfs_sse_available()
 *	zfs_sse2_available()
 *	zfs_sse3_available()
 *	zfs_ssse3_available()
 *	zfs_sse4_1_available()
 *	zfs_sse4_2_available()
 *
 *	zfs_avx_available()
 *	zfs_avx2_available()
 *
 *	zfs_bmi1_available()
 *	zfs_bmi2_available()
 *
 *	zfs_shani_available()
 *
 *	zfs_avx512f_available()
 *	zfs_avx512cd_available()
 *	zfs_avx512er_available()
 *	zfs_avx512pf_available()
 *	zfs_avx512bw_available()
 *	zfs_avx512dq_available()
 *	zfs_avx512vl_available()
 *	zfs_avx512ifma_available()
 *	zfs_avx512vbmi_available()
 *
 * NOTE(AVX-512VL):	If using AVX-512 instructions with 128- or 256-bit
 *			registers, also add zfs_avx512vl_available() to the
 *			feature check.
 */
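/*
 * Example usage (an illustrative sketch only; example_checksum(),
 * checksum_avx2() and checksum_scalar() are hypothetical consumers and
 * are not part of this header):
 *
 *	static void
 *	example_checksum(const void *buf, size_t size)
 *	{
 *		if (kfpu_allowed() && zfs_avx2_available()) {
 *			kfpu_begin();
 *			checksum_avx2(buf, size);
 *			kfpu_end();
 *		} else {
 *			checksum_scalar(buf, size);
 *		}
 *	}
 *
 * A SIMD implementation that mixes instruction sets (e.g. AVX2 plus BMI2)
 * should check every relevant zfs_*_available() function before selecting
 * that code path.
 */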

#ifndef _LINUX_SIMD_X86_H
#define	_LINUX_SIMD_X86_H

/* only for __x86 */
#if defined(__x86)

#include <sys/types.h>
#include <asm/cpufeature.h>

/*
 * Disable the WARN_ON_FPU() macro to prevent additional dependencies
 * when providing the kfpu_* functions.  Relevant warnings are included
 * as appropriate and are unconditionally enabled.
 */
#if defined(CONFIG_X86_DEBUG_FPU) && !defined(KERNEL_EXPORTS_X86_FPU)
#undef CONFIG_X86_DEBUG_FPU
#endif

/*
 * The following cases are for kernels which export either the
 * kernel_fpu_* or __kernel_fpu_* functions.
 */
#if defined(KERNEL_EXPORTS_X86_FPU)

#if defined(HAVE_KERNEL_FPU_API_HEADER)
#include <asm/fpu/api.h>
#if defined(HAVE_KERNEL_FPU_INTERNAL_HEADER)
#include <asm/fpu/internal.h>
#endif
#else
#include <asm/i387.h>
#endif

#define	kfpu_allowed()		1
#define	kfpu_init()		0
#define	kfpu_fini()		((void) 0)

#if defined(HAVE_UNDERSCORE_KERNEL_FPU)
#define	kfpu_begin()		\
{				\
	preempt_disable();	\
	__kernel_fpu_begin();	\
}
#define	kfpu_end()		\
{				\
	__kernel_fpu_end();	\
	preempt_enable();	\
}

#elif defined(HAVE_KERNEL_FPU)
#define	kfpu_begin()		kernel_fpu_begin()
#define	kfpu_end()		kernel_fpu_end()

#else
/*
 * This case is unreachable.  When KERNEL_EXPORTS_X86_FPU is defined then
 * either HAVE_UNDERSCORE_KERNEL_FPU or HAVE_KERNEL_FPU must be defined.
 */
#error "Unreachable kernel configuration"
#endif

#else /* defined(KERNEL_EXPORTS_X86_FPU) */

/*
 * When the kernel_fpu_* symbols are unavailable then provide our own
 * versions which allow the FPU to be safely used.
 */
#if defined(HAVE_KERNEL_FPU_INTERNAL)

/*
 * For kernels not exporting *kfpu_{begin,end} we have to use inline assembly
 * with the XSAVE{,OPT,S} instructions, so we need the toolchain to support at
 * least XSAVE.
 */
#if !defined(HAVE_XSAVE)
#error "Toolchain needs to support the XSAVE assembler instruction"
#endif

#include <linux/mm.h>
#include <linux/slab.h>

extern uint8_t **zfs_kfpu_fpregs;

/*
 * Return the size in bytes required by the XSAVE instruction for an
 * XSAVE area containing all the user state components supported by this CPU.
 * See: Intel 64 and IA-32 Architectures Software Developer’s Manual.
 * Dec. 2021. Vol. 2A p. 3-222.
 */
static inline uint32_t
get_xsave_area_size(void)
{
	if (!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
		return (0);
	}
	/*
	 * Call CPUID with leaf 13 and subleaf 0. The size is in ecx.
	 * We don't need to check for cpuid_max here, since if this CPU has
	 * OSXSAVE set, it has leaf 13 (0x0D) as well.
	 */
	uint32_t eax, ebx, ecx, edx;

	eax = 13U;
	ecx = 0U;
	__asm__ __volatile__("cpuid"
	    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
	    : "a" (eax), "c" (ecx));

	return (ecx);
}

/*
 * Return the allocation order of the maximum buffer size required to save the
 * FPU state on this architecture. The value returned is the same as Linux's
 * get_order() function would return (i.e. 2^order = number of pages required).
 * Currently this will always return 0 since the save area is below 4k even for
 * a full-fledged AVX-512 implementation.
 */
static inline int
get_fpuregs_save_area_order(void)
{
	size_t area_size = (size_t)get_xsave_area_size();

	/*
	 * If we are dealing with a CPU not supporting XSAVE,
	 * get_xsave_area_size() will return 0. Thus the maximum memory
	 * required is the FXSAVE area size which is 512 bytes. See: Intel 64
	 * and IA-32 Architectures Software Developer’s Manual. Dec. 2021.
	 * Vol. 2A p. 3-451.
	 */
	if (area_size == 0) {
		area_size = 512;
	}
	return (get_order(area_size));
}

/*
 * Free the per-cpu FPU register save areas allocated by kfpu_init().
 */
static inline void
kfpu_fini(void)
{
	int cpu;
	int order = get_fpuregs_save_area_order();

	for_each_possible_cpu(cpu) {
		if (zfs_kfpu_fpregs[cpu] != NULL) {
			free_pages((unsigned long)zfs_kfpu_fpregs[cpu], order);
		}
	}

	kfree(zfs_kfpu_fpregs);
}

/*
 * Initialize per-cpu variables to store FPU state.
 */
static inline int
kfpu_init(void)
{
	zfs_kfpu_fpregs = kzalloc(num_possible_cpus() * sizeof (uint8_t *),
	    GFP_KERNEL);

	if (zfs_kfpu_fpregs == NULL)
		return (-ENOMEM);

	/*
	 * The fxsave and xsave operations require 16-/64-byte alignment of
	 * the target memory. Since kmalloc() provides no alignment
	 * guarantee, use alloc_pages_node() instead.
	 */
	int cpu;
	int order = get_fpuregs_save_area_order();

	for_each_possible_cpu(cpu) {
		struct page *page = alloc_pages_node(cpu_to_node(cpu),
		    GFP_KERNEL | __GFP_ZERO, order);
		if (page == NULL) {
			kfpu_fini();
			return (-ENOMEM);
		}

		zfs_kfpu_fpregs[cpu] = page_address(page);
	}

	return (0);
}

#define	kfpu_allowed()		1

/*
 * FPU save and restore instructions.
 */
#define	__asm			__asm__ __volatile__
#define	kfpu_fxsave(addr)	__asm("fxsave %0" : "=m" (*(addr)))
#define	kfpu_fxsaveq(addr)	__asm("fxsaveq %0" : "=m" (*(addr)))
#define	kfpu_fnsave(addr)	__asm("fnsave %0; fwait" : "=m" (*(addr)))
#define	kfpu_fxrstor(addr)	__asm("fxrstor %0" : : "m" (*(addr)))
#define	kfpu_fxrstorq(addr)	__asm("fxrstorq %0" : : "m" (*(addr)))
#define	kfpu_frstor(addr)	__asm("frstor %0" : : "m" (*(addr)))
#define	kfpu_fxsr_clean(rval)	__asm("fnclex; emms; fildl %P[addr]" \
				    : : [addr] "m" (rval));

#define	kfpu_do_xsave(instruction, addr, mask)			\
{								\
	uint32_t low, hi;					\
								\
	low = mask;						\
	hi = (uint64_t)(mask) >> 32;				\
	__asm(instruction " %[dst]\n\t"				\
	    :							\
	    : [dst] "m" (*(addr)), "a" (low), "d" (hi)		\
	    : "memory");					\
}

static inline void
kfpu_save_fxsr(uint8_t *addr)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kfpu_fxsave(addr);
	else
		kfpu_fxsaveq(addr);
}

static inline void
kfpu_save_fsave(uint8_t *addr)
{
	kfpu_fnsave(addr);
}

static inline void
kfpu_begin(void)
{
	/*
	 * Preemption and interrupts must be disabled for the critical
	 * region where the FPU state is being modified.
	 */
	preempt_disable();
	local_irq_disable();

	/*
	 * The current FPU registers need to be preserved by kfpu_begin()
	 * and restored by kfpu_end().  They are stored in a dedicated
	 * per-cpu variable, not in the task struct; this allows any user
	 * FPU state to be correctly preserved and restored.
	 */
	uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
	if (static_cpu_has(X86_FEATURE_XSAVES)) {
		kfpu_do_xsave("xsaves", state, ~0);
		return;
	}
#endif
#if defined(HAVE_XSAVEOPT)
	if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
		kfpu_do_xsave("xsaveopt", state, ~0);
		return;
	}
#endif
	if (static_cpu_has(X86_FEATURE_XSAVE)) {
		kfpu_do_xsave("xsave", state, ~0);
	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
		kfpu_save_fxsr(state);
	} else {
		kfpu_save_fsave(state);
	}
}

#define	kfpu_do_xrstor(instruction, addr, mask)			\
{								\
	uint32_t low, hi;					\
								\
	low = mask;						\
	hi = (uint64_t)(mask) >> 32;				\
	__asm(instruction " %[src]"				\
	    :							\
	    : [src] "m" (*(addr)), "a" (low), "d" (hi)		\
	    : "memory");					\
}

static inline void
kfpu_restore_fxsr(uint8_t *addr)
{
	/*
	 * On AuthenticAMD K7 and K8 processors the fxrstor instruction only
	 * restores the _x87 FOP, FIP, and FDP registers when an exception
	 * is pending.  Clean the _x87 state to force the restore.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK)))
		kfpu_fxsr_clean(addr);

	if (IS_ENABLED(CONFIG_X86_32)) {
		kfpu_fxrstor(addr);
	} else {
		kfpu_fxrstorq(addr);
	}
}

static inline void
kfpu_restore_fsave(uint8_t *addr)
{
	kfpu_frstor(addr);
}

static inline void
kfpu_end(void)
{
	uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
	if (static_cpu_has(X86_FEATURE_XSAVES)) {
		kfpu_do_xrstor("xrstors", state, ~0);
		goto out;
	}
#endif
	if (static_cpu_has(X86_FEATURE_XSAVE)) {
		kfpu_do_xrstor("xrstor", state, ~0);
	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
		kfpu_restore_fxsr(state);
	} else {
		kfpu_restore_fsave(state);
	}
out:
	local_irq_enable();
	preempt_enable();
}

#else

#error	"Exactly one of KERNEL_EXPORTS_X86_FPU or HAVE_KERNEL_FPU_INTERNAL" \
	" must be defined"

#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */
#endif /* defined(KERNEL_EXPORTS_X86_FPU) */

/*
 * The Linux kernel provides an interface for CPU feature testing.
 */

/*
 * Detect register set support
 */

/*
 * Check if the OS supports AVX and AVX2 by checking XCR0.
 * Only call this function if CPUID indicates that the AVX feature is
 * supported by the CPU; otherwise executing xgetbv may raise an invalid
 * opcode exception (#UD).
 */
static inline uint64_t
zfs_xgetbv(uint32_t index)
{
	uint32_t eax, edx;
	/* xgetbv - instruction byte code */
	__asm__ __volatile__(".byte 0x0f; .byte 0x01; .byte 0xd0"
	    : "=a" (eax), "=d" (edx)
	    : "c" (index));

	return ((((uint64_t)edx) << 32) | (uint64_t)eax);
}

static inline boolean_t
__simd_state_enabled(const uint64_t state)
{
	boolean_t has_osxsave;
	uint64_t xcr0;

#if defined(X86_FEATURE_OSXSAVE)
	has_osxsave = !!boot_cpu_has(X86_FEATURE_OSXSAVE);
#else
	has_osxsave = B_FALSE;
#endif
	if (!has_osxsave)
		return (B_FALSE);

	xcr0 = zfs_xgetbv(0);
	return ((xcr0 & state) == state);
}

#define	_XSTATE_SSE_AVX		(0x2 | 0x4)
#define	_XSTATE_AVX512		(0xE0 | _XSTATE_SSE_AVX)

#define	__ymm_enabled() __simd_state_enabled(_XSTATE_SSE_AVX)
#define	__zmm_enabled() __simd_state_enabled(_XSTATE_AVX512)
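
/*
 * The XCR0 state-component bits used in the masks above are (Intel SDM,
 * Vol. 1, ch. 13): 0x2 = SSE state (XMM registers), 0x4 = AVX state (upper
 * halves of the YMM registers), 0xE0 = AVX-512 state (opmask registers,
 * ZMM_Hi256 and Hi16_ZMM).  The AVX and AVX-512 checks below combine the
 * CPUID feature flag reported by boot_cpu_has() with the matching XCR0
 * check, for example (an illustrative sketch only; the function name is
 * hypothetical):
 *
 *	static inline boolean_t
 *	zfs_example_feature_available(void)
 *	{
 *		return (boot_cpu_has(X86_FEATURE_AVX) && __ymm_enabled());
 *	}
 */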

/*
 * Check if SSE instruction set is available
 */
static inline boolean_t
zfs_sse_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM));
}

/*
 * Check if SSE2 instruction set is available
 */
static inline boolean_t
zfs_sse2_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM2));
}

/*
 * Check if SSE3 instruction set is available
 */
static inline boolean_t
zfs_sse3_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM3));
}

/*
 * Check if SSSE3 instruction set is available
 */
static inline boolean_t
zfs_ssse3_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_SSSE3));
}

/*
 * Check if SSE4.1 instruction set is available
 */
static inline boolean_t
zfs_sse4_1_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM4_1));
}

/*
 * Check if SSE4.2 instruction set is available
 */
static inline boolean_t
zfs_sse4_2_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM4_2));
}

/*
 * Check if AVX instruction set is available
 */
static inline boolean_t
zfs_avx_available(void)
{
	return (boot_cpu_has(X86_FEATURE_AVX) && __ymm_enabled());
}

/*
 * Check if AVX2 instruction set is available
 */
static inline boolean_t
zfs_avx2_available(void)
{
	return (boot_cpu_has(X86_FEATURE_AVX2) && __ymm_enabled());
}

/*
 * Check if BMI1 instruction set is available
 */
static inline boolean_t
zfs_bmi1_available(void)
{
#if defined(X86_FEATURE_BMI1)
	return (!!boot_cpu_has(X86_FEATURE_BMI1));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if BMI2 instruction set is available
 */
static inline boolean_t
zfs_bmi2_available(void)
{
#if defined(X86_FEATURE_BMI2)
	return (!!boot_cpu_has(X86_FEATURE_BMI2));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if AES instruction set is available
 */
static inline boolean_t
zfs_aes_available(void)
{
#if defined(X86_FEATURE_AES)
	return (!!boot_cpu_has(X86_FEATURE_AES));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if PCLMULQDQ instruction set is available
 */
static inline boolean_t
zfs_pclmulqdq_available(void)
{
#if defined(X86_FEATURE_PCLMULQDQ)
	return (!!boot_cpu_has(X86_FEATURE_PCLMULQDQ));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if MOVBE instruction is available
 */
static inline boolean_t
zfs_movbe_available(void)
{
#if defined(X86_FEATURE_MOVBE)
	return (!!boot_cpu_has(X86_FEATURE_MOVBE));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if SHA_NI instruction set is available
 */
static inline boolean_t
zfs_shani_available(void)
{
#if defined(X86_FEATURE_SHA_NI)
	return (!!boot_cpu_has(X86_FEATURE_SHA_NI));
#else
	return (B_FALSE);
#endif
}

/*
 * AVX-512 family of instruction sets:
 *
 * AVX512F	Foundation
 * AVX512CD	Conflict Detection Instructions
 * AVX512ER	Exponential and Reciprocal Instructions
 * AVX512PF	Prefetch Instructions
 *
 * AVX512BW	Byte and Word Instructions
 * AVX512DQ	Double-word and Quadword Instructions
 * AVX512VL	Vector Length Extensions
 *
 * AVX512IFMA	Integer Fused Multiply Add (Not supported by kernel 4.4)
 * AVX512VBMI	Vector Byte Manipulation Instructions
 */
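
/*
 * Per the NOTE(AVX-512VL) at the top of this file, an AVX-512 code path
 * that operates on 128- or 256-bit registers must also pass the
 * zfs_avx512vl_available() check.  For example (an illustrative sketch
 * only; the SIMD implementation selection is hypothetical):
 *
 *	if (kfpu_allowed() && zfs_avx512bw_available() &&
 *	    zfs_avx512vl_available()) {
 *		kfpu_begin();
 *		do_avx512bw_vl_work();
 *		kfpu_end();
 *	}
 */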

/*
 * Check if AVX512F instruction set is available
 */
static inline boolean_t
zfs_avx512f_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512F)
	has_avx512 = !!boot_cpu_has(X86_FEATURE_AVX512F);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512CD instruction set is available
 */
static inline boolean_t
zfs_avx512cd_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512CD)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512CD);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512ER instruction set is available
 */
static inline boolean_t
zfs_avx512er_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512ER)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512ER);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512PF instruction set is available
 */
static inline boolean_t
zfs_avx512pf_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512PF)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512PF);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512BW instruction set is available
 */
static inline boolean_t
zfs_avx512bw_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512BW)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512BW);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512DQ instruction set is available
 */
static inline boolean_t
zfs_avx512dq_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512DQ)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512DQ);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512VL instruction set is available
 */
static inline boolean_t
zfs_avx512vl_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512VL)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512VL);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512IFMA instruction set is available
 */
static inline boolean_t
zfs_avx512ifma_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512IFMA)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512IFMA);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512VBMI instruction set is available
 */
static inline boolean_t
zfs_avx512vbmi_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512VBMI)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512VBMI);
#endif
	return (has_avx512 && __zmm_enabled());
}

#endif /* defined(__x86) */

#endif /* _LINUX_SIMD_X86_H */