/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Gvozden Neskovic <neskovic@compeng.uni-frankfurt.de>.
 */

/*
 * USER API:
 *
 * Kernel FPU methods:
 *	kfpu_allowed()
 *	kfpu_begin()
 *	kfpu_end()
 *	kfpu_init()
 *	kfpu_fini()
 *
 * SIMD support:
 *
 * The following functions should be called to determine whether a CPU
 * feature is supported. All functions are usable in kernel and user space.
 * If a SIMD algorithm uses more than one instruction set, all relevant
 * feature test functions should be called.
 *
 * Supported features:
 *	zfs_sse_available()
 *	zfs_sse2_available()
 *	zfs_sse3_available()
 *	zfs_ssse3_available()
 *	zfs_sse4_1_available()
 *	zfs_sse4_2_available()
 *
 *	zfs_avx_available()
 *	zfs_avx2_available()
 *
 *	zfs_bmi1_available()
 *	zfs_bmi2_available()
 *
 *	zfs_aes_available()
 *	zfs_pclmulqdq_available()
 *	zfs_movbe_available()
 *
 *	zfs_avx512f_available()
 *	zfs_avx512cd_available()
 *	zfs_avx512er_available()
 *	zfs_avx512pf_available()
 *	zfs_avx512bw_available()
 *	zfs_avx512dq_available()
 *	zfs_avx512vl_available()
 *	zfs_avx512ifma_available()
 *	zfs_avx512vbmi_available()
 *
 * NOTE(AVX-512VL):	If using AVX-512 instructions with 128-bit registers,
 *			also add zfs_avx512vl_available() to the feature check.
 */
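
/*
 * EXAMPLE (illustrative sketch, not part of the API above): a typical SIMD
 * code path first verifies kfpu_allowed() and the relevant feature tests,
 * then brackets the vector code with kfpu_begin()/kfpu_end().  The function
 * and variable names below are hypothetical.
 *
 *	static void
 *	example_checksum(const uint8_t *buf, size_t len)
 *	{
 *		if (kfpu_allowed() && zfs_avx2_available()) {
 *			kfpu_begin();
 *			// ... AVX2 implementation operating on buf/len ...
 *			kfpu_end();
 *		} else {
 *			// ... scalar fallback ...
 *		}
 *	}
 */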

#ifndef _LINUX_SIMD_X86_H
#define	_LINUX_SIMD_X86_H

/* only for __x86 */
#if defined(__x86)

#include <sys/types.h>
#include <asm/cpufeature.h>

/*
 * Disable the WARN_ON_FPU() macro to prevent additional dependencies
 * when providing the kfpu_* functions.  Relevant warnings are included
 * as appropriate and are unconditionally enabled.
 */
#if defined(CONFIG_X86_DEBUG_FPU) && !defined(KERNEL_EXPORTS_X86_FPU)
#undef CONFIG_X86_DEBUG_FPU
#endif

/*
 * The following cases are for kernels which export either the
 * kernel_fpu_* or __kernel_fpu_* functions.
 */
#if defined(KERNEL_EXPORTS_X86_FPU)

#if defined(HAVE_KERNEL_FPU_API_HEADER)
#include <asm/fpu/api.h>
#if defined(HAVE_KERNEL_FPU_INTERNAL_HEADER)
#include <asm/fpu/internal.h>
#endif
#else
#include <asm/i387.h>
#endif

#define	kfpu_allowed()		1
#define	kfpu_init()		0
#define	kfpu_fini()		((void) 0)

#if defined(HAVE_UNDERSCORE_KERNEL_FPU)
#define	kfpu_begin()		\
{				\
	preempt_disable();	\
	__kernel_fpu_begin();	\
}
#define	kfpu_end()		\
{				\
	__kernel_fpu_end();	\
	preempt_enable();	\
}

#elif defined(HAVE_KERNEL_FPU)
#define	kfpu_begin()		kernel_fpu_begin()
#define	kfpu_end()		kernel_fpu_end()

#else
/*
 * This case is unreachable.  When KERNEL_EXPORTS_X86_FPU is defined then
 * either HAVE_UNDERSCORE_KERNEL_FPU or HAVE_KERNEL_FPU must be defined.
 */
#error "Unreachable kernel configuration"
#endif

#else /* defined(KERNEL_EXPORTS_X86_FPU) */

/*
 * When the kernel_fpu_* symbols are unavailable, provide our own
 * versions which allow the FPU to be used safely.
 */
#if defined(HAVE_KERNEL_FPU_INTERNAL)

/*
 * For kernels not exporting kernel_fpu_{begin,end}() we have to use inline
 * assembly with the XSAVE{,OPT,S} instructions, so we need the toolchain to
 * support at least XSAVE.
 */
#if !defined(HAVE_XSAVE)
#error "Toolchain needs to support the XSAVE assembler instruction"
#endif

#include <linux/mm.h>
#include <linux/slab.h>

extern uint8_t **zfs_kfpu_fpregs;

/*
 * Return the size in bytes required by the XSAVE instruction for an
 * XSAVE area containing all the user state components supported by this CPU.
 * See: Intel 64 and IA-32 Architectures Software Developer’s Manual.
 * Dec. 2021. Vol. 2A p. 3-222.
 */
static inline uint32_t
get_xsave_area_size(void)
{
	if (!boot_cpu_has(X86_FEATURE_OSXSAVE)) {
		return (0);
	}
	/*
	 * Call CPUID with leaf 13 (0x0D) and subleaf 0. The maximum size
	 * required by all supported features is returned in %ecx. We don't
	 * need to check cpuid_max here, since if this CPU has OSXSAVE set,
	 * it supports leaf 13 as well.
	 */
	uint32_t eax, ebx, ecx, edx;

	eax = 13U;
	ecx = 0U;
	__asm__ __volatile__("cpuid"
	    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
	    : "a" (eax), "c" (ecx));

	return (ecx);
}

/*
 * Return the allocation order of the maximum buffer size required to save the
 * FPU state on this architecture. The value returned is the same as Linux's
 * get_order() function would return (i.e. 2^order = number of pages
 * required). Currently this will always return 0 since the save area is
 * below 4 KiB even for a full-fledged AVX-512 implementation.
 */
static inline int
get_fpuregs_save_area_order(void)
{
	size_t area_size = (size_t)get_xsave_area_size();

	/*
	 * If we are dealing with a CPU not supporting XSAVE,
	 * get_xsave_area_size() will return 0. Thus the maximum memory
	 * required is the FXSAVE area size which is 512 bytes. See: Intel 64
	 * and IA-32 Architectures Software Developer’s Manual. Dec. 2021.
	 * Vol. 2A p. 3-451.
	 */
	if (area_size == 0) {
		area_size = 512;
	}
	return (get_order(area_size));
}

/*
 * Free the per-cpu buffers used to store the FPU state, allocated by
 * kfpu_init().
 */
static inline void
kfpu_fini(void)
{
	int cpu;
	int order = get_fpuregs_save_area_order();

	for_each_possible_cpu(cpu) {
		if (zfs_kfpu_fpregs[cpu] != NULL) {
			free_pages((unsigned long)zfs_kfpu_fpregs[cpu], order);
		}
	}

	kfree(zfs_kfpu_fpregs);
}

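/*
 * Allocate a per-cpu buffer, large enough to hold the FPU register state,
 * for every possible CPU.  Returns 0 on success or -ENOMEM on failure.
 */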
static inline int
kfpu_init(void)
{
	zfs_kfpu_fpregs = kzalloc(num_possible_cpus() * sizeof (uint8_t *),
	    GFP_KERNEL);

	if (zfs_kfpu_fpregs == NULL)
		return (-ENOMEM);

	/*
	 * The fxsave and xsave operations require 16-/64-byte alignment of
	 * the target memory. Since kmalloc() provides no such alignment
	 * guarantee, use alloc_pages_node() instead.
	 */
	int cpu;
	int order = get_fpuregs_save_area_order();

	for_each_possible_cpu(cpu) {
		struct page *page = alloc_pages_node(cpu_to_node(cpu),
		    GFP_KERNEL | __GFP_ZERO, order);
		if (page == NULL) {
			kfpu_fini();
			return (-ENOMEM);
		}

		zfs_kfpu_fpregs[cpu] = page_address(page);
	}

	return (0);
}
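
/*
 * EXAMPLE (illustrative sketch): kfpu_init() is expected to be called once
 * during module load and kfpu_fini() once during unload.  The function names
 * below are hypothetical.
 *
 *	static int __init
 *	example_module_init(void)
 *	{
 *		int error = kfpu_init();
 *		if (error != 0)
 *			return (error);
 *		// ... remaining module setup ...
 *		return (0);
 *	}
 *
 *	static void __exit
 *	example_module_exit(void)
 *	{
 *		// ... remaining module teardown ...
 *		kfpu_fini();
 *	}
 */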

#define	kfpu_allowed()		1

/*
 * FPU save and restore instructions.
 */
#define	__asm			__asm__ __volatile__
#define	kfpu_fxsave(addr)	__asm("fxsave %0" : "=m" (*(addr)))
#define	kfpu_fxsaveq(addr)	__asm("fxsaveq %0" : "=m" (*(addr)))
#define	kfpu_fnsave(addr)	__asm("fnsave %0; fwait" : "=m" (*(addr)))
#define	kfpu_fxrstor(addr)	__asm("fxrstor %0" : : "m" (*(addr)))
#define	kfpu_fxrstorq(addr)	__asm("fxrstorq %0" : : "m" (*(addr)))
#define	kfpu_frstor(addr)	__asm("frstor %0" : : "m" (*(addr)))
#define	kfpu_fxsr_clean(rval)	__asm("fnclex; emms; fildl %P[addr]" \
				    : : [addr] "m" (rval));

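/*
 * kfpu_do_xsave() issues the given XSAVE-family instruction with the
 * requested-feature bitmap in %edx:%eax.  The hardware saves the
 * intersection of this mask with the state enabled in XCR0 (and, for
 * xsaves, IA32_XSS), so passing ~0 saves every enabled state component.
 */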
#define	kfpu_do_xsave(instruction, addr, mask)			\
{								\
	uint32_t low, hi;					\
								\
	low = mask;						\
	hi = (uint64_t)(mask) >> 32;				\
	__asm(instruction " %[dst]\n\t"				\
	    :							\
	    : [dst] "m" (*(addr)), "a" (low), "d" (hi)		\
	    : "memory");					\
}

static inline void
kfpu_save_fxsr(uint8_t *addr)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kfpu_fxsave(addr);
	else
		kfpu_fxsaveq(addr);
}

static inline void
kfpu_save_fsave(uint8_t *addr)
{
	kfpu_fnsave(addr);
}

static inline void
kfpu_begin(void)
{
	/*
	 * Preemption and interrupts must be disabled for the critical
	 * region where the FPU state is being modified.
	 */
	preempt_disable();
	local_irq_disable();

	/*
	 * The current FPU registers need to be preserved by kfpu_begin()
	 * and restored by kfpu_end().  They are stored in a dedicated
	 * per-cpu variable, not in the task struct; this allows any user
	 * FPU state to be correctly preserved and restored.
	 */
	uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
	if (static_cpu_has(X86_FEATURE_XSAVES)) {
		kfpu_do_xsave("xsaves", state, ~0);
		return;
	}
#endif
#if defined(HAVE_XSAVEOPT)
	if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
		kfpu_do_xsave("xsaveopt", state, ~0);
		return;
	}
#endif
	if (static_cpu_has(X86_FEATURE_XSAVE)) {
		kfpu_do_xsave("xsave", state, ~0);
	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
		kfpu_save_fxsr(state);
	} else {
		kfpu_save_fsave(state);
	}
}

#define	kfpu_do_xrstor(instruction, addr, mask)			\
{								\
	uint32_t low, hi;					\
								\
	low = mask;						\
	hi = (uint64_t)(mask) >> 32;				\
	__asm(instruction " %[src]"				\
	    :							\
	    : [src] "m" (*(addr)), "a" (low), "d" (hi)		\
	    : "memory");					\
}

static inline void
kfpu_restore_fxsr(uint8_t *addr)
{
	/*
	 * On AuthenticAMD K7 and K8 processors the fxrstor instruction only
	 * restores the x87 FOP, FIP, and FDP registers when an exception
	 * is pending.  Clean the x87 state to force the restore.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK)))
		kfpu_fxsr_clean(addr);

	if (IS_ENABLED(CONFIG_X86_32)) {
		kfpu_fxrstor(addr);
	} else {
		kfpu_fxrstorq(addr);
	}
}

static inline void
kfpu_restore_fsave(uint8_t *addr)
{
	kfpu_frstor(addr);
}

static inline void
kfpu_end(void)
{
	uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
	if (static_cpu_has(X86_FEATURE_XSAVES)) {
		kfpu_do_xrstor("xrstors", state, ~0);
		goto out;
	}
#endif
	if (static_cpu_has(X86_FEATURE_XSAVE)) {
		kfpu_do_xrstor("xrstor", state, ~0);
	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
		kfpu_restore_fxsr(state);
	} else {
		kfpu_restore_fsave(state);
	}
#if defined(HAVE_XSAVES)
out:
#endif
	local_irq_enable();
	preempt_enable();
}

#else

#error	"Exactly one of KERNEL_EXPORTS_X86_FPU or HAVE_KERNEL_FPU_INTERNAL" \
	" must be defined"

#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */
#endif /* defined(KERNEL_EXPORTS_X86_FPU) */

/*
 * The Linux kernel provides an interface for CPU feature testing.
 */

/*
 * Detect register set support
 */

/*
 * Check if the OS supports AVX and AVX2 by checking XCR0.
 * Only call this function if CPUID indicates that the AVX feature is
 * supported by the CPU; otherwise executing xgetbv may raise an
 * illegal-instruction exception.
 */
static inline uint64_t
zfs_xgetbv(uint32_t index)
{
	uint32_t eax, edx;
	/* xgetbv - instruction byte code */
	__asm__ __volatile__(".byte 0x0f; .byte 0x01; .byte 0xd0"
	    : "=a" (eax), "=d" (edx)
	    : "c" (index));

	return ((((uint64_t)edx)<<32) | (uint64_t)eax);
}

static inline boolean_t
__simd_state_enabled(const uint64_t state)
{
	boolean_t has_osxsave;
	uint64_t xcr0;

#if defined(X86_FEATURE_OSXSAVE)
	has_osxsave = !!boot_cpu_has(X86_FEATURE_OSXSAVE);
#else
	has_osxsave = B_FALSE;
#endif
	if (!has_osxsave)
		return (B_FALSE);

	xcr0 = zfs_xgetbv(0);
	return ((xcr0 & state) == state);
}

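/*
 * XCR0 state-component bits: 0x2 = SSE (XMM state), 0x4 = AVX (YMM state),
 * 0xE0 = the AVX-512 opmask, ZMM_Hi256 and Hi16_ZMM states.
 */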
#define	_XSTATE_SSE_AVX		(0x2 | 0x4)
#define	_XSTATE_AVX512		(0xE0 | _XSTATE_SSE_AVX)

#define	__ymm_enabled() __simd_state_enabled(_XSTATE_SSE_AVX)
#define	__zmm_enabled() __simd_state_enabled(_XSTATE_AVX512)

/*
 * Check if SSE instruction set is available
 */
static inline boolean_t
zfs_sse_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM));
}

/*
 * Check if SSE2 instruction set is available
 */
static inline boolean_t
zfs_sse2_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM2));
}

/*
 * Check if SSE3 instruction set is available
 */
static inline boolean_t
zfs_sse3_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM3));
}

/*
 * Check if SSSE3 instruction set is available
 */
static inline boolean_t
zfs_ssse3_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_SSSE3));
}

/*
 * Check if SSE4.1 instruction set is available
 */
static inline boolean_t
zfs_sse4_1_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM4_1));
}

/*
 * Check if SSE4.2 instruction set is available
 */
static inline boolean_t
zfs_sse4_2_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM4_2));
}

/*
 * Check if AVX instruction set is available
 */
static inline boolean_t
zfs_avx_available(void)
{
	return (boot_cpu_has(X86_FEATURE_AVX) && __ymm_enabled());
}

/*
 * Check if AVX2 instruction set is available
 */
static inline boolean_t
zfs_avx2_available(void)
{
	return (boot_cpu_has(X86_FEATURE_AVX2) && __ymm_enabled());
}

/*
 * Check if BMI1 instruction set is available
 */
static inline boolean_t
zfs_bmi1_available(void)
{
#if defined(X86_FEATURE_BMI1)
	return (!!boot_cpu_has(X86_FEATURE_BMI1));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if BMI2 instruction set is available
 */
static inline boolean_t
zfs_bmi2_available(void)
{
#if defined(X86_FEATURE_BMI2)
	return (!!boot_cpu_has(X86_FEATURE_BMI2));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if AES instruction set is available
 */
static inline boolean_t
zfs_aes_available(void)
{
#if defined(X86_FEATURE_AES)
	return (!!boot_cpu_has(X86_FEATURE_AES));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if PCLMULQDQ instruction set is available
 */
static inline boolean_t
zfs_pclmulqdq_available(void)
{
#if defined(X86_FEATURE_PCLMULQDQ)
	return (!!boot_cpu_has(X86_FEATURE_PCLMULQDQ));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if MOVBE instruction is available
 */
static inline boolean_t
zfs_movbe_available(void)
{
#if defined(X86_FEATURE_MOVBE)
	return (!!boot_cpu_has(X86_FEATURE_MOVBE));
#else
	return (B_FALSE);
#endif
}

/*
 * AVX-512 family of instruction sets:
 *
 * AVX512F	Foundation
 * AVX512CD	Conflict Detection Instructions
 * AVX512ER	Exponential and Reciprocal Instructions
 * AVX512PF	Prefetch Instructions
 *
 * AVX512BW	Byte and Word Instructions
 * AVX512DQ	Double-word and Quadword Instructions
 * AVX512VL	Vector Length Extensions
 *
 * AVX512IFMA	Integer Fused Multiply Add (Not supported by kernel 4.4)
 * AVX512VBMI	Vector Byte Manipulation Instructions
 */
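
/*
 * EXAMPLE (illustrative sketch): per the AVX-512VL note at the top of this
 * file, an algorithm that applies AVX-512 instructions to 128-bit (XMM) or
 * 256-bit (YMM) registers must check for AVX512VL in addition to the primary
 * feature.  The function name below is hypothetical.
 *
 *	static boolean_t
 *	example_avx512bw_xmm_will_work(void)
 *	{
 *		return (zfs_avx512bw_available() && zfs_avx512vl_available());
 *	}
 */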

/*
 * Check if AVX512F instruction set is available
 */
static inline boolean_t
zfs_avx512f_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512F)
	has_avx512 = !!boot_cpu_has(X86_FEATURE_AVX512F);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512CD instruction set is available
 */
static inline boolean_t
zfs_avx512cd_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512CD)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512CD);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512ER instruction set is available
 */
static inline boolean_t
zfs_avx512er_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512ER)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512ER);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512PF instruction set is available
 */
static inline boolean_t
zfs_avx512pf_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512PF)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512PF);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512BW instruction set is available
 */
static inline boolean_t
zfs_avx512bw_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512BW)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512BW);
#endif

	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512DQ instruction set is available
 */
static inline boolean_t
zfs_avx512dq_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512DQ)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512DQ);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512VL instruction set is available
 */
static inline boolean_t
zfs_avx512vl_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512VL)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512VL);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512IFMA instruction set is available
 */
static inline boolean_t
zfs_avx512ifma_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512IFMA)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512IFMA);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512VBMI instruction set is available
 */
static inline boolean_t
zfs_avx512vbmi_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512VBMI)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512VBMI);
#endif
	return (has_avx512 && __zmm_enabled());
}

#endif /* defined(__x86) */

#endif /* _LINUX_SIMD_X86_H */