/* { dg-do run } */
/* { dg-require-effective-target avx } */
/* { dg-options "-O2 -mavx -mtune=generic" } */

#include "avx-check.h"

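/* Check that _mm256_zeroupper clears bits 255:128 of every ymm register
   while a live 256-bit value is carried across it.  */
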
#ifdef __x86_64__
#define LEN 16
#else
#define LEN 8
#endif

static void
avx_test (void)
{
  __m256i src;

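  /* One 32-byte save slot per architectural ymm register.  */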
  char reg_save[LEN][32];
  int i, j;

  int s[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  int d[8] = {11, 22, 33, 44, 55, 66, 77, 88};

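  /* Poison the save area with 0xff so stale zero bytes cannot hide an
     upper half that was not actually cleared.  */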
  __builtin_memset (reg_save, -1, sizeof reg_save);

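  /* Load a full 256-bit value so at least one ymm upper half is live
     before the vzeroupper.  */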
  src = _mm256_loadu_si256 ((__m256i*) s);

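  /* VZEROUPPER zeroes bits 255:128 of all ymm registers.  */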
  _mm256_zeroupper ();

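  /* Dump each full ymm register to memory so the upper halves can be
     inspected.  */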
  __asm__ __volatile__ ("vmovdqu %%ymm0,%0":"=m"(reg_save[0]));
  __asm__ __volatile__ ("vmovdqu %%ymm1,%0":"=m"(reg_save[1]));
  __asm__ __volatile__ ("vmovdqu %%ymm2,%0":"=m"(reg_save[2]));
  __asm__ __volatile__ ("vmovdqu %%ymm3,%0":"=m"(reg_save[3]));
  __asm__ __volatile__ ("vmovdqu %%ymm4,%0":"=m"(reg_save[4]));
  __asm__ __volatile__ ("vmovdqu %%ymm5,%0":"=m"(reg_save[5]));
  __asm__ __volatile__ ("vmovdqu %%ymm6,%0":"=m"(reg_save[6]));
  __asm__ __volatile__ ("vmovdqu %%ymm7,%0":"=m"(reg_save[7]));
#ifdef __x86_64__
  __asm__ __volatile__ ("vmovdqu %%ymm8,%0":"=m"(reg_save[8]));
  __asm__ __volatile__ ("vmovdqu %%ymm9,%0":"=m"(reg_save[9]));
  __asm__ __volatile__ ("vmovdqu %%ymm10,%0":"=m"(reg_save[10]));
  __asm__ __volatile__ ("vmovdqu %%ymm11,%0":"=m"(reg_save[11]));
  __asm__ __volatile__ ("vmovdqu %%ymm12,%0":"=m"(reg_save[12]));
  __asm__ __volatile__ ("vmovdqu %%ymm13,%0":"=m"(reg_save[13]));
  __asm__ __volatile__ ("vmovdqu %%ymm14,%0":"=m"(reg_save[14]));
  __asm__ __volatile__ ("vmovdqu %%ymm15,%0":"=m"(reg_save[15]));
#endif

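  /* Bytes 16..31 of each slot hold bits 255:128 of the register; after
     the vzeroupper they must all be zero.  */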
  for (i = 0; i < LEN; i++)
    for (j = 16; j < 32; j++)
      if (reg_save[i][j])
        abort ();

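  /* src is used again here, so the compiler must preserve its full value
     across the vzeroupper (e.g. by spilling it) instead of leaving it in
     a ymm register with a live upper half.  */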
  _mm256_storeu_si256 ((__m256i*) d, src);

}