
Searched refs:vmovddup (Results 26 – 50 of 1773) sorted by relevance


/dports/devel/llvm80/llvm-8.0.1.src/test/CodeGen/X86/
haddsub-shuf.ll
32 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
45 ; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
72 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
115 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
162 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
175 ; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
202 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
290 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
302 ; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
383 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
[all …]
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
50 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
51 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
55 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
70 ; and then we fake it: use vmovddup to splat 64-bit value.
75 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
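
As the test comments above explain, AVX1 has no 64-bit integer broadcast, so the compiler reuses the floating-point vmovddup to splat a 64-bit value; even with AVX2 it prefers vmovddup when optimizing for size, since its encoding is one byte shorter than vpbroadcastq. A minimal sketch of the same trick through C intrinsics (the helper name splat_u64 is illustrative; the intrinsics themselves are standard):

    #include <immintrin.h>
    #include <stdint.h>

    /* Splat one 64-bit integer across an XMM register.
     * _mm_loaddup_pd lowers to (v)movddup with a memory operand; it only
     * moves bits, so reinterpreting the result as integers is safe. */
    static __m128i splat_u64(const uint64_t *p)
    {
        __m128d d = _mm_loaddup_pd((const double *)p); /* [x, x] */
        return _mm_castpd_si128(d);                    /* view as 2 x i64 */
    }

With AVX2 a compiler may instead emit vpbroadcastq for _mm_set1_epi64x; the size-oriented checks in these tests pin down that the shorter vmovddup encoding is chosen instead.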
/dports/www/chromium-legacy/chromium-88.0.4324.182/native_client/src/trusted/validator_ragel/
avx1_xmm_ymm_memory_moves.py
23 vmovddup = AllXmmYmmMemorytoXmmYmmMoves('vmovddup', bitness)
43 moves = (vmovapd | vmovaps | vmovddup | vmovdqa | vmovdqu | vmovhlps |
/dports/math/openblas/OpenBLAS-0.3.18/kernel/x86_64/
zgemm_kernel_4x2_haswell.S
372 vmovddup ALPHA_R, %xmm0
373 vmovddup ALPHA_I, %xmm1
501 vmovddup ALPHA_R, %xmm0
502 vmovddup ALPHA_I, %xmm1
696 vmovddup ALPHA_R, %xmm0
697 vmovddup ALPHA_I, %xmm1
795 vmovddup ALPHA_R, %xmm0
796 vmovddup ALPHA_I, %xmm1
939 vmovddup ALPHA_R, %xmm0
940 vmovddup ALPHA_I, %xmm1
[all …]
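
In these complex-double GEMM kernels, vmovddup broadcasts the real and imaginary parts of alpha into separate registers so the final update of C can be done with two multiplies and an addsub. A hedged sketch of that pattern in intrinsics (cmul_alpha and its pointer arguments are illustrative names, not OpenBLAS code):

    #include <immintrin.h>

    /* c holds one complex double as [re, im]; alpha is split the way the
     * kernel splits it: [ar, ar] and [ai, ai] via vmovddup. */
    static __m128d cmul_alpha(__m128d c,
                              const double *alpha_r, const double *alpha_i)
    {
        __m128d ar = _mm_loaddup_pd(alpha_r);   /* vmovddup ALPHA_R */
        __m128d ai = _mm_loaddup_pd(alpha_i);   /* vmovddup ALPHA_I */
        __m128d sw = _mm_shuffle_pd(c, c, 1);   /* [im, re] */
        /* [ar*re - ai*im, ar*im + ai*re] */
        return _mm_addsub_pd(_mm_mul_pd(ar, c), _mm_mul_pd(ai, sw));
    }

The same duplicate-then-multiply shape shows up in the dgemv and dgemm kernels below, where vmovddup broadcasts one double from X or BO into both lanes ahead of the multiply-accumulate.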
dgemv_n_bulldozer.S
240 vmovddup (X), %xmm8
242 vmovddup (X), %xmm9
244 vmovddup (X), %xmm10
246 vmovddup (X), %xmm11
248 vmovddup (X), %xmm12
250 vmovddup (X), %xmm13
252 vmovddup (X), %xmm14
254 vmovddup (X), %xmm15
257 vmovddup ALPHA, %xmm0
594 vmovddup (X), %xmm12
[all …]
dgemm_kernel_4x4_haswell.S
459 vmovddup -12 * SIZE(BO), %xmm1
460 vmovddup -11 * SIZE(BO), %xmm2
461 vmovddup -10 * SIZE(BO), %xmm3
463 vmovddup -9 * SIZE(BO), %xmm1
465 vmovddup -8 * SIZE(BO), %xmm2
467 vmovddup -7 * SIZE(BO), %xmm3
490 vmovddup ALPHA, %xmm0
861 vmovddup ALPHA, %xmm0
975 vmovddup ALPHA, %xmm0
1026 vmovddup ALPHA, %xmm0
[all …]
dgemm_kernel_4x8_haswell.S
569 vmovddup -12 * SIZE(BO), %xmm1
570 vmovddup -11 * SIZE(BO), %xmm2
571 vmovddup -10 * SIZE(BO), %xmm3
573 vmovddup -9 * SIZE(BO), %xmm1
575 vmovddup -8 * SIZE(BO), %xmm2
600 vmovddup ALPHA, %xmm0
1158 vmovddup ALPHA, %xmm0
1557 vmovddup ALPHA, %xmm0
1671 vmovddup ALPHA, %xmm0
1722 vmovddup ALPHA, %xmm0
[all …]
/dports/devel/llvm70/llvm-7.0.1.src/test/CodeGen/X86/
vector-shuffle-combining-avx.ll
41 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
46 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
55 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
60 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
226 ; X32-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
231 ; X64-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
240 ; X32-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
245 ; X64-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2]
296 ; X32-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
301 ; X64-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
[all …]
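
Note the two shapes these checks match: on an XMM register vmovddup yields xmm[0,0], while on a YMM register it duplicates the even element within each 128-bit lane, hence ymm[0,0,2,2]. A small sketch, assuming AVX (the function name is illustrative):

    #include <immintrin.h>

    /* vmovddup on YMM: [a, b, c, d] -> [a, a, c, c] */
    static __m256d dup_even_elements(__m256d v)
    {
        return _mm256_movedup_pd(v);
    }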
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
49 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
50 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
54 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
68 ; and then we fake it: use vmovddup to splat 64-bit value.
73 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
/dports/devel/llvm90/llvm-9.0.1.src/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
50 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
51 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
55 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
70 ; and then we fake it: use vmovddup to splat 64-bit value.
75 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
haddsub-shuf.ll
42 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
107 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
160 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
172 ; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
204 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
216 ; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
286 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
387 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
522 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
596 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
[all …]
/dports/devel/llvm10/llvm-10.0.1.src/test/CodeGen/X86/
haddsub-shuf.ll
42 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
107 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
160 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
172 ; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
204 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
216 ; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
286 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
387 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
522 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
596 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
[all …]
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/devel/tinygo/tinygo-0.14.1/llvm-project/llvm/test/CodeGen/X86/
haddsub-shuf.ll
42 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
107 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
160 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
172 ; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
204 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
216 ; AVX2_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
286 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
387 ; AVX1_SLOW-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
522 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
596 ; AVX1-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
[all …]
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/devel/llvm-cheri/llvm-project-37c49ff00e3eadce5d8703fdc4497f28458c64a8/llvm/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/devel/llvm11/llvm-11.0.1.src/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/devel/llvm-devel/llvm-project-f05c95f10fc1d8171071735af8ad3a9e87633120/llvm/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/devel/wasi-libcxx/llvm-project-13.0.1.src/llvm/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/graphics/llvm-mesa/llvm-13.0.1.src/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/devel/llvm12/llvm-project-12.0.1.src/llvm/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/devel/wasi-compiler-rt13/llvm-project-13.0.1.src/llvm/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/www/chromium-legacy/chromium-88.0.4324.182/third_party/llvm/llvm/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/devel/wasi-compiler-rt12/llvm-project-12.0.1.src/llvm/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
/dports/devel/llvm13/llvm-project-13.0.1.src/llvm/test/CodeGen/X86/
splat-for-size.ll
12 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
23 ; CHECK-NEXT: vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
91 ; AVX can't do integer splats, so fake it: use vmovddup to splat 64-bit value.
92 ; We also generate vmovddup for AVX2 because it's one byte smaller than vpbroadcastq.
96 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
113 ; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2,2]
128 ; and then we fake it: use vmovddup to splat 64-bit value.
133 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
153 ; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2,2]
