; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2   | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx    | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2   | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

; PR6399

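; A plain <2 x i32> multiply. SSE2 has no multiply that keeps only the
; low 32 bits of each 32-bit lane, so it is emulated with two pmuludq
; plus shuffles; SSE4.2 and AVX can use pmulld directly.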
define <2 x i32> @_mul2xi32a(<2 x i32>, <2 x i32>) {
; SSE2-LABEL: _mul2xi32a:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul2xi32a:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pmulld %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: _mul2xi32a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = mul <2 x i32> %0, %1
  ret <2 x i32> %r
}

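; The same product written by hand through the pmuludq intrinsic:
; the factors are spread into the even 32-bit lanes, multiplied into
; 64-bit products, and the low halves of the products are taken.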
define <2 x i32> @_mul2xi32b(<2 x i32>, <2 x i32>) {
; SSE-LABEL: _mul2xi32b:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _mul2xi32b:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %factor0 = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %factor1 = shufflevector <2 x i32> %1, <2 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %product64 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %factor0, <4 x i32> %factor1) readnone
  %product = bitcast <2 x i64> %product64 to <4 x i32>
  %r = shufflevector <4 x i32> %product, <4 x i32> undef, <2 x i32> <i32 0, i32 4>
  ret <2 x i32> %r
}

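; The same plain multiply on a full <4 x i32>; the generated code is
; identical to _mul2xi32a.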
define <4 x i32> @_mul4xi32a(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32a:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul4xi32a:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pmulld %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: _mul4xi32a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = mul <4 x i32> %0, %1
  ret <4 x i32> %r
}

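; An unrolled even/odd multiply: one pmuludq forms the 64-bit products
; of the even lanes, a second one those of the odd lanes, and the final
; shuffle interleaves the low 32 bits back into lane order. This is
; equivalent to
; %r = mul <4 x i32> %0, %1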
define <4 x i32> @_mul4xi32b(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32b:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul4xi32b:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE42-NEXT:    pmuludq %xmm1, %xmm0
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE42-NEXT:    pmuludq %xmm2, %xmm1
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,2,2]
; SSE42-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; SSE42-NEXT:    retq
;
; AVX1-LABEL: _mul4xi32b:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _mul4xi32b:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,2,2]
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
; AVX2-NEXT:    retq
  %even0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %even1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %evenMul64 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %even0, <4 x i32> %even1) readnone
  %evenMul = bitcast <2 x i64> %evenMul64 to <4 x i32>
  %odd0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %odd1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %oddMul64 = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %odd0, <4 x i32> %odd1) readnone
  %oddMul = bitcast <2 x i64> %oddMul64 to <4 x i32>
  %r = shufflevector <4 x i32> %evenMul, <4 x i32> %oddMul, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  ret <4 x i32> %r
}

; The following extractelements and insertelements
; are just an unrolled 'zext' on a vector:
; %ext0 = zext <4 x i32> %0 to <4 x i64>
; %ext1 = zext <4 x i32> %1 to <4 x i64>
define <4 x i64> @_mul4xi32toi64a(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32toi64a:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,3,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul4xi32toi64a:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT:    pmuludq %xmm3, %xmm2
; SSE42-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,1,3,3]
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
; SSE42-NEXT:    pmuludq %xmm3, %xmm1
; SSE42-NEXT:    movdqa %xmm2, %xmm0
; SSE42-NEXT:    retq
;
; AVX1-LABEL: _mul4xi32toi64a:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,1,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
; AVX1-NEXT:    vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _mul4xi32toi64a:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %f00 = extractelement <4 x i32> %0, i32 0
  %f01 = extractelement <4 x i32> %0, i32 1
  %f02 = extractelement <4 x i32> %0, i32 2
  %f03 = extractelement <4 x i32> %0, i32 3
  %f10 = extractelement <4 x i32> %1, i32 0
  %f11 = extractelement <4 x i32> %1, i32 1
  %f12 = extractelement <4 x i32> %1, i32 2
  %f13 = extractelement <4 x i32> %1, i32 3
  %ext00 = zext i32 %f00 to i64
  %ext01 = zext i32 %f01 to i64
  %ext02 = zext i32 %f02 to i64
  %ext03 = zext i32 %f03 to i64
  %ext10 = zext i32 %f10 to i64
  %ext11 = zext i32 %f11 to i64
  %ext12 = zext i32 %f12 to i64
  %ext13 = zext i32 %f13 to i64
  %extv00 = insertelement <4 x i64> undef,   i64 %ext00, i32 0
  %extv01 = insertelement <4 x i64> %extv00, i64 %ext01, i32 1
  %extv02 = insertelement <4 x i64> %extv01, i64 %ext02, i32 2
  %extv03 = insertelement <4 x i64> %extv02, i64 %ext03, i32 3
  %extv10 = insertelement <4 x i64> undef,   i64 %ext10, i32 0
  %extv11 = insertelement <4 x i64> %extv10, i64 %ext11, i32 1
  %extv12 = insertelement <4 x i64> %extv11, i64 %ext12, i32 2
  %extv13 = insertelement <4 x i64> %extv12, i64 %ext13, i32 3
  %r = mul <4 x i64> %extv03, %extv13
  ret <4 x i64> %r
}

; Very similar to _mul4xi32b above, but there is no bitcast
; and the final shuffle is a little different.
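; Both pmuludq results are kept as full 64-bit products, so the
; combined effect is the widening multiply of all four lanes:
; %ext0 = zext <4 x i32> %0 to <4 x i64>
; %ext1 = zext <4 x i32> %1 to <4 x i64>
; %r = mul <4 x i64> %ext0, %ext1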
define <4 x i64> @_mul4xi32toi64b(<4 x i32>, <4 x i32>) {
; SSE-LABEL: _mul4xi32toi64b:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    pmuludq %xmm1, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    pmuludq %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    retq
;
; AVX1-LABEL: _mul4xi32toi64b:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm2[1],xmm0[1]
; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _mul4xi32toi64b:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm2[1],xmm0[1]
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
  %even0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %even1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
  %evenMul = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %even0, <4 x i32> %even1) readnone
  %odd0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %odd1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
  %oddMul = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %odd0, <4 x i32> %odd1) readnone
  %r = shufflevector <2 x i64> %evenMul, <2 x i64> %oddMul, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x i64> %r
}

; Here we do not split into even- and odd-indexed elements
; but into the lower and the upper half of the factor vectors.
; This makes the initial shuffle more complicated,
; but the final shuffle <i32 0, i32 1, i32 2, i32 3> is a no-op:
; it simply concatenates the two <2 x i64> halves in order.
define <4 x i64> @_mul4xi32toi64c(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: _mul4xi32toi64c:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[0,1,1,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,1,1,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE42-LABEL: _mul4xi32toi64c:
; SSE42:       # %bb.0:
; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
; SSE42-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE42-NEXT:    pmuludq %xmm3, %xmm2
; SSE42-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; SSE42-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; SSE42-NEXT:    pmuludq %xmm0, %xmm1
; SSE42-NEXT:    movdqa %xmm2, %xmm0
; SSE42-NEXT:    retq
;
; AVX1-LABEL: _mul4xi32toi64c:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: _mul4xi32toi64c:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX2-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm2, %ymm0
; AVX2-NEXT:    retq
  %lower0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
  %lower1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>
  %lowerMul = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %lower0, <4 x i32> %lower1) readnone
  %upper0 = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 3, i32 undef>
  %upper1 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 3, i32 undef>
  %upperMul = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %upper0, <4 x i32> %upper1) readnone
  %r = shufflevector <2 x i64> %lowerMul, <2 x i64> %upperMul, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i64> %r
}

; If we know that the most significant half of each i64 element is zero,
; then the multiplication can be simplified drastically.
; In the following example a zero upper half is asserted
; by 'trunc' followed by 'zext'.
;
; The following extractelements and insertelements
; are just an unrolled 'trunc' plus 'zext' on a vector:
; %trunc0 = trunc <2 x i64> %0 to <2 x i32>
; %trunc1 = trunc <2 x i64> %1 to <2 x i32>
; %ext0 = zext <2 x i32> %trunc0 to <2 x i64>
; %ext1 = zext <2 x i32> %trunc1 to <2 x i64>
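;
; With a_hi == b_hi == 0, the full product collapses:
;   (a_hi * 2^32 + a_lo) * (b_hi * 2^32 + b_lo) == a_lo * b_lo (mod 2^64)
; which is exactly the 32x32->64 multiply that pmuludq performs on the
; low half of each 64-bit lane, so a single pmuludq suffices.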
define <2 x i64> @_mul2xi64toi64a(<2 x i64>, <2 x i64>) {
; SSE-LABEL: _mul2xi64toi64a:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _mul2xi64toi64a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %f00 = extractelement <2 x i64> %0, i32 0
  %f01 = extractelement <2 x i64> %0, i32 1
  %f10 = extractelement <2 x i64> %1, i32 0
  %f11 = extractelement <2 x i64> %1, i32 1
  %trunc00 = trunc i64 %f00 to i32
  %trunc01 = trunc i64 %f01 to i32
  %ext00 = zext i32 %trunc00 to i64
  %ext01 = zext i32 %trunc01 to i64
  %trunc10 = trunc i64 %f10 to i32
  %trunc11 = trunc i64 %f11 to i32
  %ext10 = zext i32 %trunc10 to i64
  %ext11 = zext i32 %trunc11 to i64
  %extv00 = insertelement <2 x i64> undef,   i64 %ext00, i32 0
  %extv01 = insertelement <2 x i64> %extv00, i64 %ext01, i32 1
  %extv10 = insertelement <2 x i64> undef,   i64 %ext10, i32 0
  %extv11 = insertelement <2 x i64> %extv10, i64 %ext11, i32 1
  %r = mul <2 x i64> %extv01, %extv11
  ret <2 x i64> %r
}

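; The same single-pmuludq multiply expressed directly through the
; intrinsic: pmuludq only reads the low 32 bits of each 64-bit lane,
; so the plain bitcasts are enough to feed it.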
define <2 x i64> @_mul2xi64toi64b(<2 x i64>, <2 x i64>) {
; SSE-LABEL: _mul2xi64toi64b:
; SSE:       # %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: _mul2xi64toi64b:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %f0 = bitcast <2 x i64> %0 to <4 x i32>
  %f1 = bitcast <2 x i64> %1 to <4 x i32>
  %r = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %f0, <4 x i32> %f1) readnone
  ret <2 x i64> %r
}

declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnone