; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
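
; Both lanes of the loaded <2 x double> are extracted, added to constants, and
; stored to consecutive addresses, so the whole chain is rebuilt as a single
; vector fadd plus a vector store.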
define void @fextr(double* %ptr) {
; CHECK-LABEL: @fextr(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LD:%.*]] = load <2 x double>, <2 x double>* undef
; CHECK-NEXT:    [[P0:%.*]] = getelementptr inbounds double, double* [[PTR:%.*]], i64 0
; CHECK-NEXT:    [[TMP0:%.*]] = fadd <2 x double> [[LD]], <double 0.000000e+00, double 1.100000e+00>
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[P0]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP0]], <2 x double>* [[TMP1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %LD = load <2 x double>, <2 x double>* undef
  %V0 = extractelement <2 x double> %LD, i32 0
  %V1 = extractelement <2 x double> %LD, i32 1
  %P0 = getelementptr inbounds double, double* %ptr, i64 0
  %P1 = getelementptr inbounds double, double* %ptr, i64 1
  %A0 = fadd double %V0, 0.0
  %A1 = fadd double %V1, 1.1
  store double %A0, double* %P0, align 4
  store double %A1, double* %P1, align 4
  ret void
}

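; Same pattern as @fextr, but the stores hit %ptr in reversed order, so the
; vectorizer must emit a reorder shuffle of the loaded vector before the
; vector fadd and store.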
define void @fextr1(double* %ptr) {
; CHECK-LABEL: @fextr1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LD:%.*]] = load <2 x double>, <2 x double>* undef
; CHECK-NEXT:    [[REORDER_SHUFFLE:%.*]] = shufflevector <2 x double> [[LD]], <2 x double> undef, <2 x i32> <i32 1, i32 0>
; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds double, double* [[PTR:%.*]], i64 0
; CHECK-NEXT:    [[TMP0:%.*]] = fadd <2 x double> [[REORDER_SHUFFLE]], <double 3.400000e+00, double 1.200000e+00>
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[P1]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP0]], <2 x double>* [[TMP1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %LD = load <2 x double>, <2 x double>* undef
  %V0 = extractelement <2 x double> %LD, i32 0
  %V1 = extractelement <2 x double> %LD, i32 1
  %P0 = getelementptr inbounds double, double* %ptr, i64 1  ; <--- incorrect order
  %P1 = getelementptr inbounds double, double* %ptr, i64 0
  %A0 = fadd double %V0, 1.2
  %A1 = fadd double %V1, 3.4
  store double %A0, double* %P0, align 4
  store double %A1, double* %P1, align 4
  ret void
}

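; Here the source vector is <4 x double> while only two lanes are used, so the
; extracts cannot be reused directly; the two scalars are re-inserted into a
; <2 x double> that feeds the vectorized fadd and store.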
define void @fextr2(double* %ptr) {
; CHECK-LABEL: @fextr2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LD:%.*]] = load <4 x double>, <4 x double>* undef
; CHECK-NEXT:    [[V0:%.*]] = extractelement <4 x double> [[LD]], i32 0
; CHECK-NEXT:    [[V1:%.*]] = extractelement <4 x double> [[LD]], i32 1
; CHECK-NEXT:    [[P0:%.*]] = getelementptr inbounds double, double* [[PTR:%.*]], i64 0
; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> undef, double [[V0]], i32 0
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[V1]], i32 1
; CHECK-NEXT:    [[TMP2:%.*]] = fadd <2 x double> [[TMP1]], <double 5.500000e+00, double 6.600000e+00>
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[P0]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP2]], <2 x double>* [[TMP3]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %LD = load <4 x double>, <4 x double>* undef
  %V0 = extractelement <4 x double> %LD, i32 0  ; <--- invalid size.
  %V1 = extractelement <4 x double> %LD, i32 1
  %P0 = getelementptr inbounds double, double* %ptr, i64 0
  %P1 = getelementptr inbounds double, double* %ptr, i64 1
  %A0 = fadd double %V0, 5.5
  %A1 = fadd double %V1, 6.6
  store double %A0, double* %P0, align 4
  store double %A1, double* %P1, align 4
  ret void
}