// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
// expected-no-diagnostics
#ifndef HEADER
#define HEADER


struct S {
  int a;
  S() : a(0) {}
  S(const S&) {}
  S& operator=(const S&) {return *this;}
  ~S() {}
  friend S operator+(const S&a, const S&b) {return a;}
};


int main(int argc, char **argv) {
  int a;
  float b;
  S c[5];
  short d[argc];
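  // The outer taskgroup registers task reductions over the scalars a, b, and
  // argc; the inner taskgroup registers them over the array c and the VLA d.
  // The 'master taskloop simd' inside the parallel region then joins those
  // reductions through its in_reduction clauses.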
#pragma omp taskgroup task_reduction(+: a, b, argc)
  {
#pragma omp taskgroup task_reduction(-:c, d)
#pragma omp parallel
#pragma omp master taskloop simd in_reduction(+:a) in_reduction(-:d)
    for (int i = 0; i < 5; ++i)
      a += d[a];
  }
  return 0;
}



#endif
43 // CHECK1-LABEL: define {{[^@]+}}@main
44 // CHECK1-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
45 // CHECK1-NEXT:  entry:
46 // CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
47 // CHECK1-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
48 // CHECK1-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
49 // CHECK1-NEXT:    [[A:%.*]] = alloca i32, align 4
50 // CHECK1-NEXT:    [[B:%.*]] = alloca float, align 4
51 // CHECK1-NEXT:    [[C:%.*]] = alloca [5 x %struct.S], align 16
52 // CHECK1-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
53 // CHECK1-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
54 // CHECK1-NEXT:    [[DOTRD_INPUT_:%.*]] = alloca [3 x %struct.kmp_taskred_input_t], align 8
55 // CHECK1-NEXT:    [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
56 // CHECK1-NEXT:    [[DOTRD_INPUT_3:%.*]] = alloca [2 x %struct.kmp_taskred_input_t.0], align 8
57 // CHECK1-NEXT:    [[DOTTASK_RED_6:%.*]] = alloca i8*, align 8
58 // CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
59 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
60 // CHECK1-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
61 // CHECK1-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
62 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
63 // CHECK1-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
64 // CHECK1-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
65 // CHECK1:       arrayctor.loop:
66 // CHECK1-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
67 // CHECK1-NEXT:    call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
68 // CHECK1-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
69 // CHECK1-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
70 // CHECK1-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
71 // CHECK1:       arrayctor.cont:
72 // CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
73 // CHECK1-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
74 // CHECK1-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
75 // CHECK1-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
76 // CHECK1-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16
77 // CHECK1-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
78 // CHECK1-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
79 // CHECK1-NEXT:    [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
80 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
81 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i32* [[A]] to i8*
82 // CHECK1-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 8
83 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
84 // CHECK1-NEXT:    [[TMP7:%.*]] = bitcast i32* [[A]] to i8*
85 // CHECK1-NEXT:    store i8* [[TMP7]], i8** [[TMP6]], align 8
86 // CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
87 // CHECK1-NEXT:    store i64 4, i64* [[TMP8]], align 8
88 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
89 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP9]], align 8
90 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
91 // CHECK1-NEXT:    store i8* null, i8** [[TMP10]], align 8
92 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
93 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP11]], align 8
94 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
95 // CHECK1-NEXT:    [[TMP13:%.*]] = bitcast i32* [[TMP12]] to i8*
96 // CHECK1-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP13]], i8 0, i64 4, i1 false)
97 // CHECK1-NEXT:    [[DOTRD_INPUT_GEP_1:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
98 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 0
99 // CHECK1-NEXT:    [[TMP15:%.*]] = bitcast float* [[B]] to i8*
100 // CHECK1-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 8
101 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 1
102 // CHECK1-NEXT:    [[TMP17:%.*]] = bitcast float* [[B]] to i8*
103 // CHECK1-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
104 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 2
105 // CHECK1-NEXT:    store i64 4, i64* [[TMP18]], align 8
106 // CHECK1-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 3
107 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP19]], align 8
108 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 4
109 // CHECK1-NEXT:    store i8* null, i8** [[TMP20]], align 8
110 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 5
111 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP21]], align 8
112 // CHECK1-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 6
113 // CHECK1-NEXT:    [[TMP23:%.*]] = bitcast i32* [[TMP22]] to i8*
114 // CHECK1-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP23]], i8 0, i64 4, i1 false)
115 // CHECK1-NEXT:    [[DOTRD_INPUT_GEP_2:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 2
116 // CHECK1-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 0
117 // CHECK1-NEXT:    [[TMP25:%.*]] = bitcast i32* [[ARGC_ADDR]] to i8*
118 // CHECK1-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
119 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 1
120 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast i32* [[ARGC_ADDR]] to i8*
121 // CHECK1-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
122 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 2
123 // CHECK1-NEXT:    store i64 4, i64* [[TMP28]], align 8
124 // CHECK1-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 3
125 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init..3 to i8*), i8** [[TMP29]], align 8
126 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 4
127 // CHECK1-NEXT:    store i8* null, i8** [[TMP30]], align 8
128 // CHECK1-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 5
129 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb..4 to i8*), i8** [[TMP31]], align 8
130 // CHECK1-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 6
131 // CHECK1-NEXT:    [[TMP33:%.*]] = bitcast i32* [[TMP32]] to i8*
132 // CHECK1-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP33]], i8 0, i64 4, i1 false)
133 // CHECK1-NEXT:    [[TMP34:%.*]] = bitcast [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
134 // CHECK1-NEXT:    [[TMP35:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 3, i8* [[TMP34]])
135 // CHECK1-NEXT:    store i8* [[TMP35]], i8** [[DOTTASK_RED_]], align 8
136 // CHECK1-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
137 // CHECK1-NEXT:    [[DOTRD_INPUT_GEP_4:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]], i64 0, i64 0
138 // CHECK1-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0:%.*]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 0
139 // CHECK1-NEXT:    [[TMP37:%.*]] = bitcast [5 x %struct.S]* [[C]] to i8*
140 // CHECK1-NEXT:    store i8* [[TMP37]], i8** [[TMP36]], align 8
141 // CHECK1-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 1
142 // CHECK1-NEXT:    [[TMP39:%.*]] = bitcast [5 x %struct.S]* [[C]] to i8*
143 // CHECK1-NEXT:    store i8* [[TMP39]], i8** [[TMP38]], align 8
144 // CHECK1-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2
145 // CHECK1-NEXT:    store i64 20, i64* [[TMP40]], align 8
146 // CHECK1-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3
147 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init..5 to i8*), i8** [[TMP41]], align 8
148 // CHECK1-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 4
149 // CHECK1-NEXT:    store i8* bitcast (void (i8*)* @.red_fini. to i8*), i8** [[TMP42]], align 8
150 // CHECK1-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 5
151 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb..6 to i8*), i8** [[TMP43]], align 8
152 // CHECK1-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 6
153 // CHECK1-NEXT:    [[TMP45:%.*]] = bitcast i32* [[TMP44]] to i8*
154 // CHECK1-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP45]], i8 0, i64 4, i1 false)
155 // CHECK1-NEXT:    [[DOTRD_INPUT_GEP_5:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]], i64 0, i64 1
156 // CHECK1-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 0
157 // CHECK1-NEXT:    [[TMP47:%.*]] = bitcast i16* [[VLA]] to i8*
158 // CHECK1-NEXT:    store i8* [[TMP47]], i8** [[TMP46]], align 8
159 // CHECK1-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 1
160 // CHECK1-NEXT:    [[TMP49:%.*]] = bitcast i16* [[VLA]] to i8*
161 // CHECK1-NEXT:    store i8* [[TMP49]], i8** [[TMP48]], align 8
162 // CHECK1-NEXT:    [[TMP50:%.*]] = mul nuw i64 [[TMP2]], 2
163 // CHECK1-NEXT:    [[TMP51:%.*]] = udiv exact i64 [[TMP50]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
164 // CHECK1-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 2
165 // CHECK1-NEXT:    store i64 [[TMP50]], i64* [[TMP52]], align 8
166 // CHECK1-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 3
167 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init..7 to i8*), i8** [[TMP53]], align 8
168 // CHECK1-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 4
169 // CHECK1-NEXT:    store i8* null, i8** [[TMP54]], align 8
170 // CHECK1-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 5
171 // CHECK1-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb..8 to i8*), i8** [[TMP55]], align 8
172 // CHECK1-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 6
173 // CHECK1-NEXT:    store i32 1, i32* [[TMP56]], align 8
174 // CHECK1-NEXT:    [[TMP57:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]] to i8*
175 // CHECK1-NEXT:    [[TMP58:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 2, i8* [[TMP57]])
176 // CHECK1-NEXT:    store i8* [[TMP58]], i8** [[DOTTASK_RED_6]], align 8
177 // CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i16*, i8**, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]], i64 [[TMP2]], i16* [[VLA]], i8** [[DOTTASK_RED_]], i8** [[DOTTASK_RED_6]])
178 // CHECK1-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
179 // CHECK1-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
180 // CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
181 // CHECK1-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
182 // CHECK1-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
183 // CHECK1-NEXT:    [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
184 // CHECK1-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 5
185 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
186 // CHECK1:       arraydestroy.body:
187 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP60]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
188 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
189 // CHECK1-NEXT:    call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3:[0-9]+]]
190 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
191 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
192 // CHECK1:       arraydestroy.done8:
193 // CHECK1-NEXT:    [[TMP61:%.*]] = load i32, i32* [[RETVAL]], align 4
194 // CHECK1-NEXT:    ret i32 [[TMP61]]
195 //
196 //
197 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1Ev
198 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
199 // CHECK1-NEXT:  entry:
200 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
201 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
202 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
203 // CHECK1-NEXT:    call void @_ZN1SC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
204 // CHECK1-NEXT:    ret void
205 //
206 //
207 // CHECK1-LABEL: define {{[^@]+}}@.red_init.
208 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
209 // CHECK1-NEXT:  entry:
210 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
211 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
212 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
213 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
214 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
215 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
216 // CHECK1-NEXT:    store i32 0, i32* [[TMP3]], align 8
217 // CHECK1-NEXT:    ret void
218 //
219 //
220 // CHECK1-LABEL: define {{[^@]+}}@.red_comb.
221 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
222 // CHECK1-NEXT:  entry:
223 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
224 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
225 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
226 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
227 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
228 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
229 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
230 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to i32*
231 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP3]], align 8
232 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 8
233 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]]
234 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP3]], align 8
235 // CHECK1-NEXT:    ret void
236 //
237 //
238 // CHECK1-LABEL: define {{[^@]+}}@.red_init..1
239 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
240 // CHECK1-NEXT:  entry:
241 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
242 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
243 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
244 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
245 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
246 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to float*
247 // CHECK1-NEXT:    store float 0.000000e+00, float* [[TMP3]], align 8
248 // CHECK1-NEXT:    ret void
249 //
250 //
251 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..2
252 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
253 // CHECK1-NEXT:  entry:
254 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
255 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
256 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
257 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
258 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
259 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to float*
260 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
261 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to float*
262 // CHECK1-NEXT:    [[TMP6:%.*]] = load float, float* [[TMP3]], align 8
263 // CHECK1-NEXT:    [[TMP7:%.*]] = load float, float* [[TMP5]], align 8
264 // CHECK1-NEXT:    [[ADD:%.*]] = fadd float [[TMP6]], [[TMP7]]
265 // CHECK1-NEXT:    store float [[ADD]], float* [[TMP3]], align 8
266 // CHECK1-NEXT:    ret void
267 //
268 //
269 // CHECK1-LABEL: define {{[^@]+}}@.red_init..3
270 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
271 // CHECK1-NEXT:  entry:
272 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
273 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
274 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
275 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
276 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
277 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
278 // CHECK1-NEXT:    store i32 0, i32* [[TMP3]], align 8
279 // CHECK1-NEXT:    ret void
280 //
281 //
282 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..4
283 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
284 // CHECK1-NEXT:  entry:
285 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
286 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
287 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
288 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
289 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
290 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
291 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
292 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to i32*
293 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP3]], align 8
294 // CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 8
295 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]]
296 // CHECK1-NEXT:    store i32 [[ADD]], i32* [[TMP3]], align 8
297 // CHECK1-NEXT:    ret void
298 //
299 //
300 // CHECK1-LABEL: define {{[^@]+}}@.red_init..5
301 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
302 // CHECK1-NEXT:  entry:
303 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
304 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
305 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
306 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
307 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
308 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [5 x %struct.S]*
309 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[TMP3]], i32 0, i32 0
310 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
311 // CHECK1-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP4]]
312 // CHECK1-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
313 // CHECK1:       omp.arrayinit.body:
314 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
315 // CHECK1-NEXT:    call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]])
316 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
317 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
318 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
319 // CHECK1:       omp.arrayinit.done:
320 // CHECK1-NEXT:    ret void
321 //
322 //
323 // CHECK1-LABEL: define {{[^@]+}}@.red_fini.
324 // CHECK1-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5]] {
325 // CHECK1-NEXT:  entry:
326 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
327 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
328 // CHECK1-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
329 // CHECK1-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to [5 x %struct.S]*
330 // CHECK1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[TMP2]], i32 0, i32 0
331 // CHECK1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
332 // CHECK1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
333 // CHECK1:       arraydestroy.body:
334 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
335 // CHECK1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
336 // CHECK1-NEXT:    call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
337 // CHECK1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
338 // CHECK1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
339 // CHECK1:       arraydestroy.done1:
340 // CHECK1-NEXT:    ret void
341 //
342 //
343 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SD1Ev
344 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
345 // CHECK1-NEXT:  entry:
346 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
347 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
348 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
349 // CHECK1-NEXT:    call void @_ZN1SD2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
350 // CHECK1-NEXT:    ret void
351 //
352 //
353 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..6
354 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
355 // CHECK1-NEXT:  entry:
356 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
357 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
358 // CHECK1-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
359 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
360 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
361 // CHECK1-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
362 // CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.S*
363 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
364 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct.S*
365 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP3]], i64 5
366 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[TMP3]], [[TMP6]]
367 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
368 // CHECK1:       omp.arraycpy.body:
369 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
370 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
371 // CHECK1-NEXT:    call void @_ZplRK1SS1_(%struct.S* sret([[STRUCT_S]]) align 4 [[REF_TMP]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
372 // CHECK1-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SaSERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
373 // CHECK1-NEXT:    call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR3]]
374 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
375 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
376 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP6]]
377 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
378 // CHECK1:       omp.arraycpy.done2:
379 // CHECK1-NEXT:    ret void
380 //
381 //
382 // CHECK1-LABEL: define {{[^@]+}}@_ZplRK1SS1_
383 // CHECK1-SAME: (%struct.S* noalias sret([[STRUCT_S:%.*]]) align 4 [[AGG_RESULT:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[A:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR7:[0-9]+]] {
384 // CHECK1-NEXT:  entry:
385 // CHECK1-NEXT:    [[RESULT_PTR:%.*]] = alloca i8*, align 8
386 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca %struct.S*, align 8
387 // CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca %struct.S*, align 8
388 // CHECK1-NEXT:    [[TMP0:%.*]] = bitcast %struct.S* [[AGG_RESULT]] to i8*
389 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[RESULT_PTR]], align 8
390 // CHECK1-NEXT:    store %struct.S* [[A]], %struct.S** [[A_ADDR]], align 8
391 // CHECK1-NEXT:    store %struct.S* [[B]], %struct.S** [[B_ADDR]], align 8
392 // CHECK1-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 8
393 // CHECK1-NEXT:    call void @_ZN1SC1ERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[AGG_RESULT]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP1]])
394 // CHECK1-NEXT:    ret void
395 //
396 //
397 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SaSERKS_
398 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR7]] align 2 {
399 // CHECK1-NEXT:  entry:
400 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
401 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
402 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
403 // CHECK1-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
404 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
405 // CHECK1-NEXT:    ret %struct.S* [[THIS1]]
406 //
407 //
408 // CHECK1-LABEL: define {{[^@]+}}@.red_init..7
409 // CHECK1-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
410 // CHECK1-NEXT:  entry:
411 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
412 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
413 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
414 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
415 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
416 // CHECK1-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
417 // CHECK1-NEXT:    [[TMP4:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast (i64* @{{reduction_size[.].+[.]}})
418 // CHECK1-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to i64*
419 // CHECK1-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 8
420 // CHECK1-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP3]] to i16*
421 // CHECK1-NEXT:    [[TMP8:%.*]] = getelementptr i16, i16* [[TMP7]], i64 [[TMP6]]
422 // CHECK1-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i16* [[TMP7]], [[TMP8]]
423 // CHECK1-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
424 // CHECK1:       omp.arrayinit.body:
425 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP7]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
426 // CHECK1-NEXT:    store i16 0, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
427 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
428 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]]
429 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
430 // CHECK1:       omp.arrayinit.done:
431 // CHECK1-NEXT:    ret void
432 //
433 //
434 // CHECK1-LABEL: define {{[^@]+}}@.red_comb..8
435 // CHECK1-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
436 // CHECK1-NEXT:  entry:
437 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
438 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
439 // CHECK1-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
440 // CHECK1-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
441 // CHECK1-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
442 // CHECK1-NEXT:    [[TMP3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast (i64* @{{reduction_size[.].+[.]}})
443 // CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i64*
444 // CHECK1-NEXT:    [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
445 // CHECK1-NEXT:    [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8
446 // CHECK1-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16*
447 // CHECK1-NEXT:    [[TMP8:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
448 // CHECK1-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16*
449 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr i16, i16* [[TMP7]], i64 [[TMP5]]
450 // CHECK1-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[TMP7]], [[TMP10]]
451 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
452 // CHECK1:       omp.arraycpy.body:
453 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
454 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP7]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
455 // CHECK1-NEXT:    [[TMP11:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
456 // CHECK1-NEXT:    [[CONV:%.*]] = sext i16 [[TMP11]] to i32
457 // CHECK1-NEXT:    [[TMP12:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
458 // CHECK1-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP12]] to i32
459 // CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
460 // CHECK1-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
461 // CHECK1-NEXT:    store i16 [[CONV3]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
462 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
463 // CHECK1-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
464 // CHECK1-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP10]]
465 // CHECK1-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]]
466 // CHECK1:       omp.arraycpy.done4:
467 // CHECK1-NEXT:    ret void
468 //
469 //
470 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
471 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[VLA:%.*]], i16* nonnull align 2 dereferenceable(2) [[D:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_1:%.*]]) #[[ATTR8:[0-9]+]] {
472 // CHECK1-NEXT:  entry:
473 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
474 // CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
475 // CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
476 // CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
477 // CHECK1-NEXT:    [[D_ADDR:%.*]] = alloca i16*, align 8
478 // CHECK1-NEXT:    [[DOTTASK_RED__ADDR:%.*]] = alloca i8**, align 8
479 // CHECK1-NEXT:    [[DOTTASK_RED__ADDR2:%.*]] = alloca i8**, align 8
480 // CHECK1-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
481 // CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
482 // CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
483 // CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
484 // CHECK1-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
485 // CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
486 // CHECK1-NEXT:    store i16* [[D]], i16** [[D_ADDR]], align 8
487 // CHECK1-NEXT:    store i8** [[DOTTASK_RED_]], i8*** [[DOTTASK_RED__ADDR]], align 8
488 // CHECK1-NEXT:    store i8** [[DOTTASK_RED_1]], i8*** [[DOTTASK_RED__ADDR2]], align 8
489 // CHECK1-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
490 // CHECK1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
491 // CHECK1-NEXT:    [[TMP2:%.*]] = load i16*, i16** [[D_ADDR]], align 8
492 // CHECK1-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR]], align 8
493 // CHECK1-NEXT:    [[TMP4:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR2]], align 8
494 // CHECK1-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
495 // CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
496 // CHECK1-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
497 // CHECK1-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
498 // CHECK1-NEXT:    br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
499 // CHECK1:       omp_if.then:
500 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
501 // CHECK1-NEXT:    store i32* [[TMP0]], i32** [[TMP9]], align 8
502 // CHECK1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
503 // CHECK1-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
504 // CHECK1-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
505 // CHECK1-NEXT:    store i16* [[TMP2]], i16** [[TMP11]], align 8
506 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 3
507 // CHECK1-NEXT:    store i8** [[TMP3]], i8*** [[TMP12]], align 8
508 // CHECK1-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 4
509 // CHECK1-NEXT:    store i8** [[TMP4]], i8*** [[TMP13]], align 8
510 // CHECK1-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
511 // CHECK1-NEXT:    [[TMP14:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 1, i64 96, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
512 // CHECK1-NEXT:    [[TMP15:%.*]] = bitcast i8* [[TMP14]] to %struct.kmp_task_t_with_privates*
513 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 0
514 // CHECK1-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 0
515 // CHECK1-NEXT:    [[TMP18:%.*]] = load i8*, i8** [[TMP17]], align 8
516 // CHECK1-NEXT:    [[TMP19:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
517 // CHECK1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP18]], i8* align 8 [[TMP19]], i64 40, i1 false)
518 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 1
519 // CHECK1-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 0
520 // CHECK1-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP3]], align 8
521 // CHECK1-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
522 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 1
523 // CHECK1-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[TMP4]], align 8
524 // CHECK1-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
525 // CHECK1-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 5
526 // CHECK1-NEXT:    store i64 0, i64* [[TMP25]], align 8
527 // CHECK1-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 6
528 // CHECK1-NEXT:    store i64 4, i64* [[TMP26]], align 8
529 // CHECK1-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 7
530 // CHECK1-NEXT:    store i64 1, i64* [[TMP27]], align 8
531 // CHECK1-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 9
532 // CHECK1-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i8*
533 // CHECK1-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP29]], i8 0, i64 8, i1 false)
534 // CHECK1-NEXT:    [[TMP30:%.*]] = load i64, i64* [[TMP27]], align 8
535 // CHECK1-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i8* [[TMP14]], i32 1, i64* [[TMP25]], i64* [[TMP26]], i64 [[TMP30]], i32 1, i32 0, i64 0, i8* null)
536 // CHECK1-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
537 // CHECK1-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
538 // CHECK1-NEXT:    br label [[OMP_IF_END]]
539 // CHECK1:       omp_if.end:
540 // CHECK1-NEXT:    ret void
541 //
542 //
543 // CHECK1-LABEL: define {{[^@]+}}@.omp_task_privates_map.
544 // CHECK1-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i8*** noalias [[TMP1:%.*]], i8*** noalias [[TMP2:%.*]]) #[[ATTR9:[0-9]+]] {
545 // CHECK1-NEXT:  entry:
546 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
547 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca i8***, align 8
548 // CHECK1-NEXT:    [[DOTADDR2:%.*]] = alloca i8***, align 8
549 // CHECK1-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
550 // CHECK1-NEXT:    store i8*** [[TMP1]], i8**** [[DOTADDR1]], align 8
551 // CHECK1-NEXT:    store i8*** [[TMP2]], i8**** [[DOTADDR2]], align 8
552 // CHECK1-NEXT:    [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
553 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
554 // CHECK1-NEXT:    [[TMP5:%.*]] = load i8***, i8**** [[DOTADDR1]], align 8
555 // CHECK1-NEXT:    store i8** [[TMP4]], i8*** [[TMP5]], align 8
556 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
557 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8***, i8**** [[DOTADDR2]], align 8
558 // CHECK1-NEXT:    store i8** [[TMP6]], i8*** [[TMP7]], align 8
559 // CHECK1-NEXT:    ret void
560 //
561 //
562 // CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
563 // CHECK1-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5]] {
564 // CHECK1-NEXT:  entry:
565 // CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
566 // CHECK1-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
567 // CHECK1-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
568 // CHECK1-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
569 // CHECK1-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
570 // CHECK1-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
571 // CHECK1-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
572 // CHECK1-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
573 // CHECK1-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
574 // CHECK1-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
575 // CHECK1-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
576 // CHECK1-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
577 // CHECK1-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i8**, align 8
578 // CHECK1-NEXT:    [[I_I:%.*]] = alloca i32, align 4
579 // CHECK1-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
580 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
581 // CHECK1-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
582 // CHECK1-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
583 // CHECK1-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
584 // CHECK1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
585 // CHECK1-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
586 // CHECK1-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
587 // CHECK1-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
588 // CHECK1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
589 // CHECK1-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
590 // CHECK1-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
591 // CHECK1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
592 // CHECK1-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
593 // CHECK1-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
594 // CHECK1-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
595 // CHECK1-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
596 // CHECK1-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
597 // CHECK1-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
598 // CHECK1-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
599 // CHECK1-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
600 // CHECK1-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
601 // CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
602 // CHECK1-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
603 // CHECK1-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
604 // CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
605 // CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
606 // CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
607 // CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
608 // CHECK1-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
609 // CHECK1-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
610 // CHECK1-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
611 // CHECK1-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
612 // CHECK1-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
613 // CHECK1-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
614 // CHECK1-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
615 // CHECK1-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
616 // CHECK1-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
617 // CHECK1-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
618 // CHECK1-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
619 // CHECK1-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
620 // CHECK1-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
621 // CHECK1-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP22]], i32 0, i32 1
622 // CHECK1-NEXT:    [[TMP24:%.*]] = load i64, i64* [[TMP23]], align 8
623 // CHECK1-NEXT:    [[TMP25:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
624 // CHECK1-NEXT:    [[TMP26:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
625 // CHECK1-NEXT:    [[TMP27:%.*]] = bitcast void (i8*, ...)* [[TMP25]] to void (i8*, i8***, i8***)*
626 // CHECK1-NEXT:    call void [[TMP27]](i8* [[TMP26]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]]
627 // CHECK1-NEXT:    [[TMP28:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
628 // CHECK1-NEXT:    [[TMP29:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
629 // CHECK1-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 0
630 // CHECK1-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[TMP30]], align 8
631 // CHECK1-NEXT:    [[TMP32:%.*]] = load i8*, i8** [[TMP28]], align 8
632 // CHECK1-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
633 // CHECK1-NEXT:    [[TMP34:%.*]] = bitcast i32* [[TMP31]] to i8*
634 // CHECK1-NEXT:    [[TMP35:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP32]], i8* [[TMP34]]) #[[ATTR3]]
635 // CHECK1-NEXT:    [[CONV_I:%.*]] = bitcast i8* [[TMP35]] to i32*
636 // CHECK1-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 2
637 // CHECK1-NEXT:    [[TMP37:%.*]] = load i16*, i16** [[TMP36]], align 8
638 // CHECK1-NEXT:    [[TMP38:%.*]] = mul nuw i64 [[TMP24]], 2
639 // CHECK1-NEXT:    [[TMP39:%.*]] = udiv exact i64 [[TMP38]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
640 // CHECK1-NEXT:    [[TMP40:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]], i8* bitcast (i64* @{{reduction_size[.].+[.]}}) #[[ATTR3]]
641 // CHECK1-NEXT:    [[TMP41:%.*]] = bitcast i8* [[TMP40]] to i64*
642 // CHECK1-NEXT:    store i64 [[TMP39]], i64* [[TMP41]], align 8
643 // CHECK1-NEXT:    [[TMP42:%.*]] = load i8*, i8** [[TMP29]], align 8
644 // CHECK1-NEXT:    [[TMP43:%.*]] = bitcast i16* [[TMP37]] to i8*
645 // CHECK1-NEXT:    [[TMP44:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP42]], i8* [[TMP43]]) #[[ATTR3]]
646 // CHECK1-NEXT:    [[CONV2_I:%.*]] = bitcast i8* [[TMP44]] to i16*
647 // CHECK1-NEXT:    [[TMP45:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
648 // CHECK1-NEXT:    [[CONV3_I:%.*]] = trunc i64 [[TMP45]] to i32
649 // CHECK1-NEXT:    store i32 [[CONV3_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
650 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
651 // CHECK1:       omp.inner.for.cond.i:
652 // CHECK1-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group !15
653 // CHECK1-NEXT:    [[CONV4_I:%.*]] = sext i32 [[TMP46]] to i64
654 // CHECK1-NEXT:    [[TMP47:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group !15
655 // CHECK1-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV4_I]], [[TMP47]]
656 // CHECK1-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]]
657 // CHECK1:       omp.inner.for.body.i:
658 // CHECK1-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group !15
659 // CHECK1-NEXT:    store i32 [[TMP48]], i32* [[I_I]], align 4, !noalias !14, !llvm.access.group !15
660 // CHECK1-NEXT:    [[TMP49:%.*]] = load i32, i32* [[CONV_I]], align 4, !llvm.access.group !15
661 // CHECK1-NEXT:    [[IDXPROM_I:%.*]] = sext i32 [[TMP49]] to i64
662 // CHECK1-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds i16, i16* [[CONV2_I]], i64 [[IDXPROM_I]]
663 // CHECK1-NEXT:    [[TMP50:%.*]] = load i16, i16* [[ARRAYIDX_I]], align 2, !llvm.access.group !15
664 // CHECK1-NEXT:    [[CONV5_I:%.*]] = sext i16 [[TMP50]] to i32
665 // CHECK1-NEXT:    [[TMP51:%.*]] = load i32, i32* [[CONV_I]], align 4, !llvm.access.group !15
666 // CHECK1-NEXT:    [[ADD6_I:%.*]] = add nsw i32 [[TMP51]], [[CONV5_I]]
667 // CHECK1-NEXT:    store i32 [[ADD6_I]], i32* [[CONV_I]], align 4, !llvm.access.group !15
668 // CHECK1-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group !15
669 // CHECK1-NEXT:    [[ADD7_I:%.*]] = add nsw i32 [[TMP52]], 1
670 // CHECK1-NEXT:    store i32 [[ADD7_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group !15
671 // CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]]
672 // CHECK1:       .omp_outlined..9.exit:
673 // CHECK1-NEXT:    ret i32 0
674 //
675 //
676 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2Ev
677 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
678 // CHECK1-NEXT:  entry:
679 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
680 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
681 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
682 // CHECK1-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
683 // CHECK1-NEXT:    store i32 0, i32* [[A]], align 4
684 // CHECK1-NEXT:    ret void
685 //
686 //
687 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SD2Ev
688 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
689 // CHECK1-NEXT:  entry:
690 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
691 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
692 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
693 // CHECK1-NEXT:    ret void
694 //
695 //
696 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SC1ERKS_
697 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
698 // CHECK1-NEXT:  entry:
699 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
700 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
701 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
702 // CHECK1-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
703 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
704 // CHECK1-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[DOTADDR]], align 8
705 // CHECK1-NEXT:    call void @_ZN1SC2ERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP1]])
706 // CHECK1-NEXT:    ret void
707 //
708 //
709 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SC2ERKS_
710 // CHECK1-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
711 // CHECK1-NEXT:  entry:
712 // CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
713 // CHECK1-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
714 // CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
715 // CHECK1-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
716 // CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
717 // CHECK1-NEXT:    ret void
718 //
719 //
720 // CHECK2-LABEL: define {{[^@]+}}@main
721 // CHECK2-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
722 // CHECK2-NEXT:  entry:
723 // CHECK2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
724 // CHECK2-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
725 // CHECK2-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
726 // CHECK2-NEXT:    [[A:%.*]] = alloca i32, align 4
727 // CHECK2-NEXT:    [[B:%.*]] = alloca float, align 4
728 // CHECK2-NEXT:    [[C:%.*]] = alloca [5 x %struct.S], align 16
729 // CHECK2-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
730 // CHECK2-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
731 // CHECK2-NEXT:    [[DOTRD_INPUT_:%.*]] = alloca [3 x %struct.kmp_taskred_input_t], align 8
732 // CHECK2-NEXT:    [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
733 // CHECK2-NEXT:    [[DOTRD_INPUT_3:%.*]] = alloca [2 x %struct.kmp_taskred_input_t.0], align 8
734 // CHECK2-NEXT:    [[DOTTASK_RED_6:%.*]] = alloca i8*, align 8
735 // CHECK2-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
736 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
737 // CHECK2-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
738 // CHECK2-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
739 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
740 // CHECK2-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
741 // CHECK2-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
742 // CHECK2:       arrayctor.loop:
743 // CHECK2-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
744 // CHECK2-NEXT:    call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
745 // CHECK2-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
746 // CHECK2-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
747 // CHECK2-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
748 // CHECK2:       arrayctor.cont:
749 // CHECK2-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
750 // CHECK2-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
751 // CHECK2-NEXT:    [[TMP3:%.*]] = call i8* @llvm.stacksave()
752 // CHECK2-NEXT:    store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
753 // CHECK2-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP2]], align 16
754 // CHECK2-NEXT:    store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
755 // CHECK2-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
756 // CHECK2-NEXT:    [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
757 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
758 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i32* [[A]] to i8*
759 // CHECK2-NEXT:    store i8* [[TMP5]], i8** [[TMP4]], align 8
760 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
761 // CHECK2-NEXT:    [[TMP7:%.*]] = bitcast i32* [[A]] to i8*
762 // CHECK2-NEXT:    store i8* [[TMP7]], i8** [[TMP6]], align 8
763 // CHECK2-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
764 // CHECK2-NEXT:    store i64 4, i64* [[TMP8]], align 8
765 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
766 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP9]], align 8
767 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
768 // CHECK2-NEXT:    store i8* null, i8** [[TMP10]], align 8
769 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
770 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP11]], align 8
771 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
772 // CHECK2-NEXT:    [[TMP13:%.*]] = bitcast i32* [[TMP12]] to i8*
773 // CHECK2-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP13]], i8 0, i64 4, i1 false)
774 // CHECK2-NEXT:    [[DOTRD_INPUT_GEP_1:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 1
775 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 0
776 // CHECK2-NEXT:    [[TMP15:%.*]] = bitcast float* [[B]] to i8*
777 // CHECK2-NEXT:    store i8* [[TMP15]], i8** [[TMP14]], align 8
778 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 1
779 // CHECK2-NEXT:    [[TMP17:%.*]] = bitcast float* [[B]] to i8*
780 // CHECK2-NEXT:    store i8* [[TMP17]], i8** [[TMP16]], align 8
781 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 2
782 // CHECK2-NEXT:    store i64 4, i64* [[TMP18]], align 8
783 // CHECK2-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 3
784 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init..1 to i8*), i8** [[TMP19]], align 8
785 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 4
786 // CHECK2-NEXT:    store i8* null, i8** [[TMP20]], align 8
787 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 5
788 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb..2 to i8*), i8** [[TMP21]], align 8
789 // CHECK2-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_1]], i32 0, i32 6
790 // CHECK2-NEXT:    [[TMP23:%.*]] = bitcast i32* [[TMP22]] to i8*
791 // CHECK2-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP23]], i8 0, i64 4, i1 false)
792 // CHECK2-NEXT:    [[DOTRD_INPUT_GEP_2:%.*]] = getelementptr inbounds [3 x %struct.kmp_taskred_input_t], [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 2
793 // CHECK2-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 0
794 // CHECK2-NEXT:    [[TMP25:%.*]] = bitcast i32* [[ARGC_ADDR]] to i8*
795 // CHECK2-NEXT:    store i8* [[TMP25]], i8** [[TMP24]], align 8
796 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 1
797 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast i32* [[ARGC_ADDR]] to i8*
798 // CHECK2-NEXT:    store i8* [[TMP27]], i8** [[TMP26]], align 8
799 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 2
800 // CHECK2-NEXT:    store i64 4, i64* [[TMP28]], align 8
801 // CHECK2-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 3
802 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init..3 to i8*), i8** [[TMP29]], align 8
803 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 4
804 // CHECK2-NEXT:    store i8* null, i8** [[TMP30]], align 8
805 // CHECK2-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 5
806 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb..4 to i8*), i8** [[TMP31]], align 8
807 // CHECK2-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_2]], i32 0, i32 6
808 // CHECK2-NEXT:    [[TMP33:%.*]] = bitcast i32* [[TMP32]] to i8*
809 // CHECK2-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP33]], i8 0, i64 4, i1 false)
810 // CHECK2-NEXT:    [[TMP34:%.*]] = bitcast [3 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
811 // CHECK2-NEXT:    [[TMP35:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 3, i8* [[TMP34]])
812 // CHECK2-NEXT:    store i8* [[TMP35]], i8** [[DOTTASK_RED_]], align 8
813 // CHECK2-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
814 // CHECK2-NEXT:    [[DOTRD_INPUT_GEP_4:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]], i64 0, i64 0
815 // CHECK2-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0:%.*]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 0
816 // CHECK2-NEXT:    [[TMP37:%.*]] = bitcast [5 x %struct.S]* [[C]] to i8*
817 // CHECK2-NEXT:    store i8* [[TMP37]], i8** [[TMP36]], align 8
818 // CHECK2-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 1
819 // CHECK2-NEXT:    [[TMP39:%.*]] = bitcast [5 x %struct.S]* [[C]] to i8*
820 // CHECK2-NEXT:    store i8* [[TMP39]], i8** [[TMP38]], align 8
821 // CHECK2-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 2
822 // CHECK2-NEXT:    store i64 20, i64* [[TMP40]], align 8
823 // CHECK2-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 3
824 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init..5 to i8*), i8** [[TMP41]], align 8
825 // CHECK2-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 4
826 // CHECK2-NEXT:    store i8* bitcast (void (i8*)* @.red_fini. to i8*), i8** [[TMP42]], align 8
827 // CHECK2-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 5
828 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb..6 to i8*), i8** [[TMP43]], align 8
829 // CHECK2-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_4]], i32 0, i32 6
830 // CHECK2-NEXT:    [[TMP45:%.*]] = bitcast i32* [[TMP44]] to i8*
831 // CHECK2-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP45]], i8 0, i64 4, i1 false)
832 // CHECK2-NEXT:    [[DOTRD_INPUT_GEP_5:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]], i64 0, i64 1
833 // CHECK2-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 0
834 // CHECK2-NEXT:    [[TMP47:%.*]] = bitcast i16* [[VLA]] to i8*
835 // CHECK2-NEXT:    store i8* [[TMP47]], i8** [[TMP46]], align 8
836 // CHECK2-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 1
837 // CHECK2-NEXT:    [[TMP49:%.*]] = bitcast i16* [[VLA]] to i8*
838 // CHECK2-NEXT:    store i8* [[TMP49]], i8** [[TMP48]], align 8
839 // CHECK2-NEXT:    [[TMP50:%.*]] = mul nuw i64 [[TMP2]], 2
840 // CHECK2-NEXT:    [[TMP51:%.*]] = udiv exact i64 [[TMP50]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
841 // CHECK2-NEXT:    [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 2
842 // CHECK2-NEXT:    store i64 [[TMP50]], i64* [[TMP52]], align 8
843 // CHECK2-NEXT:    [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 3
844 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_init..7 to i8*), i8** [[TMP53]], align 8
845 // CHECK2-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 4
846 // CHECK2-NEXT:    store i8* null, i8** [[TMP54]], align 8
847 // CHECK2-NEXT:    [[TMP55:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 5
848 // CHECK2-NEXT:    store i8* bitcast (void (i8*, i8*)* @.red_comb..8 to i8*), i8** [[TMP55]], align 8
849 // CHECK2-NEXT:    [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], %struct.kmp_taskred_input_t.0* [[DOTRD_INPUT_GEP_5]], i32 0, i32 6
850 // CHECK2-NEXT:    store i32 1, i32* [[TMP56]], align 8
851 // CHECK2-NEXT:    [[TMP57:%.*]] = bitcast [2 x %struct.kmp_taskred_input_t.0]* [[DOTRD_INPUT_3]] to i8*
852 // CHECK2-NEXT:    [[TMP58:%.*]] = call i8* @__kmpc_taskred_init(i32 [[TMP0]], i32 2, i8* [[TMP57]])
853 // CHECK2-NEXT:    store i8* [[TMP58]], i8** [[DOTTASK_RED_6]], align 8
854 // CHECK2-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i16*, i8**, i8**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[A]], i64 [[TMP2]], i16* [[VLA]], i8** [[DOTTASK_RED_]], i8** [[DOTTASK_RED_6]])
855 // CHECK2-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
856 // CHECK2-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
857 // CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
858 // CHECK2-NEXT:    [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
859 // CHECK2-NEXT:    call void @llvm.stackrestore(i8* [[TMP59]])
860 // CHECK2-NEXT:    [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
861 // CHECK2-NEXT:    [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN7]], i64 5
862 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
863 // CHECK2:       arraydestroy.body:
864 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP60]], [[ARRAYCTOR_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
865 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
866 // CHECK2-NEXT:    call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3:[0-9]+]]
867 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]]
868 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE8:%.*]], label [[ARRAYDESTROY_BODY]]
869 // CHECK2:       arraydestroy.done8:
870 // CHECK2-NEXT:    [[TMP61:%.*]] = load i32, i32* [[RETVAL]], align 4
871 // CHECK2-NEXT:    ret i32 [[TMP61]]
872 //
873 //
874 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SC1Ev
875 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
876 // CHECK2-NEXT:  entry:
877 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
878 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
879 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
880 // CHECK2-NEXT:    call void @_ZN1SC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
881 // CHECK2-NEXT:    ret void
882 //
883 //
884 // CHECK2-LABEL: define {{[^@]+}}@.red_init.
885 // CHECK2-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
886 // CHECK2-NEXT:  entry:
887 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
888 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
889 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
890 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
891 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
892 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
893 // CHECK2-NEXT:    store i32 0, i32* [[TMP3]], align 8
894 // CHECK2-NEXT:    ret void
895 //
896 //
897 // CHECK2-LABEL: define {{[^@]+}}@.red_comb.
898 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
899 // CHECK2-NEXT:  entry:
900 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
901 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
902 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
903 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
904 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
905 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
906 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
907 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to i32*
908 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP3]], align 8
909 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 8
910 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]]
911 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP3]], align 8
912 // CHECK2-NEXT:    ret void
913 //
914 //
915 // CHECK2-LABEL: define {{[^@]+}}@.red_init..1
916 // CHECK2-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
917 // CHECK2-NEXT:  entry:
918 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
919 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
920 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
921 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
922 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
923 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to float*
924 // CHECK2-NEXT:    store float 0.000000e+00, float* [[TMP3]], align 8
925 // CHECK2-NEXT:    ret void
926 //
927 //
928 // CHECK2-LABEL: define {{[^@]+}}@.red_comb..2
929 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
930 // CHECK2-NEXT:  entry:
931 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
932 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
933 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
934 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
935 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
936 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to float*
937 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
938 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to float*
939 // CHECK2-NEXT:    [[TMP6:%.*]] = load float, float* [[TMP3]], align 8
940 // CHECK2-NEXT:    [[TMP7:%.*]] = load float, float* [[TMP5]], align 8
941 // CHECK2-NEXT:    [[ADD:%.*]] = fadd float [[TMP6]], [[TMP7]]
942 // CHECK2-NEXT:    store float [[ADD]], float* [[TMP3]], align 8
943 // CHECK2-NEXT:    ret void
944 //
945 //
946 // CHECK2-LABEL: define {{[^@]+}}@.red_init..3
947 // CHECK2-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
948 // CHECK2-NEXT:  entry:
949 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
950 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
951 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
952 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
953 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
954 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
955 // CHECK2-NEXT:    store i32 0, i32* [[TMP3]], align 8
956 // CHECK2-NEXT:    ret void
957 //
958 //
959 // CHECK2-LABEL: define {{[^@]+}}@.red_comb..4
960 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
961 // CHECK2-NEXT:  entry:
962 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
963 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
964 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
965 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
966 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
967 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to i32*
968 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
969 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to i32*
970 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP3]], align 8
971 // CHECK2-NEXT:    [[TMP7:%.*]] = load i32, i32* [[TMP5]], align 8
972 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]]
973 // CHECK2-NEXT:    store i32 [[ADD]], i32* [[TMP3]], align 8
974 // CHECK2-NEXT:    ret void
975 //
976 //
977 // CHECK2-LABEL: define {{[^@]+}}@.red_init..5
978 // CHECK2-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
979 // CHECK2-NEXT:  entry:
980 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
981 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
982 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
983 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
984 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
985 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [5 x %struct.S]*
986 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[TMP3]], i32 0, i32 0
987 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
988 // CHECK2-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP4]]
989 // CHECK2-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
990 // CHECK2:       omp.arrayinit.body:
991 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
992 // CHECK2-NEXT:    call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]])
993 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
994 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
995 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
996 // CHECK2:       omp.arrayinit.done:
997 // CHECK2-NEXT:    ret void
998 //
999 //
1000 // CHECK2-LABEL: define {{[^@]+}}@.red_fini.
1001 // CHECK2-SAME: (i8* [[TMP0:%.*]]) #[[ATTR5]] {
1002 // CHECK2-NEXT:  entry:
1003 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1004 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1005 // CHECK2-NEXT:    [[TMP1:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1006 // CHECK2-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to [5 x %struct.S]*
1007 // CHECK2-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[TMP2]], i32 0, i32 0
1008 // CHECK2-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
1009 // CHECK2-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1010 // CHECK2:       arraydestroy.body:
1011 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1012 // CHECK2-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1013 // CHECK2-NEXT:    call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]]
1014 // CHECK2-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1015 // CHECK2-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
1016 // CHECK2:       arraydestroy.done1:
1017 // CHECK2-NEXT:    ret void
1018 //
1019 //
1020 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SD1Ev
1021 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1022 // CHECK2-NEXT:  entry:
1023 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1024 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1025 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1026 // CHECK2-NEXT:    call void @_ZN1SD2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
1027 // CHECK2-NEXT:    ret void
1028 //
1029 //
1030 // CHECK2-LABEL: define {{[^@]+}}@.red_comb..6
1031 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
1032 // CHECK2-NEXT:  entry:
1033 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1034 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1035 // CHECK2-NEXT:    [[REF_TMP:%.*]] = alloca [[STRUCT_S:%.*]], align 4
1036 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1037 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1038 // CHECK2-NEXT:    [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1039 // CHECK2-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP2]] to %struct.S*
1040 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1041 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct.S*
1042 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[TMP3]], i64 5
1043 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[TMP3]], [[TMP6]]
1044 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1045 // CHECK2:       omp.arraycpy.body:
1046 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1047 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1048 // CHECK2-NEXT:    call void @_ZplRK1SS1_(%struct.S* sret([[STRUCT_S]]) align 4 [[REF_TMP]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]])
1049 // CHECK2-NEXT:    [[CALL:%.*]] = call nonnull align 4 dereferenceable(4) %struct.S* @_ZN1SaSERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]])
1050 // CHECK2-NEXT:    call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[REF_TMP]]) #[[ATTR3]]
1051 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1052 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1053 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP6]]
1054 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
1055 // CHECK2:       omp.arraycpy.done2:
1056 // CHECK2-NEXT:    ret void
1057 //
1058 //
1059 // CHECK2-LABEL: define {{[^@]+}}@_ZplRK1SS1_
1060 // CHECK2-SAME: (%struct.S* noalias sret([[STRUCT_S:%.*]]) align 4 [[AGG_RESULT:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[A:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[B:%.*]]) #[[ATTR7:[0-9]+]] {
1061 // CHECK2-NEXT:  entry:
1062 // CHECK2-NEXT:    [[RESULT_PTR:%.*]] = alloca i8*, align 8
1063 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca %struct.S*, align 8
1064 // CHECK2-NEXT:    [[B_ADDR:%.*]] = alloca %struct.S*, align 8
1065 // CHECK2-NEXT:    [[TMP0:%.*]] = bitcast %struct.S* [[AGG_RESULT]] to i8*
1066 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[RESULT_PTR]], align 8
1067 // CHECK2-NEXT:    store %struct.S* [[A]], %struct.S** [[A_ADDR]], align 8
1068 // CHECK2-NEXT:    store %struct.S* [[B]], %struct.S** [[B_ADDR]], align 8
1069 // CHECK2-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 8
1070 // CHECK2-NEXT:    call void @_ZN1SC1ERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[AGG_RESULT]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP1]])
1071 // CHECK2-NEXT:    ret void
1072 //
1073 //
1074 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SaSERKS_
1075 // CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) #[[ATTR7]] align 2 {
1076 // CHECK2-NEXT:  entry:
1077 // CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1078 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
1079 // CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1080 // CHECK2-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
1081 // CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1082 // CHECK2-NEXT:    ret %struct.S* [[THIS1]]
1083 //
1084 //
1085 // CHECK2-LABEL: define {{[^@]+}}@.red_init..7
1086 // CHECK2-SAME: (i8* noalias [[TMP0:%.*]], i8* noalias [[TMP1:%.*]]) #[[ATTR5]] {
1087 // CHECK2-NEXT:  entry:
1088 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1089 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1090 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1091 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1092 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1093 // CHECK2-NEXT:    [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1094 // CHECK2-NEXT:    [[TMP4:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast (i64* @{{reduction_size[.].+[.]}})
1095 // CHECK2-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to i64*
1096 // CHECK2-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 8
1097 // CHECK2-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP3]] to i16*
1098 // CHECK2-NEXT:    [[TMP8:%.*]] = getelementptr i16, i16* [[TMP7]], i64 [[TMP6]]
1099 // CHECK2-NEXT:    [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i16* [[TMP7]], [[TMP8]]
1100 // CHECK2-NEXT:    br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
1101 // CHECK2:       omp.arrayinit.body:
1102 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP7]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
1103 // CHECK2-NEXT:    store i16 0, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
1104 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1105 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]]
1106 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
1107 // CHECK2:       omp.arrayinit.done:
1108 // CHECK2-NEXT:    ret void
1109 //
1110 //
1111 // CHECK2-LABEL: define {{[^@]+}}@.red_comb..8
1112 // CHECK2-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
1113 // CHECK2-NEXT:  entry:
1114 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i8*, align 8
1115 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8*, align 8
1116 // CHECK2-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
1117 // CHECK2-NEXT:    store i8* [[TMP0]], i8** [[DOTADDR]], align 8
1118 // CHECK2-NEXT:    store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
1119 // CHECK2-NEXT:    [[TMP3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* bitcast (i64* @{{reduction_size[.].+[.]}})
1120 // CHECK2-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i64*
1121 // CHECK2-NEXT:    [[TMP5:%.*]] = load i64, i64* [[TMP4]], align 8
1122 // CHECK2-NEXT:    [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8
1123 // CHECK2-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16*
1124 // CHECK2-NEXT:    [[TMP8:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
1125 // CHECK2-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16*
1126 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr i16, i16* [[TMP7]], i64 [[TMP5]]
1127 // CHECK2-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i16* [[TMP7]], [[TMP10]]
1128 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
1129 // CHECK2:       omp.arraycpy.body:
1130 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i16* [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1131 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i16* [ [[TMP7]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
1132 // CHECK2-NEXT:    [[TMP11:%.*]] = load i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
1133 // CHECK2-NEXT:    [[CONV:%.*]] = sext i16 [[TMP11]] to i32
1134 // CHECK2-NEXT:    [[TMP12:%.*]] = load i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2
1135 // CHECK2-NEXT:    [[CONV2:%.*]] = sext i16 [[TMP12]] to i32
1136 // CHECK2-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
1137 // CHECK2-NEXT:    [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
1138 // CHECK2-NEXT:    store i16 [[CONV3]], i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2
1139 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
1140 // CHECK2-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i16, i16* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
1141 // CHECK2-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i16* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP10]]
1142 // CHECK2-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]]
1143 // CHECK2:       omp.arraycpy.done4:
1144 // CHECK2-NEXT:    ret void
1145 //
1146 //
1147 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
1148 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]], i64 [[VLA:%.*]], i16* nonnull align 2 dereferenceable(2) [[D:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_:%.*]], i8** nonnull align 8 dereferenceable(8) [[DOTTASK_RED_1:%.*]]) #[[ATTR8:[0-9]+]] {
1149 // CHECK2-NEXT:  entry:
1150 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1151 // CHECK2-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1152 // CHECK2-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
1153 // CHECK2-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1154 // CHECK2-NEXT:    [[D_ADDR:%.*]] = alloca i16*, align 8
1155 // CHECK2-NEXT:    [[DOTTASK_RED__ADDR:%.*]] = alloca i8**, align 8
1156 // CHECK2-NEXT:    [[DOTTASK_RED__ADDR2:%.*]] = alloca i8**, align 8
1157 // CHECK2-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
1158 // CHECK2-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1159 // CHECK2-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1160 // CHECK2-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1161 // CHECK2-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
1162 // CHECK2-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1163 // CHECK2-NEXT:    store i16* [[D]], i16** [[D_ADDR]], align 8
1164 // CHECK2-NEXT:    store i8** [[DOTTASK_RED_]], i8*** [[DOTTASK_RED__ADDR]], align 8
1165 // CHECK2-NEXT:    store i8** [[DOTTASK_RED_1]], i8*** [[DOTTASK_RED__ADDR2]], align 8
1166 // CHECK2-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
1167 // CHECK2-NEXT:    [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1168 // CHECK2-NEXT:    [[TMP2:%.*]] = load i16*, i16** [[D_ADDR]], align 8
1169 // CHECK2-NEXT:    [[TMP3:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR]], align 8
1170 // CHECK2-NEXT:    [[TMP4:%.*]] = load i8**, i8*** [[DOTTASK_RED__ADDR2]], align 8
1171 // CHECK2-NEXT:    [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1172 // CHECK2-NEXT:    [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
1173 // CHECK2-NEXT:    [[TMP7:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
1174 // CHECK2-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
1175 // CHECK2-NEXT:    br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1176 // CHECK2:       omp_if.then:
1177 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
1178 // CHECK2-NEXT:    store i32* [[TMP0]], i32** [[TMP9]], align 8
1179 // CHECK2-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
1180 // CHECK2-NEXT:    store i64 [[TMP1]], i64* [[TMP10]], align 8
1181 // CHECK2-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
1182 // CHECK2-NEXT:    store i16* [[TMP2]], i16** [[TMP11]], align 8
1183 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 3
1184 // CHECK2-NEXT:    store i8** [[TMP3]], i8*** [[TMP12]], align 8
1185 // CHECK2-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 4
1186 // CHECK2-NEXT:    store i8** [[TMP4]], i8*** [[TMP13]], align 8
1187 // CHECK2-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
1188 // CHECK2-NEXT:    [[TMP14:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 1, i64 96, i64 40, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
1189 // CHECK2-NEXT:    [[TMP15:%.*]] = bitcast i8* [[TMP14]] to %struct.kmp_task_t_with_privates*
1190 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 0
1191 // CHECK2-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 0
1192 // CHECK2-NEXT:    [[TMP18:%.*]] = load i8*, i8** [[TMP17]], align 8
1193 // CHECK2-NEXT:    [[TMP19:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
1194 // CHECK2-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP18]], i8* align 8 [[TMP19]], i64 40, i1 false)
1195 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP15]], i32 0, i32 1
1196 // CHECK2-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 0
1197 // CHECK2-NEXT:    [[TMP22:%.*]] = load i8*, i8** [[TMP3]], align 8
1198 // CHECK2-NEXT:    store i8* [[TMP22]], i8** [[TMP21]], align 8
1199 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP20]], i32 0, i32 1
1200 // CHECK2-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[TMP4]], align 8
1201 // CHECK2-NEXT:    store i8* [[TMP24]], i8** [[TMP23]], align 8
1202 // CHECK2-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 5
1203 // CHECK2-NEXT:    store i64 0, i64* [[TMP25]], align 8
1204 // CHECK2-NEXT:    [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 6
1205 // CHECK2-NEXT:    store i64 4, i64* [[TMP26]], align 8
1206 // CHECK2-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 7
1207 // CHECK2-NEXT:    store i64 1, i64* [[TMP27]], align 8
1208 // CHECK2-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP16]], i32 0, i32 9
1209 // CHECK2-NEXT:    [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i8*
1210 // CHECK2-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP29]], i8 0, i64 8, i1 false)
1211 // CHECK2-NEXT:    [[TMP30:%.*]] = load i64, i64* [[TMP27]], align 8
1212 // CHECK2-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i8* [[TMP14]], i32 1, i64* [[TMP25]], i64* [[TMP26]], i64 [[TMP30]], i32 1, i32 0, i64 0, i8* null)
1213 // CHECK2-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
1214 // CHECK2-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]])
1215 // CHECK2-NEXT:    br label [[OMP_IF_END]]
1216 // CHECK2:       omp_if.end:
1217 // CHECK2-NEXT:    ret void
1218 //
1219 //
1220 // CHECK2-LABEL: define {{[^@]+}}@.omp_task_privates_map.
1221 // CHECK2-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i8*** noalias [[TMP1:%.*]], i8*** noalias [[TMP2:%.*]]) #[[ATTR9:[0-9]+]] {
1222 // CHECK2-NEXT:  entry:
1223 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
1224 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca i8***, align 8
1225 // CHECK2-NEXT:    [[DOTADDR2:%.*]] = alloca i8***, align 8
1226 // CHECK2-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
1227 // CHECK2-NEXT:    store i8*** [[TMP1]], i8**** [[DOTADDR1]], align 8
1228 // CHECK2-NEXT:    store i8*** [[TMP2]], i8**** [[DOTADDR2]], align 8
1229 // CHECK2-NEXT:    [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
1230 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
1231 // CHECK2-NEXT:    [[TMP5:%.*]] = load i8***, i8**** [[DOTADDR1]], align 8
1232 // CHECK2-NEXT:    store i8** [[TMP4]], i8*** [[TMP5]], align 8
1233 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
1234 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8***, i8**** [[DOTADDR2]], align 8
1235 // CHECK2-NEXT:    store i8** [[TMP6]], i8*** [[TMP7]], align 8
1236 // CHECK2-NEXT:    ret void
1237 //
1238 //
1239 // CHECK2-LABEL: define {{[^@]+}}@.omp_task_entry.
1240 // CHECK2-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR5]] {
1241 // CHECK2-NEXT:  entry:
1242 // CHECK2-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
1243 // CHECK2-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
1244 // CHECK2-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
1245 // CHECK2-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
1246 // CHECK2-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
1247 // CHECK2-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
1248 // CHECK2-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
1249 // CHECK2-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
1250 // CHECK2-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
1251 // CHECK2-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
1252 // CHECK2-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
1253 // CHECK2-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i8**, align 8
1254 // CHECK2-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i8**, align 8
1255 // CHECK2-NEXT:    [[I_I:%.*]] = alloca i32, align 4
1256 // CHECK2-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
1257 // CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
1258 // CHECK2-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
1259 // CHECK2-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
1260 // CHECK2-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1261 // CHECK2-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
1262 // CHECK2-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1263 // CHECK2-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
1264 // CHECK2-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
1265 // CHECK2-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
1266 // CHECK2-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1267 // CHECK2-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
1268 // CHECK2-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
1269 // CHECK2-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
1270 // CHECK2-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
1271 // CHECK2-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
1272 // CHECK2-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
1273 // CHECK2-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
1274 // CHECK2-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
1275 // CHECK2-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
1276 // CHECK2-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
1277 // CHECK2-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
1278 // CHECK2-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
1279 // CHECK2-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
1280 // CHECK2-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
1281 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
1282 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
1283 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
1284 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
1285 // CHECK2-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
1286 // CHECK2-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
1287 // CHECK2-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
1288 // CHECK2-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1289 // CHECK2-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1290 // CHECK2-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
1291 // CHECK2-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1292 // CHECK2-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1293 // CHECK2-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
1294 // CHECK2-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
1295 // CHECK2-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
1296 // CHECK2-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1297 // CHECK2-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1298 // CHECK2-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP22]], i32 0, i32 1
1299 // CHECK2-NEXT:    [[TMP24:%.*]] = load i64, i64* [[TMP23]], align 8
1300 // CHECK2-NEXT:    [[TMP25:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1301 // CHECK2-NEXT:    [[TMP26:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1302 // CHECK2-NEXT:    [[TMP27:%.*]] = bitcast void (i8*, ...)* [[TMP25]] to void (i8*, i8***, i8***)*
1303 // CHECK2-NEXT:    call void [[TMP27]](i8* [[TMP26]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]]
1304 // CHECK2-NEXT:    [[TMP28:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
1305 // CHECK2-NEXT:    [[TMP29:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
1306 // CHECK2-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 0
1307 // CHECK2-NEXT:    [[TMP31:%.*]] = load i32*, i32** [[TMP30]], align 8
1308 // CHECK2-NEXT:    [[TMP32:%.*]] = load i8*, i8** [[TMP28]], align 8
1309 // CHECK2-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
1310 // CHECK2-NEXT:    [[TMP34:%.*]] = bitcast i32* [[TMP31]] to i8*
1311 // CHECK2-NEXT:    [[TMP35:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP32]], i8* [[TMP34]]) #[[ATTR3]]
1312 // CHECK2-NEXT:    [[CONV_I:%.*]] = bitcast i8* [[TMP35]] to i32*
1313 // CHECK2-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP22]], i32 0, i32 2
1314 // CHECK2-NEXT:    [[TMP37:%.*]] = load i16*, i16** [[TMP36]], align 8
1315 // CHECK2-NEXT:    [[TMP38:%.*]] = mul nuw i64 [[TMP24]], 2
1316 // CHECK2-NEXT:    [[TMP39:%.*]] = udiv exact i64 [[TMP38]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
1317 // CHECK2-NEXT:    [[TMP40:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]], i8* bitcast (i64* @{{reduction_size[.].+[.]}}) #[[ATTR3]]
1318 // CHECK2-NEXT:    [[TMP41:%.*]] = bitcast i8* [[TMP40]] to i64*
1319 // CHECK2-NEXT:    store i64 [[TMP39]], i64* [[TMP41]], align 8
1320 // CHECK2-NEXT:    [[TMP42:%.*]] = load i8*, i8** [[TMP29]], align 8
1321 // CHECK2-NEXT:    [[TMP43:%.*]] = bitcast i16* [[TMP37]] to i8*
1322 // CHECK2-NEXT:    [[TMP44:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP33]], i8* [[TMP42]], i8* [[TMP43]]) #[[ATTR3]]
1323 // CHECK2-NEXT:    [[CONV2_I:%.*]] = bitcast i8* [[TMP44]] to i16*
1324 // CHECK2-NEXT:    [[TMP45:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1325 // CHECK2-NEXT:    [[CONV3_I:%.*]] = trunc i64 [[TMP45]] to i32
1326 // CHECK2-NEXT:    store i32 [[CONV3_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1327 // CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
1328 // CHECK2:       omp.inner.for.cond.i:
1329 // CHECK2-NEXT:    [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group !15
1330 // CHECK2-NEXT:    [[CONV4_I:%.*]] = sext i32 [[TMP46]] to i64
1331 // CHECK2-NEXT:    [[TMP47:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group !15
1332 // CHECK2-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV4_I]], [[TMP47]]
1333 // CHECK2-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]]
1334 // CHECK2:       omp.inner.for.body.i:
1335 // CHECK2-NEXT:    [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group !15
1336 // CHECK2-NEXT:    store i32 [[TMP48]], i32* [[I_I]], align 4, !noalias !14, !llvm.access.group !15
1337 // CHECK2-NEXT:    [[TMP49:%.*]] = load i32, i32* [[CONV_I]], align 4, !llvm.access.group !15
1338 // CHECK2-NEXT:    [[IDXPROM_I:%.*]] = sext i32 [[TMP49]] to i64
1339 // CHECK2-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds i16, i16* [[CONV2_I]], i64 [[IDXPROM_I]]
// CHECK2-NEXT:    [[TMP50:%.*]] = load i16, i16* [[ARRAYIDX_I]], align 2, !llvm.access.group !15
// CHECK2-NEXT:    [[CONV5_I:%.*]] = sext i16 [[TMP50]] to i32
// CHECK2-NEXT:    [[TMP51:%.*]] = load i32, i32* [[CONV_I]], align 4, !llvm.access.group !15
// CHECK2-NEXT:    [[ADD6_I:%.*]] = add nsw i32 [[TMP51]], [[CONV5_I]]
// CHECK2-NEXT:    store i32 [[ADD6_I]], i32* [[CONV_I]], align 4, !llvm.access.group !15
// CHECK2-NEXT:    [[TMP52:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group !15
// CHECK2-NEXT:    [[ADD7_I:%.*]] = add nsw i32 [[TMP52]], 1
// CHECK2-NEXT:    store i32 [[ADD7_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group !15
// CHECK2-NEXT:    br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK2:       .omp_outlined..9.exit:
// CHECK2-NEXT:    ret i32 0
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SC2Ev
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK2-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK2-NEXT:    ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SD2Ev
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT:    ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SC1ERKS_
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
// CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT:    [[TMP1:%.*]] = load %struct.S*, %struct.S** [[DOTADDR]], align 8
// CHECK2-NEXT:    call void @_ZN1SC2ERKS_(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP1]])
// CHECK2-NEXT:    ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_ZN1SC2ERKS_
// CHECK2-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[TMP0:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK2-NEXT:  entry:
// CHECK2-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT:    [[DOTADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT:    store %struct.S* [[TMP0]], %struct.S** [[DOTADDR]], align 8
// CHECK2-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK2-NEXT:    ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@main
// CHECK3-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK3-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[B:%.*]] = alloca float, align 4
// CHECK3-NEXT:    [[C:%.*]] = alloca [5 x %struct.S], align 16
// CHECK3-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK3-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK3-NEXT:    [[DOTTASK_RED_1:%.*]] = alloca i8*, align 8
// CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK3-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK3-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
// CHECK3-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
// CHECK3-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
// CHECK3:       arrayctor.loop:
// CHECK3-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
// CHECK3-NEXT:    call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
// CHECK3-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
// CHECK3-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
// CHECK3-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
// CHECK3:       arrayctor.cont:
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK3-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK3-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK3-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP1]], align 16
// CHECK3-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK3-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT:    store i64 4, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP3]] to i32
// CHECK3-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3:       omp.inner.for.cond:
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK3-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP4]] to i64
// CHECK3-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !2
// CHECK3-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP5]]
// CHECK3-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3:       omp.inner.for.body:
// CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1
// CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !2
// CHECK3-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64
// CHECK3-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[IDXPROM]]
// CHECK3-NEXT:    [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX]], align 2, !llvm.access.group !2
// CHECK3-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP8]] to i32
// CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !2
// CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], [[CONV3]]
// CHECK3-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4, !llvm.access.group !2
// CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3:       omp.body.continue:
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3:       omp.inner.for.inc:
// CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK3-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK3:       omp.inner.for.end:
// CHECK3-NEXT:    store i32 5, i32* [[I]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK3-NEXT:    call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK3-NEXT:    [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
// CHECK3-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN6]], i64 5
// CHECK3-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK3:       arraydestroy.body:
// CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK3-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK3-NEXT:    call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3:[0-9]+]]
// CHECK3-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]]
// CHECK3-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE7:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK3:       arraydestroy.done7:
// CHECK3-NEXT:    [[TMP13:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK3-NEXT:    ret i32 [[TMP13]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC1Ev
// CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT:    call void @_ZN1SC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK3-NEXT:    ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD1Ev
// CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT:    call void @_ZN1SD2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
// CHECK3-NEXT:    ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN1SC2Ev
// CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK3-NEXT:    ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN1SD2Ev
// CHECK3-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK3-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK3-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@main
// CHECK4-SAME: (i32 [[ARGC:%.*]], i8** [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[ARGC_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[ARGV_ADDR:%.*]] = alloca i8**, align 8
// CHECK4-NEXT:    [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[B:%.*]] = alloca float, align 4
// CHECK4-NEXT:    [[C:%.*]] = alloca [5 x %struct.S], align 16
// CHECK4-NEXT:    [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK4-NEXT:    [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK4-NEXT:    [[DOTTASK_RED_1:%.*]] = alloca i8*, align 8
// CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK4-NEXT:    store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4
// CHECK4-NEXT:    store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8
// CHECK4-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
// CHECK4-NEXT:    [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 5
// CHECK4-NEXT:    br label [[ARRAYCTOR_LOOP:%.*]]
// CHECK4:       arrayctor.loop:
// CHECK4-NEXT:    [[ARRAYCTOR_CUR:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
// CHECK4-NEXT:    call void @_ZN1SC1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
// CHECK4-NEXT:    [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYCTOR_CUR]], i64 1
// CHECK4-NEXT:    [[ARRAYCTOR_DONE:%.*]] = icmp eq %struct.S* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
// CHECK4-NEXT:    br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
// CHECK4:       arrayctor.cont:
// CHECK4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
// CHECK4-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK4-NEXT:    [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT:    store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK4-NEXT:    [[VLA:%.*]] = alloca i16, i64 [[TMP1]], align 16
// CHECK4-NEXT:    store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK4-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT:    store i64 4, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP3]] to i32
// CHECK4-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4:       omp.inner.for.cond:
// CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK4-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP4]] to i64
// CHECK4-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group !2
// CHECK4-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP5]]
// CHECK4-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4:       omp.inner.for.body:
// CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1
// CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !2
// CHECK4-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64
// CHECK4-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[IDXPROM]]
// CHECK4-NEXT:    [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX]], align 2, !llvm.access.group !2
// CHECK4-NEXT:    [[CONV3:%.*]] = sext i16 [[TMP8]] to i32
// CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group !2
// CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP9]], [[CONV3]]
// CHECK4-NEXT:    store i32 [[ADD4]], i32* [[A]], align 4, !llvm.access.group !2
// CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4:       omp.body.continue:
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4:       omp.inner.for.inc:
// CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK4-NEXT:    store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK4:       omp.inner.for.end:
// CHECK4-NEXT:    store i32 5, i32* [[I]], align 4
// CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK4-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK4-NEXT:    call void @llvm.stackrestore(i8* [[TMP11]])
// CHECK4-NEXT:    [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [5 x %struct.S], [5 x %struct.S]* [[C]], i32 0, i32 0
// CHECK4-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN6]], i64 5
// CHECK4-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
// CHECK4:       arraydestroy.body:
// CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
// CHECK4-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
// CHECK4-NEXT:    call void @_ZN1SD1Ev(%struct.S* nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3:[0-9]+]]
// CHECK4-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]]
// CHECK4-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE7:%.*]], label [[ARRAYDESTROY_BODY]]
// CHECK4:       arraydestroy.done7:
// CHECK4-NEXT:    [[TMP13:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK4-NEXT:    ret i32 [[TMP13]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SC1Ev
// CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT:    call void @_ZN1SC2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SD1Ev
// CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT:    call void @_ZN1SD2Ev(%struct.S* nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]]
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SC2Ev
// CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT:    store i32 0, i32* [[A]], align 4
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN1SD2Ev
// CHECK4-SAME: (%struct.S* nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK4-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK4-NEXT:    ret void
//