// RUN: %clang_cc1 -triple wasm32-unknown-unknown -o - -emit-llvm %s | FileCheck %s

#include <stdarg.h>

int test_i32(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  int v = va_arg(va, int);
  va_end(va);

  return v;
}

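// On wasm32, an i32 vararg is loaded directly from the current argument
// pointer, which is then advanced by 4 bytes.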
// CHECK-LABEL: define i32 @test_i32(i8*{{.*}} %fmt, ...) {{.*}} {
// CHECK:   [[FMT_ADDR:%[^,=]+]] = alloca i8*, align 4
// CHECK:   [[VA:%[^,=]+]] = alloca i8*, align 4
// CHECK:   [[V:%[^,=]+]] = alloca i32, align 4
// CHECK:   store i8* %fmt, i8** [[FMT_ADDR]], align 4
// CHECK:   [[VA1:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK:   call void @llvm.va_start(i8* [[VA1]])
// CHECK:   [[ARGP_CUR:%[^,=]+]] = load i8*, i8** [[VA]], align 4
// CHECK:   [[ARGP_NEXT:%[^,=]+]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i32 4
// CHECK:   store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
// CHECK:   [[R3:%[^,=]+]] = bitcast i8* [[ARGP_CUR]] to i32*
// CHECK:   [[R4:%[^,=]+]] = load i32, i32* [[R3]], align 4
// CHECK:   store i32 [[R4]], i32* [[V]], align 4
// CHECK:   [[VA2:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK:   call void @llvm.va_end(i8* [[VA2]])
// CHECK:   [[R5:%[^,=]+]] = load i32, i32* [[V]], align 4
// CHECK:   ret i32 [[R5]]
// CHECK: }

long long test_i64(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  long long v = va_arg(va, long long);
  va_end(va);

  return v;
}

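// An i64 vararg needs 8-byte alignment: the argument pointer is rounded up
// (add 7, mask with -8) before the 8-byte load, then advanced by 8 bytes.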
// CHECK-LABEL: define i64 @test_i64(i8*{{.*}} %fmt, ...) {{.*}} {
// CHECK:   [[FMT_ADDR:%[^,=]+]] = alloca i8*, align 4
// CHECK:   [[VA:%[^,=]+]] = alloca i8*, align 4
// CHECK:   [[V:%[^,=]+]] = alloca i64, align 8
// CHECK:   store i8* %fmt, i8** [[FMT_ADDR]], align 4
// CHECK:   [[VA1:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK:   call void @llvm.va_start(i8* [[VA1]])
// CHECK:   [[ARGP_CUR:%[^,=]+]] = load i8*, i8** [[VA]], align 4
// CHECK:   [[R0:%[^,=]+]] = ptrtoint i8* [[ARGP_CUR]] to i32
// CHECK:   [[R1:%[^,=]+]] = add i32 [[R0]], 7
// CHECK:   [[R2:%[^,=]+]] = and i32 [[R1]], -8
// CHECK:   [[ARGP_CUR_ALIGNED:%[^,=]+]] = inttoptr i32 [[R2]] to i8*
// CHECK:   [[ARGP_NEXT:%[^,=]+]] = getelementptr inbounds i8, i8* [[ARGP_CUR_ALIGNED]], i32 8
// CHECK:   store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
// CHECK:   [[R3:%[^,=]+]] = bitcast i8* [[ARGP_CUR_ALIGNED]] to i64*
// CHECK:   [[R4:%[^,=]+]] = load i64, i64* [[R3]], align 8
// CHECK:   store i64 [[R4]], i64* [[V]], align 8
// CHECK:   [[VA2:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK:   call void @llvm.va_end(i8* [[VA2]])
// CHECK:   [[R5:%[^,=]+]] = load i64, i64* [[V]], align 8
// CHECK:   ret i64 [[R5]]
// CHECK: }

struct S {
  int x;
  int y;
  int z;
};

struct S test_struct(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  struct S v = va_arg(va, struct S);
  va_end(va);

  return v;
}

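// Non-empty aggregates are passed indirectly as varargs: the argument slot
// holds a pointer to the struct, and its 12 bytes are copied into the sret
// return slot.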
// CHECK:      define void @test_struct([[STRUCT_S:%[^,=]+]]*{{.*}} noalias sret({{.*}}) align 4 [[AGG_RESULT:%.*]], i8*{{.*}} %fmt, ...) {{.*}} {
// CHECK:        [[FMT_ADDR:%[^,=]+]] = alloca i8*, align 4
// CHECK-NEXT:   [[VA:%[^,=]+]] = alloca i8*, align 4
// CHECK-NEXT:   store i8* %fmt, i8** [[FMT_ADDR]], align 4
// CHECK-NEXT:   [[VA1:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT:   call void @llvm.va_start(i8* [[VA1]])
// CHECK-NEXT:   [[ARGP_CUR:%[^,=]+]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT:   [[ARGP_NEXT:%[^,=]+]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i32 4
// CHECK-NEXT:   store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
// CHECK-NEXT:   [[R3:%[^,=]+]] = bitcast i8* [[ARGP_CUR]] to [[STRUCT_S]]**
// CHECK-NEXT:   [[R4:%[^,=]+]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[R3]], align 4
// CHECK-NEXT:   [[R5:%[^,=]+]] = bitcast [[STRUCT_S]]* [[AGG_RESULT]] to i8*
// CHECK-NEXT:   [[R6:%[^,=]+]] = bitcast [[STRUCT_S]]* [[R4]] to i8*
// CHECK-NEXT:   call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[R5]], i8* align 4 [[R6]], i32 12, i1 false)
// CHECK-NEXT:   [[VA2:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT:   call void @llvm.va_end(i8* [[VA2]])
// CHECK-NEXT:   ret void
// CHECK-NEXT: }

struct Z {};

struct S test_empty_struct(char *fmt, ...) {
  va_list va;

  va_start(va, fmt);
  struct Z u = va_arg(va, struct Z);
  struct S v = va_arg(va, struct S);
  va_end(va);

  return v;
}

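// An empty struct occupies no space in the argument buffer (the argument
// pointer advances by 0 and the copy is 0 bytes); the following struct S is
// still read indirectly, as above.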
// CHECK:      define void @test_empty_struct([[STRUCT_S:%[^,=]+]]*{{.*}} noalias sret([[STRUCT_S]]) align 4 [[AGG_RESULT:%.*]], i8*{{.*}} %fmt, ...) {{.*}} {
// CHECK:        [[FMT_ADDR:%[^,=]+]] = alloca i8*, align 4
// CHECK-NEXT:   [[VA:%[^,=]+]] = alloca i8*, align 4
// CHECK-NEXT:   [[U:%[^,=]+]] = alloca [[STRUCT_Z:%[^,=]+]], align 1
// CHECK-NEXT:   store i8* %fmt, i8** [[FMT_ADDR]], align 4
// CHECK-NEXT:   [[VA1:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT:   call void @llvm.va_start(i8* [[VA1]])
// CHECK-NEXT:   [[ARGP_CUR:%[^,=]+]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT:   [[ARGP_NEXT:%[^,=]+]] = getelementptr inbounds i8, i8* [[ARGP_CUR]], i32 0
// CHECK-NEXT:   store i8* [[ARGP_NEXT]], i8** [[VA]], align 4
// CHECK-NEXT:   [[R0:%[^,=]+]] = bitcast i8* [[ARGP_CUR]] to [[STRUCT_Z]]*
// CHECK-NEXT:   [[R1:%[^,=]+]] = bitcast [[STRUCT_Z]]* [[U]] to i8*
// CHECK-NEXT:   [[R2:%[^,=]+]] = bitcast [[STRUCT_Z]]* [[R0]] to i8*
// CHECK-NEXT:   call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[R1]], i8* align 4 [[R2]], i32 0, i1 false)
// CHECK-NEXT:   [[ARGP_CUR2:%[^,=]+]] = load i8*, i8** [[VA]], align 4
// CHECK-NEXT:   [[ARGP_NEXT2:%[^,=]+]] = getelementptr inbounds i8, i8* [[ARGP_CUR2]], i32 4
// CHECK-NEXT:   store i8* [[ARGP_NEXT2]], i8** [[VA]], align 4
// CHECK-NEXT:   [[R3:%[^,=]+]] = bitcast i8* [[ARGP_CUR2]] to [[STRUCT_S]]**
// CHECK-NEXT:   [[R4:%[^,=]+]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[R3]], align 4
// CHECK-NEXT:   [[R5:%[^,=]+]] = bitcast [[STRUCT_S]]* [[AGG_RESULT]] to i8*
// CHECK-NEXT:   [[R6:%[^,=]+]] = bitcast [[STRUCT_S]]* [[R4]] to i8*
// CHECK-NEXT:   call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[R5]], i8* align 4 [[R6]], i32 12, i1 false)
// CHECK-NEXT:   [[VA2:%[^,=]+]] = bitcast i8** [[VA]] to i8*
// CHECK-NEXT:   call void @llvm.va_end(i8* [[VA2]])
// CHECK-NEXT:   ret void
// CHECK-NEXT: }