; RUN: llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s -check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s -check-prefix=X64
; Control Flow Guard is currently only available on Windows

; Test that Control Flow Guard checks are correctly added when required.


declare i32 @target_func()


; Test that Control Flow Guard checks are not added on calls with the "guard_nocf" attribute.
define i32 @func_guard_nocf() {
entry:
  %func_ptr = alloca i32 ()*, align 8
  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
  %1 = call i32 %0() #0
  ret i32 %1

  ; X32-LABEL: func_guard_nocf
  ; X32:       movl $_target_func, %eax
  ; X32-NOT:   __guard_check_icall_fptr
  ; X32:       calll *%eax

  ; X64-LABEL: func_guard_nocf
  ; X64:       leaq target_func(%rip), %rax
  ; X64-NOT:   __guard_dispatch_icall_fptr
  ; X64:       callq *%rax
}
attributes #0 = { "guard_nocf" }


; Test that Control Flow Guard checks are added even at -O0.
; FIXME Ideally these checks should be added as a single call instruction, as in the optimized case.
define i32 @func_optnone_cf() #1 {
entry:
  %func_ptr = alloca i32 ()*, align 8
  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
  %1 = call i32 %0()
  ret i32 %1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X32-LABEL: func_optnone_cf
  ; X32:       leal _target_func, %eax
  ; X32:       movl %eax, (%esp)
  ; X32:       movl (%esp), %ecx
  ; X32:       movl ___guard_check_icall_fptr, %eax
  ; X32:       calll *%eax
  ; X32-NEXT:  calll *%ecx

  ; On x86_64, __guard_dispatch_icall_fptr tail calls the function, so there should be only one call instruction.
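  ; The x64 dispatch mechanism expects the call target in %rax, which is why the target
  ; address is materialized there before the call through __guard_dispatch_icall_fptr.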
  ; X64-LABEL: func_optnone_cf
  ; X64:       leaq target_func(%rip), %rax
  ; X64:       movq __guard_dispatch_icall_fptr(%rip), %rcx
  ; X64:       callq *%rcx
  ; X64-NOT:   callq
}
attributes #1 = { noinline optnone }


; Test that Control Flow Guard checks are correctly added in optimized code (common case).
define i32 @func_cf() {
entry:
  %func_ptr = alloca i32 ()*, align 8
  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
  %1 = call i32 %0()
  ret i32 %1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X32-LABEL: func_cf
  ; X32:       movl $_target_func, %esi
  ; X32:       movl $_target_func, %ecx
  ; X32:       calll *___guard_check_icall_fptr
  ; X32-NEXT:  calll *%esi

  ; On x86_64, __guard_dispatch_icall_fptr tail calls the function, so there should be only one call instruction.
  ; X64-LABEL: func_cf
  ; X64:       leaq target_func(%rip), %rax
  ; X64:       callq *__guard_dispatch_icall_fptr(%rip)
  ; X64-NOT:   callq
}


; Test that Control Flow Guard checks are correctly added on invoke instructions.
define i32 @func_cf_invoke() personality i8* bitcast (void ()* @h to i8*) {
entry:
  %0 = alloca i32, align 4
  %func_ptr = alloca i32 ()*, align 8
  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
  %1 = load i32 ()*, i32 ()** %func_ptr, align 8
  %2 = invoke i32 %1()
          to label %invoke.cont unwind label %lpad
invoke.cont:                                      ; preds = %entry
  ret i32 %2

lpad:                                             ; preds = %entry
  %tmp = landingpad { i8*, i32 }
          catch i8* null
  ret i32 -1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X32-LABEL: func_cf_invoke
  ; X32:       movl $_target_func, %esi
  ; X32:       movl $_target_func, %ecx
  ; X32:       calll *___guard_check_icall_fptr
  ; X32-NEXT:  calll *%esi
  ; X32:       # %invoke.cont
  ; X32:       # %lpad

  ; On x86_64, __guard_dispatch_icall_fptr tail calls the function, so there should be only one call instruction.
  ; X64-LABEL: func_cf_invoke
  ; X64:       leaq target_func(%rip), %rax
  ; X64:       callq *__guard_dispatch_icall_fptr(%rip)
  ; X64-NOT:   callq
  ; X64:       # %invoke.cont
  ; X64:       # %lpad
}

declare void @h()


; Test that Control Flow Guard preserves floating point arguments.
declare double @target_func_doubles(double, double, double, double)

define double @func_cf_doubles() {
entry:
  %func_ptr = alloca double (double, double, double, double)*, align 8
  store double (double, double, double, double)* @target_func_doubles, double (double, double, double, double)** %func_ptr, align 8
  %0 = load double (double, double, double, double)*, double (double, double, double, double)** %func_ptr, align 8
  %1 = call double %0(double 1.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00)
  ret double %1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X32-LABEL: func_cf_doubles
  ; X32:       movl $_target_func_doubles, %esi
  ; X32:       movl $_target_func_doubles, %ecx
  ; X32:       calll *___guard_check_icall_fptr
  ; X32:       calll *%esi

  ; On x86_64, __guard_dispatch_icall_fptr tail calls the function, so there should be only one call instruction.
  ; X64-LABEL: func_cf_doubles
  ; X64:       leaq target_func_doubles(%rip), %rax
  ; X64:       movsd __real@3ff0000000000000(%rip), %xmm0
  ; X64:       movsd __real@4000000000000000(%rip), %xmm1
  ; X64:       movsd __real@4008000000000000(%rip), %xmm2
  ; X64:       movsd __real@4010000000000000(%rip), %xmm3
  ; X64:       callq *__guard_dispatch_icall_fptr(%rip)
  ; X64-NOT:   callq
}


; Test that Control Flow Guard checks are correctly added for tail calls.
define i32 @func_cf_tail() {
entry:
  %func_ptr = alloca i32 ()*, align 8
  store i32 ()* @target_func, i32 ()** %func_ptr, align 8
  %0 = load i32 ()*, i32 ()** %func_ptr, align 8
  %1 = musttail call i32 %0()
  ret i32 %1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X32-LABEL: func_cf_tail
  ; X32:       movl $_target_func, %ecx
  ; X32:       calll *___guard_check_icall_fptr
  ; X32:       movl $_target_func, %eax
  ; X32:       jmpl *%eax                  # TAILCALL
  ; X32-NOT:   calll

  ; X64-LABEL: func_cf_tail
  ; X64:       leaq target_func(%rip), %rax
  ; X64:       rex64 jmpq *__guard_dispatch_icall_fptr(%rip)         # TAILCALL
  ; X64-NOT:   callq
}

%struct.Foo = type { i32 (%struct.Foo*)** }

; Test that Control Flow Guard checks are correctly added for variadic musttail
; calls. These are used for MS C++ ABI virtual member pointer thunks.
; PR44049
define i32 @vmptr_thunk(%struct.Foo* inreg %p) {
entry:
  %vptr.addr = getelementptr inbounds %struct.Foo, %struct.Foo* %p, i32 0, i32 0
  %vptr = load i32 (%struct.Foo*)**, i32 (%struct.Foo*)*** %vptr.addr
  %slot = getelementptr inbounds i32 (%struct.Foo*)*, i32 (%struct.Foo*)** %vptr, i32 1
  %vmethod = load i32 (%struct.Foo*)*, i32 (%struct.Foo*)** %slot
  %rv = musttail call i32 %vmethod(%struct.Foo* inreg %p)
  ret i32 %rv

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X32-LABEL: _vmptr_thunk:
  ; X32:       movl %eax, %esi
  ; X32:       movl (%eax), %eax
  ; X32:       movl 4(%eax), %ecx
  ; X32:       calll *___guard_check_icall_fptr
  ; X32:       movl %esi, %eax
  ; X32:       jmpl *%ecx                  # TAILCALL
  ; X32-NOT:   calll

  ; Use NEXT here because we previously had an extra instruction in this sequence.
  ; X64-LABEL: vmptr_thunk:
  ; X64:       movq (%rcx), %rax
  ; X64-NEXT:  movq 8(%rax), %rax
  ; X64-NEXT:  movq __guard_dispatch_icall_fptr(%rip), %rdx
  ; X64-NEXT:  rex64 jmpq *%rdx            # TAILCALL
  ; X64-NOT:   callq
}

; Test that longjmp targets have public labels and are included in the .gljmp section.
%struct._SETJMP_FLOAT128 = type { [2 x i64] }
@buf1 = internal global [16 x %struct._SETJMP_FLOAT128] zeroinitializer, align 16

define i32 @func_cf_setjmp() {
  %1 = alloca i32, align 4
  %2 = alloca i32, align 4
  store i32 0, i32* %1, align 4
  store i32 -1, i32* %2, align 4
  %3 = call i8* @llvm.frameaddress(i32 0)
  %4 = call i32 @_setjmp(i8* bitcast ([16 x %struct._SETJMP_FLOAT128]* @buf1 to i8*), i8* %3) #2

  ; X32-LABEL: func_cf_setjmp
  ; X32:       calll __setjmp
  ; X32-NEXT:  $cfgsj_func_cf_setjmp0:

  ; X64-LABEL: func_cf_setjmp
  ; X64:       callq _setjmp
  ; X64-NEXT:  $cfgsj_func_cf_setjmp0:

  %5 = call i8* @llvm.frameaddress(i32 0)
  %6 = call i32 @_setjmp(i8* bitcast ([16 x %struct._SETJMP_FLOAT128]* @buf1 to i8*), i8* %5) #2

  ; X32:       calll __setjmp
  ; X32-NEXT:  $cfgsj_func_cf_setjmp1:

  ; X64:       callq _setjmp
  ; X64-NEXT:  $cfgsj_func_cf_setjmp1:

  store i32 1, i32* %2, align 4
  %7 = load i32, i32* %2, align 4
  ret i32 %7

  ; X32:       .section .gljmp$y,"dr"
  ; X32-NEXT:  .symidx $cfgsj_func_cf_setjmp0
  ; X32-NEXT:  .symidx $cfgsj_func_cf_setjmp1

  ; X64:       .section .gljmp$y,"dr"
  ; X64-NEXT:  .symidx $cfgsj_func_cf_setjmp0
  ; X64-NEXT:  .symidx $cfgsj_func_cf_setjmp1
}

declare i8* @llvm.frameaddress(i32)

; Function Attrs: returns_twice
declare dso_local i32 @_setjmp(i8*, i8*) #2

attributes #2 = { returns_twice }

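; The "cfguard" module flag value of 2 requests full Control Flow Guard instrumentation
; (metadata plus checks, as with /guard:cf); a value of 1 emits metadata only, without checks.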
!llvm.module.flags = !{!0}
!0 = !{i32 2, !"cfguard", i32 2}