1 // RUN: %clang_cc1 -fno-rtti -emit-llvm %s -o - -cxx-abi microsoft -triple=i386-pc-win32 | FileCheck %s
2 // FIXME: Test x86_64 member pointers when codegen no longer asserts on records
3 // with virtual bases.
4 
// Base class with a non-virtual method and one data member.
struct B1 {
  void foo();
  int b;
};
// Second base class; used below to force multiple inheritance.
struct B2 {
  int b2;
  void foo();
};
// Single inheritance: per the CHECK lines below, member pointers use the
// "single" model (data memptr is a lone i32, function memptr a lone i8*).
struct Single : B1 {
  void foo();
};
// Multiple inheritance: function member pointers carry an extra non-virtual
// base adjustment field ({ i8*, i32 } per the CHECK lines below).
struct Multiple : B1, B2 {
  int m;
  void foo();
};
// Virtual inheritance: member pointers also carry a vbtable offset field
// ({ i32, i32 } for data, { i8*, i32, i32 } for functions, per the CHECKs).
struct Virtual : virtual B1 {
  int v;
  void foo();
};
24 
// Plain aggregate: null data member pointers are represented as -1
// (see podMemPtrs below), since field offset 0 is valid.
struct POD {
  int a;
  int b;
};
29 
// Has a vfptr at offset 0, so field offsets start at 4 and null data member
// pointers can use 0 (see polymorphicMemPtrs below).
struct Polymorphic {
  virtual void myVirtual();
  int a;
  int b;
};
35 
36 // This class uses the virtual inheritance model, yet its vbptr offset is not 0.
37 // We still use zero for the null field offset, despite it being a valid field
38 // offset.
// POD comes first in the layout, so the vbptr inherited from Virtual
// lands at a non-zero offset.
struct NonZeroVBPtr : POD, Virtual {
  int n;
  void foo();
};
43 
// Declaring member pointers while these types are still incomplete locks in
// the most general ("unspecified") inheritance model for them.
struct Unspecified;
struct UnspecSingle;

// Check that we can lower the LLVM types and get the null initializers right.
int Single     ::*s_d_memptr;
int Polymorphic::*p_d_memptr;
int Multiple   ::*m_d_memptr;
int Virtual    ::*v_d_memptr;
int NonZeroVBPtr::*n_d_memptr;
int Unspecified::*u_d_memptr;
int UnspecSingle::*us_d_memptr;
// CHECK: @"\01?s_d_memptr@@3PQSingle@@HQ1@" = global i32 -1, align 4
// CHECK: @"\01?p_d_memptr@@3PQPolymorphic@@HQ1@" = global i32 0, align 4
// CHECK: @"\01?m_d_memptr@@3PQMultiple@@HQ1@" = global i32 -1, align 4
// CHECK: @"\01?v_d_memptr@@3PQVirtual@@HQ1@" = global { i32, i32 }
// CHECK:   { i32 0, i32 -1 }, align 4
// CHECK: @"\01?n_d_memptr@@3PQNonZeroVBPtr@@HQ1@" = global { i32, i32 }
// CHECK:   { i32 0, i32 -1 }, align 4
// CHECK: @"\01?u_d_memptr@@3PQUnspecified@@HQ1@" = global { i32, i32, i32 }
// CHECK:   { i32 0, i32 0, i32 -1 }, align 4
// CHECK: @"\01?us_d_memptr@@3PQUnspecSingle@@HQ1@" = global { i32, i32, i32 }
// CHECK:   { i32 0, i32 0, i32 -1 }, align 4
66 
// Null function member pointers: the function pointer field is null and any
// adjustment fields are zero, so zeroinitializer works for all models.
void (Single  ::*s_f_memptr)();
void (Multiple::*m_f_memptr)();
void (Virtual ::*v_f_memptr)();
// CHECK: @"\01?s_f_memptr@@3P8Single@@AEXXZQ1@" = global i8* null, align 4
// CHECK: @"\01?m_f_memptr@@3P8Multiple@@AEXXZQ1@" = global { i8*, i32 } zeroinitializer, align 4
// CHECK: @"\01?v_f_memptr@@3P8Virtual@@AEXXZQ1@" = global { i8*, i32, i32 } zeroinitializer, align 4
73 
// We can define Unspecified after locking in the inheritance model.
// Its real layout (multiple + virtual bases) doesn't shrink the
// already-chosen unspecified representation.
struct Unspecified : Multiple, Virtual {
  void foo();
  int u;
};
79 
// Simple layout, but it was locked into the unspecified model above, so its
// member pointers keep the wide { i32, i32, i32 } representation.
struct UnspecSingle {
  void foo();
};
83 
// Test memptr emission in a constant expression.
// Each initializer exercises a different inheritance model; note the
// non-virtual adjustment (i32 4) when &B2::foo is stored into a Multiple
// member pointer, and the vbptr offset (i32 12) for Unspecified.
namespace Const {
void (Single     ::*s_f_mp)() = &Single::foo;
void (Multiple   ::*m_f_mp)() = &B2::foo;
void (Virtual    ::*v_f_mp)() = &Virtual::foo;
void (Unspecified::*u_f_mp)() = &Unspecified::foo;
void (UnspecSingle::*us_f_mp)() = &UnspecSingle::foo;
// CHECK: @"\01?s_f_mp@Const@@3P8Single@@AEXXZQ2@" =
// CHECK:   global i8* bitcast ({{.*}} @"\01?foo@Single@@QAEXXZ" to i8*), align 4
// CHECK: @"\01?m_f_mp@Const@@3P8Multiple@@AEXXZQ2@" =
// CHECK:   global { i8*, i32 } { i8* bitcast ({{.*}} @"\01?foo@B2@@QAEXXZ" to i8*), i32 4 }, align 4
// CHECK: @"\01?v_f_mp@Const@@3P8Virtual@@AEXXZQ2@" =
// CHECK:   global { i8*, i32, i32 } { i8* bitcast ({{.*}} @"\01?foo@Virtual@@QAEXXZ" to i8*), i32 0, i32 0 }, align 4
// CHECK: @"\01?u_f_mp@Const@@3P8Unspecified@@AEXXZQ2@" =
// CHECK:   global { i8*, i32, i32, i32 } { i8* bitcast ({{.*}} @"\01?foo@Unspecified@@QAEXXZ" to i8*), i32 0, i32 12, i32 0 }, align 4
// CHECK: @"\01?us_f_mp@Const@@3P8UnspecSingle@@AEXXZQ2@" =
// CHECK:   global { i8*, i32, i32, i32 } { i8* bitcast ({{.*}} @"\01?foo@UnspecSingle@@QAEXXZ" to i8*), i32 0, i32 0, i32 0 }, align 4
}
102 
namespace CastParam {
// This exercises ConstExprEmitter instead of ValueDecl::evaluateValue.  The
// extra reinterpret_cast for the parameter type requires more careful folding.
// FIXME: Or does it?  If reinterpret_casts are no-ops, we should be able to
// strip them in evaluateValue() and just proceed as normal with an APValue.
struct A {
  int a;
  void foo(A *p);
};
struct B { int b; };
struct C : B, A { int c; };

// Parameter-type-only cast: no representation change, just a bitcast.
void (A::*ptr1)(void *) = (void (A::*)(void *)) &A::foo;
// CHECK: @"\01?ptr1@CastParam@@3P8A@1@AEXPAX@ZQ21@" =
// CHECK:   global i8* bitcast (void ({{.*}})* @"\01?foo@A@CastParam@@QAEXPAU12@@Z" to i8*), align 4

// Try a reinterpret_cast followed by a memptr conversion.
// The A-in-C adjustment shows up as the i32 4 field.
void (C::*ptr2)(void *) = (void (C::*)(void *)) (void (A::*)(void *)) &A::foo;
// CHECK: @"\01?ptr2@CastParam@@3P8C@1@AEXPAX@ZQ21@" =
// CHECK:   global { i8*, i32 } { i8* bitcast (void ({{.*}})* @"\01?foo@A@CastParam@@QAEXPAU12@@Z" to i8*), i32 4 }, align 4

// Null stays null through the cast chain.
void (C::*ptr3)(void *) = (void (C::*)(void *)) (void (A::*)(void *)) (void (A::*)(A *)) 0;
// CHECK: @"\01?ptr3@CastParam@@3P8C@1@AEXPAX@ZQ21@" =
// CHECK:   global { i8*, i32 } zeroinitializer, align 4

struct D : C {
  virtual void isPolymorphic();
  int d;
};

// Try a cast that changes the inheritance model.  Null for D is 0, but null for
// C is -1.  We need the cast to long in order to hit the non-APValue path.
int C::*ptr4 = (int C::*) (int D::*) (long D::*) 0;
// CHECK: @"\01?ptr4@CastParam@@3PQC@1@HQ21@" = global i32 -1, align 4

// MSVC rejects this but we accept it.
int C::*ptr5 = (int C::*) (long D::*) 0;
// CHECK: @"\01?ptr5@CastParam@@3PQC@1@HQ21@" = global i32 -1, align 4
}
142 
// Declaring a member pointer before the definition forces the unspecified
// model onto a class that (unlike Unspecified above) has a virtual base but
// only one non-virtual base.
struct UnspecWithVBPtr;
int UnspecWithVBPtr::*forceUnspecWithVBPtr;
struct UnspecWithVBPtr : B1, virtual B2 {
  int u;
  void foo();
};
149 
// Test emitting non-virtual member pointers in a non-constexpr setting.
// Each local picks a different inheritance model; the stored aggregate widths
// and adjustment fields (e.g. vbptr offset 12 for Unspecified, 4 for
// UnspecWithVBPtr) must match the constant-initializer cases above.
void EmitNonVirtualMemberPointers() {
  void (Single     ::*s_f_memptr)() = &Single::foo;
  void (Multiple   ::*m_f_memptr)() = &Multiple::foo;
  void (Virtual    ::*v_f_memptr)() = &Virtual::foo;
  void (Unspecified::*u_f_memptr)() = &Unspecified::foo;
  void (UnspecWithVBPtr::*u2_f_memptr)() = &UnspecWithVBPtr::foo;
// CHECK: define void @"\01?EmitNonVirtualMemberPointers@@YAXXZ"() {{.*}} {
// CHECK:   alloca i8*, align 4
// CHECK:   alloca { i8*, i32 }, align 4
// CHECK:   alloca { i8*, i32, i32 }, align 4
// CHECK:   alloca { i8*, i32, i32, i32 }, align 4
// CHECK:   store i8* bitcast (void (%{{.*}}*)* @"\01?foo@Single@@QAEXXZ" to i8*), i8** %{{.*}}, align 4
// CHECK:   store { i8*, i32 }
// CHECK:     { i8* bitcast (void (%{{.*}}*)* @"\01?foo@Multiple@@QAEXXZ" to i8*), i32 0 },
// CHECK:     { i8*, i32 }* %{{.*}}, align 4
// CHECK:   store { i8*, i32, i32 }
// CHECK:     { i8* bitcast (void (%{{.*}}*)* @"\01?foo@Virtual@@QAEXXZ" to i8*), i32 0, i32 0 },
// CHECK:     { i8*, i32, i32 }* %{{.*}}, align 4
// CHECK:   store { i8*, i32, i32, i32 }
// CHECK:     { i8* bitcast (void (%{{.*}}*)* @"\01?foo@Unspecified@@QAEXXZ" to i8*), i32 0, i32 12, i32 0 },
// CHECK:     { i8*, i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   store { i8*, i32, i32, i32 }
// CHECK:     { i8* bitcast (void (%{{.*}}*)* @"\01?foo@UnspecWithVBPtr@@QAEXXZ" to i8*),
// CHECK:       i32 0, i32 4, i32 0 },
// CHECK:     { i8*, i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   ret void
// CHECK: }
}
179 
// Data member pointers into a non-polymorphic class: raw field offsets
// (0 and 4), with -1 as the null sentinel.
void podMemPtrs() {
  int POD::*memptr;
  memptr = &POD::a;
  memptr = &POD::b;
  if (memptr)
    memptr = 0;
// Check that member pointers use the right offsets and that null is -1.
// CHECK:      define void @"\01?podMemPtrs@@YAXXZ"() {{.*}} {
// CHECK:        %[[memptr:.*]] = alloca i32, align 4
// CHECK-NEXT:   store i32 0, i32* %[[memptr]], align 4
// CHECK-NEXT:   store i32 4, i32* %[[memptr]], align 4
// CHECK-NEXT:   %[[memptr_val:.*]] = load i32* %[[memptr]], align 4
// CHECK-NEXT:   %{{.*}} = icmp ne i32 %[[memptr_val]], -1
// CHECK-NEXT:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
// CHECK:        store i32 -1, i32* %[[memptr]], align 4
// CHECK:        ret void
// CHECK:      }
}
198 
// Same as podMemPtrs, but the vfptr shifts field offsets to 4 and 8,
// freeing 0 to serve as the null sentinel.
void polymorphicMemPtrs() {
  int Polymorphic::*memptr;
  memptr = &Polymorphic::a;
  memptr = &Polymorphic::b;
  if (memptr)
    memptr = 0;
// Member pointers for polymorphic classes include the vtable slot in their
// offset and use 0 to represent null.
// CHECK:      define void @"\01?polymorphicMemPtrs@@YAXXZ"() {{.*}} {
// CHECK:        %[[memptr:.*]] = alloca i32, align 4
// CHECK-NEXT:   store i32 4, i32* %[[memptr]], align 4
// CHECK-NEXT:   store i32 8, i32* %[[memptr]], align 4
// CHECK-NEXT:   %[[memptr_val:.*]] = load i32* %[[memptr]], align 4
// CHECK-NEXT:   %{{.*}} = icmp ne i32 %[[memptr_val]], 0
// CHECK-NEXT:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
// CHECK:        store i32 0, i32* %[[memptr]], align 4
// CHECK:        ret void
// CHECK:      }
}
218 
// Null test for an unspecified-model data memptr: all three fields must be
// compared against the null pattern { 0, 0, -1 } and the results and'ed.
bool nullTestDataUnspecified(int Unspecified::*mp) {
  return mp;
// CHECK: define zeroext i1 @"\01?nullTestDataUnspecified@@YA_NPQUnspecified@@H@Z"{{.*}} {
// CHECK:   %{{.*}} = load { i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   store { i32, i32, i32 } {{.*}} align 4
// CHECK:   %[[mp:.*]] = load { i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   %[[mp0:.*]] = extractvalue { i32, i32, i32 } %[[mp]], 0
// CHECK:   %[[cmp0:.*]] = icmp ne i32 %[[mp0]], 0
// CHECK:   %[[mp1:.*]] = extractvalue { i32, i32, i32 } %[[mp]], 1
// CHECK:   %[[cmp1:.*]] = icmp ne i32 %[[mp1]], 0
// CHECK:   %[[and0:.*]] = and i1 %[[cmp0]], %[[cmp1]]
// CHECK:   %[[mp2:.*]] = extractvalue { i32, i32, i32 } %[[mp]], 2
// CHECK:   %[[cmp2:.*]] = icmp ne i32 %[[mp2]], -1
// CHECK:   %[[and1:.*]] = and i1 %[[and0]], %[[cmp2]]
// CHECK:   ret i1 %[[and1]]
// CHECK: }
}
236 
// Null test for a function memptr: only the function pointer field needs
// to be compared against null, regardless of model width.
bool nullTestFunctionUnspecified(void (Unspecified::*mp)()) {
  return mp;
// CHECK: define zeroext i1 @"\01?nullTestFunctionUnspecified@@YA_NP8Unspecified@@AEXXZ@Z"{{.*}} {
// CHECK:   %{{.*}} = load { i8*, i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   store { i8*, i32, i32, i32 } {{.*}} align 4
// CHECK:   %[[mp:.*]] = load { i8*, i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   %[[mp0:.*]] = extractvalue { i8*, i32, i32, i32 } %[[mp]], 0
// CHECK:   %[[cmp0:.*]] = icmp ne i8* %[[mp0]], null
// CHECK:   ret i1 %[[cmp0]]
// CHECK: }
}
248 
// Applying a virtual-model data memptr: unconditionally load the vbtable
// through the vbptr, index it by the memptr's vbtable-offset field, adjust
// the base, then add the field offset.
int loadDataMemberPointerVirtual(Virtual *o, int Virtual::*memptr) {
  return o->*memptr;
// Test that we can unpack this aggregate member pointer and load the member
// data pointer.
// CHECK: define i32 @"\01?loadDataMemberPointerVirtual@@YAHPAUVirtual@@PQ1@H@Z"{{.*}} {
// CHECK:   %[[o:.*]] = load %{{.*}}** %{{.*}}, align 4
// CHECK:   %[[memptr:.*]] = load { i32, i32 }* %{{.*}}, align 4
// CHECK:   %[[memptr0:.*]] = extractvalue { i32, i32 } %[[memptr:.*]], 0
// CHECK:   %[[memptr1:.*]] = extractvalue { i32, i32 } %[[memptr:.*]], 1
// CHECK:   %[[v6:.*]] = bitcast %{{.*}}* %[[o]] to i8*
// CHECK:   %[[vbptr:.*]] = getelementptr inbounds i8* %[[v6]], i32 0
// CHECK:   %[[vbptr_a:.*]] = bitcast i8* %[[vbptr]] to i8**
// CHECK:   %[[vbtable:.*]] = load i8** %[[vbptr_a:.*]]
// CHECK:   %[[v7:.*]] = getelementptr inbounds i8* %[[vbtable]], i32 %[[memptr1]]
// CHECK:   %[[v8:.*]] = bitcast i8* %[[v7]] to i32*
// CHECK:   %[[vbase_offs:.*]] = load i32* %[[v8]]
// CHECK:   %[[v10:.*]] = getelementptr inbounds i8* %[[vbptr]], i32 %[[vbase_offs]]
// CHECK:   %[[offset:.*]] = getelementptr inbounds i8* %[[v10]], i32 %[[memptr0]]
// CHECK:   %[[v11:.*]] = bitcast i8* %[[offset]] to i32*
// CHECK:   %[[v12:.*]] = load i32* %[[v11]]
// CHECK:   ret i32 %[[v12]]
// CHECK: }
}
272 
// Unspecified-model version of the above: the vbtable lookup is guarded by a
// runtime check of the vbtable-offset field (nonzero means a virtual base is
// involved), with a phi merging the adjusted and unadjusted base pointers.
int loadDataMemberPointerUnspecified(Unspecified *o, int Unspecified::*memptr) {
  return o->*memptr;
// Test that we can unpack this aggregate member pointer and load the member
// data pointer.
// CHECK: define i32 @"\01?loadDataMemberPointerUnspecified@@YAHPAUUnspecified@@PQ1@H@Z"{{.*}} {
// CHECK:   %[[o:.*]] = load %{{.*}}** %{{.*}}, align 4
// CHECK:   %[[memptr:.*]] = load { i32, i32, i32 }* %{{.*}}, align 4
// CHECK:   %[[memptr0:.*]] = extractvalue { i32, i32, i32 } %[[memptr:.*]], 0
// CHECK:   %[[memptr1:.*]] = extractvalue { i32, i32, i32 } %[[memptr:.*]], 1
// CHECK:   %[[memptr2:.*]] = extractvalue { i32, i32, i32 } %[[memptr:.*]], 2
// CHECK:   %[[base:.*]] = bitcast %{{.*}}* %[[o]] to i8*
// CHECK:   %[[is_vbase:.*]] = icmp ne i32 %[[memptr2]], 0
// CHECK:   br i1 %[[is_vbase]], label %[[vadjust:.*]], label %[[skip:.*]]
//
// CHECK: [[vadjust]]
// CHECK:   %[[vbptr:.*]] = getelementptr inbounds i8* %[[base]], i32 %[[memptr1]]
// CHECK:   %[[vbptr_a:.*]] = bitcast i8* %[[vbptr]] to i8**
// CHECK:   %[[vbtable:.*]] = load i8** %[[vbptr_a:.*]]
// CHECK:   %[[v7:.*]] = getelementptr inbounds i8* %[[vbtable]], i32 %[[memptr2]]
// CHECK:   %[[v8:.*]] = bitcast i8* %[[v7]] to i32*
// CHECK:   %[[vbase_offs:.*]] = load i32* %[[v8]]
// CHECK:   %[[base_adj:.*]] = getelementptr inbounds i8* %[[vbptr]], i32 %[[vbase_offs]]
//
// CHECK: [[skip]]
// CHECK:   %[[new_base:.*]] = phi i8* [ %[[base]], %{{.*}} ], [ %[[base_adj]], %[[vadjust]] ]
// CHECK:   %[[offset:.*]] = getelementptr inbounds i8* %[[new_base]], i32 %[[memptr0]]
// CHECK:   %[[v11:.*]] = bitcast i8* %[[offset]] to i32*
// CHECK:   %[[v12:.*]] = load i32* %[[v11]]
// CHECK:   ret i32 %[[v12]]
// CHECK: }
}
304 
// Single model: no this-adjustment needed before the indirect call.
void callMemberPointerSingle(Single *o, void (Single::*memptr)()) {
  (o->*memptr)();
// Just look for an indirect thiscall.
// CHECK: define void @"\01?callMemberPointerSingle@@{{.*}} {{.*}} {
// CHECK:   call x86_thiscallcc void %{{.*}}(%{{.*}} %{{.*}})
// CHECK:   ret void
// CHECK: }
}
313 
// Multiple model: apply the non-virtual adjustment field to `this` before
// the indirect thiscall.
void callMemberPointerMultiple(Multiple *o, void (Multiple::*memptr)()) {
  (o->*memptr)();
// CHECK: define void @"\01?callMemberPointerMultiple@@{{.*}} {
// CHECK:   %[[memptr0:.*]] = extractvalue { i8*, i32 } %{{.*}}, 0
// CHECK:   %[[memptr1:.*]] = extractvalue { i8*, i32 } %{{.*}}, 1
// CHECK:   %[[this_adjusted:.*]] = getelementptr inbounds i8* %{{.*}}, i32 %[[memptr1]]
// CHECK:   %[[this:.*]] = bitcast i8* %[[this_adjusted]] to {{.*}}
// CHECK:   %[[fptr:.*]] = bitcast i8* %[[memptr0]] to {{.*}}
// CHECK:   call x86_thiscallcc void %[[fptr]](%{{.*}} %[[this]])
// CHECK:   ret void
// CHECK: }
}
326 
// Virtual model: vbtable lookup adjusts `this` to the virtual base, then the
// non-virtual adjustment is applied, before the indirect thiscall.
void callMemberPointerVirtualBase(Virtual *o, void (Virtual::*memptr)()) {
  (o->*memptr)();
// This shares a lot with virtual data member pointers.
// CHECK: define void @"\01?callMemberPointerVirtualBase@@{{.*}} {
// CHECK:   %[[memptr0:.*]] = extractvalue { i8*, i32, i32 } %{{.*}}, 0
// CHECK:   %[[memptr1:.*]] = extractvalue { i8*, i32, i32 } %{{.*}}, 1
// CHECK:   %[[memptr2:.*]] = extractvalue { i8*, i32, i32 } %{{.*}}, 2
// CHECK:   %[[vbptr:.*]] = getelementptr inbounds i8* %{{.*}}, i32 0
// CHECK:   %[[vbptr_a:.*]] = bitcast i8* %[[vbptr]] to i8**
// CHECK:   %[[vbtable:.*]] = load i8** %[[vbptr_a:.*]]
// CHECK:   %[[v7:.*]] = getelementptr inbounds i8* %[[vbtable]], i32 %[[memptr2]]
// CHECK:   %[[v8:.*]] = bitcast i8* %[[v7]] to i32*
// CHECK:   %[[vbase_offs:.*]] = load i32* %[[v8]]
// CHECK:   %[[v10:.*]] = getelementptr inbounds i8* %[[vbptr]], i32 %[[vbase_offs]]
// CHECK:   %[[this_adjusted:.*]] = getelementptr inbounds i8* %[[v10]], i32 %[[memptr1]]
// CHECK:   %[[fptr:.*]] = bitcast i8* %[[memptr0]] to void ({{.*}})
// CHECK:   %[[this:.*]] = bitcast i8* %[[this_adjusted]] to {{.*}}
// CHECK:   call x86_thiscallcc void %[[fptr]](%{{.*}} %[[this]])
// CHECK:   ret void
// CHECK: }
}
348 
// Single model: equality is a plain pointer compare, so exactly one icmp.
bool compareSingleFunctionMemptr(void (Single::*l)(), void (Single::*r)()) {
  return l == r;
// Should only be one comparison here.
// CHECK: define zeroext i1 @"\01?compareSingleFunctionMemptr@@YA_NP8Single@@AEXXZ0@Z"{{.*}} {
// CHECK-NOT: icmp
// CHECK:   %[[r:.*]] = icmp eq
// CHECK-NOT: icmp
// CHECK:   ret i1 %[[r]]
// CHECK: }
}
359 
// Same as above for inequality: exactly one icmp ne.
bool compareNeqSingleFunctionMemptr(void (Single::*l)(), void (Single::*r)()) {
  return l != r;
// Should only be one comparison here.
// CHECK: define zeroext i1 @"\01?compareNeqSingleFunctionMemptr@@YA_NP8Single@@AEXXZ0@Z"{{.*}} {
// CHECK-NOT: icmp
// CHECK:   %[[r:.*]] = icmp ne
// CHECK-NOT: icmp
// CHECK:   ret i1 %[[r]]
// CHECK: }
}
370 
// Wide-model equality: compare all four fields, but two null member pointers
// with differing adjustment fields still compare equal — hence the extra
// "function pointer is null" disjunct or'ed into the field comparison.
bool unspecFuncMemptrEq(void (Unspecified::*l)(), void (Unspecified::*r)()) {
  return l == r;
// CHECK: define zeroext i1 @"\01?unspecFuncMemptrEq@@YA_NP8Unspecified@@AEXXZ0@Z"{{.*}} {
// CHECK:   %[[lhs0:.*]] = extractvalue { i8*, i32, i32, i32 } %[[l:.*]], 0
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r:.*]], 0
// CHECK:   %[[cmp0:.*]] = icmp eq i8* %[[lhs0]], %{{.*}}
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 1
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 1
// CHECK:   %[[cmp1:.*]] = icmp eq i32
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 2
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 2
// CHECK:   %[[cmp2:.*]] = icmp eq i32
// CHECK:   %[[res12:.*]] = and i1 %[[cmp1]], %[[cmp2]]
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 3
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 3
// CHECK:   %[[cmp3:.*]] = icmp eq i32
// CHECK:   %[[res123:.*]] = and i1 %[[res12]], %[[cmp3]]
// CHECK:   %[[iszero:.*]] = icmp eq i8* %[[lhs0]], null
// CHECK:   %[[bits_or_null:.*]] = or i1 %[[res123]], %[[iszero]]
// CHECK:   %{{.*}} = and i1 %[[bits_or_null]], %[[cmp0]]
// CHECK:   ret i1 %{{.*}}
// CHECK: }
}
394 
// De Morgan dual of unspecFuncMemptrEq: icmp ne, or'ed field results, and
// the null-function-pointer term and'ed in instead of or'ed.
bool unspecFuncMemptrNeq(void (Unspecified::*l)(), void (Unspecified::*r)()) {
  return l != r;
// CHECK: define zeroext i1 @"\01?unspecFuncMemptrNeq@@YA_NP8Unspecified@@AEXXZ0@Z"{{.*}} {
// CHECK:   %[[lhs0:.*]] = extractvalue { i8*, i32, i32, i32 } %[[l:.*]], 0
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r:.*]], 0
// CHECK:   %[[cmp0:.*]] = icmp ne i8* %[[lhs0]], %{{.*}}
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 1
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 1
// CHECK:   %[[cmp1:.*]] = icmp ne i32
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 2
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 2
// CHECK:   %[[cmp2:.*]] = icmp ne i32
// CHECK:   %[[res12:.*]] = or i1 %[[cmp1]], %[[cmp2]]
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[l]], 3
// CHECK:   %{{.*}} = extractvalue { i8*, i32, i32, i32 } %[[r]], 3
// CHECK:   %[[cmp3:.*]] = icmp ne i32
// CHECK:   %[[res123:.*]] = or i1 %[[res12]], %[[cmp3]]
// CHECK:   %[[iszero:.*]] = icmp ne i8* %[[lhs0]], null
// CHECK:   %[[bits_or_null:.*]] = and i1 %[[res123]], %[[iszero]]
// CHECK:   %{{.*}} = or i1 %[[bits_or_null]], %[[cmp0]]
// CHECK:   ret i1 %{{.*}}
// CHECK: }
}
418 
// Data memptr equality in the unspecified model: straight field-by-field
// comparison (no null special case, unlike the function pointer version).
bool unspecDataMemptrEq(int Unspecified::*l, int Unspecified::*r) {
  return l == r;
// CHECK: define zeroext i1 @"\01?unspecDataMemptrEq@@YA_NPQUnspecified@@H0@Z"{{.*}} {
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 0
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 0
// CHECK:   icmp eq i32
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 1
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 1
// CHECK:   icmp eq i32
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 2
// CHECK:   extractvalue { i32, i32, i32 } %{{.*}}, 2
// CHECK:   icmp eq i32
// CHECK:   and i1
// CHECK:   and i1
// CHECK:   ret i1
// CHECK: }
}
436 
// Widening conversion (single -> multiple model) must be guarded by a null
// check so null stays the all-zero pattern instead of gaining adjustment 4.
void (Multiple::*convertB2FuncToMultiple(void (B2::*mp)()))() {
  return mp;
// CHECK: define i64 @"\01?convertB2FuncToMultiple@@YAP8Multiple@@AEXXZP8B2@@AEXXZ@Z"{{.*}} {
// CHECK:   store
// CHECK:   %[[mp:.*]] = load i8** %{{.*}}, align 4
// CHECK:   icmp ne i8* %[[mp]], null
// CHECK:   br i1 %{{.*}} label %{{.*}}, label %{{.*}}
//
//        memptr.convert:                                   ; preds = %entry
// CHECK:   insertvalue { i8*, i32 } undef, i8* %[[mp]], 0
// CHECK:   insertvalue { i8*, i32 } %{{.*}}, i32 4, 1
// CHECK:   br label
//
//        memptr.converted:                                 ; preds = %memptr.convert, %entry
// CHECK:   phi { i8*, i32 } [ zeroinitializer, %{{.*}} ], [ {{.*}} ]
// CHECK: }
}
454 
// Narrowing conversion (multiple -> single model): the adjustment field is
// dropped; null is mapped back to a plain null function pointer via a phi.
void (B2::*convertMultipleFuncToB2(void (Multiple::*mp)()))() {
// FIXME: cl emits warning C4407 on this code because of the representation
// change.  We might want to do the same.
  return static_cast<void (B2::*)()>(mp);
// FIXME: We should return i8* instead of i32 here.  The ptrtoint cast prevents
// LLVM from optimizing away the branch.  This is likely a bug in
// lib/CodeGen/TargetInfo.cpp with how we classify memptr types for returns.
//
// CHECK: define i32 @"\01?convertMultipleFuncToB2@@YAP8B2@@AEXXZP8Multiple@@AEXXZ@Z"{{.*}} {
// CHECK:   store
// CHECK:   %[[src:.*]] = load { i8*, i32 }* %{{.*}}, align 4
// CHECK:   extractvalue { i8*, i32 } %[[src]], 0
// CHECK:   icmp ne i8* %{{.*}}, null
// CHECK:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
//
//        memptr.convert:                                   ; preds = %entry
// CHECK:   %[[fp:.*]] = extractvalue { i8*, i32 } %[[src]], 0
// CHECK:   br label
//
//        memptr.converted:                                 ; preds = %memptr.convert, %entry
// CHECK:   phi i8* [ null, %{{.*}} ], [ %[[fp]], %{{.*}} ]
// CHECK: }
}
478 
namespace Test1 {

// Derived-to-base memptr conversion within the virtual model: the non-virtual
// adjustment field is bumped by 4 (C's offset inside D), null is { null, 0, -1 }.
struct A { int a; };
struct B { int b; };
struct C : virtual A { int c; };
struct D : B, C { int d; };

void (D::*convertCToD(void (C::*mp)()))() {
  return mp;
// CHECK: define void @"\01?convertCToD@Test1@@YAP8D@1@AEXXZP8C@1@AEXXZ@Z"{{.*}} {
// CHECK:   store
// CHECK:   load { i8*, i32, i32 }* %{{.*}}, align 4
// CHECK:   extractvalue { i8*, i32, i32 } %{{.*}}, 0
// CHECK:   icmp ne i8* %{{.*}}, null
// CHECK:   br i1 %{{.*}}, label %{{.*}}, label %{{.*}}
//
//        memptr.convert:                                   ; preds = %entry
// CHECK:   extractvalue { i8*, i32, i32 } %{{.*}}, 0
// CHECK:   extractvalue { i8*, i32, i32 } %{{.*}}, 1
// CHECK:   extractvalue { i8*, i32, i32 } %{{.*}}, 2
// CHECK:   %[[adj:.*]] = add nsw i32 %{{.*}}, 4
// CHECK:   insertvalue { i8*, i32, i32 } undef, i8* {{.*}}, 0
// CHECK:   insertvalue { i8*, i32, i32 } {{.*}}, i32 %[[adj]], 1
// CHECK:   insertvalue { i8*, i32, i32 } {{.*}}, i32 {{.*}}, 2
// CHECK:   br label
//
//        memptr.converted:                                 ; preds = %memptr.convert, %entry
// CHECK:   phi { i8*, i32, i32 } [ { i8* null, i32 0, i32 -1 }, {{.*}} ], [ {{.*}} ]
// CHECK: }
}

}
511 
namespace Test2 {
// Test that we dynamically convert between different null reps.

struct A { int a; };
struct B : A { int b; };
struct C : A {
  int c;
  virtual void hasVfPtr();
};

// B and A share the -1 null representation, so the reinterpret_cast is free.
int A::*reinterpret(int B::*mp) {
  return reinterpret_cast<int A::*>(mp);
// CHECK: define i32 @"\01?reinterpret@Test2@@YAPQA@1@HPQB@1@H@Z"{{.*}}  {
// CHECK-NOT: select
// CHECK:   ret i32
// CHECK: }
}

// C has a vfptr, so its null rep is 0 while A's is -1: a select is needed
// to remap null.
int A::*reinterpret(int C::*mp) {
  return reinterpret_cast<int A::*>(mp);
// CHECK: define i32 @"\01?reinterpret@Test2@@YAPQA@1@HPQC@1@H@Z"{{.*}}  {
// CHECK:   %[[mp:.*]] = load i32*
// CHECK:   %[[cmp:.*]] = icmp ne i32 %[[mp]], 0
// CHECK:   select i1 %[[cmp]], i32 %[[mp]], i32 -1
// CHECK: }
}

}
540