; RUN: llc < %s -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers -disable-wasm-fallthrough-return-opt | FileCheck %s

; Test constant load and store address offsets.

target triple = "wasm32-unknown-unknown"
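
; WebAssembly loads and stores take an unsigned immediate offset that is added
; to the base address without wrapping around the address space, so a constant
; can only be folded into the offset field when the IR-level address
; arithmetic is known not to wrap (an 'nuw' add, or an 'inbounds' gep with a
; non-negative offset).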

;===----------------------------------------------------------------------------
; Loads: 32-bit
;===----------------------------------------------------------------------------

; Basic load.

; CHECK-LABEL: load_i32_no_offset:
; CHECK: i32.load $push0=, 0($0){{$}}
; CHECK-NEXT: return $pop0{{$}}
define i32 @load_i32_no_offset(i32 *%p) {
  %v = load i32, i32* %p
  ret i32 %v
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: load_i32_with_folded_offset:
; CHECK: i32.load  $push0=, 24($0){{$}}
define i32 @load_i32_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load i32, i32* %s
  ret i32 %t
}

; With an inbounds gep, we can fold an offset. The index is scaled by the
; element size, so an i32 index of 6 becomes a byte offset of 24.

; CHECK-LABEL: load_i32_with_folded_gep_offset:
; CHECK: i32.load  $push0=, 24($0){{$}}
define i32 @load_i32_with_folded_gep_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 6
  %t = load i32, i32* %s
  ret i32 %t
}

; We can't fold a negative offset though, even with an inbounds gep; the
; immediate offset field is unsigned.

; CHECK-LABEL: load_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.load  $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_gep_negative_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 -6
  %t = load i32, i32* %s
  ret i32 %t
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: load_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.load  $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load i32, i32* %s
  ret i32 %t
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: load_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.load  $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_gep_offset(i32* %p) {
  %s = getelementptr i32, i32* %p, i32 6
  %t = load i32, i32* %s
  ret i32 %t
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i32_from_numeric_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load  $push1=, 42($pop0){{$}}
define i32 @load_i32_from_numeric_address() {
  %s = inttoptr i32 42 to i32*
  %t = load i32, i32* %s
  ret i32 %t
}

; CHECK-LABEL: load_i32_from_global_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load  $push1=, gv($pop0){{$}}
@gv = global i32 0
define i32 @load_i32_from_global_address() {
  %t = load i32, i32* @gv
  ret i32 %t
}
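
; A hypothetical extra sketch (no CHECK lines attached, so FileCheck does not
; verify it): a constant global address plus an inbounds gep offset. One would
; expect the backend to fold the whole address into the offset field, e.g.
; something like "i32.load $push1=, gva+4($pop0)", but that expectation is an
; assumption.
@gva = global [2 x i32] zeroinitializer
define i32 @load_i32_from_global_array_element() {
  %s = getelementptr inbounds [2 x i32], [2 x i32]* @gva, i32 0, i32 1
  %t = load i32, i32* %s
  ret i32 %t
}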

;===----------------------------------------------------------------------------
; Loads: 64-bit
;===----------------------------------------------------------------------------

; Basic load.

; CHECK-LABEL: load_i64_no_offset:
; CHECK: i64.load $push0=, 0($0){{$}}
; CHECK-NEXT: return $pop0{{$}}
define i64 @load_i64_no_offset(i64 *%p) {
  %v = load i64, i64* %p
  ret i64 %v
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: load_i64_with_folded_offset:
; CHECK: i64.load  $push0=, 24($0){{$}}
define i64 @load_i64_with_folded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  %t = load i64, i64* %s
  ret i64 %t
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: load_i64_with_folded_gep_offset:
; CHECK: i64.load  $push0=, 24($0){{$}}
define i64 @load_i64_with_folded_gep_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 3
  %t = load i64, i64* %s
  ret i64 %t
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: load_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.load  $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_gep_negative_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 -3
  %t = load i64, i64* %s
  ret i64 %t
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: load_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.load  $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  %t = load i64, i64* %s
  ret i64 %t
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: load_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.load  $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_gep_offset(i64* %p) {
  %s = getelementptr i64, i64* %p, i32 3
  %t = load i64, i64* %s
  ret i64 %t
}
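
; A hypothetical extra sketch (no CHECK lines attached): the fixed-address
; pattern for a 64-bit load, expected to mirror load_i32_from_numeric_address
; with the 42 landing in the offset field; that expectation is an assumption.
define i64 @load_i64_from_numeric_address() {
  %s = inttoptr i32 42 to i64*
  %t = load i64, i64* %s
  ret i64 %t
}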

;===----------------------------------------------------------------------------
; Stores: 32-bit
;===----------------------------------------------------------------------------

; Basic store.

; CHECK-LABEL: store_i32_no_offset:
; CHECK-NEXT: .functype store_i32_no_offset (i32, i32) -> (){{$}}
; CHECK-NEXT: i32.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_i32_no_offset(i32 *%p, i32 %v) {
  store i32 %v, i32* %p
  ret void
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: store_i32_with_folded_offset:
; CHECK: i32.store 24($0), $pop0{{$}}
define void @store_i32_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  store i32 0, i32* %s
  ret void
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: store_i32_with_folded_gep_offset:
; CHECK: i32.store 24($0), $pop0{{$}}
define void @store_i32_with_folded_gep_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 6
  store i32 0, i32* %s
  ret void
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: store_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 -6
  store i32 0, i32* %s
  ret void
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: store_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  store i32 0, i32* %s
  ret void
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: store_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i32.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_gep_offset(i32* %p) {
  %s = getelementptr i32, i32* %p, i32 6
  store i32 0, i32* %s
  ret void
}

; When storing to a fixed address, materialize a zero.

; CHECK-LABEL: store_i32_to_numeric_address:
; CHECK:      i32.const $push0=, 0{{$}}
; CHECK-NEXT: i32.const $push1=, 0{{$}}
; CHECK-NEXT: i32.store 42($pop0), $pop1{{$}}
define void @store_i32_to_numeric_address() {
  %s = inttoptr i32 42 to i32*
  store i32 0, i32* %s
  ret void
}

; CHECK-LABEL: store_i32_to_global_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.const $push1=, 0{{$}}
; CHECK: i32.store gv($pop0), $pop1{{$}}
define void @store_i32_to_global_address() {
  store i32 0, i32* @gv
  ret void
}

;===----------------------------------------------------------------------------
; Stores: 64-bit
;===----------------------------------------------------------------------------

; With an nuw add, we can fold an offset.

; CHECK-LABEL: store_i64_with_folded_offset:
; CHECK: i64.store 24($0), $pop0{{$}}
define void @store_i64_with_folded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  store i64 0, i64* %s
  ret void
}

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: store_i64_with_folded_gep_offset:
; CHECK: i64.store 24($0), $pop0{{$}}
define void @store_i64_with_folded_gep_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 3
  store i64 0, i64* %s
  ret void
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: store_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 -3
  store i64 0, i64* %s
  ret void
}

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: store_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  store i64 0, i64* %s
  ret void
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: store_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add   $push1=, $0, $pop0{{$}}
; CHECK: i64.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_gep_offset(i64* %p) {
  %s = getelementptr i64, i64* %p, i32 3
  store i64 0, i64* %s
  ret void
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: store_i32_with_folded_or_offset:
; CHECK: i32.store8 2($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
define void @store_i32_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
  store i8 0, i8* %arrayidx, align 1
  ret void
}
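
; A hypothetical extra sketch (no CHECK lines attached): the fixed-address
; pattern for a 64-bit store, expected to mirror store_i32_to_numeric_address;
; that expectation is an assumption.
define void @store_i64_to_numeric_address() {
  %s = inttoptr i32 42 to i64*
  store i64 0, i64* %s
  ret void
}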

;===----------------------------------------------------------------------------
; Sign-extending loads
;===----------------------------------------------------------------------------

; Fold an offset into a sign-extending load.

; CHECK-LABEL: load_i8_i32_s_with_folded_offset:
; CHECK: i32.load8_s $push0=, 24($0){{$}}
define i32 @load_i8_i32_s_with_folded_offset(i8* %p) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i8*
  %t = load i8, i8* %s
  %u = sext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
; CHECK: i64.load32_s $push0=, 24($0){{$}}
define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load i32, i32* %s
  %u = sext i32 %t to i64
  ret i64 %u
}

; Fold a gep offset into a sign-extending load.

; CHECK-LABEL: load_i8_i32_s_with_folded_gep_offset:
; CHECK: i32.load8_s $push0=, 24($0){{$}}
define i32 @load_i8_i32_s_with_folded_gep_offset(i8* %p) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  %t = load i8, i8* %s
  %u = sext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i32_s_with_folded_gep_offset:
; CHECK: i32.load16_s $push0=, 48($0){{$}}
define i32 @load_i16_i32_s_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  %t = load i16, i16* %s
  %u = sext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i64_s_with_folded_gep_offset:
; CHECK: i64.load16_s $push0=, 48($0){{$}}
define i64 @load_i16_i64_s_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  %t = load i16, i16* %s
  %u = sext i16 %t to i64
  ret i64 %u
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.
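; For example, %x & -4 has its low two bits known to be zero, so
; (%x & -4) | 2 equals (%x & -4) + 2, and the 2 can go into the immediate
; offset field.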

; CHECK-LABEL: load_i8_i32_s_with_folded_or_offset:
; CHECK: i32.load8_s $push{{[0-9]+}}=, 2($pop{{[0-9]+}}){{$}}
define i32 @load_i8_i32_s_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
  %t1 = load i8, i8* %arrayidx
  %conv = sext i8 %t1 to i32
  ret i32 %conv
}

; CHECK-LABEL: load_i8_i64_s_with_folded_or_offset:
; CHECK: i64.load8_s $push{{[0-9]+}}=, 2($pop{{[0-9]+}}){{$}}
define i64 @load_i8_i64_s_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
  %t1 = load i8, i8* %arrayidx
  %conv = sext i8 %t1 to i64
  ret i64 %conv
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i16_i32_s_from_numeric_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load16_s  $push1=, 42($pop0){{$}}
define i32 @load_i16_i32_s_from_numeric_address() {
  %s = inttoptr i32 42 to i16*
  %t = load i16, i16* %s
  %u = sext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i8_i32_s_from_global_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load8_s  $push1=, gv8($pop0){{$}}
@gv8 = global i8 0
define i32 @load_i8_i32_s_from_global_address() {
  %t = load i8, i8* @gv8
  %u = sext i8 %t to i32
  ret i32 %u
}

;===----------------------------------------------------------------------------
; Zero-extending loads
;===----------------------------------------------------------------------------

; Fold an offset into a zero-extending load.

; CHECK-LABEL: load_i8_i32_z_with_folded_offset:
; CHECK: i32.load8_u $push0=, 24($0){{$}}
define i32 @load_i8_i32_z_with_folded_offset(i8* %p) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i8*
  %t = load i8, i8* %s
  %u = zext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i32_i64_z_with_folded_offset:
; CHECK: i64.load32_u $push0=, 24($0){{$}}
define i64 @load_i32_i64_z_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load i32, i32* %s
  %u = zext i32 %t to i64
  ret i64 %u
}

; Fold a gep offset into a zero-extending load.

; CHECK-LABEL: load_i8_i32_z_with_folded_gep_offset:
; CHECK: i32.load8_u $push0=, 24($0){{$}}
define i32 @load_i8_i32_z_with_folded_gep_offset(i8* %p) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  %t = load i8, i8* %s
  %u = zext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i32_z_with_folded_gep_offset:
; CHECK: i32.load16_u $push0=, 48($0){{$}}
define i32 @load_i16_i32_z_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  %t = load i16, i16* %s
  %u = zext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_i64_z_with_folded_gep_offset:
; CHECK: i64.load16_u $push0=, 48($0){{$}}
define i64 @load_i16_i64_z_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i64 24
  %t = load i16, i16* %s
  %u = zext i16 %t to i64
  ret i64 %u
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i16_i32_z_from_numeric_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load16_u  $push1=, 42($pop0){{$}}
define i32 @load_i16_i32_z_from_numeric_address() {
  %s = inttoptr i32 42 to i16*
  %t = load i16, i16* %s
  %u = zext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i8_i32_z_from_global_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.load8_u  $push1=, gv8($pop0){{$}}
define i32 @load_i8_i32_z_from_global_address() {
  %t = load i8, i8* @gv8
  %u = zext i8 %t to i32
  ret i32 %u
}

; An i8 return value should exercise anyext loads.
; CHECK-LABEL: load_i8_i32_retvalue:
; CHECK: i32.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i8 @load_i8_i32_retvalue(i8 *%p) {
  %v = load i8, i8* %p
  ret i8 %v
}
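
; A hypothetical companion sketch (no CHECK lines attached): an i16 return
; value should likewise take an anyext load, presumably i32.load16_u; that
; expectation is an assumption.
define i16 @load_i16_retvalue(i16* %p) {
  %v = load i16, i16* %p
  ret i16 %v
}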

;===----------------------------------------------------------------------------
; Truncating stores
;===----------------------------------------------------------------------------

; Fold an offset into a truncating store.

; CHECK-LABEL: store_i8_i32_with_folded_offset:
; CHECK: i32.store8 24($0), $1{{$}}
define void @store_i8_i32_with_folded_offset(i8* %p, i32 %v) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i8*
  %t = trunc i32 %v to i8
  store i8 %t, i8* %s
  ret void
}

; CHECK-LABEL: store_i32_i64_with_folded_offset:
; CHECK: i64.store32 24($0), $1{{$}}
define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = trunc i64 %v to i32
  store i32 %t, i32* %s
  ret void
}

; Fold a gep offset into a truncating store.

; CHECK-LABEL: store_i8_i32_with_folded_gep_offset:
; CHECK: i32.store8 24($0), $1{{$}}
define void @store_i8_i32_with_folded_gep_offset(i8* %p, i32 %v) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  %t = trunc i32 %v to i8
  store i8 %t, i8* %s
  ret void
}

; CHECK-LABEL: store_i16_i32_with_folded_gep_offset:
; CHECK: i32.store16 48($0), $1{{$}}
define void @store_i16_i32_with_folded_gep_offset(i16* %p, i32 %v) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  %t = trunc i32 %v to i16
  store i16 %t, i16* %s
  ret void
}

; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
; CHECK: i64.store16 48($0), $1{{$}}
define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
  %s = getelementptr inbounds i16, i16* %p, i64 24
  %t = trunc i64 %v to i16
  store i16 %t, i16* %s
  ret void
}

; 'add' in this code becomes 'or' after DAG optimization. Treat an 'or' node as
; an 'add' if the or'ed bits are known to be zero.

; CHECK-LABEL: store_i8_i32_with_folded_or_offset:
; CHECK: i32.store8 2($pop{{[0-9]+}}), $1{{$}}
define void @store_i8_i32_with_folded_or_offset(i32 %x, i32 %v) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
  %t = trunc i32 %v to i8
  store i8 %t, i8* %arrayidx
  ret void
}

; CHECK-LABEL: store_i8_i64_with_folded_or_offset:
; CHECK: i64.store8 2($pop{{[0-9]+}}), $1{{$}}
define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
  %t = trunc i64 %v to i8
  store i8 %t, i8* %arrayidx
  ret void
}

;===----------------------------------------------------------------------------
; Aggregate values
;===----------------------------------------------------------------------------

; Fold the offsets when lowering aggregate loads and stores.

; CHECK-LABEL: aggregate_load_store:
; CHECK: i32.load  $2=, 0($0){{$}}
; CHECK: i32.load  $3=, 4($0){{$}}
; CHECK: i32.load  $4=, 8($0){{$}}
; CHECK: i32.load  $push0=, 12($0){{$}}
; CHECK: i32.store 12($1), $pop0{{$}}
; CHECK: i32.store 8($1), $4{{$}}
; CHECK: i32.store 4($1), $3{{$}}
; CHECK: i32.store 0($1), $2{{$}}
define void @aggregate_load_store({i32,i32,i32,i32}* %p, {i32,i32,i32,i32}* %q) {
  ; volatile so that things stay in order for the tests above
  %t = load volatile {i32,i32,i32,i32}, {i32, i32,i32,i32}* %p
  store volatile {i32,i32,i32,i32} %t, {i32, i32,i32,i32}* %q
  ret void
}

; Fold the offsets when lowering aggregate return values. The four i32 zero
; stores get merged into two i64 stores, at offsets 0 and 8.

; CHECK-LABEL: aggregate_return:
; CHECK: i64.const   $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK: i64.store   8($0), $pop[[L0]]{{$}}
; CHECK: i64.const   $push[[L1:[0-9]+]]=, 0{{$}}
; CHECK: i64.store   0($0), $pop[[L1]]{{$}}
define {i32,i32,i32,i32} @aggregate_return() {
  ret {i32,i32,i32,i32} zeroinitializer
}

; Fold the offsets when lowering aggregate return values. The stores are not
; merged.

; CHECK-LABEL: aggregate_return_without_merge:
; CHECK: i32.const   $push[[L0:[0-9]+]]=, 0{{$}}
; CHECK: i32.store8  14($0), $pop[[L0]]{{$}}
; CHECK: i32.const   $push[[L1:[0-9]+]]=, 0{{$}}
; CHECK: i32.store16 12($0), $pop[[L1]]{{$}}
; CHECK: i32.const   $push[[L2:[0-9]+]]=, 0{{$}}
; CHECK: i32.store   8($0), $pop[[L2]]{{$}}
; CHECK: i64.const   $push[[L3:[0-9]+]]=, 0{{$}}
; CHECK: i64.store   0($0), $pop[[L3]]{{$}}
define {i64,i32,i16,i8} @aggregate_return_without_merge() {
  ret {i64,i32,i16,i8} zeroinitializer
}