; RUN: llc < %s -mtriple=armv7a-eabi   | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-AT2
; RUN: llc < %s -mtriple=thumbv7m-eabi | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-AT2
; RUN: llc < %s -mtriple=thumbv6m-eabi | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-T1

; This test checks that various kinds of getelementptr are all optimised to a
; simple multiply plus add, with the add being done by a register offset if the
; result is used in a load. In every case the pointer is advanced by n * 21 i32
; elements, i.e. a byte offset of n * 84, so the checks expect the constant #84
; (21 * sizeof(i32)).

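; Illustrative C equivalent of the function below (assumed, for readability;
; not necessarily the original source):
;   int *calc_1d(int *p, int n) { return p + n * 21; }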
; CHECK-LABEL: calc_1d:
; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
; CHECK-AT2: mla r0, r1, [[REG1]], r0
; CHECK-T1: muls [[REG2:r[0-9]+]], r1, [[REG1]]
; CHECK-T1: adds r0, r0, [[REG2]]
define i32* @calc_1d(i32* %p, i32 %n) {
entry:
  %mul = mul nsw i32 %n, 21
  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %mul
  ret i32* %add.ptr
}

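; Assumed C equivalent (illustrative):
;   int load_1d(int *p, int n) { return p[n * 21]; }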
; CHECK-LABEL: load_1d:
; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
; CHECK: mul{{s?}} [[REG2:r[0-9]+]],{{( r1,)?}} [[REG1]]{{(, r1)?}}
; CHECK: ldr r0, [r0, [[REG2]]]
define i32 @load_1d(i32* %p, i32 %n) #1 {
entry:
  %mul = mul nsw i32 %n, 21
  %arrayidx = getelementptr inbounds i32, i32* %p, i32 %mul
  %0 = load i32, i32* %arrayidx, align 4
  ret i32 %0
}

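; Assumed C equivalent (illustrative):
;   int *calc_2d_a(int (*p)[100], int n) { return &(*p)[n * 21]; }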
; CHECK-LABEL: calc_2d_a:
; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
; CHECK-AT2: mla r0, r1, [[REG1]], r0
; CHECK-T1: muls [[REG2:r[0-9]+]], r1, [[REG1]]
; CHECK-T1: adds r0, r0, [[REG2]]
define i32* @calc_2d_a([100 x i32]* %p, i32 %n) {
entry:
  %mul = mul nsw i32 %n, 21
  %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %p, i32 0, i32 %mul
  ret i32* %arrayidx1
}

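; Assumed C equivalent (illustrative):
;   int load_2d_a(int (*p)[100], int n) { return (*p)[n * 21]; }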
; CHECK-LABEL: load_2d_a:
; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
; CHECK: mul{{s?}} [[REG2:r[0-9]+]],{{( r1,)?}} [[REG1]]{{(, r1)?}}
; CHECK: ldr r0, [r0, [[REG2]]]
define i32 @load_2d_a([100 x i32]* %p, i32 %n) #1 {
entry:
  %mul = mul nsw i32 %n, 21
  %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %p, i32 0, i32 %mul
  %0 = load i32, i32* %arrayidx1, align 4
  ret i32 %0
}

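; Assumed C equivalent (illustrative):
;   int *calc_2d_b(int (*p)[21], int n) { return &p[n][0]; }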
; CHECK-LABEL: calc_2d_b:
; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
; CHECK-AT2: mla r0, r1, [[REG1]], r0
; CHECK-T1: muls [[REG2:r[0-9]+]], r1, [[REG1]]
; CHECK-T1: adds r0, r0, [[REG2]]
define i32* @calc_2d_b([21 x i32]* %p, i32 %n) {
entry:
  %arrayidx1 = getelementptr inbounds [21 x i32], [21 x i32]* %p, i32 %n, i32 0
  ret i32* %arrayidx1
}

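; Assumed C equivalent (illustrative):
;   int load_2d_b(int (*p)[21], int n) { return p[n][0]; }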
; CHECK-LABEL: load_2d_b:
; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
; CHECK: mul{{s?}} [[REG2:r[0-9]+]],{{( r1,)?}} [[REG1]]{{(, r1)?}}
; CHECK: ldr r0, [r0, [[REG2]]]
define i32 @load_2d_b([21 x i32]* %p, i32 %n) {
entry:
  %arrayidx1 = getelementptr inbounds [21 x i32], [21 x i32]* %p, i32 %n, i32 0
  %0 = load i32, i32* %arrayidx1, align 4
  ret i32 %0
}