; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; Check indexed and unindexed, sext, zext and anyext loads

define i64 @lb(i8 *%a) nounwind {
; RV64I-LABEL: lb:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lb a1, 1(a0)
; RV64I-NEXT:    lb a0, 0(a0)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = getelementptr i8, i8* %a, i32 1
  %2 = load i8, i8* %1
  %3 = sext i8 %2 to i64
  ; the unused load will produce an anyext for selection
  %4 = load volatile i8, i8* %a
  ret i64 %3
}

define i64 @lh(i16 *%a) nounwind {
; RV64I-LABEL: lh:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lh a1, 4(a0)
; RV64I-NEXT:    lh a0, 0(a0)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = getelementptr i16, i16* %a, i32 2
  %2 = load i16, i16* %1
  %3 = sext i16 %2 to i64
  ; the unused load will produce an anyext for selection
  %4 = load volatile i16, i16* %a
  ret i64 %3
}

define i64 @lw(i32 *%a) nounwind {
; RV64I-LABEL: lw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lw a1, 12(a0)
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = getelementptr i32, i32* %a, i32 3
  %2 = load i32, i32* %1
  %3 = sext i32 %2 to i64
  ; the unused load will produce an anyext for selection
  %4 = load volatile i32, i32* %a
  ret i64 %3
}

define i64 @lbu(i8 *%a) nounwind {
; RV64I-LABEL: lbu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lbu a1, 4(a0)
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %1 = getelementptr i8, i8* %a, i32 4
  %2 = load i8, i8* %1
  %3 = zext i8 %2 to i64
  %4 = load volatile i8, i8* %a
  %5 = zext i8 %4 to i64
  %6 = add i64 %3, %5
  ret i64 %6
}

define i64 @lhu(i16 *%a) nounwind {
; RV64I-LABEL: lhu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lhu a1, 10(a0)
; RV64I-NEXT:    lhu a0, 0(a0)
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %1 = getelementptr i16, i16* %a, i32 5
  %2 = load i16, i16* %1
  %3 = zext i16 %2 to i64
  %4 = load volatile i16, i16* %a
  %5 = zext i16 %4 to i64
  %6 = add i64 %3, %5
  ret i64 %6
}

define i64 @lwu(i32 *%a) nounwind {
; RV64I-LABEL: lwu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lwu a1, 24(a0)
; RV64I-NEXT:    lwu a0, 0(a0)
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
  %1 = getelementptr i32, i32* %a, i32 6
  %2 = load i32, i32* %1
  %3 = zext i32 %2 to i64
  %4 = load volatile i32, i32* %a
  %5 = zext i32 %4 to i64
  %6 = add i64 %3, %5
  ret i64 %6
}

; Check indexed and unindexed stores

define void @sb(i8 *%a, i8 %b) nounwind {
; RV64I-LABEL: sb:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sb a1, 0(a0)
; RV64I-NEXT:    sb a1, 7(a0)
; RV64I-NEXT:    ret
  store i8 %b, i8* %a
  %1 = getelementptr i8, i8* %a, i32 7
  store i8 %b, i8* %1
  ret void
}

define void @sh(i16 *%a, i16 %b) nounwind {
; RV64I-LABEL: sh:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sh a1, 0(a0)
; RV64I-NEXT:    sh a1, 16(a0)
; RV64I-NEXT:    ret
  store i16 %b, i16* %a
  %1 = getelementptr i16, i16* %a, i32 8
  store i16 %b, i16* %1
  ret void
}

define void @sw(i32 *%a, i32 %b) nounwind {
; RV64I-LABEL: sw:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sw a1, 0(a0)
; RV64I-NEXT:    sw a1, 36(a0)
; RV64I-NEXT:    ret
  store i32 %b, i32* %a
  %1 = getelementptr i32, i32* %a, i32 9
  store i32 %b, i32* %1
  ret void
}

; 64-bit loads and stores

define i64 @ld(i64 *%a) nounwind {
; RV64I-LABEL: ld:
; RV64I:       # %bb.0:
; RV64I-NEXT:    ld a1, 80(a0)
; RV64I-NEXT:    ld a0, 0(a0)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = getelementptr i64, i64* %a, i32 10
  %2 = load i64, i64* %1
  %3 = load volatile i64, i64* %a
  ret i64 %2
}

define void @sd(i64 *%a, i64 %b) nounwind {
; RV64I-LABEL: sd:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sd a1, 0(a0)
; RV64I-NEXT:    sd a1, 88(a0)
; RV64I-NEXT:    ret
  store i64 %b, i64* %a
  %1 = getelementptr i64, i64* %a, i32 11
  store i64 %b, i64* %1
  ret void
}

; Check load and store to an i1 location
define i64 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
; RV64I-LABEL: load_sext_zext_anyext_i1:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lbu a1, 1(a0)
; RV64I-NEXT:    lbu a2, 2(a0)
; RV64I-NEXT:    lb a0, 0(a0)
; RV64I-NEXT:    sub a0, a2, a1
; RV64I-NEXT:    ret
  ; sextload i1
  %1 = getelementptr i1, i1* %a, i32 1
  %2 = load i1, i1* %1
  %3 = sext i1 %2 to i64
  ; zextload i1
  %4 = getelementptr i1, i1* %a, i32 2
  %5 = load i1, i1* %4
  %6 = zext i1 %5 to i64
  %7 = add i64 %3, %6
  ; extload i1 (anyext). Produced as the load is unused.
  %8 = load volatile i1, i1* %a
  ret i64 %7
}

define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
; RV64I-LABEL: load_sext_zext_anyext_i1_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lbu a1, 1(a0)
; RV64I-NEXT:    lbu a2, 2(a0)
; RV64I-NEXT:    lb a0, 0(a0)
; RV64I-NEXT:    sub a0, a2, a1
; RV64I-NEXT:    ret
  ; sextload i1
  %1 = getelementptr i1, i1* %a, i32 1
  %2 = load i1, i1* %1
  %3 = sext i1 %2 to i16
  ; zextload i1
  %4 = getelementptr i1, i1* %a, i32 2
  %5 = load i1, i1* %4
  %6 = zext i1 %5 to i16
  %7 = add i16 %3, %6
  ; extload i1 (anyext). Produced as the load is unused.
  %8 = load volatile i1, i1* %a
  ret i16 %7
}
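
; Note: in the two i1 tests above, the sext of an i1 equals the
; negation of its zext, so (add (sext i1 x), (zext i1 y)) is selected
; as a sub of two zero-extended (lbu) loads; the unused anyext load
; selects a plain lb.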

; Check load and store to a global
@G = global i64 0

define i64 @ld_sd_global(i64 %a) nounwind {
; RV64I-LABEL: ld_sd_global:
; RV64I:       # %bb.0:
; RV64I-NEXT:    lui a2, %hi(G)
; RV64I-NEXT:    ld a1, %lo(G)(a2)
; RV64I-NEXT:    sd a0, %lo(G)(a2)
; RV64I-NEXT:    addi a2, a2, %lo(G)
; RV64I-NEXT:    ld a3, 72(a2)
; RV64I-NEXT:    sd a0, 72(a2)
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    ret
  %1 = load volatile i64, i64* @G
  store i64 %a, i64* @G
  %2 = getelementptr i64, i64* @G, i64 9
  %3 = load volatile i64, i64* %2
  store i64 %a, i64* %2
  ret i64 %1
}
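
; Note: ld_sd_global accesses G directly through the %lo(G) immediate
; of the lui-formed address; the accesses at G+9 instead materialize
; the full address of G with addi and fold the 72-byte (9 * 8) offset
; into the ld/sd immediates.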