; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32V
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64V

; Tests for splatting i64 constants/scalars into <vscale x 8 x i64> vectors.
; Constants that fit in simm5 should select vmv.v.i / vadd.vi; other values
; that fit in XLEN go through a GPR with vmv.v.x / vadd.vx. On RV32, a 64-bit
; value that cannot be materialized in a single 32-bit GPR is spilled to the
; stack and splatted with a zero-stride load (vlse64.v).

; -1 fits in simm5 on both targets: vmv.v.i.
define <vscale x 8 x i64> @vsplat_nxv8i64_1() {
; RV32V-LABEL: vsplat_nxv8i64_1:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    vmv.v.i v8, -1
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_1:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV64V-NEXT:    vmv.v.i v8, -1
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}

; 4 fits in simm5: vmv.v.i.
define <vscale x 8 x i64> @vsplat_nxv8i64_2() {
; RV32V-LABEL: vsplat_nxv8i64_2:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    vmv.v.i v8, 4
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_2:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV64V-NEXT:    vmv.v.i v8, 4
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 4, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}

; 255 does not fit in simm5 but fits in a GPR: vmv.v.x.
define <vscale x 8 x i64> @vsplat_nxv8i64_3() {
; RV32V-LABEL: vsplat_nxv8i64_3:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi a0, zero, 255
; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV32V-NEXT:    vmv.v.x v8, a0
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_3:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 255
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}

; 4211079935 (0xFAFF_FAFF) needs 33+ significant bits: RV32 spills to the
; stack and uses a zero-stride vlse64.v; RV64 materializes it in a GPR.
define <vscale x 8 x i64> @vsplat_nxv8i64_4() {
; RV32V-LABEL: vsplat_nxv8i64_4:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi sp, sp, -16
; RV32V-NEXT:    .cfi_def_cfa_offset 16
; RV32V-NEXT:    sw zero, 12(sp)
; RV32V-NEXT:    lui a0, 1028096
; RV32V-NEXT:    addi a0, a0, -1281
; RV32V-NEXT:    sw a0, 8(sp)
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    addi a0, sp, 8
; RV32V-NEXT:    vlse64.v v8, (a0), zero
; RV32V-NEXT:    addi sp, sp, 16
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_4:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 251
; RV64V-NEXT:    slli a0, a0, 24
; RV64V-NEXT:    addi a0, a0, -1281
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 4211079935, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}

; Splat of a variable i64: RV32 passes it in a register pair and goes through
; the stack; RV64 uses vmv.v.x directly.
define <vscale x 8 x i64> @vsplat_nxv8i64_5(i64 %a) {
; RV32V-LABEL: vsplat_nxv8i64_5:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi sp, sp, -16
; RV32V-NEXT:    .cfi_def_cfa_offset 16
; RV32V-NEXT:    sw a1, 12(sp)
; RV32V-NEXT:    sw a0, 8(sp)
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    addi a0, sp, 8
; RV32V-NEXT:    vlse64.v v8, (a0), zero
; RV32V-NEXT:    addi sp, sp, 16
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_5:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 %a, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}

; add of a simm5 splat folds to vadd.vi.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_6(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_6:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    vadd.vi v8, v8, 2
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_6:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV64V-NEXT:    vadd.vi v8, v8, 2
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 2, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}

define <vscale x 8 x i64> @vadd_vx_nxv8i64_7(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_7:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    vadd.vi v8, v8, -1
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_7:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV64V-NEXT:    vadd.vi v8, v8, -1
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}

; add of a non-simm5, GPR-sized splat folds to vadd.vx.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_8(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_8:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi a0, zero, 255
; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV32V-NEXT:    vadd.vx v8, v8, a0
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_8:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 255
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 255, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}

; 2063596287 fits in 32 bits (positive on both targets): vadd.vx.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_9(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_9:
; RV32V:       # %bb.0:
; RV32V-NEXT:    lui a0, 503808
; RV32V-NEXT:    addi a0, a0, -1281
; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV32V-NEXT:    vadd.vx v8, v8, a0
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_9:
; RV64V:       # %bb.0:
; RV64V-NEXT:    lui a0, 503808
; RV64V-NEXT:    addiw a0, a0, -1281
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 2063596287, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}

; 4211079935 again, now as an add operand: RV32 must splat via the stack and
; use vadd.vv; RV64 can still use vadd.vx.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_10(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_10:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi sp, sp, -16
; RV32V-NEXT:    .cfi_def_cfa_offset 16
; RV32V-NEXT:    sw zero, 12(sp)
; RV32V-NEXT:    lui a0, 1028096
; RV32V-NEXT:    addi a0, a0, -1281
; RV32V-NEXT:    sw a0, 8(sp)
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    addi a0, sp, 8
; RV32V-NEXT:    vlse64.v v16, (a0), zero
; RV32V-NEXT:    vadd.vv v8, v8, v16
; RV32V-NEXT:    addi sp, sp, 16
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_10:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 251
; RV64V-NEXT:    slli a0, a0, 24
; RV64V-NEXT:    addi a0, a0, -1281
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 4211079935, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}

; 8506047231 has a non-zero high word: RV32 stores both words and uses
; vlse64.v + vadd.vv; RV64 materializes the constant and uses vadd.vx.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_11(<vscale x 8 x i64> %v) {
; RV32V-LABEL: vadd_vx_nxv8i64_11:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi sp, sp, -16
; RV32V-NEXT:    .cfi_def_cfa_offset 16
; RV32V-NEXT:    addi a0, zero, 1
; RV32V-NEXT:    sw a0, 12(sp)
; RV32V-NEXT:    lui a0, 1028096
; RV32V-NEXT:    addi a0, a0, -1281
; RV32V-NEXT:    sw a0, 8(sp)
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    addi a0, sp, 8
; RV32V-NEXT:    vlse64.v v16, (a0), zero
; RV32V-NEXT:    vadd.vv v8, v8, v16
; RV32V-NEXT:    addi sp, sp, 16
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_11:
; RV64V:       # %bb.0:
; RV64V-NEXT:    addi a0, zero, 507
; RV64V-NEXT:    slli a0, a0, 24
; RV64V-NEXT:    addi a0, a0, -1281
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 8506047231, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}

; add of a variable i64 splat: RV32 goes through the stack (vadd.vv),
; RV64 uses vadd.vx.
define <vscale x 8 x i64> @vadd_vx_nxv8i64_12(<vscale x 8 x i64> %v, i64 %a) {
; RV32V-LABEL: vadd_vx_nxv8i64_12:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi sp, sp, -16
; RV32V-NEXT:    .cfi_def_cfa_offset 16
; RV32V-NEXT:    sw a1, 12(sp)
; RV32V-NEXT:    sw a0, 8(sp)
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    addi a0, sp, 8
; RV32V-NEXT:    vlse64.v v16, (a0), zero
; RV32V-NEXT:    vadd.vv v8, v8, v16
; RV32V-NEXT:    addi sp, sp, 16
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vadd_vx_nxv8i64_12:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vadd.vx v8, v8, a0
; RV64V-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 %a, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vret = add <vscale x 8 x i64> %v, %splat
  ret <vscale x 8 x i64> %vret
}

; sign-extended i32 splat: RV64 needs an explicit sext.w before vmv.v.x.
define <vscale x 8 x i64> @vsplat_nxv8i64_13(i32 %a) {
; RV32V-LABEL: vsplat_nxv8i64_13:
; RV32V:       # %bb.0:
; RV32V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV32V-NEXT:    vmv.v.x v8, a0
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_13:
; RV64V:       # %bb.0:
; RV64V-NEXT:    sext.w a0, a0
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    ret
  %b = sext i32 %a to i64
  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}

; zero-extended i32 splat: RV32 zeroes the high word on the stack; RV64
; clears the upper 32 bits with slli/srli before vmv.v.x.
define <vscale x 8 x i64> @vsplat_nxv8i64_14(i32 %a) {
; RV32V-LABEL: vsplat_nxv8i64_14:
; RV32V:       # %bb.0:
; RV32V-NEXT:    addi sp, sp, -16
; RV32V-NEXT:    .cfi_def_cfa_offset 16
; RV32V-NEXT:    sw zero, 12(sp)
; RV32V-NEXT:    sw a0, 8(sp)
; RV32V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; RV32V-NEXT:    addi a0, sp, 8
; RV32V-NEXT:    vlse64.v v8, (a0), zero
; RV32V-NEXT:    addi sp, sp, 16
; RV32V-NEXT:    ret
;
; RV64V-LABEL: vsplat_nxv8i64_14:
; RV64V:       # %bb.0:
; RV64V-NEXT:    slli a0, a0, 32
; RV64V-NEXT:    srli a0, a0, 32
; RV64V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    ret
  %b = zext i32 %a to i64
  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i64> %splat
}