; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector load intrinsic instructions
;;;
;;; Note:
;;;   We test VLD*rrl, VLD*irl, VLD*rrl_v, and VLD*irl_v instructions.

; Function Attrs: nounwind
define void @vld_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vld_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vld_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vld_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vld %v0, %s1, %s2
; CHECK-NEXT:    vld %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vld_vssl_imm(i8* %0) {
; CHECK-LABEL: vld_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vld_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vld_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
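
;;; A guide to the pattern above and below (our assumption from the intrinsic
;;; names, not stated in this file): the .vssl intrinsics take (stride, base
;;; address, vector length) operands; the .vssvl intrinsics additionally take
;;; a pass-through vector, selecting the _v instruction variants; a .nc
;;; mnemonic suffix requests the non-cacheable form of a load.  The matching
;;; clang builtins are presumably the _vel_* functions of velintrin.h, e.g.
;;;   __vr v = _vel_vld_vssl(stride, p, 256);
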
; Function Attrs: nounwind
define void @vldnc_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldnc.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldnc_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vld.nc %v0, %s1, %s2
; CHECK-NEXT:    vld.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldnc.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldnc_vssl_imm(i8* %0) {
; CHECK-LABEL: vldnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldnc_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld.nc %v0, 8, %s1
; CHECK-NEXT:    vld.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
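
;;; vldu is the 32-bit "load upper" form: each 4-byte datum presumably lands
;;; in the upper half of its 64-bit vector element, where VE keeps
;;; single-precision values.  vldu.nc is its non-cacheable twin.
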
; Function Attrs: nounwind
define void @vldu_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldu_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldu.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldu_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldu_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldu %v0, %s1, %s2
; CHECK-NEXT:    vldu %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldu.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldu_vssl_imm(i8* %0) {
; CHECK-LABEL: vldu_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldu %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldu_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldu_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu %v0, 8, %s1
; CHECK-NEXT:    vldu %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
; Function Attrs: nounwind
define void @vldunc_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldunc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldunc.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldunc_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldunc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldu.nc %v0, %s1, %s2
; CHECK-NEXT:    vldu.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldunc.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldunc_vssl_imm(i8* %0) {
; CHECK-LABEL: vldunc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldu.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldunc_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldunc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu.nc %v0, 8, %s1
; CHECK-NEXT:    vldu.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldunc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
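
;;; vldl.sx and vldl.zx are the 32-bit "load lower" forms; .sx sign-extends
;;; and .zx zero-extends each loaded word to 64 bits.
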
; Function Attrs: nounwind
define void @vldlsx_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldlsx_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.sx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldlsx.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldlsx_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldlsx_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl.sx %v0, %s1, %s2
; CHECK-NEXT:    vldl.sx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldlsx_vssl_imm(i8* %0) {
; CHECK-LABEL: vldlsx_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl.sx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldlsx_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldlsx_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.sx %v0, 8, %s1
; CHECK-NEXT:    vldl.sx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
; Function Attrs: nounwind
define void @vldlsxnc_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldlsxnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.sx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldlsxnc_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldlsxnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl.sx.nc %v0, %s1, %s2
; CHECK-NEXT:    vldl.sx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldlsxnc_vssl_imm(i8* %0) {
; CHECK-LABEL: vldlsxnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl.sx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldlsxnc_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldlsxnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.sx.nc %v0, 8, %s1
; CHECK-NEXT:    vldl.sx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlsxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
; Function Attrs: nounwind
define void @vldlzx_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldlzx_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.zx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldlzx.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldlzx_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldlzx_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl.zx %v0, %s1, %s2
; CHECK-NEXT:    vldl.zx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldlzx_vssl_imm(i8* %0) {
; CHECK-LABEL: vldlzx_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl.zx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldlzx_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldlzx_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.zx %v0, 8, %s1
; CHECK-NEXT:    vldl.zx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
; Function Attrs: nounwind
define void @vldlzxnc_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldlzxnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.zx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldlzxnc_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldlzxnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl.zx.nc %v0, %s1, %s2
; CHECK-NEXT:    vldl.zx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldlzxnc_vssl_imm(i8* %0) {
; CHECK-LABEL: vldlzxnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl.zx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldlzxnc_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldlzxnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl.zx.nc %v0, 8, %s1
; CHECK-NEXT:    vldl.zx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldlzxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
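
;;; The *2d groups use the two-dimensional load encoding of the ISA; at the
;;; intrinsic level the operand shapes are unchanged from vld.
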
; Function Attrs: nounwind
define void @vld2d_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vld2d_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld2d %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld2d.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vld2d_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vld2d_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vld2d %v0, %s1, %s2
; CHECK-NEXT:    vld2d %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld2d.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vld2d_vssl_imm(i8* %0) {
; CHECK-LABEL: vld2d_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld2d %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vld2d_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vld2d_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld2d %v0, 8, %s1
; CHECK-NEXT:    vld2d %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2d.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
; Function Attrs: nounwind
define void @vld2dnc_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vld2dnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld2d.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vld2dnc_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vld2dnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vld2d.nc %v0, %s1, %s2
; CHECK-NEXT:    vld2d.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vld2dnc_vssl_imm(i8* %0) {
; CHECK-LABEL: vld2dnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld2d.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vld2dnc_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vld2dnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld2d.nc %v0, 8, %s1
; CHECK-NEXT:    vld2d.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vld2dnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
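
;;; vldu2d combines the two-dimensional form with the 32-bit upper load.
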
; Function Attrs: nounwind
define void @vldu2d_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldu2d_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu2d %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldu2d.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldu2d_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldu2d_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldu2d %v0, %s1, %s2
; CHECK-NEXT:    vldu2d %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldu2d_vssl_imm(i8* %0) {
; CHECK-LABEL: vldu2d_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldu2d %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldu2d_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldu2d_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu2d %v0, 8, %s1
; CHECK-NEXT:    vldu2d %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2d.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
; Function Attrs: nounwind
define void @vldu2dnc_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldu2dnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu2d.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldu2dnc_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldu2dnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldu2d.nc %v0, %s1, %s2
; CHECK-NEXT:    vldu2d.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldu2dnc_vssl_imm(i8* %0) {
; CHECK-LABEL: vldu2dnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldu2d.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldu2dnc_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldu2dnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldu2d.nc %v0, 8, %s1
; CHECK-NEXT:    vldu2d.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldu2dnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}
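
;;; vldl2d.sx and vldl2d.zx combine the two-dimensional form with the sign-
;;; and zero-extending 32-bit lower loads.
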
; Function Attrs: nounwind
define void @vldl2dsx_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldl2dsx_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.sx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldl2dsx_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldl2dsx_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl2d.sx %v0, %s1, %s2
; CHECK-NEXT:    vldl2d.sx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldl2dsx_vssl_imm(i8* %0) {
; CHECK-LABEL: vldl2dsx_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl2d.sx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldl2dsx_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldl2dsx_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.sx %v0, 8, %s1
; CHECK-NEXT:    vldl2d.sx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldl2dsxnc_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldl2dsxnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.sx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldl2dsxnc_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldl2dsxnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl2d.sx.nc %v0, %s1, %s2
; CHECK-NEXT:    vldl2d.sx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldl2dsxnc_vssl_imm(i8* %0) {
; CHECK-LABEL: vldl2dsxnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl2d.sx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldl2dsxnc_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldl2dsxnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.sx.nc %v0, 8, %s1
; CHECK-NEXT:    vldl2d.sx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dsxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldl2dzx_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldl2dzx_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.zx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldl2dzx_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldl2dzx_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl2d.zx %v0, %s1, %s2
; CHECK-NEXT:    vldl2d.zx %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldl2dzx_vssl_imm(i8* %0) {
; CHECK-LABEL: vldl2dzx_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl2d.zx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldl2dzx_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldl2dzx_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.zx %v0, 8, %s1
; CHECK-NEXT:    vldl2d.zx %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzx.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldl2dzxnc_vssl(i8* %0, i64 %1) {
; CHECK-LABEL: vldl2dzxnc_vssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.zx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %3, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64, i8*, i32)

; Function Attrs: nounwind
define void @vldl2dzxnc_vssvl(i8* %0, i64 %1, i8* %2) {
; CHECK-LABEL: vldl2dzxnc_vssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s3, 256
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vldl2d.zx.nc %v0, %s1, %s2
; CHECK-NEXT:    vldl2d.zx.nc %v0, %s1, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, %s1, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 %1, i8* %2, i32 256)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 %1, i8* %0, <256 x double> %4, i32 256)
  tail call void asm sideeffect "vst ${0:v}, $2, $1", "v,r,r"(<256 x double> %5, i8* %0, i64 %1)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64, i8*, <256 x double>, i32)

; Function Attrs: nounwind
define void @vldl2dzxnc_vssl_imm(i8* %0) {
; CHECK-LABEL: vldl2dzxnc_vssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vldl2d.zx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, i8* %0, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %2, i8* %0)
  ret void
}

; Function Attrs: nounwind
define void @vldl2dzxnc_vssvl_imm(i8* %0, i8* %1) {
; CHECK-LABEL: vldl2dzxnc_vssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vldl2d.zx.nc %v0, 8, %s1
; CHECK-NEXT:    vldl2d.zx.nc %v0, 8, %s0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssl(i64 8, i8* %1, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 8, i8* %0, <256 x double> %3, i32 256)
  tail call void asm sideeffect "vst ${0:v}, 8, $1", "v,r"(<256 x double> %4, i8* %0)
  ret void
}