; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s

define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i8> %va, %vb
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vmul_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vmul_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i8> %va, %vb
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vmul_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vmul_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i8> %va, %vb
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vmul_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vmul_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vmul_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vmul_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i8> %va, %vb
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vmul_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vmul_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i8> %va, %vb
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
  %vc = mul <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vmul_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
  %vc = mul <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vmul_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 64 x i8> %va, %vb
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
  %vc = mul <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vmul_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
  %vc = mul <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 1 x i16> @vmul_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i16> %va, %vb
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vmul_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vmul_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i16> %va, %vb
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vmul_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vmul_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i16> %va, %vb
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vmul_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vmul_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vmul_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vmul_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i16> %va, %vb
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vmul_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vmul_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i16> %va, %vb
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vmul_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
  %vc = mul <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vmul_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
  %vc = mul <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 1 x i32> @vmul_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i32> %va, %vb
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vmul_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vmul_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vmul_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i32> %va, %vb
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vmul_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vmul_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vmul_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i32> %va, %vb
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vmul_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vmul_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i32> %va, %vb
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 16 x i32> @vmul_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i32> %va, %vb
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vmul_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b) {
; CHECK-LABEL: vmul_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vmul_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 1 x i64> @vmul_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i64> %va, %vb
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v25, (a0), zero
; CHECK-NEXT:    vmul.vv v8, v8, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i64> %va, %vb
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v26, (a0), zero
; CHECK-NEXT:    vmul.vv v8, v8, v26
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i64> %va, %vb
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v28, (a0), zero
; CHECK-NEXT:    vmul.vv v8, v8, v28
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i64> %va, %vb
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmul_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v16, (a0), zero
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}