1; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s 2 3define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) { 4;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 5 %tmp3 = icmp eq <8 x i8> %A, %B; 6 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 7 ret <8 x i8> %tmp4 8} 9 10define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) { 11;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 12 %tmp3 = icmp eq <16 x i8> %A, %B; 13 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 14 ret <16 x i8> %tmp4 15} 16 17define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) { 18;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h 19 %tmp3 = icmp eq <4 x i16> %A, %B; 20 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 21 ret <4 x i16> %tmp4 22} 23 24define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) { 25;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h 26 %tmp3 = icmp eq <8 x i16> %A, %B; 27 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 28 ret <8 x i16> %tmp4 29} 30 31define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) { 32;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 33 %tmp3 = icmp eq <2 x i32> %A, %B; 34 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 35 ret <2 x i32> %tmp4 36} 37 38define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) { 39;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 40 %tmp3 = icmp eq <4 x i32> %A, %B; 41 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 42 ret <4 x i32> %tmp4 43} 44 45define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) { 46;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 47 %tmp3 = icmp eq <2 x i64> %A, %B; 48 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 49 ret <2 x i64> %tmp4 50} 51 52define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) { 53;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 54;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 55 %tmp3 = icmp ne <8 x i8> %A, %B; 56 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 57 ret <8 x i8> %tmp4 58} 59 60define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) { 61;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 62;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 63 %tmp3 = icmp ne <16 x i8> %A, %B; 64 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 65 ret <16 x i8> %tmp4 66} 67 68define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) { 69;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h 70;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 71 %tmp3 = icmp ne <4 x i16> %A, %B; 72 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 73 ret <4 x i16> %tmp4 74} 75 76define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) { 77;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h 78;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 79 %tmp3 = icmp ne <8 x i16> %A, %B; 80 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 81 ret <8 x i16> %tmp4 82} 83 84define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) { 85;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 86;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 87 %tmp3 = icmp ne <2 x i32> %A, %B; 88 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 89 ret <2 x i32> %tmp4 90} 91 92define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) { 93;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 94;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 95 %tmp3 = icmp ne <4 x i32> %A, %B; 96 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 97 ret <4 x i32> %tmp4 98} 99 100define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) { 101;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 102;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 103 %tmp3 = icmp ne <2 x i64> %A, %B; 104 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 105 ret <2 x i64> %tmp4 106} 107 108define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) { 109;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 110 %tmp3 = icmp sgt <8 x i8> %A, %B; 111 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 112 ret <8 x i8> %tmp4 113} 114 115define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) { 116;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 117 %tmp3 = icmp sgt <16 x i8> %A, %B; 118 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 119 ret <16 x i8> %tmp4 120} 121 122define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) { 123;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h 124 %tmp3 = icmp sgt <4 x i16> %A, %B; 125 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 126 ret <4 x i16> %tmp4 127} 128 129define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) { 130;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h 131 %tmp3 = icmp sgt <8 x i16> %A, %B; 132 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 133 ret <8 x i16> %tmp4 134} 135 136define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) { 137;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 138 %tmp3 = icmp sgt <2 x i32> %A, %B; 139 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 140 ret <2 x i32> %tmp4 141} 142 143define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) { 144;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 145 %tmp3 = icmp sgt <4 x i32> %A, %B; 146 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 147 ret <4 x i32> %tmp4 148} 149 150define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) { 151;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 152 %tmp3 = icmp sgt <2 x i64> %A, %B; 153 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 154 ret <2 x i64> %tmp4 155} 156 157define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) { 158; Using registers other than v0, v1 are possible, but would be odd. 159; LT implemented as GT, so check reversed operands. 160;CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b 161 %tmp3 = icmp slt <8 x i8> %A, %B; 162 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 163 ret <8 x i8> %tmp4 164} 165 166define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) { 167; Using registers other than v0, v1 are possible, but would be odd. 168; LT implemented as GT, so check reversed operands. 169;CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b 170 %tmp3 = icmp slt <16 x i8> %A, %B; 171 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 172 ret <16 x i8> %tmp4 173} 174 175define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) { 176; Using registers other than v0, v1 are possible, but would be odd. 177; LT implemented as GT, so check reversed operands. 178;CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h 179 %tmp3 = icmp slt <4 x i16> %A, %B; 180 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 181 ret <4 x i16> %tmp4 182} 183 184define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) { 185; Using registers other than v0, v1 are possible, but would be odd. 186; LT implemented as GT, so check reversed operands. 187;CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h 188 %tmp3 = icmp slt <8 x i16> %A, %B; 189 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 190 ret <8 x i16> %tmp4 191} 192 193define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) { 194; Using registers other than v0, v1 are possible, but would be odd. 195; LT implemented as GT, so check reversed operands. 196;CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s 197 %tmp3 = icmp slt <2 x i32> %A, %B; 198 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 199 ret <2 x i32> %tmp4 200} 201 202define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) { 203; Using registers other than v0, v1 are possible, but would be odd. 204; LT implemented as GT, so check reversed operands. 205;CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s 206 %tmp3 = icmp slt <4 x i32> %A, %B; 207 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 208 ret <4 x i32> %tmp4 209} 210 211define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) { 212; Using registers other than v0, v1 are possible, but would be odd. 213; LT implemented as GT, so check reversed operands. 214;CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d 215 %tmp3 = icmp slt <2 x i64> %A, %B; 216 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 217 ret <2 x i64> %tmp4 218} 219 220define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) { 221;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 222 %tmp3 = icmp sge <8 x i8> %A, %B; 223 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 224 ret <8 x i8> %tmp4 225} 226 227define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) { 228;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 229 %tmp3 = icmp sge <16 x i8> %A, %B; 230 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 231 ret <16 x i8> %tmp4 232} 233 234define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) { 235;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h 236 %tmp3 = icmp sge <4 x i16> %A, %B; 237 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 238 ret <4 x i16> %tmp4 239} 240 241define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) { 242;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h 243 %tmp3 = icmp sge <8 x i16> %A, %B; 244 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 245 ret <8 x i16> %tmp4 246} 247 248define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) { 249;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 250 %tmp3 = icmp sge <2 x i32> %A, %B; 251 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 252 ret <2 x i32> %tmp4 253} 254 255define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) { 256;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 257 %tmp3 = icmp sge <4 x i32> %A, %B; 258 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 259 ret <4 x i32> %tmp4 260} 261 262define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) { 263;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 264 %tmp3 = icmp sge <2 x i64> %A, %B; 265 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 266 ret <2 x i64> %tmp4 267} 268 269define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) { 270; Using registers other than v0, v1 are possible, but would be odd. 271; LE implemented as GE, so check reversed operands. 272;CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b 273 %tmp3 = icmp sle <8 x i8> %A, %B; 274 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 275 ret <8 x i8> %tmp4 276} 277 278define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) { 279; Using registers other than v0, v1 are possible, but would be odd. 280; LE implemented as GE, so check reversed operands. 281;CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b 282 %tmp3 = icmp sle <16 x i8> %A, %B; 283 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 284 ret <16 x i8> %tmp4 285} 286 287define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) { 288; Using registers other than v0, v1 are possible, but would be odd. 289; LE implemented as GE, so check reversed operands. 290;CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h 291 %tmp3 = icmp sle <4 x i16> %A, %B; 292 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 293 ret <4 x i16> %tmp4 294} 295 296define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) { 297; Using registers other than v0, v1 are possible, but would be odd. 298; LE implemented as GE, so check reversed operands. 299;CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h 300 %tmp3 = icmp sle <8 x i16> %A, %B; 301 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 302 ret <8 x i16> %tmp4 303} 304 305define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) { 306; Using registers other than v0, v1 are possible, but would be odd. 307; LE implemented as GE, so check reversed operands. 308;CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s 309 %tmp3 = icmp sle <2 x i32> %A, %B; 310 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 311 ret <2 x i32> %tmp4 312} 313 314define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) { 315; Using registers other than v0, v1 are possible, but would be odd. 316; LE implemented as GE, so check reversed operands. 317;CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s 318 %tmp3 = icmp sle <4 x i32> %A, %B; 319 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 320 ret <4 x i32> %tmp4 321} 322 323define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) { 324; Using registers other than v0, v1 are possible, but would be odd. 325; LE implemented as GE, so check reversed operands. 326;CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d 327 %tmp3 = icmp sle <2 x i64> %A, %B; 328 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 329 ret <2 x i64> %tmp4 330} 331 332define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) { 333;CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 334 %tmp3 = icmp ugt <8 x i8> %A, %B; 335 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 336 ret <8 x i8> %tmp4 337} 338 339define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) { 340;CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 341 %tmp3 = icmp ugt <16 x i8> %A, %B; 342 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 343 ret <16 x i8> %tmp4 344} 345 346define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) { 347;CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h 348 %tmp3 = icmp ugt <4 x i16> %A, %B; 349 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 350 ret <4 x i16> %tmp4 351} 352 353define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) { 354;CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h 355 %tmp3 = icmp ugt <8 x i16> %A, %B; 356 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 357 ret <8 x i16> %tmp4 358} 359 360define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) { 361;CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 362 %tmp3 = icmp ugt <2 x i32> %A, %B; 363 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 364 ret <2 x i32> %tmp4 365} 366 367define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) { 368;CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 369 %tmp3 = icmp ugt <4 x i32> %A, %B; 370 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 371 ret <4 x i32> %tmp4 372} 373 374define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) { 375;CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 376 %tmp3 = icmp ugt <2 x i64> %A, %B; 377 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 378 ret <2 x i64> %tmp4 379} 380 381define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) { 382; Using registers other than v0, v1 are possible, but would be odd. 383; LO implemented as HI, so check reversed operands. 384;CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b 385 %tmp3 = icmp ult <8 x i8> %A, %B; 386 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 387 ret <8 x i8> %tmp4 388} 389 390define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) { 391; Using registers other than v0, v1 are possible, but would be odd. 392; LO implemented as HI, so check reversed operands. 393;CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b 394 %tmp3 = icmp ult <16 x i8> %A, %B; 395 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 396 ret <16 x i8> %tmp4 397} 398 399define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) { 400; Using registers other than v0, v1 are possible, but would be odd. 401; LO implemented as HI, so check reversed operands. 402;CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h 403 %tmp3 = icmp ult <4 x i16> %A, %B; 404 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 405 ret <4 x i16> %tmp4 406} 407 408define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) { 409; Using registers other than v0, v1 are possible, but would be odd. 410; LO implemented as HI, so check reversed operands. 411;CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h 412 %tmp3 = icmp ult <8 x i16> %A, %B; 413 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 414 ret <8 x i16> %tmp4 415} 416 417define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) { 418; Using registers other than v0, v1 are possible, but would be odd. 419; LO implemented as HI, so check reversed operands. 420;CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s 421 %tmp3 = icmp ult <2 x i32> %A, %B; 422 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 423 ret <2 x i32> %tmp4 424} 425 426define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) { 427; Using registers other than v0, v1 are possible, but would be odd. 428; LO implemented as HI, so check reversed operands. 429;CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s 430 %tmp3 = icmp ult <4 x i32> %A, %B; 431 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 432 ret <4 x i32> %tmp4 433} 434 435define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) { 436; Using registers other than v0, v1 are possible, but would be odd. 437; LO implemented as HI, so check reversed operands. 438;CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d 439 %tmp3 = icmp ult <2 x i64> %A, %B; 440 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 441 ret <2 x i64> %tmp4 442} 443 444define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) { 445;CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 446 %tmp3 = icmp uge <8 x i8> %A, %B; 447 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 448 ret <8 x i8> %tmp4 449} 450 451define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) { 452;CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 453 %tmp3 = icmp uge <16 x i8> %A, %B; 454 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 455 ret <16 x i8> %tmp4 456} 457 458define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) { 459;CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h 460 %tmp3 = icmp uge <4 x i16> %A, %B; 461 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 462 ret <4 x i16> %tmp4 463} 464 465define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) { 466;CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h 467 %tmp3 = icmp uge <8 x i16> %A, %B; 468 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 469 ret <8 x i16> %tmp4 470} 471 472define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) { 473;CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 474 %tmp3 = icmp uge <2 x i32> %A, %B; 475 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 476 ret <2 x i32> %tmp4 477} 478 479define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) { 480;CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 481 %tmp3 = icmp uge <4 x i32> %A, %B; 482 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 483 ret <4 x i32> %tmp4 484} 485 486define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) { 487;CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 488 %tmp3 = icmp uge <2 x i64> %A, %B; 489 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 490 ret <2 x i64> %tmp4 491} 492 493define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) { 494; Using registers other than v0, v1 are possible, but would be odd. 495; LS implemented as HS, so check reversed operands. 496;CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b 497 %tmp3 = icmp ule <8 x i8> %A, %B; 498 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 499 ret <8 x i8> %tmp4 500} 501 502define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) { 503; Using registers other than v0, v1 are possible, but would be odd. 504; LS implemented as HS, so check reversed operands. 505;CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b 506 %tmp3 = icmp ule <16 x i8> %A, %B; 507 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 508 ret <16 x i8> %tmp4 509} 510 511define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) { 512; Using registers other than v0, v1 are possible, but would be odd. 513; LS implemented as HS, so check reversed operands. 514;CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h 515 %tmp3 = icmp ule <4 x i16> %A, %B; 516 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 517 ret <4 x i16> %tmp4 518} 519 520define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) { 521; Using registers other than v0, v1 are possible, but would be odd. 522; LS implemented as HS, so check reversed operands. 523;CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h 524 %tmp3 = icmp ule <8 x i16> %A, %B; 525 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 526 ret <8 x i16> %tmp4 527} 528 529define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) { 530; Using registers other than v0, v1 are possible, but would be odd. 531; LS implemented as HS, so check reversed operands. 532;CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s 533 %tmp3 = icmp ule <2 x i32> %A, %B; 534 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 535 ret <2 x i32> %tmp4 536} 537 538define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) { 539; Using registers other than v0, v1 are possible, but would be odd. 540; LS implemented as HS, so check reversed operands. 541;CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s 542 %tmp3 = icmp ule <4 x i32> %A, %B; 543 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 544 ret <4 x i32> %tmp4 545} 546 547define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) { 548; Using registers other than v0, v1 are possible, but would be odd. 549; LS implemented as HS, so check reversed operands. 550;CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d 551 %tmp3 = icmp ule <2 x i64> %A, %B; 552 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 553 ret <2 x i64> %tmp4 554} 555 556define <8 x i8> @cmtst8xi8(<8 x i8> %A, <8 x i8> %B) { 557;CHECK: cmtst {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 558 %tmp3 = and <8 x i8> %A, %B 559 %tmp4 = icmp ne <8 x i8> %tmp3, zeroinitializer 560 %tmp5 = sext <8 x i1> %tmp4 to <8 x i8> 561 ret <8 x i8> %tmp5 562} 563 564define <16 x i8> @cmtst16xi8(<16 x i8> %A, <16 x i8> %B) { 565;CHECK: cmtst {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 566 %tmp3 = and <16 x i8> %A, %B 567 %tmp4 = icmp ne <16 x i8> %tmp3, zeroinitializer 568 %tmp5 = sext <16 x i1> %tmp4 to <16 x i8> 569 ret <16 x i8> %tmp5 570} 571 572define <4 x i16> @cmtst4xi16(<4 x i16> %A, <4 x i16> %B) { 573;CHECK: cmtst {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h 574 %tmp3 = and <4 x i16> %A, %B 575 %tmp4 = icmp ne <4 x i16> %tmp3, zeroinitializer 576 %tmp5 = sext <4 x i1> %tmp4 to <4 x i16> 577 ret <4 x i16> %tmp5 578} 579 580define <8 x i16> @cmtst8xi16(<8 x i16> %A, <8 x i16> %B) { 581;CHECK: cmtst {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h 582 %tmp3 = and <8 x i16> %A, %B 583 %tmp4 = icmp ne <8 x i16> %tmp3, zeroinitializer 584 %tmp5 = sext <8 x i1> %tmp4 to <8 x i16> 585 ret <8 x i16> %tmp5 586} 587 588define <2 x i32> @cmtst2xi32(<2 x i32> %A, <2 x i32> %B) { 589;CHECK: cmtst {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 590 %tmp3 = and <2 x i32> %A, %B 591 %tmp4 = icmp ne <2 x i32> %tmp3, zeroinitializer 592 %tmp5 = sext <2 x i1> %tmp4 to <2 x i32> 593 ret <2 x i32> %tmp5 594} 595 596define <4 x i32> @cmtst4xi32(<4 x i32> %A, <4 x i32> %B) { 597;CHECK: cmtst {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 598 %tmp3 = and <4 x i32> %A, %B 599 %tmp4 = icmp ne <4 x i32> %tmp3, zeroinitializer 600 %tmp5 = sext <4 x i1> %tmp4 to <4 x i32> 601 ret <4 x i32> %tmp5 602} 603 604define <2 x i64> @cmtst2xi64(<2 x i64> %A, <2 x i64> %B) { 605;CHECK: cmtst {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 606 %tmp3 = and <2 x i64> %A, %B 607 %tmp4 = icmp ne <2 x i64> %tmp3, zeroinitializer 608 %tmp5 = sext <2 x i1> %tmp4 to <2 x i64> 609 ret <2 x i64> %tmp5 610} 611 612 613 614define <8 x i8> @cmeqz8xi8(<8 x i8> %A) { 615;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0 616 %tmp3 = icmp eq <8 x i8> %A, zeroinitializer; 617 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 618 ret <8 x i8> %tmp4 619} 620 621define <16 x i8> @cmeqz16xi8(<16 x i8> %A) { 622;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0 623 %tmp3 = icmp eq <16 x i8> %A, zeroinitializer; 624 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 625 ret <16 x i8> %tmp4 626} 627 628define <4 x i16> @cmeqz4xi16(<4 x i16> %A) { 629;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0 630 %tmp3 = icmp eq <4 x i16> %A, zeroinitializer; 631 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 632 ret <4 x i16> %tmp4 633} 634 635define <8 x i16> @cmeqz8xi16(<8 x i16> %A) { 636;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0 637 %tmp3 = icmp eq <8 x i16> %A, zeroinitializer; 638 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 639 ret <8 x i16> %tmp4 640} 641 642define <2 x i32> @cmeqz2xi32(<2 x i32> %A) { 643;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0 644 %tmp3 = icmp eq <2 x i32> %A, zeroinitializer; 645 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 646 ret <2 x i32> %tmp4 647} 648 649define <4 x i32> @cmeqz4xi32(<4 x i32> %A) { 650;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0 651 %tmp3 = icmp eq <4 x i32> %A, zeroinitializer; 652 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 653 ret <4 x i32> %tmp4 654} 655 656define <2 x i64> @cmeqz2xi64(<2 x i64> %A) { 657;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0 658 %tmp3 = icmp eq <2 x i64> %A, zeroinitializer; 659 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 660 ret <2 x i64> %tmp4 661} 662 663 664define <8 x i8> @cmgez8xi8(<8 x i8> %A) { 665;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0 666 %tmp3 = icmp sge <8 x i8> %A, zeroinitializer; 667 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 668 ret <8 x i8> %tmp4 669} 670 671define <16 x i8> @cmgez16xi8(<16 x i8> %A) { 672;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0 673 %tmp3 = icmp sge <16 x i8> %A, zeroinitializer; 674 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 675 ret <16 x i8> %tmp4 676} 677 678define <4 x i16> @cmgez4xi16(<4 x i16> %A) { 679;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0 680 %tmp3 = icmp sge <4 x i16> %A, zeroinitializer; 681 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 682 ret <4 x i16> %tmp4 683} 684 685define <8 x i16> @cmgez8xi16(<8 x i16> %A) { 686;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0 687 %tmp3 = icmp sge <8 x i16> %A, zeroinitializer; 688 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 689 ret <8 x i16> %tmp4 690} 691 692define <2 x i32> @cmgez2xi32(<2 x i32> %A) { 693;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0 694 %tmp3 = icmp sge <2 x i32> %A, zeroinitializer; 695 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 696 ret <2 x i32> %tmp4 697} 698 699define <4 x i32> @cmgez4xi32(<4 x i32> %A) { 700;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0 701 %tmp3 = icmp sge <4 x i32> %A, zeroinitializer; 702 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 703 ret <4 x i32> %tmp4 704} 705 706define <2 x i64> @cmgez2xi64(<2 x i64> %A) { 707;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0 708 %tmp3 = icmp sge <2 x i64> %A, zeroinitializer; 709 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 710 ret <2 x i64> %tmp4 711} 712 713 714define <8 x i8> @cmgtz8xi8(<8 x i8> %A) { 715;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0 716 %tmp3 = icmp sgt <8 x i8> %A, zeroinitializer; 717 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 718 ret <8 x i8> %tmp4 719} 720 721define <16 x i8> @cmgtz16xi8(<16 x i8> %A) { 722;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0 723 %tmp3 = icmp sgt <16 x i8> %A, zeroinitializer; 724 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 725 ret <16 x i8> %tmp4 726} 727 728define <4 x i16> @cmgtz4xi16(<4 x i16> %A) { 729;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0 730 %tmp3 = icmp sgt <4 x i16> %A, zeroinitializer; 731 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 732 ret <4 x i16> %tmp4 733} 734 735define <8 x i16> @cmgtz8xi16(<8 x i16> %A) { 736;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0 737 %tmp3 = icmp sgt <8 x i16> %A, zeroinitializer; 738 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 739 ret <8 x i16> %tmp4 740} 741 742define <2 x i32> @cmgtz2xi32(<2 x i32> %A) { 743;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0 744 %tmp3 = icmp sgt <2 x i32> %A, zeroinitializer; 745 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 746 ret <2 x i32> %tmp4 747} 748 749define <4 x i32> @cmgtz4xi32(<4 x i32> %A) { 750;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0 751 %tmp3 = icmp sgt <4 x i32> %A, zeroinitializer; 752 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 753 ret <4 x i32> %tmp4 754} 755 756define <2 x i64> @cmgtz2xi64(<2 x i64> %A) { 757;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0 758 %tmp3 = icmp sgt <2 x i64> %A, zeroinitializer; 759 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 760 ret <2 x i64> %tmp4 761} 762 763define <8 x i8> @cmlez8xi8(<8 x i8> %A) { 764;CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0 765 %tmp3 = icmp sle <8 x i8> %A, zeroinitializer; 766 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 767 ret <8 x i8> %tmp4 768} 769 770define <16 x i8> @cmlez16xi8(<16 x i8> %A) { 771;CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0 772 %tmp3 = icmp sle <16 x i8> %A, zeroinitializer; 773 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 774 ret <16 x i8> %tmp4 775} 776 777define <4 x i16> @cmlez4xi16(<4 x i16> %A) { 778;CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0 779 %tmp3 = icmp sle <4 x i16> %A, zeroinitializer; 780 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 781 ret <4 x i16> %tmp4 782} 783 784define <8 x i16> @cmlez8xi16(<8 x i16> %A) { 785;CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0 786 %tmp3 = icmp sle <8 x i16> %A, zeroinitializer; 787 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 788 ret <8 x i16> %tmp4 789} 790 791define <2 x i32> @cmlez2xi32(<2 x i32> %A) { 792;CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0 793 %tmp3 = icmp sle <2 x i32> %A, zeroinitializer; 794 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 795 ret <2 x i32> %tmp4 796} 797 798define <4 x i32> @cmlez4xi32(<4 x i32> %A) { 799;CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0 800 %tmp3 = icmp sle <4 x i32> %A, zeroinitializer; 801 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 802 ret <4 x i32> %tmp4 803} 804 805define <2 x i64> @cmlez2xi64(<2 x i64> %A) { 806;CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0 807 %tmp3 = icmp sle <2 x i64> %A, zeroinitializer; 808 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 809 ret <2 x i64> %tmp4 810} 811 812define <8 x i8> @cmltz8xi8(<8 x i8> %A) { 813;CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0 814 %tmp3 = icmp slt <8 x i8> %A, zeroinitializer; 815 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 816 ret <8 x i8> %tmp4 817} 818 819define <16 x i8> @cmltz16xi8(<16 x i8> %A) { 820;CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0 821 %tmp3 = icmp slt <16 x i8> %A, zeroinitializer; 822 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 823 ret <16 x i8> %tmp4 824} 825 826define <4 x i16> @cmltz4xi16(<4 x i16> %A) { 827;CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0 828 %tmp3 = icmp slt <4 x i16> %A, zeroinitializer; 829 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 830 ret <4 x i16> %tmp4 831} 832 833define <8 x i16> @cmltz8xi16(<8 x i16> %A) { 834;CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0 835 %tmp3 = icmp slt <8 x i16> %A, zeroinitializer; 836 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 837 ret <8 x i16> %tmp4 838} 839 840define <2 x i32> @cmltz2xi32(<2 x i32> %A) { 841;CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0 842 %tmp3 = icmp slt <2 x i32> %A, zeroinitializer; 843 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 844 ret <2 x i32> %tmp4 845} 846 847define <4 x i32> @cmltz4xi32(<4 x i32> %A) { 848;CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0 849 %tmp3 = icmp slt <4 x i32> %A, zeroinitializer; 850 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 851 ret <4 x i32> %tmp4 852} 853 854define <2 x i64> @cmltz2xi64(<2 x i64> %A) { 855;CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0 856 %tmp3 = icmp slt <2 x i64> %A, zeroinitializer; 857 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 858 ret <2 x i64> %tmp4 859} 860 861define <8 x i8> @cmneqz8xi8(<8 x i8> %A) { 862;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0 863;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 864 %tmp3 = icmp ne <8 x i8> %A, zeroinitializer; 865 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 866 ret <8 x i8> %tmp4 867} 868 869define <16 x i8> @cmneqz16xi8(<16 x i8> %A) { 870;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0 871;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 872 %tmp3 = icmp ne <16 x i8> %A, zeroinitializer; 873 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 874 ret <16 x i8> %tmp4 875} 876 877define <4 x i16> @cmneqz4xi16(<4 x i16> %A) { 878;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0 879;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 880 %tmp3 = icmp ne <4 x i16> %A, zeroinitializer; 881 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 882 ret <4 x i16> %tmp4 883} 884 885define <8 x i16> @cmneqz8xi16(<8 x i16> %A) { 886;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0 887;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 888 %tmp3 = icmp ne <8 x i16> %A, zeroinitializer; 889 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 890 ret <8 x i16> %tmp4 891} 892 893define <2 x i32> @cmneqz2xi32(<2 x i32> %A) { 894;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0 895;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 896 %tmp3 = icmp ne <2 x i32> %A, zeroinitializer; 897 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 898 ret <2 x i32> %tmp4 899} 900 901define <4 x i32> @cmneqz4xi32(<4 x i32> %A) { 902;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0 903;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 904 %tmp3 = icmp ne <4 x i32> %A, zeroinitializer; 905 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 906 ret <4 x i32> %tmp4 907} 908 909define <2 x i64> @cmneqz2xi64(<2 x i64> %A) { 910;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0 911;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 912 %tmp3 = icmp ne <2 x i64> %A, zeroinitializer; 913 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 914 ret <2 x i64> %tmp4 915} 916 917define <8 x i8> @cmhsz8xi8(<8 x i8> %A) { 918;CHECK: movi {{v[0-9]+}}.8b, #0x0 919;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 920 %tmp3 = icmp uge <8 x i8> %A, zeroinitializer; 921 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 922 ret <8 x i8> %tmp4 923} 924 925define <16 x i8> @cmhsz16xi8(<16 x i8> %A) { 926;CHECK: movi {{v[0-9]+}}.16b, #0x0 927;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 928 %tmp3 = icmp uge <16 x i8> %A, zeroinitializer; 929 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 930 ret <16 x i8> %tmp4 931} 932 933define <4 x i16> @cmhsz4xi16(<4 x i16> %A) { 934;CHECK: movi {{v[0-9]+}}.8b, #0x0 935;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h 936 %tmp3 = icmp uge <4 x i16> %A, zeroinitializer; 937 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 938 ret <4 x i16> %tmp4 939} 940 941define <8 x i16> @cmhsz8xi16(<8 x i16> %A) { 942;CHECK: movi {{v[0-9]+}}.16b, #0x0 943;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h 944 %tmp3 = icmp uge <8 x i16> %A, zeroinitializer; 945 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 946 ret <8 x i16> %tmp4 947} 948 949define <2 x i32> @cmhsz2xi32(<2 x i32> %A) { 950;CHECK: movi {{v[0-9]+}}.8b, #0x0 951;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 952 %tmp3 = icmp uge <2 x i32> %A, zeroinitializer; 953 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 954 ret <2 x i32> %tmp4 955} 956 957define <4 x i32> @cmhsz4xi32(<4 x i32> %A) { 958;CHECK: movi {{v[0-9]+}}.16b, #0x0 959;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 960 %tmp3 = icmp uge <4 x i32> %A, zeroinitializer; 961 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 962 ret <4 x i32> %tmp4 963} 964 965define <2 x i64> @cmhsz2xi64(<2 x i64> %A) { 966;CHECK: movi {{v[0-9]+}}.16b, #0x0 967;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 968 %tmp3 = icmp uge <2 x i64> %A, zeroinitializer; 969 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 970 ret <2 x i64> %tmp4 971} 972 973 974define <8 x i8> @cmhiz8xi8(<8 x i8> %A) { 975;CHECK: movi {{v[0-9]+}}.8b, #0x0 976;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 977 %tmp3 = icmp ugt <8 x i8> %A, zeroinitializer; 978 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 979 ret <8 x i8> %tmp4 980} 981 982define <16 x i8> @cmhiz16xi8(<16 x i8> %A) { 983;CHECK: movi {{v[0-9]+}}.16b, #0x0 984;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 985 %tmp3 = icmp ugt <16 x i8> %A, zeroinitializer; 986 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 987 ret <16 x i8> %tmp4 988} 989 990define <4 x i16> @cmhiz4xi16(<4 x i16> %A) { 991;CHECK: movi {{v[0-9]+}}.8b, #0x0 992;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h 993 %tmp3 = icmp ugt <4 x i16> %A, zeroinitializer; 994 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 995 ret <4 x i16> %tmp4 996} 997 998define <8 x i16> @cmhiz8xi16(<8 x i16> %A) { 999;CHECK: movi {{v[0-9]+}}.16b, #0x0 1000;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h 1001 %tmp3 = icmp ugt <8 x i16> %A, zeroinitializer; 1002 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 1003 ret <8 x i16> %tmp4 1004} 1005 1006define <2 x i32> @cmhiz2xi32(<2 x i32> %A) { 1007;CHECK: movi {{v[0-9]+}}.8b, #0x0 1008;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 1009 %tmp3 = icmp ugt <2 x i32> %A, zeroinitializer; 1010 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1011 ret <2 x i32> %tmp4 1012} 1013 1014define <4 x i32> @cmhiz4xi32(<4 x i32> %A) { 1015;CHECK: movi {{v[0-9]+}}.16b, #0x0 1016;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 1017 %tmp3 = icmp ugt <4 x i32> %A, zeroinitializer; 1018 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1019 ret <4 x i32> %tmp4 1020} 1021 1022define <2 x i64> @cmhiz2xi64(<2 x i64> %A) { 1023;CHECK: movi {{v[0-9]+}}.16b, #0x0 1024;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 1025 %tmp3 = icmp ugt <2 x i64> %A, zeroinitializer; 1026 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1027 ret <2 x i64> %tmp4 1028} 1029 1030define <8 x i8> @cmlsz8xi8(<8 x i8> %A) { 1031; Using registers other than v0, v1 are possible, but would be odd. 1032; LS implemented as HS, so check reversed operands. 1033;CHECK: movi v1.8b, #0x0 1034;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b 1035 %tmp3 = icmp ule <8 x i8> %A, zeroinitializer; 1036 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 1037 ret <8 x i8> %tmp4 1038} 1039 1040define <16 x i8> @cmlsz16xi8(<16 x i8> %A) { 1041; Using registers other than v0, v1 are possible, but would be odd. 1042; LS implemented as HS, so check reversed operands. 1043;CHECK: movi v1.16b, #0x0 1044;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b 1045 %tmp3 = icmp ule <16 x i8> %A, zeroinitializer; 1046 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 1047 ret <16 x i8> %tmp4 1048} 1049 1050define <4 x i16> @cmlsz4xi16(<4 x i16> %A) { 1051; Using registers other than v0, v1 are possible, but would be odd. 1052; LS implemented as HS, so check reversed operands. 1053;CHECK: movi v1.8b, #0x0 1054;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h 1055 %tmp3 = icmp ule <4 x i16> %A, zeroinitializer; 1056 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 1057 ret <4 x i16> %tmp4 1058} 1059 1060define <8 x i16> @cmlsz8xi16(<8 x i16> %A) { 1061; Using registers other than v0, v1 are possible, but would be odd. 1062; LS implemented as HS, so check reversed operands. 1063;CHECK: movi v1.16b, #0x0 1064;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h 1065 %tmp3 = icmp ule <8 x i16> %A, zeroinitializer; 1066 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 1067 ret <8 x i16> %tmp4 1068} 1069 1070define <2 x i32> @cmlsz2xi32(<2 x i32> %A) { 1071; Using registers other than v0, v1 are possible, but would be odd. 1072; LS implemented as HS, so check reversed operands. 1073;CHECK: movi v1.8b, #0x0 1074;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s 1075 %tmp3 = icmp ule <2 x i32> %A, zeroinitializer; 1076 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1077 ret <2 x i32> %tmp4 1078} 1079 1080define <4 x i32> @cmlsz4xi32(<4 x i32> %A) { 1081; Using registers other than v0, v1 are possible, but would be odd. 1082; LS implemented as HS, so check reversed operands. 1083;CHECK: movi v1.16b, #0x0 1084;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s 1085 %tmp3 = icmp ule <4 x i32> %A, zeroinitializer; 1086 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1087 ret <4 x i32> %tmp4 1088} 1089 1090define <2 x i64> @cmlsz2xi64(<2 x i64> %A) { 1091; Using registers other than v0, v1 are possible, but would be odd. 1092; LS implemented as HS, so check reversed operands. 1093;CHECK: movi v1.16b, #0x0 1094;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d 1095 %tmp3 = icmp ule <2 x i64> %A, zeroinitializer; 1096 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1097 ret <2 x i64> %tmp4 1098} 1099 1100define <8 x i8> @cmloz8xi8(<8 x i8> %A) { 1101; Using registers other than v0, v1 are possible, but would be odd. 1102; LO implemented as HI, so check reversed operands. 1103;CHECK: movi v1.8b, #0x0 1104;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v1.8b, {{v[0-9]+}}.8b 1105 %tmp3 = icmp ult <8 x i8> %A, zeroinitializer; 1106 %tmp4 = sext <8 x i1> %tmp3 to <8 x i8> 1107 ret <8 x i8> %tmp4 1108} 1109 1110define <16 x i8> @cmloz16xi8(<16 x i8> %A) { 1111; Using registers other than v0, v1 are possible, but would be odd. 1112; LO implemented as HI, so check reversed operands. 1113;CHECK: movi v1.16b, #0x0 1114;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b 1115 %tmp3 = icmp ult <16 x i8> %A, zeroinitializer; 1116 %tmp4 = sext <16 x i1> %tmp3 to <16 x i8> 1117 ret <16 x i8> %tmp4 1118} 1119 1120define <4 x i16> @cmloz4xi16(<4 x i16> %A) { 1121; Using registers other than v0, v1 are possible, but would be odd. 1122; LO implemented as HI, so check reversed operands. 1123;CHECK: movi v1.8b, #0x0 1124;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h 1125 %tmp3 = icmp ult <4 x i16> %A, zeroinitializer; 1126 %tmp4 = sext <4 x i1> %tmp3 to <4 x i16> 1127 ret <4 x i16> %tmp4 1128} 1129 1130define <8 x i16> @cmloz8xi16(<8 x i16> %A) { 1131; Using registers other than v0, v1 are possible, but would be odd. 1132; LO implemented as HI, so check reversed operands. 1133;CHECK: movi v1.16b, #0x0 1134;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h 1135 %tmp3 = icmp ult <8 x i16> %A, zeroinitializer; 1136 %tmp4 = sext <8 x i1> %tmp3 to <8 x i16> 1137 ret <8 x i16> %tmp4 1138} 1139 1140define <2 x i32> @cmloz2xi32(<2 x i32> %A) { 1141; Using registers other than v0, v1 are possible, but would be odd. 1142; LO implemented as HI, so check reversed operands. 1143;CHECK: movi v1.8b, #0x0 1144;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s 1145 %tmp3 = icmp ult <2 x i32> %A, zeroinitializer; 1146 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1147 ret <2 x i32> %tmp4 1148} 1149 1150define <4 x i32> @cmloz4xi32(<4 x i32> %A) { 1151; Using registers other than v0, v1 are possible, but would be odd. 1152; LO implemented as HI, so check reversed operands. 1153;CHECK: movi v1.16b, #0x0 1154;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s 1155 %tmp3 = icmp ult <4 x i32> %A, zeroinitializer; 1156 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1157 ret <4 x i32> %tmp4 1158} 1159 1160define <2 x i64> @cmloz2xi64(<2 x i64> %A) { 1161; Using registers other than v0, v1 are possible, but would be odd. 1162; LO implemented as HI, so check reversed operands. 1163;CHECK: movi v1.16b, #0x0 1164;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d 1165 %tmp3 = icmp ult <2 x i64> %A, zeroinitializer; 1166 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1167 ret <2 x i64> %tmp4 1168} 1169 1170 1171define <2 x i32> @fcmoeq2xfloat(<2 x float> %A, <2 x float> %B) { 1172;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 1173 %tmp3 = fcmp oeq <2 x float> %A, %B 1174 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1175 ret <2 x i32> %tmp4 1176} 1177 1178define <4 x i32> @fcmoeq4xfloat(<4 x float> %A, <4 x float> %B) { 1179;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 1180 %tmp3 = fcmp oeq <4 x float> %A, %B 1181 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1182 ret <4 x i32> %tmp4 1183} 1184define <2 x i64> @fcmoeq2xdouble(<2 x double> %A, <2 x double> %B) { 1185;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 1186 %tmp3 = fcmp oeq <2 x double> %A, %B 1187 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1188 ret <2 x i64> %tmp4 1189} 1190 1191define <2 x i32> @fcmoge2xfloat(<2 x float> %A, <2 x float> %B) { 1192;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 1193 %tmp3 = fcmp oge <2 x float> %A, %B 1194 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1195 ret <2 x i32> %tmp4 1196} 1197 1198define <4 x i32> @fcmoge4xfloat(<4 x float> %A, <4 x float> %B) { 1199;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 1200 %tmp3 = fcmp oge <4 x float> %A, %B 1201 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1202 ret <4 x i32> %tmp4 1203} 1204define <2 x i64> @fcmoge2xdouble(<2 x double> %A, <2 x double> %B) { 1205;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 1206 %tmp3 = fcmp oge <2 x double> %A, %B 1207 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1208 ret <2 x i64> %tmp4 1209} 1210 1211define <2 x i32> @fcmogt2xfloat(<2 x float> %A, <2 x float> %B) { 1212;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s 1213 %tmp3 = fcmp ogt <2 x float> %A, %B 1214 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1215 ret <2 x i32> %tmp4 1216} 1217 1218define <4 x i32> @fcmogt4xfloat(<4 x float> %A, <4 x float> %B) { 1219;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s 1220 %tmp3 = fcmp ogt <4 x float> %A, %B 1221 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1222 ret <4 x i32> %tmp4 1223} 1224define <2 x i64> @fcmogt2xdouble(<2 x double> %A, <2 x double> %B) { 1225;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d 1226 %tmp3 = fcmp ogt <2 x double> %A, %B 1227 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1228 ret <2 x i64> %tmp4 1229} 1230 1231define <2 x i32> @fcmole2xfloat(<2 x float> %A, <2 x float> %B) { 1232; Using registers other than v0, v1 are possible, but would be odd. 1233; OLE implemented as OGE, so check reversed operands. 1234;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s 1235 %tmp3 = fcmp ole <2 x float> %A, %B 1236 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1237 ret <2 x i32> %tmp4 1238} 1239 1240define <4 x i32> @fcmole4xfloat(<4 x float> %A, <4 x float> %B) { 1241; Using registers other than v0, v1 are possible, but would be odd. 1242; OLE implemented as OGE, so check reversed operands. 1243;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s 1244 %tmp3 = fcmp ole <4 x float> %A, %B 1245 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1246 ret <4 x i32> %tmp4 1247} 1248define <2 x i64> @fcmole2xdouble(<2 x double> %A, <2 x double> %B) { 1249; Using registers other than v0, v1 are possible, but would be odd. 1250; OLE implemented as OGE, so check reversed operands. 1251;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d 1252 %tmp3 = fcmp ole <2 x double> %A, %B 1253 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1254 ret <2 x i64> %tmp4 1255} 1256 1257define <2 x i32> @fcmolt2xfloat(<2 x float> %A, <2 x float> %B) { 1258; Using registers other than v0, v1 are possible, but would be odd. 1259; OLE implemented as OGE, so check reversed operands. 1260;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s 1261 %tmp3 = fcmp olt <2 x float> %A, %B 1262 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1263 ret <2 x i32> %tmp4 1264} 1265 1266define <4 x i32> @fcmolt4xfloat(<4 x float> %A, <4 x float> %B) { 1267; Using registers other than v0, v1 are possible, but would be odd. 1268; OLE implemented as OGE, so check reversed operands. 1269;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s 1270 %tmp3 = fcmp olt <4 x float> %A, %B 1271 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1272 ret <4 x i32> %tmp4 1273} 1274define <2 x i64> @fcmolt2xdouble(<2 x double> %A, <2 x double> %B) { 1275; Using registers other than v0, v1 are possible, but would be odd. 1276; OLE implemented as OGE, so check reversed operands. 1277;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d 1278 %tmp3 = fcmp olt <2 x double> %A, %B 1279 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1280 ret <2 x i64> %tmp4 1281} 1282 1283define <2 x i32> @fcmone2xfloat(<2 x float> %A, <2 x float> %B) { 1284; Using registers other than v0, v1 are possible, but would be odd. 1285; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands 1286;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s 1287;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s 1288;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1289 %tmp3 = fcmp one <2 x float> %A, %B 1290 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1291 ret <2 x i32> %tmp4 1292} 1293 1294define <4 x i32> @fcmone4xfloat(<4 x float> %A, <4 x float> %B) { 1295; Using registers other than v0, v1 are possible, but would be odd. 1296; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands 1297;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s 1298;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s 1299;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1300 %tmp3 = fcmp one <4 x float> %A, %B 1301 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1302 ret <4 x i32> %tmp4 1303} 1304define <2 x i64> @fcmone2xdouble(<2 x double> %A, <2 x double> %B) { 1305; Using registers other than v0, v1 are possible, but would be odd. 1306; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands 1307;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d 1308;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d 1309;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1310; todo check reversed operands 1311 %tmp3 = fcmp one <2 x double> %A, %B 1312 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1313 ret <2 x i64> %tmp4 1314} 1315 1316 1317define <2 x i32> @fcmord2xfloat(<2 x float> %A, <2 x float> %B) { 1318; Using registers other than v0, v1 are possible, but would be odd. 1319; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands. 1320;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s 1321;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s 1322;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1323 %tmp3 = fcmp ord <2 x float> %A, %B 1324 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1325 ret <2 x i32> %tmp4 1326} 1327 1328 1329define <4 x i32> @fcmord4xfloat(<4 x float> %A, <4 x float> %B) { 1330; Using registers other than v0, v1 are possible, but would be odd. 1331; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands. 1332;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s 1333;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s 1334;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1335 %tmp3 = fcmp ord <4 x float> %A, %B 1336 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1337 ret <4 x i32> %tmp4 1338} 1339 1340define <2 x i64> @fcmord2xdouble(<2 x double> %A, <2 x double> %B) { 1341; Using registers other than v0, v1 are possible, but would be odd. 1342; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands. 1343;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d 1344;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d 1345;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1346 %tmp3 = fcmp ord <2 x double> %A, %B 1347 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1348 ret <2 x i64> %tmp4 1349} 1350 1351 1352define <2 x i32> @fcmuno2xfloat(<2 x float> %A, <2 x float> %B) { 1353; Using registers other than v0, v1 are possible, but would be odd. 1354; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands. 1355;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s 1356;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s 1357;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1358;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1359 %tmp3 = fcmp uno <2 x float> %A, %B 1360 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1361 ret <2 x i32> %tmp4 1362} 1363 1364define <4 x i32> @fcmuno4xfloat(<4 x float> %A, <4 x float> %B) { 1365; Using registers other than v0, v1 are possible, but would be odd. 1366; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands. 1367;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s 1368;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s 1369;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1370;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1371 %tmp3 = fcmp uno <4 x float> %A, %B 1372 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1373 ret <4 x i32> %tmp4 1374} 1375 1376define <2 x i64> @fcmuno2xdouble(<2 x double> %A, <2 x double> %B) { 1377; Using registers other than v0, v1 are possible, but would be odd. 1378; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands. 1379;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d 1380;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d 1381;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1382;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1383 %tmp3 = fcmp uno <2 x double> %A, %B 1384 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1385 ret <2 x i64> %tmp4 1386} 1387 1388define <2 x i32> @fcmueq2xfloat(<2 x float> %A, <2 x float> %B) { 1389; Using registers other than v0, v1 are possible, but would be odd. 1390; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands 1391;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s 1392;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s 1393;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1394;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1395 %tmp3 = fcmp ueq <2 x float> %A, %B 1396 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1397 ret <2 x i32> %tmp4 1398} 1399 1400define <4 x i32> @fcmueq4xfloat(<4 x float> %A, <4 x float> %B) { 1401; Using registers other than v0, v1 are possible, but would be odd. 1402; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands 1403;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s 1404;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s 1405;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1406;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1407 %tmp3 = fcmp ueq <4 x float> %A, %B 1408 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1409 ret <4 x i32> %tmp4 1410} 1411 1412define <2 x i64> @fcmueq2xdouble(<2 x double> %A, <2 x double> %B) { 1413; Using registers other than v0, v1 are possible, but would be odd. 1414; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands 1415;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d 1416;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d 1417;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1418;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1419 %tmp3 = fcmp ueq <2 x double> %A, %B 1420 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1421 ret <2 x i64> %tmp4 1422} 1423 1424define <2 x i32> @fcmuge2xfloat(<2 x float> %A, <2 x float> %B) { 1425; Using registers other than v0, v1 are possible, but would be odd. 1426; UGE = ULE with swapped operands, ULE implemented as !OGT. 1427;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s 1428;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1429 %tmp3 = fcmp uge <2 x float> %A, %B 1430 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1431 ret <2 x i32> %tmp4 1432} 1433 1434define <4 x i32> @fcmuge4xfloat(<4 x float> %A, <4 x float> %B) { 1435; Using registers other than v0, v1 are possible, but would be odd. 1436; UGE = ULE with swapped operands, ULE implemented as !OGT. 1437;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s 1438;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1439 %tmp3 = fcmp uge <4 x float> %A, %B 1440 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1441 ret <4 x i32> %tmp4 1442} 1443 1444define <2 x i64> @fcmuge2xdouble(<2 x double> %A, <2 x double> %B) { 1445; Using registers other than v0, v1 are possible, but would be odd. 1446; UGE = ULE with swapped operands, ULE implemented as !OGT. 1447;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d 1448;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1449 %tmp3 = fcmp uge <2 x double> %A, %B 1450 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1451 ret <2 x i64> %tmp4 1452} 1453 1454define <2 x i32> @fcmugt2xfloat(<2 x float> %A, <2 x float> %B) { 1455; Using registers other than v0, v1 are possible, but would be odd. 1456; UGT = ULT with swapped operands, ULT implemented as !OGE. 1457;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s 1458;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1459 %tmp3 = fcmp ugt <2 x float> %A, %B 1460 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1461 ret <2 x i32> %tmp4 1462} 1463 1464define <4 x i32> @fcmugt4xfloat(<4 x float> %A, <4 x float> %B) { 1465; Using registers other than v0, v1 are possible, but would be odd. 1466; UGT = ULT with swapped operands, ULT implemented as !OGE. 1467;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s 1468;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1469 %tmp3 = fcmp ugt <4 x float> %A, %B 1470 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1471 ret <4 x i32> %tmp4 1472} 1473define <2 x i64> @fcmugt2xdouble(<2 x double> %A, <2 x double> %B) { 1474;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d 1475;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1476 %tmp3 = fcmp ugt <2 x double> %A, %B 1477 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1478 ret <2 x i64> %tmp4 1479} 1480 1481define <2 x i32> @fcmule2xfloat(<2 x float> %A, <2 x float> %B) { 1482; Using registers other than v0, v1 are possible, but would be odd. 1483; ULE implemented as !OGT. 1484;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s 1485;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1486 %tmp3 = fcmp ule <2 x float> %A, %B 1487 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1488 ret <2 x i32> %tmp4 1489} 1490 1491define <4 x i32> @fcmule4xfloat(<4 x float> %A, <4 x float> %B) { 1492; Using registers other than v0, v1 are possible, but would be odd. 1493; ULE implemented as !OGT. 1494;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s 1495;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1496 %tmp3 = fcmp ule <4 x float> %A, %B 1497 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1498 ret <4 x i32> %tmp4 1499} 1500define <2 x i64> @fcmule2xdouble(<2 x double> %A, <2 x double> %B) { 1501; Using registers other than v0, v1 are possible, but would be odd. 1502; ULE implemented as !OGT. 1503;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d 1504;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1505 %tmp3 = fcmp ule <2 x double> %A, %B 1506 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1507 ret <2 x i64> %tmp4 1508} 1509 1510define <2 x i32> @fcmult2xfloat(<2 x float> %A, <2 x float> %B) { 1511; Using registers other than v0, v1 are possible, but would be odd. 1512; ULT implemented as !OGE. 1513;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s 1514;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1515 %tmp3 = fcmp ult <2 x float> %A, %B 1516 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1517 ret <2 x i32> %tmp4 1518} 1519 1520define <4 x i32> @fcmult4xfloat(<4 x float> %A, <4 x float> %B) { 1521; Using registers other than v0, v1 are possible, but would be odd. 1522; ULT implemented as !OGE. 1523;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s 1524;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1525 %tmp3 = fcmp ult <4 x float> %A, %B 1526 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1527 ret <4 x i32> %tmp4 1528} 1529define <2 x i64> @fcmult2xdouble(<2 x double> %A, <2 x double> %B) { 1530; Using registers other than v0, v1 are possible, but would be odd. 1531; ULT implemented as !OGE. 1532;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d 1533;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1534 %tmp3 = fcmp ult <2 x double> %A, %B 1535 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1536 ret <2 x i64> %tmp4 1537} 1538 1539define <2 x i32> @fcmune2xfloat(<2 x float> %A, <2 x float> %B) { 1540; Using registers other than v0, v1 are possible, but would be odd. 1541; UNE = !OEQ. 1542;CHECK: fcmeq {{v[0-9]+}}.2s, v0.2s, v1.2s 1543;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1544 %tmp3 = fcmp une <2 x float> %A, %B 1545 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1546 ret <2 x i32> %tmp4 1547} 1548 1549define <4 x i32> @fcmune4xfloat(<4 x float> %A, <4 x float> %B) { 1550; Using registers other than v0, v1 are possible, but would be odd. 1551; UNE = !OEQ. 1552;CHECK: fcmeq {{v[0-9]+}}.4s, v0.4s, v1.4s 1553;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1554 %tmp3 = fcmp une <4 x float> %A, %B 1555 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1556 ret <4 x i32> %tmp4 1557} 1558define <2 x i64> @fcmune2xdouble(<2 x double> %A, <2 x double> %B) { 1559; Using registers other than v0, v1 are possible, but would be odd. 1560; UNE = !OEQ. 1561;CHECK: fcmeq {{v[0-9]+}}.2d, v0.2d, v1.2d 1562;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1563 %tmp3 = fcmp une <2 x double> %A, %B 1564 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1565 ret <2 x i64> %tmp4 1566} 1567 1568define <2 x i32> @fcmoeqz2xfloat(<2 x float> %A) { 1569;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1570 %tmp3 = fcmp oeq <2 x float> %A, zeroinitializer 1571 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1572 ret <2 x i32> %tmp4 1573} 1574 1575define <4 x i32> @fcmoeqz4xfloat(<4 x float> %A) { 1576;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1577 %tmp3 = fcmp oeq <4 x float> %A, zeroinitializer 1578 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1579 ret <4 x i32> %tmp4 1580} 1581define <2 x i64> @fcmoeqz2xdouble(<2 x double> %A) { 1582;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1583 %tmp3 = fcmp oeq <2 x double> %A, zeroinitializer 1584 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1585 ret <2 x i64> %tmp4 1586} 1587 1588 1589define <2 x i32> @fcmogez2xfloat(<2 x float> %A) { 1590;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1591 %tmp3 = fcmp oge <2 x float> %A, zeroinitializer 1592 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1593 ret <2 x i32> %tmp4 1594} 1595 1596define <4 x i32> @fcmogez4xfloat(<4 x float> %A) { 1597;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1598 %tmp3 = fcmp oge <4 x float> %A, zeroinitializer 1599 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1600 ret <4 x i32> %tmp4 1601} 1602define <2 x i64> @fcmogez2xdouble(<2 x double> %A) { 1603;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1604 %tmp3 = fcmp oge <2 x double> %A, zeroinitializer 1605 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1606 ret <2 x i64> %tmp4 1607} 1608 1609define <2 x i32> @fcmogtz2xfloat(<2 x float> %A) { 1610;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1611 %tmp3 = fcmp ogt <2 x float> %A, zeroinitializer 1612 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1613 ret <2 x i32> %tmp4 1614} 1615 1616define <4 x i32> @fcmogtz4xfloat(<4 x float> %A) { 1617;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1618 %tmp3 = fcmp ogt <4 x float> %A, zeroinitializer 1619 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1620 ret <4 x i32> %tmp4 1621} 1622define <2 x i64> @fcmogtz2xdouble(<2 x double> %A) { 1623;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1624 %tmp3 = fcmp ogt <2 x double> %A, zeroinitializer 1625 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1626 ret <2 x i64> %tmp4 1627} 1628 1629define <2 x i32> @fcmoltz2xfloat(<2 x float> %A) { 1630;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1631 %tmp3 = fcmp olt <2 x float> %A, zeroinitializer 1632 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1633 ret <2 x i32> %tmp4 1634} 1635 1636define <4 x i32> @fcmoltz4xfloat(<4 x float> %A) { 1637;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1638 %tmp3 = fcmp olt <4 x float> %A, zeroinitializer 1639 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1640 ret <4 x i32> %tmp4 1641} 1642 1643define <2 x i64> @fcmoltz2xdouble(<2 x double> %A) { 1644;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1645 %tmp3 = fcmp olt <2 x double> %A, zeroinitializer 1646 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1647 ret <2 x i64> %tmp4 1648} 1649 1650define <2 x i32> @fcmolez2xfloat(<2 x float> %A) { 1651;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1652 %tmp3 = fcmp ole <2 x float> %A, zeroinitializer 1653 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1654 ret <2 x i32> %tmp4 1655} 1656 1657define <4 x i32> @fcmolez4xfloat(<4 x float> %A) { 1658;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1659 %tmp3 = fcmp ole <4 x float> %A, zeroinitializer 1660 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1661 ret <4 x i32> %tmp4 1662} 1663 1664define <2 x i64> @fcmolez2xdouble(<2 x double> %A) { 1665;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1666 %tmp3 = fcmp ole <2 x double> %A, zeroinitializer 1667 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1668 ret <2 x i64> %tmp4 1669} 1670 1671define <2 x i32> @fcmonez2xfloat(<2 x float> %A) { 1672; ONE with zero = OLT | OGT 1673;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1674;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1675;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1676 %tmp3 = fcmp one <2 x float> %A, zeroinitializer 1677 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1678 ret <2 x i32> %tmp4 1679} 1680 1681define <4 x i32> @fcmonez4xfloat(<4 x float> %A) { 1682; ONE with zero = OLT | OGT 1683;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1684;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1685;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1686 %tmp3 = fcmp one <4 x float> %A, zeroinitializer 1687 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1688 ret <4 x i32> %tmp4 1689} 1690define <2 x i64> @fcmonez2xdouble(<2 x double> %A) { 1691; ONE with zero = OLT | OGT 1692;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1693;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1694;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1695 %tmp3 = fcmp one <2 x double> %A, zeroinitializer 1696 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1697 ret <2 x i64> %tmp4 1698} 1699 1700define <2 x i32> @fcmordz2xfloat(<2 x float> %A) { 1701; ORD with zero = OLT | OGE 1702;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1703;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1704;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1705 %tmp3 = fcmp ord <2 x float> %A, zeroinitializer 1706 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1707 ret <2 x i32> %tmp4 1708} 1709 1710define <4 x i32> @fcmordz4xfloat(<4 x float> %A) { 1711; ORD with zero = OLT | OGE 1712;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1713;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1714;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1715 %tmp3 = fcmp ord <4 x float> %A, zeroinitializer 1716 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1717 ret <4 x i32> %tmp4 1718} 1719define <2 x i64> @fcmordz2xdouble(<2 x double> %A) { 1720; ORD with zero = OLT | OGE 1721;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1722;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1723;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1724 %tmp3 = fcmp ord <2 x double> %A, zeroinitializer 1725 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1726 ret <2 x i64> %tmp4 1727} 1728 1729define <2 x i32> @fcmueqz2xfloat(<2 x float> %A) { 1730; UEQ with zero = !ONE = !(OLT |OGT) 1731;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1732;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1733;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1734;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1735 %tmp3 = fcmp ueq <2 x float> %A, zeroinitializer 1736 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1737 ret <2 x i32> %tmp4 1738} 1739 1740define <4 x i32> @fcmueqz4xfloat(<4 x float> %A) { 1741; UEQ with zero = !ONE = !(OLT |OGT) 1742;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1743;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1744;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1745;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1746 %tmp3 = fcmp ueq <4 x float> %A, zeroinitializer 1747 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1748 ret <4 x i32> %tmp4 1749} 1750 1751define <2 x i64> @fcmueqz2xdouble(<2 x double> %A) { 1752; UEQ with zero = !ONE = !(OLT |OGT) 1753;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1754;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1755;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1756;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1757 %tmp3 = fcmp ueq <2 x double> %A, zeroinitializer 1758 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1759 ret <2 x i64> %tmp4 1760} 1761 1762define <2 x i32> @fcmugez2xfloat(<2 x float> %A) { 1763; UGE with zero = !OLT 1764;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1765;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1766 %tmp3 = fcmp uge <2 x float> %A, zeroinitializer 1767 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1768 ret <2 x i32> %tmp4 1769} 1770 1771define <4 x i32> @fcmugez4xfloat(<4 x float> %A) { 1772; UGE with zero = !OLT 1773;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1774;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1775 %tmp3 = fcmp uge <4 x float> %A, zeroinitializer 1776 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1777 ret <4 x i32> %tmp4 1778} 1779define <2 x i64> @fcmugez2xdouble(<2 x double> %A) { 1780; UGE with zero = !OLT 1781;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1782;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1783 %tmp3 = fcmp uge <2 x double> %A, zeroinitializer 1784 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1785 ret <2 x i64> %tmp4 1786} 1787 1788define <2 x i32> @fcmugtz2xfloat(<2 x float> %A) { 1789; UGT with zero = !OLE 1790;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1791;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1792 %tmp3 = fcmp ugt <2 x float> %A, zeroinitializer 1793 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1794 ret <2 x i32> %tmp4 1795} 1796 1797define <4 x i32> @fcmugtz4xfloat(<4 x float> %A) { 1798; UGT with zero = !OLE 1799;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1800;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1801 %tmp3 = fcmp ugt <4 x float> %A, zeroinitializer 1802 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1803 ret <4 x i32> %tmp4 1804} 1805define <2 x i64> @fcmugtz2xdouble(<2 x double> %A) { 1806; UGT with zero = !OLE 1807;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1808;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1809 %tmp3 = fcmp ugt <2 x double> %A, zeroinitializer 1810 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1811 ret <2 x i64> %tmp4 1812} 1813 1814define <2 x i32> @fcmultz2xfloat(<2 x float> %A) { 1815; ULT with zero = !OGE 1816;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1817;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1818 %tmp3 = fcmp ult <2 x float> %A, zeroinitializer 1819 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1820 ret <2 x i32> %tmp4 1821} 1822 1823define <4 x i32> @fcmultz4xfloat(<4 x float> %A) { 1824;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1825;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1826 %tmp3 = fcmp ult <4 x float> %A, zeroinitializer 1827 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1828 ret <4 x i32> %tmp4 1829} 1830 1831define <2 x i64> @fcmultz2xdouble(<2 x double> %A) { 1832;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1833;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1834 %tmp3 = fcmp ult <2 x double> %A, zeroinitializer 1835 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1836 ret <2 x i64> %tmp4 1837} 1838 1839 1840define <2 x i32> @fcmulez2xfloat(<2 x float> %A) { 1841; ULE with zero = !OGT 1842;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1843;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1844 %tmp3 = fcmp ule <2 x float> %A, zeroinitializer 1845 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1846 ret <2 x i32> %tmp4 1847} 1848 1849define <4 x i32> @fcmulez4xfloat(<4 x float> %A) { 1850; ULE with zero = !OGT 1851;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1852;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1853 %tmp3 = fcmp ule <4 x float> %A, zeroinitializer 1854 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1855 ret <4 x i32> %tmp4 1856} 1857 1858define <2 x i64> @fcmulez2xdouble(<2 x double> %A) { 1859; ULE with zero = !OGT 1860;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1861;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1862 %tmp3 = fcmp ule <2 x double> %A, zeroinitializer 1863 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1864 ret <2 x i64> %tmp4 1865} 1866 1867define <2 x i32> @fcmunez2xfloat(<2 x float> %A) { 1868; UNE with zero = !OEQ with zero 1869;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1870;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1871 %tmp3 = fcmp une <2 x float> %A, zeroinitializer 1872 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1873 ret <2 x i32> %tmp4 1874} 1875 1876define <4 x i32> @fcmunez4xfloat(<4 x float> %A) { 1877; UNE with zero = !OEQ with zero 1878;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1879;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1880 %tmp3 = fcmp une <4 x float> %A, zeroinitializer 1881 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1882 ret <4 x i32> %tmp4 1883} 1884define <2 x i64> @fcmunez2xdouble(<2 x double> %A) { 1885; UNE with zero = !OEQ with zero 1886;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1887;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1888 %tmp3 = fcmp une <2 x double> %A, zeroinitializer 1889 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1890 ret <2 x i64> %tmp4 1891} 1892 1893 1894define <2 x i32> @fcmunoz2xfloat(<2 x float> %A) { 1895; UNO with zero = !ORD = !(OLT | OGE) 1896;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1897;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0 1898;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1899;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b 1900 %tmp3 = fcmp uno <2 x float> %A, zeroinitializer 1901 %tmp4 = sext <2 x i1> %tmp3 to <2 x i32> 1902 ret <2 x i32> %tmp4 1903} 1904 1905define <4 x i32> @fcmunoz4xfloat(<4 x float> %A) { 1906; UNO with zero = !ORD = !(OLT | OGE) 1907;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1908;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0 1909;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1910;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1911 %tmp3 = fcmp uno <4 x float> %A, zeroinitializer 1912 %tmp4 = sext <4 x i1> %tmp3 to <4 x i32> 1913 ret <4 x i32> %tmp4 1914} 1915 1916define <2 x i64> @fcmunoz2xdouble(<2 x double> %A) { 1917; UNO with zero = !ORD = !(OLT | OGE) 1918;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1919;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0 1920;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1921;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b 1922 %tmp3 = fcmp uno <2 x double> %A, zeroinitializer 1923 %tmp4 = sext <2 x i1> %tmp3 to <2 x i64> 1924 ret <2 x i64> %tmp4 1925 1926} 1927