/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */

/* Tests for the SVE2 ACLE svqrshl intrinsics on svint32_t, in their
   merging (_m), zeroing (_z) and "don't care" (_x) predicated forms.
   Each "**" comment block below is an expected assembly body that the
   check-function-bodies directive above matches against the compiler
   output -- those comments are test data, not documentation, and must
   stay in sync with the expected code generation.  */

#include "test_sve_acle.h"

/*
** qrshl_s32_m_tied1:
**	sqrshl	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z (qrshl_s32_m_tied1, svint32_t, svint32_t,
	     z0 = svqrshl_s32_m (p0, z0, z4),
	     z0 = svqrshl_m (p0, z0, z4))

/*
** qrshl_s32_m_tied2:
**	mov	(z[0-9]+)\.d, z0\.d
**	movprfx	z0, z4
**	sqrshl	z0\.s, p0/m, z0\.s, \1\.s
**	ret
*/
TEST_DUAL_Z_REV (qrshl_s32_m_tied2, svint32_t, svint32_t,
		 z0_res = svqrshl_s32_m (p0, z4, z0),
		 z0_res = svqrshl_m (p0, z4, z0))

/*
** qrshl_s32_m_untied:
**	movprfx	z0, z1
**	sqrshl	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z (qrshl_s32_m_untied, svint32_t, svint32_t,
	     z0 = svqrshl_s32_m (p0, z1, z4),
	     z0 = svqrshl_m (p0, z1, z4))

/*
** qrshl_w0_s32_m_tied1:
**	mov	(z[0-9]+\.s), w0
**	sqrshl	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (qrshl_w0_s32_m_tied1, svint32_t, int32_t,
		 z0 = svqrshl_n_s32_m (p0, z0, x0),
		 z0 = svqrshl_m (p0, z0, x0))

/*
** qrshl_w0_s32_m_untied:
**	mov	(z[0-9]+\.s), w0
**	movprfx	z0, z1
**	sqrshl	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (qrshl_w0_s32_m_untied, svint32_t, int32_t,
		 z0 = svqrshl_n_s32_m (p0, z1, x0),
		 z0 = svqrshl_m (p0, z1, x0))

/*
** qrshl_m32_s32_m:
**	srshr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (qrshl_m32_s32_m, svint32_t,
		z0 = svqrshl_n_s32_m (p0, z0, -32),
		z0 = svqrshl_m (p0, z0, -32))

/*
** qrshl_m2_s32_m:
**	srshr	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (qrshl_m2_s32_m, svint32_t,
		z0 = svqrshl_n_s32_m (p0, z0, -2),
		z0 = svqrshl_m (p0, z0, -2))

/*
** qrshl_m1_s32_m_tied1:
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_m1_s32_m_tied1, svint32_t,
		z0 = svqrshl_n_s32_m (p0, z0, -1),
		z0 = svqrshl_m (p0, z0, -1))

/*
** qrshl_m1_s32_m_untied:
**	movprfx	z0, z1
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_m1_s32_m_untied, svint32_t,
		z0 = svqrshl_n_s32_m (p0, z1, -1),
		z0 = svqrshl_m (p0, z1, -1))

/*
** qrshl_1_s32_m_tied1:
**	sqshl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_1_s32_m_tied1, svint32_t,
		z0 = svqrshl_n_s32_m (p0, z0, 1),
		z0 = svqrshl_m (p0, z0, 1))

/*
** qrshl_1_s32_m_untied:
**	movprfx	z0, z1
**	sqshl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_1_s32_m_untied, svint32_t,
		z0 = svqrshl_n_s32_m (p0, z1, 1),
		z0 = svqrshl_m (p0, z1, 1))

/*
** qrshl_2_s32_m:
**	sqshl	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (qrshl_2_s32_m, svint32_t,
		z0 = svqrshl_n_s32_m (p0, z0, 2),
		z0 = svqrshl_m (p0, z0, 2))

/*
** qrshl_31_s32_m:
**	sqshl	z0\.s, p0/m, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (qrshl_31_s32_m, svint32_t,
		z0 = svqrshl_n_s32_m (p0, z0, 31),
		z0 = svqrshl_m (p0, z0, 31))

/*
** qrshl_s32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	sqrshl	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z (qrshl_s32_z_tied1, svint32_t, svint32_t,
	     z0 = svqrshl_s32_z (p0, z0, z4),
	     z0 = svqrshl_z (p0, z0, z4))

/*
** qrshl_s32_z_tied2:
**	movprfx	z0\.s, p0/z, z0\.s
**	sqrshlr	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z_REV (qrshl_s32_z_tied2, svint32_t, svint32_t,
		 z0_res = svqrshl_s32_z (p0, z4, z0),
		 z0_res = svqrshl_z (p0, z4, z0))

/*
** qrshl_s32_z_untied:
** (
**	movprfx	z0\.s, p0/z, z1\.s
**	sqrshl	z0\.s, p0/m, z0\.s, z4\.s
** |
**	movprfx	z0\.s, p0/z, z4\.s
**	sqrshlr	z0\.s, p0/m, z0\.s, z1\.s
** )
**	ret
*/
TEST_DUAL_Z (qrshl_s32_z_untied, svint32_t, svint32_t,
	     z0 = svqrshl_s32_z (p0, z1, z4),
	     z0 = svqrshl_z (p0, z1, z4))

/*
** qrshl_w0_s32_z_tied1:
**	mov	(z[0-9]+\.s), w0
**	movprfx	z0\.s, p0/z, z0\.s
**	sqrshl	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (qrshl_w0_s32_z_tied1, svint32_t, int32_t,
		 z0 = svqrshl_n_s32_z (p0, z0, x0),
		 z0 = svqrshl_z (p0, z0, x0))

/*
** qrshl_w0_s32_z_untied:
**	mov	(z[0-9]+\.s), w0
** (
**	movprfx	z0\.s, p0/z, z1\.s
**	sqrshl	z0\.s, p0/m, z0\.s, \1
** |
**	movprfx	z0\.s, p0/z, \1
**	sqrshlr	z0\.s, p0/m, z0\.s, z1\.s
** )
**	ret
*/
TEST_UNIFORM_ZX (qrshl_w0_s32_z_untied, svint32_t, int32_t,
		 z0 = svqrshl_n_s32_z (p0, z1, x0),
		 z0 = svqrshl_z (p0, z1, x0))

/*
** qrshl_m32_s32_z:
**	movprfx	z0\.s, p0/z, z0\.s
**	srshr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (qrshl_m32_s32_z, svint32_t,
		z0 = svqrshl_n_s32_z (p0, z0, -32),
		z0 = svqrshl_z (p0, z0, -32))

/*
** qrshl_m2_s32_z:
**	movprfx	z0\.s, p0/z, z0\.s
**	srshr	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (qrshl_m2_s32_z, svint32_t,
		z0 = svqrshl_n_s32_z (p0, z0, -2),
		z0 = svqrshl_z (p0, z0, -2))

/*
** qrshl_m1_s32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_m1_s32_z_tied1, svint32_t,
		z0 = svqrshl_n_s32_z (p0, z0, -1),
		z0 = svqrshl_z (p0, z0, -1))

/*
** qrshl_m1_s32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_m1_s32_z_untied, svint32_t,
		z0 = svqrshl_n_s32_z (p0, z1, -1),
		z0 = svqrshl_z (p0, z1, -1))

/*
** qrshl_1_s32_z_tied1:
**	movprfx	z0\.s, p0/z, z0\.s
**	sqshl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_1_s32_z_tied1, svint32_t,
		z0 = svqrshl_n_s32_z (p0, z0, 1),
		z0 = svqrshl_z (p0, z0, 1))

/*
** qrshl_1_s32_z_untied:
**	movprfx	z0\.s, p0/z, z1\.s
**	sqshl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_1_s32_z_untied, svint32_t,
		z0 = svqrshl_n_s32_z (p0, z1, 1),
		z0 = svqrshl_z (p0, z1, 1))

/*
** qrshl_2_s32_z:
**	movprfx	z0\.s, p0/z, z0\.s
**	sqshl	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (qrshl_2_s32_z, svint32_t,
		z0 = svqrshl_n_s32_z (p0, z0, 2),
		z0 = svqrshl_z (p0, z0, 2))

/*
** qrshl_31_s32_z:
**	movprfx	z0\.s, p0/z, z0\.s
**	sqshl	z0\.s, p0/m, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (qrshl_31_s32_z, svint32_t,
		z0 = svqrshl_n_s32_z (p0, z0, 31),
		z0 = svqrshl_z (p0, z0, 31))

/*
** qrshl_s32_x_tied1:
**	sqrshl	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z (qrshl_s32_x_tied1, svint32_t, svint32_t,
	     z0 = svqrshl_s32_x (p0, z0, z4),
	     z0 = svqrshl_x (p0, z0, z4))

/*
** qrshl_s32_x_tied2:
**	sqrshlr	z0\.s, p0/m, z0\.s, z4\.s
**	ret
*/
TEST_DUAL_Z_REV (qrshl_s32_x_tied2, svint32_t, svint32_t,
		 z0_res = svqrshl_s32_x (p0, z4, z0),
		 z0_res = svqrshl_x (p0, z4, z0))

/*
** qrshl_s32_x_untied:
** (
**	movprfx	z0, z1
**	sqrshl	z0\.s, p0/m, z0\.s, z4\.s
** |
**	movprfx	z0, z4
**	sqrshlr	z0\.s, p0/m, z0\.s, z1\.s
** )
**	ret
*/
TEST_DUAL_Z (qrshl_s32_x_untied, svint32_t, svint32_t,
	     z0 = svqrshl_s32_x (p0, z1, z4),
	     z0 = svqrshl_x (p0, z1, z4))

/*
** qrshl_w0_s32_x_tied1:
**	mov	(z[0-9]+\.s), w0
**	sqrshl	z0\.s, p0/m, z0\.s, \1
**	ret
*/
TEST_UNIFORM_ZX (qrshl_w0_s32_x_tied1, svint32_t, int32_t,
		 z0 = svqrshl_n_s32_x (p0, z0, x0),
		 z0 = svqrshl_x (p0, z0, x0))

/*
** qrshl_w0_s32_x_untied:
**	mov	z0\.s, w0
**	sqrshlr	z0\.s, p0/m, z0\.s, z1\.s
**	ret
*/
TEST_UNIFORM_ZX (qrshl_w0_s32_x_untied, svint32_t, int32_t,
		 z0 = svqrshl_n_s32_x (p0, z1, x0),
		 z0 = svqrshl_x (p0, z1, x0))

/*
** qrshl_m32_s32_x:
**	srshr	z0\.s, p0/m, z0\.s, #32
**	ret
*/
TEST_UNIFORM_Z (qrshl_m32_s32_x, svint32_t,
		z0 = svqrshl_n_s32_x (p0, z0, -32),
		z0 = svqrshl_x (p0, z0, -32))

/*
** qrshl_m2_s32_x:
**	srshr	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (qrshl_m2_s32_x, svint32_t,
		z0 = svqrshl_n_s32_x (p0, z0, -2),
		z0 = svqrshl_x (p0, z0, -2))

/*
** qrshl_m1_s32_x_tied1:
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_m1_s32_x_tied1, svint32_t,
		z0 = svqrshl_n_s32_x (p0, z0, -1),
		z0 = svqrshl_x (p0, z0, -1))

/*
** qrshl_m1_s32_x_untied:
**	movprfx	z0, z1
**	srshr	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_m1_s32_x_untied, svint32_t,
		z0 = svqrshl_n_s32_x (p0, z1, -1),
		z0 = svqrshl_x (p0, z1, -1))

/*
** qrshl_1_s32_x_tied1:
**	sqshl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_1_s32_x_tied1, svint32_t,
		z0 = svqrshl_n_s32_x (p0, z0, 1),
		z0 = svqrshl_x (p0, z0, 1))

/*
** qrshl_1_s32_x_untied:
**	movprfx	z0, z1
**	sqshl	z0\.s, p0/m, z0\.s, #1
**	ret
*/
TEST_UNIFORM_Z (qrshl_1_s32_x_untied, svint32_t,
		z0 = svqrshl_n_s32_x (p0, z1, 1),
		z0 = svqrshl_x (p0, z1, 1))

/*
** qrshl_2_s32_x:
**	sqshl	z0\.s, p0/m, z0\.s, #2
**	ret
*/
TEST_UNIFORM_Z (qrshl_2_s32_x, svint32_t,
		z0 = svqrshl_n_s32_x (p0, z0, 2),
		z0 = svqrshl_x (p0, z0, 2))

/*
** qrshl_31_s32_x:
**	sqshl	z0\.s, p0/m, z0\.s, #31
**	ret
*/
TEST_UNIFORM_Z (qrshl_31_s32_x, svint32_t,
		z0 = svqrshl_n_s32_x (p0, z0, 31),
		z0 = svqrshl_x (p0, z0, 31))