/* pp_sort.c
 *
 *    Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
 *    2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
 *
 *    You may distribute under the terms of either the GNU General Public
 *    License or the Artistic License, as specified in the README file.
 *
 */

/*
 * ...they shuffled back towards the rear of the line. 'No, not at the
 * rear!'  the slave-driver shouted. 'Three files up. And stay there...
 *
 *     [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
 */

/* This file contains pp ("push/pop") functions that
 * execute the opcodes that make up a perl program. A typical pp function
 * expects to find its arguments on the stack, and usually pushes its
 * results onto the stack, hence the 'pp' terminology. Each OP structure
 * contains a pointer to the relevant pp_foo() function.
 *
 * This particular file just contains pp_sort(), which is complex
 * enough to merit its own file! See the other pp*.c files for the rest of
 * the pp_ functions.
 */

#include "EXTERN.h"
#define PERL_IN_PP_SORT_C
#include "perl.h"

#ifndef SMALLSORT
#define SMALLSORT (200)
#endif

/* Flags for sortsv_flags */
#define SORTf_STABLE 1
#define SORTf_UNSTABLE 2

/*
 * The mergesort implementation is by Peter M. Mcilroy <pmcilroy@lucent.com>.
 *
 * The original code was written in conjunction with BSD Computer Software
 * Research Group at University of California, Berkeley.
 *
 * See also: "Optimistic Sorting and Information Theoretic Complexity"
 *           Peter McIlroy
 *           SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
 *           pp 467-474, Austin, Texas, 25-27 January 1993.
 *
 * The integration to Perl is by John P. Linderman <jpl.jpl@gmail.com>.
 *
 * The code can be distributed under the same terms as Perl itself.
 *
 */


typedef char * aptr;            /* pointer for arithmetic on sizes */
typedef SV * gptr;              /* pointers in our lists */

/* Binary merge internal sort, with a few special mods
** for the special perl environment it now finds itself in.
**
** Things that were once options have been hotwired
** to values suitable for this use.  In particular, we'll always
** initialize looking for natural runs, we'll always produce stable
** output, and we'll always do Peter McIlroy's binary merge.
*/

/* Pointer types for arithmetic and storage and convenience casts */

#define APTR(P)  ((aptr)(P))
#define GPTP(P)  ((gptr *)(P))
#define GPPP(P)  ((gptr **)(P))


/* byte offset from pointer P to (larger) pointer Q */
#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))

#define PSIZE sizeof(gptr)

/* If PSIZE is power of 2, make PSHIFT that power, if that helps */

#ifdef PSHIFT
#define PNELEM(P, Q)    (BYTEOFF(P,Q) >> (PSHIFT))
#define PNBYTE(N)       ((N) << (PSHIFT))
#define PINDEX(P, N)    (GPTP(APTR(P) + PNBYTE(N)))
#else
/* Leave optimization to compiler */
#define PNELEM(P, Q)    (GPTP(Q) - GPTP(P))
#define PNBYTE(N)       ((N) * (PSIZE))
#define PINDEX(P, N)    (GPTP(P) + (N))
#endif

/* Pointer into other corresponding to pointer into this */
#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))

#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)

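/* Illustrative sketch (not part of the build) of how the macros above
 * cooperate.  Given two parallel gptr arrays, POTHER maps a position in
 * one onto the corresponding position in the other by byte offset:
 *
 *     gptr base[8], aux[8];
 *     gptr *p = base + 3;
 *     gptr *q = POTHER(p, base, aux);     // q == aux + 3
 *     assert(PNELEM(base, p) == PNELEM(aux, q));
 *
 * Note also that FROMTOUPTO is a do/while loop: it always copies at least
 * one element, so callers must guarantee src < lim before invoking it.
 */
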
/* Runs are identified by a pointer in the auxiliary list.
** The pointer is at the start of the list,
** and it points to the start of the next list.
** NEXT is used as an lvalue, too.
*/

#define NEXT(P)         (*GPPP(P))


/* PTHRESH is the minimum number of pairs with the same sense to justify
** checking for a run and extending it.  Note that PTHRESH counts PAIRS,
** not just elements, so PTHRESH == 8 means a run of 16.
*/

#define PTHRESH (8)

/* RTHRESH is the number of elements in a run that must compare low
** to the low element from the opposing run before we justify
** doing a binary rampup instead of single stepping.
** In random input, N in a row low should only happen with
** probability 2^(1-N), so we can risk that we are dealing
** with orderly input without paying much when we aren't.
*/

#define RTHRESH (6)


/*
** Overview of algorithm and variables.
** The array of elements at list1 will be organized into runs of length 2,
** or runs of length >= 2 * PTHRESH.  We only try to form long runs when
** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
**
** Unless otherwise specified, pair pointers address the first of two elements.
**
** b and b+1 are a pair that compare with sense "sense".
** b is the "bottom" of adjacent pairs that might form a longer run.
**
** p2 parallels b in the list2 array, where runs are defined by
** a pointer chain.
**
** t represents the "top" of the adjacent pairs that might extend
** the run beginning at b.  Usually, t addresses a pair
** that compares with opposite sense from (b,b+1).
** However, it may also address a singleton element at the end of list1,
** or it may be equal to "last", the first element beyond list1.
**
** r addresses the Nth pair following b.  If this would be beyond t,
** we back it off to t.  Only when r is less than t do we consider the
** run long enough to consider checking.
**
** q addresses a pair such that the pairs at b through q already form a run.
** Often, q will equal b, indicating we only are sure of the pair itself.
** However, a search on the previous cycle may have revealed a longer run,
** so q may be greater than b.
**
** p is used to work back from a candidate r, trying to reach q,
** which would mean b through r would be a run.  If we discover such a run,
** we start q at r and try to push it further towards t.
** If b through r is NOT a run, we detect the wrong order at (p-1,p).
** In any event, after the check (if any), we have two main cases.
**
** 1) Short run.  b <= q < p <= r <= t.
**      b through q is a run (perhaps trivial)
**      q through p are uninteresting pairs
**      p through r is a run
**
** 2) Long run.  b < r <= q < t.
**      b through q is a run (of length >= 2 * PTHRESH)
**
** Note that degenerate cases are not only possible, but likely.
** For example, if the pair following b compares with opposite sense,
** then b == q < p == r == t.
*/

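/* Illustrative example (not compiled): suppose list1 holds 6 elements whose
 * three pairs each compare ascending -- too few pairs to trigger the
 * long-run check above (PTHRESH counts pairs).  dynprep() below then
 * returns 3, and the chain it leaves in list2 records the run boundaries:
 *
 *     NEXT(list2 + 0) == list2 + 2     // run 1 is elements [0,2)
 *     NEXT(list2 + 2) == list2 + 4     // run 2 is elements [2,4)
 *     NEXT(list2 + 4) == list2 + 6     // run 3 is elements [4,6)
 *
 * i.e. each run's start in the auxiliary list points at the start of the
 * next run, exactly as the NEXT comment above describes.
 */
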
PERL_STATIC_FORCE_INLINE IV __attribute__always_inline__
dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
{
    I32 sense;
    gptr *b, *p, *q, *t, *p2;
    gptr *last, *r;
    IV runs = 0;

    b = list1;
    last = PINDEX(b, nmemb);
    sense = (cmp(aTHX_ *b, *(b+1)) > 0);
    for (p2 = list2; b < last; ) {
        /* We just started, or just reversed sense.
        ** Set t at end of pairs with the prevailing sense.
        */
        for (p = b+2, t = p; ++p < last; t = ++p) {
            if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
        }
        q = b;
        /* Having laid out the playing field, look for long runs */
        do {
            p = r = b + (2 * PTHRESH);
            if (r >= t) p = r = t;      /* too short to care about */
            else {
                while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
                       ((p -= 2) > q)) {}
                if (p <= q) {
                    /* b through r is a (long) run.
                    ** Extend it as far as possible.
                    */
                    p = q = r;
                    while (((p += 2) < t) &&
                           ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
                    r = p = q + 2;      /* no simple pairs, no after-run */
                }
            }
            if (q > b) {                /* run of greater than 2 at b */
                gptr *savep = p;

                p = q += 2;
                /* pick up singleton, if possible */
                if ((p == t) &&
                    ((t + 1) == last) &&
                    ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                    savep = r = p = q = last;
                p2 = NEXT(p2) = p2 + (p - b);   ++runs;
                if (sense)
                    while (b < --p) {
                        const gptr c = *b;
                        *b++ = *p;
                        *p = c;
                    }
                p = savep;
            }
            while (q < p) {             /* simple pairs */
                p2 = NEXT(p2) = p2 + 2; ++runs;
                if (sense) {
                    const gptr c = *q++;
                    *(q-1) = *q;
                    *q++ = c;
                }
                else q += 2;
            }
            if (((b = p) == t) && ((t+1) == last)) {
                NEXT(p2) = p2 + 1;      ++runs;
                b++;
            }
            q = r;
        } while (b < t);
        sense = !sense;
    }
    return runs;
}

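/* Worked example (illustrative only): because dynprep() hunts for natural
 * runs in either direction and reverses descending ones in place, a fully
 * descending input of, say, 20 elements is recognized as one long run
 * (2 * PTHRESH pairs compare the same way), reversed into ascending order
 * by the swap loop above, and reported as a run count of 1.  Random input,
 * by contrast, typically yields runs of length 2 and a count near nmemb/2,
 * with each descending pair swapped into ascending order as it is chained.
 */
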
/* The original merge sort, in use since 5.7, was as fast as, or faster than,
 * qsort on many platforms, but slower than qsort, conspicuously so,
 * on others.  The most likely explanation was platform-specific
 * differences in cache sizes and relative speeds.
 *
 * The quicksort divide-and-conquer algorithm guarantees that, as the
 * problem is subdivided into smaller and smaller parts, the parts
 * fit into smaller (and faster) caches.  So it doesn't matter how
 * many levels of cache exist, quicksort will "find" them, and,
 * as long as smaller is faster, take advantage of them.
 *
 * By contrast, consider how the original mergesort algorithm worked.
 * Suppose we have five runs (each typically of length 2 after dynprep).
 *
 * pass               base                        aux
 *  0              1 2 3 4 5
 *  1                                          12 34 5
 *  2              1234 5
 *  3                                          12345
 *  4              12345
 *
 * Adjacent pairs are merged in "grand sweeps" through the input.
 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
 * runs 3 and 4 are merged and the runs from run 5 have been copied.
 * The only cache that matters is one large enough to hold *all* the input.
 * On some platforms, this may be many times slower than smaller caches.
 *
 * The following pseudo-code uses the same basic merge algorithm,
 * but in a divide-and-conquer way.
 *
 * # merge $runs runs at offset $offset of list $list1 into $list2.
 * # all unmerged runs ($runs == 1) originate in list $base.
 * sub mgsort2 {
 *     my ($offset, $runs, $base, $list1, $list2) = @_;
 *
 *     if ($runs == 1) {
 *         if ($list1 is $base) copy run to $list2
 *         return offset of end of list (or copy)
 *     } else {
 *         $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
 *         mgsort2($off2, $runs/2, $base, $list2, $list1)
 *         merge the adjacent runs at $offset of $list1 into $list2
 *         return the offset of the end of the merged runs
 *     }
 * }
 * mgsort2(0, $runs, $base, $aux, $base);
 *
 * For our 5 runs, the tree of calls looks like
 *
 *                    5
 *               3         2
 *            2     1    1   1
 *          1   1
 *
 *          1   2   3    4   5
 *
 * and the corresponding activity looks like
 *
 * copy runs 1 and 2 from base to aux
 * merge runs 1 and 2 from aux to base
 * (run 3 is where it belongs, no copy needed)
 * merge runs 12 and 3 from base to aux
 * (runs 4 and 5 are where they belong, no copy needed)
 * merge runs 4 and 5 from base to aux
 * merge runs 123 and 45 from aux to base
 *
 * Note that we merge runs 1 and 2 immediately after copying them,
 * while they are still likely to be in fast cache.  Similarly,
 * run 3 is merged with run 12 while it still may be lingering in cache.
 * This implementation should therefore enjoy much of the cache-friendly
 * behavior that quicksort does.  In addition, it does less copying
 * than the original mergesort implementation (only runs 1 and 2 are copied)
 * and the "balancing" of merges is better (merged runs comprise more nearly
 * equal numbers of original runs).
 *
 * The actual cache-friendly implementation will use a pseudo-stack
 * to avoid recursion, and will unroll processing of runs of length 2,
 * but it is otherwise similar to the recursive implementation.
 */

typedef struct {
    IV  offset;         /* offset of 1st of 2 runs at this level */
    IV  runs;           /* how many runs must be combined into 1 */
} off_runs;             /* pseudo-stack element */

PERL_STATIC_FORCE_INLINE void
S_sortsv_flags_impl(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    IV i, run, offset;
    I32 sense, level;
    gptr *f1, *f2, *t, *b, *p;
    int iwhich;
    gptr *aux;
    gptr *p1;
    gptr small[SMALLSORT];
    gptr *which[3];
    off_runs stack[60], *stackp;

    PERL_UNUSED_ARG(flags);
    PERL_ARGS_ASSERT_SORTSV_FLAGS_IMPL;
    if (nmemb <= 1) return;                     /* sorted trivially */

    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
    else { Newx(aux,nmemb,gptr); }              /* allocate auxiliary array */
    level = 0;
    stackp = stack;
    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
    stackp->offset = offset = 0;
    which[0] = which[2] = base;
    which[1] = aux;
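    /* A sketch of the ping-pong bookkeeping used below (illustrative only):
     * with which[0] == which[2] == base and which[1] == aux, the expression
     * which[level & 1] names the array holding the current level's runs and
     * which[(level & 1) + 1] names the array the merge writes into.  So on
     * even levels runs are merged from base into aux, and on odd levels
     * from aux back into base; the duplicate entry which[2] lets the
     * "+ 1" wrap around without a modulo.
     */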
    for (;;) {
        /* On levels where both runs have been constructed (stackp->runs == 0),
         * merge them, and note the offset of their end, in case the offset
         * is needed at the next level up.  Hop up a level, and,
         * as long as stackp->runs is 0, keep merging.
         */
        IV runs = stackp->runs;
        if (runs == 0) {
            gptr *list1, *list2;
            iwhich = level & 1;
            list1 = which[iwhich];              /* area where runs are now */
            list2 = which[++iwhich];            /* area for merged runs */
            do {
                gptr *l1, *l2, *tp2;
                offset = stackp->offset;
                f1 = p1 = list1 + offset;       /* start of first run */
                p = tp2 = list2 + offset;       /* where merged run will go */
                t = NEXT(p);                    /* where first run ends */
                f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
                t = NEXT(t);                    /* where second run ends */
                l2 = POTHER(t, list2, list1);   /* ... on the other side */
                offset = PNELEM(list2, t);
                while (f1 < l1 && f2 < l2) {
                    /* If head 1 is larger than head 2, find ALL the elements
                    ** in list 2 strictly less than head1, write them all,
                    ** then head 1.  Then compare the new heads, and repeat,
                    ** until one or both lists are exhausted.
                    **
                    ** In all comparisons (after establishing
                    ** which head to merge) the item to merge
                    ** (at pointer q) is the first operand of
                    ** the comparison.  When we want to know
                    ** if "q is strictly less than the other",
                    ** we can't just do
                    **    cmp(q, other) < 0
                    ** because stability demands that we treat equality
                    ** as high when q comes from l2, and as low when
                    ** q was from l1.  So we ask the question by doing
                    **    cmp(q, other) <= sense
                    ** and make sense == 0 when equality should look low,
                    ** and -1 when equality should look high.
                    */

                    gptr *q;
                    if (cmp(aTHX_ *f1, *f2) <= 0) {
                        q = f2; b = f1; t = l1;
                        sense = -1;
                    }
                    else {
                        q = f1; b = f2; t = l2;
                        sense = 0;
                    }


                    /* ramp up
                    **
                    ** Leave t at something strictly
                    ** greater than q (or at the end of the list),
                    ** and b at something strictly less than q.
                    */
                    for (i = 1, run = 0 ;;) {
                        if ((p = PINDEX(b, i)) >= t) {
                            /* off the end */
                            if (((p = PINDEX(t, -1)) > b) &&
                                (cmp(aTHX_ *q, *p) <= sense))
                                 t = p;
                            else b = p;
                            break;
                        }
                        else if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                            break;
                        }
                        else b = p;
                        if (++run >= RTHRESH) i += i;
                    }


                    /* q is known to follow b and must be inserted before t.
                    ** Increment b, so the range of possibilities is [b,t).
                    ** Round binary split down, to favor early appearance.
                    ** Adjust b and t until q belongs just before t.
                    */

                    b++;
                    while (b < t) {
                        p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
                        if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                        }
                        else b = p + 1;
                    }


                    /* Copy all the strictly low elements */

                    if (q == f1) {
                        FROMTOUPTO(f2, tp2, t);
                        *tp2++ = *f1++;
                    }
                    else {
                        FROMTOUPTO(f1, tp2, t);
                        *tp2++ = *f2++;
                    }
                }


                /* Run out remaining list */
                if (f1 == l1) {
                    if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
                }
                else FROMTOUPTO(f1, tp2, l1);
                p1 = NEXT(p1) = POTHER(tp2, list2, list1);

                if (--level == 0) goto done;
                --stackp;
                t = list1; list1 = list2; list2 = t;    /* swap lists */
            } while ((runs = stackp->runs) == 0);
        }


        stackp->runs = 0;               /* current run will finish level */
        /* While there are more than 2 runs remaining,
         * turn them into exactly 2 runs (at the "other" level),
         * each made up of approximately half the runs.
         * Stack the second half for later processing,
         * and set about producing the first half now.
         */
        while (runs > 2) {
            ++level;
            ++stackp;
            stackp->offset = offset;
            runs -= stackp->runs = runs / 2;
        }
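        /* Worked example (illustrative only): with runs == 5 the loop above
         * stacks the second "half" and keeps the first, matching the call
         * tree pictured earlier:
         *
         *     runs == 5:  stack 2 (runs -= 5/2), continue with 3
         *     runs == 3:  stack 1 (runs -= 3/2), continue with 2
         *
         * leaving exactly 2 runs to construct and merge first, while the
         * stacked entries (1 run, then 2) are produced later at their levels.
         */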
        /* We must construct a single run from 1 or 2 runs.
         * All the original runs are in which[0] == base.
         * The run we construct must end up in which[level&1].
         */
        iwhich = level & 1;
        if (runs == 1) {
            /* Constructing a single run from a single run.
             * If it's where it belongs already, there's nothing to do.
             * Otherwise, copy it to where it belongs.
             * A run of 1 is either a singleton at level 0,
             * or the second half of a split 3.  In neither event
             * is it necessary to set offset.  It will be set by the merge
             * that immediately follows.
             */
            if (iwhich) {       /* Belongs in aux, currently in base */
                f1 = b = PINDEX(base, offset);  /* where list starts */
                f2 = PINDEX(aux, offset);       /* where list goes */
                t = NEXT(f2);                   /* where list will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                t = PINDEX(base, offset);       /* where it currently ends */
                FROMTOUPTO(f1, f2, t);          /* copy */
                NEXT(b) = t;                    /* set up parallel pointer */
            }
            else if (level == 0) goto done;     /* single run at level 0 */
        }
        else {
            /* Constructing a single run from two runs.
             * The merge code at the top will do that.
             * We need only make sure the two runs are in the "other" array,
             * so they'll end up in the correct array after the merge.
             */
            ++level;
            ++stackp;
            stackp->offset = offset;
            stackp->runs = 0;   /* take care of both runs, trigger merge */
            if (!iwhich) {      /* Merged runs belong in aux, copy 1st */
                f1 = b = PINDEX(base, offset);  /* where first run starts */
                f2 = PINDEX(aux, offset);       /* where it will be copied */
                t = NEXT(f2);                   /* where first run will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                p = PINDEX(base, offset);       /* end of first run */
                t = NEXT(t);                    /* where second run will end */
                t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
                FROMTOUPTO(f1, f2, t);          /* copy both runs */
                NEXT(b) = p;                    /* paralleled pointer for 1st */
                NEXT(p) = t;                    /* ... and for second */
            }
        }
    }
  done:
    if (aux != small) Safefree(aux);    /* free iff allocated */

    return;
}

/*
=head1 SV Manipulation Functions

=for apidoc sortsv_flags

In-place sort an array of SV pointers with the given comparison routine,
with various SORTf_* flag options.

=cut
*/
void
Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    PERL_ARGS_ASSERT_SORTSV_FLAGS;

    sortsv_flags_impl(base, nmemb, cmp, flags);
}
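/* Illustrative (hypothetical) usage from C code, e.g. an extension that
 * wants a stable sort of a plain, unmagical array's elements; "av" here is
 * assumed to be an AV the caller owns:
 *
 *     SV **elems = AvARRAY(av);
 *     sortsv_flags(elems, (size_t)(AvFILLp(av) + 1), Perl_sv_cmp,
 *                  SORTf_STABLE);
 *
 * The sort is in place: only the order of the SV pointers changes, so
 * reference counts need no adjustment.
 */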
/*
 * Each of sortsv_* functions contains an inlined copy of
 * sortsv_flags_impl() with an inlined comparator.  Basically, we are
 * emulating C++ templates by using __attribute__((always_inline)).
 *
 * The purpose of that is to avoid the function call overhead inside
 * the sorting routine, which calls the comparison function multiple
 * times per sorted item.
 */

static void
sortsv_amagic_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp, flags);
}

static void
sortsv_amagic_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp_desc, flags);
}

static void
sortsv_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp, flags);
}

static void
sortsv_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp_desc, flags);
}

static void
sortsv_amagic_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp, flags);
}

static void
sortsv_amagic_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp_desc, flags);
}

static void
sortsv_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp, flags);
}

static void
sortsv_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp_desc, flags);
}

static void
sortsv_amagic_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp, flags);
}

static void
sortsv_amagic_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_desc, flags);
}

static void
sortsv_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp, flags);
}

static void
sortsv_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_desc, flags);
}

#ifdef USE_LOCALE_COLLATE

static void
sortsv_amagic_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale, flags);
}

static void
sortsv_amagic_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale_desc, flags);
}

static void
sortsv_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp_locale, flags);
}

static void
sortsv_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_locale_desc, flags);
}

#endif

/*
=head1 Array Manipulation Functions

=for apidoc sortsv

In-place sort an array of SV pointers with the given comparison routine.

Currently this always uses mergesort.  See C<L</sortsv_flags>> for a more
flexible routine.

=cut
*/

void
Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
{
    PERL_ARGS_ASSERT_SORTSV;

    sortsv_flags(array, nmemb, cmp, 0);
}

#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )
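/* Reading the three macros above (illustrative notes only):
 *   SvNSIOK(sv) - sv already has a usable numeric value: either an NV, or
 *                 an IV that is not flagged as unsigned (IVisUV).
 *   SvSIOK(sv)  - sv holds a plain signed IV.
 *   SvNSIV(sv)  - fetch that numeric value: the NV if present, else the
 *                 signed IV, else compute an NV via sv_2nv().
 * For example, an SV holding IV 42 satisfies SvSIOK, so SvNSIV yields 42
 * directly; a UV large enough to set IVisUV fails SvSIOK and goes through
 * sv_2nv() instead.
 */
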
PP(pp_sort)
{
    dSP; dMARK; dORIGMARK;
    SV **p1 = ORIGMARK+1, **p2;
    SSize_t max, i;
    AV* av = NULL;
    GV *gv;
    CV *cv = NULL;
    U8 gimme = GIMME_V;
    OP* const nextop = PL_op->op_next;
    I32 overloading = 0;
    bool hasargs = FALSE;
    bool copytmps;
    I32 is_xsub = 0;
    const U8 priv = PL_op->op_private;
    const U8 flags = PL_op->op_flags;
    U32 sort_flags = 0;
    I32 all_SIVs = 1, descending = 0;

    if ((priv & OPpSORT_DESCEND) != 0)
        descending = 1;
    if ((priv & OPpSORT_STABLE) != 0)
        sort_flags |= SORTf_STABLE;
    if ((priv & OPpSORT_UNSTABLE) != 0)
        sort_flags |= SORTf_UNSTABLE;

    if (gimme != G_ARRAY) {
        SP = MARK;
        EXTEND(SP,1);
        RETPUSHUNDEF;
    }

    ENTER;
    SAVEVPTR(PL_sortcop);
    if (flags & OPf_STACKED) {
        if (flags & OPf_SPECIAL) {
            OP *nullop = OpSIBLING(cLISTOP->op_first);  /* pass pushmark */
            assert(nullop->op_type == OP_NULL);
            PL_sortcop = nullop->op_next;
        }
        else {
            GV *autogv = NULL;
            HV *stash;
            cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
          check_cv:
            if (cv && SvPOK(cv)) {
                const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
                if (proto && strEQ(proto, "$$")) {
                    hasargs = TRUE;
                }
            }
            if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
                is_xsub = 1;
            }
            else if (!(cv && CvROOT(cv))) {
                if (gv) {
                    goto autoload;
                }
                else if (!CvANON(cv) && (gv = CvGV(cv))) {
                    if (cv != GvCV(gv)) cv = GvCV(gv);
                  autoload:
                    if (!autogv && (
                            autogv = gv_autoload_pvn(
                                GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
                                GvNAMEUTF8(gv) ? SVf_UTF8 : 0
                            )
                       )) {
                        cv = GvCVu(autogv);
                        goto check_cv;
                    }
                    else {
                        SV *tmpstr = sv_newmortal();
                        gv_efullname3(tmpstr, gv, NULL);
                        DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
                            SVfARG(tmpstr));
                    }
                }
                else {
                    DIE(aTHX_ "Undefined subroutine in sort");
                }
            }

            if (is_xsub)
                PL_sortcop = (OP*)cv;
            else
                PL_sortcop = CvSTART(cv);
        }
    }
    else {
        PL_sortcop = NULL;
    }

    /* optimiser converts "@a = sort @a" to "sort \@a".  In this case,
     * push (@a) onto stack, then assign result back to @a at the end of
     * this function */
    if (priv & OPpSORT_INPLACE) {
        assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
        (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
        av = MUTABLE_AV((*SP));
        if (SvREADONLY(av))
            Perl_croak_no_modify();
        max = AvFILL(av) + 1;
        MEXTEND(SP, max);
        if (SvMAGICAL(av)) {
            for (i=0; i < max; i++) {
                SV **svp = av_fetch(av, i, FALSE);
                *SP++ = (svp) ? *svp : NULL;
            }
        }
        else {
            SV **svp = AvARRAY(av);
            assert(svp || max == 0);
            for (i = 0; i < max; i++)
                *SP++ = *svp++;
        }
        SP--;
        p1 = p2 = SP - (max-1);
    }
    else {
        p2 = MARK+1;
        max = SP - MARK;
    }
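    /* Illustrative note: the OPpSORT_INPLACE path above corresponds to Perl
     * code like
     *
     *     @a = sort @a;
     *
     * which, per the comment above, the optimiser rewrites so that pp_sort
     * receives \@a directly.  The array's elements are spread onto the
     * stack here, sorted there, and written back to @a near the end of
     * this function.
     */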
    /* shuffle stack down, removing optional initial cv (p1!=p2), plus
     * any nulls; also stringify any args, or convert them to integer or
     * number, as required */
    copytmps = cBOOL(PL_sortcop);
    for (i=max; i > 0 ; i--) {
        if ((*p1 = *p2++)) {                    /* Weed out nulls. */
            if (copytmps && SvPADTMP(*p1)) {
                *p1 = sv_mortalcopy(*p1);
            }
            SvTEMP_off(*p1);
            if (!PL_sortcop) {
                if (priv & OPpSORT_NUMERIC) {
                    if (priv & OPpSORT_INTEGER) {
                        if (!SvIOK(*p1))
                            (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                    }
                    else {
                        if (!SvNSIOK(*p1))
                            (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                        if (all_SIVs && !SvSIOK(*p1))
                            all_SIVs = 0;
                    }
                }
                else {
                    if (!SvPOK(*p1))
                        (void)sv_2pv_flags(*p1, 0,
                            SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
                }
                if (SvAMAGIC(*p1))
                    overloading = 1;
            }
            p1++;
        }
        else
            max--;
    }
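    /* Illustrative summary of the dispatch that follows:
     *   - a comparison block or sub (PL_sortcop set) goes through
     *     Perl_sortsv_flags() with S_sortcv, S_sortcv_stacked (for subs
     *     with a "$$" prototype) or S_sortcv_xsub as the comparator;
     *   - otherwise a specialized numeric, locale-string or string sort is
     *     picked, each in plain/overloaded (amagic) x ascending/descending
     *     variants.  E.g. "sort { $b <=> $a } @x" typically resolves to
     *     sortsv_ncmp_desc(), or to sortsv_i_ncmp_desc() when every element
     *     already holds a plain signed IV (all_SIVs).
     */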
    if (max > 1) {
        SV **start;
        if (PL_sortcop) {
            PERL_CONTEXT *cx;
            const bool oldcatch = CATCH_GET;
            I32 old_savestack_ix = PL_savestack_ix;

            SAVEOP();

            CATCH_SET(TRUE);
            PUSHSTACKi(PERLSI_SORT);
            if (!hasargs && !is_xsub) {
                SAVEGENERICSV(PL_firstgv);
                SAVEGENERICSV(PL_secondgv);
                PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                /* make sure the GP isn't removed out from under us for
                 * the SAVESPTR() */
                save_gp(PL_firstgv, 0);
                save_gp(PL_secondgv, 0);
                /* we don't want modifications localized */
                GvINTRO_off(PL_firstgv);
                GvINTRO_off(PL_secondgv);
                SAVEGENERICSV(GvSV(PL_firstgv));
                SvREFCNT_inc(GvSV(PL_firstgv));
                SAVEGENERICSV(GvSV(PL_secondgv));
                SvREFCNT_inc(GvSV(PL_secondgv));
            }

            gimme = G_SCALAR;
            cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
            if (!(flags & OPf_SPECIAL)) {
                cx->cx_type = CXt_SUB|CXp_MULTICALL;
                cx_pushsub(cx, cv, NULL, hasargs);
                if (!is_xsub) {
                    PADLIST * const padlist = CvPADLIST(cv);

                    if (++CvDEPTH(cv) >= 2)
                        pad_push(padlist, CvDEPTH(cv));
                    PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));

                    if (hasargs) {
                        /* This is mostly copied from pp_entersub */
                        AV * const av = MUTABLE_AV(PAD_SVl(0));

                        cx->blk_sub.savearray = GvAV(PL_defgv);
                        GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
                    }

                }
            }

            start = p1 - max;
            Perl_sortsv_flags(aTHX_ start, max,
                    (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
                    sort_flags);

            /* Reset cx, in case the context stack has been reallocated. */
            cx = CX_CUR();

            PL_stack_sp = PL_stack_base + cx->blk_oldsp;

            CX_LEAVE_SCOPE(cx);
            if (!(flags & OPf_SPECIAL)) {
                assert(CxTYPE(cx) == CXt_SUB);
                cx_popsub(cx);
            }
            else
                assert(CxTYPE(cx) == CXt_NULL);
            /* there isn't a POPNULL ! */

            cx_popblock(cx);
            CX_POP(cx);
            POPSTACK;
            CATCH_SET(oldcatch);
        }
        else {
            MEXTEND(SP, 20);    /* Can't afford stack realloc on signal. */
            start = ORIGMARK+1;
            if (priv & OPpSORT_NUMERIC) {
                if ((priv & OPpSORT_INTEGER) || all_SIVs) {
                    if (overloading)
                        if (descending)
                            sortsv_amagic_i_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_amagic_i_ncmp(aTHX_ start, max, sort_flags);
                    else
                        if (descending)
                            sortsv_i_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_i_ncmp(aTHX_ start, max, sort_flags);
                }
                else {
                    if (overloading)
                        if (descending)
                            sortsv_amagic_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_amagic_ncmp(aTHX_ start, max, sort_flags);
                    else
                        if (descending)
                            sortsv_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_ncmp(aTHX_ start, max, sort_flags);
                }
            }
#ifdef USE_LOCALE_COLLATE
            else if(IN_LC_RUNTIME(LC_COLLATE)) {
                if (overloading)
                    if (descending)
                        sortsv_amagic_cmp_locale_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_amagic_cmp_locale(aTHX_ start, max, sort_flags);
                else
                    if (descending)
                        sortsv_cmp_locale_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_cmp_locale(aTHX_ start, max, sort_flags);
            }
#endif
            else {
                if (overloading)
                    if (descending)
                        sortsv_amagic_cmp_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_amagic_cmp(aTHX_ start, max, sort_flags);
                else
                    if (descending)
                        sortsv_cmp_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_cmp(aTHX_ start, max, sort_flags);
            }
        }
        if ((priv & OPpSORT_REVERSE) != 0) {
            SV **q = start+max-1;
            while (start < q) {
                SV * const tmp = *start;
                *start++ = *q;
                *q-- = tmp;
            }
        }
    }
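    /* Illustrative note: OPpSORT_REVERSE marks constructs like
     *
     *     reverse sort @x;
     *
     * where the optimiser has folded the reverse into the sort op; the
     * swap loop above then turns an ascending result such as 1 2 3 into
     * 3 2 1 in place.
     */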
    if (av) {
        /* copy back result to the array */
        SV** const base = MARK+1;
        SSize_t max_minus_one = max - 1;    /* attempt to work around mingw bug */
        if (SvMAGICAL(av)) {
            for (i = 0; i <= max_minus_one; i++)
                base[i] = newSVsv(base[i]);
            av_clear(av);
            if (max_minus_one >= 0)
                av_extend(av, max_minus_one);
            for (i=0; i <= max_minus_one; i++) {
                SV * const sv = base[i];
                SV ** const didstore = av_store(av, i, sv);
                if (SvSMAGICAL(sv))
                    mg_set(sv);
                if (!didstore)
                    sv_2mortal(sv);
            }
        }
        else {
            /* the elements of av are likely to be the same as the
             * (non-refcounted) elements on the stack, just in a different
             * order.  However, it's possible that someone has messed with av
             * in the meantime.  So bump and unbump the relevant refcounts
             * first.
             */
            for (i = 0; i <= max_minus_one; i++) {
                SV *sv = base[i];
                assert(sv);
                if (SvREFCNT(sv) > 1)
                    base[i] = newSVsv(sv);
                else
                    SvREFCNT_inc_simple_void_NN(sv);
            }
            av_clear(av);
            if (max_minus_one >= 0) {
                av_extend(av, max_minus_one);
                Copy(base, AvARRAY(av), max, SV*);
            }
            AvFILLp(av) = max_minus_one;
            AvREIFY_off(av);
            AvREAL_on(av);
        }
    }
    LEAVE;
    PL_stack_sp = ORIGMARK + max;
    return nextop;
}

static I32
S_sortcv(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;
    SV *olda, *oldb;

    PERL_ARGS_ASSERT_SORTCV;

    olda = GvSV(PL_firstgv);
    GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a);
    SvREFCNT_dec(olda);
    oldb = GvSV(PL_secondgv);
    GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b);
    SvREFCNT_dec(oldb);
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    AV * const av = GvAV(PL_defgv);
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;

    PERL_ARGS_ASSERT_SORTCV_STACKED;

    if (AvREAL(av)) {
        av_clear(av);
        AvREAL_off(av);
        AvREIFY_on(av);
    }
    if (AvMAX(av) < 1) {
        SV **ary = AvALLOC(av);
        if (AvARRAY(av) != ary) {
            AvMAX(av) += AvARRAY(av) - AvALLOC(av);
            AvARRAY(av) = ary;
        }
        if (AvMAX(av) < 1) {
            Renew(ary,2,SV*);
            AvMAX(av) = 1;
            AvARRAY(av) = ary;
            AvALLOC(av) = ary;
        }
    }
    AvFILLp(av) = 1;

    AvARRAY(av)[0] = a;
    AvARRAY(av)[1] = b;
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
{
    dSP;
    const I32 oldsaveix = PL_savestack_ix;
    CV * const cv=MUTABLE_CV(PL_sortcop);
    I32 result;
    PMOP * const pm = PL_curpm;

    PERL_ARGS_ASSERT_SORTCV_XSUB;

    SP = PL_stack_base;
    PUSHMARK(SP);
    EXTEND(SP, 2);
    *++SP = a;
    *++SP = b;
    PUTBACK;
    (void)(*CvXSUB(cv))(aTHX_ cv);
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}
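
/* Illustrative mapping from Perl source to the three glue routines above
 * ("by_num" and "My::XS::cmp" are made-up names; the actual dispatch is in
 * pp_sort() above):
 *
 *     sort { $a <=> $b } @x;        # S_sortcv: $a/$b set via PL_firstgv
 *                                   #           and PL_secondgv
 *     sub by_num ($$) { $_[0] <=> $_[1] }
 *     sort by_num @x;               # S_sortcv_stacked: pair passed in @_
 *     sort My::XS::cmp @x;          # S_sortcv_xsub: XSUB called directly
 */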
PERL_STATIC_FORCE_INLINE I32
S_sv_ncmp(pTHX_ SV *const a, SV *const b)
{
    I32 cmp = do_ncmp(a, b);

    PERL_ARGS_ASSERT_SV_NCMP;

    if (cmp == 2) {
        if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
        return 0;
    }

    return cmp;
}

PERL_STATIC_FORCE_INLINE I32
S_sv_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_SV_NCMP_DESC;

    return -S_sv_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    const IV iv1 = SvIV(a);
    const IV iv2 = SvIV(b);

    PERL_ARGS_ASSERT_SV_I_NCMP;

    return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
}

PERL_STATIC_FORCE_INLINE I32
S_sv_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_SV_I_NCMP_DESC;

    return -S_sv_i_ncmp(aTHX_ a, b);
}

#define tryCALL_AMAGICbin(left,right,meth) \
    (SvAMAGIC(left)||SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL;

#define SORT_NORMAL_RETURN_VALUE(val)  (((val) > 0) ? 1 : ((val) ? -1 : 0))

PERL_STATIC_FORCE_INLINE I32
S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_AMAGIC_NCMP_DESC;

    return -S_amagic_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_I_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_i_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_AMAGIC_I_NCMP_DESC;

    return -S_amagic_i_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp(str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_AMAGIC_CMP_DESC;

    return -S_amagic_cmp(aTHX_ str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_cmp_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_CMP_DESC;

    return -sv_cmp(str1, str2);
}
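
/* Illustrative note on the comparators above: an overloaded comparison
 * method may return any numeric SV, so SORT_NORMAL_RETURN_VALUE squeezes
 * the result into the canonical -1/0/1, e.g. an overloaded "<=>" returning
 * 3.7 sorts the same as one returning 1.  S_sv_ncmp likewise maps
 * do_ncmp()'s "unordered" result (2, e.g. a NaN operand) to 0, after an
 * optional "uninitialized" warning.
 */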

#ifdef USE_LOCALE_COLLATE

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp_locale(str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE_DESC;

    return -S_amagic_cmp_locale(aTHX_ str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_CMP_LOCALE_DESC;

    return -sv_cmp_locale(str1, str2);
}

#endif

/*
 * ex: set ts=8 sts=4 sw=4 et:
 */