/*    pp_sort.c
 *
 *    Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
 *    2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by Larry Wall and others
 *
 *    You may distribute under the terms of either the GNU General Public
 *    License or the Artistic License, as specified in the README file.
 *
 */

/*
 *   ...they shuffled back towards the rear of the line.  'No, not at the
 *   rear!' the slave-driver shouted.  'Three files up. And stay there...
 *
 *     [p.931 of _The Lord of the Rings_, VI/ii: "The Land of Shadow"]
 */

/* This file contains pp ("push/pop") functions that
 * execute the opcodes that make up a perl program. A typical pp function
 * expects to find its arguments on the stack, and usually pushes its
 * results onto the stack, hence the 'pp' terminology. Each OP structure
 * contains a pointer to the relevant pp_foo() function.
 *
 * This particular file just contains pp_sort(), which is complex
 * enough to merit its own file! See the other pp*.c files for the rest of
 * the pp_ functions.
 */

#include "EXTERN.h"
#define PERL_IN_PP_SORT_C
#include "perl.h"

#ifndef SMALLSORT
#define SMALLSORT (200)
#endif

/* Flags for sortsv_flags */
#define SORTf_STABLE 1
#define SORTf_UNSTABLE 2

/*
 * The mergesort implementation is by Peter M. McIlroy <pmcilroy@lucent.com>.
 *
 * The original code was written in conjunction with BSD Computer Software
 * Research Group at University of California, Berkeley.
 *
 * See also: "Optimistic Sorting and Information Theoretic Complexity"
 *           Peter McIlroy
 *           SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms),
 *           pp 467-474, Austin, Texas, 25-27 January 1993.
 *
 * The integration to Perl is by John P. Linderman <jpl.jpl@gmail.com>.
 *
 * The code can be distributed under the same terms as Perl itself.
 *
 */


typedef char * aptr;            /* pointer for arithmetic on sizes */
typedef SV * gptr;              /* pointers in our lists */

/* Binary merge internal sort, with a few special mods
** for the special perl environment it now finds itself in.
**
** Things that were once options have been hotwired
** to values suitable for this use.  In particular, we'll always
** initialize looking for natural runs, we'll always produce stable
** output, and we'll always do Peter McIlroy's binary merge.
*/

/* Pointer types for arithmetic and storage and convenience casts */

#define APTR(P) ((aptr)(P))
#define GPTP(P) ((gptr *)(P))
#define GPPP(P) ((gptr **)(P))


/* byte offset from pointer P to (larger) pointer Q */
#define BYTEOFF(P, Q) (APTR(Q) - APTR(P))

#define PSIZE sizeof(gptr)

/* If PSIZE is power of 2, make PSHIFT that power, if that helps */

#ifdef  PSHIFT
#define PNELEM(P, Q)    (BYTEOFF(P,Q) >> (PSHIFT))
#define PNBYTE(N)       ((N) << (PSHIFT))
#define PINDEX(P, N)    (GPTP(APTR(P) + PNBYTE(N)))
#else
/* Leave optimization to compiler */
#define PNELEM(P, Q)    (GPTP(Q) - GPTP(P))
#define PNBYTE(N)       ((N) * (PSIZE))
#define PINDEX(P, N)    (GPTP(P) + (N))
#endif

/* Pointer into other corresponding to pointer into this */
#define POTHER(P, THIS, OTHER) GPTP(APTR(OTHER) + BYTEOFF(THIS,P))

#define FROMTOUPTO(src, dst, lim) do *dst++ = *src++; while(src<lim)
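
/* Illustrative semantics of the macros above (a sketch, not part of the
** build): with gptr *a and gptr *b pointing at parallel arrays,
**     PINDEX(a, 3)         == a + 3
**     PNELEM(a, a + 3)     == 3
**     BYTEOFF(a, a + 3)    == 3 * sizeof(gptr)
**     POTHER(a + 3, a, b)  == b + 3      (same offset, other array)
** FROMTOUPTO is a do-while, so it always copies at least one element,
** leaving src == lim and dst just past the last element copied.
*/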


/* Runs are identified by a pointer in the auxiliary list.
** The pointer is at the start of the list,
** and it points to the start of the next list.
** NEXT is used as an lvalue, too.
*/

#define NEXT(P)         (*GPPP(P))
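
/* For instance (illustrative only): if the first run occupies elements
** 0..4 of the auxiliary list, then NEXT(aux) == aux + 5, and the pointer
** stored at aux + 5 in turn gives the start of the run after that.
*/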


/* PTHRESH is the minimum number of pairs with the same sense to justify
** checking for a run and extending it.  Note that PTHRESH counts PAIRS,
** not just elements, so PTHRESH == 8 means a run of 16.
*/

#define PTHRESH (8)

/* RTHRESH is the number of elements in a run that must compare low
** to the low element from the opposing run before we justify
** doing a binary rampup instead of single stepping.
** In random input, N in a row low should only happen with
** probability 2^(1-N), so we can risk that we are dealing
** with orderly input without paying much when we aren't.
*/

#define RTHRESH (6)
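
/* E.g. with RTHRESH == 6, random input triggers the binary rampup only
** after an event of probability 2^(1-6) == 1/32, so gambling that the
** input is orderly costs little when it isn't.
*/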


/*
** Overview of algorithm and variables.
** The array of elements at list1 will be organized into runs of length 2,
** or runs of length >= 2 * PTHRESH.  We only try to form long runs when
** PTHRESH adjacent pairs compare in the same way, suggesting overall order.
**
** Unless otherwise specified, pair pointers address the first of two elements.
**
** b and b+1 are a pair that compare with sense "sense".
** b is the "bottom" of adjacent pairs that might form a longer run.
**
** p2 parallels b in the list2 array, where runs are defined by
** a pointer chain.
**
** t represents the "top" of the adjacent pairs that might extend
** the run beginning at b.  Usually, t addresses a pair
** that compares with opposite sense from (b,b+1).
** However, it may also address a singleton element at the end of list1,
** or it may be equal to "last", the first element beyond list1.
**
** r addresses the Nth pair following b.  If this would be beyond t,
** we back it off to t.  Only when r is less than t do we consider the
** run long enough to consider checking.
**
** q addresses a pair such that the pairs at b through q already form a run.
** Often, q will equal b, indicating we only are sure of the pair itself.
** However, a search on the previous cycle may have revealed a longer run,
** so q may be greater than b.
**
** p is used to work back from a candidate r, trying to reach q,
** which would mean b through r would be a run.  If we discover such a run,
** we start q at r and try to push it further towards t.
** If b through r is NOT a run, we detect the wrong order at (p-1,p).
** In any event, after the check (if any), we have two main cases.
**
** 1) Short run.  b <= q < p <= r <= t.
**      b through q is a run (perhaps trivial)
**      q through p are uninteresting pairs
**      p through r is a run
**
** 2) Long run.  b < r <= q < t.
**      b through q is a run (of length >= 2 * PTHRESH)
**
** Note that degenerate cases are not only possible, but likely.
** For example, if the pair following b compares with opposite sense,
** then b == q < p == r == t.
*/
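
/* A worked example (illustrative only): for the ascending 6-element input
** (10 20 30 40 50 60), every pair compares with the same sense, but three
** pairs fall short of the PTHRESH (8) same-sense pairs needed to justify
** run detection, so dynprep() below emits three runs of length 2, chained
** in list2 as
**     NEXT(list2)     == list2 + 2
**     NEXT(list2 + 2) == list2 + 4
**     NEXT(list2 + 4) == list2 + 6
** and returns runs == 3.  The merge phase then stitches the runs together.
*/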


PERL_STATIC_FORCE_INLINE IV __attribute__always_inline__
dynprep(pTHX_ gptr *list1, gptr *list2, size_t nmemb, const SVCOMPARE_t cmp)
{
    I32 sense;
    gptr *b, *p, *q, *t, *p2;
    gptr *last, *r;
    IV runs = 0;

    b = list1;
    last = PINDEX(b, nmemb);
    sense = (cmp(aTHX_ *b, *(b+1)) > 0);
    for (p2 = list2; b < last; ) {
        /* We just started, or just reversed sense.
        ** Set t at end of pairs with the prevailing sense.
        */
        for (p = b+2, t = p; ++p < last; t = ++p) {
            if ((cmp(aTHX_ *t, *p) > 0) != sense) break;
        }
        q = b;
        /* Having laid out the playing field, look for long runs */
        do {
            p = r = b + (2 * PTHRESH);
            if (r >= t) p = r = t;      /* too short to care about */
            else {
                while (((cmp(aTHX_ *(p-1), *p) > 0) == sense) &&
                       ((p -= 2) > q)) {}
                if (p <= q) {
                    /* b through r is a (long) run.
                    ** Extend it as far as possible.
                    */
                    p = q = r;
                    while (((p += 2) < t) &&
                           ((cmp(aTHX_ *(p-1), *p) > 0) == sense)) q = p;
                    r = p = q + 2;      /* no simple pairs, no after-run */
                }
            }
            if (q > b) {                /* run of greater than 2 at b */
                gptr *savep = p;

                p = q += 2;
                /* pick up singleton, if possible */
                if ((p == t) &&
                    ((t + 1) == last) &&
                    ((cmp(aTHX_ *(p-1), *p) > 0) == sense))
                    savep = r = p = q = last;
                p2 = NEXT(p2) = p2 + (p - b); ++runs;
                if (sense)
                    while (b < --p) {
                        const gptr c = *b;
                        *b++ = *p;
                        *p = c;
                    }
                p = savep;
            }
            while (q < p) {             /* simple pairs */
                p2 = NEXT(p2) = p2 + 2; ++runs;
                if (sense) {
                    const gptr c = *q++;
                    *(q-1) = *q;
                    *q++ = c;
                } else q += 2;
            }
            if (((b = p) == t) && ((t+1) == last)) {
                NEXT(p2) = p2 + 1; ++runs;
                b++;
            }
            q = r;
        } while (b < t);
        sense = !sense;
    }
    return runs;
}


/* The original merge sort, in use since 5.7, was as fast as, or faster than,
 * qsort on many platforms, but slower than qsort, conspicuously so,
 * on others.  The most likely explanation was platform-specific
 * differences in cache sizes and relative speeds.
 *
 * The quicksort divide-and-conquer algorithm guarantees that, as the
 * problem is subdivided into smaller and smaller parts, the parts
 * fit into smaller (and faster) caches.  So it doesn't matter how
 * many levels of cache exist, quicksort will "find" them, and,
 * as long as smaller is faster, take advantage of them.
 *
 * By contrast, consider how the original mergesort algorithm worked.
 * Suppose we have five runs (each typically of length 2 after dynprep).
 *
 * pass               base                        aux
 *  0              1 2 3 4 5
 *  1                                           12 34 5
 *  2                1234 5
 *  3                                            12345
 *  4                 12345
 *
 * Adjacent pairs are merged in "grand sweeps" through the input.
 * This means, on pass 1, the records in runs 1 and 2 aren't revisited until
 * runs 3 and 4 are merged and run 5 has been copied.
 * The only cache that matters is one large enough to hold *all* the input.
 * On some platforms, this may be many times slower than smaller caches.
 *
 * The following pseudo-code uses the same basic merge algorithm,
 * but in a divide-and-conquer way.
 *
 * # merge $runs runs at offset $offset of list $list1 into $list2.
 * # all unmerged runs ($runs == 1) originate in list $base.
 * sub mgsort2 {
 *     my ($offset, $runs, $base, $list1, $list2) = @_;
 *
 *     if ($runs == 1) {
 *         if ($list1 is $base) copy run to $list2
 *         return offset of end of list (or copy)
 *     } else {
 *         $off2 = mgsort2($offset, $runs-($runs/2), $base, $list2, $list1)
 *         mgsort2($off2, $runs/2, $base, $list2, $list1)
 *         merge the adjacent runs at $offset of $list1 into $list2
 *         return the offset of the end of the merged runs
 *     }
 * }
 * mgsort2(0, $runs, $base, $aux, $base);
 *
 * For our 5 runs, the tree of calls looks like
 *
 *           5
 *      3        2
 *   2     1   1   1
 * 1   1
 *
 * 1   2   3   4   5
 *
 * and the corresponding activity looks like
 *
 * copy runs 1 and 2 from base to aux
 * merge runs 1 and 2 from aux to base
 * (run 3 is where it belongs, no copy needed)
 * merge runs 12 and 3 from base to aux
 * (runs 4 and 5 are where they belong, no copy needed)
 * merge runs 4 and 5 from base to aux
 * merge runs 123 and 45 from aux to base
 *
 * Note that we merge runs 1 and 2 immediately after copying them,
 * while they are still likely to be in fast cache.  Similarly,
 * run 3 is merged with run 12 while it still may be lingering in cache.
 * This implementation should therefore enjoy much of the cache-friendly
 * behavior that quicksort does.  In addition, it does less copying
 * than the original mergesort implementation (only runs 1 and 2 are copied)
 * and the "balancing" of merges is better (merged runs comprise more nearly
 * equal numbers of original runs).
 *
 * The actual cache-friendly implementation will use a pseudo-stack
 * to avoid recursion, and will unroll processing of runs of length 2,
 * but it is otherwise similar to the recursive implementation.
 */
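
/* For concreteness, here is a compilable C rendering of the mgsort2
 * pseudo-code above (a sketch, not part of the build).  It assumes every
 * unmerged run in `base` has length 2 and is already internally ordered
 * (dynprep's job), the last run possibly being a singleton, and it sorts
 * plain ints rather than SVs; the names are hypothetical, not Perl API.
 *
 *   #include <string.h>
 *
 *   // Merge `runs` adjacent runs starting at offset `off` of `src` into
 *   // `dst`; all unmerged runs live in `base`.  Returns the end offset.
 *   static size_t mgsort2(size_t off, size_t runs, int *base, size_t n,
 *                         int *src, int *dst)
 *   {
 *       if (runs == 1) {                  // a bare run of length <= 2
 *           size_t end = (off + 2 > n) ? n : off + 2;
 *           if (src == base)              // not where it belongs: copy
 *               memcpy(dst + off, base + off, (end - off) * sizeof *dst);
 *           return end;
 *       }
 *       // Build each half in `src` (note the swapped roles), then merge.
 *       size_t mid = mgsort2(off, runs - runs / 2, base, n, dst, src);
 *       size_t end = mgsort2(mid, runs / 2, base, n, dst, src);
 *       size_t i = off, j = mid, k = off;
 *       while (i < mid && j < end)        // stable: ties favor the left run
 *           dst[k++] = (src[i] <= src[j]) ? src[i++] : src[j++];
 *       while (i < mid) dst[k++] = src[i++];
 *       while (j < end) dst[k++] = src[j++];
 *       return end;
 *   }
 *
 * Calling mgsort2(0, (n + 1) / 2, base, n, aux, base), which mirrors
 * mgsort2(0, $runs, $base, $aux, $base) above, leaves the sorted result
 * in base, with aux used as scratch.
 */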

typedef struct {
    IV  offset;         /* offset of 1st of 2 runs at this level */
    IV  runs;           /* how many runs must be combined into 1 */
} off_runs;             /* pseudo-stack element */

PERL_STATIC_FORCE_INLINE void
S_sortsv_flags_impl(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    IV i, run, offset;
    I32 sense, level;
    gptr *f1, *f2, *t, *b, *p;
    int iwhich;
    gptr *aux;
    gptr *p1;
    gptr small[SMALLSORT];
    gptr *which[3];
    off_runs stack[60], *stackp;

    PERL_UNUSED_ARG(flags);
    PERL_ARGS_ASSERT_SORTSV_FLAGS_IMPL;
    if (nmemb <= 1) return;                     /* sorted trivially */

    if (nmemb <= SMALLSORT) aux = small;        /* use stack for aux array */
    else { Newx(aux,nmemb,gptr); }              /* allocate auxiliary array */
    level = 0;
    stackp = stack;
    stackp->runs = dynprep(aTHX_ base, aux, nmemb, cmp);
    stackp->offset = offset = 0;
    which[0] = which[2] = base;
    which[1] = aux;
    for (;;) {
        /* On levels where both runs have been constructed (stackp->runs == 0),
         * merge them, and note the offset of their end, in case the offset
         * is needed at the next level up.  Hop up a level, and,
         * as long as stackp->runs is 0, keep merging.
         */
        IV runs = stackp->runs;
        if (runs == 0) {
            gptr *list1, *list2;
            iwhich = level & 1;
            list1 = which[iwhich];              /* area where runs are now */
            list2 = which[++iwhich];            /* area for merged runs */
            do {
                gptr *l1, *l2, *tp2;
                offset = stackp->offset;
                f1 = p1 = list1 + offset;               /* start of first run */
                p = tp2 = list2 + offset;       /* where merged run will go */
                t = NEXT(p);                    /* where first run ends */
                f2 = l1 = POTHER(t, list2, list1); /* ... on the other side */
                t = NEXT(t);                    /* where second run ends */
                l2 = POTHER(t, list2, list1);   /* ... on the other side */
                offset = PNELEM(list2, t);
                while (f1 < l1 && f2 < l2) {
                    /* If head 1 is larger than head 2, find ALL the elements
                    ** in list 2 strictly less than head1, write them all,
                    ** then head 1.  Then compare the new heads, and repeat,
                    ** until one or both lists are exhausted.
                    **
                    ** In all comparisons (after establishing
                    ** which head to merge) the item to merge
                    ** (at pointer q) is the first operand of
                    ** the comparison.  When we want to know
                    ** if "q is strictly less than the other",
                    ** we can't just do
                    **    cmp(q, other) < 0
                    ** because stability demands that we treat equality
                    ** as high when q comes from l2, and as low when
                    ** q was from l1.  So we ask the question by doing
                    **    cmp(q, other) <= sense
                    ** and make sense == 0 when equality should look low,
                    ** and -1 when equality should look high.
                    */

                    gptr *q;
                    if (cmp(aTHX_ *f1, *f2) <= 0) {
                        q = f2; b = f1; t = l1;
                        sense = -1;
                    } else {
                        q = f1; b = f2; t = l2;
                        sense = 0;
                    }


                    /* ramp up
                    **
                    ** Leave t at something strictly
                    ** greater than q (or at the end of the list),
                    ** and b at something strictly less than q.
                    */
                    for (i = 1, run = 0 ;;) {
                        if ((p = PINDEX(b, i)) >= t) {
                            /* off the end */
                            if (((p = PINDEX(t, -1)) > b) &&
                                (cmp(aTHX_ *q, *p) <= sense))
                                 t = p;
                            else b = p;
                            break;
                        } else if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                            break;
                        } else b = p;
                        if (++run >= RTHRESH) i += i;
                    }


                    /* q is known to follow b and must be inserted before t.
                    ** Increment b, so the range of possibilities is [b,t).
                    ** Round binary split down, to favor early appearance.
                    ** Adjust b and t until q belongs just before t.
                    */

                    b++;
                    while (b < t) {
                        p = PINDEX(b, (PNELEM(b, t) - 1) / 2);
                        if (cmp(aTHX_ *q, *p) <= sense) {
                            t = p;
                        } else b = p + 1;
                    }


                    /* Copy all the strictly low elements */

                    if (q == f1) {
                        FROMTOUPTO(f2, tp2, t);
                        *tp2++ = *f1++;
                    } else {
                        FROMTOUPTO(f1, tp2, t);
                        *tp2++ = *f2++;
                    }
                }


                /* Run out remaining list */
                if (f1 == l1) {
                       if (f2 < l2) FROMTOUPTO(f2, tp2, l2);
                } else              FROMTOUPTO(f1, tp2, l1);
                p1 = NEXT(p1) = POTHER(tp2, list2, list1);

                if (--level == 0) goto done;
                --stackp;
                t = list1; list1 = list2; list2 = t;    /* swap lists */
            } while ((runs = stackp->runs) == 0);
        }


        stackp->runs = 0;               /* current run will finish level */
        /* While there are more than 2 runs remaining,
         * turn them into exactly 2 runs (at the "other" level),
         * each made up of approximately half the runs.
         * Stack the second half for later processing,
         * and set about producing the first half now.
         */
        while (runs > 2) {
            ++level;
            ++stackp;
            stackp->offset = offset;
            runs -= stackp->runs = runs / 2;
        }
        /* We must construct a single run from 1 or 2 runs.
         * All the original runs are in which[0] == base.
         * The run we construct must end up in which[level&1].
         */
        iwhich = level & 1;
        if (runs == 1) {
            /* Constructing a single run from a single run.
             * If it's where it belongs already, there's nothing to do.
             * Otherwise, copy it to where it belongs.
             * A run of 1 is either a singleton at level 0,
             * or the second half of a split 3.  In neither event
             * is it necessary to set offset.  It will be set by the merge
             * that immediately follows.
             */
            if (iwhich) {       /* Belongs in aux, currently in base */
                f1 = b = PINDEX(base, offset);  /* where list starts */
                f2 = PINDEX(aux, offset);       /* where list goes */
                t = NEXT(f2);                   /* where list will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                t = PINDEX(base, offset);       /* where it currently ends */
                FROMTOUPTO(f1, f2, t);          /* copy */
                NEXT(b) = t;                    /* set up parallel pointer */
            } else if (level == 0) goto done;   /* single run at level 0 */
        } else {
            /* Constructing a single run from two runs.
             * The merge code at the top will do that.
             * We need only make sure the two runs are in the "other" array,
             * so they'll end up in the correct array after the merge.
             */
            ++level;
            ++stackp;
            stackp->offset = offset;
            stackp->runs = 0;   /* take care of both runs, trigger merge */
            if (!iwhich) {      /* Merged runs belong in aux, copy 1st */
                f1 = b = PINDEX(base, offset);  /* where first run starts */
                f2 = PINDEX(aux, offset);       /* where it will be copied */
                t = NEXT(f2);                   /* where first run will end */
                offset = PNELEM(aux, t);        /* offset thereof */
                p = PINDEX(base, offset);       /* end of first run */
                t = NEXT(t);                    /* where second run will end */
                t = PINDEX(base, PNELEM(aux, t)); /* where it now ends */
                FROMTOUPTO(f1, f2, t);          /* copy both runs */
                NEXT(b) = p;                    /* paralleled pointer for 1st */
                NEXT(p) = t;                    /* ... and for second */
            }
        }
    }
  done:
    if (aux != small) Safefree(aux);    /* free iff allocated */

    return;
}

/*
=for apidoc sortsv_flags

In-place sort an array of SV pointers with the given comparison routine,
with various SORTf_* flag options.

=cut
*/
void
Perl_sortsv_flags(pTHX_ gptr *base, size_t nmemb, SVCOMPARE_t cmp, U32 flags)
{
    PERL_ARGS_ASSERT_SORTSV_FLAGS;

    sortsv_flags_impl(base, nmemb, cmp, flags);
}

/*
 * Each of the sortsv_* functions contains an inlined copy of
 * sortsv_flags_impl() with an inlined comparator. Basically, we are
 * emulating C++ templates by using __attribute__((always_inline)).
 *
 * The purpose of that is to avoid the function call overhead inside
 * the sorting routine, which calls the comparison function multiple
 * times per sorted item.
 */

static void
sortsv_amagic_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp, flags);
}

static void
sortsv_amagic_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_i_ncmp_desc, flags);
}

static void
sortsv_i_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp, flags);
}

static void
sortsv_i_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_i_ncmp_desc, flags);
}

static void
sortsv_amagic_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp, flags);
}

static void
sortsv_amagic_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_ncmp_desc, flags);
}

static void
sortsv_ncmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp, flags);
}

static void
sortsv_ncmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_sv_ncmp_desc, flags);
}

static void
sortsv_amagic_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp, flags);
}

static void
sortsv_amagic_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_desc, flags);
}

static void
sortsv_cmp(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp, flags);
}

static void
sortsv_cmp_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_desc, flags);
}

#ifdef USE_LOCALE_COLLATE

static void
sortsv_amagic_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale, flags);
}

static void
sortsv_amagic_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_amagic_cmp_locale_desc, flags);
}

static void
sortsv_cmp_locale(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, Perl_sv_cmp_locale, flags);
}

static void
sortsv_cmp_locale_desc(pTHX_ gptr *base, size_t nmemb, U32 flags)
{
    sortsv_flags_impl(base, nmemb, S_cmp_locale_desc, flags);
}

#endif

/*

=for apidoc sortsv

In-place sort an array of SV pointers with the given comparison routine.

Currently this always uses mergesort.  See C<L</sortsv_flags>> for a more
flexible routine.

=cut
*/

void
Perl_sortsv(pTHX_ SV **array, size_t nmemb, SVCOMPARE_t cmp)
{
    PERL_ARGS_ASSERT_SORTSV;

    sortsv_flags(array, nmemb, cmp, 0);
}
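
/* Example (hypothetical XS/embedding usage, not part of this file):
 * sort the elements of a plain (non-magical) AV ascending by string
 * comparison:
 *
 *     AV *av = ...;
 *     SSize_t n = AvFILLp(av) + 1;
 *     sortsv(AvARRAY(av), n, Perl_sv_cmp);
 *
 * sortsv_flags() additionally accepts the SORTf_* flags defined above,
 * though the mergesort here is stable regardless.
 */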
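
/* Classification helpers for pp_sort()'s numeric fast path below:
 * SvSIOK:  sv holds a signed integer (IOK set, IVisUV clear);
 * SvNSIOK: sv already holds a usable numeric value, either an NV or a
 *          signed IV, so no conversion is needed before comparing;
 * SvNSIV:  fetch that numeric value, calling sv_2nv() as a last resort.
 */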
#define SvNSIOK(sv) ((SvFLAGS(sv) & SVf_NOK) || ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK))
#define SvSIOK(sv) ((SvFLAGS(sv) & (SVf_IOK|SVf_IVisUV)) == SVf_IOK)
#define SvNSIV(sv) ( SvNOK(sv) ? SvNVX(sv) : ( SvSIOK(sv) ? SvIVX(sv) : sv_2nv(sv) ) )

PP(pp_sort)
{
    dSP; dMARK; dORIGMARK;
    SV **p1 = ORIGMARK+1, **p2;
    SSize_t max, i;
    AV* av = NULL;
    GV *gv;
    CV *cv = NULL;
    U8 gimme = GIMME_V;
    OP* const nextop = PL_op->op_next;
    I32 overloading = 0;
    bool hasargs = FALSE;
    bool copytmps;
    I32 is_xsub = 0;
    const U8 priv = PL_op->op_private;
    const U8 flags = PL_op->op_flags;
    U32 sort_flags = 0;
    I32 all_SIVs = 1, descending = 0;

    if ((priv & OPpSORT_DESCEND) != 0)
        descending = 1;
    if ((priv & OPpSORT_STABLE) != 0)
        sort_flags |= SORTf_STABLE;
    if ((priv & OPpSORT_UNSTABLE) != 0)
        sort_flags |= SORTf_UNSTABLE;

    if (gimme != G_ARRAY) {
        SP = MARK;
        EXTEND(SP,1);
        RETPUSHUNDEF;
    }

    ENTER;
    SAVEVPTR(PL_sortcop);
    if (flags & OPf_STACKED) {
        if (flags & OPf_SPECIAL) {
            OP *nullop = OpSIBLING(cLISTOP->op_first);  /* pass pushmark */
            assert(nullop->op_type == OP_NULL);
            PL_sortcop = nullop->op_next;
        }
        else {
            GV *autogv = NULL;
            HV *stash;
            cv = sv_2cv(*++MARK, &stash, &gv, GV_ADD);
          check_cv:
            if (cv && SvPOK(cv)) {
                const char * const proto = SvPV_nolen_const(MUTABLE_SV(cv));
                if (proto && strEQ(proto, "$$")) {
                    hasargs = TRUE;
                }
            }
            if (cv && CvISXSUB(cv) && CvXSUB(cv)) {
                is_xsub = 1;
            }
            else if (!(cv && CvROOT(cv))) {
                if (gv) {
                    goto autoload;
                }
                else if (!CvANON(cv) && (gv = CvGV(cv))) {
                    if (cv != GvCV(gv)) cv = GvCV(gv);
                  autoload:
                    if (!autogv && (
                        autogv = gv_autoload_pvn(
                            GvSTASH(gv), GvNAME(gv), GvNAMELEN(gv),
                            GvNAMEUTF8(gv) ? SVf_UTF8 : 0
                        )
                    )) {
                        cv = GvCVu(autogv);
                        goto check_cv;
                    }
                    else {
                        SV *tmpstr = sv_newmortal();
                        gv_efullname3(tmpstr, gv, NULL);
                        DIE(aTHX_ "Undefined sort subroutine \"%" SVf "\" called",
                            SVfARG(tmpstr));
                    }
                }
                else {
                    DIE(aTHX_ "Undefined subroutine in sort");
                }
            }

            if (is_xsub)
                PL_sortcop = (OP*)cv;
            else
                PL_sortcop = CvSTART(cv);
        }
    }
    else {
        PL_sortcop = NULL;
    }

    /* optimiser converts "@a = sort @a" to "sort \@a".  In this case,
     * push (@a) onto stack, then assign result back to @a at the end of
     * this function */
    if (priv & OPpSORT_INPLACE) {
        assert( MARK+1 == SP && *SP && SvTYPE(*SP) == SVt_PVAV);
        (void)POPMARK; /* remove mark associated with ex-OP_AASSIGN */
        av = MUTABLE_AV((*SP));
        if (SvREADONLY(av))
            Perl_croak_no_modify();
        max = AvFILL(av) + 1;
        MEXTEND(SP, max);
        if (SvMAGICAL(av)) {
            for (i=0; i < max; i++) {
                SV **svp = av_fetch(av, i, FALSE);
                *SP++ = (svp) ? *svp : NULL;
            }
        }
        else {
            SV **svp = AvARRAY(av);
            assert(svp || max == 0);
            for (i = 0; i < max; i++)
                *SP++ = *svp++;
        }
        SP--;
        p1 = p2 = SP - (max-1);
    }
    else {
        p2 = MARK+1;
        max = SP - MARK;
    }

    /* shuffle stack down, removing optional initial cv (p1!=p2), plus
     * any nulls; also stringify, or convert to integer or number, any
     * args as required */
    copytmps = cBOOL(PL_sortcop);
    for (i=max; i > 0 ; i--) {
        if ((*p1 = *p2++)) {                    /* Weed out nulls. */
            if (copytmps && SvPADTMP(*p1)) {
                *p1 = sv_mortalcopy(*p1);
            }
            SvTEMP_off(*p1);
            if (!PL_sortcop) {
                if (priv & OPpSORT_NUMERIC) {
                    if (priv & OPpSORT_INTEGER) {
                        if (!SvIOK(*p1))
                            (void)sv_2iv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                    }
                    else {
                        if (!SvNSIOK(*p1))
                            (void)sv_2nv_flags(*p1, SV_GMAGIC|SV_SKIP_OVERLOAD);
                        if (all_SIVs && !SvSIOK(*p1))
                            all_SIVs = 0;
                    }
                }
                else {
                    if (!SvPOK(*p1))
                        (void)sv_2pv_flags(*p1, 0,
                            SV_GMAGIC|SV_CONST_RETURN|SV_SKIP_OVERLOAD);
                }
                if (SvAMAGIC(*p1))
                    overloading = 1;
            }
            p1++;
        }
        else
            max--;
    }
    if (max > 1) {
        SV **start;
        if (PL_sortcop) {
            PERL_CONTEXT *cx;
            const bool oldcatch = CATCH_GET;
            I32 old_savestack_ix = PL_savestack_ix;

            SAVEOP();

            CATCH_SET(TRUE);
            PUSHSTACKi(PERLSI_SORT);
            if (!hasargs && !is_xsub) {
                SAVEGENERICSV(PL_firstgv);
                SAVEGENERICSV(PL_secondgv);
                PL_firstgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("a", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                PL_secondgv = MUTABLE_GV(SvREFCNT_inc(
                    gv_fetchpvs("b", GV_ADD|GV_NOTQUAL, SVt_PV)
                ));
                /* make sure the GP isn't removed out from under us for
                 * the SAVESPTR() */
                save_gp(PL_firstgv, 0);
                save_gp(PL_secondgv, 0);
                /* we don't want modifications localized */
                GvINTRO_off(PL_firstgv);
                GvINTRO_off(PL_secondgv);
                SAVEGENERICSV(GvSV(PL_firstgv));
                SvREFCNT_inc(GvSV(PL_firstgv));
                SAVEGENERICSV(GvSV(PL_secondgv));
                SvREFCNT_inc(GvSV(PL_secondgv));
            }

            gimme = G_SCALAR;
            cx = cx_pushblock(CXt_NULL, gimme, PL_stack_base, old_savestack_ix);
            if (!(flags & OPf_SPECIAL)) {
                cx->cx_type = CXt_SUB|CXp_MULTICALL;
                cx_pushsub(cx, cv, NULL, hasargs);
                if (!is_xsub) {
                    PADLIST * const padlist = CvPADLIST(cv);

                    if (++CvDEPTH(cv) >= 2)
                        pad_push(padlist, CvDEPTH(cv));
                    PAD_SET_CUR_NOSAVE(padlist, CvDEPTH(cv));

                    if (hasargs) {
                        /* This is mostly copied from pp_entersub */
                        AV * const av = MUTABLE_AV(PAD_SVl(0));

                        cx->blk_sub.savearray = GvAV(PL_defgv);
                        GvAV(PL_defgv) = MUTABLE_AV(SvREFCNT_inc_simple(av));
                    }

                }
            }

            start = p1 - max;
            Perl_sortsv_flags(aTHX_ start, max,
                    (is_xsub ? S_sortcv_xsub : hasargs ? S_sortcv_stacked : S_sortcv),
                    sort_flags);

            /* Reset cx, in case the context stack has been reallocated. */
            cx = CX_CUR();

            PL_stack_sp = PL_stack_base + cx->blk_oldsp;

            CX_LEAVE_SCOPE(cx);
            if (!(flags & OPf_SPECIAL)) {
                assert(CxTYPE(cx) == CXt_SUB);
                cx_popsub(cx);
            }
            else
                assert(CxTYPE(cx) == CXt_NULL);
                /* there isn't a POPNULL ! */

            cx_popblock(cx);
            CX_POP(cx);
            POPSTACK;
            CATCH_SET(oldcatch);
        }
        else {
            MEXTEND(SP, 20);    /* Can't afford stack realloc on signal. */
            start = ORIGMARK+1;
            if (priv & OPpSORT_NUMERIC) {
                if ((priv & OPpSORT_INTEGER) || all_SIVs) {
                    if (overloading)
                        if (descending)
                            sortsv_amagic_i_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_amagic_i_ncmp(aTHX_ start, max, sort_flags);
                    else
                        if (descending)
                            sortsv_i_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_i_ncmp(aTHX_ start, max, sort_flags);
                }
                else {
                    if (overloading)
                        if (descending)
                            sortsv_amagic_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_amagic_ncmp(aTHX_ start, max, sort_flags);
                    else
                        if (descending)
                            sortsv_ncmp_desc(aTHX_ start, max, sort_flags);
                        else
                            sortsv_ncmp(aTHX_ start, max, sort_flags);
                }
            }
#ifdef USE_LOCALE_COLLATE
            else if(IN_LC_RUNTIME(LC_COLLATE)) {
                if (overloading)
                    if (descending)
                        sortsv_amagic_cmp_locale_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_amagic_cmp_locale(aTHX_ start, max, sort_flags);
                else
                    if (descending)
                        sortsv_cmp_locale_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_cmp_locale(aTHX_ start, max, sort_flags);
            }
#endif
            else {
                if (overloading)
                    if (descending)
                        sortsv_amagic_cmp_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_amagic_cmp(aTHX_ start, max, sort_flags);
                else
                    if (descending)
                        sortsv_cmp_desc(aTHX_ start, max, sort_flags);
                    else
                        sortsv_cmp(aTHX_ start, max, sort_flags);
            }
        }
        if ((priv & OPpSORT_REVERSE) != 0) {
            SV **q = start+max-1;
            while (start < q) {
                SV * const tmp = *start;
                *start++ = *q;
                *q-- = tmp;
            }
        }
    }

    if (av) {
        /* copy back result to the array */
        SV** const base = MARK+1;
        SSize_t max_minus_one = max - 1; /* attempt to work around mingw bug */
        if (SvMAGICAL(av)) {
            for (i = 0; i <= max_minus_one; i++)
                base[i] = newSVsv(base[i]);
            av_clear(av);
            if (max_minus_one >= 0)
                av_extend(av, max_minus_one);
            for (i=0; i <= max_minus_one; i++) {
                SV * const sv = base[i];
                SV ** const didstore = av_store(av, i, sv);
                if (SvSMAGICAL(sv))
                    mg_set(sv);
                if (!didstore)
                    sv_2mortal(sv);
            }
        }
        else {
            /* the elements of av are likely to be the same as the
             * (non-refcounted) elements on the stack, just in a different
             * order. However, it's possible that someone's messed with av
             * in the meantime. So bump and unbump the relevant refcounts
             * first.
             */
            for (i = 0; i <= max_minus_one; i++) {
                SV *sv = base[i];
                assert(sv);
                if (SvREFCNT(sv) > 1)
                    base[i] = newSVsv(sv);
                else
                    SvREFCNT_inc_simple_void_NN(sv);
            }
            av_clear(av);
            if (max_minus_one >= 0) {
                av_extend(av, max_minus_one);
                Copy(base, AvARRAY(av), max, SV*);
            }
            AvFILLp(av) = max_minus_one;
            AvREIFY_off(av);
            AvREAL_on(av);
        }
    }
    LEAVE;
    PL_stack_sp = ORIGMARK + max;
    return nextop;
}

static I32
S_sortcv(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;
    SV *olda, *oldb;

    PERL_ARGS_ASSERT_SORTCV;

    olda = GvSV(PL_firstgv);
    GvSV(PL_firstgv) = SvREFCNT_inc_simple_NN(a);
    SvREFCNT_dec(olda);
    oldb = GvSV(PL_secondgv);
    GvSV(PL_secondgv) = SvREFCNT_inc_simple_NN(b);
    SvREFCNT_dec(oldb);
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_stacked(pTHX_ SV *const a, SV *const b)
{
    const I32 oldsaveix = PL_savestack_ix;
    I32 result;
    AV * const av = GvAV(PL_defgv);
    PMOP * const pm = PL_curpm;
    COP * const cop = PL_curcop;

    PERL_ARGS_ASSERT_SORTCV_STACKED;

    if (AvREAL(av)) {
        av_clear(av);
        AvREAL_off(av);
        AvREIFY_on(av);
    }
    if (AvMAX(av) < 1) {
        SV **ary = AvALLOC(av);
        if (AvARRAY(av) != ary) {
            AvMAX(av) += AvARRAY(av) - AvALLOC(av);
            AvARRAY(av) = ary;
        }
        if (AvMAX(av) < 1) {
            Renew(ary,2,SV*);
            AvMAX(av) = 1;
            AvARRAY(av) = ary;
            AvALLOC(av) = ary;
        }
    }
    AvFILLp(av) = 1;

    AvARRAY(av)[0] = a;
    AvARRAY(av)[1] = b;
    PL_stack_sp = PL_stack_base;
    PL_op = PL_sortcop;
    CALLRUNOPS(aTHX);
    PL_curcop = cop;
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}

static I32
S_sortcv_xsub(pTHX_ SV *const a, SV *const b)
{
    dSP;
    const I32 oldsaveix = PL_savestack_ix;
    CV * const cv=MUTABLE_CV(PL_sortcop);
    I32 result;
    PMOP * const pm = PL_curpm;

    PERL_ARGS_ASSERT_SORTCV_XSUB;

    SP = PL_stack_base;
    PUSHMARK(SP);
    EXTEND(SP, 2);
    *++SP = a;
    *++SP = b;
    PUTBACK;
    (void)(*CvXSUB(cv))(aTHX_ cv);
    /* entry zero of a stack is always PL_sv_undef, which
     * simplifies converting a '()' return into undef in scalar context */
    assert(PL_stack_sp > PL_stack_base || *PL_stack_base == &PL_sv_undef);
    result = SvIV(*PL_stack_sp);

    LEAVE_SCOPE(oldsaveix);
    PL_curpm = pm;
    return result;
}


PERL_STATIC_FORCE_INLINE I32
S_sv_ncmp(pTHX_ SV *const a, SV *const b)
{
    I32 cmp = do_ncmp(a, b);

    PERL_ARGS_ASSERT_SV_NCMP;

    if (cmp == 2) {
        if (ckWARN(WARN_UNINITIALIZED)) report_uninit(NULL);
        return 0;
    }

    return cmp;
}

PERL_STATIC_FORCE_INLINE I32
S_sv_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_SV_NCMP_DESC;

    return -S_sv_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_sv_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    const IV iv1 = SvIV(a);
    const IV iv2 = SvIV(b);

    PERL_ARGS_ASSERT_SV_I_NCMP;

    return iv1 < iv2 ? -1 : iv1 > iv2 ? 1 : 0;
}

PERL_STATIC_FORCE_INLINE I32
S_sv_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_SV_I_NCMP_DESC;

    return -S_sv_i_ncmp(aTHX_ a, b);
}

#define tryCALL_AMAGICbin(left,right,meth) \
    (SvAMAGIC(left)||SvAMAGIC(right)) \
        ? amagic_call(left, right, meth, 0) \
        : NULL;

#define SORT_NORMAL_RETURN_VALUE(val)  (((val) > 0) ? 1 : ((val) ? -1 : 0))
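
/* E.g. SORT_NORMAL_RETURN_VALUE(-42) == -1, SORT_NORMAL_RETURN_VALUE(0) == 0,
 * and SORT_NORMAL_RETURN_VALUE(7) == 1: it clamps an overloaded <=> result,
 * which may be an arbitrary IV or NV, to the -1/0/1 a comparator returns.
 */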

PERL_STATIC_FORCE_INLINE I32
S_amagic_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_AMAGIC_NCMP_DESC;

    return -S_amagic_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_i_ncmp(pTHX_ SV *const a, SV *const b)
{
    SV * const tmpsv = tryCALL_AMAGICbin(a,b,ncmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_I_NCMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return S_sv_i_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_i_ncmp_desc(pTHX_ SV *const a, SV *const b)
{
    PERL_ARGS_ASSERT_AMAGIC_I_NCMP_DESC;

    return -S_amagic_i_ncmp(aTHX_ a, b);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp(str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_AMAGIC_CMP_DESC;

    return -S_amagic_cmp(aTHX_ str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_cmp_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_CMP_DESC;

    return -sv_cmp(str1, str2);
}

#ifdef USE_LOCALE_COLLATE

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_locale(pTHX_ SV *const str1, SV *const str2)
{
    SV * const tmpsv = tryCALL_AMAGICbin(str1,str2,scmp_amg);

    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE;

    if (tmpsv) {
        if (SvIOK(tmpsv)) {
            const I32 i = SvIVX(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(i);
        }
        else {
            const NV d = SvNV(tmpsv);
            return SORT_NORMAL_RETURN_VALUE(d);
        }
    }
    return sv_cmp_locale(str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_amagic_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_AMAGIC_CMP_LOCALE_DESC;

    return -S_amagic_cmp_locale(aTHX_ str1, str2);
}

PERL_STATIC_FORCE_INLINE I32
S_cmp_locale_desc(pTHX_ SV *const str1, SV *const str2)
{
    PERL_ARGS_ASSERT_CMP_LOCALE_DESC;

    return -sv_cmp_locale(str1, str2);
}

#endif

/*
 * ex: set ts=8 sts=4 sw=4 et:
 */