//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31

#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63

#if defined(_AIX)
  .toc
#else
  .text
#endif

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
#
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
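#
# The offsets used below imply a register layout like this C sketch (the
# field names are illustrative; the authoritative definition is the
# Registers_x86 class in Registers.hpp):
#
#   struct gprs {        // byte offset
#     uint32_t eax;      //   0
#     uint32_t ebx;      //   4
#     uint32_t ecx;      //   8
#     uint32_t edx;      //  12
#     uint32_t edi;      //  16
#     uint32_t esi;      //  20
#     uint32_t ebp;      //  24
#     uint32_t esp;      //  28
#     uint32_t ss;       //  32
#     uint32_t eflags;   //  36
#     uint32_t eip;      //  40
#     // cs, ds, es, fs, gs follow
#   };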

  _LIBUNWIND_CET_ENDBR
  movl   4(%esp), %eax
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # the return address and eax are now in place at the top of the new stack
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  pop    %ecx
  jmp    *%ecx
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
#
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq  %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif
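#
# Offsets used below (a sketch implied by the loads; the authoritative layout
# is Registers_x86_64 in Registers.hpp):
#   rax 0, rbx 8, rcx 16, rdx 24, rdi 32, rsi 40, rbp 48, rsp 56,
#   r8-r15 at 64..120, rip at 128; on Win64, xmm0-xmm15 at 176 + 16*i.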

  _LIBUNWIND_CET_ENDBR
  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi),%xmm0
  movdqu 192(%rdi),%xmm1
  movdqu 208(%rdi),%xmm2
  movdqu 224(%rdi),%xmm3
  movdqu 240(%rdi),%xmm4
  movdqu 256(%rdi),%xmm5
  movdqu 272(%rdi),%xmm6
  movdqu 288(%rdi),%xmm7
  movdqu 304(%rdi),%xmm8
  movdqu 320(%rdi),%xmm9
  movdqu 336(%rdi),%xmm10
  movdqu 352(%rdi),%xmm11
  movdqu 368(%rdi),%xmm12
  movdqu 384(%rdi),%xmm13
  movdqu 400(%rdi),%xmm14
  movdqu 416(%rdi),%xmm15
#endif
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  pop    %rcx
  jmpq   *%rcx


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld    n, (8 * (n + 2))(3)
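// For example, PPC64_LR(6) expands to `ld 6, 64(3)`: GPR n sits at offset
// 8 * (n + 2), consistent with two 8-byte fields (SRR0/SRR1, an assumption
// suggested by the PPC64_OFFS_SRR0 load near the end) preceding the GPR
// array in the context.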

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  PPC64_LR(2)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#if defined(__VSX__)

  // restore VS registers
  // (note that this also restores floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// load VS register
#ifdef __LITTLE_ENDIAN__
// For little-endian targets, we need a swap since lxvd2x will load the register
// in the incorrect doubleword order.
// FIXME: once LE targets older than Power9 no longer need to be supported,
//        this can be changed to simply `lxv n, (16 * n)(4)`.
#define PPC64_LVS(n)         \
  lxvd2x  n, 0, 4           ;\
  xxswapd n, n              ;\
  addi    4, 4, 16
#else
#define PPC64_LVS(n)         \
  lxvd2x  n, 0, 4           ;\
  addi    4, 4, 16
#endif
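
// Each PPC64_LVS(n) loads 16 bytes into VS register n from the cursor in r4
// (plus a doubleword swap on LE) and advances r4 by 16, so consecutive
// invocations walk the context's VS array in order.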

  // restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

#ifdef __LITTLE_ENDIAN__
#define PPC64_CLVS_RESTORE(n)                    \
  addi   4, 3, PPC64_OFFS_FP + n * 16           ;\
  lxvd2x n, 0, 4                                ;\
  xxswapd n, n
#else
#define PPC64_CLVS_RESTORE(n)                    \
  addi   4, 3, PPC64_OFFS_FP + n * 16           ;\
  lxvd2x n, 0, 4
#endif

#if !defined(_AIX)
  // Use VRSAVE to conditionally restore the remaining VS regs, which are
  // where the V regs are mapped. In the AIX ABI, VRSAVE is not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVSl(n)                           \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n))         ;\
  beq    Ldone##n                               ;\
  PPC64_CLVS_RESTORE(n)                         ;\
Ldone##n:

#define PPC64_CLVSh(n)                           \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(63-n))         ;\
  beq    Ldone##n                               ;\
  PPC64_CLVS_RESTORE(n)                         ;\
Ldone##n:
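
// VRSAVE holds one bit per vector register, MSB for v0 down to LSB for v31.
// VS32-VS47 overlay v0-v15, so their bits sit in the upper halfword and are
// tested with andis.; VS48-VS63 overlay v16-v31 and are tested with andi.
// For example, PPC64_CLVSl(32) checks the v0 bit (mask 1 << 15 of the upper
// halfword) and restores VS32 only if it is set.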

#else

#define PPC64_CLVSl(n) PPC64_CLVS_RESTORE(n)
#define PPC64_CLVSh(n) PPC64_CLVS_RESTORE(n)

#endif // !defined(_AIX)

  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd   n, (PPC64_OFFS_FP + n * 16)(3)
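
// Note the 16-byte stride: each FPR occupies a 16-byte slot in the context
// because the FPRs alias half of the corresponding VS registers, so e.g.
// PPC64_LF(1) expands to `lfd 1, (PPC64_OFFS_FP + 16)(3)`.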

  // restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

#if defined(__ALTIVEC__)

#define PPC64_CLV_UNALIGNED_RESTORE(n)       \
  ld     0, (PPC64_OFFS_V + n * 16)(3)      ;\
  std    0, 0(4)                            ;\
  ld     0, (PPC64_OFFS_V + n * 16 + 8)(3)  ;\
  std    0, 8(4)                            ;\
  lvx    n, 0, 4
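
// lvx requires a 16-byte-aligned address, and the vector area of the context
// may not be aligned, so each vector is staged: two 8-byte integer loads and
// stores copy it into the aligned red-zone buffer at r4, then lvx loads it.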

#if !defined(_AIX)
  // restore vector registers if any are in use. In the AIX ABI, VRSAVE is
  // not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

#define PPC64_CLV_UNALIGNEDl(n)              \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n))     ;\
  beq    Ldone##n                           ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)            ;\
Ldone##n:

#define PPC64_CLV_UNALIGNEDh(n)              \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-n))     ;\
  beq    Ldone##n                           ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)            ;\
Ldone##n:

#else

#define PPC64_CLV_UNALIGNEDl(n) PPC64_CLV_UNALIGNED_RESTORE(n)
#define PPC64_CLV_UNALIGNEDh(n) PPC64_CLV_UNALIGNED_RESTORE(n)

#endif // !defined(_AIX)

  subi  4, 1, 16
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif
#endif

Lnovec:
  ld    0, PPC64_OFFS_CR(3)
  mtcr  0
  ld    0, PPC64_OFFS_SRR0(3)
  mtctr 0

#if defined(_AIX)
  // Once GPR1 is set to a higher address, AIX may wipe out the original
  // stack space below the new GPR1 value, and the context addressed by
  // GPR3 can live in that space. So save the context's GPR3 value in GPR0
  // before it can be wiped out. This clobbers GPR0, a volatile register.
  ld 0, (8 * (3 + 2))(3)          // load context GPR3 into GPR0
#else
  PPC64_LR(0)
#endif
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
#if defined(_AIX)
  mr 3, 0
#else
  PPC64_LR(3)
#endif
  bctr

#elif defined(__powerpc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  lwz     2,  16(3)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  lwz     6,  32(3)
  lwz     7,  36(3)
  lwz     8,  40(3)
  lwz     9,  44(3)
  lwz     10, 48(3)
  lwz     11, 52(3)
  lwz     12, 56(3)
  lwz     13, 60(3)
  lwz     14, 64(3)
  lwz     15, 68(3)
  lwz     16, 72(3)
  lwz     17, 76(3)
  lwz     18, 80(3)
  lwz     19, 84(3)
  lwz     20, 88(3)
  lwz     21, 92(3)
  lwz     22, 96(3)
  lwz     23,100(3)
  lwz     24,104(3)
  lwz     25,108(3)
  lwz     26,112(3)
  lwz     27,116(3)
  lwz     28,120(3)
  lwz     29,124(3)
  lwz     30,128(3)
  lwz     31,132(3)

#ifndef __NO_FPRS__
  // restore float registers
  lfd     0, 160(3)
  lfd     1, 168(3)
  lfd     2, 176(3)
  lfd     3, 184(3)
  lfd     4, 192(3)
  lfd     5, 200(3)
  lfd     6, 208(3)
  lfd     7, 216(3)
  lfd     8, 224(3)
  lfd     9, 232(3)
  lfd     10,240(3)
  lfd     11,248(3)
  lfd     12,256(3)
  lfd     13,264(3)
  lfd     14,272(3)
  lfd     15,280(3)
  lfd     16,288(3)
  lfd     17,296(3)
  lfd     18,304(3)
  lfd     19,312(3)
  lfd     20,320(3)
  lfd     21,328(3)
  lfd     22,336(3)
  lfd     23,344(3)
  lfd     24,352(3)
  lfd     25,360(3)
  lfd     26,368(3)
  lfd     27,376(3)
  lfd     28,384(3)
  lfd     29,392(3)
  lfd     30,400(3)
  lfd     31,408(3)
#endif

#if defined(__ALTIVEC__)

#define LOAD_VECTOR_RESTORE(_index)                 \
  lwz     0, 424+_index*16(3)             SEPARATOR \
  stw     0, 0(4)                         SEPARATOR \
  lwz     0, 424+_index*16+4(3)           SEPARATOR \
  stw     0, 4(4)                         SEPARATOR \
  lwz     0, 424+_index*16+8(3)           SEPARATOR \
  stw     0, 8(4)                         SEPARATOR \
  lwz     0, 424+_index*16+12(3)          SEPARATOR \
  stw     0, 12(4)                        SEPARATOR \
  lvx     _index, 0, 4

#if !defined(_AIX)
  // restore vector registers if any are in use. In the AIX ABI, VRSAVE
  // is not used.
  lwz     5, 156(3)       // test VRsave
  cmpwi   5, 0
  beq     Lnovec

#define LOAD_VECTOR_UNALIGNEDl(_index)                   \
  andis.  0, 5, (1 PPC_LEFT_SHIFT(15-_index))  SEPARATOR \
  beq     Ldone ## _index                      SEPARATOR \
  LOAD_VECTOR_RESTORE(_index)                  SEPARATOR \
  Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)                   \
  andi.   0, 5, (1 PPC_LEFT_SHIFT(31-_index))  SEPARATOR \
  beq     Ldone ## _index                      SEPARATOR \
  LOAD_VECTOR_RESTORE(_index)                  SEPARATOR \
  Ldone ## _index:

#else

#define LOAD_VECTOR_UNALIGNEDl(_index) LOAD_VECTOR_RESTORE(_index)
#define LOAD_VECTOR_UNALIGNEDh(_index) LOAD_VECTOR_RESTORE(_index)

#endif // !defined(_AIX)

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // clear the low 4 bits (round down to 16-byte alignment)
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer

  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
#endif

Lnovec:
  lwz     0, 136(3)   // __cr
  mtcr    0
  lwz     0, 148(3)   // __ctr
  mtctr   0           // note: overwritten below; ctr also carries the target pc
  lwz     0,   0(3)   // __srr0
  mtctr   0
  lwz     0,   8(3)   // do r0 now
  lwz     5,  28(3)   // do r5 now
  lwz     4,  24(3)   // do r4 now
  lwz     1,  12(3)   // do sp now
  lwz     3,  20(3)   // do r3 last
  bctr

#elif defined(__aarch64__)

//
// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
//
// On entry:
//  thread_state pointer is in x0
//
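// The loads below imply this context layout (a sketch; see Registers_arm64
// in Registers.hpp for the authoritative definition):
//   0x000-0x0E8  x0-x29, 8 bytes each
//   0x0F8        sp
//   0x100        pc (loaded into lr below)
//   0x110-0x208  d0-d31
//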
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr    x16,     [x0, #0x0F8]
  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  mov    sp,x16                 // restore sp
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
#if defined(__ARM_FEATURE_BTI_DEFAULT) && !defined(__ARM_ARCH_ISA_ARM)
  // 'bx' is not BTI setting when used with lr, therefore r12 is used instead
  mov r12, lr
  JMP(r12)
#else
  JMP(lr)
#endif

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the
  @ flags that enable them. We do not want to build the whole library that
  @ way (the compiler could then emit such instructions anywhere), but these
  @ routines are only reached when the personality routine actually needs
  @ the registers, which implies they exist on the target, so it is safe to
  @ execute them. The .fpu/.arch directives (or raw coprocessor mnemonics)
  @ used here let the assembler accept these instructions without changing
  @ how the rest of the library is compiled.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

  # restore integral registers
  l.lwz     r0,  0(r3)
  l.lwz     r1,  4(r3)
  l.lwz     r2,  8(r3)
  # skip r3 for now
  l.lwz     r4, 16(r3)
  l.lwz     r5, 20(r3)
  l.lwz     r6, 24(r3)
  l.lwz     r7, 28(r3)
  l.lwz     r8, 32(r3)
  # skip r9
  l.lwz    r10, 40(r3)
  l.lwz    r11, 44(r3)
  l.lwz    r12, 48(r3)
  l.lwz    r13, 52(r3)
  l.lwz    r14, 56(r3)
  l.lwz    r15, 60(r3)
  l.lwz    r16, 64(r3)
  l.lwz    r17, 68(r3)
  l.lwz    r18, 72(r3)
  l.lwz    r19, 76(r3)
  l.lwz    r20, 80(r3)
  l.lwz    r21, 84(r3)
  l.lwz    r22, 88(r3)
  l.lwz    r23, 92(r3)
  l.lwz    r24, 96(r3)
  l.lwz    r25,100(r3)
  l.lwz    r26,104(r3)
  l.lwz    r27,108(r3)
  l.lwz    r28,112(r3)
  l.lwz    r29,116(r3)
  l.lwz    r30,120(r3)
  l.lwz    r31,124(r3)

  # load new pc into ra
  l.lwz    r9, 128(r3)

  # at last, restore r3
  l.lwz    r3,  12(r3)

  # jump to pc
  l.jr     r9
   l.nop

#elif defined(__hexagon__)
# On entry:
#  thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
#
# void libunwind::Registers_hexagon::jumpto()
#
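# The offsets below imply GPR n at byte offset 4*n, the predicate registers
# at 128, and the target pc at 132 (a sketch; r31, the link register, is
# loaded from the pc slot so the final jumpr r31 lands on the restored pc).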
  r8 = memw(r0+#32)
  r9 = memw(r0+#36)
  r10 = memw(r0+#40)
  r11 = memw(r0+#44)

  r12 = memw(r0+#48)
  r13 = memw(r0+#52)
  r14 = memw(r0+#56)
  r15 = memw(r0+#60)

  r16 = memw(r0+#64)
  r17 = memw(r0+#68)
  r18 = memw(r0+#72)
  r19 = memw(r0+#76)

  r20 = memw(r0+#80)
  r21 = memw(r0+#84)
  r22 = memw(r0+#88)
  r23 = memw(r0+#92)

  r24 = memw(r0+#96)
  r25 = memw(r0+#100)
  r26 = memw(r0+#104)
  r27 = memw(r0+#108)

  r28 = memw(r0+#112)
  r29 = memw(r0+#116)
  r30 = memw(r0+#120)
  r31 = memw(r0+#132)

  r1 = memw(r0+#128)
  c4 = r1   // Predicate register
  r1 = memw(r0+#4)
  r0 = memw(r0)
  jumpr r31
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
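  // Layout implied by the offsets below (a sketch; the authoritative layout
  // is Registers_mips_o32 in Registers.hpp): GPR n at 4*n, pc at 4*32,
  // hi at 4*33, lo at 4*34, one pad word, then FPRs as 8-byte slots from 4*36.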
#ifdef __mips_hard_float
#if __mips_fpr != 64
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
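  // Layout implied below (a sketch; see Registers_mips_newabi in
  // Registers.hpp): GPR n at 8*n, pc at 8*32, hi at 8*33, lo at 8*34,
  // then FPR i at 280 + 8*i.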
#ifdef __mips_hard_float
  .irp i,FROM_0_TO_31
    ldc1 $f\i, (280+8*\i)($4)
  .endr
#endif
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
    ld $\i, (8 * \i)($4)
  .endr
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__) && defined(__arch64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_sparc646jumptoEv)
//
// void libunwind::Registers_sparc64::jumpto()
//
// On entry:
//  thread_state pointer is in %o0
//
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
  flushw               // spill all other register windows to the stack
  ldx  [%o0 + 0x08], %g1
  ldx  [%o0 + 0x10], %g2
  ldx  [%o0 + 0x18], %g3
  ldx  [%o0 + 0x20], %g4
  ldx  [%o0 + 0x28], %g5
  ldx  [%o0 + 0x30], %g6
  ldx  [%o0 + 0x38], %g7
  ldx  [%o0 + 0x48], %o1
  ldx  [%o0 + 0x50], %o2
  ldx  [%o0 + 0x58], %o3
  ldx  [%o0 + 0x60], %o4
  ldx  [%o0 + 0x68], %o5
  ldx  [%o0 + 0x70], %o6
  ldx  [%o0 + 0x78], %o7
  ldx  [%o0 + 0x80], %l0
  ldx  [%o0 + 0x88], %l1
  ldx  [%o0 + 0x90], %l2
  ldx  [%o0 + 0x98], %l3
  ldx  [%o0 + 0xa0], %l4
  ldx  [%o0 + 0xa8], %l5
  ldx  [%o0 + 0xb0], %l6
  ldx  [%o0 + 0xb8], %l7
  ldx  [%o0 + 0xc0], %i0
  ldx  [%o0 + 0xc8], %i1
  ldx  [%o0 + 0xd0], %i2
  ldx  [%o0 + 0xd8], %i3
  ldx  [%o0 + 0xe0], %i4
  ldx  [%o0 + 0xe8], %i5
  ldx  [%o0 + 0xf0], %i6
  ldx  [%o0 + 0xf8], %i7
  jmp  %o7
   ldx [%o0 + 0x40], %o0

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ta 3                 // flush register windows (software trap 3)
  ldd [%o0 + 64],  %l0
  ldd [%o0 + 72],  %l2
  ldd [%o0 + 80],  %l4
  ldd [%o0 + 88],  %l6
  ldd [%o0 + 96],  %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60],  %o7
  jmp %o7
   nop

#elif defined(__riscv)

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
# if defined(__riscv_flen)
  .irp i,FROM_0_TO_31
    FLOAD f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
  .endr
# endif
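  // ILOAD/FLOAD and the RISCV_* offset/size macros come from assembly.h and
  // select the load width matching __riscv_xlen/__riscv_flen. GPR n lives in
  // slot n of the context; slot 0 holds the pc, which is loaded into ra
  // below in place of the saved x1.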

  // x0 is zero
  ILOAD    x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
  .irp i,2,3,4,5,6,7,8,9
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  // skip a0 for now
  .irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  ILOAD    x10, (RISCV_ISIZE * 10)(a0)   // restore a0

  ret                       // jump to ra

#elif defined(__s390x__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv)
//
// void libunwind::Registers_s390x::jumpto()
//
// On entry:
//  thread_state pointer is in r2
//

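  // Layout implied below (a sketch; see Registers_s390x in Registers.hpp):
  // a 16-byte PSW (PSWM at 0, PSWA at 8), GPR n at 16 + 8*n, then
  // FPR i at 144 + 8*i.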
  // Skip PSWM, but load PSWA into r1
  lg %r1, 8(%r2)

  // Restore FPRs
  .irp i,FROM_0_TO_15
    ld %f\i, (144+8*\i)(%r2)
  .endr

  // Restore GPRs - skipping %r0 and %r1
  lmg  %r2, %r15, 32(%r2)

  // Return to PSWA (was loaded into %r1 above)
  br %r1

#elif defined(__loongarch__) && __loongarch_grlen == 64

//
// void libunwind::Registers_loongarch::jumpto()
//
// On entry:
//  thread_state pointer is in $a0($r4)
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
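  // Layout implied below (a sketch; see Registers_loongarch in
  // Registers.hpp): GPR n at 8*n, pc at 8*32, then FPR i at 8*33 + 8*i.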
# if __loongarch_frlen == 64
  .irp i,FROM_0_TO_31
    fld.d $f\i, $a0, (8 * 33 + 8 * \i)
  .endr
# endif

  // $r0 is zero
  .irp i,1,2,3
    ld.d $r\i, $a0, (8 * \i)
  .endr
  // skip $a0 for now
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ld.d $r\i, $a0, (8 * \i)
  .endr

  ld.d    $ra,  $a0, (8 * 32)  // load new pc into $ra
  ld.d    $a0,  $a0, (8 * 4)   // restore $a0 last

  jr      $ra

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE