//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
#
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
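#
# Field offsets used below (derived from the loads in this routine; the
# authoritative layout is the Registers_x86 struct in Registers.hpp):
#   0 eax   4 ebx   8 ecx  12 edx  16 edi  20 esi  24 ebp  28 esp  40 eip
# The trick: eax and the saved eip are staged onto the target stack first,
# so they can be restored last with a pop and a ret after esp is switched.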
  movl   4(%esp), %eax
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # we now have ret and eax pushed onto where new stack will be
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  ret        # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
#
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq  %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif
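# Field offsets assumed by the loads below (read off this routine; the
# Registers_x86_64 struct in Registers.hpp is the authoritative source):
#   0 rax   8 rbx  16 rcx  24 rdx  32 rdi  40 rsi  48 rbp  56 rsp
#  64 r8 ... 120 r15 (8 bytes apart)  128 rip
# On _WIN64, xmm0..xmm15 follow at 176 + 16*n. As with the i386 path, the
# saved rdi and rip are staged on the target stack so they can be restored
# last via pop and ret once rsp has been switched.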

  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi),%xmm0
  movdqu 192(%rdi),%xmm1
  movdqu 208(%rdi),%xmm2
  movdqu 224(%rdi),%xmm3
  movdqu 240(%rdi),%xmm4
  movdqu 256(%rdi),%xmm5
  movdqu 272(%rdi),%xmm6
  movdqu 288(%rdi),%xmm7
  movdqu 304(%rdi),%xmm8
  movdqu 320(%rdi),%xmm9
  movdqu 336(%rdi),%xmm10
  movdqu 352(%rdi),%xmm11
  movdqu 368(%rdi),%xmm12
  movdqu 384(%rdi),%xmm13
  movdqu 400(%rdi),%xmm14
  movdqu 416(%rdi),%xmm15
#endif
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  ret            # rip was saved here


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld    n, (8 * (n + 2))(3)
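// For illustration (this is just the macro above expanded by hand):
//   PPC64_LR(14)  =>  ld 14, (8 * 16)(3)  =>  ld 14, 128(3)
// i.e. GPR n lives at offset 8 * (n + 2) from the context pointer in r3.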

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  PPC64_LR(2)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#if defined(__VSX__)

  // restore VS registers
  // (note that this also restores floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// load VS register
#define PPC64_LVS(n)         \
  lxvd2x  n, 0, 4           ;\
  addi    4, 4, 16
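// Usage note (derived from the sequence below, not new behaviour): r4 starts
// at r3 + PPC64_OFFS_FP and is advanced by 16 after each load, so the n-th
// PPC64_LVS in the in-order list below loads VS register n from offset
// PPC64_OFFS_FP + 16*n.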

  // restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

  // use VRSAVE to conditionally restore the remaining VS regs, which are
  // the ones that overlap the V registers

  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVS_BOTTOM(n)               \
  beq    Ldone##n                         ;\
  addi   4, 3, PPC64_OFFS_FP + n * 16     ;\
  lxvd2x n, 0, 4                          ;\
Ldone##n:

#define PPC64_CLVSl(n)                    \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n))  ;\
PPC64_CLVS_BOTTOM(n)

#define PPC64_CLVSh(n)                    \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(63-n))  ;\
PPC64_CLVS_BOTTOM(n)
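
// Worked example (just expanding the macros above): PPC64_CLVSl(32) does
//   andis. 0, 5, 0x8000        // tests VRSAVE bit 0x80000000, i.e. v0
//   beq    Ldone32
//   addi   4, 3, PPC64_OFFS_FP + 512
//   lxvd2x 32, 0, 4            // vs32 overlays v0
// Ldone32:
// so a V register is only reloaded when its VRSAVE bit says it is live.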

  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd   n, (PPC64_OFFS_FP + n * 16)(3)
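// For illustration: PPC64_LF(31) expands to lfd 31, (PPC64_OFFS_FP + 496)(3).
// The FP image uses the same 16-byte stride as the VS save area above; lfd
// loads the 8-byte double at the start of each 16-byte slot.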

  // restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

#if defined(__ALTIVEC__)
  // restore vector registers if any are in use
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

  subi  4, 1, 16
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer

#define PPC64_CLV_UNALIGNED_BOTTOM(n)            \
  beq    Ldone##n                               ;\
  ld     0, (PPC64_OFFS_V + n * 16)(3)          ;\
  std    0, 0(4)                                ;\
  ld     0, (PPC64_OFFS_V + n * 16 + 8)(3)      ;\
  std    0, 8(4)                                ;\
  lvx    n, 0, 4                                ;\
Ldone ## n:

#define PPC64_CLV_UNALIGNEDl(n)                 \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n))        ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)

#define PPC64_CLV_UNALIGNEDh(n)                \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-n))       ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)
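
// Sketch of what one expansion does (no new behaviour): for v0,
// PPC64_CLV_UNALIGNEDl(0) tests VRSAVE bit 0x80000000 and, if set, copies the
// 16-byte image at PPC64_OFFS_V in two 8-byte pieces to the 16-byte aligned
// scratch slot at r4 (sp - 16, in the red zone), then issues lvx 0, 0, 4.
// The bounce through the red zone is needed because lvx ignores the low 4
// address bits, so it cannot load directly from a possibly unaligned source.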

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif
#endif

Lnovec:
  ld    0, PPC64_OFFS_CR(3)
  mtcr  0
  ld    0, PPC64_OFFS_SRR0(3)
  mtctr 0

  PPC64_LR(0)
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
  PPC64_LR(3)
  bctr

#elif defined(__ppc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  lwz     2,  16(3)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  lwz     6,  32(3)
  lwz     7,  36(3)
  lwz     8,  40(3)
  lwz     9,  44(3)
  lwz     10, 48(3)
  lwz     11, 52(3)
  lwz     12, 56(3)
  lwz     13, 60(3)
  lwz     14, 64(3)
  lwz     15, 68(3)
  lwz     16, 72(3)
  lwz     17, 76(3)
  lwz     18, 80(3)
  lwz     19, 84(3)
  lwz     20, 88(3)
  lwz     21, 92(3)
  lwz     22, 96(3)
  lwz     23,100(3)
  lwz     24,104(3)
  lwz     25,108(3)
  lwz     26,112(3)
  lwz     27,116(3)
  lwz     28,120(3)
  lwz     29,124(3)
  lwz     30,128(3)
  lwz     31,132(3)

#ifndef __NO_FPRS__
  // restore float registers
  lfd     0, 160(3)
  lfd     1, 168(3)
  lfd     2, 176(3)
  lfd     3, 184(3)
  lfd     4, 192(3)
  lfd     5, 200(3)
  lfd     6, 208(3)
  lfd     7, 216(3)
  lfd     8, 224(3)
  lfd     9, 232(3)
  lfd     10,240(3)
  lfd     11,248(3)
  lfd     12,256(3)
  lfd     13,264(3)
  lfd     14,272(3)
  lfd     15,280(3)
  lfd     16,288(3)
  lfd     17,296(3)
  lfd     18,304(3)
  lfd     19,312(3)
  lfd     20,320(3)
  lfd     21,328(3)
  lfd     22,336(3)
  lfd     23,344(3)
  lfd     24,352(3)
  lfd     25,360(3)
  lfd     26,368(3)
  lfd     27,376(3)
  lfd     28,384(3)
  lfd     29,392(3)
  lfd     30,400(3)
  lfd     31,408(3)
#endif

#if defined(__ALTIVEC__)
  // restore vector registers if any are in use
  lwz     5, 156(3)       // test VRsave
  cmpwi   5, 0
  beq     Lnovec

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // clear the low 4 bits (16-byte align)
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer


#define LOAD_VECTOR_UNALIGNEDl(_index)          \
  andis.  0, 5, (1 PPC_LEFT_SHIFT(15-_index)) SEPARATOR \
  beq     Ldone ## _index             SEPARATOR \
  lwz     0, 424+_index*16(3)         SEPARATOR \
  stw     0, 0(4)                     SEPARATOR \
  lwz     0, 424+_index*16+4(3)       SEPARATOR \
  stw     0, 4(4)                     SEPARATOR \
  lwz     0, 424+_index*16+8(3)       SEPARATOR \
  stw     0, 8(4)                     SEPARATOR \
  lwz     0, 424+_index*16+12(3)      SEPARATOR \
  stw     0, 12(4)                    SEPARATOR \
  lvx     _index, 0, 4                SEPARATOR \
  Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)          \
  andi.   0, 5, (1 PPC_LEFT_SHIFT(31-_index)) SEPARATOR \
  beq     Ldone ## _index             SEPARATOR \
  lwz     0, 424+_index*16(3)         SEPARATOR \
  stw     0, 0(4)                     SEPARATOR \
  lwz     0, 424+_index*16+4(3)       SEPARATOR \
  stw     0, 4(4)                     SEPARATOR \
  lwz     0, 424+_index*16+8(3)       SEPARATOR \
  stw     0, 8(4)                     SEPARATOR \
  lwz     0, 424+_index*16+12(3)      SEPARATOR \
  stw     0, 12(4)                    SEPARATOR \
  lvx     _index, 0, 4                SEPARATOR \
  Ldone ## _index:

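// These do for 32-bit PowerPC what PPC64_CLV_UNALIGNED* does above: test the
// VRSAVE bit for v<_index>, and if it is set, copy the (possibly unaligned)
// 16-byte image at 424 + _index*16 word-by-word to the aligned red-zone slot
// in r4, then load it with lvx. For example, LOAD_VECTOR_UNALIGNEDl(0) tests
// VRSAVE mask 0x80000000 (v0) and LOAD_VECTOR_UNALIGNEDh(31) tests mask 0x1 (v31).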

  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
#endif

Lnovec:
  lwz     0, 136(3)   // __cr
  mtcr    0
  lwz     0, 148(3)   // __ctr
  mtctr   0
  lwz     0,   0(3)   // __srr0
  mtctr   0
  lwz     0,   8(3)   // do r0 now
  lwz     5,  28(3)   // do r5 now
  lwz     4,  24(3)   // do r4 now
  lwz     1,  12(3)   // do sp now
  lwz     3,  20(3)   // do r3 last
  bctr

#elif defined(__aarch64__)

//
// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
//
// On entry:
//  thread_state pointer is in x0
//
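//  Offsets assumed by the loads below (read off this routine; the
//  Registers_arm64 struct in Registers.hpp is authoritative): x0..x29 live in
//  8-byte slots at offset 8*n from the context pointer, the saved sp is at
//  0x0F8, the saved pc at 0x100, and d0..d31 follow from 0x110 to 0x208.
//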
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr    x16,     [x0, #0x0F8]
  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  mov    sp,x16                 // restore sp
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
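  @ The pointer arithmetic above, spelled out (no change in behaviour): the
  @ adds advances r0 from the register-array base to the r8 slot (8 * 4 bytes
  @ in), the ldm with writeback leaves r0 at offset 0x30, and the subs brings
  @ it back to the base so the final ldm below can reload r0-r7 from slots 0-7.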
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

  # restore integral registers
  l.lwz     r0,  0(r3)
  l.lwz     r1,  4(r3)
  l.lwz     r2,  8(r3)
  # skip r3 for now
  l.lwz     r4, 16(r3)
  l.lwz     r5, 20(r3)
  l.lwz     r6, 24(r3)
  l.lwz     r7, 28(r3)
  l.lwz     r8, 32(r3)
  # skip r9
  l.lwz    r10, 40(r3)
  l.lwz    r11, 44(r3)
  l.lwz    r12, 48(r3)
  l.lwz    r13, 52(r3)
  l.lwz    r14, 56(r3)
  l.lwz    r15, 60(r3)
  l.lwz    r16, 64(r3)
  l.lwz    r17, 68(r3)
  l.lwz    r18, 72(r3)
  l.lwz    r19, 76(r3)
  l.lwz    r20, 80(r3)
  l.lwz    r21, 84(r3)
  l.lwz    r22, 88(r3)
  l.lwz    r23, 92(r3)
  l.lwz    r24, 96(r3)
  l.lwz    r25,100(r3)
  l.lwz    r26,104(r3)
  l.lwz    r27,108(r3)
  l.lwz    r28,112(r3)
  l.lwz    r29,116(r3)
  l.lwz    r30,120(r3)
  l.lwz    r31,124(r3)

  # load new pc into r9 while r3 still points at the context
  l.lwz    r9, 128(r3)

  # at last, restore r3
  l.lwz    r3,  12(r3)

  # jump to pc
  l.jr     r9
   l.nop

#elif defined(__hexagon__)
# On entry:
#  thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
#
# void libunwind::Registers_hexagon::jumpto()
#
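# Offsets below, as read straight off the loads (the Registers_hexagon struct
# in Registers.hpp is authoritative): r0-r30 sit in 4-byte slots at 4*n, the
# predicate word restored through c4 is at +128, and the value jumped to
# through r31 is at +132.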
  r8 = memw(r0+#32)
  r9 = memw(r0+#36)
  r10 = memw(r0+#40)
  r11 = memw(r0+#44)

  r12 = memw(r0+#48)
  r13 = memw(r0+#52)
  r14 = memw(r0+#56)
  r15 = memw(r0+#60)

  r16 = memw(r0+#64)
  r17 = memw(r0+#68)
  r18 = memw(r0+#72)
  r19 = memw(r0+#76)

  r20 = memw(r0+#80)
  r21 = memw(r0+#84)
  r22 = memw(r0+#88)
  r23 = memw(r0+#92)

  r24 = memw(r0+#96)
  r25 = memw(r0+#100)
  r26 = memw(r0+#104)
  r27 = memw(r0+#108)

  r28 = memw(r0+#112)
  r29 = memw(r0+#116)
  r30 = memw(r0+#120)
  r31 = memw(r0+#132)

  r1 = memw(r0+#128)
  c4 = r1   // Predicate register
  r1 = memw(r0+#4)
  r0 = memw(r0)
  jumpr r31
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
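// Layout implied by the offsets below (the Registers_mips_o32 struct in
// Registers.hpp is authoritative): GPR n at 4*n, pc at 4*32, hi at 4*33,
// lo at 4*34, and the FP save area starting at 4*36 with 8 bytes per slot.
// When __mips_fpr != 64 only the even-numbered FPRs are reloaded, since each
// ldc1 then fills an even/odd 32-bit register pair.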
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr != 64
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
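// Same scheme as the O32 variant above, but with 8-byte slots (read off the
// code; Registers.hpp is authoritative): GPR n at 8*n, pc at 8*32, hi at
// 8*33, lo at 8*34, and $f0-$f31 at 8*(35+n).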
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  ldc1  $f0, (8 * 35)($4)
  ldc1  $f1, (8 * 36)($4)
  ldc1  $f2, (8 * 37)($4)
  ldc1  $f3, (8 * 38)($4)
  ldc1  $f4, (8 * 39)($4)
  ldc1  $f5, (8 * 40)($4)
  ldc1  $f6, (8 * 41)($4)
  ldc1  $f7, (8 * 42)($4)
  ldc1  $f8, (8 * 43)($4)
  ldc1  $f9, (8 * 44)($4)
  ldc1  $f10, (8 * 45)($4)
  ldc1  $f11, (8 * 46)($4)
  ldc1  $f12, (8 * 47)($4)
  ldc1  $f13, (8 * 48)($4)
  ldc1  $f14, (8 * 49)($4)
  ldc1  $f15, (8 * 50)($4)
  ldc1  $f16, (8 * 51)($4)
  ldc1  $f17, (8 * 52)($4)
  ldc1  $f18, (8 * 53)($4)
  ldc1  $f19, (8 * 54)($4)
  ldc1  $f20, (8 * 55)($4)
  ldc1  $f21, (8 * 56)($4)
  ldc1  $f22, (8 * 57)($4)
  ldc1  $f23, (8 * 58)($4)
  ldc1  $f24, (8 * 59)($4)
  ldc1  $f25, (8 * 60)($4)
  ldc1  $f26, (8 * 61)($4)
  ldc1  $f27, (8 * 62)($4)
  ldc1  $f28, (8 * 63)($4)
  ldc1  $f29, (8 * 64)($4)
  ldc1  $f30, (8 * 65)($4)
  ldc1  $f31, (8 * 66)($4)
#endif
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  ld    $5, (8 * 5)($4)
  ld    $6, (8 * 6)($4)
  ld    $7, (8 * 7)($4)
  ld    $8, (8 * 8)($4)
  ld    $9, (8 * 9)($4)
  ld    $10, (8 * 10)($4)
  ld    $11, (8 * 11)($4)
  ld    $12, (8 * 12)($4)
  ld    $13, (8 * 13)($4)
  ld    $14, (8 * 14)($4)
  ld    $15, (8 * 15)($4)
  ld    $16, (8 * 16)($4)
  ld    $17, (8 * 17)($4)
  ld    $18, (8 * 18)($4)
  ld    $19, (8 * 19)($4)
  ld    $20, (8 * 20)($4)
  ld    $21, (8 * 21)($4)
  ld    $22, (8 * 22)($4)
  ld    $23, (8 * 23)($4)
  ld    $24, (8 * 24)($4)
  ld    $25, (8 * 25)($4)
  ld    $26, (8 * 26)($4)
  ld    $27, (8 * 27)($4)
  ld    $28, (8 * 28)($4)
  ld    $29, (8 * 29)($4)
  ld    $30, (8 * 30)($4)
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
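// Notes on the body below (restating what the instructions do): the "ta 3"
// software trap is the flush-windows trap on SVR4-style SPARC kernels, so all
// register windows are written to the stack before the locals and ins are
// reloaded. Each ldd fills an even/odd register pair from a 64-bit slot (one
// 4-byte word per register), and the word at offset 60 (the o7 slot) supplies
// the address jumped to.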
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ta 3
  ldd [%o0 + 64],  %l0
  ldd [%o0 + 72],  %l2
  ldd [%o0 + 80],  %l4
  ldd [%o0 + 88],  %l6
  ldd [%o0 + 96],  %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60],  %o7
  jmp %o7
   nop

#elif defined(__riscv)

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
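// The ILOAD/FLOAD mnemonics and the RISCV_ISIZE/RISCV_FOFFSET/RISCV_FSIZE
// constants come from assembly.h and pick the load width matching the
// target's XLEN/FLEN. Slot 0 of the integer area holds the saved pc (loaded
// into ra below) and slot n holds x<n>; for example, assuming RISCV_ISIZE is
// 8 on RV64, "ILOAD x2, (RISCV_ISIZE * 2)(a0)" is simply "ld sp, 16(a0)".
//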
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
# if defined(__riscv_flen)
  FLOAD    f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
  FLOAD    f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
  FLOAD    f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
  FLOAD    f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
  FLOAD    f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
  FLOAD    f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
  FLOAD    f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
  FLOAD    f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
  FLOAD    f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
  FLOAD    f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
  FLOAD    f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
  FLOAD    f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
  FLOAD    f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
  FLOAD    f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
  FLOAD    f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
  FLOAD    f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
  FLOAD    f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
  FLOAD    f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
  FLOAD    f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
  FLOAD    f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
  FLOAD    f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
  FLOAD    f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
  FLOAD    f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
  FLOAD    f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
  FLOAD    f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
  FLOAD    f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
  FLOAD    f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
  FLOAD    f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
  FLOAD    f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
  FLOAD    f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
  FLOAD    f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
  FLOAD    f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
# endif

  // x0 is zero
  ILOAD    x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
  ILOAD    x2, (RISCV_ISIZE * 2)(a0)
  ILOAD    x3, (RISCV_ISIZE * 3)(a0)
  ILOAD    x4, (RISCV_ISIZE * 4)(a0)
  ILOAD    x5, (RISCV_ISIZE * 5)(a0)
  ILOAD    x6, (RISCV_ISIZE * 6)(a0)
  ILOAD    x7, (RISCV_ISIZE * 7)(a0)
  ILOAD    x8, (RISCV_ISIZE * 8)(a0)
  ILOAD    x9, (RISCV_ISIZE * 9)(a0)
  // skip a0 for now
  ILOAD    x11, (RISCV_ISIZE * 11)(a0)
  ILOAD    x12, (RISCV_ISIZE * 12)(a0)
  ILOAD    x13, (RISCV_ISIZE * 13)(a0)
  ILOAD    x14, (RISCV_ISIZE * 14)(a0)
  ILOAD    x15, (RISCV_ISIZE * 15)(a0)
  ILOAD    x16, (RISCV_ISIZE * 16)(a0)
  ILOAD    x17, (RISCV_ISIZE * 17)(a0)
  ILOAD    x18, (RISCV_ISIZE * 18)(a0)
  ILOAD    x19, (RISCV_ISIZE * 19)(a0)
  ILOAD    x20, (RISCV_ISIZE * 20)(a0)
  ILOAD    x21, (RISCV_ISIZE * 21)(a0)
  ILOAD    x22, (RISCV_ISIZE * 22)(a0)
  ILOAD    x23, (RISCV_ISIZE * 23)(a0)
  ILOAD    x24, (RISCV_ISIZE * 24)(a0)
  ILOAD    x25, (RISCV_ISIZE * 25)(a0)
  ILOAD    x26, (RISCV_ISIZE * 26)(a0)
  ILOAD    x27, (RISCV_ISIZE * 27)(a0)
  ILOAD    x28, (RISCV_ISIZE * 28)(a0)
  ILOAD    x29, (RISCV_ISIZE * 29)(a0)
  ILOAD    x30, (RISCV_ISIZE * 30)(a0)
  ILOAD    x31, (RISCV_ISIZE * 31)(a0)
  ILOAD    x10, (RISCV_ISIZE * 10)(a0)   // restore a0

  ret                       // jump to ra

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE
