//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

#if defined(_AIX)
  .toc
#else
  .text
#endif

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
#
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +

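# Strategy: eax, esp and eip cannot all be restored with plain moves, so the
# saved eax and eip are first staged just below the target stack; after esp is
# switched, they are popped back and an indirect jmp transfers control.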
  _LIBUNWIND_CET_ENDBR
  movl   4(%esp), %eax
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # we now have ret and eax pushed onto where the new stack will be
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  pop    %ecx
  jmp    *%ecx
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
#
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq  %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif
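# Strategy: as on i386, the saved rdi and rip cannot be restored directly, so
# they are staged 16 bytes below the target stack pointer; after rsp is
# switched they are popped and jmpq *%rcx jumps to the saved rip.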

  _LIBUNWIND_CET_ENDBR
  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi),%xmm0
  movdqu 192(%rdi),%xmm1
  movdqu 208(%rdi),%xmm2
  movdqu 224(%rdi),%xmm3
  movdqu 240(%rdi),%xmm4
  movdqu 256(%rdi),%xmm5
  movdqu 272(%rdi),%xmm6
  movdqu 288(%rdi),%xmm7
  movdqu 304(%rdi),%xmm8
  movdqu 320(%rdi),%xmm9
  movdqu 336(%rdi),%xmm10
  movdqu 352(%rdi),%xmm11
  movdqu 368(%rdi),%xmm12
  movdqu 384(%rdi),%xmm13
  movdqu 400(%rdi),%xmm14
  movdqu 416(%rdi),%xmm15
#endif
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  pop    %rcx
  jmpq   *%rcx


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld    n, (8 * (n + 2))(3)
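// GPR n is read from byte offset 8 * (n + 2) of the context addressed by r3.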

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  PPC64_LR(2)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#if defined(__VSX__)

  // restore VS registers
  // (note that this also restores floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// load VS register
#define PPC64_LVS(n)         \
  lxvd2x  n, 0, 4           ;\
  addi    4, 4, 16
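// r4 walks the FP/VS save area: each use loads one 16-byte VS register and
// then advances r4 by 16 bytes.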

  // restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

#define PPC64_CLVS_RESTORE(n)                    \
  addi   4, 3, PPC64_OFFS_FP + n * 16           ;\
  lxvd2x n, 0, 4

#if !defined(_AIX)
  // Use VRSAVE to conditionally restore the remaining VS regs, which are
  // where the V regs are mapped. In the AIX ABI, VRSAVE is not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVSl(n)                           \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n))         ;\
  beq    Ldone##n                               ;\
  PPC64_CLVS_RESTORE(n)                         ;\
Ldone##n:

#define PPC64_CLVSh(n)                           \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(63-n))         ;\
  beq    Ldone##n                               ;\
  PPC64_CLVS_RESTORE(n)                         ;\
Ldone##n:

#else

#define PPC64_CLVSl(n) PPC64_CLVS_RESTORE(n)
#define PPC64_CLVSh(n) PPC64_CLVS_RESTORE(n)

#endif // !defined(_AIX)
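// The 'l' form (VS 32-47) tests the matching VRSAVE bit with andis., the 'h'
// form (VS 48-63) with andi.; a register whose bit is clear is left untouched.
// On AIX both forms restore unconditionally.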

  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd   n, (PPC64_OFFS_FP + n * 16)(3)
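// FP reg n occupies the first doubleword of the 16-byte slot at
// PPC64_OFFS_FP + n * 16, so the same offsets are used as in the VSX path.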

  // restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

#if defined(__ALTIVEC__)

#define PPC64_CLV_UNALIGNED_RESTORE(n)       \
  ld     0, (PPC64_OFFS_V + n * 16)(3)      ;\
  std    0, 0(4)                            ;\
  ld     0, (PPC64_OFFS_V + n * 16 + 8)(3)  ;\
  std    0, 8(4)                            ;\
  lvx    n, 0, 4

#if !defined(_AIX)
  // restore vector registers if any are in use. In the AIX ABI, VRSAVE is
  // not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

#define PPC64_CLV_UNALIGNEDl(n)              \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n))     ;\
  beq    Ldone##n                           ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)            ;\
Ldone  ## n:

#define PPC64_CLV_UNALIGNEDh(n)              \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-n))     ;\
  beq    Ldone##n                           ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)            ;\
Ldone  ## n:

#else

#define PPC64_CLV_UNALIGNEDl(n) PPC64_CLV_UNALIGNED_RESTORE(n)
#define PPC64_CLV_UNALIGNEDh(n) PPC64_CLV_UNALIGNED_RESTORE(n)

#endif // !defined(_AIX)

  subi  4, 1, 16
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif
#endif

Lnovec:
  ld    0, PPC64_OFFS_CR(3)
  mtcr  0
  ld    0, PPC64_OFFS_SRR0(3)
  mtctr 0

#if defined(_AIX)
  // After setting GPR1 to a higher address, AIX wipes out the original
  // stack space below that address, which is invalidated by the new GPR1
  // value. Use GPR0 to save GPR3 from the context before it is wiped out.
  // This clobbers GPR0, which is acceptable because it is a volatile register.
  ld 0, (8 * (3 + 2))(3)
#else
  PPC64_LR(0)
#endif
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
#if defined(_AIX)
  mr 3, 0
#else
  PPC64_LR(3)
#endif
  bctr

#elif defined(__powerpc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  lwz     2,  16(3)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  lwz     6,  32(3)
  lwz     7,  36(3)
  lwz     8,  40(3)
  lwz     9,  44(3)
  lwz     10, 48(3)
  lwz     11, 52(3)
  lwz     12, 56(3)
  lwz     13, 60(3)
  lwz     14, 64(3)
  lwz     15, 68(3)
  lwz     16, 72(3)
  lwz     17, 76(3)
  lwz     18, 80(3)
  lwz     19, 84(3)
  lwz     20, 88(3)
  lwz     21, 92(3)
  lwz     22, 96(3)
  lwz     23,100(3)
  lwz     24,104(3)
  lwz     25,108(3)
  lwz     26,112(3)
  lwz     27,116(3)
  lwz     28,120(3)
  lwz     29,124(3)
  lwz     30,128(3)
  lwz     31,132(3)

#ifndef __NO_FPRS__
  // restore float registers
  lfd     0, 160(3)
  lfd     1, 168(3)
  lfd     2, 176(3)
  lfd     3, 184(3)
  lfd     4, 192(3)
  lfd     5, 200(3)
  lfd     6, 208(3)
  lfd     7, 216(3)
  lfd     8, 224(3)
  lfd     9, 232(3)
  lfd     10,240(3)
  lfd     11,248(3)
  lfd     12,256(3)
  lfd     13,264(3)
  lfd     14,272(3)
  lfd     15,280(3)
  lfd     16,288(3)
  lfd     17,296(3)
  lfd     18,304(3)
  lfd     19,312(3)
  lfd     20,320(3)
  lfd     21,328(3)
  lfd     22,336(3)
  lfd     23,344(3)
  lfd     24,352(3)
  lfd     25,360(3)
  lfd     26,368(3)
  lfd     27,376(3)
  lfd     28,384(3)
  lfd     29,392(3)
  lfd     30,400(3)
  lfd     31,408(3)
#endif

#if defined(__ALTIVEC__)

#define LOAD_VECTOR_RESTORE(_index)                 \
  lwz     0, 424+_index*16(3)             SEPARATOR \
  stw     0, 0(4)                         SEPARATOR \
  lwz     0, 424+_index*16+4(3)           SEPARATOR \
  stw     0, 4(4)                         SEPARATOR \
  lwz     0, 424+_index*16+8(3)           SEPARATOR \
  stw     0, 8(4)                         SEPARATOR \
  lwz     0, 424+_index*16+12(3)          SEPARATOR \
  stw     0, 12(4)                        SEPARATOR \
  lvx     _index, 0, 4

#if !defined(_AIX)
  // restore vector registers if any are in use. In the AIX ABI, VRSAVE
  // is not used.
  lwz     5, 156(3)       // test VRsave
  cmpwi   5, 0
  beq     Lnovec

#define LOAD_VECTOR_UNALIGNEDl(_index)                   \
  andis.  0, 5, (1 PPC_LEFT_SHIFT(15-_index))  SEPARATOR \
  beq     Ldone ## _index                      SEPARATOR \
  LOAD_VECTOR_RESTORE(_index)                  SEPARATOR \
  Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)                   \
  andi.   0, 5, (1 PPC_LEFT_SHIFT(31-_index))  SEPARATOR \
  beq     Ldone ## _index                      SEPARATOR \
  LOAD_VECTOR_RESTORE(_index)                  SEPARATOR \
  Ldone ## _index:

#else

#define LOAD_VECTOR_UNALIGNEDl(_index) LOAD_VECTOR_RESTORE(_index)
#define LOAD_VECTOR_UNALIGNEDh(_index) LOAD_VECTOR_RESTORE(_index)

#endif // !defined(_AIX)

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // clear low 4 bits (16-byte align)
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer

  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)
#endif

Lnovec:
  lwz     0, 136(3)   // __cr
  mtcr    0
  lwz     0, 148(3)   // __ctr
  mtctr   0
  lwz     0,   0(3)   // __ssr0
  mtctr   0
  lwz     0,   8(3)   // do r0 now
  lwz     5,  28(3)   // do r5 now
  lwz     4,  24(3)   // do r4 now
  lwz     1,  12(3)   // do sp now
  lwz     3,  20(3)   // do r3 last
  bctr

#elif defined(__aarch64__)

//
// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr
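  // Note: the frame's saved lr value is not reloaded; x30 is loaded with the
  // target pc so that the final ret below transfers control there.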

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr    x16,     [x0, #0x0F8]
  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  mov    sp,x16                 // restore sp
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
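  @ r0 was advanced by 0x20 and the ldm above added another 0x10, so
  @ subtracting 0x30 points r0 back at the start of the context.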
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
#if defined(__ARM_FEATURE_BTI_DEFAULT) && !defined(__ARM_ARCH_ISA_ARM)
  // 'bx' is not BTI-setting when used with lr, therefore r12 is used instead
  mov r12, lr
  JMP(r12)
#else
  JMP(lr)
#endif

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to compile the library that way (the
  @ compiler must not emit instructions that touch those registers), but this
  @ code only runs when the personality routine needs these registers, which
  @ implies they are actually available on the target, so it is safe to execute.
  @ The .fpu directive above lets the assembler accept the VFP mnemonic used below.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

  # restore integral registers
  l.lwz     r0,  0(r3)
  l.lwz     r1,  4(r3)
  l.lwz     r2,  8(r3)
  # skip r3 for now
  l.lwz     r4, 16(r3)
  l.lwz     r5, 20(r3)
  l.lwz     r6, 24(r3)
  l.lwz     r7, 28(r3)
  l.lwz     r8, 32(r3)
  # skip r9
  l.lwz    r10, 40(r3)
  l.lwz    r11, 44(r3)
  l.lwz    r12, 48(r3)
  l.lwz    r13, 52(r3)
  l.lwz    r14, 56(r3)
  l.lwz    r15, 60(r3)
  l.lwz    r16, 64(r3)
  l.lwz    r17, 68(r3)
  l.lwz    r18, 72(r3)
  l.lwz    r19, 76(r3)
  l.lwz    r20, 80(r3)
  l.lwz    r21, 84(r3)
  l.lwz    r22, 88(r3)
  l.lwz    r23, 92(r3)
  l.lwz    r24, 96(r3)
  l.lwz    r25,100(r3)
  l.lwz    r26,104(r3)
  l.lwz    r27,108(r3)
  l.lwz    r28,112(r3)
  l.lwz    r29,116(r3)
  l.lwz    r30,120(r3)
  l.lwz    r31,124(r3)

  # load new pc into ra
  l.lwz    r9, 128(r3)

  # at last, restore r3
  l.lwz    r3,  12(r3)

  # jump to pc
  l.jr     r9
   l.nop

#elif defined(__hexagon__)
# On entry:
#  thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
#
# void libunwind::Registers_hexagon::jumpto()
#
  r8 = memw(r0+#32)
  r9 = memw(r0+#36)
  r10 = memw(r0+#40)
  r11 = memw(r0+#44)

  r12 = memw(r0+#48)
  r13 = memw(r0+#52)
  r14 = memw(r0+#56)
  r15 = memw(r0+#60)

  r16 = memw(r0+#64)
  r17 = memw(r0+#68)
  r18 = memw(r0+#72)
  r19 = memw(r0+#76)

  r20 = memw(r0+#80)
  r21 = memw(r0+#84)
  r22 = memw(r0+#88)
  r23 = memw(r0+#92)

  r24 = memw(r0+#96)
  r25 = memw(r0+#100)
  r26 = memw(r0+#104)
  r27 = memw(r0+#108)

  r28 = memw(r0+#112)
  r29 = memw(r0+#116)
  r30 = memw(r0+#120)
  r31 = memw(r0+#132)

  r1 = memw(r0+#128)
  c4 = r1   // Predicate register
  r1 = memw(r0+#4)
  r0 = memw(r0)
  jumpr r31
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr != 64
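  // With 32-bit FPRs (FR=0), each ldc1 fills an even/odd register pair, so
  // only the even-numbered registers need to be reloaded here.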
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  ldc1  $f0, (8 * 35)($4)
  ldc1  $f1, (8 * 36)($4)
  ldc1  $f2, (8 * 37)($4)
  ldc1  $f3, (8 * 38)($4)
  ldc1  $f4, (8 * 39)($4)
  ldc1  $f5, (8 * 40)($4)
  ldc1  $f6, (8 * 41)($4)
  ldc1  $f7, (8 * 42)($4)
  ldc1  $f8, (8 * 43)($4)
  ldc1  $f9, (8 * 44)($4)
  ldc1  $f10, (8 * 45)($4)
  ldc1  $f11, (8 * 46)($4)
  ldc1  $f12, (8 * 47)($4)
  ldc1  $f13, (8 * 48)($4)
  ldc1  $f14, (8 * 49)($4)
  ldc1  $f15, (8 * 50)($4)
  ldc1  $f16, (8 * 51)($4)
  ldc1  $f17, (8 * 52)($4)
  ldc1  $f18, (8 * 53)($4)
  ldc1  $f19, (8 * 54)($4)
  ldc1  $f20, (8 * 55)($4)
  ldc1  $f21, (8 * 56)($4)
  ldc1  $f22, (8 * 57)($4)
  ldc1  $f23, (8 * 58)($4)
  ldc1  $f24, (8 * 59)($4)
  ldc1  $f25, (8 * 60)($4)
  ldc1  $f26, (8 * 61)($4)
  ldc1  $f27, (8 * 62)($4)
  ldc1  $f28, (8 * 63)($4)
  ldc1  $f29, (8 * 64)($4)
  ldc1  $f30, (8 * 65)($4)
  ldc1  $f31, (8 * 66)($4)
#endif
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  ld    $5, (8 * 5)($4)
  ld    $6, (8 * 6)($4)
  ld    $7, (8 * 7)($4)
  ld    $8, (8 * 8)($4)
  ld    $9, (8 * 9)($4)
  ld    $10, (8 * 10)($4)
  ld    $11, (8 * 11)($4)
  ld    $12, (8 * 12)($4)
  ld    $13, (8 * 13)($4)
  ld    $14, (8 * 14)($4)
  ld    $15, (8 * 15)($4)
  ld    $16, (8 * 16)($4)
  ld    $17, (8 * 17)($4)
  ld    $18, (8 * 18)($4)
  ld    $19, (8 * 19)($4)
  ld    $20, (8 * 20)($4)
  ld    $21, (8 * 21)($4)
  ld    $22, (8 * 22)($4)
  ld    $23, (8 * 23)($4)
  ld    $24, (8 * 24)($4)
  ld    $25, (8 * 25)($4)
  ld    $26, (8 * 26)($4)
  ld    $27, (8 * 27)($4)
  ld    $28, (8 * 28)($4)
  ld    $29, (8 * 29)($4)
  ld    $30, (8 * 30)($4)
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__) && defined(__arch64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_sparc646jumptoEv)
//
// void libunwind::Registers_sparc64::jumpto()
//
// On entry:
//  thread_state pointer is in %o0
//
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
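  // The .register directives above declare that these global registers are
  // used as scratch here, so the assembler does not warn about them.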
  flushw
  ldx  [%o0 + 0x08], %g1
  ldx  [%o0 + 0x10], %g2
  ldx  [%o0 + 0x18], %g3
  ldx  [%o0 + 0x20], %g4
  ldx  [%o0 + 0x28], %g5
  ldx  [%o0 + 0x30], %g6
  ldx  [%o0 + 0x38], %g7
  ldx  [%o0 + 0x48], %o1
  ldx  [%o0 + 0x50], %o2
  ldx  [%o0 + 0x58], %o3
  ldx  [%o0 + 0x60], %o4
  ldx  [%o0 + 0x68], %o5
  ldx  [%o0 + 0x70], %o6
  ldx  [%o0 + 0x78], %o7
  ldx  [%o0 + 0x80], %l0
  ldx  [%o0 + 0x88], %l1
  ldx  [%o0 + 0x90], %l2
  ldx  [%o0 + 0x98], %l3
  ldx  [%o0 + 0xa0], %l4
  ldx  [%o0 + 0xa8], %l5
  ldx  [%o0 + 0xb0], %l6
  ldx  [%o0 + 0xb8], %l7
  ldx  [%o0 + 0xc0], %i0
  ldx  [%o0 + 0xc8], %i1
  ldx  [%o0 + 0xd0], %i2
  ldx  [%o0 + 0xd8], %i3
  ldx  [%o0 + 0xe0], %i4
  ldx  [%o0 + 0xe8], %i5
  ldx  [%o0 + 0xf0], %i6
  ldx  [%o0 + 0xf8], %i7
  jmp  %o7
   ldx [%o0 + 0x40], %o0

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ta 3
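  // ta 3 is the flush-windows software trap (ST_FLUSH_WINDOWS); it spills all
  // register windows to the stack, like flushw in the 64-bit routine above.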
  ldd [%o0 + 64],  %l0
  ldd [%o0 + 72],  %l2
  ldd [%o0 + 80],  %l4
  ldd [%o0 + 88],  %l6
  ldd [%o0 + 96],  %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60],  %o7
  jmp %o7
   nop

#elif defined(__riscv)

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
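  // ILOAD/FLOAD and the RISCV_* offset constants are macros from assembly.h,
  // sized for the configured XLEN and FLEN.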
# if defined(__riscv_flen)
  FLOAD    f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
  FLOAD    f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
  FLOAD    f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
  FLOAD    f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
  FLOAD    f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
  FLOAD    f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
  FLOAD    f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
  FLOAD    f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
  FLOAD    f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
  FLOAD    f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
  FLOAD    f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
  FLOAD    f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
  FLOAD    f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
  FLOAD    f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
  FLOAD    f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
  FLOAD    f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
  FLOAD    f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
  FLOAD    f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
  FLOAD    f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
  FLOAD    f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
  FLOAD    f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
  FLOAD    f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
  FLOAD    f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
  FLOAD    f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
  FLOAD    f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
  FLOAD    f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
  FLOAD    f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
  FLOAD    f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
  FLOAD    f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
  FLOAD    f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
  FLOAD    f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
  FLOAD    f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
# endif

  // x0 is zero
  ILOAD    x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
  ILOAD    x2, (RISCV_ISIZE * 2)(a0)
  ILOAD    x3, (RISCV_ISIZE * 3)(a0)
  ILOAD    x4, (RISCV_ISIZE * 4)(a0)
  ILOAD    x5, (RISCV_ISIZE * 5)(a0)
  ILOAD    x6, (RISCV_ISIZE * 6)(a0)
  ILOAD    x7, (RISCV_ISIZE * 7)(a0)
  ILOAD    x8, (RISCV_ISIZE * 8)(a0)
  ILOAD    x9, (RISCV_ISIZE * 9)(a0)
  // skip a0 for now
  ILOAD    x11, (RISCV_ISIZE * 11)(a0)
  ILOAD    x12, (RISCV_ISIZE * 12)(a0)
  ILOAD    x13, (RISCV_ISIZE * 13)(a0)
  ILOAD    x14, (RISCV_ISIZE * 14)(a0)
  ILOAD    x15, (RISCV_ISIZE * 15)(a0)
  ILOAD    x16, (RISCV_ISIZE * 16)(a0)
  ILOAD    x17, (RISCV_ISIZE * 17)(a0)
  ILOAD    x18, (RISCV_ISIZE * 18)(a0)
  ILOAD    x19, (RISCV_ISIZE * 19)(a0)
  ILOAD    x20, (RISCV_ISIZE * 20)(a0)
  ILOAD    x21, (RISCV_ISIZE * 21)(a0)
  ILOAD    x22, (RISCV_ISIZE * 22)(a0)
  ILOAD    x23, (RISCV_ISIZE * 23)(a0)
  ILOAD    x24, (RISCV_ISIZE * 24)(a0)
  ILOAD    x25, (RISCV_ISIZE * 25)(a0)
  ILOAD    x26, (RISCV_ISIZE * 26)(a0)
  ILOAD    x27, (RISCV_ISIZE * 27)(a0)
  ILOAD    x28, (RISCV_ISIZE * 28)(a0)
  ILOAD    x29, (RISCV_ISIZE * 29)(a0)
  ILOAD    x30, (RISCV_ISIZE * 30)(a0)
  ILOAD    x31, (RISCV_ISIZE * 31)(a0)
  ILOAD    x10, (RISCV_ISIZE * 10)(a0)   // restore a0

  ret                       // jump to ra

#elif defined(__s390x__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv)
//
// void libunwind::Registers_s390x::jumpto()
//
// On entry:
//  thread_state pointer is in r2
//

  // Skip PSWM, but load PSWA into r1
  lg %r1, 8(%r2)

  // Restore FPRs
  ld %f0, 144(%r2)
  ld %f1, 152(%r2)
  ld %f2, 160(%r2)
  ld %f3, 168(%r2)
  ld %f4, 176(%r2)
  ld %f5, 184(%r2)
  ld %f6, 192(%r2)
  ld %f7, 200(%r2)
  ld %f8, 208(%r2)
  ld %f9, 216(%r2)
  ld %f10, 224(%r2)
  ld %f11, 232(%r2)
  ld %f12, 240(%r2)
  ld %f13, 248(%r2)
  ld %f14, 256(%r2)
  ld %f15, 264(%r2)

  // Restore GPRs - skipping %r0 and %r1
  lmg  %r2, %r15, 32(%r2)

  // Return to PSWA (was loaded into %r1 above)
  br %r1

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE
