1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "assembly.h"
10
11#if defined(_AIX)
12    .toc
13#else
14    .text
15#endif
16
17#if !defined(__USING_SJLJ_EXCEPTIONS__)
18
19#if defined(__i386__)
20
21#
22# extern int __unw_getcontext(unw_context_t* thread_state)
23#
24# On entry:
25#   +                       +
26#   +-----------------------+
27#   + thread_state pointer  +
28#   +-----------------------+
29#   + return address        +
30#   +-----------------------+   <-- SP
31#   +                       +
32#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  _LIBUNWIND_CET_ENDBR
  push  %eax            # free eax for use as the context pointer; original value saved on the stack
  movl  8(%esp), %eax   # eax = thread_state argument (was at 4(%esp) before the push)
  movl  %ebx,  4(%eax)
  movl  %ecx,  8(%eax)
  movl  %edx, 12(%eax)
  movl  %edi, 16(%eax)
  movl  %esi, 20(%eax)
  movl  %ebp, 24(%eax)
  movl  %esp, %edx
  addl  $8, %edx        # undo the push above plus the call's return address
  movl  %edx, 28(%eax)  # store what sp was at call site as esp
  # skip ss
  # skip eflags
  movl  4(%esp), %edx
  movl  %edx, 40(%eax)  # store return address as eip
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
  movl  (%esp), %edx    # reload the caller's eax saved by the push above
  movl  %edx, (%eax)  # store original eax
  popl  %eax
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret
61
62#elif defined(__x86_64__)
63
64#
65# extern int __unw_getcontext(unw_context_t* thread_state)
66#
67# On entry:
68#  thread_state pointer is in rdi
69#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if defined(_WIN64)
#define PTR %rcx
#define TMP %rdx
#else
#define PTR %rdi
#define TMP %rsi
#endif

  _LIBUNWIND_CET_ENDBR
  # PTR is the ABI's first-argument register (thread_state); TMP is a scratch
  # register that is only clobbered after its own value has been stored.
  movq  %rax,   (PTR)
  movq  %rbx,  8(PTR)
  movq  %rcx, 16(PTR)
  movq  %rdx, 24(PTR)
  movq  %rdi, 32(PTR)
  movq  %rsi, 40(PTR)
  movq  %rbp, 48(PTR)
  movq  %rsp, 56(PTR)
  addq  $8,   56(PTR)   # account for the return address: saved rsp = sp at call site
  movq  %r8,  64(PTR)
  movq  %r9,  72(PTR)
  movq  %r10, 80(PTR)
  movq  %r11, 88(PTR)
  movq  %r12, 96(PTR)
  movq  %r13,104(PTR)
  movq  %r14,112(PTR)
  movq  %r15,120(PTR)
  movq  (%rsp),TMP
  movq  TMP,128(PTR) # store return address as rip
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  # Windows context also records the XMM registers. Unaligned stores are used;
  # presumably the context buffer is not guaranteed 16-byte aligned — confirm.
  movdqu %xmm0,176(PTR)
  movdqu %xmm1,192(PTR)
  movdqu %xmm2,208(PTR)
  movdqu %xmm3,224(PTR)
  movdqu %xmm4,240(PTR)
  movdqu %xmm5,256(PTR)
  movdqu %xmm6,272(PTR)
  movdqu %xmm7,288(PTR)
  movdqu %xmm8,304(PTR)
  movdqu %xmm9,320(PTR)
  movdqu %xmm10,336(PTR)
  movdqu %xmm11,352(PTR)
  movdqu %xmm12,368(PTR)
  movdqu %xmm13,384(PTR)
  movdqu %xmm14,400(PTR)
  movdqu %xmm15,416(PTR)
#endif
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret
124
125#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
126
127#
128# extern int __unw_getcontext(unw_context_t* thread_state)
129#
130# On entry:
131#  thread_state pointer is in a0 ($4)
132#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Saves the O32 integer register file (GPR i at offset 4*i from $4/a0),
  # hi/lo, and — when built hard-float — the FP registers, into the
  # unw_context_t pointed to by $4.
  .set push
  .set noat             # allow explicit references to $1 ($at) below
  .set noreorder
  .set nomacro
  sw    $1, (4 * 1)($4)
  sw    $2, (4 * 2)($4)
  sw    $3, (4 * 3)($4)
  sw    $4, (4 * 4)($4)
  sw    $5, (4 * 5)($4)
  sw    $6, (4 * 6)($4)
  sw    $7, (4 * 7)($4)
  sw    $8, (4 * 8)($4)
  sw    $9, (4 * 9)($4)
  sw    $10, (4 * 10)($4)
  sw    $11, (4 * 11)($4)
  sw    $12, (4 * 12)($4)
  sw    $13, (4 * 13)($4)
  sw    $14, (4 * 14)($4)
  sw    $15, (4 * 15)($4)
  sw    $16, (4 * 16)($4)
  sw    $17, (4 * 17)($4)
  sw    $18, (4 * 18)($4)
  sw    $19, (4 * 19)($4)
  sw    $20, (4 * 20)($4)
  sw    $21, (4 * 21)($4)
  sw    $22, (4 * 22)($4)
  sw    $23, (4 * 23)($4)
  sw    $24, (4 * 24)($4)
  sw    $25, (4 * 25)($4)
  sw    $26, (4 * 26)($4)
  sw    $27, (4 * 27)($4)
  sw    $28, (4 * 28)($4)
  sw    $29, (4 * 29)($4)
  sw    $30, (4 * 30)($4)
  sw    $31, (4 * 31)($4)
  # Store return address ($31 = ra) to pc
  sw    $31, (4 * 32)($4)
  # hi and lo
  mfhi  $8
  sw    $8,  (4 * 33)($4)
  mflo  $8
  sw    $8,  (4 * 34)($4)
#ifdef __mips_hard_float
#if __mips_fpr != 64
  # FR=0 mode: 64-bit FP values live only in the even-numbered registers,
  # so only those are stored.
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  # FR=1 mode: all 32 FP registers are 64-bit; store them all.
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f1, (4 * 36 + 8 * 1)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f3, (4 * 36 + 8 * 3)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f5, (4 * 36 + 8 * 5)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f7, (4 * 36 + 8 * 7)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f9, (4 * 36 + 8 * 9)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f11, (4 * 36 + 8 * 11)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f13, (4 * 36 + 8 * 13)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f15, (4 * 36 + 8 * 15)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f17, (4 * 36 + 8 * 17)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f19, (4 * 36 + 8 * 19)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f21, (4 * 36 + 8 * 21)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f23, (4 * 36 + 8 * 23)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f25, (4 * 36 + 8 * 25)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f27, (4 * 36 + 8 * 27)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f29, (4 * 36 + 8 * 29)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
  sdc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  jr	$31
  # return UNW_ESUCCESS (set in the branch delay slot)
  or    $2, $0, $0
  .set pop
233
234#elif defined(__mips64)
235
236#
237# extern int __unw_getcontext(unw_context_t* thread_state)
238#
239# On entry:
240#  thread_state pointer is in a0 ($4)
241#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Saves the 64-bit integer register file (GPR i at offset 8*i from $4/a0),
  # hi/lo, and — when built hard-float — all 32 FP registers, into the
  # unw_context_t pointed to by $4.
  .set push
  .set noat             # allow explicit references to $1 ($at) below
  .set noreorder
  .set nomacro
  sd    $1, (8 * 1)($4)
  sd    $2, (8 * 2)($4)
  sd    $3, (8 * 3)($4)
  sd    $4, (8 * 4)($4)
  sd    $5, (8 * 5)($4)
  sd    $6, (8 * 6)($4)
  sd    $7, (8 * 7)($4)
  sd    $8, (8 * 8)($4)
  sd    $9, (8 * 9)($4)
  sd    $10, (8 * 10)($4)
  sd    $11, (8 * 11)($4)
  sd    $12, (8 * 12)($4)
  sd    $13, (8 * 13)($4)
  sd    $14, (8 * 14)($4)
  sd    $15, (8 * 15)($4)
  sd    $16, (8 * 16)($4)
  sd    $17, (8 * 17)($4)
  sd    $18, (8 * 18)($4)
  sd    $19, (8 * 19)($4)
  sd    $20, (8 * 20)($4)
  sd    $21, (8 * 21)($4)
  sd    $22, (8 * 22)($4)
  sd    $23, (8 * 23)($4)
  sd    $24, (8 * 24)($4)
  sd    $25, (8 * 25)($4)
  sd    $26, (8 * 26)($4)
  sd    $27, (8 * 27)($4)
  sd    $28, (8 * 28)($4)
  sd    $29, (8 * 29)($4)
  sd    $30, (8 * 30)($4)
  sd    $31, (8 * 31)($4)
  # Store return address ($31 = ra) to pc
  sd    $31, (8 * 32)($4)
  # hi and lo
  mfhi  $8
  sd    $8,  (8 * 33)($4)
  mflo  $8
  sd    $8,  (8 * 34)($4)
#ifdef __mips_hard_float
  sdc1  $f0, (8 * 35)($4)
  sdc1  $f1, (8 * 36)($4)
  sdc1  $f2, (8 * 37)($4)
  sdc1  $f3, (8 * 38)($4)
  sdc1  $f4, (8 * 39)($4)
  sdc1  $f5, (8 * 40)($4)
  sdc1  $f6, (8 * 41)($4)
  sdc1  $f7, (8 * 42)($4)
  sdc1  $f8, (8 * 43)($4)
  sdc1  $f9, (8 * 44)($4)
  sdc1  $f10, (8 * 45)($4)
  sdc1  $f11, (8 * 46)($4)
  sdc1  $f12, (8 * 47)($4)
  sdc1  $f13, (8 * 48)($4)
  sdc1  $f14, (8 * 49)($4)
  sdc1  $f15, (8 * 50)($4)
  sdc1  $f16, (8 * 51)($4)
  sdc1  $f17, (8 * 52)($4)
  sdc1  $f18, (8 * 53)($4)
  sdc1  $f19, (8 * 54)($4)
  sdc1  $f20, (8 * 55)($4)
  sdc1  $f21, (8 * 56)($4)
  sdc1  $f22, (8 * 57)($4)
  sdc1  $f23, (8 * 58)($4)
  sdc1  $f24, (8 * 59)($4)
  sdc1  $f25, (8 * 60)($4)
  sdc1  $f26, (8 * 61)($4)
  sdc1  $f27, (8 * 62)($4)
  sdc1  $f28, (8 * 63)($4)
  sdc1  $f29, (8 * 64)($4)
  sdc1  $f30, (8 * 65)($4)
  sdc1  $f31, (8 * 66)($4)
#endif
  jr	$31
  # return UNW_ESUCCESS (set in the branch delay slot)
  or    $2, $0, $0
  .set pop
323
324# elif defined(__mips__)
325
326#
327# extern int __unw_getcontext(unw_context_t* thread_state)
328#
329# Just trap for the time being.
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Remaining MIPS ABIs (e.g. N32) are unimplemented; trap unconditionally
  # ($0 == $0 is always true, so teq always fires).
  teq $0, $0
332
333#elif defined(__powerpc64__)
334
335//
336// extern int __unw_getcontext(unw_context_t* thread_state)
337//
338// On entry:
339//  thread_state pointer is in r3
340//
#if defined(_AIX)
DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
#else
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#endif
// store register (GPR)
// GPR n is stored at offset 8*(n+2) from r3; the two leading slots are used
// for other context fields (srr0 is written via PPC64_OFFS_SRR0 below).
#define PPC64_STR(n) \
  std   n, (8 * (n + 2))(3)

  // save GPRs
  PPC64_STR(0)
  mflr  0
  std   0, PPC64_OFFS_SRR0(3) // store lr (the return address) as srr0
  PPC64_STR(1)
  PPC64_STR(2)
  PPC64_STR(3)
  PPC64_STR(4)
  PPC64_STR(5)
  PPC64_STR(6)
  PPC64_STR(7)
  PPC64_STR(8)
  PPC64_STR(9)
  PPC64_STR(10)
  PPC64_STR(11)
  PPC64_STR(12)
  PPC64_STR(13)
  PPC64_STR(14)
  PPC64_STR(15)
  PPC64_STR(16)
  PPC64_STR(17)
  PPC64_STR(18)
  PPC64_STR(19)
  PPC64_STR(20)
  PPC64_STR(21)
  PPC64_STR(22)
  PPC64_STR(23)
  PPC64_STR(24)
  PPC64_STR(25)
  PPC64_STR(26)
  PPC64_STR(27)
  PPC64_STR(28)
  PPC64_STR(29)
  PPC64_STR(30)
  PPC64_STR(31)

  // save the special-purpose registers
  mfcr  0
  std   0,  PPC64_OFFS_CR(3)
  mfxer 0
  std   0,  PPC64_OFFS_XER(3)
  mflr  0
  std   0,  PPC64_OFFS_LR(3)
  mfctr 0
  std   0,  PPC64_OFFS_CTR(3)
  mfvrsave    0
  std   0,  PPC64_OFFS_VRSAVE(3)

#if defined(__VSX__)
  // save VS registers
  // (note that this also saves floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// store VS register: stxvd2x writes 16 bytes at r4, then r4 advances to the
// next 16-byte slot, so consecutive invocations lay out VS0..VS63 in order.
#define PPC64_STVS(n)      \
  stxvd2x n, 0, 4         ;\
  addi    4, 4, 16

  PPC64_STVS(0)
  PPC64_STVS(1)
  PPC64_STVS(2)
  PPC64_STVS(3)
  PPC64_STVS(4)
  PPC64_STVS(5)
  PPC64_STVS(6)
  PPC64_STVS(7)
  PPC64_STVS(8)
  PPC64_STVS(9)
  PPC64_STVS(10)
  PPC64_STVS(11)
  PPC64_STVS(12)
  PPC64_STVS(13)
  PPC64_STVS(14)
  PPC64_STVS(15)
  PPC64_STVS(16)
  PPC64_STVS(17)
  PPC64_STVS(18)
  PPC64_STVS(19)
  PPC64_STVS(20)
  PPC64_STVS(21)
  PPC64_STVS(22)
  PPC64_STVS(23)
  PPC64_STVS(24)
  PPC64_STVS(25)
  PPC64_STVS(26)
  PPC64_STVS(27)
  PPC64_STVS(28)
  PPC64_STVS(29)
  PPC64_STVS(30)
  PPC64_STVS(31)
  PPC64_STVS(32)
  PPC64_STVS(33)
  PPC64_STVS(34)
  PPC64_STVS(35)
  PPC64_STVS(36)
  PPC64_STVS(37)
  PPC64_STVS(38)
  PPC64_STVS(39)
  PPC64_STVS(40)
  PPC64_STVS(41)
  PPC64_STVS(42)
  PPC64_STVS(43)
  PPC64_STVS(44)
  PPC64_STVS(45)
  PPC64_STVS(46)
  PPC64_STVS(47)
  PPC64_STVS(48)
  PPC64_STVS(49)
  PPC64_STVS(50)
  PPC64_STVS(51)
  PPC64_STVS(52)
  PPC64_STVS(53)
  PPC64_STVS(54)
  PPC64_STVS(55)
  PPC64_STVS(56)
  PPC64_STVS(57)
  PPC64_STVS(58)
  PPC64_STVS(59)
  PPC64_STVS(60)
  PPC64_STVS(61)
  PPC64_STVS(62)
  PPC64_STVS(63)

#else

// store FP register
// The 16-byte stride matches the VS layout above: each FP register occupies
// the first half of a 16-byte VS slot.
#define PPC64_STF(n) \
  stfd  n, (PPC64_OFFS_FP + n * 16)(3)

  // save float registers
  PPC64_STF(0)
  PPC64_STF(1)
  PPC64_STF(2)
  PPC64_STF(3)
  PPC64_STF(4)
  PPC64_STF(5)
  PPC64_STF(6)
  PPC64_STF(7)
  PPC64_STF(8)
  PPC64_STF(9)
  PPC64_STF(10)
  PPC64_STF(11)
  PPC64_STF(12)
  PPC64_STF(13)
  PPC64_STF(14)
  PPC64_STF(15)
  PPC64_STF(16)
  PPC64_STF(17)
  PPC64_STF(18)
  PPC64_STF(19)
  PPC64_STF(20)
  PPC64_STF(21)
  PPC64_STF(22)
  PPC64_STF(23)
  PPC64_STF(24)
  PPC64_STF(25)
  PPC64_STF(26)
  PPC64_STF(27)
  PPC64_STF(28)
  PPC64_STF(29)
  PPC64_STF(30)
  PPC64_STF(31)

#if defined(__ALTIVEC__)
  // save vector registers

  // Use 16-bytes below the stack pointer as an
  // aligned buffer to save each vector register.
  // Note that the stack pointer is always 16-byte aligned.
  subi  4, 1, 16

// stvx requires a 16-byte-aligned address, so each vector is bounced
// through the aligned stack scratch buffer and copied out as two doublewords.
#define PPC64_STV_UNALIGNED(n)             \
  stvx  n, 0, 4                           ;\
  ld    5, 0(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16)(3)     ;\
  ld    5, 8(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16 + 8)(3)

  PPC64_STV_UNALIGNED(0)
  PPC64_STV_UNALIGNED(1)
  PPC64_STV_UNALIGNED(2)
  PPC64_STV_UNALIGNED(3)
  PPC64_STV_UNALIGNED(4)
  PPC64_STV_UNALIGNED(5)
  PPC64_STV_UNALIGNED(6)
  PPC64_STV_UNALIGNED(7)
  PPC64_STV_UNALIGNED(8)
  PPC64_STV_UNALIGNED(9)
  PPC64_STV_UNALIGNED(10)
  PPC64_STV_UNALIGNED(11)
  PPC64_STV_UNALIGNED(12)
  PPC64_STV_UNALIGNED(13)
  PPC64_STV_UNALIGNED(14)
  PPC64_STV_UNALIGNED(15)
  PPC64_STV_UNALIGNED(16)
  PPC64_STV_UNALIGNED(17)
  PPC64_STV_UNALIGNED(18)
  PPC64_STV_UNALIGNED(19)
  PPC64_STV_UNALIGNED(20)
  PPC64_STV_UNALIGNED(21)
  PPC64_STV_UNALIGNED(22)
  PPC64_STV_UNALIGNED(23)
  PPC64_STV_UNALIGNED(24)
  PPC64_STV_UNALIGNED(25)
  PPC64_STV_UNALIGNED(26)
  PPC64_STV_UNALIGNED(27)
  PPC64_STV_UNALIGNED(28)
  PPC64_STV_UNALIGNED(29)
  PPC64_STV_UNALIGNED(30)
  PPC64_STV_UNALIGNED(31)

#endif
#endif

  li    3,  0   // return UNW_ESUCCESS
  blr
567
568
569#elif defined(__powerpc__)
570
571//
// extern int __unw_getcontext(unw_context_t* thread_state)
573//
574// On entry:
575//  thread_state pointer is in r3
576//
#if defined(_AIX)
DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
#else
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#endif
  // save GPRs: GPR n is stored at offset 8 + 4*n from r3
  stw     0,   8(3)
  mflr    0
  stw     0,   0(3) // store lr (the return address) as srr0
  stw     1,  12(3)
  stw     2,  16(3)
  stw     3,  20(3)
  stw     4,  24(3)
  stw     5,  28(3)
  stw     6,  32(3)
  stw     7,  36(3)
  stw     8,  40(3)
  stw     9,  44(3)
  stw     10, 48(3)
  stw     11, 52(3)
  stw     12, 56(3)
  stw     13, 60(3)
  stw     14, 64(3)
  stw     15, 68(3)
  stw     16, 72(3)
  stw     17, 76(3)
  stw     18, 80(3)
  stw     19, 84(3)
  stw     20, 88(3)
  stw     21, 92(3)
  stw     22, 96(3)
  stw     23,100(3)
  stw     24,104(3)
  stw     25,108(3)
  stw     26,112(3)
  stw     27,116(3)
  stw     28,120(3)
  stw     29,124(3)
  stw     30,128(3)
  stw     31,132(3)

#if defined(__ALTIVEC__)
  // save VRSave register (SPR 256)
  mfspr   0, 256
  stw     0, 156(3)
#endif
  // save CR registers
  mfcr    0
  stw     0, 136(3)
  // save CTR register
  mfctr   0
  stw     0, 148(3)

#if !defined(__NO_FPRS__)
  // save float registers
  stfd    0, 160(3)
  stfd    1, 168(3)
  stfd    2, 176(3)
  stfd    3, 184(3)
  stfd    4, 192(3)
  stfd    5, 200(3)
  stfd    6, 208(3)
  stfd    7, 216(3)
  stfd    8, 224(3)
  stfd    9, 232(3)
  stfd    10,240(3)
  stfd    11,248(3)
  stfd    12,256(3)
  stfd    13,264(3)
  stfd    14,272(3)
  stfd    15,280(3)
  stfd    16,288(3)
  stfd    17,296(3)
  stfd    18,304(3)
  stfd    19,312(3)
  stfd    20,320(3)
  stfd    21,328(3)
  stfd    22,336(3)
  stfd    23,344(3)
  stfd    24,352(3)
  stfd    25,360(3)
  stfd    26,368(3)
  stfd    27,376(3)
  stfd    28,384(3)
  stfd    29,392(3)
  stfd    30,400(3)
  stfd    31,408(3)
#endif

#if defined(__ALTIVEC__)
  // save vector registers

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone

// stvx requires a 16-byte-aligned address, so each vector is bounced through
// the aligned scratch buffer at r4 and copied into the context word by word.
#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
  stvx    _vec, 0, 4               SEPARATOR \
  lwz     5, 0(4)                  SEPARATOR \
  stw     5, _offset(3)            SEPARATOR \
  lwz     5, 4(4)                  SEPARATOR \
  stw     5, _offset+4(3)          SEPARATOR \
  lwz     5, 8(4)                  SEPARATOR \
  stw     5, _offset+8(3)          SEPARATOR \
  lwz     5, 12(4)                 SEPARATOR \
  stw     5, _offset+12(3)

  SAVE_VECTOR_UNALIGNED( 0, 424+0x000)
  SAVE_VECTOR_UNALIGNED( 1, 424+0x010)
  SAVE_VECTOR_UNALIGNED( 2, 424+0x020)
  SAVE_VECTOR_UNALIGNED( 3, 424+0x030)
  SAVE_VECTOR_UNALIGNED( 4, 424+0x040)
  SAVE_VECTOR_UNALIGNED( 5, 424+0x050)
  SAVE_VECTOR_UNALIGNED( 6, 424+0x060)
  SAVE_VECTOR_UNALIGNED( 7, 424+0x070)
  SAVE_VECTOR_UNALIGNED( 8, 424+0x080)
  SAVE_VECTOR_UNALIGNED( 9, 424+0x090)
  SAVE_VECTOR_UNALIGNED(10, 424+0x0A0)
  SAVE_VECTOR_UNALIGNED(11, 424+0x0B0)
  SAVE_VECTOR_UNALIGNED(12, 424+0x0C0)
  SAVE_VECTOR_UNALIGNED(13, 424+0x0D0)
  SAVE_VECTOR_UNALIGNED(14, 424+0x0E0)
  SAVE_VECTOR_UNALIGNED(15, 424+0x0F0)
  SAVE_VECTOR_UNALIGNED(16, 424+0x100)
  SAVE_VECTOR_UNALIGNED(17, 424+0x110)
  SAVE_VECTOR_UNALIGNED(18, 424+0x120)
  SAVE_VECTOR_UNALIGNED(19, 424+0x130)
  SAVE_VECTOR_UNALIGNED(20, 424+0x140)
  SAVE_VECTOR_UNALIGNED(21, 424+0x150)
  SAVE_VECTOR_UNALIGNED(22, 424+0x160)
  SAVE_VECTOR_UNALIGNED(23, 424+0x170)
  SAVE_VECTOR_UNALIGNED(24, 424+0x180)
  SAVE_VECTOR_UNALIGNED(25, 424+0x190)
  SAVE_VECTOR_UNALIGNED(26, 424+0x1A0)
  SAVE_VECTOR_UNALIGNED(27, 424+0x1B0)
  SAVE_VECTOR_UNALIGNED(28, 424+0x1C0)
  SAVE_VECTOR_UNALIGNED(29, 424+0x1D0)
  SAVE_VECTOR_UNALIGNED(30, 424+0x1E0)
  SAVE_VECTOR_UNALIGNED(31, 424+0x1F0)
#endif

  li      3, 0  // return UNW_ESUCCESS
  blr
719
720
721#elif defined(__aarch64__)
722
723//
724// extern int __unw_getcontext(unw_context_t* thread_state)
725//
726// On entry:
727//  thread_state pointer is in x0
728//
729  .p2align 2
730DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
731  stp    x0, x1,  [x0, #0x000]
732  stp    x2, x3,  [x0, #0x010]
733  stp    x4, x5,  [x0, #0x020]
734  stp    x6, x7,  [x0, #0x030]
735  stp    x8, x9,  [x0, #0x040]
736  stp    x10,x11, [x0, #0x050]
737  stp    x12,x13, [x0, #0x060]
738  stp    x14,x15, [x0, #0x070]
739  stp    x16,x17, [x0, #0x080]
740  stp    x18,x19, [x0, #0x090]
741  stp    x20,x21, [x0, #0x0A0]
742  stp    x22,x23, [x0, #0x0B0]
743  stp    x24,x25, [x0, #0x0C0]
744  stp    x26,x27, [x0, #0x0D0]
745  stp    x28,x29, [x0, #0x0E0]
746  str    x30,     [x0, #0x0F0]
747  mov    x1,sp
748  str    x1,      [x0, #0x0F8]
749  str    x30,     [x0, #0x100]    // store return address as pc
750  // skip cpsr
751  stp    d0, d1,  [x0, #0x110]
752  stp    d2, d3,  [x0, #0x120]
753  stp    d4, d5,  [x0, #0x130]
754  stp    d6, d7,  [x0, #0x140]
755  stp    d8, d9,  [x0, #0x150]
756  stp    d10,d11, [x0, #0x160]
757  stp    d12,d13, [x0, #0x170]
758  stp    d14,d15, [x0, #0x180]
759  stp    d16,d17, [x0, #0x190]
760  stp    d18,d19, [x0, #0x1A0]
761  stp    d20,d21, [x0, #0x1B0]
762  stp    d22,d23, [x0, #0x1C0]
763  stp    d24,d25, [x0, #0x1D0]
764  stp    d26,d27, [x0, #0x1E0]
765  stp    d28,d29, [x0, #0x1F0]
766  str    d30,     [x0, #0x200]
767  str    d31,     [x0, #0x208]
768  mov    x0, #0                   // return UNW_ESUCCESS
769  ret
770
771#elif defined(__arm__) && !defined(__APPLE__)
772
773#if !defined(__ARM_ARCH_ISA_ARM)
774#if (__ARM_ARCH_ISA_THUMB == 2)
775  .syntax unified
776#endif
777  .thumb
778#endif
779
780@
781@ extern int __unw_getcontext(unw_context_t* thread_state)
782@
783@ On entry:
784@  thread_state pointer is in r0
785@
786@ Per EHABI #4.7 this only saves the core integer registers.
787@ EHABI #7.4.5 notes that in general all VRS registers should be restored
788@ however this is very hard to do for VFP registers because it is unknown
789@ to the library how many registers are implemented by the architecture.
790@ Instead, VFP registers are demand saved by logic external to __unw_getcontext.
791@
792  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ Thumb-1 path: stm can only store low registers, so high registers are
  @ shuffled through r1-r3 before being stored.
  stm r0!, {r0-r7}
  mov r1, r8
  mov r2, r9
  mov r3, r10
  stm r0!, {r1-r3}   @ r0 now points at the r11 slot (11 words stored so far)
  mov r1, r11
  mov r2, sp
  mov r3, lr
  str r1, [r0, #0]   @ r11
  @ r12 does not need storing, it is the intra-procedure-call scratch register
  str r2, [r0, #8]   @ sp
  str r3, [r0, #12]  @ lr
  str r3, [r0, #16]  @ store return address as pc
  @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
  @ It is safe to use here though because we are about to return, and cpsr is
  @ not expected to be preserved.
  movs r0, #0        @ return UNW_ESUCCESS
#else
  @ 32bit thumb-2 restrictions for stm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) cannot be in the list in an STM instruction
  stm r0, {r0-r12}
  str sp, [r0, #52]
  str lr, [r0, #56]
  str lr, [r0, #60]  @ store return address as pc
  mov r0, #0         @ return UNW_ESUCCESS
#endif
  JMP(lr)
823
824@
825@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
826@
827@ On entry:
828@  values pointer is in r0
829@
830  .p2align 2
831#if defined(__ELF__)
832  .fpu vfpv3-d16
833#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
  @ Store d0-d15 (8 bytes each) sequentially at r0 (FSTMD layout).
  vstmia r0, {d0-d15}
  JMP(lr)
837
838@
839@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
840@
841@ On entry:
842@  values pointer is in r0
843@
844  .p2align 2
845#if defined(__ELF__)
846  .fpu vfpv3-d16
847#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
  @ Store d0-d15 at r0; the legacy FSTMX format is no longer emitted.
  vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
  JMP(lr)
851
852@
853@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
854@
855@ On entry:
856@  values pointer is in r0
857@
858  .p2align 2
859#if defined(__ELF__)
860  .fpu vfpv3
861#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
  @ Store the upper VFPv3 bank, d16-d31, sequentially at r0.
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instructions using the corresponding coprocessor mnemonic.
  vstmia r0, {d16-d31}
  JMP(lr)
872
873#if defined(_LIBUNWIND_ARM_WMMX)
874
875@
876@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
877@
878@ On entry:
879@  values pointer is in r0
880@
881  .p2align 2
882#if defined(__ELF__)
883  .arch armv5te
884#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
  @ Store iwMMX data registers wR0-wR15 (8 bytes each, post-incrementing r0).
  @ Encoded as generic coprocessor stores (stcl to p1) so the file can be
  @ assembled without iwMMX support; each line's comment shows the wstrd form.
  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
  JMP(lr)
903
904@
905@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
906@
907@ On entry:
908@  values pointer is in r0
909@
910  .p2align 2
911#if defined(__ELF__)
912  .arch armv5te
913#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
  @ Store iwMMX control registers wCGR0-wCGR3 (4 bytes each, post-incrementing
  @ r0), encoded as generic coprocessor stores (stc2 to p1).
  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
  JMP(lr)
920
921#endif
922
923#elif defined(__or1k__)
924
925#
926# extern int __unw_getcontext(unw_context_t* thread_state)
927#
928# On entry:
929#  thread_state pointer is in r3
930#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Saves r0-r31 (slot i = 4*i(r3)), then the return address (r9, the OpenRISC
  # link register) as pc, and zeroes epcr.
  l.sw       0(r3), r0
  l.sw       4(r3), r1
  l.sw       8(r3), r2
  l.sw      12(r3), r3
  l.sw      16(r3), r4
  l.sw      20(r3), r5
  l.sw      24(r3), r6
  l.sw      28(r3), r7
  l.sw      32(r3), r8
  l.sw      36(r3), r9
  l.sw      40(r3), r10
  l.sw      44(r3), r11
  l.sw      48(r3), r12
  l.sw      52(r3), r13
  l.sw      56(r3), r14
  l.sw      60(r3), r15
  l.sw      64(r3), r16
  l.sw      68(r3), r17
  l.sw      72(r3), r18
  l.sw      76(r3), r19
  l.sw      80(r3), r20
  l.sw      84(r3), r21
  l.sw      88(r3), r22
  l.sw      92(r3), r23
  l.sw      96(r3), r24
  l.sw     100(r3), r25
  l.sw     104(r3), r26
  l.sw     108(r3), r27
  l.sw     112(r3), r28
  l.sw     116(r3), r29
  l.sw     120(r3), r30
  l.sw     124(r3), r31
  # store ra to pc
  l.sw     128(r3), r9
  # zero epcr
  l.sw     132(r3), r0
  # NOTE(review): there is no return instruction here — control appears to
  # fall off the end of the function (something like `l.jr r9` plus setting
  # r11 = 0 for UNW_ESUCCESS would be expected). Confirm against the OpenRISC
  # ABI and upstream before relying on this path.
968
969#elif defined(__hexagon__)
970#
# extern int __unw_getcontext(unw_context_t* thread_state)
972#
973# On entry:
974#  thread_state pointer is in r0
975#
// NOTE(review): the OFFSET macro below appears unused in this function.
#define OFFSET(offset) (offset/4)
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  // Saves r8-r31 (slot i = 4*i(r0)), the predicate register (c4), and the
  // return address as pc, into the context pointed to by r0.
  // NOTE(review): r0-r7 are not stored into the context (their slots at
  // offsets 0-28 are left untouched) — confirm this is intentional.
  memw(r0+#32) = r8
  memw(r0+#36) = r9
  memw(r0+#40) = r10
  memw(r0+#44) = r11

  memw(r0+#48) = r12
  memw(r0+#52) = r13
  memw(r0+#56) = r14
  memw(r0+#60) = r15

  memw(r0+#64) = r16
  memw(r0+#68) = r17
  memw(r0+#72) = r18
  memw(r0+#76) = r19

  memw(r0+#80) = r20
  memw(r0+#84) = r21
  memw(r0+#88) = r22
  memw(r0+#92) = r23

  memw(r0+#96) = r24
  memw(r0+#100) = r25
  memw(r0+#104) = r26
  memw(r0+#108) = r27

  memw(r0+#112) = r28
  memw(r0+#116) = r29
  memw(r0+#120) = r30
  memw(r0+#124) = r31
  r1 = c4   // Predicate register
  memw(r0+#128) = r1
  r1 = memw(r30)           // *FP == Saved FP
  // NOTE(review): the load above is dead — r1 is overwritten on the next
  // line before being stored. Confirm whether the saved FP was meant to be
  // recorded instead of the return address here.
  r1 = r31
  memw(r0+#132) = r1       // store return address (r31) as pc

  jumpr r31
1014
1015#elif defined(__sparc__) && defined(__arch64__)
1016
1017#
1018# extern int __unw_getcontext(unw_context_t* thread_state)
1019#
1020# On entry:
1021#  thread_state pointer is in %o0
1022#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # Declare the g-registers as scratch so the assembler permits their use.
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
  # Save the global, out, local and in registers (8 bytes each) into the
  # context pointed to by %o0.
  stx  %g1, [%o0 + 0x08]
  stx  %g2, [%o0 + 0x10]
  stx  %g3, [%o0 + 0x18]
  stx  %g4, [%o0 + 0x20]
  stx  %g5, [%o0 + 0x28]
  stx  %g6, [%o0 + 0x30]
  stx  %g7, [%o0 + 0x38]
  stx  %o0, [%o0 + 0x40]
  stx  %o1, [%o0 + 0x48]
  stx  %o2, [%o0 + 0x50]
  stx  %o3, [%o0 + 0x58]
  stx  %o4, [%o0 + 0x60]
  stx  %o5, [%o0 + 0x68]
  stx  %o6, [%o0 + 0x70]
  stx  %o7, [%o0 + 0x78]
  stx  %l0, [%o0 + 0x80]
  stx  %l1, [%o0 + 0x88]
  stx  %l2, [%o0 + 0x90]
  stx  %l3, [%o0 + 0x98]
  stx  %l4, [%o0 + 0xa0]
  stx  %l5, [%o0 + 0xa8]
  stx  %l6, [%o0 + 0xb0]
  stx  %l7, [%o0 + 0xb8]
  stx  %i0, [%o0 + 0xc0]
  stx  %i1, [%o0 + 0xc8]
  stx  %i2, [%o0 + 0xd0]
  stx  %i3, [%o0 + 0xd8]
  stx  %i4, [%o0 + 0xe0]
  stx  %i5, [%o0 + 0xe8]
  stx  %i6, [%o0 + 0xf0]
  stx  %i7, [%o0 + 0xf8]

  # save StackGhost cookie
  mov  %i7, %g4
  save %sp, -176, %sp
  # register window flush necessary even without StackGhost
  flushw
  restore
  # 2047 is the V9 stack bias; load the saved %i7 slot from the just-flushed
  # register window and XOR it with the live %i7 to recover the cookie.
  ldx  [%sp + 2047 + 0x78], %g5
  xor  %g4, %g5, %g4
  stx  %g4, [%o0 + 0x100]
  retl
  # return UNW_ESUCCESS (set in the branch delay slot)
   clr %o0
1072
1073#elif defined(__sparc__)
1074
1075#
1076# extern int __unw_getcontext(unw_context_t* thread_state)
1077#
1078# On entry:
1079#  thread_state pointer is in o0
1080#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  ta 3                # ST_FLUSH_WINDOWS trap: spill register windows to the stack
  # Pre-adjust %o7 so the value stored below (and used by the final jmp) is
  # the actual return address rather than the call instruction's address.
  add %o7, 8, %o7
  # Save register pairs (std stores even/odd register pairs, 8 bytes each).
  std %g0, [%o0 +   0]
  std %g2, [%o0 +   8]
  std %g4, [%o0 +  16]
  std %g6, [%o0 +  24]
  std %o0, [%o0 +  32]
  std %o2, [%o0 +  40]
  std %o4, [%o0 +  48]
  std %o6, [%o0 +  56]
  std %l0, [%o0 +  64]
  std %l2, [%o0 +  72]
  std %l4, [%o0 +  80]
  std %l6, [%o0 +  88]
  std %i0, [%o0 +  96]
  std %i2, [%o0 + 104]
  std %i4, [%o0 + 112]
  std %i6, [%o0 + 120]
  jmp %o7             # return (%o7 already includes the +8 adjustment)
   clr %o0                   // return UNW_ESUCCESS
1102
1103#elif defined(__riscv)
1104
1105#
1106# extern int __unw_getcontext(unw_context_t* thread_state)
1107#
1108# On entry:
1109#  thread_state pointer is in a0
1110#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  # ISTORE/FSTORE and the RISCV_* constants presumably come from assembly.h,
  # selecting the store width for RV32/RV64 and the FP ABI — confirm there.
  # Note x1 (ra) is stored twice: slot 0 holds it as the context pc, slot 1
  # holds it as ra itself.
  ISTORE    x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
  ISTORE    x1, (RISCV_ISIZE * 1)(a0)
  ISTORE    x2, (RISCV_ISIZE * 2)(a0)
  ISTORE    x3, (RISCV_ISIZE * 3)(a0)
  ISTORE    x4, (RISCV_ISIZE * 4)(a0)
  ISTORE    x5, (RISCV_ISIZE * 5)(a0)
  ISTORE    x6, (RISCV_ISIZE * 6)(a0)
  ISTORE    x7, (RISCV_ISIZE * 7)(a0)
  ISTORE    x8, (RISCV_ISIZE * 8)(a0)
  ISTORE    x9, (RISCV_ISIZE * 9)(a0)
  ISTORE    x10, (RISCV_ISIZE * 10)(a0)
  ISTORE    x11, (RISCV_ISIZE * 11)(a0)
  ISTORE    x12, (RISCV_ISIZE * 12)(a0)
  ISTORE    x13, (RISCV_ISIZE * 13)(a0)
  ISTORE    x14, (RISCV_ISIZE * 14)(a0)
  ISTORE    x15, (RISCV_ISIZE * 15)(a0)
  ISTORE    x16, (RISCV_ISIZE * 16)(a0)
  ISTORE    x17, (RISCV_ISIZE * 17)(a0)
  ISTORE    x18, (RISCV_ISIZE * 18)(a0)
  ISTORE    x19, (RISCV_ISIZE * 19)(a0)
  ISTORE    x20, (RISCV_ISIZE * 20)(a0)
  ISTORE    x21, (RISCV_ISIZE * 21)(a0)
  ISTORE    x22, (RISCV_ISIZE * 22)(a0)
  ISTORE    x23, (RISCV_ISIZE * 23)(a0)
  ISTORE    x24, (RISCV_ISIZE * 24)(a0)
  ISTORE    x25, (RISCV_ISIZE * 25)(a0)
  ISTORE    x26, (RISCV_ISIZE * 26)(a0)
  ISTORE    x27, (RISCV_ISIZE * 27)(a0)
  ISTORE    x28, (RISCV_ISIZE * 28)(a0)
  ISTORE    x29, (RISCV_ISIZE * 29)(a0)
  ISTORE    x30, (RISCV_ISIZE * 30)(a0)
  ISTORE    x31, (RISCV_ISIZE * 31)(a0)

# if defined(__riscv_flen)
  FSTORE    f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
  FSTORE    f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
  FSTORE    f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
  FSTORE    f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
  FSTORE    f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
  FSTORE    f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
  FSTORE    f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
  FSTORE    f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
  FSTORE    f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
  FSTORE    f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
  FSTORE    f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
  FSTORE    f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
  FSTORE    f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
  FSTORE    f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
  FSTORE    f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
  FSTORE    f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
  FSTORE    f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
  FSTORE    f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
  FSTORE    f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
  FSTORE    f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
  FSTORE    f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
  FSTORE    f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
  FSTORE    f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
  FSTORE    f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
  FSTORE    f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
  FSTORE    f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
  FSTORE    f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
  FSTORE    f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
  FSTORE    f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
  FSTORE    f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
  FSTORE    f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
  FSTORE    f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
# endif

  li     a0, 0  // return UNW_ESUCCESS
  ret           // jump to ra
1182
1183#elif defined(__s390x__)
1184
1185//
1186// extern int __unw_getcontext(unw_context_t* thread_state)
1187//
1188// On entry:
1189//  thread_state pointer is in r2
1190//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  // Save GPRs: r0-r15 (8 bytes each) starting at offset 16, after the
  // 16-byte PSW area filled in below.
  stmg %r0, %r15, 16(%r2)

  // Save PSWM: epsw extracts the PSW mask halves into r0/r1
  epsw %r0, %r1
  stm %r0, %r1, 0(%r2)

  // Store return address (r14) as PSWA
  stg %r14, 8(%r2)

  // Save FPRs
  std %f0, 144(%r2)
  std %f1, 152(%r2)
  std %f2, 160(%r2)
  std %f3, 168(%r2)
  std %f4, 176(%r2)
  std %f5, 184(%r2)
  std %f6, 192(%r2)
  std %f7, 200(%r2)
  std %f8, 208(%r2)
  std %f9, 216(%r2)
  std %f10, 224(%r2)
  std %f11, 232(%r2)
  std %f12, 240(%r2)
  std %f13, 248(%r2)
  std %f14, 256(%r2)
  std %f15, 264(%r2)

  // Return UNW_ESUCCESS
  lghi %r2, 0
  br %r14
1224
1225#endif
1226
1227  WEAK_ALIAS(__unw_getcontext, unw_getcontext)
1228
1229#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
1230
1231NO_EXEC_STACK_DIRECTIVE
1232