1//===-- sanitizer_syscall_linux_loongarch64.inc -----------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Implementations of internal_syscall and internal_iserror for
10// Linux/loongarch64.
11//
12//===----------------------------------------------------------------------===//
13
14// About local register variables:
15// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
16//
17// Kernel ABI:
18// https://lore.kernel.org/loongarch/1f353678-3398-e30b-1c87-6edb278f74db@xen0n.name/T/#m1613bc86c2d7bf5f6da92bd62984302bfd699a2f
19//  syscall number is placed in a7
20//  parameters, if present, are placed in a0-a6
21//  upon return:
22//    the return value is placed in a0
23//    t0-t8 should be considered clobbered
24//    all other registers are preserved
// Map a syscall name to its __NR_* number from the kernel headers.
#define SYSCALL(name) __NR_##name

// Registers the kernel may trash across a syscall (per the ABI notes above:
// t0-t8 are clobbered, everything else is preserved). "memory" orders the
// asm against surrounding loads/stores, since syscalls may read/write memory.
#define INTERNAL_SYSCALL_CLOBBERS \
  "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8"
29
30static uptr __internal_syscall(u64 nr) {
31  register u64 a7 asm("$a7") = nr;
32  register u64 a0 asm("$a0");
33  __asm__ volatile("syscall 0\n\t"
34                   : "=r"(a0)
35                   : "r"(a7)
36                   : INTERNAL_SYSCALL_CLOBBERS);
37  return a0;
38}
39#define __internal_syscall0(n) (__internal_syscall)(n)
40
41static uptr __internal_syscall(u64 nr, u64 arg1) {
42  register u64 a7 asm("$a7") = nr;
43  register u64 a0 asm("$a0") = arg1;
44  __asm__ volatile("syscall 0\n\t"
45                   : "+r"(a0)
46                   : "r"(a7)
47                   : INTERNAL_SYSCALL_CLOBBERS);
48  return a0;
49}
50#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
51
52static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
53  register u64 a7 asm("$a7") = nr;
54  register u64 a0 asm("$a0") = arg1;
55  register u64 a1 asm("$a1") = arg2;
56  __asm__ volatile("syscall 0\n\t"
57                   : "+r"(a0)
58                   : "r"(a7), "r"(a1)
59                   : INTERNAL_SYSCALL_CLOBBERS);
60  return a0;
61}
62#define __internal_syscall2(n, a1, a2) \
63  (__internal_syscall)(n, (u64)(a1), (long)(a2))
64
65static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
66  register u64 a7 asm("$a7") = nr;
67  register u64 a0 asm("$a0") = arg1;
68  register u64 a1 asm("$a1") = arg2;
69  register u64 a2 asm("$a2") = arg3;
70  __asm__ volatile("syscall 0\n\t"
71                   : "+r"(a0)
72                   : "r"(a7), "r"(a1), "r"(a2)
73                   : INTERNAL_SYSCALL_CLOBBERS);
74  return a0;
75}
76#define __internal_syscall3(n, a1, a2, a3) \
77  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
78
79static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
80                               u64 arg4) {
81  register u64 a7 asm("$a7") = nr;
82  register u64 a0 asm("$a0") = arg1;
83  register u64 a1 asm("$a1") = arg2;
84  register u64 a2 asm("$a2") = arg3;
85  register u64 a3 asm("$a3") = arg4;
86  __asm__ volatile("syscall 0\n\t"
87                   : "+r"(a0)
88                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
89                   : INTERNAL_SYSCALL_CLOBBERS);
90  return a0;
91}
92#define __internal_syscall4(n, a1, a2, a3, a4) \
93  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
94
95static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
96                               long arg5) {
97  register u64 a7 asm("$a7") = nr;
98  register u64 a0 asm("$a0") = arg1;
99  register u64 a1 asm("$a1") = arg2;
100  register u64 a2 asm("$a2") = arg3;
101  register u64 a3 asm("$a3") = arg4;
102  register u64 a4 asm("$a4") = arg5;
103  __asm__ volatile("syscall 0\n\t"
104                   : "+r"(a0)
105                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
106                   : INTERNAL_SYSCALL_CLOBBERS);
107  return a0;
108}
109#define __internal_syscall5(n, a1, a2, a3, a4, a5)                       \
110  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
111                       (u64)(a5))
112
113static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
114                               long arg5, long arg6) {
115  register u64 a7 asm("$a7") = nr;
116  register u64 a0 asm("$a0") = arg1;
117  register u64 a1 asm("$a1") = arg2;
118  register u64 a2 asm("$a2") = arg3;
119  register u64 a3 asm("$a3") = arg4;
120  register u64 a4 asm("$a4") = arg5;
121  register u64 a5 asm("$a5") = arg6;
122  __asm__ volatile("syscall 0\n\t"
123                   : "+r"(a0)
124                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
125                   : INTERNAL_SYSCALL_CLOBBERS);
126  return a0;
127}
128#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6)                   \
129  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
130                       (u64)(a5), (long)(a6))
131
132static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
133                               long arg5, long arg6, long arg7) {
134  register u64 a7 asm("$a7") = nr;
135  register u64 a0 asm("$a0") = arg1;
136  register u64 a1 asm("$a1") = arg2;
137  register u64 a2 asm("$a2") = arg3;
138  register u64 a3 asm("$a3") = arg4;
139  register u64 a4 asm("$a4") = arg5;
140  register u64 a5 asm("$a5") = arg6;
141  register u64 a6 asm("$a6") = arg7;
142  __asm__ volatile("syscall 0\n\t"
143                   : "+r"(a0)
144                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
145                     "r"(a6)
146                   : INTERNAL_SYSCALL_CLOBBERS);
147  return a0;
148}
149#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7)               \
150  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
151                       (u64)(a5), (long)(a6), (long)(a7))
152
// Argument-counting dispatch: internal_syscall(nr, ...) expands to the
// __internal_syscallN wrapper where N is the number of arguments after nr.
// __SYSCALL_NARGS_X simply selects its ninth argument ...
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
// ... so padding the call with 7,6,...,0 makes the selected value equal the
// count of caller-supplied arguments minus one (nr itself takes one slot).
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
// Two-level token paste so macro arguments are fully expanded before ##.
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)

#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
162
163// Helper function used to avoid clobbering of errno.
164bool internal_iserror(uptr retval, int *internal_errno) {
165  if (retval >= (uptr)-4095) {
166    if (internal_errno)
167      *internal_errno = -retval;
168    return true;
169  }
170  return false;
171}
172