Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
2/*
3 * MIPS specific definitions for NOLIBC
4 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
5 */
6
7#ifndef _NOLIBC_ARCH_MIPS_H
8#define _NOLIBC_ARCH_MIPS_H
9
10#include "compiler.h"
11#include "crt.h"
12
13#if !defined(_ABIO32) && !defined(_ABIN32) && !defined(_ABI64)
14#error Unsupported MIPS ABI
15#endif
16
17/* Syscalls for MIPS ABI O32 :
18 * - WARNING! there's always a delay slot!
19 * - WARNING again, the syntax is different, registers take a '$' and numbers
20 * do not.
21 * - registers are 32-bit
22 * - stack is 8-byte aligned
23 * - syscall number is passed in v0 (starts at 0xfa0).
24 * - arguments are in a0, a1, a2, a3, then the stack. The caller needs to
25 * leave some room in the stack for the callee to save a0..a3 if needed.
26 * - Many registers are clobbered, in fact only a0..a2 and s0..s8 are
27 * preserved. See: https://www.linux-mips.org/wiki/Syscall as well as
28 * scall32-o32.S in the kernel sources.
29 * - the system call is performed by calling "syscall"
30 * - syscall return comes in v0, and register a3 needs to be checked to know
31 * if an error occurred, in which case errno is in v0.
32 * - the arguments are cast to long and assigned into the target registers
33 * which are then simply passed as registers to the asm code, so that we
34 * don't have to experience issues with register constraints.
35 *
36 * Syscalls for MIPS ABI N32, same as ABI O32 with the following differences :
37 * - arguments are in a0, a1, a2, a3, t0, t1, t2, t3.
38 * t0..t3 are also known as a4..a7.
39 * - stack is 16-byte aligned
40 */
41
/*
 * Syscalls may clobber the HI/LO multiply/divide result registers on
 * pre-R6 ISAs, so they must appear in the clobber list there. MIPS R6
 * removed HI/LO, so substitute the hardwired-zero register $0 as a
 * harmless stand-in that keeps the clobber list non-empty.
 */
42#if !defined(__mips_isa_rev) || __mips_isa_rev < 6
43#define _NOLIBC_SYSCALL_CLOBBER_HI_LO "hi", "lo"
44#else
45#define _NOLIBC_SYSCALL_CLOBBER_HI_LO "$0"
46#endif
47
48#if defined(_ABIO32)
49
/*
 * Registers the kernel may trash across a syscall: per the comment at the
 * top of this file, only a0..a2 and s0..s8 are preserved, so list v1, at,
 * all temporaries and (pre-R6) HI/LO as clobbered.
 */
50#define _NOLIBC_SYSCALL_CLOBBERLIST \
51 "memory", "cc", "at", "v1", \
52 "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9", \
53 _NOLIBC_SYSCALL_CLOBBER_HI_LO
54
/*
 * o32 callers must leave stack room for the callee to spill a0..a3 (see
 * top-of-file comment). 32 bytes keeps the stack 8-byte aligned and also
 * provides the 16($sp)/20($sp) slots used for the stacked 5th and 6th
 * syscall arguments below.
 */
55#define _NOLIBC_SYSCALL_STACK_RESERVE "addiu $sp, $sp, -32\n"
56#define _NOLIBC_SYSCALL_STACK_UNRESERVE "addiu $sp, $sp, 32\n"
57
58#else /* _ABIN32 || _ABI64 */
59
60/* binutils, GCC and clang disagree about register aliases, use numbers instead. */
/* $10..$15, $24 and $25 are caller-clobbered temporaries under both the
 * o32 and n32/n64 register naming schemes. */
61#define _NOLIBC_SYSCALL_CLOBBERLIST \
62 "memory", "cc", "at", "v1", \
63 "10", "11", "12", "13", "14", "15", "24", "25", \
64 _NOLIBC_SYSCALL_CLOBBER_HI_LO
65
/* N32/N64 need no stack reservation: all six syscall args fit in registers. */
66#define _NOLIBC_SYSCALL_STACK_RESERVE
67#define _NOLIBC_SYSCALL_STACK_UNRESERVE
68
69#endif /* _ABIO32 */
70
/*
 * Issue syscall 'num' with no arguments.
 * v0 carries the syscall number in and the raw result out; the kernel sets
 * a3 non-zero on failure, in which case v0 holds a positive errno value that
 * we negate so callers get the usual -errno convention.
 * The matching constraint "0" ties the input syscall number to output
 * operand 0 (v0), consistent with the other __nolibc_syscallN macros.
 */
71#define __nolibc_syscall0(num) \
72({ \
73 register long _num __asm__ ("v0") = (num); \
74 register long _arg4 __asm__ ("a3"); \
75 \
76 __asm__ volatile ( \
77 _NOLIBC_SYSCALL_STACK_RESERVE \
78 "syscall\n" \
79 _NOLIBC_SYSCALL_STACK_UNRESERVE \
80 : "=r"(_num), "=r"(_arg4) \
81 : "0"(_num) \
82 : _NOLIBC_SYSCALL_CLOBBERLIST \
83 ); \
84 _arg4 ? -_num : _num; \
85})
86
/*
 * Issue syscall 'num' with one argument in a0. Evaluates to the syscall
 * result, or to -errno when the kernel reports an error via non-zero a3.
 */
87#define __nolibc_syscall1(num, arg1) \
88({ \
89 register long _num __asm__ ("v0") = (num); \
90 register long _arg1 __asm__ ("a0") = (long)(arg1); \
91 register long _arg4 __asm__ ("a3"); \
92 \
93 __asm__ volatile ( \
94 _NOLIBC_SYSCALL_STACK_RESERVE \
95 "syscall\n" \
96 _NOLIBC_SYSCALL_STACK_UNRESERVE \
97 : "=r"(_num), "=r"(_arg4) \
98 : "0"(_num), \
99 "r"(_arg1) \
100 : _NOLIBC_SYSCALL_CLOBBERLIST \
101 ); \
102 _arg4 ? -_num : _num; \
103})
104
/*
 * Issue syscall 'num' with two arguments in a0/a1. Evaluates to the
 * syscall result, or to -errno when the kernel flags failure in a3.
 */
105#define __nolibc_syscall2(num, arg1, arg2) \
106({ \
107 register long _num __asm__ ("v0") = (num); \
108 register long _arg1 __asm__ ("a0") = (long)(arg1); \
109 register long _arg2 __asm__ ("a1") = (long)(arg2); \
110 register long _arg4 __asm__ ("a3"); \
111 \
112 __asm__ volatile ( \
113 _NOLIBC_SYSCALL_STACK_RESERVE \
114 "syscall\n" \
115 _NOLIBC_SYSCALL_STACK_UNRESERVE \
116 : "=r"(_num), "=r"(_arg4) \
117 : "0"(_num), \
118 "r"(_arg1), "r"(_arg2) \
119 : _NOLIBC_SYSCALL_CLOBBERLIST \
120 ); \
121 _arg4 ? -_num : _num; \
122})
123
/*
 * Issue syscall 'num' with three arguments in a0..a2. Evaluates to the
 * syscall result, or to -errno when the kernel flags failure in a3.
 */
124#define __nolibc_syscall3(num, arg1, arg2, arg3) \
125({ \
126 register long _num __asm__ ("v0") = (num); \
127 register long _arg1 __asm__ ("a0") = (long)(arg1); \
128 register long _arg2 __asm__ ("a1") = (long)(arg2); \
129 register long _arg3 __asm__ ("a2") = (long)(arg3); \
130 register long _arg4 __asm__ ("a3"); \
131 \
132 __asm__ volatile ( \
133 _NOLIBC_SYSCALL_STACK_RESERVE \
134 "syscall\n" \
135 _NOLIBC_SYSCALL_STACK_UNRESERVE \
136 : "=r"(_num), "=r"(_arg4) \
137 : "0"(_num), \
138 "r"(_arg1), "r"(_arg2), "r"(_arg3) \
139 : _NOLIBC_SYSCALL_CLOBBERLIST \
140 ); \
141 _arg4 ? -_num : _num; \
142})
143
/*
 * Issue syscall 'num' with four arguments in a0..a3. Note that a3 doubles
 * as the kernel's error flag on return, which is why _arg4 appears both as
 * an input and as an output operand.
 */
144#define __nolibc_syscall4(num, arg1, arg2, arg3, arg4) \
145({ \
146 register long _num __asm__ ("v0") = (num); \
147 register long _arg1 __asm__ ("a0") = (long)(arg1); \
148 register long _arg2 __asm__ ("a1") = (long)(arg2); \
149 register long _arg3 __asm__ ("a2") = (long)(arg3); \
150 register long _arg4 __asm__ ("a3") = (long)(arg4); \
151 \
152 __asm__ volatile ( \
153 _NOLIBC_SYSCALL_STACK_RESERVE \
154 "syscall\n" \
155 _NOLIBC_SYSCALL_STACK_UNRESERVE \
156 : "=r" (_num), "=r"(_arg4) \
157 : "0"(_num), \
158 "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
159 : _NOLIBC_SYSCALL_CLOBBERLIST \
160 ); \
161 _arg4 ? -_num : _num; \
162})
163
164#if defined(_ABIO32)
165
/*
 * o32 passes the 5th syscall argument on the stack: operand %7 (_arg5) is
 * stored at 16($sp) inside the area set up by _NOLIBC_SYSCALL_STACK_RESERVE.
 * Result/-errno convention is the same as the other __nolibc_syscallN
 * macros (a3 non-zero means v0 holds a positive errno).
 */
166#define __nolibc_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
167({ \
168 register long _num __asm__ ("v0") = (num); \
169 register long _arg1 __asm__ ("a0") = (long)(arg1); \
170 register long _arg2 __asm__ ("a1") = (long)(arg2); \
171 register long _arg3 __asm__ ("a2") = (long)(arg3); \
172 register long _arg4 __asm__ ("a3") = (long)(arg4); \
173 register long _arg5 = (long)(arg5); \
174 \
175 __asm__ volatile ( \
176 _NOLIBC_SYSCALL_STACK_RESERVE \
177 "sw %7, 16($sp)\n" \
178 "syscall\n" \
179 _NOLIBC_SYSCALL_STACK_UNRESERVE \
180 : "=r" (_num), "=r"(_arg4) \
181 : "0"(_num), \
182 "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
183 : _NOLIBC_SYSCALL_CLOBBERLIST \
184 ); \
185 _arg4 ? -_num : _num; \
186})
187
/*
 * o32 passes the 5th and 6th syscall arguments on the stack: operands %7
 * (_arg5) and %8 (_arg6) are stored at 16($sp) and 20($sp) inside the
 * reserved area. Result/-errno convention as above.
 */
188#define __nolibc_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
189({ \
190 register long _num __asm__ ("v0") = (num); \
191 register long _arg1 __asm__ ("a0") = (long)(arg1); \
192 register long _arg2 __asm__ ("a1") = (long)(arg2); \
193 register long _arg3 __asm__ ("a2") = (long)(arg3); \
194 register long _arg4 __asm__ ("a3") = (long)(arg4); \
195 register long _arg5 = (long)(arg5); \
196 register long _arg6 = (long)(arg6); \
197 \
198 __asm__ volatile ( \
199 _NOLIBC_SYSCALL_STACK_RESERVE \
200 "sw %7, 16($sp)\n" \
201 "sw %8, 20($sp)\n" \
202 "syscall\n" \
203 _NOLIBC_SYSCALL_STACK_UNRESERVE \
204 : "=r" (_num), "=r"(_arg4) \
205 : "0"(_num), \
206 "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
207 "r"(_arg6) \
208 : _NOLIBC_SYSCALL_CLOBBERLIST \
209 ); \
210 _arg4 ? -_num : _num; \
211})
212
213#else /* _ABIN32 || _ABI64 */
214
/*
 * N32/N64: all five arguments fit in registers; the 5th goes in $8 (a4).
 * Numeric register names are used because assemblers disagree on the
 * a4..a7 aliases (see the clobber-list comment above).
 */
215#define __nolibc_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
216({ \
217 register long _num __asm__ ("v0") = (num); \
218 register long _arg1 __asm__ ("$4") = (long)(arg1); \
219 register long _arg2 __asm__ ("$5") = (long)(arg2); \
220 register long _arg3 __asm__ ("$6") = (long)(arg3); \
221 register long _arg4 __asm__ ("$7") = (long)(arg4); \
222 register long _arg5 __asm__ ("$8") = (long)(arg5); \
223 \
224 __asm__ volatile ( \
225 "syscall\n" \
226 : "=r" (_num), "=r"(_arg4) \
227 : "0"(_num), \
228 "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
229 : _NOLIBC_SYSCALL_CLOBBERLIST \
230 ); \
231 _arg4 ? -_num : _num; \
232})
233
/*
 * N32/N64: all six arguments fit in registers; the 5th and 6th go in
 * $8/$9 (a4/a5). Same result/-errno convention: a3 ($7) non-zero on
 * return means v0 holds a positive errno, which we negate.
 */
234#define __nolibc_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
235({ \
236 register long _num __asm__ ("v0") = (num); \
237 register long _arg1 __asm__ ("$4") = (long)(arg1); \
238 register long _arg2 __asm__ ("$5") = (long)(arg2); \
239 register long _arg3 __asm__ ("$6") = (long)(arg3); \
240 register long _arg4 __asm__ ("$7") = (long)(arg4); \
241 register long _arg5 __asm__ ("$8") = (long)(arg5); \
242 register long _arg6 __asm__ ("$9") = (long)(arg6); \
243 \
244 __asm__ volatile ( \
245 "syscall\n" \
246 : "=r" (_num), "=r"(_arg4) \
247 : "0"(_num), \
248 "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
249 "r"(_arg6) \
250 : _NOLIBC_SYSCALL_CLOBBERLIST \
251 ); \
252 _arg4 ? -_num : _num; \
253})
254
255#endif /* _ABIO32 */
256
257#ifndef NOLIBC_NO_RUNTIME
258/* startup code, note that it's called __start on MIPS */
/* Self-declaration so the externally-visible definition below has a prototype. */
259void __start(void);
/*
 * Process entry point: hands the initial stack pointer to _start_c() as its
 * first argument. Marked weak so a program can override it; marked noreturn
 * because control never comes back from the C runtime.
 */
260void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector __start(void)
261{
262 __asm__ volatile (
263 "move $a0, $sp\n" /* save stack pointer to $a0, as arg1 of _start_c */
264#if defined(_ABIO32)
265 "addiu $sp, $sp, -16\n" /* the callee expects to save a0..a3 there */
266#endif /* _ABIO32 */
267 "lui $t9, %hi(_start_c)\n" /* ABI requires current function address in $t9 */
268 "ori $t9, %lo(_start_c)\n"
#if 0
#endif /* (comment) lui/ori above load only the low 32 bits of the address */
269#if defined(_ABI64)
/* 64-bit: build the upper 32 address bits of _start_c in $t0, shift them
 * into place and merge them into $t9 to form the full 64-bit address. */
270 "lui $t0, %highest(_start_c)\n"
271 "ori $t0, %higher(_start_c)\n"
272 "dsll $t0, 0x20\n"
273 "or $t9, $t0\n"
274#endif /* _ABI64 */
275 "jalr $t9\n" /* transfer to c runtime */
276 );
/* Only reached if _start_c ever returned; epilogue presumably supplied by
 * crt.h/compiler.h (included above) — NOTE(review): confirm it never returns. */
277 __nolibc_entrypoint_epilogue();
278}
279#endif /* NOLIBC_NO_RUNTIME */
280
281#endif /* _NOLIBC_ARCH_MIPS_H */