/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define PTE_INDX_MSK	0xffc
#define PTE_INDX_SHIFT	10
#define _PGDIR_SHIFT	22

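/*
 * tlbop_begin generates a fast TLB-refill handler: it stashes the
 * scratch registers in ss2-ss4, walks the two-level page table for
 * the faulting address and, if the PTE is present with the required
 * permission, fixes up the status flags and returns with rte;
 * otherwise it falls through to the C page-fault path.
 *
 * With 4 KiB pages the virtual address splits as:
 *   PGD index  = vaddr >> 22                  (_PGDIR_SHIFT)
 *   PTE offset = (vaddr >> 10) & 0xffc
 *              = ((vaddr >> 12) & 0x3ff) * 4  (word-sized entries)
 */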
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr	a3, ss2
	mtcr	r6, ss3
	mtcr	a2, ss4

	RD_PGDR	r6
	RD_MEH	a3
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3
	sync.is

	btsti	a3, 31
	bf	1f
	RD_PGDR_K r6
1:
#else
	bgeni	a2, 31
	WR_MCIR	a2
	bgeni	a2, 25
	WR_MCIR	a2
#endif
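	/*
	 * r6 holds the PGD base from the PGDR register (bit 0 looks like
	 * a flag and is cleared); convert the physical base to its
	 * kernel direct-mapped virtual address: subtract PHYS_OFFSET and
	 * set bit 31.
	 */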
	bclri	r6, 0
	lrw	a2, PHYS_OFFSET
	subu	r6, a2
	bseti	r6, 31

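	/*
	 * Index the PGD: a2 = (vaddr >> _PGDIR_SHIFT) * 4, load the PTE
	 * table base, and convert it to a virtual address the same way.
	 */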
	mov	a2, a3
	lsri	a2, _PGDIR_SHIFT
	lsli	a2, 2
	addu	r6, a2
	ldw	r6, (r6)

	lrw	a2, PHYS_OFFSET
	subu	r6, a2
	bseti	r6, 31

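	/*
	 * Index the PTE table: the shift-and-mask yields
	 * ((vaddr >> 12) & 0x3ff) * 4; load the PTE into a3.
	 */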
	lsri	a3, PTE_INDX_SHIFT
	lrw	a2, PTE_INDX_MSK
	and	a3, a2
	addu	r6, a3
	ldw	a3, (r6)

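	/*
	 * The PTE must have _PAGE_PRESENT and the permission bit for
	 * this access (\val0) set; otherwise take the slow path into
	 * do_page_fault.
	 */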
	movi	a2, (_PAGE_PRESENT | \val0)
	and	a3, a2
	cmpne	a3, a2
	bt	\name

	/* First access (read/write) of the page: just update the flags */
	ldw	a3, (r6)
	bgeni	a2, PAGE_VALID_BIT
	bseti	a2, PAGE_ACCESSED_BIT
	bseti	a2, \val1
	bseti	a2, \val2
	or	a3, a2
	stw	a3, (r6)

	/*
	 * Some CPUs' TLB hard-refill bypasses the cache, so write the
	 * updated PTE back to memory.
	 */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi	a2, 0x22
	bseti	a2, 6
	mtcr	r6, cr22
	mtcr	a2, cr17
	sync
#endif

	mfcr	a3, ss2
	mfcr	r6, ss3
	mfcr	a2, ss4
	rte
\name:
	mfcr	a3, ss2
	mfcr	r6, ss3
	mfcr	a2, ss4
	SAVE_ALL EPC_KEEP
.endm
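/*
 * tlbop_end: shared slow-path tail. Re-read the fault address from
 * MEH, re-enable exceptions/interrupts and call do_page_fault with
 * pt_regs, the write flag and the MEH value.
 */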
.macro tlbop_end is_write
	RD_MEH	a2
	psrset	ee, ie
	mov	a0, sp
	movi	a1, \is_write
	jbsr	do_page_fault
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */
	jmpi	ret_from_exception
.endm

.text

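/*
 * Three refill handlers: tlbinvalidl (read miss), tlbinvalids (write
 * miss) and tlbmodified (write to a present, clean page). Without
 * ldex/stex, tlbmodified also calls csky_cmpxchg_fixup to patch up a
 * trap-based cmpxchg emulation that may have been interrupted.
 */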
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1

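/*
 * System-call entry. After saving registers it bounds-checks the
 * syscall number, dispatches through sys_call_table, and handles the
 * ptrace (TIF_SYSCALL_TRACE) path separately.
 *
 * A minimal userspace invocation sketch, assuming the ABIv2
 * convention (number in r7, args in a0-a3, "trap 0" enters here);
 * the register and trap assignments are assumptions, not taken from
 * this file:
 *
 *   register long num asm("r7") = __NR_getpid;
 *   register long ret asm("a0");
 *   asm volatile("trap 0" : "=r"(ret) : "r"(num) : "memory");
 */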
ENTRY(csky_systemcall)
	SAVE_ALL EPC_INCREASE

	psrset	ee, ie

	/*
	 * Stack frame for syscall: store the pt_regs pointer at the
	 * fixed esp0 slot above the stack base (originally done by a
	 * call to set_esp0).
	 */
	mov	r12, sp

	bmaski	r11, 13
	andn	r12, r11
	bgeni	r11, 9
	addi	r11, 32
	addu	r12, r11
	st	sp, (r12, 0)

	lrw	r11, __NR_syscalls
	cmphs	syscallid, r11		/* Check syscall number against __NR_syscalls */
	bt	ret_from_exception

	lrw	r13, sys_call_table
	ixw	r13, syscallid
	ldw	r11, (r13)
	cmpnei	r11, 0
	bf	ret_from_exception

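	/*
	 * Mask sp down to the THREAD_SIZE-aligned base to reach
	 * thread_info; take the trace path if TIF_SYSCALL_TRACE is set.
	 */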
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r8, (r9, TINFO_FLAGS)
	btsti	r8, TIF_SYSCALL_TRACE
	bt	1f
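	/*
	 * ABIv2 passes syscall args 5 and 6 in r4/r5; spill them onto
	 * the stack so the C handler sees them as stack arguments.
	 */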
#if defined(__CSKYABIV2__)
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
	jsr	r11			/* Do system call */
	addi	sp, 8
#else
	jsr	r11
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */
	jmpi	ret_from_exception

1:
	movi	a0, 0			/* enter system call */
	mov	a1, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace
	/* Prepare args before doing the system call */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
#else
	ldw	r6, (sp, LSAVE_A4)
	ldw	r7, (sp, LSAVE_A5)
#endif
	jsr	r11			/* Do system call */
#if defined(__CSKYABIV2__)
	addi	sp, 8
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

	movi	a0, 1			/* leave system call */
	mov	a1, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace

syscall_exit_work:
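	/*
	 * Bit 31 of the saved PSR is the S (supervisor) bit: if set, we
	 * trapped from kernel mode and skip the userspace-resume work.
	 */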
	ld	syscallid, (sp, LSAVE_PSR)
	btsti	syscallid, 31
	bt	2f

	jmpi	resume_userspace

2:	RESTORE_ALL

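/*
 * copy_thread() parks the thread function in r9 and its argument in
 * r8 for a new kernel thread (register choice inferred from the code
 * below).
 */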
ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r8
	jsr	r9
	jbsr	ret_from_exception

ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r8, (r9, TINFO_FLAGS)
	movi	r11_sig, 1
	btsti	r8, TIF_SYSCALL_TRACE
	bf	3f
	movi	a0, 1
	mov	a1, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace
3:
	jbsr	ret_from_exception

ret_from_exception:
	ld	syscallid, (sp, LSAVE_PSR)
	btsti	syscallid, 31
	bt	1f

	/*
	 * Load the address of current's thread_info by masking the stack
	 * pointer, then check its work flags (need_resched, pending
	 * signals) in resume_userspace below.
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

resume_userspace:
	ldw	r8, (r9, TINFO_FLAGS)
	andi	r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
	cmpnei	r8, 0
	bt	exit_work
1:	RESTORE_ALL

exit_work:
	mov	a0, sp			/* Stack address is arg[0] */
	jbsr	set_esp0		/* Call C level */
	btsti	r8, TIF_NEED_RESCHED
	bt	work_resched
	/* If no other thread_info->flags work bits are set, RESTORE_ALL */
	cmpnei	r8, 0
	bf	1b
	mov	a1, sp
	mov	a0, r8
	mov	a2, r11_sig		/* syscall? */
	btsti	r8, TIF_SIGPENDING	/* delivering a signal? */
	/* prevent further restarts (set r11 = 0) */
	clrt	r11_sig
	jbsr	do_notify_resume	/* do signals */
	br	resume_userspace

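/*
 * Call schedule() with the link register pointing at
 * ret_from_exception so the work flags are re-checked on return.
 */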
work_resched:
	lrw	syscallid, ret_from_exception
	mov	r15, syscallid		/* Return address in link */
	jmpi	schedule

ENTRY(sys_rt_sigreturn)
	movi	r11_sig, 0
	jmpi	do_rt_sigreturn

ENTRY(csky_trap)
	SAVE_ALL EPC_KEEP
	psrset	ee
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */
	mov	a0, sp			/* Push Stack pointer arg */
	jbsr	trap_c			/* Call C-level trap handler */
	jmpi	ret_from_exception

/*
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP

	/* Advance epc past the trap so execution continues after it */
	mfcr	a0, epc
	INCTRAP	a0
	mtcr	a0, epc

	/*
	 * Get current task's thread_info by masking the kernel stack
	 * pointer; the subi/addi pair keeps an sp sitting exactly at the
	 * stack top inside the right THREAD_SIZE region.
	 */
	bmaski	a0, THREAD_SHIFT
	not	a0
	subi	sp, 1
	and	a0, sp
	addi	sp, 1

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP
	rte

ENTRY(csky_irq)
	SAVE_ALL EPC_KEEP
	psrset	ee
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */

#ifdef CONFIG_PREEMPT
	mov	r9, sp			/* Get current stack pointer */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10			/* Get thread_info */

	/*
	 * Increment the preempt count held in thread_info
	 * (task_struct->stack) by one.
	 */
	ldw	r8, (r9, TINFO_PREEMPT)
	addi	r8, 1
	stw	r8, (r9, TINFO_PREEMPT)
#endif

	mov	a0, sp
	jbsr	csky_do_IRQ

#ifdef CONFIG_PREEMPT
	subi	r8, 1
	stw	r8, (r9, TINFO_PREEMPT)
	cmpnei	r8, 0
	bt	2f
	ldw	r8, (r9, TINFO_FLAGS)
	btsti	r8, TIF_NEED_RESCHED
	bf	2f
1:
	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
	ldw	r7, (r9, TINFO_FLAGS)	/* get the new task's TI_FLAGS */
	btsti	r7, TIF_NEED_RESCHED
	bt	1b			/* go again */
#endif
2:
	jmpi	ret_from_exception

/*
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0

	mfcr	a2, psr			/* Save PSR value */
	stw	a2, (a3, THREAD_SR)	/* Save PSR in task struct */
	bclri	a2, 6			/* Disable interrupts */
	mtcr	a2, psr

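	/* Spill the callee-saved registers onto prev's kernel stack */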
	SAVE_SWITCH_STACK

	stw	sp, (a3, THREAD_KSP)

#ifdef CONFIG_CPU_HAS_HILO
	lrw	r10, THREAD_DSPHI
	add	r10, a3
	mfhi	r6
	mflo	r7
	stw	r6, (r10, 0)		/* THREAD_DSPHI */
	stw	r7, (r10, 4)		/* THREAD_DSPLO */
	mfcr	r6, cr14
	stw	r6, (r10, 8)		/* THREAD_DSPCSR */
#endif

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1

	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */

#ifdef CONFIG_CPU_HAS_HILO
	lrw	r10, THREAD_DSPHI
	add	r10, a3
	ldw	r6, (r10, 8)		/* THREAD_DSPCSR */
	mtcr	r6, cr14
	ldw	r6, (r10, 0)		/* THREAD_DSPHI */
	ldw	r7, (r10, 4)		/* THREAD_DSPLO */
	mthi	r6
	mtlo	r7
#endif

	ldw	a2, (a3, THREAD_SR)	/* Set next PSR */
	mtcr	a2, psr

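	/*
	 * ABIv2 keeps the TLS pointer in a dedicated register: reload it
	 * from the next task's thread_info.
	 */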
#if defined(__CSKYABIV2__)
	addi	r7, a1, TASK_THREAD_INFO
	ldw	tls, (r7, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK

	rts
ENDPROC(__switch_to)