Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * arch/arm/include/asm/assembler.h
3 *
4 * Copyright (C) 1996-2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This file contains arm architecture specific defines
11 * for the different processors.
12 *
13 * Do not include any C declarations in this file - it is included by
14 * assembler source.
15 */
16#ifndef __ASM_ASSEMBLER_H__
17#define __ASM_ASSEMBLER_H__
18
19#ifndef __ASSEMBLY__
20#error "Only include this from assembly code"
21#endif
22
23#include <asm/ptrace.h>
24#include <asm/domain.h>
25#include <asm/opcodes-virt.h>
26
#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 *
 * "pull"/"push" name the shift direction used when reassembling a word
 * from two misaligned parts; the directions swap between little-endian
 * and big-endian (__ARMEB__) builds.
 * get_byte_N is the shift that brings memory-order byte N down to bits
 * 7:0; put_byte_N positions bits 7:0 at byte N's lane.
 * NOTE(review): "pull"/"push" shadow the pld operand syntax and the
 * Thumb push mnemonic respectively -- these operand macros predate
 * unified syntax; verify assembler mode before reusing elsewhere.
 */
#ifndef __ARMEB__
#define pull            lsr
#define push            lsl
#define get_byte_0      lsl #0
#define get_byte_1      lsr #8
#define get_byte_2      lsr #16
#define get_byte_3      lsr #24
#define put_byte_0      lsl #0
#define put_byte_1      lsl #8
#define put_byte_2      lsl #16
#define put_byte_3      lsl #24
#else
#define pull            lsl
#define push            lsr
#define get_byte_0      lsr #24
#define get_byte_1      lsr #16
#define get_byte_2      lsr #8
#define get_byte_3      lsl #0
#define put_byte_0      lsl #24
#define put_byte_1      lsl #16
#define put_byte_2      lsl #8
#define put_byte_3      lsl #0
#endif
55
/*
 * Select code for any configuration running in BE8 mode.
 * The wrapped instruction(s) are emitted only when the kernel is
 * built CONFIG_CPU_ENDIAN_BE8; otherwise they vanish.
 */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it.
 * PLD(...) emits its argument only when building for ARMv5 or later
 * (per the guard below); on earlier targets it compiles away.
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif
86
/*
 * Enable and disable interrupts (IRQ only; FIQ is left untouched by
 * the v6+ variants).  These _notrace primitives skip the irq-tracing
 * hooks; use disable_irq/enable_irq below for traced paths.
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i			@ v6+: mask IRQ via change-processor-state
	.endm

	.macro	enable_irq_notrace
	cpsie	i			@ v6+: unmask IRQ
	.endm
#else
	/*
	 * Pre-v6 has no cps; rewrite the whole CPSR control field.
	 * NOTE(review): this assumes SVC mode with FIQs enabled, since the
	 * immediate replaces mode and F bit as well -- matches the comment
	 * on save_and_disable_irqs below.
	 */
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif
107
	/*
	 * Call the C irq-tracing hooks.  trace_hardirqs_off/on follow the
	 * AAPCS, so the caller-clobbered registers (r0-r3, ip, lr) must be
	 * preserved around the call.  Compiles away without
	 * CONFIG_TRACE_IRQFLAGS.
	 */
	.macro	asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb	sp!, {r0-r3, ip, lr}	@ save AAPCS caller-clobbered regs
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	/* Conditional variant: the bl itself carries \cond. */
	.macro	asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb	sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro	asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al	@ unconditional form
	.endm

	/* Traced IRQ disable: mask first, then record the transition. */
	.macro	disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	/* Traced IRQ enable: record the transition before unmasking. */
	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 * On v7-M there is no CPSR I bit; the IRQ mask lives in PRIMASK, so
 * that register is saved instead.  \oldcpsr receives the saved state
 * for a later restore_irqs.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm
153
154 .macro save_and_disable_irqs_notrace, oldcpsr
155 mrs \oldcpsr, cpsr
156 disable_irq_notrace
157 .endm
158
/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 * On v7-M the state goes back into PRIMASK; otherwise only the CPSR
 * control field is written (cpsr_c), leaving the condition flags alone.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	/*
	 * Traced restore: if the saved state had IRQs enabled (I bit
	 * clear -> tst sets Z -> "eq"), call the irq-on trace hook before
	 * actually unmasking.
	 */
	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm
176
/*
 * USER(insn): emit a user-space access instruction and register it in
 * __ex_table so a fault branches to the fixup label 9001, which the
 * code using this macro must define locally.
 */
#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

#ifdef CONFIG_SMP
/*
 * ALT_SMP/ALT_UP: emit the SMP form inline and record its address plus
 * a 4-byte UP replacement in ".alt.smp.init" -- presumably patched in
 * at boot when running on a uniprocessor; verify against the section's
 * consumer.  On UP builds the UP form is emitted directly.
 */
#define ALT_SMP(instr...)			\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)			\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
9997:	instr					;\
	.if . - 9997b != 4			;\
	.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif					;\
	.popsection
#define ALT_UP_B(label)				\
	.equ	up_b_offset, label - 9998b	;\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
	W(b)	. + up_b_offset			;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...)	instr
#define ALT_UP_B(label)	b	label
#endif
211
/*
 * Instruction barrier: isb on v7+, or the v6 CP15 equivalent
 * ("flush prefetch buffer", c7/c5/4 -- the pre-v7 ISB encoding).
 * Assembles to nothing before v6.
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm
222
/*
 * SMP data memory barrier.
 * Emits a real barrier only when CONFIG_SMP, paired with an ALT_UP nop
 * so it can be replaced when running uniprocessor.  \mode selects the
 * encoding: "arm" uses the narrow form, anything else the wide W()
 * form (needed from Thumb-2 so both alternatives are 4 bytes).
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)		@ wide encoding, 4 bytes
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
246
#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert to be in svc mode during boot.  For v7-M
	 * this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	/*
	 * Thumb-2 msr has no immediate form, so stage the mode/mask value
	 * through the caller-supplied scratch register.
	 */
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	/* ARM mode: write the immediate straight to the CPSR control field. */
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
264
/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 *
 * On >= v6: if the CPU is in HYP mode, a plain CPSR write cannot drop
 * to SVC, so an exception return (eret) is staged instead; otherwise a
 * single msr to the control field suffices.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE	@ mode bits become 0 iff currently HYP
	tst	\reg, #MODE_MASK	@ Z set -> in HYP mode
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f			@ not HYP: plain control-field write
	orr	\reg, \reg, #PSR_A_BIT	@ HYP exit: also mask async aborts
	adr	lr, BSYM(2f)		@ resume at 2: after the exception return
	msr	spsr_cxsf, \reg		@ target PSR for the eret
	__MSR_ELR_HYP(14)		@ return address = lr (r14)
	__ERET				@ leave HYP into SVC
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
296
/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants.
 *
 * usracc emits \rept (1 or 2) unprivileged ("t"-suffixed) loads or
 * stores of \inc bytes each (1 = byte, 4 = word), advancing \ptr by
 * \rept * \inc in total, and registers every access in __ex_table so
 * a fault branches to \abort.
 */
#ifdef CONFIG_THUMB2_KERNEL

	/*
	 * One unprivileged access at [\ptr, #\off].  Thumb-2 forces the
	 * wide (.w) encoding and uses an explicit offset; the pointer is
	 * advanced once by the caller (usracc) instead of per access.
	 */
	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc	@ single pointer update for all accesses
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	/* ARM mode: post-indexed form advances \ptr on each access. */
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */
361
	/*
	 * Convenience wrappers around usracc: store/load \reg to/from user
	 * space, post-incrementing \ptr.  On fault, control goes to \abort
	 * (default: a local 9001 label the caller must provide).
	 */
	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
369
/*
 * Utility macro for declaring string literals: emits \name as a
 * NUL-terminated string with proper symbol type and size set.
 */
	.macro	string name:req, string
	.type	\name , #object
\name:
	.asciz "\string"
	.size	\name , . - \name
	.endm
377
	/*
	 * Range-check a user access: branch to \bad unless the whole of
	 * [\addr, \addr + \size) lies below \limit.  adds computes the
	 * address of the last byte (setting C on wraparound); the carry-
	 * conditional sbc then compares against \limit, so bcs fires on
	 * either wrap or out-of-range.  \tmp is clobbered.
	 * Compiled out under CONFIG_CPU_USE_DOMAINS -- presumably domain
	 * protection covers this case; verify against the uaccess code.
	 * NOTE(review): "sbcccs" is the pre-UAL spelling (UAL: sbcscc);
	 * it only assembles in divided-syntax mode -- confirm toolchain.
	 */
	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcccs	\tmp, \tmp, \limit
	bcs	\bad
#endif
	.endm
385
386#endif /* __ASM_ASSEMBLER_H__ */