/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains ARM architecture-specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
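
/*
 * Usage sketch (illustrative, not from the original file): these
 * macros expand to shift operands, so a byte can be extracted or
 * merged back in an endian-independent way:
 *	mov	r2, r4, get_byte_1	@ r2 = memory byte 1 of r4 ...
 *	and	r2, r2, #255		@ ... masked down to 8 bits
 *	orr	r3, r3, r5, put_byte_1	@ place low byte of r5 there
 */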

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif
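
/*
 * Usage sketch (illustrative): byte-swap a value only when the kernel
 * is built for BE8, e.g. after loading a little-endian descriptor:
 *	ldr	r0, [r1]
 * ARM_BE8(rev	r0, r0)
 */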

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
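
/*
 * Usage sketch (illustrative): preload the next source line in a copy
 * loop; the instruction disappears entirely on pre-v5 builds:
 *	PLD(	pld	[r1, #32]	)
 */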

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be a worthwhile thing to do when the
 * cache is not set to write-allocate (this would need further testing
 * on XScale when WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

#define IMM12_MASK	0xfff

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * The registers should really be pushed and popped
	 * conditionally, but after a bl the flags are clobbered in
	 * any case.
	 */
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
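
/*
 * Usage sketch (illustrative): bracket a short critical section, with
 * r9 as the scratch register that holds the saved state:
 *	save_and_disable_irqs r9
 *	@ ... code that must not be interrupted ...
 *	restore_irqs r9
 */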

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
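
/*
 * Usage sketch (illustrative): take the address of a local label so a
 * later return through it lands in the correct instruction set state
 * (see safe_svcmode_maskall below for an in-file user):
 *	badr	lr, 1f
 */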

/*
 * Get current thread_info.  The kernel stack is THREAD_SIZE aligned
 * with thread_info at its base, so clearing the low stack bits of sp
 * (a right shift followed by a left shift by THREAD_SIZE_ORDER +
 * PAGE_SHIFT) yields the thread_info pointer.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)
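
/*
 * Usage sketch (illustrative; ".Lfixup" is a hypothetical local
 * label): attach an exception-table fixup to a user access:
 *	USERL(.Lfixup, ldrt r0, [r1])
 * USER() is the same but defaults the fixup to the 9001 label that
 * the surrounding code is expected to provide.
 */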

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
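
/*
 * Usage sketch (illustrative): pair the two so an SMP kernel emits a
 * barrier that is patched to a nop when it boots on a UP system (see
 * smp_dmb below for an in-file user):
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */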

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
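
/*
 * Usage sketch (illustrative): code that is always assembled as ARM
 * passes "arm" to skip the W() wide-encoding marker; other callers
 * use plain "smp_dmb" and get the W() form under Thumb-2:
 *	smp_dmb	arm
 */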

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during
	 * boot.  For v7-M this is done in __v7m_setup, so setmode can
	 * be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended to force the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
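
/*
 * Usage sketch (illustrative): early boot code typically runs
 *	safe_svcmode_maskall r9
 * before touching anything mode-dependent, clobbering r9.
 */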

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
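
/*
 * Usage sketch (illustrative): load one word from the user pointer in
 * r1 into r3, branching to the default fixup label 9001f on a fault:
 *	ldrusr	r3, r1, 4
 */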

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
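
/*
 * Usage sketch (illustrative, hypothetical symbol name):
 *	string	example_name, "example value"
 */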

/*
 * Control Speculation Data Barrier (CSDB).  The raw .inst encodings
 * below are the CSDB hint (a NOP on CPUs that do not implement it),
 * emitted by value so the file assembles with toolchains that do not
 * know the csdb mnemonic.
 */
	.macro	csdb
#ifdef CONFIG_THUMB2_KERNEL
	.inst.w	0xf3af8014
#else
	.inst	0xe320f014
#endif
	.endm

/*
 * Range check a user access: branch to \bad unless the \size byte
 * access at \addr lies entirely below \limit.  Under
 * CONFIG_CPU_SPECTRE the pointer is additionally zeroed on the
 * failing path, so it cannot be dereferenced under misspeculation
 * past the branch.
 */
	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcscc	\tmp, \tmp, \limit
	bcs	\bad
#ifdef CONFIG_CPU_SPECTRE
	movcs	\addr, #0
	csdb
#endif
#endif
	.endm

	.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
#ifdef CONFIG_CPU_SPECTRE
	sub	\tmp, \limit, #1
	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
	subshs	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
	csdb
#endif
	.endm

	.macro	uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	mrc	p15, 0, \tmp, c3, c0, 0
	str	\tmp, [sp, #SVC_DACR]
#endif
	.endm

	.macro	uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	ldr	r0, [sp, #SVC_DACR]
	mcr	p15, 0, r0, c3, c0, 0
#endif
	.endm

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr
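
/*
 * Usage sketch (illustrative): "ret lr" returns with interworking on
 * v6+; the .irp above also generates conditional forms such as
 * "reteq lr".
 */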

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm
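
/*
 * Usage sketch (illustrative): trap with a message and source line;
 * since this header is run through the C preprocessor, __LINE__ can
 * serve as the line argument:
 *	bug	"unexpected state", __LINE__
 */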

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif
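
/*
 * Usage sketch (illustrative, hypothetical symbol): blacklist an
 * assembly entry point so kprobes will not instrument it:
 *	_ASM_NOKPROBE(example_entry)
 */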

#endif /* __ASM_ASSEMBLER_H__ */