/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This file contains arm architecture specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
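
/*
 * Usage sketch (illustrative only; register choice is arbitrary): assembling
 * a word from two misaligned source words and storing one of its bytes.
 *
 *	mov	r4, r4, lspull #8	@ discard the bytes already consumed
 *	orr	r4, r4, r5, lspush #24	@ merge in bytes from the next word
 *	mov	r3, r4, get_byte_1	@ byte 1 of r4 in the low bits of r3
 *	strb	r3, [r0], #1		@ store it (strb ignores the upper bits)
 */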

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be worthwhile when the cache is not set to
 * write-allocate (this would need further testing on XScale when WA is
 * used).
 *
 * On Feroceon, however, there is much to gain regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
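
/*
 * Usage sketch (illustrative only): both helpers expand to nothing on CPUs
 * that do not benefit from them, so copy loops can use them unconditionally.
 *
 *	PLD(	pld	[r1, #32]	)	@ prefetch one cacheline ahead
 *	CALGN(	ands	ip, r0, #31	)	@ destination offset into cacheline
 *	CALGN(	rsb	ip, ip, #32	)	@ bytes to the next cacheline boundary
 */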

#define IMM12_MASK	0xfff

/* the frame pointer used for stack unwinding */
ARM(	fpreg	.req	r11	)
THUMB(	fpreg	.req	r7	)

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

#if __LINUX_ARM_ARCH__ < 7
	.macro	dsb, args
	mcr	p15, 0, r0, c7, c10, 4
	.endm

	.macro	isb, args
	mcr	p15, 0, r0, c7, c5, 4
	.endm
#endif

	.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Ideally the registers would be pushed and popped conditionally, but
	 * after the bl the flags are certainly clobbered, so the pop could not
	 * use the original condition anyway.
	 */
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
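
/*
 * Usage sketch (illustrative only): bracketing a short critical section.
 * Any spare register can hold the saved state.
 *
 *	save_and_disable_irqs r9	@ r9 := old CPSR (or PRIMASK on v7-M)
 *	...				@ code that must not be interrupted
 *	restore_irqs r9			@ re-enable IRQs only if they were on
 */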

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
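
/*
 * Usage sketch (illustrative only; 'some_routine' stands for any routine that
 * returns via lr): taking the address of a local label so that the eventual
 * return lands in the correct instruction set state on Thumb-2 kernels.
 *
 *	badr	lr, 1f			@ lr := address of 1f (bit 0 set for Thumb-2)
 *	b	some_routine
 * 1:
 */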

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
	/* thread_info is the first member of struct task_struct */
	get_current \rd
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm
#endif

	.macro	local_bh_disable, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]
	add	\tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	local_bh_enable_ti, ti, tmp
	get_thread_info \ti
	ldr	\tmp, [\ti, #TI_PREEMPT]
	sub	\tmp, \tmp, #SOFTIRQ_DISABLE_OFFSET
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)

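/*
 * Usage sketch (illustrative only): marking a user-space access so that a
 * fault branches to the local fixup label 9001 expected by USER().
 *
 *	USER(	ldrt	r3, [r1]	)	@ may fault; fixup jumps to 9001f
 *	...
 * 9001:					@ reached only if the access faulted
 */
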
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.align	2						;\
	.long	9998b - .					;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.pushsection ".alt.smp.init", "a"			;\
	.align	2						;\
	.long	9998b - .					;\
	W(b)	. + (label - 9998b)				;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
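
/*
 * Usage sketch (illustrative only): an SMP-only barrier that gets patched to
 * a nop when the kernel finds itself running on a uniprocessor system.  The
 * smp_dmb macro below uses exactly this pattern.
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */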

	/*
	 * this_cpu_offset - load the per-CPU offset of this CPU into
	 *		     register 'rd'
	 */
	.macro	this_cpu_offset, rd:req
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, \rd, c13, c0, 4)
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L1_\@)
.L0_\@:
	.subsection 1
.L1_\@:	ldr_va	\rd, __per_cpu_offset
	b	.L0_\@
	.previous
#endif
#else
	mov	\rd, #0
#endif
	.endm

	/*
	 * set_current - store the task pointer of this CPU's current task
	 */
	.macro	set_current, rn:req, tmp:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:	mcr	p15, 0, \rn, c13, c0, 3		@ set TPIDRURO register
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L0_\@)
	.subsection 1
.L0_\@:	str_va	\rn, __current, \tmp
	b	.L1_\@
	.previous
.L1_\@:
#endif
#else
	str_va	\rn, __current, \tmp
#endif
	.endm

	/*
	 * get_current - load the task pointer of this CPU's current task
	 */
	.macro	get_current, rd:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:	mrc	p15, 0, \rd, c13, c0, 3		@ get TPIDRURO register
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L0_\@)
	.subsection 1
.L0_\@:	ldr_va	\rd, __current
	b	.L1_\@
	.previous
.L1_\@:
#endif
#else
	ldr_va	\rd, __current
#endif
	.endm

	/*
	 * reload_current - reload the task pointer of this CPU's current task
	 *		    into the TLS register
	 */
	.macro	reload_current, t1:req, t2:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L0_\@)
#endif
	ldr_this_cpu \t1, __entry_task, \t1, \t2
	mcr	p15, 0, \t1, c13, c0, 3		@ store in TPIDRURO
.L0_\@:
#endif
	.endm

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during boot.
	 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time;
 * you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
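
/*
 * Usage sketch (illustrative only): called once from early boot code, with
 * any register the caller is free to clobber.
 *
 *	safe_svcmode_maskall r9		@ force SVC mode, IRQs and FIQs masked
 */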

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
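
/*
 * Usage sketch (illustrative only): a word-at-a-time copy from user space
 * using the default abort label 9001f.
 *
 *	ldrusr	r3, r1, 4		@ load a word from user space at [r1], r1 += 4
 *	str	r3, [r0], #4		@ store it to the kernel buffer
 *	...
 * 9001:				@ reached only if the user access faulted
 */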

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
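
/*
 * Usage sketch (illustrative only; the symbol name is arbitrary): emits a
 * NUL-terminated string with proper symbol type and size information.
 *
 *	string	cpu_name_example, "Example CPU"
 */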

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr
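
/*
 * Usage sketch (illustrative only): the .irp above generates ret and one
 * conditional variant per condition code suffix.
 *
 *	ret	lr			@ bx lr on ARMv6+, mov pc, lr on older cores
 *	retne	r3			@ conditional return through r3 (mov pc, r3)
 */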

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
 THUMB(	ittt		\c			)
	.endif
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8		// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

	/*
	 * mov_l - move a constant value or [relocated] address into a register
	 */
	.macro		mov_l, dst:req, imm:req, cond
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\cond	\dst, =\imm
	.else
	movw\cond	\dst, #:lower16:\imm
	movt\cond	\dst, #:upper16:\imm
	.endif
	.endm
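
	/*
	 * Usage sketch (illustrative only): loading a 32-bit constant without
	 * caring whether the assembler emits a movw/movt pair or a literal
	 * pool load.
	 *
	 *	mov_l	r0, 0x12345678	@ any 32-bit value
	 */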

	/*
	 * adr_l - adr pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

	/*
	 * ldr_l - ldr <literal> pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

	/*
	 * str_l - str <literal> pseudo-op with unlimited range
	 *
	 * @src: source register
	 * @sym: name of the symbol
	 * @tmp: mandatory scratch register
	 * @cond: conditional opcode suffix
	 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm
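
	/*
	 * Usage sketch (illustrative only; 'example_var' stands for any
	 * symbol):
	 *
	 *	adr_l	r0, example_var		@ r0 := address of example_var
	 *	ldr_l	r1, example_var		@ r1 := 32-bit value at example_var
	 *	str_l	r1, example_var, r2	@ store r1 there, r2 is a scratch register
	 */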

	.macro		__ldst_va, op, reg, tmp, sym, cond, offset
#if __LINUX_ARM_ARCH__ >= 7 || \
    !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	mov_l		\tmp, \sym, \cond
#else
	/*
	 * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
	 * with the appropriate relocations. The combined sequence has a range
	 * of -/+ 256 MiB, which should be sufficient for the core kernel and
	 * for modules loaded into the module region.
	 */
	.globl		\sym
	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@: sub\cond	\tmp, pc, #8 - \offset
.L1_\@: sub\cond	\tmp, \tmp, #4 - \offset
.L2_\@:
#endif
	\op\cond	\reg, [\tmp, #\offset]
	.endm

	/*
	 * ldr_va - load a 32-bit word from the virtual address of \sym
	 */
	.macro		ldr_va, rd:req, sym:req, cond, tmp, offset=0
	.ifnb		\tmp
	__ldst_va	ldr, \rd, \tmp, \sym, \cond, \offset
	.else
	__ldst_va	ldr, \rd, \rd, \sym, \cond, \offset
	.endif
	.endm

	/*
	 * str_va - store a 32-bit word to the virtual address of \sym
	 */
	.macro		str_va, rn:req, sym:req, tmp:req, cond
	__ldst_va	str, \rn, \tmp, \sym, \cond, 0
	.endm
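
	/*
	 * Usage sketch (illustrative only; 'example_var' stands for any
	 * symbol):
	 *
	 *	ldr_va	r0, example_var		@ load the word at example_var
	 *	str_va	r0, example_var, r1	@ store it back, r1 is a scratch register
	 */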

	/*
	 * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
	 *			without using a temp register.  Supported in ARM mode
	 *			only.
	 */
	.macro		ldr_this_cpu_armv6, rd:req, sym:req
	this_cpu_offset	\rd
	.globl		\sym
	.reloc		.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc		.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc		.L2_\@, R_ARM_LDR_PC_G2, \sym
	add		\rd, \rd, pc
.L0_\@: sub		\rd, \rd, #4
.L1_\@: sub		\rd, \rd, #0
.L2_\@: ldr		\rd, [\rd, #4]
	.endm

	/*
	 * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
	 *		  into register 'rd', which may be the stack pointer,
	 *		  using 't1' and 't2' as general temp registers.  These
	 *		  are permitted to overlap with 'rd' if != sp
	 */
	.macro		ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
#ifndef CONFIG_SMP
	ldr_va		\rd, \sym, tmp=\t1
#elif __LINUX_ARM_ARCH__ >= 7 || \
      !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
      (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	this_cpu_offset	\t1
	mov_l		\t2, \sym
	ldr		\rd, [\t1, \t2]
#else
	ldr_this_cpu_armv6 \rd, \sym
#endif
	.endm
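
	/*
	 * Usage sketch (illustrative only; 'example_pcpu_var' stands for any
	 * per-CPU variable):
	 *
	 *	ldr_this_cpu	r0, example_pcpu_var, r1, r2	@ r1/r2 are scratch registers
	 */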

	/*
	 * rev_l - byte-swap a 32-bit value
	 *
	 * @val: source/destination register
	 * @tmp: scratch register
	 */
	.macro		rev_l, val:req, tmp:req
	.if		__LINUX_ARM_ARCH__ < 6
	eor		\tmp, \val, \val, ror #16
	bic		\tmp, \tmp, #0x00ff0000
	mov		\val, \val, ror #8
	eor		\val, \val, \tmp, lsr #8
	.else
	rev		\val, \val
	.endif
	.endm
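
	/*
	 * Usage sketch (illustrative only): converting a value to the opposite
	 * byte order, e.g. a big-endian field read from a descriptor.
	 *
	 *	rev_l	r0, ip		@ byte-swap r0; ip is clobbered only before ARMv6
	 */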

	.if		__LINUX_ARM_ARCH__ < 6
	.set		.Lrev_l_uses_tmp, 1
	.else
	.set		.Lrev_l_uses_tmp, 0
	.endif

	/*
	 * bl_r - branch and link to register
	 *
	 * @dst: target to branch to
	 * @c: conditional opcode suffix
	 */
	.macro		bl_r, dst:req, c
	.if		__LINUX_ARM_ARCH__ < 6
	mov\c		lr, pc
	mov\c		pc, \dst
	.else
	blx\c		\dst
	.endif
	.endm
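
	/*
	 * Usage sketch (illustrative only): calling through a function pointer
	 * held in a register, with or without a condition code suffix.
	 *
	 *	bl_r	r4		@ call the routine whose address is in r4
	 *	bl_r	ip, eq		@ conditional form (blxeq, or a moveq pair pre-v6)
	 */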

#endif /* __ASM_ASSEMBLER_H__ */