1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * linux/arch/arm/boot/compressed/head.S
4 *
5 * Copyright (C) 1996-2002 Russell King
6 * Copyright (C) 2004 Hyok S. Choi (MPU support)
7 */
8#include <linux/linkage.h>
9#include <asm/assembler.h>
10#include <asm/v7m.h>
11
12#include "efi-header.S"
13
14#ifdef __ARMEB__
15#define OF_DT_MAGIC 0xd00dfeed
16#else
17#define OF_DT_MAGIC 0xedfe0dd0
18#endif
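/*
 * 0xd00dfeed is the flattened device tree magic, stored big-endian in
 * the first word of a DTB.  Keeping a byte-reversed constant for
 * little-endian builds lets the appended-DTB check below compare a
 * natively loaded word against OF_DT_MAGIC without swapping it first.
 */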
19
20 AR_CLASS( .arch armv7-a )
21 M_CLASS( .arch armv7-m )
22
23/*
24 * Debugging stuff
25 *
26 * Note that these macros must not contain any code which is not
27 * 100% relocatable. Any attempt to do so will result in a crash.
28 * Please select one of the following when turning on debugging.
29 */
30#ifdef DEBUG
31
32#if defined(CONFIG_DEBUG_ICEDCC)
33
34#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
35 .macro loadsp, rb, tmp1, tmp2
36 .endm
37 .macro writeb, ch, rb, tmp
38 mcr p14, 0, \ch, c0, c5, 0
39 .endm
40#elif defined(CONFIG_CPU_XSCALE)
41 .macro loadsp, rb, tmp1, tmp2
42 .endm
43 .macro writeb, ch, rb, tmp
44 mcr p14, 0, \ch, c8, c0, 0
45 .endm
46#else
47 .macro loadsp, rb, tmp1, tmp2
48 .endm
49 .macro writeb, ch, rb, tmp
50 mcr p14, 0, \ch, c1, c0, 0
51 .endm
52#endif
53
54#else
55
56#include CONFIG_DEBUG_LL_INCLUDE
57
58 .macro writeb, ch, rb, tmp
59#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
60 waituartcts \tmp, \rb
61#endif
62 waituarttxrdy \tmp, \rb
63 senduart \ch, \rb
64 busyuart \tmp, \rb
65 .endm
66
67#if defined(CONFIG_ARCH_SA1100)
68 .macro loadsp, rb, tmp1, tmp2
69 mov \rb, #0x80000000 @ physical base address
70#ifdef CONFIG_DEBUG_LL_SER3
71 add \rb, \rb, #0x00050000 @ Ser3
72#else
73 add \rb, \rb, #0x00010000 @ Ser1
74#endif
75 .endm
76#else
77 .macro loadsp, rb, tmp1, tmp2
78 addruart \rb, \tmp1, \tmp2
79 .endm
80#endif
81#endif
82#endif
83
84 .macro kputc,val
85 mov r0, \val
86 bl putc
87 .endm
88
89 .macro kphex,val,len
90 mov r0, \val
91 mov r1, #\len
92 bl phex
93 .endm
94
95 /*
96 * Debug kernel copy by printing the memory addresses involved
97 */
98 .macro dbgkc, begin, end, cbegin, cend
99#ifdef DEBUG
100 kputc #'C'
101 kputc #':'
102 kputc #'0'
103 kputc #'x'
104 kphex \begin, 8 /* Start of compressed kernel */
105 kputc #'-'
106 kputc #'0'
107 kputc #'x'
108 kphex \end, 8 /* End of compressed kernel */
109 kputc #'-'
110 kputc #'>'
111 kputc #'0'
112 kputc #'x'
113 kphex \cbegin, 8 /* Start of kernel copy */
114 kputc #'-'
115 kputc #'0'
116 kputc #'x'
117 kphex \cend, 8 /* End of kernel copy */
118 kputc #'\n'
119#endif
120 .endm
121
122 /*
123 * Debug print of the final appended DTB location
124 */
125 .macro dbgadtb, begin, size
126#ifdef DEBUG
127 kputc #'D'
128 kputc #'T'
129 kputc #'B'
130 kputc #':'
131 kputc #'0'
132 kputc #'x'
133 kphex \begin, 8 /* Start of appended DTB */
134 kputc #' '
135 kputc #'('
136 kputc #'0'
137 kputc #'x'
138 kphex \size, 8 /* Size of appended DTB */
139 kputc #')'
140 kputc #'\n'
141#endif
142 .endm
143
144 .macro enable_cp15_barriers, reg
145 mrc p15, 0, \reg, c1, c0, 0 @ read SCTLR
146 tst \reg, #(1 << 5) @ CP15BEN bit set?
147 bne .L_\@
148 orr \reg, \reg, #(1 << 5) @ CP15 barrier instructions
149 mcr p15, 0, \reg, c1, c0, 0 @ write SCTLR
150 ARM( .inst 0xf57ff06f @ v7+ isb )
151 THUMB( isb )
152.L_\@:
153 .endm
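/*
 * CP15BEN is bit 5 of SCTLR.  On ARMv8 cores running this code in
 * AArch32 state, firmware may leave it clear, in which case the legacy
 * CP15 barrier operations used below (e.g. "mcr p15, 0, rX, c7, c10, 4"
 * as a DSB) are UNDEFINED and would trap.  This macro sets the bit only
 * if it is not already set.
 */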
154
155 /*
156 * The kernel build system appends the size of the
157 * decompressed kernel at the end of the compressed data
158 * in little-endian form.
159 */
160 .macro get_inflated_image_size, res:req, tmp1:req, tmp2:req
161 adr \res, .Linflated_image_size_offset
162 ldr \tmp1, [\res]
163 add \tmp1, \tmp1, \res @ address of inflated image size
164
165 ldrb \res, [\tmp1] @ get_unaligned_le32
166 ldrb \tmp2, [\tmp1, #1]
167 orr \res, \res, \tmp2, lsl #8
168 ldrb \tmp2, [\tmp1, #2]
169 ldrb \tmp1, [\tmp1, #3]
170 orr \res, \res, \tmp2, lsl #16
171 orr \res, \res, \tmp1, lsl #24
172 .endm
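/*
 * The four ldrb/orr steps above are an open-coded unaligned little-endian
 * load; roughly equivalent C (illustrative only):
 *
 *	u32 get_unaligned_le32(const u8 *p)
 *	{
 *		return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
 *	}
 *
 * Byte loads are used because the size word follows the compressed data
 * and is not guaranteed to be 32-bit aligned.
 */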
173
174 .macro be32tocpu, val, tmp
175#ifndef __ARMEB__
176 /* convert to little endian */
177 eor \tmp, \val, \val, ror #16
178 bic \tmp, \tmp, #0x00ff0000
179 mov \val, \val, ror #8
180 eor \val, \val, \tmp, lsr #8
181#endif
182 .endm
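/*
 * The eor/bic/ror sequence above is the classic pre-ARMv6 four-instruction
 * byte swap (no "rev" can be assumed for all CPUs this code supports).
 * Illustrative C equivalent:
 *
 *	u32 be32tocpu(u32 x)
 *	{
 *		return (x >> 24) | ((x >> 8) & 0xff00) |
 *		       ((x << 8) & 0xff0000) | (x << 24);
 *	}
 *
 * On big-endian (__ARMEB__) builds the value is already in CPU order and
 * the macro expands to nothing.
 */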
183
184 .section ".start", "ax"
185/*
186 * sort out different calling conventions
187 */
188 .align
189 /*
190 * Always enter in ARM state for CPUs that support the ARM ISA.
191 * As of today (2014) that's exactly the members of the A and R
192 * classes.
193 */
194 AR_CLASS( .arm )
195start:
196 .type start,#function
197 /*
198 * These 7 nops along with the 1 nop immediately below for
199 * !THUMB2 form 8 nops that make the compressed kernel bootable
200 * on legacy ARM systems that assumed the kernel to be in a.out
201 * binary format. The boot loaders on these systems would
202 * jump 32 bytes into the image to skip the a.out header.
203 * With these 8 nops filling exactly 32 bytes, things still
204 * work as expected on these legacy systems. Thumb2 mode keeps
205 * 7 of the nops as it turns out that some boot loaders
206 * were patching the initial instructions of the kernel, i.e.
207 * had started to exploit this "patch area".
208 */
209 .rept 7
210 __nop
211 .endr
212#ifndef CONFIG_THUMB2_KERNEL
213 __nop
214#else
215 AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
216 M_CLASS( nop.w ) @ M: already in Thumb2 mode
217 .thumb
218#endif
219 W(b) 1f
220
221 .word _magic_sig @ Magic numbers to help the loader
222 .word _magic_start @ absolute load/run zImage address
223 .word _magic_end @ zImage end address
224 .word 0x04030201 @ endianness flag
225 .word 0x45454545 @ another magic number to indicate
226 .word _magic_table @ additional data table
227
228 __EFI_HEADER
2291:
230 ARM_BE8( setend be ) @ go BE8 if compiled for BE8
231 AR_CLASS( mrs r9, cpsr )
232#ifdef CONFIG_ARM_VIRT_EXT
233 bl __hyp_stub_install @ get into SVC mode, reversibly
234#endif
235 mov r7, r1 @ save architecture ID
236 mov r8, r2 @ save atags pointer
237
238#ifndef CONFIG_CPU_V7M
239 /*
240 * Booting from Angel - need to enter SVC mode and disable
241 * FIQs/IRQs (numeric definitions from angel arm.h source).
242 * We only do this if we were in user mode on entry.
243 */
244 mrs r2, cpsr @ get current mode
245 tst r2, #3 @ not user?
246 bne not_angel
247 mov r0, #0x17 @ angel_SWIreason_EnterSVC
248 ARM( swi 0x123456 ) @ angel_SWI_ARM
249 THUMB( svc 0xab ) @ angel_SWI_THUMB
250not_angel:
251 safe_svcmode_maskall r0
252 msr spsr_cxsf, r9 @ Save the CPU boot mode in
253 @ SPSR
254#endif
255 /*
256 * Note that some cache flushing and other stuff may
257 * be needed here - is there an Angel SWI call for this?
258 */
259
260 /*
261 * some architecture specific code can be inserted
262 * by the linker here, but it should preserve r7, r8, and r9.
263 */
264
265 .text
266
267#ifdef CONFIG_AUTO_ZRELADDR
268 /*
269 * Find the start of physical memory. As we are executing
270 * without the MMU on, we are in the physical address space.
271 * We just need to get rid of any offset by aligning the
272 * address.
273 *
274 * This alignment is a balance between the requirements of
275 * different platforms - we have chosen 128MB to allow
276 * platforms which align the start of their physical memory
277 * to 128MB to use this feature, while allowing the zImage
278 * to be placed within the first 128MB of memory on other
279 * platforms. Increasing the alignment means we place
280 * stricter alignment requirements on the start of physical
281 * memory, but relaxing it means that we break people who
282 * are already placing their zImage in (eg) the top 64MB
283 * of this range.
284 */
285 mov r4, pc
286 and r4, r4, #0xf8000000
287 /* Determine final kernel image address. */
288 add r4, r4, #TEXT_OFFSET
289#else
290 ldr r4, =zreladdr
291#endif
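/*
 * In C terms the AUTO_ZRELADDR case above computes (illustrative only):
 *
 *	phys_base = current_pc & ~(SZ_128M - 1);
 *	r4 = phys_base + TEXT_OFFSET;
 *
 * i.e. the zImage must have been loaded within the first 128MB of the
 * start of physical memory for the derived kernel address to be correct.
 */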
292
293 /*
294 * Set up a page table only if it won't overwrite ourself.
295 * That means r4 < pc || r4 - 16k page directory > &_end.
296 * Given that r4 > &_end is rather infrequent, we add a rough
297 * additional 1MB of room for a possible appended DTB.
298 */
299 mov r0, pc
300 cmp r0, r4
301 ldrcc r0, .Lheadroom
302 addcc r0, r0, pc
303 cmpcc r4, r0
304 orrcc r4, r4, #1 @ remember we skipped cache_on
305 blcs cache_on
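	/*
	 * The conditional sequence above implements, in pseudo-code
	 * (illustrative only):
	 *
	 *	if (pc < r4 && r4 < pc + headroom)
	 *		r4 |= 1;	(LSB set: cache_on was skipped)
	 *	else
	 *		cache_on();
	 *
	 * where headroom = _end - restart + 16K page directory + 1MB for a
	 * possible appended DTB, as encoded at .Lheadroom below.
	 */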
306
307restart: adr r0, LC1
308 ldr sp, [r0]
309 ldr r6, [r0, #4]
310 add sp, sp, r0
311 add r6, r6, r0
312
313 get_inflated_image_size r9, r10, lr
314
315#ifndef CONFIG_ZBOOT_ROM
316 /* malloc space is above the relocated stack (64k max) */
317 add r10, sp, #MALLOC_SIZE
318#else
319 /*
320 * With ZBOOT_ROM the bss/stack is non-relocatable,
321 * but someone could still run this code from RAM,
322 * in which case our reference is _edata.
323 */
324 mov r10, r6
325#endif
326
327 mov r5, #0 @ init dtb size to 0
328#ifdef CONFIG_ARM_APPENDED_DTB
329/*
330 * r4 = final kernel address (possibly with LSB set)
331 * r5 = appended dtb size (still unknown)
332 * r6 = _edata
333 * r7 = architecture ID
334 * r8 = atags/device tree pointer
335 * r9 = size of decompressed image
336 * r10 = end of this image, including bss/stack/malloc space if non XIP
337 * sp = stack pointer
338 *
339 * if there are device trees (dtb) appended to zImage, advance r10 so that the
340 * dtb data will get relocated along with the kernel if necessary.
341 */
342
343 ldr lr, [r6, #0]
344 ldr r1, =OF_DT_MAGIC
345 cmp lr, r1
346 bne dtb_check_done @ not found
347
348#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
349 /*
350 * OK... Let's do some funky business here.
351 * If we do have a DTB appended to zImage, and we do have
352 * an ATAG list around, we want the latter to be translated
353 * and folded into the former here. No GOT fixup has occurred
354 * yet, but none of the code we're about to call uses any
355 * global variable.
356 */
357
358 /* Get the initial DTB size */
359 ldr r5, [r6, #4]
360 be32tocpu r5, r1
361 dbgadtb r6, r5
362 /* 50% DTB growth should be good enough */
363 add r5, r5, r5, lsr #1
364 /* preserve 64-bit alignment */
365 add r5, r5, #7
366 bic r5, r5, #7
367 /* clamp to 32KB min and 1MB max */
368 cmp r5, #(1 << 15)
369 movlo r5, #(1 << 15)
370 cmp r5, #(1 << 20)
371 movhi r5, #(1 << 20)
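	/*
	 * Net effect of the sizing above, in C (illustrative only):
	 *
	 *	dtb_space = clamp(round_up(dtb_size + dtb_size / 2, 8),
	 *			  SZ_32K, SZ_1M);
	 *
	 * i.e. leave room for the ATAG conversion to grow the DTB by 50%,
	 * keep 64-bit alignment, and bound the workspace between 32KB and 1MB.
	 */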
372 /* temporarily relocate the stack past the DTB work space */
373 add sp, sp, r5
374
375 mov r0, r8
376 mov r1, r6
377 mov r2, r5
378 bl atags_to_fdt
379
380 /*
381 * If returned value is 1, there is no ATAG at the location
382 * pointed by r8. Try the typical 0x100 offset from start
383 * of RAM and hope for the best.
384 */
385 cmp r0, #1
386 sub r0, r4, #TEXT_OFFSET
387 bic r0, r0, #1
388 add r0, r0, #0x100
389 mov r1, r6
390 mov r2, r5
391 bleq atags_to_fdt
392
393 sub sp, sp, r5
394#endif
395
396 mov r8, r6 @ use the appended device tree
397
398 /*
399 * Make sure that the DTB doesn't end up in the final
400 * kernel's .bss area. To do so, we adjust the decompressed
401 * kernel size to compensate if that .bss size is larger
402 * than the relocated code.
403 */
404 ldr r5, =_kernel_bss_size
405 adr r1, wont_overwrite
406 sub r1, r6, r1
407 subs r1, r5, r1
408 addhi r9, r9, r1
409
410 /* Get the current DTB size */
411 ldr r5, [r6, #4]
412 be32tocpu r5, r1
413
414 /* preserve 64-bit alignment */
415 add r5, r5, #7
416 bic r5, r5, #7
417
418 /* relocate some pointers past the appended dtb */
419 add r6, r6, r5
420 add r10, r10, r5
421 add sp, sp, r5
422dtb_check_done:
423#endif
424
425/*
426 * Check to see if we will overwrite ourselves.
427 * r4 = final kernel address (possibly with LSB set)
428 * r9 = size of decompressed image
429 * r10 = end of this image, including bss/stack/malloc space if non XIP
430 * We basically want:
431 * r4 - 16k page directory >= r10 -> OK
432 * r4 + image length <= address of wont_overwrite -> OK
433 * Note: the possible LSB in r4 is harmless here.
434 */
435 add r10, r10, #16384
436 cmp r4, r10
437 bhs wont_overwrite
438 add r10, r4, r9
439 adr r9, wont_overwrite
440 cmp r10, r9
441 bls wont_overwrite
442
443/*
444 * Relocate ourselves past the end of the decompressed kernel.
445 * r6 = _edata
446 * r10 = end of the decompressed kernel
447 * Because we always copy ahead, we need to do it from the end and go
448 * backward in case the source and destination overlap.
449 */
450 /*
451 * Bump to the next 256-byte boundary with the size of
452 * the relocation code added. This avoids overwriting
453 * ourself when the offset is small.
454 */
455 add r10, r10, #((reloc_code_end - restart + 256) & ~255)
456 bic r10, r10, #255
457
458 /* Get start of code we want to copy and align it down. */
459 adr r5, restart
460 bic r5, r5, #31
461
462/* Relocate the hyp vector base if necessary */
463#ifdef CONFIG_ARM_VIRT_EXT
464 mrs r0, spsr
465 and r0, r0, #MODE_MASK
466 cmp r0, #HYP_MODE
467 bne 1f
468
469 /*
470 * Compute the address of the hyp vectors after relocation.
471 * Call __hyp_set_vectors with the new address so that we
472 * can HVC again after the copy.
473 */
474 adr_l r0, __hyp_stub_vectors
475 sub r0, r0, r5
476 add r0, r0, r10
477 bl __hyp_set_vectors
4781:
479#endif
480
481 sub r9, r6, r5 @ size to copy
482 add r9, r9, #31 @ rounded up to a multiple
483 bic r9, r9, #31 @ ... of 32 bytes
484 add r6, r9, r5
485 add r9, r9, r10
486
487#ifdef DEBUG
488 sub r10, r6, r5
489 sub r10, r9, r10
490 /*
491 * We are about to copy the kernel to a new memory area.
492 * The boundaries of the new memory area can be found in
493 * r10 and r9, whilst r5 and r6 contain the boundaries
494 * of the memory we are going to copy.
495 * Calling dbgkc will help with the printing of this
496 * information.
497 */
498 dbgkc r5, r6, r10, r9
499#endif
500
5011: ldmdb r6!, {r0 - r3, r10 - r12, lr}
502 cmp r6, r5
503 stmdb r9!, {r0 - r3, r10 - r12, lr}
504 bhi 1b
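	/*
	 * The loop above moves 32 bytes (8 registers) per iteration from
	 * [r5, r6) to the area ending at the original r9, walking downwards
	 * so that an overlapping destination above the source is safe: the
	 * same reason memmove() copies backwards in that case.
	 */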
505
506 /* Preserve offset to relocated code. */
507 sub r6, r9, r6
508
509 mov r0, r9 @ start of relocated zImage
510 add r1, sp, r6 @ end of relocated zImage
511 bl cache_clean_flush
512
513 badr r0, restart
514 add r0, r0, r6
515 mov pc, r0
516
517wont_overwrite:
518 adr r0, LC0
519 ldmia r0, {r1, r2, r3, r11, r12}
520 sub r0, r0, r1 @ calculate the delta offset
521
522/*
523 * If delta is zero, we are running at the address we were linked at.
524 * r0 = delta
525 * r2 = BSS start
526 * r3 = BSS end
527 * r4 = kernel execution address (possibly with LSB set)
528 * r5 = appended dtb size (0 if not present)
529 * r7 = architecture ID
530 * r8 = atags pointer
531 * r11 = GOT start
532 * r12 = GOT end
533 * sp = stack pointer
534 */
535 orrs r1, r0, r5
536 beq not_relocated
537
538 add r11, r11, r0
539 add r12, r12, r0
540
541#ifndef CONFIG_ZBOOT_ROM
542 /*
543 * If we're running fully PIC (i.e. CONFIG_ZBOOT_ROM=n),
544 * we need to fix up pointers into the BSS region.
545 * Note that the stack pointer has already been fixed up.
546 */
547 add r2, r2, r0
548 add r3, r3, r0
549
550 /*
551 * Relocate all entries in the GOT table.
552 * Bump bss entries to _edata + dtb size
553 */
5541: ldr r1, [r11, #0] @ relocate entries in the GOT
555 add r1, r1, r0 @ This fixes up C references
556 cmp r1, r2 @ if entry >= bss_start &&
557 cmphs r3, r1 @ bss_end > entry
558 addhi r1, r1, r5 @ entry += dtb size
559 str r1, [r11], #4 @ next entry
560 cmp r11, r12
561 blo 1b
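	/*
	 * The GOT fixup above, expressed in C (illustrative only):
	 *
	 *	for (p = got_start; p < got_end; p++) {
	 *		*p += delta;
	 *		if (*p >= bss_start && *p < bss_end)
	 *			*p += dtb_size;
	 *	}
	 *
	 * bss_start/bss_end (r2/r3) already include the delta, so the range
	 * check is against the relocated BSS.
	 */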
562
563 /* bump our bss pointers too */
564 add r2, r2, r5
565 add r3, r3, r5
566
567#else
568
569 /*
570 * Relocate entries in the GOT table. We only relocate
571 * the entries that are outside the (relocated) BSS region.
572 */
5731: ldr r1, [r11, #0] @ relocate entries in the GOT
574 cmp r1, r2 @ entry < bss_start ||
575 cmphs r3, r1 @ _end < entry
576 addlo r1, r1, r0 @ table. This fixes up the
577 str r1, [r11], #4 @ C references.
578 cmp r11, r12
579 blo 1b
580#endif
581
582not_relocated: mov r0, #0
5831: str r0, [r2], #4 @ clear bss
584 str r0, [r2], #4
585 str r0, [r2], #4
586 str r0, [r2], #4
587 cmp r2, r3
588 blo 1b
589
590 /*
591 * Did we skip the cache setup earlier?
592 * That is indicated by the LSB in r4.
593 * Do it now if so.
594 */
595 tst r4, #1
596 bic r4, r4, #1
597 blne cache_on
598
599/*
600 * The C runtime environment should now be setup sufficiently.
601 * Set up some pointers, and start decompressing.
602 * r4 = kernel execution address
603 * r7 = architecture ID
604 * r8 = atags pointer
605 */
606 mov r0, r4
607 mov r1, sp @ malloc space above stack
608 add r2, sp, #MALLOC_SIZE @ 64k max
609 mov r3, r7
610 bl decompress_kernel
611
612 get_inflated_image_size r1, r2, r3
613
614 mov r0, r4 @ start of inflated image
615 add r1, r1, r0 @ end of inflated image
616 bl cache_clean_flush
617 bl cache_off
618
619#ifdef CONFIG_ARM_VIRT_EXT
620 mrs r0, spsr @ Get saved CPU boot mode
621 and r0, r0, #MODE_MASK
622 cmp r0, #HYP_MODE @ if not booted in HYP mode...
623 bne __enter_kernel @ boot kernel directly
624
625 adr_l r0, __hyp_reentry_vectors
626 bl __hyp_set_vectors
627 __HVC(0) @ otherwise bounce to hyp mode
628
629 b . @ should never be reached
630#else
631 b __enter_kernel
632#endif
633
634 .align 2
635 .type LC0, #object
636LC0: .word LC0 @ r1
637 .word __bss_start @ r2
638 .word _end @ r3
639 .word _got_start @ r11
640 .word _got_end @ ip
641 .size LC0, . - LC0
642
643 .type LC1, #object
644LC1: .word .L_user_stack_end - LC1 @ sp
645 .word _edata - LC1 @ r6
646 .size LC1, . - LC1
647
648.Lheadroom:
649 .word _end - restart + 16384 + 1024*1024
650
651.Linflated_image_size_offset:
652 .long (input_data_end - 4) - .
653
654#ifdef CONFIG_ARCH_RPC
655 .globl params
656params: ldr r0, =0x10000100 @ params_phys for RPC
657 mov pc, lr
658 .ltorg
659 .align
660#endif
661
662/*
663 * dcache_line_size - get the minimum D-cache line size from the CTR register
664 * on ARMv7.
665 */
666 .macro dcache_line_size, reg, tmp
667#ifdef CONFIG_CPU_V7M
668 movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
669 movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
670 ldr \tmp, [\tmp]
671#else
672 mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
673#endif
674 lsr \tmp, \tmp, #16
675 and \tmp, \tmp, #0xf @ cache line size encoding
676 mov \reg, #4 @ bytes per word
677 mov \reg, \reg, lsl \tmp @ actual cache line size
678 .endm
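/*
 * Equivalent C for the decode above (illustrative only):
 *
 *	line_size = 4 << ((ctr >> 16) & 0xf);
 *
 * i.e. the DminLine field of CTR (or of the v7-M SCB copy of CTR) encodes
 * the smallest D-cache line as a power-of-two number of words.
 */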
679
680/*
681 * Turn on the cache. We need to setup some page tables so that we
682 * can have both the I and D caches on.
683 *
684 * We place the page tables 16k down from the kernel execution address,
685 * and we hope that nothing else is using it. If we're using it, we
686 * will go pop!
687 *
688 * On entry,
689 * r4 = kernel execution address
690 * r7 = architecture number
691 * r8 = atags pointer
692 * On exit,
693 * r0, r1, r2, r3, r9, r10, r12 corrupted
694 * This routine must preserve:
695 * r4, r7, r8
696 */
697 .align 5
698cache_on: mov r3, #8 @ cache_on function
699 b call_cache_fn
700
701/*
702 * Initialize the highest priority protection region, PR7
703 * to cover the whole 32-bit address space, cacheable and bufferable.
704 */
705__armv4_mpu_cache_on:
706 mov r0, #0x3f @ 4G, the whole
707 mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
708 mcr p15, 0, r0, c6, c7, 1
709
710 mov r0, #0x80 @ PR7
711 mcr p15, 0, r0, c2, c0, 0 @ D-cache on
712 mcr p15, 0, r0, c2, c0, 1 @ I-cache on
713 mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
714
715 mov r0, #0xc000
716 mcr p15, 0, r0, c5, c0, 1 @ I-access permission
717 mcr p15, 0, r0, c5, c0, 0 @ D-access permission
718
719 mov r0, #0
720 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
721 mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
722 mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
723 mrc p15, 0, r0, c1, c0, 0 @ read control reg
724 @ ...I .... ..D. WC.M
725 orr r0, r0, #0x002d @ .... .... ..1. 11.1
726 orr r0, r0, #0x1000 @ ...1 .... .... ....
727
728 mcr p15, 0, r0, c1, c0, 0 @ write control reg
729
730 mov r0, #0
731 mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
732 mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
733 mov pc, lr
734
735__armv3_mpu_cache_on:
736 mov r0, #0x3f @ 4G, the whole
737 mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
738
739 mov r0, #0x80 @ PR7
740 mcr p15, 0, r0, c2, c0, 0 @ cache on
741 mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
742
743 mov r0, #0xc000
744 mcr p15, 0, r0, c5, c0, 0 @ access permission
745
746 mov r0, #0
747 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
748 /*
749 * ?? ARMv3 MMU does not allow reading the control register,
750 * does this really work on ARMv3 MPU?
751 */
752 mrc p15, 0, r0, c1, c0, 0 @ read control reg
753 @ .... .... .... WC.M
754 orr r0, r0, #0x000d @ .... .... .... 11.1
755 /* ?? this overwrites the value constructed above? */
756 mov r0, #0
757 mcr p15, 0, r0, c1, c0, 0 @ write control reg
758
759 /* ?? invalidate for the second time? */
760 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
761 mov pc, lr
762
763#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
764#define CB_BITS 0x08
765#else
766#define CB_BITS 0x0c
767#endif
768
769__setup_mmu: sub r3, r4, #16384 @ Page directory size
770 bic r3, r3, #0xff @ Align the pointer
771 bic r3, r3, #0x3f00
772/*
773 * Initialise the page tables, turning on the cacheable and bufferable
774 * bits for the RAM area only.
775 */
776 mov r0, r3
777 mov r9, r0, lsr #18
778 mov r9, r9, lsl #18 @ start of RAM
779 add r10, r9, #0x10000000 @ a reasonable RAM size
780 mov r1, #0x12 @ XN|U + section mapping
781 orr r1, r1, #3 << 10 @ AP=11
782 add r2, r3, #16384
7831: cmp r1, r9 @ if virt > start of RAM
784 cmphs r10, r1 @ && end of RAM > virt
785 bic r1, r1, #0x1c @ clear XN|U + C + B
786 orrlo r1, r1, #0x10 @ Set XN|U for non-RAM
787 orrhs r1, r1, r6 @ set RAM section settings
788 str r1, [r0], #4 @ 1:1 mapping
789 add r1, r1, #1048576
790 teq r0, r2
791 bne 1b
792/*
793 * If ever we are running from Flash, then we surely want the cache
794 * to be enabled also for our execution instance... We map 2MB of it
795 * so there is no map overlap problem for a compressed kernel of up to 1 MB.
796 * If the execution is in RAM then we would only be duplicating the above.
797 */
798 orr r1, r6, #0x04 @ ensure B is set for this
799 orr r1, r1, #3 << 10
800 mov r2, pc
801 mov r2, r2, lsr #20
802 orr r1, r1, r2, lsl #20
803 add r0, r3, r2, lsl #2
804 str r1, [r0], #4
805 add r1, r1, #1048576
806 str r1, [r0]
807 mov pc, lr
808ENDPROC(__setup_mmu)
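/*
 * Each word written above is a first-level "section" descriptor giving a
 * 1:1 mapping of one 1MB block: bits [31:20] hold the section base
 * address, bits [11:10] the AP field (set to 11, read/write), and the low
 * bits select section mapping plus the C/B (and, per CPU, XN) attributes.
 * Only the 256MB window assumed to contain RAM, plus the 2MB covering the
 * currently running code, gets the cacheable/bufferable bits from r6.
 */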
809
810@ Enable unaligned access on v6, to allow better code generation
811@ for the decompressor C code:
812__armv6_mmu_cache_on:
813 mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
814 bic r0, r0, #2 @ A (no unaligned access fault)
815 orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
816 mcr p15, 0, r0, c1, c0, 0 @ write SCTLR
817 b __armv4_mmu_cache_on
818
819__arm926ejs_mmu_cache_on:
820#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
821 mov r0, #4 @ put dcache in WT mode
822 mcr p15, 7, r0, c15, c0, 0
823#endif
824
825__armv4_mmu_cache_on:
826 mov r12, lr
827#ifdef CONFIG_MMU
828 mov r6, #CB_BITS | 0x12 @ U
829 bl __setup_mmu
830 mov r0, #0
831 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
832 mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
833 mrc p15, 0, r0, c1, c0, 0 @ read control reg
834 orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
835 orr r0, r0, #0x0030
836 ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
837 bl __common_mmu_cache_on
838 mov r0, #0
839 mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
840#endif
841 mov pc, r12
842
843__armv7_mmu_cache_on:
844 enable_cp15_barriers r11
845 mov r12, lr
846#ifdef CONFIG_MMU
847 mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
848 tst r11, #0xf @ VMSA
849 movne r6, #CB_BITS | 0x02 @ !XN
850 blne __setup_mmu
851 mov r0, #0
852 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
853 tst r11, #0xf @ VMSA
854 mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
855#endif
856 mrc p15, 0, r0, c1, c0, 0 @ read control reg
857 bic r0, r0, #1 << 28 @ clear SCTLR.TRE
858 orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
859 orr r0, r0, #0x003c @ write buffer
860 bic r0, r0, #2 @ A (no unaligned access fault)
861 orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
862 @ (needed for ARM1176)
863#ifdef CONFIG_MMU
864 ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
865 mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
866 orrne r0, r0, #1 @ MMU enabled
867 movne r1, #0xfffffffd @ domain 0 = client
868 bic r6, r6, #1 << 31 @ 32-bit translation system
869 bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0
870 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
871 mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
872 mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
873#endif
874 mcr p15, 0, r0, c7, c5, 4 @ ISB
875 mcr p15, 0, r0, c1, c0, 0 @ load control register
876 mrc p15, 0, r0, c1, c0, 0 @ and read it back
877 mov r0, #0
878 mcr p15, 0, r0, c7, c5, 4 @ ISB
879 mov pc, r12
880
881__fa526_cache_on:
882 mov r12, lr
883 mov r6, #CB_BITS | 0x12 @ U
884 bl __setup_mmu
885 mov r0, #0
886 mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
887 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
888 mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
889 mrc p15, 0, r0, c1, c0, 0 @ read control reg
890 orr r0, r0, #0x1000 @ I-cache enable
891 bl __common_mmu_cache_on
892 mov r0, #0
893 mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
894 mov pc, r12
895
896__common_mmu_cache_on:
897#ifndef CONFIG_THUMB2_KERNEL
898#ifndef DEBUG
899 orr r0, r0, #0x000d @ Write buffer, mmu
900#endif
901 mov r1, #-1
902 mcr p15, 0, r3, c2, c0, 0 @ load page table pointer
903 mcr p15, 0, r1, c3, c0, 0 @ load domain access control
904 b 1f
905 .align 5 @ cache line aligned
9061: mcr p15, 0, r0, c1, c0, 0 @ load control register
907 mrc p15, 0, r0, c1, c0, 0 @ and read it back to
908 sub pc, lr, r0, lsr #32 @ properly flush pipeline
909#endif
910
911#define PROC_ENTRY_SIZE (4*5)
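/*
 * Each proc_types entry below is exactly 5 words: a CPU ID match value,
 * an ID mask, and one branch apiece for the cache_on, cache_off and
 * cache_clean_flush methods.  call_cache_fn jumps into an entry with
 * "add pc, r12, r3", which is why the branches must stay 32-bit wide
 * (W(b)) even in Thumb-2 kernels; see the .if size check after the table.
 */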
912
913/*
914 * Here follow the relocatable cache support functions for the
915 * various processors. This is a generic hook for locating an
916 * entry and jumping to an instruction at the specified offset
917 * from the start of the block. Please note this is all position
918 * independent code.
919 *
920 * r1 = corrupted
921 * r2 = corrupted
922 * r3 = block offset
923 * r9 = corrupted
924 * r12 = corrupted
925 */
926
927call_cache_fn: adr r12, proc_types
928#ifdef CONFIG_CPU_CP15
929 mrc p15, 0, r9, c0, c0 @ get processor ID
930#elif defined(CONFIG_CPU_V7M)
931 /*
932 * On v7-M the processor id is located in the V7M_SCB_CPUID
933 * register, but as cache handling is IMPLEMENTATION DEFINED on
934 * v7-M (if existent at all) we just return early here.
935 * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
936 * __armv7_mmu_cache_{on,off,flush}) would be selected which
937 * use cp15 registers that are not implemented on v7-M.
938 */
939 bx lr
940#else
941 ldr r9, =CONFIG_PROCESSOR_ID
942#endif
9431: ldr r1, [r12, #0] @ get value
944 ldr r2, [r12, #4] @ get mask
945 eor r1, r1, r9 @ (real ^ match)
946 tst r1, r2 @ & mask
947 ARM( addeq pc, r12, r3 ) @ call cache function
948 THUMB( addeq r12, r3 )
949 THUMB( moveq pc, r12 ) @ call cache function
950 add r12, r12, #PROC_ENTRY_SIZE
951 b 1b
952
953/*
954 * Table for cache operations. This is basically:
955 * - CPU ID match
956 * - CPU ID mask
957 * - 'cache on' method instruction
958 * - 'cache off' method instruction
959 * - 'cache flush' method instruction
960 *
961 * We match an entry using: ((real_id ^ match) & mask) == 0
962 *
963 * Writethrough caches generally only need 'on' and 'off'
964 * methods. Writeback caches _must_ have the flush method
965 * defined.
966 */
967 .align 2
968 .type proc_types,#object
969proc_types:
970 .word 0x41000000 @ old ARM ID
971 .word 0xff00f000
972 mov pc, lr
973 THUMB( nop )
974 mov pc, lr
975 THUMB( nop )
976 mov pc, lr
977 THUMB( nop )
978
979 .word 0x41007000 @ ARM7/710
980 .word 0xfff8fe00
981 mov pc, lr
982 THUMB( nop )
983 mov pc, lr
984 THUMB( nop )
985 mov pc, lr
986 THUMB( nop )
987
988 .word 0x41807200 @ ARM720T (writethrough)
989 .word 0xffffff00
990 W(b) __armv4_mmu_cache_on
991 W(b) __armv4_mmu_cache_off
992 mov pc, lr
993 THUMB( nop )
994
995 .word 0x41007400 @ ARM74x
996 .word 0xff00ff00
997 W(b) __armv3_mpu_cache_on
998 W(b) __armv3_mpu_cache_off
999 W(b) __armv3_mpu_cache_flush
1000
1001 .word 0x41009400 @ ARM94x
1002 .word 0xff00ff00
1003 W(b) __armv4_mpu_cache_on
1004 W(b) __armv4_mpu_cache_off
1005 W(b) __armv4_mpu_cache_flush
1006
1007 .word 0x41069260 @ ARM926EJ-S (v5TEJ)
1008 .word 0xff0ffff0
1009 W(b) __arm926ejs_mmu_cache_on
1010 W(b) __armv4_mmu_cache_off
1011 W(b) __armv5tej_mmu_cache_flush
1012
1013 .word 0x00007000 @ ARM7 IDs
1014 .word 0x0000f000
1015 mov pc, lr
1016 THUMB( nop )
1017 mov pc, lr
1018 THUMB( nop )
1019 mov pc, lr
1020 THUMB( nop )
1021
1022 @ Everything from here on will be the new ID system.
1023
1024 .word 0x4401a100 @ sa110 / sa1100
1025 .word 0xffffffe0
1026 W(b) __armv4_mmu_cache_on
1027 W(b) __armv4_mmu_cache_off
1028 W(b) __armv4_mmu_cache_flush
1029
1030 .word 0x6901b110 @ sa1110
1031 .word 0xfffffff0
1032 W(b) __armv4_mmu_cache_on
1033 W(b) __armv4_mmu_cache_off
1034 W(b) __armv4_mmu_cache_flush
1035
1036 .word 0x56056900
1037 .word 0xffffff00 @ PXA9xx
1038 W(b) __armv4_mmu_cache_on
1039 W(b) __armv4_mmu_cache_off
1040 W(b) __armv4_mmu_cache_flush
1041
1042 .word 0x56158000 @ PXA168
1043 .word 0xfffff000
1044 W(b) __armv4_mmu_cache_on
1045 W(b) __armv4_mmu_cache_off
1046 W(b) __armv5tej_mmu_cache_flush
1047
1048 .word 0x56050000 @ Feroceon
1049 .word 0xff0f0000
1050 W(b) __armv4_mmu_cache_on
1051 W(b) __armv4_mmu_cache_off
1052 W(b) __armv5tej_mmu_cache_flush
1053
1054#ifdef CONFIG_CPU_FEROCEON_OLD_ID
1055 /* this conflicts with the standard ARMv5TE entry */
1056 .long 0x41009260 @ Old Feroceon
1057 .long 0xff00fff0
1058 b __armv4_mmu_cache_on
1059 b __armv4_mmu_cache_off
1060 b __armv5tej_mmu_cache_flush
1061#endif
1062
1063 .word 0x66015261 @ FA526
1064 .word 0xff01fff1
1065 W(b) __fa526_cache_on
1066 W(b) __armv4_mmu_cache_off
1067 W(b) __fa526_cache_flush
1068
1069 @ These match on the architecture ID
1070
1071 .word 0x00020000 @ ARMv4T
1072 .word 0x000f0000
1073 W(b) __armv4_mmu_cache_on
1074 W(b) __armv4_mmu_cache_off
1075 W(b) __armv4_mmu_cache_flush
1076
1077 .word 0x00050000 @ ARMv5TE
1078 .word 0x000f0000
1079 W(b) __armv4_mmu_cache_on
1080 W(b) __armv4_mmu_cache_off
1081 W(b) __armv4_mmu_cache_flush
1082
1083 .word 0x00060000 @ ARMv5TEJ
1084 .word 0x000f0000
1085 W(b) __armv4_mmu_cache_on
1086 W(b) __armv4_mmu_cache_off
1087 W(b) __armv5tej_mmu_cache_flush
1088
1089 .word 0x0007b000 @ ARMv6
1090 .word 0x000ff000
1091 W(b) __armv6_mmu_cache_on
1092 W(b) __armv4_mmu_cache_off
1093 W(b) __armv6_mmu_cache_flush
1094
1095 .word 0x000f0000 @ new CPU Id
1096 .word 0x000f0000
1097 W(b) __armv7_mmu_cache_on
1098 W(b) __armv7_mmu_cache_off
1099 W(b) __armv7_mmu_cache_flush
1100
1101 .word 0 @ unrecognised type
1102 .word 0
1103 mov pc, lr
1104 THUMB( nop )
1105 mov pc, lr
1106 THUMB( nop )
1107 mov pc, lr
1108 THUMB( nop )
1109
1110 .size proc_types, . - proc_types
1111
1112 /*
1113 * If you get a "non-constant expression in ".if" statement"
1114 * error from the assembler on this line, check that you have
1115 * not accidentally written a "b" instruction where you should
1116 * have written W(b).
1117 */
1118 .if (. - proc_types) % PROC_ENTRY_SIZE != 0
1119 .error "The size of one or more proc_types entries is wrong."
1120 .endif
1121
1122/*
1123 * Turn off the Cache and MMU. ARMv3 does not support
1124 * reading the control register, but ARMv4 does.
1125 *
1126 * On exit,
1127 * r0, r1, r2, r3, r9, r12 corrupted
1128 * This routine must preserve:
1129 * r4, r7, r8
1130 */
1131 .align 5
1132cache_off: mov r3, #12 @ cache_off function
1133 b call_cache_fn
1134
1135__armv4_mpu_cache_off:
1136 mrc p15, 0, r0, c1, c0
1137 bic r0, r0, #0x000d
1138 mcr p15, 0, r0, c1, c0 @ turn MPU and cache off
1139 mov r0, #0
1140 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
1141 mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache
1142 mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache
1143 mov pc, lr
1144
1145__armv3_mpu_cache_off:
1146 mrc p15, 0, r0, c1, c0
1147 bic r0, r0, #0x000d
1148 mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off
1149 mov r0, #0
1150 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
1151 mov pc, lr
1152
1153__armv4_mmu_cache_off:
1154#ifdef CONFIG_MMU
1155 mrc p15, 0, r0, c1, c0
1156 bic r0, r0, #0x000d
1157 mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
1158 mov r0, #0
1159 mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4
1160 mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
1161#endif
1162 mov pc, lr
1163
1164__armv7_mmu_cache_off:
1165 mrc p15, 0, r0, c1, c0
1166#ifdef CONFIG_MMU
1167 bic r0, r0, #0x000d
1168#else
1169 bic r0, r0, #0x000c
1170#endif
1171 mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
1172 mov r0, #0
1173#ifdef CONFIG_MMU
1174 mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB
1175#endif
1176 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC
1177 mcr p15, 0, r0, c7, c10, 4 @ DSB
1178 mcr p15, 0, r0, c7, c5, 4 @ ISB
1179 mov pc, lr
1180
1181/*
1182 * Clean and flush the cache to maintain consistency.
1183 *
1184 * On entry,
1185 * r0 = start address
1186 * r1 = end address (exclusive)
1187 * On exit,
1188 * r1, r2, r3, r9, r10, r11, r12 corrupted
1189 * This routine must preserve:
1190 * r4, r6, r7, r8
1191 */
1192 .align 5
1193cache_clean_flush:
1194 mov r3, #16
1195 mov r11, r1
1196 b call_cache_fn
1197
1198__armv4_mpu_cache_flush:
1199 tst r4, #1
1200 movne pc, lr
1201 mov r2, #1
1202 mov r3, #0
1203 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
1204 mov r1, #7 << 5 @ 8 segments
12051: orr r3, r1, #63 << 26 @ 64 entries
12062: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
1207 subs r3, r3, #1 << 26
1208 bcs 2b @ entries 63 to 0
1209 subs r1, r1, #1 << 5
1210 bcs 1b @ segments 7 to 0
1211
1212 teq r2, #0
1213 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
1214 mcr p15, 0, ip, c7, c10, 4 @ drain WB
1215 mov pc, lr
1216
1217__fa526_cache_flush:
1218 tst r4, #1
1219 movne pc, lr
1220 mov r1, #0
1221 mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache
1222 mcr p15, 0, r1, c7, c5, 0 @ flush I cache
1223 mcr p15, 0, r1, c7, c10, 4 @ drain WB
1224 mov pc, lr
1225
1226__armv6_mmu_cache_flush:
1227 mov r1, #0
1228 tst r4, #1
1229 mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D
1230 mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
1231 mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
1232 mcr p15, 0, r1, c7, c10, 4 @ drain WB
1233 mov pc, lr
1234
1235__armv7_mmu_cache_flush:
1236 enable_cp15_barriers r10
1237 tst r4, #1
1238 bne iflush
1239 mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
1240 tst r10, #0xf << 16 @ hierarchical cache (ARMv7)
1241 mov r10, #0
1242 beq hierarchical
1243 mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D
1244 b iflush
1245hierarchical:
1246 dcache_line_size r1, r2 @ r1 := dcache min line size
1247 sub r2, r1, #1 @ r2 := line size mask
1248 bic r0, r0, r2 @ round down start to line size
1249 sub r11, r11, #1 @ end address is exclusive
1250 bic r11, r11, r2 @ round down end to line size
12510: cmp r0, r11 @ finished?
1252 bgt iflush
1253 mcr p15, 0, r0, c7, c14, 1 @ Dcache clean/invalidate by VA
1254 add r0, r0, r1
1255 b 0b
1256iflush:
1257 mcr p15, 0, r10, c7, c10, 4 @ DSB
1258 mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB
1259 mcr p15, 0, r10, c7, c10, 4 @ DSB
1260 mcr p15, 0, r10, c7, c5, 4 @ ISB
1261 mov pc, lr
1262
1263__armv5tej_mmu_cache_flush:
1264 tst r4, #1
1265 movne pc, lr
12661: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate D cache
1267 bne 1b
1268 mcr p15, 0, r0, c7, c5, 0 @ flush I cache
1269 mcr p15, 0, r0, c7, c10, 4 @ drain WB
1270 mov pc, lr
1271
1272__armv4_mmu_cache_flush:
1273 tst r4, #1
1274 movne pc, lr
1275 mov r2, #64*1024 @ default: 32K dcache size (*2)
1276 mov r11, #32 @ default: 32 byte line size
1277 mrc p15, 0, r3, c0, c0, 1 @ read cache type
1278 teq r3, r9 @ cache ID register present?
1279 beq no_cache_id
1280 mov r1, r3, lsr #18
1281 and r1, r1, #7
1282 mov r2, #1024
1283 mov r2, r2, lsl r1 @ base dcache size *2
1284 tst r3, #1 << 14 @ test M bit
1285 addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1
1286 mov r3, r3, lsr #12
1287 and r3, r3, #3
1288 mov r11, #8
1289 mov r11, r11, lsl r3 @ cache line size in bytes
1290no_cache_id:
1291 mov r1, pc
1292 bic r1, r1, #63 @ align to longest cache line
1293 add r2, r1, r2
12941:
1295 ARM( ldr r3, [r1], r11 ) @ s/w flush D cache
1296 THUMB( ldr r3, [r1] ) @ s/w flush D cache
1297 THUMB( add r1, r1, r11 )
1298 teq r1, r2
1299 bne 1b
1300
1301 mcr p15, 0, r1, c7, c5, 0 @ flush I cache
1302 mcr p15, 0, r1, c7, c6, 0 @ flush D cache
1303 mcr p15, 0, r1, c7, c10, 4 @ drain WB
1304 mov pc, lr
1305
1306__armv3_mmu_cache_flush:
1307__armv3_mpu_cache_flush:
1308 tst r4, #1
1309 movne pc, lr
1310 mov r1, #0
1311 mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3
1312 mov pc, lr
1313
1314/*
1315 * Various debugging routines for printing hex characters and
1316 * memory, which again must be relocatable.
1317 */
1318#ifdef DEBUG
1319 .align 2
1320 .type phexbuf,#object
1321phexbuf: .space 12
1322 .size phexbuf, . - phexbuf
1323
1324@ phex corrupts {r0, r1, r2, r3}
1325phex: adr r3, phexbuf
1326 mov r2, #0
1327 strb r2, [r3, r1]
13281: subs r1, r1, #1
1329 movmi r0, r3
1330 bmi puts
1331 and r2, r0, #15
1332 mov r0, r0, lsr #4
1333 cmp r2, #10
1334 addge r2, r2, #7
1335 add r2, r2, #'0'
1336 strb r2, [r3, r1]
1337 b 1b
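@ phex converts r0 to "r1" hex digits: it stores a NUL terminator, then
@ fills phexbuf from the last character backwards, one nibble per pass,
@ and finally tail-calls puts (via the "bmi puts" above) to print it.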
1338
1339@ puts corrupts {r0, r1, r2, r3}
1340puts: loadsp r3, r2, r1
13411: ldrb r2, [r0], #1
1342 teq r2, #0
1343 moveq pc, lr
13442: writeb r2, r3, r1
1345 mov r1, #0x00020000
13463: subs r1, r1, #1
1347 bne 3b
1348 teq r2, #'\n'
1349 moveq r2, #'\r'
1350 beq 2b
1351 teq r0, #0
1352 bne 1b
1353 mov pc, lr
1354@ putc corrupts {r0, r1, r2, r3}
1355putc:
1356 mov r2, r0
1357 loadsp r3, r1, r0
1358 mov r0, #0
1359 b 2b
1360
1361@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
1362memdump: mov r12, r0
1363 mov r10, lr
1364 mov r11, #0
13652: mov r0, r11, lsl #2
1366 add r0, r0, r12
1367 mov r1, #8
1368 bl phex
1369 mov r0, #':'
1370 bl putc
13711: mov r0, #' '
1372 bl putc
1373 ldr r0, [r12, r11, lsl #2]
1374 mov r1, #8
1375 bl phex
1376 and r0, r11, #7
1377 teq r0, #3
1378 moveq r0, #' '
1379 bleq putc
1380 and r0, r11, #7
1381 add r11, r11, #1
1382 teq r0, #7
1383 bne 1b
1384 mov r0, #'\n'
1385 bl putc
1386 cmp r11, #64
1387 blt 2b
1388 mov pc, r10
1389#endif
1390
1391 .ltorg
1392
1393#ifdef CONFIG_ARM_VIRT_EXT
1394.align 5
1395__hyp_reentry_vectors:
1396 W(b) . @ reset
1397 W(b) . @ undef
1398#ifdef CONFIG_EFI_STUB
1399 W(b) __enter_kernel_from_hyp @ hvc from HYP
1400#else
1401 W(b) . @ svc
1402#endif
1403 W(b) . @ pabort
1404 W(b) . @ dabort
1405 W(b) __enter_kernel @ hyp
1406 W(b) . @ irq
1407 W(b) . @ fiq
1408#endif /* CONFIG_ARM_VIRT_EXT */
1409
1410__enter_kernel:
1411 mov r0, #0 @ must be 0
1412 mov r1, r7 @ restore architecture number
1413 mov r2, r8 @ restore atags pointer
1414 ARM( mov pc, r4 ) @ call kernel
1415 M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class
1416 THUMB( bx r4 ) @ entry point is always ARM for A/R classes
1417
1418reloc_code_end:
1419
1420#ifdef CONFIG_EFI_STUB
1421__enter_kernel_from_hyp:
1422 mrc p15, 4, r0, c1, c0, 0 @ read HSCTLR
1423 bic r0, r0, #0x5 @ disable MMU and caches
1424 mcr p15, 4, r0, c1, c0, 0 @ write HSCTLR
1425 isb
1426 b __enter_kernel
1427
1428ENTRY(efi_enter_kernel)
1429 mov r4, r0 @ preserve image base
1430 mov r8, r1 @ preserve DT pointer
1431
1432 adr_l r0, call_cache_fn
1433 adr r1, 0f @ clean the region of code we
1434 bl cache_clean_flush @ may run with the MMU off
1435
1436#ifdef CONFIG_ARM_VIRT_EXT
1437 @
1438 @ The EFI spec does not support booting on ARM in HYP mode,
1439 @ since it mandates that the MMU and caches are on, with all
1440 @ 32-bit addressable DRAM mapped 1:1 using short descriptors.
1441 @
1442 @ While the EDK2 reference implementation adheres to this,
1443 @ U-Boot might decide to enter the EFI stub in HYP mode
1444 @ anyway, with the MMU and caches either on or off.
1445 @
1446 mrs r0, cpsr @ get the current mode
1447 msr spsr_cxsf, r0 @ record boot mode
1448 and r0, r0, #MODE_MASK @ are we running in HYP mode?
1449 cmp r0, #HYP_MODE
1450 bne .Lefi_svc
1451
1452 mrc p15, 4, r1, c1, c0, 0 @ read HSCTLR
1453 tst r1, #0x1 @ MMU enabled at HYP?
1454 beq 1f
1455
1456 @
1457 @ When running in HYP mode with the caches on, we're better
1458 @ off just carrying on using the cached 1:1 mapping that the
1459 @ firmware provided. Set up the HYP vectors so HVC instructions
1460 @ issued from HYP mode take us to the correct handler code. We
1461 @ will disable the MMU before jumping to the kernel proper.
1462 @
1463 ARM( bic r1, r1, #(1 << 30) ) @ clear HSCTLR.TE
1464 THUMB( orr r1, r1, #(1 << 30) ) @ set HSCTLR.TE
1465 mcr p15, 4, r1, c1, c0, 0
1466 adr r0, __hyp_reentry_vectors
1467 mcr p15, 4, r0, c12, c0, 0 @ set HYP vector base (HVBAR)
1468 isb
1469 b .Lefi_hyp
1470
1471 @
1472 @ When running in HYP mode with the caches off, we need to drop
1473 @ into SVC mode now, and let the decompressor set up its cached
1474 @ 1:1 mapping as usual.
1475 @
14761: mov r9, r4 @ preserve image base
1477 bl __hyp_stub_install @ install HYP stub vectors
1478 safe_svcmode_maskall r1 @ drop to SVC mode
1479 msr spsr_cxsf, r0 @ record boot mode
1480 orr r4, r9, #1 @ restore image base and set LSB
1481 b .Lefi_hyp
1482.Lefi_svc:
1483#endif
1484 mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
1485 tst r0, #0x1 @ MMU enabled?
1486 orreq r4, r4, #1 @ set LSB if not
1487
1488.Lefi_hyp:
1489 mov r0, r8 @ DT start
1490 add r1, r8, r2 @ DT end
1491 bl cache_clean_flush
1492
1493 adr r0, 0f @ switch to our stack
1494 ldr sp, [r0]
1495 add sp, sp, r0
1496
1497 mov r5, #0 @ appended DTB size
1498 mov r7, #0xFFFFFFFF @ machine ID
1499 b wont_overwrite
1500ENDPROC(efi_enter_kernel)
15010: .long .L_user_stack_end - .
1502#endif
1503
1504 .align
1505 .section ".stack", "aw", %nobits
1506.L_user_stack: .space 4096
1507.L_user_stack_end: