/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/v7m.h>

#include "efi-header.S"

 AR_CLASS(	.arch	armv7-a	)
 M_CLASS(	.arch	armv7-m	)

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb, ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp1, tmp2
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp, rb, tmp1, tmp2
		addruart \rb, \tmp1, \tmp2
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		/*
		 * Debug kernel copy by printing the memory addresses involved
		 */
		.macro	dbgkc, begin, end, cbegin, cend
#ifdef DEBUG
		kputc	#'\n'
		kputc	#'C'
		kputc	#':'
		kputc	#'0'
		kputc	#'x'
		kphex	\begin, 8	/* Start of compressed kernel */
		kputc	#'-'
		kputc	#'0'
		kputc	#'x'
		kphex	\end, 8		/* End of compressed kernel */
		kputc	#'-'
		kputc	#'>'
		kputc	#'0'
		kputc	#'x'
		kphex	\cbegin, 8	/* Start of kernel copy */
		kputc	#'-'
		kputc	#'0'
		kputc	#'x'
		kphex	\cend, 8	/* End of kernel copy */
		kputc	#'\n'
		kputc	#'\r'
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		/*
		 * Always enter in ARM state for CPUs that support the ARM ISA.
		 * As of today (2014) that's exactly the members of the A and R
		 * classes.
		 */
 AR_CLASS(	.arm	)
start:
		.type	start,#function
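		/*
		 * Pad with NOP-sized slots so that the "Magic numbers"
		 * words below end up at fixed offsets which a boot
		 * loader can probe; legacy loaders may also execute
		 * straight through this padding.
		 */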
		.rept	7
		__nop
		.endr
#ifndef CONFIG_THUMB2_KERNEL
		mov	r0, r0
#else
 AR_CLASS(	sub	pc, pc, #3	)	@ A/R: switch to Thumb2 mode
 M_CLASS(	nop.w			)	@ M: already in Thumb2 mode
		.thumb
#endif
		W(b)	1f

		.word	_magic_sig	@ Magic numbers to help the loader
		.word	_magic_start	@ absolute load/run zImage address
		.word	_magic_end	@ zImage end address
		.word	0x04030201	@ endianness flag
		.word	0x45454545	@ another magic number to indicate
		.word	_magic_table	@ additional data table
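
		/*
		 * Illustrative sketch only, not part of this file: a boot
		 * loader can read the words above from a loaded image to
		 * size and validate the zImage.  _magic_sig is defined in
		 * the zImage linker script (0x016f2818 on current kernels),
		 * and find_magic_words() below is a hypothetical helper:
		 *
		 *	u32 *hdr = find_magic_words(image);
		 *	if (hdr[0] == 0x016f2818)		-- _magic_sig
		 *		size = hdr[2] - hdr[1];		-- end - start
		 *
		 * The 0x04030201 word reads back as 0x01020304 on a CPU of
		 * the opposite endianness, letting the loader detect the
		 * image's byte order.
		 */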

		__EFI_HEADER
1:
 ARM_BE8(	setend	be		)	@ go BE8 if compiled for BE8
 AR_CLASS(	mrs	r9, cpsr	)
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef CONFIG_CPU_V7M
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
#endif
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		/*
		 * Find the start of physical memory.  As we are executing
		 * without the MMU on, we are in the physical address space.
		 * We just need to get rid of any offset by aligning the
		 * address.
		 *
		 * This alignment is a balance between the requirements of
		 * different platforms - we have chosen 128MB to allow
		 * platforms which align the start of their physical memory
		 * to 128MB to use this feature, while allowing the zImage
		 * to be placed within the first 128MB of memory on other
		 * platforms.  Increasing the alignment means we place
		 * stricter alignment requirements on the start of physical
		 * memory, but relaxing it means that we break people who
		 * are already placing their zImage in (eg) the top 64MB
		 * of this range.
		 */
		mov	r4, pc
		and	r4, r4, #0xf8000000
		/* Determine final kernel image address. */
		add	r4, r4, #TEXT_OFFSET
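		/*
		 * e.g. if this code happens to be executing at 0x80408000:
		 * r4 = (0x80408000 & 0xf8000000) + TEXT_OFFSET
		 *    = 0x80000000 + TEXT_OFFSET
		 */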
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite ourselves.
		 * That means r4 < pc || r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is rather infrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, LC0+32
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24
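		/*
		 * r9 now holds the inflated size; assembling it byte by
		 * byte keeps this correct for both LE and BE kernels.
		 */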

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non-relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 * r0  = delta
 * r2  = BSS start
 * r3  = BSS end
 * r4  = final kernel address (possibly with LSB set)
 * r5  = appended dtb size (still unknown)
 * r6  = _edata
 * r7  = architecture ID
 * r8  = atags/device tree pointer
 * r9  = size of decompressed image
 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * r11 = GOT start
 * r12 = GOT end
 * sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here.  No GOT fixup has occurred
		 * yet, but none of the code we're about to call uses any
		 * global variable.
		 */

		/* Get the initial DTB size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif
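		/*
		 * The eor/bic/ror/eor sequence above is the classic
		 * four-instruction 32-bit byte swap, used here since the
		 * v6 REV instruction may not exist on this CPU.
		 */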
		/* 50% DTB growth should be good enough */
		add	r5, r5, r5, lsr #1
		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7
		/* clamp to 32KB min and 1MB max */
		cmp	r5, #(1 << 15)
		movlo	r5, #(1 << 15)
		cmp	r5, #(1 << 20)
		movhi	r5, #(1 << 20)
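		/*
		 * e.g. a 12 KiB DTB grows to 18 KiB after the 50% allowance
		 * and is then raised to the 32 KiB minimum.
		 */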
		/* temporarily relocate the stack past the DTB work space */
		add	sp, sp, r5

		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		mov	r2, r5
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed to by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		mov	r2, r5
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, r5
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area.  To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the current DTB size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourselves when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		/*
		 * Compute the address of the hyp vectors after relocation.
		 * This requires some arithmetic since we cannot directly
		 * reference __hyp_stub_vectors in a PC-relative way.
		 * Call __hyp_set_vectors with the new address so that we
		 * can HVC again after the copy.
		 */
0:		adr	r0, 0b
		movw	r1, #:lower16:__hyp_stub_vectors - 0b
		movt	r1, #:upper16:__hyp_stub_vectors - 0b
		add	r0, r0, r1
		sub	r0, r0, r5
		add	r0, r0, r10
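						@ r0 = where __hyp_stub_vectors
						@ will live after the copy
						@ (current - source + dest)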
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

#ifdef DEBUG
		sub	r10, r6, r5
		sub	r10, r9, r10
		/*
		 * We are about to copy the kernel to a new memory area.
		 * The boundaries of the new memory area can be found in
		 * r10 and r9, whilst r5 and r6 contain the boundaries
		 * of the memory we are going to copy.
		 * Calling dbgkc will help with the printing of this
		 * information.
		 */
		dbgkc	r5, r6, r10, r9
#endif

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		badr	r0, restart
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC (i.e. CONFIG_ZBOOT_ROM=n),
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4 = kernel execution address
 *   r7 = architecture ID
 *   r8 = atags pointer
 */
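		@ this performs the C call decompress_kernel(output_start,
		@ free_mem_start, free_mem_end, arch_id) in misc.c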
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.word	_end - restart + 16384 + 1024*1024
						@ LC0+32: room this image needs
						@ (16k page dir + 1MB of DTB
						@ headroom), used by the
						@ overlap check above
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If something else is
 * using it, we will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
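/*
 * The loop below writes 4096 one-MiB section entries, identity-mapping
 * the whole 4 GiB address space; only the 256 MiB window starting at
 * the page directory (assumed to be the start of RAM) gets the
 * cache/buffer bits.
 */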
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for a compressed kernel of up
 * to 1 MB.  If the execution is in RAM then we would only be
 * duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #(7 << 0) | (1 << 4)	@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#elif defined(CONFIG_CPU_V7M)
		/*
		 * On v7-M the processor id is located in the V7M_SCB_CPUID
		 * register, but as cache handling is IMPLEMENTATION DEFINED
		 * on v7-M (if existent at all) we just return early here.
		 * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
		 * __armv7_mmu_cache_{on,off,flush}) would be selected which
		 * use cp15 registers that are not implemented on v7-M.
		 */
		bx	lr
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
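/*
 * For example, an ARM920T (main ID 0x41129200, a v4T part) falls through
 * to the generic ARMv4T architecture entry further down, because
 * ((0x41129200 ^ 0x00020000) & 0x000f0000) == 0 while every earlier
 * entry's mask rejects it.
 */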
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		tst	r4, #1
		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		tst	r4, #1
		bne	iflush
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask off the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number of the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr

__armv5tej_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
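						@ (on CPUs without a cache
						@ type register this read
						@ returns the main ID, i.e. r9)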
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
 THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
 THUMB(		add	r1, r1, r11	)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
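@ r0 = value to print, r1 = number of hex digits; the digits are stored
@ backwards into phexbuf, and the buffer is then printed via puts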
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r2, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		loadsp	r3, r1, r0
		mov	r0, #0
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
.align 5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
		mov	r0, #0			@ must be 0
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
 ARM(		mov	pc, r4		)	@ call kernel
 M_CLASS(	add	r4, r4, #1	)	@ enter in Thumb mode for M class
 THUMB(		bx	r4	)		@ entry point is always ARM for A/R classes

reloc_code_end:

#ifdef CONFIG_EFI_STUB
		.align	2
_start:		.long	start - .

ENTRY(efi_stub_entry)
		@ allocate space on stack for passing current zImage address
		@ and for the EFI stub to return the new entry point of the
		@ zImage, as the EFI stub may copy the kernel.  The pointer
		@ address is passed in r2.  r0 and r1 are passed through from
		@ the EFI firmware to efi_entry
		adr	ip, _start
		ldr	r3, [ip]
		add	r3, r3, ip
		stmfd	sp!, {r3, lr}
		mov	r2, sp			@ pass zImage address in r2
		bl	efi_entry

		@ Check for error return from EFI stub.  r0 has FDT address
		@ or error code.
		cmn	r0, #1
		beq	efi_load_fail

		@ Preserve return value of efi_entry() in r4
		mov	r4, r0
		bl	cache_clean_flush
		bl	cache_off

		@ Set parameters for booting zImage according to boot protocol
		@ put FDT address in r2, it was returned by efi_entry()
		@ r1 is the machine type, and r0 needs to be 0
		mov	r0, #0
		mov	r1, #0xFFFFFFFF
		mov	r2, r4

		@ Branch to (possibly) relocated zImage that is in [sp]
		ldr	lr, [sp]
		ldr	ip, =start_offset
		add	lr, lr, ip
		mov	pc, lr			@ no mode switch

efi_load_fail:
		@ Return EFI_LOAD_ERROR to EFI firmware on error.
		ldr	r0, =0x80000001
		ldmfd	sp!, {ip, pc}
ENDPROC(efi_stub_entry)
#endif

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: