/* "Das U-Boot" Source Tree */
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * (C) Copyright 2002
4 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
5 */
6
7#include <config.h>
8#include <cpu_func.h>
9#include <log.h>
10#include <malloc.h>
11#include <asm/cache.h>
12#include <asm/global_data.h>
13
14DECLARE_GLOBAL_DATA_PTR;
15
16/*
17 * Flush range from all levels of d-cache/unified-cache.
18 * Affects the range [start, start + size - 1].
19 */
20__weak void flush_cache(unsigned long start, unsigned long size)
21{
22 flush_dcache_range(start, start + size);
23}
24
/*
 * Default implementation:
 * do a range flush for the entire range
 *
 * (0, ~0) makes flush_cache() span the whole address range; being __weak,
 * platforms with a cheaper full-cache operation can override this.
 */
__weak void flush_dcache_all(void)
{
	flush_cache(0, ~0);
}
33
34/*
35 * Default implementation of enable_caches()
36 * Real implementation should be in platform code
37 */
38__weak void enable_caches(void)
39{
40 puts("WARNING: Caches not enabled\n");
41}
42
/*
 * invalidate_dcache_range() - discard d-cache lines covering [start, stop)
 * without writing dirty data back to memory.
 */
__weak void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}
/*
 * flush_dcache_range() - write back (and invalidate) d-cache lines
 * covering [start, stop).
 */
__weak void flush_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}
51
52int check_cache_range(unsigned long start, unsigned long stop)
53{
54 int ok = 1;
55
56 if (start & (CONFIG_SYS_CACHELINE_SIZE - 1))
57 ok = 0;
58
59 if (stop & (CONFIG_SYS_CACHELINE_SIZE - 1))
60 ok = 0;
61
62 if (!ok) {
63 warn_non_xpl("CACHE: Misaligned operation at range [%08lx, %08lx]\n",
64 start, stop);
65 }
66
67 return ok;
68}
69
70#ifdef CONFIG_SYS_NONCACHED_MEMORY
/*
 * Reserve one MMU section worth of address space below the malloc() area that
 * will be mapped uncached.
 */
/* Inclusive lower bound of the uncached region, set by noncached_init() */
static unsigned long noncached_start;
/* Exclusive upper bound of the uncached region */
static unsigned long noncached_end;
/* Next free address to be handed out by noncached_alloc() */
static unsigned long noncached_next;
78
/*
 * noncached_set_region() - mark [noncached_start, noncached_end) as
 * uncached in the MMU tables. Nothing to do when the d-cache is compiled
 * out (SYS_DCACHE_OFF).
 */
void noncached_set_region(void)
{
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
	mmu_set_region_dcache_behaviour(noncached_start,
					noncached_end - noncached_start,
					DCACHE_OFF);
#endif
}
87
88int noncached_init(void)
89{
90 phys_addr_t start, end;
91 size_t size;
92
93 /* If this calculation changes, update board_f.c:reserve_noncached() */
94 end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
95 size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
96 start = end - size;
97
98 debug("mapping memory %pa-%pa non-cached\n", &start, &end);
99
100 noncached_start = start;
101 noncached_end = end;
102 noncached_next = start;
103
104 noncached_set_region();
105
106 return 0;
107}
108
109phys_addr_t noncached_alloc(size_t size, size_t align)
110{
111 phys_addr_t next = ALIGN(noncached_next, align);
112
113 if (next >= noncached_end || (noncached_end - next) < size)
114 return 0;
115
116 debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
117 noncached_next = next + size;
118
119 return next;
120}
121#endif /* CONFIG_SYS_NONCACHED_MEMORY */
122
123#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
/*
 * invalidate_l2_cache() - invalidate the L2 cache through the CP15
 * c15/c11 cache-operation register (see the mcr encoding below).
 */
void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		: : "r" (val) : "cc");
	isb();	/* barrier: complete the operation before continuing */
}
132#endif
133
/*
 * arch_reserve_mmu() - architecture hook for reserving MMU table memory;
 * delegates to arm_reserve_mmu(), which boards may override (it is __weak).
 *
 * Return: value of arm_reserve_mmu() (0 in the default implementation)
 */
int arch_reserve_mmu(void)
{
	return arm_reserve_mmu();
}
138
/*
 * arm_reserve_mmu() - default reservation of the TLB/page-table area
 *
 * Takes PGTABLE_SIZE bytes off the top of the relocation area
 * (gd->relocaddr), aligns the result down to a 64 kB boundary, and records
 * the placement in gd->arch.tlb_addr / gd->arch.tlb_size. Compiled to a
 * bare "return 0" when both i-cache and d-cache are disabled at build time.
 *
 * Return: always 0
 */
__weak int arm_reserve_mmu(void)
{
#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->relocaddr -= gd->arch.tlb_size;

	/* round down to next 64 kB limit */
	gd->relocaddr &= ~(0x10000 - 1);

	gd->arch.tlb_addr = gd->relocaddr;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

#ifdef CFG_SYS_MEM_RESERVE_SECURE
	/*
	 * Record allocated tlb_addr in case gd->tlb_addr to be overwritten
	 * with location within secure ram.
	 */
	gd->arch.tlb_allocated = gd->arch.tlb_addr;
#endif

	if (IS_ENABLED(CONFIG_CMO_BY_VA_ONLY)) {
		/*
		 * As invalidate_dcache_all() will be called before
		 * mmu_setup(), we should make sure that the PTs are
		 * already in a valid state.
		 */
		memset((void *)gd->arch.tlb_addr, 0, gd->arch.tlb_size);
	}
#endif

	return 0;
}