"Das U-Boot" Source Tree
// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <alist.h>
#include <efi_loader.h>
#include <event.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <spl.h>

#include <asm/global_data.h>
#include <asm/sections.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * The following low level LMB functions must not access the global LMB memory
 * map since they are also used to manage IOVA memory maps in iommu drivers
 * like apple_dart.
 */

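/**
 * lmb_addrs_overlap() - check if two address ranges overlap
 * @base1: base address of the first region
 * @size1: size of the first region
 * @base2: base address of the second region
 * @size2: size of the second region
 *
 * Return: 1 if the two ranges share at least one byte, 0 otherwise
 */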
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	const phys_addr_t base1_end = base1 + size1 - 1;
	const phys_addr_t base2_end = base2 + size2 - 1;

	return ((base1 <= base2_end) && (base2 <= base1_end));
}

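/**
 * lmb_addrs_adjacent() - check if two address ranges are adjacent
 * @base1: base address of the first region
 * @size1: size of the first region
 * @base2: base address of the second region
 * @size2: size of the second region
 *
 * Return: 1 if region 2 immediately follows region 1, -1 if region 1
 * immediately follows region 2, 0 if the regions are not adjacent
 */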
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

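/* Check if regions @r1 and @r2 of @lmb_rgn_lst overlap */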
static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
				unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_overlap(base1, size1, base2, size2);
}

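/* Check if regions @r1 and @r2 of @lmb_rgn_lst are adjacent */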
static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

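/* Remove region @r from @lmb_rgn_lst, shifting the following entries down */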
static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
		rgn[i].base = rgn[i + 1].base;
		rgn[i].size = rgn[i + 1].size;
		rgn[i].flags = rgn[i + 1].flags;
	}
	lmb_rgn_lst->count--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
				 unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn[r1].size += rgn[r2].size;
	lmb_remove_region(lmb_rgn_lst, r2);
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
				     unsigned long r1, unsigned long r2)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;

	phys_addr_t base1 = rgn[r1].base;
	phys_size_t size1 = rgn[r1].size;
	phys_addr_t base2 = rgn[r2].base;
	phys_size_t size2 = rgn[r2].size;

	if (base1 + size1 > base2 + size2) {
		printf("Unexpected case: region 1 fully contains region 2\n");
		return;
	}
	rgn[r1].size = base2 + size2 - base1;
	lmb_remove_region(lmb_rgn_lst, r2);
}

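/**
 * lmb_resize_regions() - merge an address range with all overlapping regions
 * @lmb_rgn_lst: LMB list containing the regions
 * @idx_start: index of the first region overlapping the new area
 * @base: start address of the area to be merged in
 * @size: size of the area to be merged in
 *
 * Grow the region at @idx_start so that it covers both the requested area
 * and every region of the list that the area overlaps, then remove the
 * regions that were merged. Only regions with flags %LMB_NONE may be
 * merged this way.
 *
 * Return: 0 on success, -1 if an overlapping region has flags other than
 * %LMB_NONE
 */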
static long lmb_resize_regions(struct alist *lmb_rgn_lst,
			       unsigned long idx_start,
			       phys_addr_t base, phys_size_t size)
{
	phys_size_t rgnsize;
	unsigned long rgn_cnt, idx, idx_end;
	phys_addr_t rgnbase, rgnend;
	phys_addr_t mergebase, mergeend;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	rgn_cnt = 0;
	idx = idx_start;
	idx_end = idx_start;

	/*
	 * First thing to do is to identify how many regions
	 * the requested region overlaps.
	 * If the flags match, combine all these overlapping
	 * regions into a single region, and remove the merged
	 * regions.
	 */
	while (idx <= lmb_rgn_lst->count - 1) {
		rgnbase = rgn[idx].base;
		rgnsize = rgn[idx].size;

		if (lmb_addrs_overlap(base, size, rgnbase,
				      rgnsize)) {
			if (rgn[idx].flags != LMB_NONE)
				return -1;
			rgn_cnt++;
			idx_end = idx;
		}
		idx++;
	}

	/* The merged region's base and size */
	rgnbase = rgn[idx_start].base;
	mergebase = min(base, rgnbase);
	rgnend = rgn[idx_end].base + rgn[idx_end].size;
	mergeend = max(rgnend, (base + size));

	rgn[idx_start].base = mergebase;
	rgn[idx_start].size = mergeend - mergebase;

	/* Now remove the merged regions */
	while (--rgn_cnt)
		lmb_remove_region(lmb_rgn_lst, idx_start + 1);

	return 0;
}

/**
 * lmb_add_region_flags() - Add an lmb region to the given list
 * @lmb_rgn_lst: LMB list to which region is to be added (free/used)
 * @base: Start address of the region
 * @size: Size of the region to be added
 * @flags: Attributes of the LMB region
 *
 * Add a region of memory to the list. If the region does not exist, add
 * it to the list. Depending on the attributes of the region to be added,
 * the function might resize an already existing region or coalesce two
 * adjacent regions.
 *
 * Return:
 * * %0		- Added successfully, or it's already added (only if LMB_NONE)
 * * %-EEXIST	- The region is already added, and flags != LMB_NONE
 * * %-1	- Failure
 */
static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
				 phys_size_t size, u32 flags)
{
	unsigned long coalesced = 0;
	long ret, i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	if (alist_err(lmb_rgn_lst))
		return -1;

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;
		u32 rgnflags = rgn[i].flags;

		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (ret > 0) {
			if (flags != rgnflags)
				break;
			rgn[i].base -= size;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (ret < 0) {
			if (flags != rgnflags)
				break;
			rgn[i].size += size;
			coalesced++;
			break;
		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
			if (flags != LMB_NONE)
				return -EEXIST;

			ret = lmb_resize_regions(lmb_rgn_lst, i, base, size);
			if (ret < 0)
				return -1;

			coalesced++;
			break;
		}
	}

	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
		rgn = lmb_rgn_lst->data;
		if (rgn[i].flags == rgn[i + 1].flags) {
			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
				/* fix overlapping area */
				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
				coalesced++;
			}
		}
	}

	if (coalesced)
		return 0;

	if (alist_full(lmb_rgn_lst) &&
	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
		return -1;
	rgn = lmb_rgn_lst->data;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = lmb_rgn_lst->count; i >= 0; i--) {
		if (i && base < rgn[i - 1].base) {
			rgn[i] = rgn[i - 1];
		} else {
			rgn[i].base = base;
			rgn[i].size = size;
			rgn[i].flags = flags;
			break;
		}
	}

	lmb_rgn_lst->count++;

	return 0;
}

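/**
 * _lmb_free() - remove a previously added region from an LMB list
 * @lmb_rgn_lst: LMB list to remove the region from
 * @base: start address of the region to free
 * @size: size of the region to free
 *
 * The freed range must lie entirely within a single region of the list.
 * Freeing the middle of a region splits it in two.
 *
 * Return: 0 on success, -1 if no containing region was found
 */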
static long _lmb_free(struct alist *lmb_rgn_lst, phys_addr_t base,
		      phys_size_t size)
{
	struct lmb_region *rgn;
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size - 1;
	int i;

	/* Suppress GCC warnings */
	rgnbegin = 0;
	rgnend = 0;

	rgn = lmb_rgn_lst->data;
	/* Find the region where (base, size) belongs to */
	for (i = 0; i < lmb_rgn_lst->count; i++) {
		rgnbegin = rgn[i].base;
		rgnend = rgnbegin + rgn[i].size - 1;

		if (rgnbegin <= base && end <= rgnend)
			break;
	}

	/* Didn't find the region */
	if (i == lmb_rgn_lst->count)
		return -1;

	/* Check to see if we are removing entire region */
	if (rgnbegin == base && rgnend == end) {
		lmb_remove_region(lmb_rgn_lst, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		rgn[i].base = end + 1;
		rgn[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after hole.
	 */
	rgn[i].size = base - rgn[i].base;
	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
				    rgn[i].flags);
}

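/**
 * lmb_overlaps_region() - find the first region overlapping an address range
 * @lmb_rgn_lst: LMB list to search
 * @base: start address of the range
 * @size: size of the range
 *
 * Return: index of the first overlapping region, or -1 if none overlaps
 */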
static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;
	struct lmb_region *rgn = lmb_rgn_lst->data;

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		phys_addr_t rgnbase = rgn[i].base;
		phys_size_t rgnsize = rgn[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < lmb_rgn_lst->count) ? i : -1;
}

/*
 * IOVA LMB memory maps using lmb pointers instead of the global LMB memory map.
 */

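/**
 * io_lmb_setup() - initialise the list structures of an IOVA LMB map
 * @io_lmb: IOVA LMB map to initialise
 *
 * Return: 0 on success, -ENOMEM if a list could not be initialised
 */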
int io_lmb_setup(struct lmb *io_lmb)
{
	int ret;

	ret = alist_init(&io_lmb->available_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free IOVA\n");
		return -ENOMEM;
	}

	ret = alist_init(&io_lmb->used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used IOVA\n");
		return -ENOMEM;
	}

	io_lmb->test = false;

	return 0;
}

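/* Release the list structures of an IOVA LMB map */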
void io_lmb_teardown(struct lmb *io_lmb)
{
	alist_uninit(&io_lmb->available_mem);
	alist_uninit(&io_lmb->used_mem);
}

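/* Add an IOVA range to the available memory of @io_lmb */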
long io_lmb_add(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
	return lmb_add_region_flags(&io_lmb->available_mem, base, size, LMB_NONE);
}

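/**
 * io_lmb_alloc() - allocate IOVA space from an IOVA LMB map
 * @io_lmb: IOVA LMB map to allocate from
 * @size: size of the requested allocation
 * @align: required alignment of the allocation base
 *
 * Scan the available memory top-down for an aligned gap that does not
 * overlap any used region, and record the result as used.
 *
 * Return: base address of the allocation, or 0 on failure
 */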
/* derived and simplified from _lmb_alloc_base() */
phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align)
{
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = io_lmb->used_mem.data;
	struct lmb_region *lmb_memory = io_lmb->available_mem.data;

	for (i = io_lmb->available_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;
		base = ALIGN_DOWN(lmbbase + lmbsize - size, align);

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&io_lmb->used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&io_lmb->used_mem, base,
							 size, LMB_NONE) < 0)
					return 0;

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = ALIGN_DOWN(res_base - size, align);
		}
	}
	return 0;
}

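/* Return a used IOVA range to the map; the inverse of io_lmb_alloc() */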
long io_lmb_free(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
	return _lmb_free(&io_lmb->used_mem, base, size);
}

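/*
 * Illustrative use of the io_lmb_*() API from a hypothetical iommu driver;
 * the iovad variable and the IOVA window values below are made up:
 *
 *	struct lmb iovad;
 *	phys_addr_t dva;
 *
 *	io_lmb_setup(&iovad);
 *	io_lmb_add(&iovad, SZ_4M, SZ_2G);		// IOVA window
 *	dva = io_lmb_alloc(&iovad, SZ_64K, SZ_16K);	// map a buffer
 *	if (dva)
 *		io_lmb_free(&iovad, dva, SZ_64K);	// unmap it again
 *	io_lmb_teardown(&iovad);
 */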
/*
 * Low level LMB functions are used to manage IOVA memory maps for the Apple
 * dart iommu. They must not access the global LMB memory map.
 * So keep the global LMB variable declaration unreachable from them.
 */

static struct lmb lmb;

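/**
 * lmb_map_update_notify() - notify the EFI subsystem of a memory map change
 * @addr: start address of the changed region
 * @size: size of the changed region
 * @op: operation performed on the map (add, free or reserve)
 * @flags: flags of the region; %LMB_NONOTIFY suppresses the notification
 *
 * The notification is also skipped when running under the unit-test
 * framework (lmb.test set).
 *
 * Return: 0 on success, or the error from efi_map_update_notify()
 */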
static int lmb_map_update_notify(phys_addr_t addr, phys_size_t size,
				 enum lmb_map_op op, u32 flags)
{
	if (CONFIG_IS_ENABLED(EFI_LOADER) &&
	    !lmb.test && !(flags & LMB_NONOTIFY))
		return efi_map_update_notify(addr, size, op);

	return 0;
}

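/* Print the flags of a region as a comma-separated list of names */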
static void lmb_print_region_flags(u32 flags)
{
	const char * const flag_str[] = { "none", "no-map", "no-overwrite",
					  "no-notify" };
	unsigned int pflags = flags &
			      (LMB_NOMAP | LMB_NOOVERWRITE | LMB_NONOTIFY);

	if (flags != pflags) {
		printf("invalid %#x\n", flags);
		return;
	}

	do {
		int bitpos = pflags ? fls(pflags) - 1 : 0;

		printf("%s", flag_str[bitpos]);
		pflags &= ~(1u << bitpos);
		puts(pflags ? ", " : "\n");
	} while (pflags);
}

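/* Dump one region list (available or reserved memory) with its flags */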
static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
	struct lmb_region *rgn = lmb_rgn_lst->data;
	unsigned long long base, size, end;
	u32 flags;
	int i;

	printf(" %s.count = %#x\n", name, lmb_rgn_lst->count);

	for (i = 0; i < lmb_rgn_lst->count; i++) {
		base = rgn[i].base;
		size = rgn[i].size;
		end = base + size - 1;
		flags = rgn[i].flags;

		printf(" %s[%d]\t[%#llx-%#llx], %#llx bytes, flags: ",
		       name, i, base, end, size);
		lmb_print_region_flags(flags);
	}
}

void lmb_dump_all_force(void)
{
	printf("lmb_dump_all:\n");
	lmb_dump_region(&lmb.available_mem, "memory");
	lmb_dump_region(&lmb.used_mem, "reserved");
}

void lmb_dump_all(void)
{
#ifdef DEBUG
	lmb_dump_all_force();
#endif
}

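/*
 * Reserve the memory between the bottom of the U-Boot stack and the top of
 * usable RAM so that LMB allocations cannot overwrite U-Boot itself.
 */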
static void lmb_reserve_uboot_region(void)
{
	int bank;
	ulong end, bank_end;
	phys_addr_t rsv_start;

	rsv_start = gd->start_addr_sp - CONFIG_STACK_SIZE;
	end = gd->ram_top;

	/*
	 * Reserve memory from aligned address below the bottom of U-Boot stack
	 * until end of RAM area to prevent LMB from overwriting that memory.
	 */
	debug("## Current stack ends at 0x%08lx ", (ulong)rsv_start);

	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
		if (!gd->bd->bi_dram[bank].size ||
		    rsv_start < gd->bd->bi_dram[bank].start)
			continue;
		/* Watch out for RAM at end of address space! */
		bank_end = gd->bd->bi_dram[bank].start +
			   gd->bd->bi_dram[bank].size - 1;
		if (rsv_start > bank_end)
			continue;
		if (bank_end > end)
			bank_end = end - 1;

		lmb_reserve(rsv_start, bank_end - rsv_start + 1, LMB_NOOVERWRITE);

		if (gd->flags & GD_FLG_SKIP_RELOC)
			lmb_reserve((phys_addr_t)(uintptr_t)_start,
				    gd->mon_len, LMB_NOOVERWRITE);

		break;
	}
}

static void lmb_reserve_common(void *fdt_blob)
{
	lmb_reserve_uboot_region();

	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
		boot_fdt_add_mem_rsv_regions(fdt_blob);
}

static __maybe_unused void lmb_reserve_common_spl(void)
{
	phys_addr_t rsv_start;
	phys_size_t rsv_size;

	/*
	 * Assume an SPL stack of 16KB. This must be
	 * more than enough for the SPL stage.
	 */
	if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
		rsv_start = gd->start_addr_sp - 16384;
		rsv_size = 16384;
		lmb_reserve(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}

	if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
		/* Reserve the bss region */
		rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
		rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
			   (phys_addr_t)(uintptr_t)__bss_start;
		lmb_reserve(rsv_start, rsv_size, LMB_NOOVERWRITE);
	}
}

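/**
 * lmb_add_memory() - add all DRAM banks to the available LMB memory
 *
 * Walk gd->bd->bi_dram[] and add each bank. Memory above ram_top is added
 * as well, but immediately reserved as no-overwrite so that it is never
 * handed out by the allocator.
 */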
void lmb_add_memory(void)
{
	int i;
	phys_addr_t bank_end;
	phys_size_t size;
	u64 ram_top = gd->ram_top;
	struct bd_info *bd = gd->bd;

	if (CONFIG_IS_ENABLED(LMB_ARCH_MEM_MAP))
		return lmb_arch_add_memory();

	/* Assume a 4GB ram_top if not defined */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		size = bd->bi_dram[i].size;
		bank_end = bd->bi_dram[i].start + size;

		if (size) {
			lmb_add(bd->bi_dram[i].start, size);

			/*
			 * Reserve memory above ram_top as
			 * no-overwrite so that it cannot be
			 * allocated
			 */
			if (bd->bi_dram[i].start >= ram_top)
				lmb_reserve(bd->bi_dram[i].start, size,
					    LMB_NOOVERWRITE);
			else if (bank_end > ram_top)
				lmb_reserve(ram_top, bank_end - ram_top,
					    LMB_NOOVERWRITE);
		}
	}
}

/* This routine may be called with relocation disabled. */
long lmb_add(phys_addr_t base, phys_size_t size)
{
	long ret;
	struct alist *lmb_rgn_lst = &lmb.available_mem;

	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, LMB_MAP_OP_ADD, LMB_NONE);
}

long lmb_free_flags(phys_addr_t base, phys_size_t size,
		    uint flags)
{
	long ret;

	ret = _lmb_free(&lmb.used_mem, base, size);
	if (ret < 0)
		return ret;

	return lmb_map_update_notify(base, size, LMB_MAP_OP_FREE, flags);
}

long lmb_free(phys_addr_t base, phys_size_t size)
{
	return lmb_free_flags(base, size, LMB_NONE);
}

long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags)
{
	long ret = 0;
	struct alist *lmb_rgn_lst = &lmb.used_mem;

	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
	if (ret)
		return ret;

	return lmb_map_update_notify(base, size, LMB_MAP_OP_RESERVE, flags);
}

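/**
 * _lmb_alloc_base() - find and reserve a free area below a maximum address
 * @size: size of the requested allocation
 * @align: required alignment of the allocation base
 * @max_addr: upper limit of the allocation, or %LMB_ALLOC_ANYWHERE
 * @flags: flags to set on the resulting reservation
 *
 * Walk the available memory top-down, stepping past reserved regions until
 * an aligned gap of at least @size bytes is found, then reserve it and
 * notify the EFI subsystem.
 *
 * Return: base address of the allocation, 0 if no suitable area was found,
 * or an error code if the map update notification failed
 */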
static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
				   phys_addr_t max_addr, u32 flags)
{
	int ret;
	long i, rgn;
	phys_addr_t base = 0;
	phys_addr_t res_base;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.available_mem.data;

	for (i = lmb.available_mem.count - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb_memory[i].base;
		phys_size_t lmbsize = lmb_memory[i].size;

		if (lmbsize < size)
			continue;

		if (max_addr == LMB_ALLOC_ANYWHERE) {
			base = ALIGN_DOWN(lmbbase + lmbsize - size, align);
		} else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = ALIGN_DOWN(base - size, align);
		} else {
			continue;
		}

		while (base && lmbbase <= base) {
			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
			if (rgn < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region_flags(&lmb.used_mem, base,
							 size, flags))
					return 0;

				ret = lmb_map_update_notify(base, size,
							    LMB_MAP_OP_RESERVE,
							    flags);
				if (ret)
					return ret;

				return base;
			}

			res_base = lmb_used[rgn].base;
			if (res_base < size)
				break;
			base = ALIGN_DOWN(res_base - size, align);
		}
	}
	return 0;
}

phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE);
}

phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr,
			   uint flags)
{
	phys_addr_t alloc;

	alloc = _lmb_alloc_base(size, align, max_addr, flags);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

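/*
 * Illustrative call (values are made up): reserve 1 MiB, 4 KiB-aligned,
 * below 2 GiB for a boot image:
 *
 *	phys_addr_t buf;
 *
 *	buf = lmb_alloc_base(SZ_1M, SZ_4K, SZ_2G, LMB_NONE);
 *	if (!buf)
 *		return -ENOMEM;
 */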
phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
{
	long rgn;
	struct lmb_region *lmb_memory = lmb.available_mem.data;

	/* Check if the requested address is in one of the memory regions */
	rgn = lmb_overlaps_region(&lmb.available_mem, base, size);
	if (rgn >= 0) {
		/*
		 * Check if the requested end address is in the same memory
		 * region we found.
		 */
		if (lmb_addrs_overlap(lmb_memory[rgn].base,
				      lmb_memory[rgn].size,
				      base + size - 1, 1)) {
			/* ok, reserve the memory */
			if (!lmb_reserve(base, size, flags))
				return base;
		}
	}

	return 0;
}

/* Return number of bytes from a given address that are free */
phys_size_t lmb_get_free_size(phys_addr_t addr)
{
	int i;
	long rgn;
	struct lmb_region *lmb_used = lmb.used_mem.data;
	struct lmb_region *lmb_memory = lmb.available_mem.data;

	/* check if the requested address is in the memory regions */
	rgn = lmb_overlaps_region(&lmb.available_mem, addr, 1);
	if (rgn >= 0) {
		for (i = 0; i < lmb.used_mem.count; i++) {
			if (addr < lmb_used[i].base) {
				/* first reserved range > requested address */
				return lmb_used[i].base - addr;
			}
			if (lmb_used[i].base +
			    lmb_used[i].size > addr) {
				/* requested addr is in this reserved range */
				return 0;
			}
		}
		/* if we come here: no reserved ranges above requested addr */
		return lmb_memory[lmb.available_mem.count - 1].base +
		       lmb_memory[lmb.available_mem.count - 1].size - addr;
	}
	return 0;
}

int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
	int i;
	struct lmb_region *lmb_used = lmb.used_mem.data;

	for (i = 0; i < lmb.used_mem.count; i++) {
		phys_addr_t upper = lmb_used[i].base +
				    lmb_used[i].size - 1;
		if (addr >= lmb_used[i].base && addr <= upper)
			return (lmb_used[i].flags & flags) == flags;
	}
	return 0;
}

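/**
 * lmb_setup() - initialise the global LMB memory map
 * @test: true when running under the unit-test framework, which suppresses
 *	  EFI map update notifications
 *
 * Return: 0 on success, -ENOMEM if a list could not be initialised
 */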
static int lmb_setup(bool test)
{
	bool ret;

	ret = alist_init(&lmb.available_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB free memory\n");
		return -ENOMEM;
	}

	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
			 (uint)LMB_ALIST_INITIAL_SIZE);
	if (!ret) {
		log_debug("Unable to initialise the list for LMB used memory\n");
		return -ENOMEM;
	}

	lmb.test = test;

	return 0;
}

int lmb_init(void)
{
	int ret;

	ret = lmb_setup(false);
	if (ret) {
		log_info("Unable to init LMB\n");
		return ret;
	}

	lmb_add_memory();

	/* Reserve the U-Boot image region once U-Boot has relocated */
	if (xpl_phase() == PHASE_SPL)
		lmb_reserve_common_spl();
	else if (xpl_phase() == PHASE_BOARD_R)
		lmb_reserve_common((void *)gd->fdt_blob);

	return 0;
}

struct lmb *lmb_get(void)
{
	return &lmb;
}

#if CONFIG_IS_ENABLED(UNIT_TEST)
int lmb_push(struct lmb *store)
{
	int ret;

	*store = lmb;
	ret = lmb_setup(true);
	if (ret)
		return ret;

	return 0;
}

void lmb_pop(struct lmb *store)
{
	alist_uninit(&lmb.available_mem);
	alist_uninit(&lmb.used_mem);
	lmb = *store;
}
#endif /* UNIT_TEST */