Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

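/*
 * The scanning code below walks the children of the /reserved-memory node.
 * As an illustration only (node names and addresses here are made up and not
 * part of this file), a statically placed region described with "reg" looks
 * like:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		fw_mem: firmware@80000000 {
 *			reg = <0x80000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 * A dynamically placed region ("size"/"alloc-ranges") is sketched further
 * down, next to __reserved_mem_alloc_size().
 */
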
#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
#include <linux/dma-map-ops.h>

#include "of_private.h"

static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	if (!err)
		kmemleak_ignore_phys(base);

	return err;
}

/*
 * alloc_reserved_mem_array() - allocate memory for the reserved_mem
 * array using memblock
 *
 * This function is used to allocate memory for the reserved_mem
 * array according to the total number of reserved memory regions
 * defined in the DT.
 * After the new array is allocated, the information stored in
 * the initial static array is copied over to this new array and
 * the new array is used from this point on.
 */
static void __init alloc_reserved_mem_array(void)
{
	struct reserved_mem *new_array;
	size_t alloc_size, copy_size, memset_size;

	alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
	if (alloc_size == SIZE_MAX) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!new_array) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -ENOMEM);
		return;
	}

	copy_size = array_size(reserved_mem_count, sizeof(*new_array));
	if (copy_size == SIZE_MAX) {
		memblock_free(new_array, alloc_size);
		total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	memset_size = alloc_size - copy_size;

	memcpy(new_array, reserved_mem, copy_size);
	memset(new_array + reserved_mem_count, 0, memset_size);

	reserved_mem = new_array;
}

static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == total_reserved_mem_cnt) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	/* Call the region specific initialization function */
	fdt_init_reserved_mem_node(rmem);

	reserved_mem_count++;
	return;
}

static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}
	return memblock_reserve(base, size);
}

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	phys_addr_t base, size;
	int i, len;
	const __be32 *prop;
	bool nomap;

	prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	for (i = 0; i < len; i++) {
		u64 b, s;

		of_flat_dt_read_addr_size(prop, i, &b, &s);

		base = b;
		size = s;

		if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
			/* Architecture specific contiguous memory fixup. */
			if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
			    of_get_flat_dt_prop(node, "reusable", NULL))
				dma_contiguous_early_fixup(base, size);
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		} else {
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));
		}
	}
	return 0;
}

/*
 * __reserved_mem_check_root() - check that the #size-cells and #address-cells
 * values provided in /reserved-memory match the values supported by the
 * current implementation, and that a 'ranges' property is present
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;
	return 0;
}

static void __init __rmem_check_for_overlap(void);

/**
 * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
 * reserved memory regions.
 *
 * This function is used to scan through the DT and store the
 * information for the reserved memory regions that are defined using
 * the "reg" property. The region node number, name, base address, and
 * size are all stored in the reserved_mem array by calling the
 * fdt_reserved_mem_save_node() function.
 */
void __init fdt_scan_reserved_mem_reg_nodes(void)
{
	const void *fdt = initial_boot_params;
	phys_addr_t base, size;
	int node, child;

	if (!fdt)
		return;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0) {
		pr_info("Reserved memory: No reserved-memory node in the DT\n");
		return;
	}

	/* Attempt dynamic allocation of a new reserved_mem array */
	alloc_reserved_mem_array();

	if (__reserved_mem_check_root(node)) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		u64 b, s;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
			continue;

		base = b;
		size = s;

		if (size) {
			uname = fdt_get_name(fdt, child, NULL);
			fdt_reserved_mem_save_node(child, uname, base, size);
		}
	}

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();
}

static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);

/*
 * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	int dynamic_nodes_cnt = 0, count = 0;
	int dynamic_nodes[MAX_RESERVED_REGIONS];
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);
		if (!err)
			count++;
		/*
		 * Save the nodes for the dynamically-placed regions
		 * into an array which will be used for allocation right
		 * after all the statically-placed regions are reserved
		 * or marked as no-map. This is done to avoid dynamically
		 * allocating from one of the statically-placed regions.
		 */
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
			dynamic_nodes[dynamic_nodes_cnt] = child;
			dynamic_nodes_cnt++;
		}
	}
	for (int i = 0; i < dynamic_nodes_cnt; i++) {
		const char *uname;
		int err;

		child = dynamic_nodes[i];
		uname = fdt_get_name(fdt, child, NULL);
		err = __reserved_mem_alloc_size(child, uname);
		if (!err)
			count++;
	}
	total_reserved_mem_cnt = count;
	return 0;
}

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	              -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int i, len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_flat_dt_get_addr_size_prop(node, "alloc-ranges", &len);
	if (prop) {
		for (i = 0; i < len; i++) {
			u64 b, s;

			of_flat_dt_read_addr_size(prop, i, &b, &s);

			start = b;
			end = b + s;

			base = 0;
			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					 uname, &base,
					 (unsigned long)(size / SZ_1M));
				break;
			}
		}
	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
					0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}
	/* Architecture specific contiguous memory fixup. */
	if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
	    of_get_flat_dt_prop(node, "reusable", NULL))
		dma_contiguous_early_fixup(base, size);
	/* Save region in the reserved_mem array */
	fdt_reserved_mem_save_node(node, uname, base, size);
	return 0;
}
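
/*
 * For reference, a dynamically placed region handled by
 * __reserved_mem_alloc_size() could look like the (illustrative, made-up)
 * node below: there is no fixed "reg" address, so a base is chosen inside
 * "alloc-ranges" honouring "size" and "alignment":
 *
 *	vpu_pool: vpu-pool {
 *		compatible = "shared-dma-pool";
 *		reusable;
 *		size = <0x2000000>;
 *		alignment = <0x400000>;
 *		alloc-ranges = <0x40000000 0x20000000>;
 *	};
 */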

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
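
/*
 * Region-specific init code is registered with RESERVEDMEM_OF_DECLARE() from
 * <linux/of_reserved_mem.h> and picked up by __reserved_mem_init_node().
 * A minimal sketch; the "vendor,my-pool" compatible and the setup function
 * below are made-up examples, not part of this file:
 *
 *	static int __init my_pool_setup(struct reserved_mem *rmem)
 *	{
 *		pr_info("my-pool at %pa, size %pa\n", &rmem->base, &rmem->size);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_setup);
 */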

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem_node() - Initialize a reserved memory region
 * @rmem: reserved_mem struct of the memory region to be initialized.
 *
 * This function is used to call the region specific initialization
 * function for a reserved memory region.
 */
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	int err = 0;
	bool nomap;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	err = __reserved_mem_init_node(rmem);
	if (err != 0 && err != -ENOENT) {
		pr_info("node %s compatible matching fail\n", rmem->name);
		if (nomap)
			memblock_clear_nomap(rmem->base, rmem->size);
		else
			memblock_phys_free(rmem->base, rmem->size);
	} else {
		phys_addr_t end = rmem->base + rmem->size - 1;
		bool reusable =
			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
			nomap ? "nomap" : "map",
			reusable ? "reusable" : "non-reusable",
			rmem->name ? rmem->name : "unknown");
	}
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *	given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on reserved
 * memory region specified by 'memory-region' property in @np node to the @dev
 * device. When a driver needs to use more than one reserved memory region, it
 * should allocate child devices and initialize the regions by name for each
 * child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
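
/*
 * Typical use from a driver probe path, as a sketch with made-up names (only
 * the of_reserved_mem_* calls are real APIs provided by this file):
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
 *							 pdev->dev.of_node, 0);
 *		if (ret && ret != -ENODEV)
 *			return ret;
 *		...
 *	}
 *
 * and, on the remove or error path:
 *
 *	of_reserved_mem_device_release(&pdev->dev);
 */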

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *	to given device
 * @dev:	pointer to the device to configure
 * @np:		pointer to the device node with 'memory-region' property
 * @name:	name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
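
/*
 * The name passed to of_reserved_mem_device_init_by_name() is matched against
 * the consumer's "memory-region-names" property. Illustrative (made-up)
 * consumer node:
 *
 *	codec@12345000 {
 *		memory-region = <&fw_mem>, <&vpu_pool>;
 *		memory-region-names = "firmware", "vpu";
 *	};
 *
 * so of_reserved_mem_device_init_by_name(dev, np, "vpu") resolves to index 1.
 */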

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
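
/*
 * A driver that only needs the base and size of a region can resolve the
 * phandle itself and call of_reserved_mem_lookup() directly. Sketch (error
 * handling trimmed, names made up):
 *
 *	struct device_node *target = of_parse_phandle(np, "memory-region", 0);
 *	struct reserved_mem *rmem = target ? of_reserved_mem_lookup(target) : NULL;
 *
 *	of_node_put(target);
 *	if (rmem)
 *		pr_info("region at %pa, size %pa\n", &rmem->base, &rmem->size);
 */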

/**
 * of_reserved_mem_region_to_resource() - Get a reserved memory region as a resource
 * @np:		node containing 'memory-region' property
 * @idx:	index of 'memory-region' property to lookup
 * @res:	Pointer to a struct resource to fill in with reserved region
 *
 * This function allows drivers to lookup a node's 'memory-region' property
 * entries by index and return a struct resource for the entry.
 *
 * Returns 0 on success with @res filled in. Returns -ENODEV if 'memory-region'
 * is missing or unavailable, -EINVAL for any other error.
 */
int of_reserved_mem_region_to_resource(const struct device_node *np,
				       unsigned int idx, struct resource *res)
{
	struct reserved_mem *rmem;

	if (!np)
		return -EINVAL;

	struct device_node __free(device_node) *target = of_parse_phandle(np, "memory-region", idx);
	if (!target || !of_device_is_available(target))
		return -ENODEV;

	rmem = of_reserved_mem_lookup(target);
	if (!rmem)
		return -EINVAL;

	resource_set_range(res, rmem->base, rmem->size);
	res->flags = IORESOURCE_MEM;
	res->name = rmem->name;
	return 0;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource);
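
/*
 * Getting the region as a struct resource makes it straightforward to map.
 * Sketch only (assumes a normal, mappable region; names are made up):
 *
 *	struct resource res;
 *	void *va;
 *
 *	if (!of_reserved_mem_region_to_resource(dev->of_node, 0, &res)) {
 *		va = devm_memremap(dev, res.start, resource_size(&res),
 *				   MEMREMAP_WC);
 *		if (IS_ERR(va))
 *			return PTR_ERR(va);
 *	}
 */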

/**
 * of_reserved_mem_region_to_resource_byname() - Get a reserved memory region as a resource
 * @np:		node containing 'memory-region' property
 * @name:	name of 'memory-region' property entry to lookup
 * @res:	Pointer to a struct resource to fill in with reserved region
 *
 * This function allows drivers to lookup a node's 'memory-region' property
 * entries by name and return a struct resource for the entry.
 *
 * Returns 0 on success with @res filled in, or a negative error-code on
 * failure.
 */
int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
					      const char *name,
					      struct resource *res)
{
	int idx;

	if (!name)
		return -EINVAL;

	idx = of_property_match_string(np, "memory-region-names", name);
	if (idx < 0)
		return idx;

	return of_reserved_mem_region_to_resource(np, idx, res);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource_byname);

/**
 * of_reserved_mem_region_count() - Return the number of 'memory-region' entries
 * @np:		node containing 'memory-region' property
 *
 * This function allows drivers to retrieve the number of entries for a node's
 * 'memory-region' property.
 *
 * Returns the number of entries on success, or negative error code on a
 * malformed property.
 */
int of_reserved_mem_region_count(const struct device_node *np)
{
	return of_count_phandle_with_args(np, "memory-region", NULL);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_region_count);
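
/*
 * The count can be combined with the index-based lookup above to walk every
 * region a consumer references. Sketch (made-up names, no error handling):
 *
 *	int i, n = of_reserved_mem_region_count(dev->of_node);
 *
 *	for (i = 0; i < n; i++) {
 *		struct resource res;
 *
 *		if (!of_reserved_mem_region_to_resource(dev->of_node, i, &res))
 *			dev_info(dev, "region %d: %pR\n", i, &res);
 *	}
 */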