Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

misc: sram: implement reserved sram areas

This implements support for defining reserved areas as subnodes,
to keep the genpool from using these.

Suggested-by: Rob Herring <robherring2@gmail.com>
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
Tested-by: Ulrich Prinz <ulrich.prinz@googlemail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Heiko Stübner and committed by Greg Kroah-Hartman.
2da19688 96328cda

+118 -7
drivers/misc/sram.c
··· 24 24 #include <linux/err.h> 25 25 #include <linux/io.h> 26 26 #include <linux/of.h> 27 + #include <linux/of_address.h> 28 + #include <linux/list.h> 29 + #include <linux/list_sort.h> 27 30 #include <linux/platform_device.h> 28 31 #include <linux/slab.h> 29 32 #include <linux/spinlock.h> ··· 39 36 struct clk *clk; 40 37 }; 41 38 39 + struct sram_reserve { 40 + struct list_head list; 41 + u32 start; 42 + u32 size; 43 + }; 44 + 45 + static int sram_reserve_cmp(void *priv, struct list_head *a, 46 + struct list_head *b) 47 + { 48 + struct sram_reserve *ra = list_entry(a, struct sram_reserve, list); 49 + struct sram_reserve *rb = list_entry(b, struct sram_reserve, list); 50 + 51 + return ra->start - rb->start; 52 + } 53 + 42 54 static int sram_probe(struct platform_device *pdev) 43 55 { 44 56 void __iomem *virt_base; 45 57 struct sram_dev *sram; 46 58 struct resource *res; 47 - unsigned long size; 59 + struct device_node *np = pdev->dev.of_node, *child; 60 + unsigned long size, cur_start, cur_size; 61 + struct sram_reserve *rblocks, *block; 62 + struct list_head reserve_list; 63 + unsigned int nblocks; 48 64 int ret; 65 + 66 + INIT_LIST_HEAD(&reserve_list); 49 67 50 68 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 51 69 virt_base = devm_ioremap_resource(&pdev->dev, res); ··· 89 65 if (!sram->pool) 90 66 return -ENOMEM; 91 67 92 - ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base, 93 - res->start, size, -1); 94 - if (ret < 0) { 95 - if (sram->clk) 96 - clk_disable_unprepare(sram->clk); 97 - return ret; 68 + /* 69 + * We need an additional block to mark the end of the memory region 70 + * after the reserved blocks from the dt are processed. 71 + */ 72 + nblocks = (np) ? 
of_get_available_child_count(np) + 1 : 1; 73 + rblocks = kmalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL); 74 + if (!rblocks) { 75 + ret = -ENOMEM; 76 + goto err_alloc; 98 77 } 78 + 79 + block = &rblocks[0]; 80 + for_each_available_child_of_node(np, child) { 81 + struct resource child_res; 82 + 83 + ret = of_address_to_resource(child, 0, &child_res); 84 + if (ret < 0) { 85 + dev_err(&pdev->dev, 86 + "could not get address for node %s\n", 87 + child->full_name); 88 + goto err_chunks; 89 + } 90 + 91 + if (child_res.start < res->start || child_res.end > res->end) { 92 + dev_err(&pdev->dev, 93 + "reserved block %s outside the sram area\n", 94 + child->full_name); 95 + ret = -EINVAL; 96 + goto err_chunks; 97 + } 98 + 99 + block->start = child_res.start - res->start; 100 + block->size = resource_size(&child_res); 101 + list_add_tail(&block->list, &reserve_list); 102 + 103 + dev_dbg(&pdev->dev, "found reserved block 0x%x-0x%x\n", 104 + block->start, 105 + block->start + block->size); 106 + 107 + block++; 108 + } 109 + 110 + /* the last chunk marks the end of the region */ 111 + rblocks[nblocks - 1].start = size; 112 + rblocks[nblocks - 1].size = 0; 113 + list_add_tail(&rblocks[nblocks - 1].list, &reserve_list); 114 + 115 + list_sort(NULL, &reserve_list, sram_reserve_cmp); 116 + 117 + cur_start = 0; 118 + 119 + list_for_each_entry(block, &reserve_list, list) { 120 + /* can only happen if sections overlap */ 121 + if (block->start < cur_start) { 122 + dev_err(&pdev->dev, 123 + "block at 0x%x starts after current offset 0x%lx\n", 124 + block->start, cur_start); 125 + ret = -EINVAL; 126 + goto err_chunks; 127 + } 128 + 129 + /* current start is in a reserved block, so continue after it */ 130 + if (block->start == cur_start) { 131 + cur_start = block->start + block->size; 132 + continue; 133 + } 134 + 135 + /* 136 + * allocate the space between the current starting 137 + * address and the following reserved block, or the 138 + * end of the region. 
139 + */ 140 + cur_size = block->start - cur_start; 141 + 142 + dev_dbg(&pdev->dev, "adding chunk 0x%lx-0x%lx\n", 143 + cur_start, cur_start + cur_size); 144 + ret = gen_pool_add_virt(sram->pool, 145 + (unsigned long)virt_base + cur_start, 146 + res->start + cur_start, cur_size, -1); 147 + if (ret < 0) 148 + goto err_chunks; 149 + 150 + /* next allocation after this reserved block */ 151 + cur_start = block->start + block->size; 152 + } 153 + 154 + kfree(rblocks); 99 155 100 156 platform_set_drvdata(pdev, sram); 101 157 102 158 dev_dbg(&pdev->dev, "SRAM pool: %ld KiB @ 0x%p\n", size / 1024, virt_base); 103 159 104 160 return 0; 161 + 162 + err_chunks: 163 + kfree(rblocks); 164 + err_alloc: 165 + if (sram->clk) 166 + clk_disable_unprepare(sram->clk); 167 + return ret; 105 168 } 106 169 107 170 static int sram_remove(struct platform_device *pdev)