Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.7-rc6 514 lines 11 kB view raw
1/* 2 * VRAM manager for OMAP 3 * 4 * Copyright (C) 2009 Nokia Corporation 5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope that it will be useful, but 12 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License along 17 * with this program; if not, write to the Free Software Foundation, Inc., 18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 */ 20 21/*#define DEBUG*/ 22 23#include <linux/kernel.h> 24#include <linux/mm.h> 25#include <linux/list.h> 26#include <linux/slab.h> 27#include <linux/seq_file.h> 28#include <linux/memblock.h> 29#include <linux/completion.h> 30#include <linux/debugfs.h> 31#include <linux/jiffies.h> 32#include <linux/module.h> 33 34#include <asm/setup.h> 35 36#include <plat/vram.h> 37 38#ifdef DEBUG 39#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__) 40#else 41#define DBG(format, ...) 
42#endif 43 44/* postponed regions are used to temporarily store region information at boot 45 * time when we cannot yet allocate the region list */ 46#define MAX_POSTPONED_REGIONS 10 47 48static bool vram_initialized; 49static int postponed_cnt; 50static struct { 51 unsigned long paddr; 52 size_t size; 53} postponed_regions[MAX_POSTPONED_REGIONS]; 54 55struct vram_alloc { 56 struct list_head list; 57 unsigned long paddr; 58 unsigned pages; 59}; 60 61struct vram_region { 62 struct list_head list; 63 struct list_head alloc_list; 64 unsigned long paddr; 65 unsigned pages; 66}; 67 68static DEFINE_MUTEX(region_mutex); 69static LIST_HEAD(region_list); 70 71static struct vram_region *omap_vram_create_region(unsigned long paddr, 72 unsigned pages) 73{ 74 struct vram_region *rm; 75 76 rm = kzalloc(sizeof(*rm), GFP_KERNEL); 77 78 if (rm) { 79 INIT_LIST_HEAD(&rm->alloc_list); 80 rm->paddr = paddr; 81 rm->pages = pages; 82 } 83 84 return rm; 85} 86 87#if 0 88static void omap_vram_free_region(struct vram_region *vr) 89{ 90 list_del(&vr->list); 91 kfree(vr); 92} 93#endif 94 95static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr, 96 unsigned long paddr, unsigned pages) 97{ 98 struct vram_alloc *va; 99 struct vram_alloc *new; 100 101 new = kzalloc(sizeof(*va), GFP_KERNEL); 102 103 if (!new) 104 return NULL; 105 106 new->paddr = paddr; 107 new->pages = pages; 108 109 list_for_each_entry(va, &vr->alloc_list, list) { 110 if (va->paddr > new->paddr) 111 break; 112 } 113 114 list_add_tail(&new->list, &va->list); 115 116 return new; 117} 118 119static void omap_vram_free_allocation(struct vram_alloc *va) 120{ 121 list_del(&va->list); 122 kfree(va); 123} 124 125int omap_vram_add_region(unsigned long paddr, size_t size) 126{ 127 struct vram_region *rm; 128 unsigned pages; 129 130 if (vram_initialized) { 131 DBG("adding region paddr %08lx size %d\n", 132 paddr, size); 133 134 size &= PAGE_MASK; 135 pages = size >> PAGE_SHIFT; 136 137 rm = 
omap_vram_create_region(paddr, pages); 138 if (rm == NULL) 139 return -ENOMEM; 140 141 list_add(&rm->list, &region_list); 142 } else { 143 if (postponed_cnt == MAX_POSTPONED_REGIONS) 144 return -ENOMEM; 145 146 postponed_regions[postponed_cnt].paddr = paddr; 147 postponed_regions[postponed_cnt].size = size; 148 149 ++postponed_cnt; 150 } 151 return 0; 152} 153 154int omap_vram_free(unsigned long paddr, size_t size) 155{ 156 struct vram_region *rm; 157 struct vram_alloc *alloc; 158 unsigned start, end; 159 160 DBG("free mem paddr %08lx size %d\n", paddr, size); 161 162 size = PAGE_ALIGN(size); 163 164 mutex_lock(&region_mutex); 165 166 list_for_each_entry(rm, &region_list, list) { 167 list_for_each_entry(alloc, &rm->alloc_list, list) { 168 start = alloc->paddr; 169 end = alloc->paddr + (alloc->pages >> PAGE_SHIFT); 170 171 if (start >= paddr && end < paddr + size) 172 goto found; 173 } 174 } 175 176 mutex_unlock(&region_mutex); 177 return -EINVAL; 178 179found: 180 omap_vram_free_allocation(alloc); 181 182 mutex_unlock(&region_mutex); 183 return 0; 184} 185EXPORT_SYMBOL(omap_vram_free); 186 187static int _omap_vram_reserve(unsigned long paddr, unsigned pages) 188{ 189 struct vram_region *rm; 190 struct vram_alloc *alloc; 191 size_t size; 192 193 size = pages << PAGE_SHIFT; 194 195 list_for_each_entry(rm, &region_list, list) { 196 unsigned long start, end; 197 198 DBG("checking region %lx %d\n", rm->paddr, rm->pages); 199 200 start = rm->paddr; 201 end = start + (rm->pages << PAGE_SHIFT) - 1; 202 if (start > paddr || end < paddr + size - 1) 203 continue; 204 205 DBG("block ok, checking allocs\n"); 206 207 list_for_each_entry(alloc, &rm->alloc_list, list) { 208 end = alloc->paddr - 1; 209 210 if (start <= paddr && end >= paddr + size - 1) 211 goto found; 212 213 start = alloc->paddr + (alloc->pages << PAGE_SHIFT); 214 } 215 216 end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1; 217 218 if (!(start <= paddr && end >= paddr + size - 1)) 219 continue; 220found: 221 
DBG("found area start %lx, end %lx\n", start, end); 222 223 if (omap_vram_create_allocation(rm, paddr, pages) == NULL) 224 return -ENOMEM; 225 226 return 0; 227 } 228 229 return -ENOMEM; 230} 231 232int omap_vram_reserve(unsigned long paddr, size_t size) 233{ 234 unsigned pages; 235 int r; 236 237 DBG("reserve mem paddr %08lx size %d\n", paddr, size); 238 239 size = PAGE_ALIGN(size); 240 pages = size >> PAGE_SHIFT; 241 242 mutex_lock(&region_mutex); 243 244 r = _omap_vram_reserve(paddr, pages); 245 246 mutex_unlock(&region_mutex); 247 248 return r; 249} 250EXPORT_SYMBOL(omap_vram_reserve); 251 252static int _omap_vram_alloc(unsigned pages, unsigned long *paddr) 253{ 254 struct vram_region *rm; 255 struct vram_alloc *alloc; 256 257 list_for_each_entry(rm, &region_list, list) { 258 unsigned long start, end; 259 260 DBG("checking region %lx %d\n", rm->paddr, rm->pages); 261 262 start = rm->paddr; 263 264 list_for_each_entry(alloc, &rm->alloc_list, list) { 265 end = alloc->paddr; 266 267 if (end - start >= pages << PAGE_SHIFT) 268 goto found; 269 270 start = alloc->paddr + (alloc->pages << PAGE_SHIFT); 271 } 272 273 end = rm->paddr + (rm->pages << PAGE_SHIFT); 274found: 275 if (end - start < pages << PAGE_SHIFT) 276 continue; 277 278 DBG("found %lx, end %lx\n", start, end); 279 280 alloc = omap_vram_create_allocation(rm, start, pages); 281 if (alloc == NULL) 282 return -ENOMEM; 283 284 *paddr = start; 285 286 return 0; 287 } 288 289 return -ENOMEM; 290} 291 292int omap_vram_alloc(size_t size, unsigned long *paddr) 293{ 294 unsigned pages; 295 int r; 296 297 BUG_ON(!size); 298 299 DBG("alloc mem size %d\n", size); 300 301 size = PAGE_ALIGN(size); 302 pages = size >> PAGE_SHIFT; 303 304 mutex_lock(&region_mutex); 305 306 r = _omap_vram_alloc(pages, paddr); 307 308 mutex_unlock(&region_mutex); 309 310 return r; 311} 312EXPORT_SYMBOL(omap_vram_alloc); 313 314void omap_vram_get_info(unsigned long *vram, 315 unsigned long *free_vram, 316 unsigned long *largest_free_block) 
317{ 318 struct vram_region *vr; 319 struct vram_alloc *va; 320 321 *vram = 0; 322 *free_vram = 0; 323 *largest_free_block = 0; 324 325 mutex_lock(&region_mutex); 326 327 list_for_each_entry(vr, &region_list, list) { 328 unsigned free; 329 unsigned long pa; 330 331 pa = vr->paddr; 332 *vram += vr->pages << PAGE_SHIFT; 333 334 list_for_each_entry(va, &vr->alloc_list, list) { 335 free = va->paddr - pa; 336 *free_vram += free; 337 if (free > *largest_free_block) 338 *largest_free_block = free; 339 pa = va->paddr + (va->pages << PAGE_SHIFT); 340 } 341 342 free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa; 343 *free_vram += free; 344 if (free > *largest_free_block) 345 *largest_free_block = free; 346 } 347 348 mutex_unlock(&region_mutex); 349} 350EXPORT_SYMBOL(omap_vram_get_info); 351 352#if defined(CONFIG_DEBUG_FS) 353static int vram_debug_show(struct seq_file *s, void *unused) 354{ 355 struct vram_region *vr; 356 struct vram_alloc *va; 357 unsigned size; 358 359 mutex_lock(&region_mutex); 360 361 list_for_each_entry(vr, &region_list, list) { 362 size = vr->pages << PAGE_SHIFT; 363 seq_printf(s, "%08lx-%08lx (%d bytes)\n", 364 vr->paddr, vr->paddr + size - 1, 365 size); 366 367 list_for_each_entry(va, &vr->alloc_list, list) { 368 size = va->pages << PAGE_SHIFT; 369 seq_printf(s, " %08lx-%08lx (%d bytes)\n", 370 va->paddr, va->paddr + size - 1, 371 size); 372 } 373 } 374 375 mutex_unlock(&region_mutex); 376 377 return 0; 378} 379 380static int vram_debug_open(struct inode *inode, struct file *file) 381{ 382 return single_open(file, vram_debug_show, inode->i_private); 383} 384 385static const struct file_operations vram_debug_fops = { 386 .open = vram_debug_open, 387 .read = seq_read, 388 .llseek = seq_lseek, 389 .release = single_release, 390}; 391 392static int __init omap_vram_create_debugfs(void) 393{ 394 struct dentry *d; 395 396 d = debugfs_create_file("vram", S_IRUGO, NULL, 397 NULL, &vram_debug_fops); 398 if (IS_ERR(d)) 399 return PTR_ERR(d); 400 401 return 0; 
402} 403#endif 404 405static __init int omap_vram_init(void) 406{ 407 int i; 408 409 vram_initialized = 1; 410 411 for (i = 0; i < postponed_cnt; i++) 412 omap_vram_add_region(postponed_regions[i].paddr, 413 postponed_regions[i].size); 414 415#ifdef CONFIG_DEBUG_FS 416 if (omap_vram_create_debugfs()) 417 pr_err("VRAM: Failed to create debugfs file\n"); 418#endif 419 420 return 0; 421} 422 423arch_initcall(omap_vram_init); 424 425/* boottime vram alloc stuff */ 426 427/* set from board file */ 428static u32 omap_vram_sdram_start __initdata; 429static u32 omap_vram_sdram_size __initdata; 430 431/* set from kernel cmdline */ 432static u32 omap_vram_def_sdram_size __initdata; 433static u32 omap_vram_def_sdram_start __initdata; 434 435static int __init omap_vram_early_vram(char *p) 436{ 437 omap_vram_def_sdram_size = memparse(p, &p); 438 if (*p == ',') 439 omap_vram_def_sdram_start = simple_strtoul(p + 1, &p, 16); 440 return 0; 441} 442early_param("vram", omap_vram_early_vram); 443 444/* 445 * Called from map_io. We need to call to this early enough so that we 446 * can reserve the fixed SDRAM regions before VM could get hold of them. 
447 */ 448void __init omap_vram_reserve_sdram_memblock(void) 449{ 450 u32 paddr; 451 u32 size = 0; 452 453 /* cmdline arg overrides the board file definition */ 454 if (omap_vram_def_sdram_size) { 455 size = omap_vram_def_sdram_size; 456 paddr = omap_vram_def_sdram_start; 457 } 458 459 if (!size) { 460 size = omap_vram_sdram_size; 461 paddr = omap_vram_sdram_start; 462 } 463 464#ifdef CONFIG_OMAP2_VRAM_SIZE 465 if (!size) { 466 size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024; 467 paddr = 0; 468 } 469#endif 470 471 if (!size) 472 return; 473 474 size = ALIGN(size, SZ_2M); 475 476 if (paddr) { 477 if (paddr & ~PAGE_MASK) { 478 pr_err("VRAM start address 0x%08x not page aligned\n", 479 paddr); 480 return; 481 } 482 483 if (!memblock_is_region_memory(paddr, size)) { 484 pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n", 485 paddr, paddr + size - 1); 486 return; 487 } 488 489 if (memblock_is_region_reserved(paddr, size)) { 490 pr_err("FB: failed to reserve VRAM - busy\n"); 491 return; 492 } 493 494 if (memblock_reserve(paddr, size) < 0) { 495 pr_err("FB: failed to reserve VRAM - no memory\n"); 496 return; 497 } 498 } else { 499 paddr = memblock_alloc(size, SZ_2M); 500 } 501 502 memblock_free(paddr, size); 503 memblock_remove(paddr, size); 504 505 omap_vram_add_region(paddr, size); 506 507 pr_info("Reserving %u bytes SDRAM for VRAM\n", size); 508} 509 510void __init omap_vram_set_sdram_vram(u32 size, u32 start) 511{ 512 omap_vram_sdram_start = start; 513 omap_vram_sdram_size = size; 514}