Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.15 899 lines 21 kB view raw
1/* 2 * SRAM allocator for Blackfin on-chip memory 3 * 4 * Copyright 2004-2009 Analog Devices Inc. 5 * 6 * Licensed under the GPL-2 or later. 7 */ 8 9#include <linux/module.h> 10#include <linux/kernel.h> 11#include <linux/types.h> 12#include <linux/miscdevice.h> 13#include <linux/ioport.h> 14#include <linux/fcntl.h> 15#include <linux/init.h> 16#include <linux/poll.h> 17#include <linux/proc_fs.h> 18#include <linux/seq_file.h> 19#include <linux/spinlock.h> 20#include <linux/rtc.h> 21#include <linux/slab.h> 22#include <linux/mm_types.h> 23 24#include <asm/blackfin.h> 25#include <asm/mem_map.h> 26#include "blackfin_sram.h" 27 28/* the data structure for L1 scratchpad and DATA SRAM */ 29struct sram_piece { 30 void *paddr; 31 int size; 32 pid_t pid; 33 struct sram_piece *next; 34}; 35 36static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock); 37static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head); 38static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head); 39 40#if L1_DATA_A_LENGTH != 0 41static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head); 42static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head); 43#endif 44 45#if L1_DATA_B_LENGTH != 0 46static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head); 47static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head); 48#endif 49 50#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH 51static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock); 52#endif 53 54#if L1_CODE_LENGTH != 0 55static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock); 56static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head); 57static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head); 58#endif 59 60#if L2_LENGTH != 0 61static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp; 62static struct sram_piece free_l2_sram_head, used_l2_sram_head; 63#endif 64 65static struct kmem_cache *sram_piece_cache; 66 67/* L1 Scratchpad SRAM initialization function */ 68static void __init l1sram_init(void) 69{ 70 unsigned int cpu; 71 unsigned long reserve; 72 73#ifdef CONFIG_SMP 74 reserve = 0; 75#else 76 reserve = sizeof(struct l1_scratch_task_info); 77#endif 78 79 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { 80 per_cpu(free_l1_ssram_head, cpu).next = 81 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); 82 if (!per_cpu(free_l1_ssram_head, cpu).next) { 83 printk(KERN_INFO "Fail to initialize Scratchpad data SRAM.\n"); 84 return; 85 } 86 87 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve; 88 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve; 89 per_cpu(free_l1_ssram_head, cpu).next->pid = 0; 90 per_cpu(free_l1_ssram_head, cpu).next->next = NULL; 91 92 per_cpu(used_l1_ssram_head, cpu).next = NULL; 93 94 /* mutex initialize */ 95 spin_lock_init(&per_cpu(l1sram_lock, cpu)); 96 printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n", 97 L1_SCRATCH_LENGTH >> 10); 98 } 99} 100 101static void __init l1_data_sram_init(void) 102{ 103#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0 104 unsigned int cpu; 105#endif 106#if L1_DATA_A_LENGTH != 0 107 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { 108 per_cpu(free_l1_data_A_sram_head, cpu).next = 109 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); 110 if (!per_cpu(free_l1_data_A_sram_head, cpu).next) { 111 printk(KERN_INFO "Fail to initialize L1 Data A SRAM.\n"); 112 return; 113 } 114 115 per_cpu(free_l1_data_A_sram_head, cpu).next->paddr = 116 (void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1); 117 per_cpu(free_l1_data_A_sram_head, cpu).next->size = 118 L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1); 119 per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0; 120 per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL; 121 122 per_cpu(used_l1_data_A_sram_head, cpu).next = NULL; 123 124 printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n", 125 L1_DATA_A_LENGTH >> 10, 126 per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10); 127 } 128#endif 129#if L1_DATA_B_LENGTH != 0 130 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { 131 per_cpu(free_l1_data_B_sram_head, cpu).next = 132 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); 133 if (!per_cpu(free_l1_data_B_sram_head, cpu).next) { 134 printk(KERN_INFO "Fail to initialize L1 Data B SRAM.\n"); 135 return; 136 } 137 138 per_cpu(free_l1_data_B_sram_head, cpu).next->paddr = 139 (void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1); 140 per_cpu(free_l1_data_B_sram_head, cpu).next->size = 141 L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1); 142 per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0; 143 per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL; 144 145 per_cpu(used_l1_data_B_sram_head, cpu).next = NULL; 146 147 printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n", 148 L1_DATA_B_LENGTH >> 10, 149 per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10); 150 /* mutex initialize */ 151 } 152#endif 153 154#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0 155 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) 156 spin_lock_init(&per_cpu(l1_data_sram_lock, cpu)); 157#endif 158} 159 160static void __init l1_inst_sram_init(void) 161{ 162#if L1_CODE_LENGTH != 0 163 unsigned int cpu; 164 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { 165 per_cpu(free_l1_inst_sram_head, cpu).next = 166 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); 167 if (!per_cpu(free_l1_inst_sram_head, cpu).next) { 168 printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n"); 169 return; 170 } 171 172 per_cpu(free_l1_inst_sram_head, cpu).next->paddr = 173 (void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1); 174 per_cpu(free_l1_inst_sram_head, cpu).next->size = 175 L1_CODE_LENGTH - (_etext_l1 - _stext_l1); 176 per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0; 177 per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL; 178 179 per_cpu(used_l1_inst_sram_head, cpu).next = NULL; 180 181 printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n", 182 L1_CODE_LENGTH >> 10, 183 per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10); 184 185 /* mutex initialize */ 186 spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu)); 187 } 188#endif 189} 190 191#ifdef __ADSPBF60x__ 192static irqreturn_t l2_ecc_err(int irq, void *dev_id) 193{ 194 int status; 195 196 printk(KERN_ERR "L2 ecc error happened\n"); 197 status = bfin_read32(L2CTL0_STAT); 198 if (status & 0x1) 199 printk(KERN_ERR "Core channel error type:0x%x, addr:0x%x\n", 200 bfin_read32(L2CTL0_ET0), bfin_read32(L2CTL0_EADDR0)); 201 if (status & 0x2) 202 printk(KERN_ERR "System channel error type:0x%x, addr:0x%x\n", 203 bfin_read32(L2CTL0_ET1), bfin_read32(L2CTL0_EADDR1)); 204 205 status = status >> 8; 206 if (status) 207 printk(KERN_ERR "L2 Bank%d error, addr:0x%x\n", 208 status, bfin_read32(L2CTL0_ERRADDR0 + status)); 209 210 panic("L2 Ecc error"); 211 return IRQ_HANDLED; 212} 213#endif 214 215static void __init l2_sram_init(void) 216{ 217#if L2_LENGTH != 0 218 219#ifdef __ADSPBF60x__ 220 int ret; 221 222 ret = request_irq(IRQ_L2CTL0_ECC_ERR, l2_ecc_err, 0, "l2-ecc-err", 223 NULL); 224 if (unlikely(ret < 0)) { 225 printk(KERN_INFO "Fail to request l2 ecc error interrupt"); 226 return; 227 } 228#endif 229 230 free_l2_sram_head.next = 231 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); 232 if (!free_l2_sram_head.next) { 233 printk(KERN_INFO "Fail to initialize L2 SRAM.\n"); 234 return; 235 } 236 237 free_l2_sram_head.next->paddr = 238 (void *)L2_START + (_ebss_l2 - _stext_l2); 239 free_l2_sram_head.next->size = 240 L2_LENGTH - (_ebss_l2 - _stext_l2); 241 free_l2_sram_head.next->pid = 0; 242 free_l2_sram_head.next->next = NULL; 243 244 used_l2_sram_head.next = NULL; 245 246 printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n", 247 L2_LENGTH >> 10, 248 free_l2_sram_head.next->size >> 10); 249 250 /* mutex initialize */ 251 spin_lock_init(&l2_sram_lock); 252#endif 253} 254 255static int __init bfin_sram_init(void) 256{ 257 sram_piece_cache = kmem_cache_create("sram_piece_cache", 258 sizeof(struct sram_piece), 259 0, SLAB_PANIC, NULL); 260 261 l1sram_init(); 262 l1_data_sram_init(); 263 l1_inst_sram_init(); 264 l2_sram_init(); 265 266 return 0; 267} 268pure_initcall(bfin_sram_init); 269 270/* SRAM allocate function */ 271static void *_sram_alloc(size_t size, struct sram_piece *pfree_head, 272 struct sram_piece *pused_head) 273{ 274 struct sram_piece *pslot, *plast, *pavail; 275 276 if (size <= 0 || !pfree_head || !pused_head) 277 return NULL; 278 279 /* Align the size */ 280 size = (size + 3) & ~3; 281 282 pslot = pfree_head->next; 283 plast = pfree_head; 284 285 /* search an available piece slot */ 286 while (pslot != NULL && size > pslot->size) { 287 plast = pslot; 288 pslot = pslot->next; 289 } 290 291 if (!pslot) 292 return NULL; 293 294 if (pslot->size == size) { 295 plast->next = pslot->next; 296 pavail = pslot; 297 } else { 298 /* use atomic so our L1 allocator can be used atomically */ 299 pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC); 300 301 if (!pavail) 302 return NULL; 303 304 pavail->paddr = pslot->paddr; 305 pavail->size = size; 306 pslot->paddr += size; 307 pslot->size -= size; 308 } 309 310 pavail->pid = current->pid; 311 312 pslot = pused_head->next; 313 plast = pused_head; 314 315 /* insert new piece into used piece list !!! */ 316 while (pslot != NULL && pavail->paddr < pslot->paddr) { 317 plast = pslot; 318 pslot = pslot->next; 319 } 320 321 pavail->next = pslot; 322 plast->next = pavail; 323 324 return pavail->paddr; 325} 326 327/* Allocate the largest available block. */ 328static void *_sram_alloc_max(struct sram_piece *pfree_head, 329 struct sram_piece *pused_head, 330 unsigned long *psize) 331{ 332 struct sram_piece *pslot, *pmax; 333 334 if (!pfree_head || !pused_head) 335 return NULL; 336 337 pmax = pslot = pfree_head->next; 338 339 /* search an available piece slot */ 340 while (pslot != NULL) { 341 if (pslot->size > pmax->size) 342 pmax = pslot; 343 pslot = pslot->next; 344 } 345 346 if (!pmax) 347 return NULL; 348 349 *psize = pmax->size; 350 351 return _sram_alloc(*psize, pfree_head, pused_head); 352} 353 354/* SRAM free function */ 355static int _sram_free(const void *addr, 356 struct sram_piece *pfree_head, 357 struct sram_piece *pused_head) 358{ 359 struct sram_piece *pslot, *plast, *pavail; 360 361 if (!pfree_head || !pused_head) 362 return -1; 363 364 /* search the relevant memory slot */ 365 pslot = pused_head->next; 366 plast = pused_head; 367 368 /* search an available piece slot */ 369 while (pslot != NULL && pslot->paddr != addr) { 370 plast = pslot; 371 pslot = pslot->next; 372 } 373 374 if (!pslot) 375 return -1; 376 377 plast->next = pslot->next; 378 pavail = pslot; 379 pavail->pid = 0; 380 381 /* insert free pieces back to the free list */ 382 pslot = pfree_head->next; 383 plast = pfree_head; 384 385 while (pslot != NULL && addr > pslot->paddr) { 386 plast = pslot; 387 pslot = pslot->next; 388 } 389 390 if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) { 391 plast->size += pavail->size; 392 kmem_cache_free(sram_piece_cache, pavail); 393 } else { 394 pavail->next = plast->next; 395 plast->next = pavail; 396 plast = pavail; 397 } 398 399 if (pslot && plast->paddr + plast->size == pslot->paddr) { 400 plast->size += pslot->size; 401 plast->next = pslot->next; 402 kmem_cache_free(sram_piece_cache, pslot); 403 } 404 405 return 0; 406} 407 408int sram_free(const void *addr) 409{ 410 411#if L1_CODE_LENGTH != 0 412 if (addr >= (void *)get_l1_code_start() 413 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH)) 414 return l1_inst_sram_free(addr); 415 else 416#endif 417#if L1_DATA_A_LENGTH != 0 418 if (addr >= (void *)get_l1_data_a_start() 419 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH)) 420 return l1_data_A_sram_free(addr); 421 else 422#endif 423#if L1_DATA_B_LENGTH != 0 424 if (addr >= (void *)get_l1_data_b_start() 425 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH)) 426 return l1_data_B_sram_free(addr); 427 else 428#endif 429#if L2_LENGTH != 0 430 if (addr >= (void *)L2_START 431 && addr < (void *)(L2_START + L2_LENGTH)) 432 return l2_sram_free(addr); 433 else 434#endif 435 return -1; 436} 437EXPORT_SYMBOL(sram_free); 438 439void *l1_data_A_sram_alloc(size_t size) 440{ 441#if L1_DATA_A_LENGTH != 0 442 unsigned long flags; 443 void *addr; 444 unsigned int cpu; 445 446 cpu = smp_processor_id(); 447 /* add mutex operation */ 448 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 449 450 addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu), 451 &per_cpu(used_l1_data_A_sram_head, cpu)); 452 453 /* add mutex operation */ 454 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 455 456 pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n", 457 (long unsigned int)addr, size); 458 459 return addr; 460#else 461 return NULL; 462#endif 463} 464EXPORT_SYMBOL(l1_data_A_sram_alloc); 465 466int l1_data_A_sram_free(const void *addr) 467{ 468#if L1_DATA_A_LENGTH != 0 469 unsigned long flags; 470 int ret; 471 unsigned int cpu; 472 473 cpu = smp_processor_id(); 474 /* add mutex operation */ 475 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 476 477 ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu), 478 &per_cpu(used_l1_data_A_sram_head, cpu)); 479 480 /* add mutex operation */ 481 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 482 483 return ret; 484#else 485 return -1; 486#endif 487} 488EXPORT_SYMBOL(l1_data_A_sram_free); 489 490void *l1_data_B_sram_alloc(size_t size) 491{ 492#if L1_DATA_B_LENGTH != 0 493 unsigned long flags; 494 void *addr; 495 unsigned int cpu; 496 497 cpu = smp_processor_id(); 498 /* add mutex operation */ 499 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 500 501 addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu), 502 &per_cpu(used_l1_data_B_sram_head, cpu)); 503 504 /* add mutex operation */ 505 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 506 507 pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n", 508 (long unsigned int)addr, size); 509 510 return addr; 511#else 512 return NULL; 513#endif 514} 515EXPORT_SYMBOL(l1_data_B_sram_alloc); 516 517int l1_data_B_sram_free(const void *addr) 518{ 519#if L1_DATA_B_LENGTH != 0 520 unsigned long flags; 521 int ret; 522 unsigned int cpu; 523 524 cpu = smp_processor_id(); 525 /* add mutex operation */ 526 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); 527 528 ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu), 529 &per_cpu(used_l1_data_B_sram_head, cpu)); 530 531 /* add mutex operation */ 532 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); 533 534 return ret; 535#else 536 return -1; 537#endif 538} 539EXPORT_SYMBOL(l1_data_B_sram_free); 540 541void *l1_data_sram_alloc(size_t size) 542{ 543 void *addr = l1_data_A_sram_alloc(size); 544 545 if (!addr) 546 addr = l1_data_B_sram_alloc(size); 547 548 return addr; 549} 550EXPORT_SYMBOL(l1_data_sram_alloc); 551 552void *l1_data_sram_zalloc(size_t size) 553{ 554 void *addr = l1_data_sram_alloc(size); 555 556 if (addr) 557 memset(addr, 0x00, size); 558 559 return addr; 560} 561EXPORT_SYMBOL(l1_data_sram_zalloc); 562 563int l1_data_sram_free(const void *addr) 564{ 565 int ret; 566 ret = l1_data_A_sram_free(addr); 567 if (ret == -1) 568 ret = l1_data_B_sram_free(addr); 569 return ret; 570} 571EXPORT_SYMBOL(l1_data_sram_free); 572 573void *l1_inst_sram_alloc(size_t size) 574{ 575#if L1_CODE_LENGTH != 0 576 unsigned long flags; 577 void *addr; 578 unsigned int cpu; 579 580 cpu = smp_processor_id(); 581 /* add mutex operation */ 582 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); 583 584 addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu), 585 &per_cpu(used_l1_inst_sram_head, cpu)); 586 587 /* add mutex operation */ 588 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); 589 590 pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n", 591 (long unsigned int)addr, size); 592 593 return addr; 594#else 595 return NULL; 596#endif 597} 598EXPORT_SYMBOL(l1_inst_sram_alloc); 599 600int l1_inst_sram_free(const void *addr) 601{ 602#if L1_CODE_LENGTH != 0 603 unsigned long flags; 604 int ret; 605 unsigned int cpu; 606 607 cpu = smp_processor_id(); 608 /* add mutex operation */ 609 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); 610 611 ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu), 612 &per_cpu(used_l1_inst_sram_head, cpu)); 613 614 /* add mutex operation */ 615 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); 616 617 return ret; 618#else 619 return -1; 620#endif 621} 622EXPORT_SYMBOL(l1_inst_sram_free); 623 624/* L1 Scratchpad memory allocate function */ 625void *l1sram_alloc(size_t size) 626{ 627 unsigned long flags; 628 void *addr; 629 unsigned int cpu; 630 631 cpu = smp_processor_id(); 632 /* add mutex operation */ 633 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); 634 635 addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu), 636 &per_cpu(used_l1_ssram_head, cpu)); 637 638 /* add mutex operation */ 639 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); 640 641 return addr; 642} 643 644/* L1 Scratchpad memory allocate function */ 645void *l1sram_alloc_max(size_t *psize) 646{ 647 unsigned long flags; 648 void *addr; 649 unsigned int cpu; 650 651 cpu = smp_processor_id(); 652 /* add mutex operation */ 653 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); 654 655 addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu), 656 &per_cpu(used_l1_ssram_head, cpu), psize); 657 658 /* add mutex operation */ 659 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); 660 661 return addr; 662} 663 664/* L1 Scratchpad memory free function */ 665int l1sram_free(const void *addr) 666{ 667 unsigned long flags; 668 int ret; 669 unsigned int cpu; 670 671 cpu = smp_processor_id(); 672 /* add mutex operation */ 673 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); 674 675 ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu), 676 &per_cpu(used_l1_ssram_head, cpu)); 677 678 /* add mutex operation */ 679 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); 680 681 return ret; 682} 683 684void *l2_sram_alloc(size_t size) 685{ 686#if L2_LENGTH != 0 687 unsigned long flags; 688 void *addr; 689 690 /* add mutex operation */ 691 spin_lock_irqsave(&l2_sram_lock, flags); 692 693 addr = _sram_alloc(size, &free_l2_sram_head, 694 &used_l2_sram_head); 695 696 /* add mutex operation */ 697 spin_unlock_irqrestore(&l2_sram_lock, flags); 698 699 pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n", 700 (long unsigned int)addr, size); 701 702 return addr; 703#else 704 return NULL; 705#endif 706} 707EXPORT_SYMBOL(l2_sram_alloc); 708 709void *l2_sram_zalloc(size_t size) 710{ 711 void *addr = l2_sram_alloc(size); 712 713 if (addr) 714 memset(addr, 0x00, size); 715 716 return addr; 717} 718EXPORT_SYMBOL(l2_sram_zalloc); 719 720int l2_sram_free(const void *addr) 721{ 722#if L2_LENGTH != 0 723 unsigned long flags; 724 int ret; 725 726 /* add mutex operation */ 727 spin_lock_irqsave(&l2_sram_lock, flags); 728 729 ret = _sram_free(addr, &free_l2_sram_head, 730 &used_l2_sram_head); 731 732 /* add mutex operation */ 733 spin_unlock_irqrestore(&l2_sram_lock, flags); 734 735 return ret; 736#else 737 return -1; 738#endif 739} 740EXPORT_SYMBOL(l2_sram_free); 741 742int sram_free_with_lsl(const void *addr) 743{ 744 struct sram_list_struct *lsl, **tmp; 745 struct mm_struct *mm = current->mm; 746 int ret = -1; 747 748 for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next) 749 if ((*tmp)->addr == addr) { 750 lsl = *tmp; 751 ret = sram_free(addr); 752 *tmp = lsl->next; 753 kfree(lsl); 754 break; 755 } 756 757 return ret; 758} 759EXPORT_SYMBOL(sram_free_with_lsl); 760 761/* Allocate memory and keep in L1 SRAM List (lsl) so that the resources are 762 * tracked. These are designed for userspace so that when a process exits, 763 * we can safely reap their resources. 764 */ 765void *sram_alloc_with_lsl(size_t size, unsigned long flags) 766{ 767 void *addr = NULL; 768 struct sram_list_struct *lsl = NULL; 769 struct mm_struct *mm = current->mm; 770 771 lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL); 772 if (!lsl) 773 return NULL; 774 775 if (flags & L1_INST_SRAM) 776 addr = l1_inst_sram_alloc(size); 777 778 if (addr == NULL && (flags & L1_DATA_A_SRAM)) 779 addr = l1_data_A_sram_alloc(size); 780 781 if (addr == NULL && (flags & L1_DATA_B_SRAM)) 782 addr = l1_data_B_sram_alloc(size); 783 784 if (addr == NULL && (flags & L2_SRAM)) 785 addr = l2_sram_alloc(size); 786 787 if (addr == NULL) { 788 kfree(lsl); 789 return NULL; 790 } 791 lsl->addr = addr; 792 lsl->length = size; 793 lsl->next = mm->context.sram_list; 794 mm->context.sram_list = lsl; 795 return addr; 796} 797EXPORT_SYMBOL(sram_alloc_with_lsl); 798 799#ifdef CONFIG_PROC_FS 800/* Once we get a real allocator, we'll throw all of this away. 801 * Until then, we need some sort of visibility into the L1 alloc. 802 */ 803/* Need to keep line of output the same. Currently, that is 44 bytes 804 * (including newline). 805 */ 806static int _sram_proc_show(struct seq_file *m, const char *desc, 807 struct sram_piece *pfree_head, 808 struct sram_piece *pused_head) 809{ 810 struct sram_piece *pslot; 811 812 if (!pfree_head || !pused_head) 813 return -1; 814 815 seq_printf(m, "--- SRAM %-14s Size PID State \n", desc); 816 817 /* search the relevant memory slot */ 818 pslot = pused_head->next; 819 820 while (pslot != NULL) { 821 seq_printf(m, "%p-%p %10i %5i %-10s\n", 822 pslot->paddr, pslot->paddr + pslot->size, 823 pslot->size, pslot->pid, "ALLOCATED"); 824 825 pslot = pslot->next; 826 } 827 828 pslot = pfree_head->next; 829 830 while (pslot != NULL) { 831 seq_printf(m, "%p-%p %10i %5i %-10s\n", 832 pslot->paddr, pslot->paddr + pslot->size, 833 pslot->size, pslot->pid, "FREE"); 834 835 pslot = pslot->next; 836 } 837 838 return 0; 839} 840static int sram_proc_show(struct seq_file *m, void *v) 841{ 842 unsigned int cpu; 843 844 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { 845 if (_sram_proc_show(m, "Scratchpad", 846 &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu))) 847 goto not_done; 848#if L1_DATA_A_LENGTH != 0 849 if (_sram_proc_show(m, "L1 Data A", 850 &per_cpu(free_l1_data_A_sram_head, cpu), 851 &per_cpu(used_l1_data_A_sram_head, cpu))) 852 goto not_done; 853#endif 854#if L1_DATA_B_LENGTH != 0 855 if (_sram_proc_show(m, "L1 Data B", 856 &per_cpu(free_l1_data_B_sram_head, cpu), 857 &per_cpu(used_l1_data_B_sram_head, cpu))) 858 goto not_done; 859#endif 860#if L1_CODE_LENGTH != 0 861 if (_sram_proc_show(m, "L1 Instruction", 862 &per_cpu(free_l1_inst_sram_head, cpu), 863 &per_cpu(used_l1_inst_sram_head, cpu))) 864 goto not_done; 865#endif 866 } 867#if L2_LENGTH != 0 868 if (_sram_proc_show(m, "L2", &free_l2_sram_head, &used_l2_sram_head)) 869 goto not_done; 870#endif 871 not_done: 872 return 0; 873} 874 875static int sram_proc_open(struct inode *inode, struct file *file) 876{ 877 return single_open(file, sram_proc_show, NULL); 878} 879 880static const struct file_operations sram_proc_ops = { 881 .open = sram_proc_open, 882 .read = seq_read, 883 .llseek = seq_lseek, 884 .release = single_release, 885}; 886 887static int __init sram_proc_init(void) 888{ 889 struct proc_dir_entry *ptr; 890 891 ptr = proc_create("sram", S_IRUGO, NULL, &sram_proc_ops); 892 if (!ptr) { 893 printk(KERN_WARNING "unable to create /proc/sram\n"); 894 return -1; 895 } 896 return 0; 897} 898late_initcall(sram_proc_init); 899#endif