Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 7114/1: cache-l2x0: add resume entry for l2 in secure mode

We save the l2x0 registers at the first initialization, so that platform code
can retrieve them to restore the l2x0 state after wakeup.

Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Barry Song <Baohua.Song@csr.com>
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Barry Song and committed by Russell King
91c2ebb9 8d4e652d

+163 -10
+25
arch/arm/include/asm/hardware/cache-l2x0.h
··· 67 67 #define L2X0_CACHE_ID_PART_MASK (0xf << 6) 68 68 #define L2X0_CACHE_ID_PART_L210 (1 << 6) 69 69 #define L2X0_CACHE_ID_PART_L310 (3 << 6) 70 + #define L2X0_CACHE_ID_RTL_MASK 0x3f 71 + #define L2X0_CACHE_ID_RTL_R0P0 0x0 72 + #define L2X0_CACHE_ID_RTL_R1P0 0x2 73 + #define L2X0_CACHE_ID_RTL_R2P0 0x4 74 + #define L2X0_CACHE_ID_RTL_R3P0 0x5 75 + #define L2X0_CACHE_ID_RTL_R3P1 0x6 76 + #define L2X0_CACHE_ID_RTL_R3P2 0x8 70 77 71 78 #define L2X0_AUX_CTRL_MASK 0xc0000fff 72 79 #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0 ··· 103 96 #ifndef __ASSEMBLY__ 104 97 extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); 105 98 extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask); 99 + 100 + struct l2x0_regs { 101 + unsigned long phy_base; 102 + unsigned long aux_ctrl; 103 + /* 104 + * Whether the following registers need to be saved/restored 105 + * depends on platform 106 + */ 107 + unsigned long tag_latency; 108 + unsigned long data_latency; 109 + unsigned long filter_start; 110 + unsigned long filter_end; 111 + unsigned long prefetch_ctrl; 112 + unsigned long pwr_ctrl; 113 + }; 114 + 115 + extern struct l2x0_regs l2x0_saved_regs; 116 + 106 117 #endif 107 118 108 119 #endif
+7
arch/arm/include/asm/outercache.h
··· 34 34 void (*sync)(void); 35 35 #endif 36 36 void (*set_debug)(unsigned long); 37 + void (*resume)(void); 37 38 }; 38 39 39 40 #ifdef CONFIG_OUTER_CACHE ··· 73 72 { 74 73 if (outer_cache.disable) 75 74 outer_cache.disable(); 75 + } 76 + 77 + static inline void outer_resume(void) 78 + { 79 + if (outer_cache.resume) 80 + outer_cache.resume(); 76 81 } 77 82 78 83 #else
+12
arch/arm/kernel/asm-offsets.c
··· 20 20 #include <asm/thread_info.h> 21 21 #include <asm/memory.h> 22 22 #include <asm/procinfo.h> 23 + #include <asm/hardware/cache-l2x0.h> 23 24 #include <linux/kbuild.h> 24 25 25 26 /* ··· 93 92 DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0)); 94 93 DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); 95 94 BLANK(); 95 + #ifdef CONFIG_CACHE_L2X0 96 + DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base)); 97 + DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl)); 98 + DEFINE(L2X0_R_TAG_LATENCY, offsetof(struct l2x0_regs, tag_latency)); 99 + DEFINE(L2X0_R_DATA_LATENCY, offsetof(struct l2x0_regs, data_latency)); 100 + DEFINE(L2X0_R_FILTER_START, offsetof(struct l2x0_regs, filter_start)); 101 + DEFINE(L2X0_R_FILTER_END, offsetof(struct l2x0_regs, filter_end)); 102 + DEFINE(L2X0_R_PREFETCH_CTRL, offsetof(struct l2x0_regs, prefetch_ctrl)); 103 + DEFINE(L2X0_R_PWR_CTRL, offsetof(struct l2x0_regs, pwr_ctrl)); 104 + BLANK(); 105 + #endif 96 106 #ifdef CONFIG_CPU_HAS_ASID 97 107 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); 98 108 BLANK();
+119 -10
arch/arm/mm/cache-l2x0.c
··· 33 33 static uint32_t l2x0_way_mask; /* Bitmask of active ways */ 34 34 static uint32_t l2x0_size; 35 35 36 + struct l2x0_regs l2x0_saved_regs; 37 + 38 + struct l2x0_of_data { 39 + void (*setup)(const struct device_node *, __u32 *, __u32 *); 40 + void (*save)(void); 41 + void (*resume)(void); 42 + }; 43 + 36 44 static inline void cache_wait_way(void __iomem *reg, unsigned long mask) 37 45 { 38 46 /* wait for cache operation by line or way to complete */ ··· 288 280 spin_unlock_irqrestore(&l2x0_lock, flags); 289 281 } 290 282 291 - static void __init l2x0_unlock(__u32 cache_id) 283 + static void l2x0_unlock(__u32 cache_id) 292 284 { 293 285 int lockregs; 294 286 int i; ··· 363 355 364 356 /* l2x0 controller is disabled */ 365 357 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); 358 + 359 + l2x0_saved_regs.aux_ctrl = aux; 366 360 367 361 l2x0_inv_all(); 368 362 ··· 455 445 } 456 446 } 457 447 448 + static void __init pl310_save(void) 449 + { 450 + u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & 451 + L2X0_CACHE_ID_RTL_MASK; 452 + 453 + l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base + 454 + L2X0_TAG_LATENCY_CTRL); 455 + l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base + 456 + L2X0_DATA_LATENCY_CTRL); 457 + l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base + 458 + L2X0_ADDR_FILTER_END); 459 + l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base + 460 + L2X0_ADDR_FILTER_START); 461 + 462 + if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { 463 + /* 464 + * From r2p0, there is Prefetch offset/control register 465 + */ 466 + l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base + 467 + L2X0_PREFETCH_CTRL); 468 + /* 469 + * From r3p0, there is Power control register 470 + */ 471 + if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) 472 + l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base + 473 + L2X0_POWER_CTRL); 474 + } 475 + } 476 + 477 + static void l2x0_resume(void) 478 + { 479 + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { 480 + /* 
restore aux ctrl and enable l2 */ 481 + l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID)); 482 + 483 + writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base + 484 + L2X0_AUX_CTRL); 485 + 486 + l2x0_inv_all(); 487 + 488 + writel_relaxed(1, l2x0_base + L2X0_CTRL); 489 + } 490 + } 491 + 492 + static void pl310_resume(void) 493 + { 494 + u32 l2x0_revision; 495 + 496 + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { 497 + /* restore pl310 setup */ 498 + writel_relaxed(l2x0_saved_regs.tag_latency, 499 + l2x0_base + L2X0_TAG_LATENCY_CTRL); 500 + writel_relaxed(l2x0_saved_regs.data_latency, 501 + l2x0_base + L2X0_DATA_LATENCY_CTRL); 502 + writel_relaxed(l2x0_saved_regs.filter_end, 503 + l2x0_base + L2X0_ADDR_FILTER_END); 504 + writel_relaxed(l2x0_saved_regs.filter_start, 505 + l2x0_base + L2X0_ADDR_FILTER_START); 506 + 507 + l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & 508 + L2X0_CACHE_ID_RTL_MASK; 509 + 510 + if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { 511 + writel_relaxed(l2x0_saved_regs.prefetch_ctrl, 512 + l2x0_base + L2X0_PREFETCH_CTRL); 513 + if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) 514 + writel_relaxed(l2x0_saved_regs.pwr_ctrl, 515 + l2x0_base + L2X0_POWER_CTRL); 516 + } 517 + } 518 + 519 + l2x0_resume(); 520 + } 521 + 522 + static const struct l2x0_of_data pl310_data = { 523 + pl310_of_setup, 524 + pl310_save, 525 + pl310_resume, 526 + }; 527 + 528 + static const struct l2x0_of_data l2x0_data = { 529 + l2x0_of_setup, 530 + NULL, 531 + l2x0_resume, 532 + }; 533 + 458 534 static const struct of_device_id l2x0_ids[] __initconst = { 459 - { .compatible = "arm,pl310-cache", .data = pl310_of_setup }, 460 - { .compatible = "arm,l220-cache", .data = l2x0_of_setup }, 461 - { .compatible = "arm,l210-cache", .data = l2x0_of_setup }, 535 + { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, 536 + { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, 537 + { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data }, 462 538 {} 
463 539 }; 464 540 465 541 int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask) 466 542 { 467 543 struct device_node *np; 468 - void (*l2_setup)(const struct device_node *np, 469 - __u32 *aux_val, __u32 *aux_mask); 544 + struct l2x0_of_data *data; 545 + struct resource res; 470 546 471 547 np = of_find_matching_node(NULL, l2x0_ids); 472 548 if (!np) 473 549 return -ENODEV; 474 - l2x0_base = of_iomap(np, 0); 550 + 551 + if (of_address_to_resource(np, 0, &res)) 552 + return -ENODEV; 553 + 554 + l2x0_base = ioremap(res.start, resource_size(&res)); 475 555 if (!l2x0_base) 476 556 return -ENOMEM; 477 557 558 + l2x0_saved_regs.phy_base = res.start; 559 + 560 + data = of_match_node(l2x0_ids, np)->data; 561 + 478 562 /* L2 configuration can only be changed if the cache is disabled */ 479 563 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { 480 - l2_setup = of_match_node(l2x0_ids, np)->data; 481 - if (l2_setup) 482 - l2_setup(np, &aux_val, &aux_mask); 564 + if (data->setup) 565 + data->setup(np, &aux_val, &aux_mask); 483 566 } 567 + 568 + if (data->save) 569 + data->save(); 570 + 484 571 l2x0_init(l2x0_base, aux_val, aux_mask); 572 + 573 + outer_cache.resume = data->resume; 485 574 return 0; 486 575 } 487 576 #endif