Merge tag 'for-linus' of https://github.com/openrisc/linux

Pull OpenRISC updates from Stafford Horne:

- Support for the cacheinfo API to expose OpenRISC cache info via sysfs;
this also translated into some cleanups to the OpenRISC cache flush and
invalidate APIs

- Documentation updates for new mailing list and toolchain binaries

* tag 'for-linus' of https://github.com/openrisc/linux:
Documentation: openrisc: Update toolchain binaries URL
Documentation: openrisc: Update mailing list
openrisc: Add cacheinfo support
openrisc: Introduce new utility functions to flush and invalidate caches
openrisc: Refactor struct cpuinfo_or1k to reduce duplication

+214 -93
+6 -6
Documentation/arch/openrisc/openrisc_port.rst
··· 7 7 8 8 For information about OpenRISC processors and ongoing development: 9 9 10 - ======= ============================= 10 + ======= ============================== 11 11 website https://openrisc.io 12 - email openrisc@lists.librecores.org 13 - ======= ============================= 12 + email linux-openrisc@vger.kernel.org 13 + ======= ============================== 14 14 15 15 --------------------------------------------------------------------- 16 16 ··· 27 27 Instructions for building the different toolchains can be found on openrisc.io 28 28 or Stafford's toolchain build and release scripts. 29 29 30 - ========== ================================================= 31 - binaries https://github.com/openrisc/or1k-gcc/releases 30 + ========== ========================================================== 31 + binaries https://github.com/stffrdhrn/or1k-toolchain-build/releases 32 32 toolchains https://openrisc.io/software 33 33 building https://github.com/stffrdhrn/or1k-toolchain-build 34 - ========== ================================================= 34 + ========== ========================================================== 35 35 36 36 2) Building 37 37
+6 -6
Documentation/translations/zh_CN/arch/openrisc/openrisc_port.rst
··· 17 17 18 18 关于OpenRISC处理器和正在进行中的开发的信息: 19 19 20 - ======= ============================= 20 + ======= ============================== 21 21 网站 https://openrisc.io 22 - 邮箱 openrisc@lists.librecores.org 23 - ======= ============================= 22 + 邮箱 linux-openrisc@vger.kernel.org 23 + ======= ============================== 24 24 25 25 --------------------------------------------------------------------- 26 26 ··· 36 36 工具链的构建指南可以在openrisc.io或Stafford的工具链构建和发布脚本 37 37 中找到。 38 38 39 - ====== ================================================= 40 - 二进制 https://github.com/openrisc/or1k-gcc/releases 39 + ====== ========================================================== 40 + 二进制 https://github.com/stffrdhrn/or1k-toolchain-build/releases 41 41 工具链 https://openrisc.io/software 42 42 构建 https://github.com/stffrdhrn/or1k-toolchain-build 43 - ====== ================================================= 43 + ====== ========================================================== 44 44 45 45 2) 构建 46 46
+6 -6
Documentation/translations/zh_TW/arch/openrisc/openrisc_port.rst
··· 17 17 18 18 關於OpenRISC處理器和正在進行中的開發的信息: 19 19 20 - ======= ============================= 20 + ======= ============================== 21 21 網站 https://openrisc.io 22 - 郵箱 openrisc@lists.librecores.org 23 - ======= ============================= 22 + 郵箱 linux-openrisc@vger.kernel.org 23 + ======= ============================== 24 24 25 25 --------------------------------------------------------------------- 26 26 ··· 36 36 工具鏈的構建指南可以在openrisc.io或Stafford的工具鏈構建和發佈腳本 37 37 中找到。 38 38 39 - ====== ================================================= 40 - 二進制 https://github.com/openrisc/or1k-gcc/releases 39 + ====== ========================================================== 40 + 二進制 https://github.com/stffrdhrn/or1k-toolchain-build/releases 41 41 工具鏈 https://openrisc.io/software 42 42 構建 https://github.com/stffrdhrn/or1k-toolchain-build 43 - ====== ================================================= 43 + ====== ========================================================== 44 44 45 45 2) 構建 46 46
+17
arch/openrisc/include/asm/cacheflush.h
··· 23 23 */ 24 24 extern void local_dcache_page_flush(struct page *page); 25 25 extern void local_icache_page_inv(struct page *page); 26 + extern void local_dcache_range_flush(unsigned long start, unsigned long end); 27 + extern void local_dcache_range_inv(unsigned long start, unsigned long end); 28 + extern void local_icache_range_inv(unsigned long start, unsigned long end); 26 29 27 30 /* 28 31 * Data cache flushing always happen on the local cpu. Instruction cache ··· 40 37 #define icache_page_inv(page) smp_icache_page_inv(page) 41 38 extern void smp_icache_page_inv(struct page *page); 42 39 #endif /* CONFIG_SMP */ 40 + 41 + /* 42 + * Even if the actual block size is larger than L1_CACHE_BYTES, paddr 43 + * can be incremented by L1_CACHE_BYTES. When paddr is written to the 44 + * invalidate register, the entire cache line encompassing this address 45 + * is invalidated. Each subsequent reference to the same cache line will 46 + * not affect the invalidation process. 47 + */ 48 + #define local_dcache_block_flush(addr) \ 49 + local_dcache_range_flush(addr, addr + L1_CACHE_BYTES) 50 + #define local_dcache_block_inv(addr) \ 51 + local_dcache_range_inv(addr, addr + L1_CACHE_BYTES) 52 + #define local_icache_block_inv(addr) \ 53 + local_icache_range_inv(addr, addr + L1_CACHE_BYTES) 43 54 44 55 /* 45 56 * Synchronizes caches. Whenever a cpu writes executable code to memory, this
+17 -7
arch/openrisc/include/asm/cpuinfo.h
··· 15 15 #ifndef __ASM_OPENRISC_CPUINFO_H 16 16 #define __ASM_OPENRISC_CPUINFO_H 17 17 18 + #include <asm/spr.h> 19 + #include <asm/spr_defs.h> 20 + 21 + struct cache_desc { 22 + u32 size; 23 + u32 sets; 24 + u32 block_size; 25 + u32 ways; 26 + }; 27 + 18 28 struct cpuinfo_or1k { 19 29 u32 clock_frequency; 20 30 21 - u32 icache_size; 22 - u32 icache_block_size; 23 - u32 icache_ways; 24 - 25 - u32 dcache_size; 26 - u32 dcache_block_size; 27 - u32 dcache_ways; 31 + struct cache_desc icache; 32 + struct cache_desc dcache; 28 33 29 34 u16 coreid; 30 35 }; 31 36 32 37 extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS]; 33 38 extern void setup_cpuinfo(void); 39 + 40 + /* 41 + * Check if the cache component exists. 42 + */ 43 + extern bool cpu_cache_is_present(const unsigned int cache_type); 34 44 35 45 #endif /* __ASM_OPENRISC_CPUINFO_H */
+1 -1
arch/openrisc/kernel/Makefile
··· 7 7 8 8 obj-y := head.o setup.o or32_ksyms.o process.o dma.o \ 9 9 traps.o time.o irq.o entry.o ptrace.o signal.o \ 10 - sys_call_table.o unwinder.o 10 + sys_call_table.o unwinder.o cacheinfo.o 11 11 12 12 obj-$(CONFIG_SMP) += smp.o sync-timer.o 13 13 obj-$(CONFIG_STACKTRACE) += stacktrace.o
+104
arch/openrisc/kernel/cacheinfo.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * OpenRISC cacheinfo support 4 + * 5 + * Based on work done for MIPS and LoongArch. All original copyrights 6 + * apply as per the original source declaration. 7 + * 8 + * OpenRISC implementation: 9 + * Copyright (C) 2025 Sahil Siddiq <sahilcdq@proton.me> 10 + */ 11 + 12 + #include <linux/cacheinfo.h> 13 + #include <asm/cpuinfo.h> 14 + #include <asm/spr.h> 15 + #include <asm/spr_defs.h> 16 + 17 + static inline void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type, 18 + unsigned int level, struct cache_desc *cache, int cpu) 19 + { 20 + this_leaf->type = type; 21 + this_leaf->level = level; 22 + this_leaf->coherency_line_size = cache->block_size; 23 + this_leaf->number_of_sets = cache->sets; 24 + this_leaf->ways_of_associativity = cache->ways; 25 + this_leaf->size = cache->size; 26 + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); 27 + } 28 + 29 + int init_cache_level(unsigned int cpu) 30 + { 31 + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; 32 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 33 + int leaves = 0, levels = 0; 34 + unsigned long upr = mfspr(SPR_UPR); 35 + unsigned long iccfgr, dccfgr; 36 + 37 + if (!(upr & SPR_UPR_UP)) { 38 + printk(KERN_INFO 39 + "-- no UPR register... unable to detect configuration\n"); 40 + return -ENOENT; 41 + } 42 + 43 + if (cpu_cache_is_present(SPR_UPR_DCP)) { 44 + dccfgr = mfspr(SPR_DCCFGR); 45 + cpuinfo->dcache.ways = 1 << (dccfgr & SPR_DCCFGR_NCW); 46 + cpuinfo->dcache.sets = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3); 47 + cpuinfo->dcache.block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7); 48 + cpuinfo->dcache.size = 49 + cpuinfo->dcache.sets * cpuinfo->dcache.ways * cpuinfo->dcache.block_size; 50 + leaves += 1; 51 + printk(KERN_INFO 52 + "-- dcache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n", 53 + cpuinfo->dcache.size, cpuinfo->dcache.block_size, 54 + cpuinfo->dcache.sets, cpuinfo->dcache.ways); 55 + } else 56 + printk(KERN_INFO "-- dcache disabled\n"); 57 + 58 + if (cpu_cache_is_present(SPR_UPR_ICP)) { 59 + iccfgr = mfspr(SPR_ICCFGR); 60 + cpuinfo->icache.ways = 1 << (iccfgr & SPR_ICCFGR_NCW); 61 + cpuinfo->icache.sets = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3); 62 + cpuinfo->icache.block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7); 63 + cpuinfo->icache.size = 64 + cpuinfo->icache.sets * cpuinfo->icache.ways * cpuinfo->icache.block_size; 65 + leaves += 1; 66 + printk(KERN_INFO 67 + "-- icache: %d bytes total, %d bytes/line, %d set(s), %d way(s)\n", 68 + cpuinfo->icache.size, cpuinfo->icache.block_size, 69 + cpuinfo->icache.sets, cpuinfo->icache.ways); 70 + } else 71 + printk(KERN_INFO "-- icache disabled\n"); 72 + 73 + if (!leaves) 74 + return -ENOENT; 75 + 76 + levels = 1; 77 + 78 + this_cpu_ci->num_leaves = leaves; 79 + this_cpu_ci->num_levels = levels; 80 + 81 + return 0; 82 + } 83 + 84 + int populate_cache_leaves(unsigned int cpu) 85 + { 86 + struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; 87 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); 88 + struct cacheinfo *this_leaf = this_cpu_ci->info_list; 89 + int level = 1; 90 + 91 + if (cpu_cache_is_present(SPR_UPR_DCP)) { 92 + ci_leaf_init(this_leaf, CACHE_TYPE_DATA, level, &cpuinfo->dcache, cpu); 93 + this_leaf->attributes = ((mfspr(SPR_DCCFGR) & SPR_DCCFGR_CWS) >> 8) ? 94 + CACHE_WRITE_BACK : CACHE_WRITE_THROUGH; 95 + this_leaf++; 96 + } 97 + 98 + if (cpu_cache_is_present(SPR_UPR_ICP)) 99 + ci_leaf_init(this_leaf, CACHE_TYPE_INST, level, &cpuinfo->icache, cpu); 100 + 101 + this_cpu_ci->cpu_map_populated = true; 102 + 103 + return 0; 104 + }
+4 -14
arch/openrisc/kernel/dma.c
··· 17 17 #include <linux/pagewalk.h> 18 18 19 19 #include <asm/cpuinfo.h> 20 + #include <asm/cacheflush.h> 20 21 #include <asm/spr_defs.h> 21 22 #include <asm/tlbflush.h> 22 23 ··· 25 24 page_set_nocache(pte_t *pte, unsigned long addr, 26 25 unsigned long next, struct mm_walk *walk) 27 26 { 28 - unsigned long cl; 29 - struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; 30 - 31 27 pte_val(*pte) |= _PAGE_CI; 32 28 33 29 /* ··· 34 36 flush_tlb_kernel_range(addr, addr + PAGE_SIZE); 35 37 36 38 /* Flush page out of dcache */ 37 - for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size) 38 - mtspr(SPR_DCBFR, cl); 39 + local_dcache_range_flush(__pa(addr), __pa(next)); 39 40 40 41 return 0; 41 42 } ··· 95 98 void arch_sync_dma_for_device(phys_addr_t addr, size_t size, 96 99 enum dma_data_direction dir) 97 100 { 98 - unsigned long cl; 99 - struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()]; 100 - 101 101 switch (dir) { 102 102 case DMA_TO_DEVICE: 103 103 /* Flush the dcache for the requested range */ 104 - for (cl = addr; cl < addr + size; 105 - cl += cpuinfo->dcache_block_size) 106 - mtspr(SPR_DCBFR, cl); 104 + local_dcache_range_flush(addr, addr + size); 107 105 break; 108 106 case DMA_FROM_DEVICE: 109 107 /* Invalidate the dcache for the requested range */ 110 - for (cl = addr; cl < addr + size; 111 - cl += cpuinfo->dcache_block_size) 112 - mtspr(SPR_DCBIR, cl); 108 + local_dcache_range_inv(addr, addr + size); 113 109 break; 114 110 default: 115 111 /*
+3 -42
arch/openrisc/kernel/setup.c
··· 113 113 return; 114 114 } 115 115 116 - if (upr & SPR_UPR_DCP) 117 - printk(KERN_INFO 118 - "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n", 119 - cpuinfo->dcache_size, cpuinfo->dcache_block_size, 120 - cpuinfo->dcache_ways); 121 - else 122 - printk(KERN_INFO "-- dcache disabled\n"); 123 - if (upr & SPR_UPR_ICP) 124 - printk(KERN_INFO 125 - "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n", 126 - cpuinfo->icache_size, cpuinfo->icache_block_size, 127 - cpuinfo->icache_ways); 128 - else 129 - printk(KERN_INFO "-- icache disabled\n"); 130 - 131 116 if (upr & SPR_UPR_DMP) 132 117 printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n", 133 118 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), ··· 140 155 void __init setup_cpuinfo(void) 141 156 { 142 157 struct device_node *cpu; 143 - unsigned long iccfgr, dccfgr; 144 - unsigned long cache_set_size; 145 158 int cpu_id = smp_processor_id(); 146 159 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id]; 147 160 148 161 cpu = of_get_cpu_node(cpu_id, NULL); 149 162 if (!cpu) 150 163 panic("Couldn't find CPU%d in device tree...\n", cpu_id); 151 - 152 - iccfgr = mfspr(SPR_ICCFGR); 153 - cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW); 154 - cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3); 155 - cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7); 156 - cpuinfo->icache_size = 157 - cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size; 158 - 159 - dccfgr = mfspr(SPR_DCCFGR); 160 - cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW); 161 - cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3); 162 - cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7); 163 - cpuinfo->dcache_size = 164 - cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size; 165 164 166 165 if (of_property_read_u32(cpu, "clock-frequency", 167 166 &cpuinfo->clock_frequency)) { ··· 263 294 unsigned int vr, cpucfgr; 264 295 unsigned int avr; 265 296 unsigned int version; 297 + #ifdef CONFIG_SMP 266 298 struct cpuinfo_or1k *cpuinfo = v; 299 + seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid); 300 + #endif 267 301 268 302 vr = mfspr(SPR_VR); 269 303 cpucfgr = mfspr(SPR_CPUCFGR); 270 304 271 - #ifdef CONFIG_SMP 272 - seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid); 273 - #endif 274 305 if (vr & SPR_VR_UVRP) { 275 306 vr = mfspr(SPR_VR2); 276 307 version = vr & SPR_VR2_VER; ··· 289 320 seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV); 290 321 } 291 322 seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ); 292 - seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size); 293 - seq_printf(m, "dcache block size\t: %d bytes\n", 294 - cpuinfo->dcache_block_size); 295 - seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways); 296 - seq_printf(m, "icache size\t\t: %d bytes\n", cpuinfo->icache_size); 297 - seq_printf(m, "icache block size\t: %d bytes\n", 298 - cpuinfo->icache_block_size); 299 - seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways); 300 323 seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n", 301 324 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), 302 325 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
+47 -9
arch/openrisc/mm/cache.c
··· 14 14 #include <asm/spr_defs.h> 15 15 #include <asm/cache.h> 16 16 #include <asm/cacheflush.h> 17 + #include <asm/cpuinfo.h> 17 18 #include <asm/tlbflush.h> 18 19 19 - static __always_inline void cache_loop(struct page *page, const unsigned int reg) 20 + /* 21 + * Check if the cache component exists. 22 + */ 23 + bool cpu_cache_is_present(const unsigned int cache_type) 24 + { 25 + unsigned long upr = mfspr(SPR_UPR); 26 + unsigned long mask = SPR_UPR_UP | cache_type; 27 + 28 + return !((upr & mask) ^ mask); 29 + } 30 + 31 + static __always_inline void cache_loop(unsigned long paddr, unsigned long end, 32 + const unsigned short reg, const unsigned int cache_type) 33 + { 34 + if (!cpu_cache_is_present(cache_type)) 35 + return; 36 + 37 + while (paddr < end) { 38 + mtspr(reg, paddr); 39 + paddr += L1_CACHE_BYTES; 40 + } 41 + } 42 + 43 + static __always_inline void cache_loop_page(struct page *page, const unsigned short reg, 44 + const unsigned int cache_type) 20 45 { 21 46 unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT; 22 - unsigned long line = paddr & ~(L1_CACHE_BYTES - 1); 47 + unsigned long end = paddr + PAGE_SIZE; 23 48 24 - while (line < paddr + PAGE_SIZE) { 25 - mtspr(reg, line); 26 - line += L1_CACHE_BYTES; 27 - } 49 + paddr &= ~(L1_CACHE_BYTES - 1); 50 + 51 + cache_loop(paddr, end, reg, cache_type); 28 52 } 29 53 30 54 void local_dcache_page_flush(struct page *page) 31 55 { 32 - cache_loop(page, SPR_DCBFR); 56 + cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP); 33 57 } 34 58 EXPORT_SYMBOL(local_dcache_page_flush); 35 59 36 60 void local_icache_page_inv(struct page *page) 37 61 { 38 - cache_loop(page, SPR_ICBIR); 62 + cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP); 39 63 } 40 64 EXPORT_SYMBOL(local_icache_page_inv); 65 + 66 + void local_dcache_range_flush(unsigned long start, unsigned long end) 67 + { 68 + cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP); 69 + } 70 + 71 + void local_dcache_range_inv(unsigned long start, unsigned long end) 72 + { 73 + cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP); 74 + } 75 + 76 + void local_icache_range_inv(unsigned long start, unsigned long end) 77 + { 78 + cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP); 79 + } 41 80 42 81 void update_cache(struct vm_area_struct *vma, unsigned long address, 43 82 pte_t *pte) ··· 97 58 sync_icache_dcache(folio_page(folio, nr)); 98 59 } 99 60 } 100 -
+3 -2
arch/openrisc/mm/init.c
··· 35 35 #include <asm/fixmap.h> 36 36 #include <asm/tlbflush.h> 37 37 #include <asm/sections.h> 38 + #include <asm/cacheflush.h> 38 39 39 40 int mem_init_done; 40 41 ··· 177 176 barrier(); 178 177 179 178 /* Invalidate instruction caches after code modification */ 180 - mtspr(SPR_ICBIR, 0x900); 181 - mtspr(SPR_ICBIR, 0xa00); 179 + local_icache_block_inv(0x900); 180 + local_icache_block_inv(0xa00); 182 181 183 182 /* New TLB miss handlers and kernel page tables are in now place. 184 183 * Make sure that page flags get updated for all pages in TLB by