Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'percpu-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu

Pull percpu updates from Dennis Zhou:
"Enable percpu page allocator for RISC-V.

There are RISC-V configurations with sparse NUMA configurations and
small vmalloc space causing dynamic percpu allocations to fail as the
backing chunk stride is too far apart"

* tag 'percpu-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu:
riscv: Enable pcpu page first chunk allocator
mm: Introduce flush_cache_vmap_early()

+42 -10
+1
arch/arc/include/asm/cacheflush.h
@@ -40,6 +40,7 @@

 /* TBD: optimize this */
 #define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_cache_all()

 #define flush_cache_dup_mm(mm)			/* called on fork (VIVT only) */
+2
arch/arm/include/asm/cacheflush.h
@@ -340,6 +340,8 @@
 	dsb(ishst);
 }

+#define flush_cache_vmap_early(start, end)	do { } while (0)
+
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 	if (!cache_is_vipt_nonaliasing())
+1
arch/csky/abiv1/inc/abi/cacheflush.h
@@ -43,6 +43,7 @@
  */
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 #define flush_cache_vmap(start, end)		cache_wbinv_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		cache_wbinv_all()

 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
+1
arch/csky/abiv2/inc/abi/cacheflush.h
@@ -41,6 +41,7 @@
 void flush_icache_deferred(struct mm_struct *mm);

 #define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)

 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+1
arch/m68k/include/asm/cacheflush_mm.h
@@ -191,6 +191,7 @@
 #define flush_cache_all() __flush_cache_all()

 #define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_cache_all()

 static inline void flush_cache_mm(struct mm_struct *mm)
+2
arch/mips/include/asm/cacheflush.h
@@ -97,6 +97,8 @@
 	__flush_cache_vmap();
 }

+#define flush_cache_vmap_early(start, end)	do { } while (0)
+
 extern void (*__flush_cache_vunmap)(void);

 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+1
arch/nios2/include/asm/cacheflush.h
@@ -38,6 +38,7 @@
 #define flush_icache_pages flush_icache_pages

 #define flush_cache_vmap(start, end)		flush_dcache_range(start, end)
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_dcache_range(start, end)

 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+1
arch/parisc/include/asm/cacheflush.h
@@ -41,6 +41,7 @@
 void invalidate_kernel_vmap_range(void *vaddr, int size);

 #define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_cache_all()

 void flush_dcache_folio(struct folio *folio);
+2
arch/riscv/Kconfig
@@ -416,7 +416,9 @@
 	depends on SMP && MMU
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select GENERIC_ARCH_NUMA
+	select HAVE_SETUP_PER_CPU_AREA
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
+	select NEED_PER_CPU_PAGE_FIRST_CHUNK
 	select OF_NUMA
 	select USE_PERCPU_NUMA_NODE_ID
 	help
+2 -1
arch/riscv/include/asm/cacheflush.h
@@ -37,7 +37,8 @@
 	flush_icache_mm(vma->vm_mm, 0)

 #ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end)	flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap(start, end)		flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap_early(start, end)	local_flush_tlb_kernel_range(start, end)
 #endif

 #ifndef CONFIG_SMP
+1
arch/riscv/include/asm/tlbflush.h
@@ -41,6 +41,7 @@
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end);
 void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+8
arch/riscv/mm/kasan_init.c
@@ -441,6 +441,14 @@
 		kasan_shallow_populate_pgd(vaddr, vend);
 }

+#ifdef CONFIG_KASAN_VMALLOC
+void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
+{
+	kasan_populate(kasan_mem_to_shadow(start),
+		       kasan_mem_to_shadow(start + size));
+}
+#endif
+
 static void __init create_tmp_mapping(void)
 {
 	void *ptr;
+5
arch/riscv/mm/tlbflush.c
@@ -66,6 +66,11 @@
 	local_flush_tlb_range_threshold_asid(start, size, stride, asid);
 }

+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	local_flush_tlb_range_asid(start, end, PAGE_SIZE, FLUSH_TLB_NO_ASID);
+}
+
 static void __ipi_flush_tlb_all(void *info)
 {
 	local_flush_tlb_all();
+1
arch/sh/include/asm/cacheflush.h
@@ -90,6 +90,7 @@
 			 unsigned long len);

 #define flush_cache_vmap(start, end)		local_flush_cache_all(NULL)
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		local_flush_cache_all(NULL)

 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
+1
arch/sparc/include/asm/cacheflush_32.h
@@ -48,6 +48,7 @@
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)

 #define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		flush_cache_all()

 /* When a context switch happens we must flush all user windows so that
+1
arch/sparc/include/asm/cacheflush_64.h
@@ -75,6 +75,7 @@
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)

 #define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vmap_early(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)

 #endif /* !__ASSEMBLY__ */
+4 -2
arch/xtensa/include/asm/cacheflush.h
@@ -116,8 +116,9 @@
 #define flush_cache_mm(mm)		flush_cache_all()
 #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

-#define flush_cache_vmap(start,end)	flush_cache_all()
-#define flush_cache_vunmap(start,end)	flush_cache_all()
+#define flush_cache_vmap(start,end)		flush_cache_all()
+#define flush_cache_vmap_early(start,end)	do { } while (0)
+#define flush_cache_vunmap(start,end)		flush_cache_all()

 void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_folio flush_dcache_folio
@@ -140,6 +141,7 @@
 #define flush_cache_dup_mm(mm)				do { } while (0)

 #define flush_cache_vmap(start,end)			do { } while (0)
+#define flush_cache_vmap_early(start,end)		do { } while (0)
 #define flush_cache_vunmap(start,end)			do { } while (0)

 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
+6
include/asm-generic/cacheflush.h
@@ -91,6 +91,12 @@
 }
 #endif

+#ifndef flush_cache_vmap_early
+static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
+{
+}
+#endif
+
 #ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
+1 -7
mm/percpu.c
@@ -3333,13 +3333,7 @@
 		if (rc < 0)
 			panic("failed to map percpu area, err=%d\n", rc);

-		/*
-		 * FIXME: Archs with virtual cache should flush local
-		 * cache for the linear mapping here - something
-		 * equivalent to flush_cache_vmap() on the local cpu.
-		 * flush_cache_vmap() can't be used as most supporting
-		 * data structures are not set up yet.
-		 */
+		flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);

 		/* copy static data */
 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);