/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/alloc_tag.h>
#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>
#include <linux/cleanup.h>
#include <linux/sched.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#ifdef CONFIG_MEM_ALLOC_PROFILING
#define PERCPU_MODULE_RESERVE		(8 << 13)
#else
#define PERCPU_MODULE_RESERVE		(8 << 10)
#endif
#else
#define PERCPU_MODULE_RESERVE		0
#endif

/* minimum unit size, which is also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/* minimum allocation size and shift in bytes */
#define PCPU_MIN_ALLOC_SHIFT		2
#define PCPU_MIN_ALLOC_SIZE		(1 << PCPU_MIN_ALLOC_SHIFT)

/*
 * PCPU_BITMAP_BLOCK_SIZE must be the same size as PAGE_SIZE because the
 * updating of hints is used to manage nr_empty_pop_pages both within
 * each chunk and globally.
 */
#define PCPU_BITMAP_BLOCK_SIZE		PAGE_SIZE
#define PCPU_BITMAP_BLOCK_BITS		(PCPU_BITMAP_BLOCK_SIZE >>	\
					 PCPU_MIN_ALLOC_SHIFT)

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define PERCPU_DYNAMIC_SIZE_SHIFT	12
#else
#define PERCPU_DYNAMIC_SIZE_SHIFT	10
#endif

/*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized, which allows slab to depend on the percpu allocator.
 * The following parameter decides how much resource to preallocate
 * for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
 * PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SIZE	(20 << PERCPU_DYNAMIC_SIZE_SHIFT)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as part of a
 * large page mapping, for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32-bit x86.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(28 << PERCPU_DYNAMIC_SIZE_SHIFT)
#else
#define PERCPU_DYNAMIC_RESERVE		(20 << PERCPU_DYNAMIC_SIZE_SHIFT)
#endif
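
/*
 * As a worked example of the definitions above: PERCPU_DYNAMIC_RESERVE
 * evaluates to 28 << 10 = 28 KiB on 64-bit and 20 << 10 = 20 KiB on
 * 32-bit (112 KiB and 80 KiB respectively when
 * CONFIG_RANDOM_KMALLOC_CACHES raises PERCPU_DYNAMIC_SIZE_SHIFT to 12),
 * so the reserve always covers PERCPU_DYNAMIC_EARLY_SIZE, which is
 * 20 << PERCPU_DYNAMIC_SIZE_SHIFT.
 */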

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							      int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					  void *base_addr);

extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
void __init pcpu_populate_pte(unsigned long addr);
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif

extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif

extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
					gfp_t gfp) __alloc_size(1);
extern size_t pcpu_alloc_size(void __percpu *__pdata);

#define __alloc_percpu_gfp(_size, _align, _gfp)				\
	alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp))
#define __alloc_percpu(_size, _align)					\
	alloc_hooks(pcpu_alloc_noprof(_size, _align, false, GFP_KERNEL))
#define __alloc_reserved_percpu(_size, _align)				\
	alloc_hooks(pcpu_alloc_noprof(_size, _align, true, GFP_KERNEL))

#define alloc_percpu_gfp(type, gfp)					\
	(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type),	\
						__alignof__(type), gfp)
#define alloc_percpu(type)						\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
						__alignof__(type))
#define alloc_percpu_noprof(type)					\
	((typeof(type) __percpu *)pcpu_alloc_noprof(sizeof(type),	\
				__alignof__(type), false, GFP_KERNEL))

extern void free_percpu(void __percpu *__pdata);

DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))

extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

extern unsigned long pcpu_nr_pages(void);

#endif /* __LINUX_PERCPU_H */
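
/*
 * Usage sketch (illustrative only; "struct hit_counter" and its
 * "unsigned long hits" member are hypothetical names): dynamic percpu
 * memory is obtained with alloc_percpu(), updated on the fast path with
 * the this_cpu_*() operations, summed on the slow path by walking every
 * CPU's copy with per_cpu_ptr(), and released with free_percpu().
 * this_cpu_*(), per_cpu_ptr() and for_each_possible_cpu() come from
 * <linux/percpu-defs.h> and <linux/cpumask.h>.  The DEFINE_FREE()
 * declaration above additionally allows __free(free_percpu) scoped
 * cleanup from <linux/cleanup.h>.
 *
 *	struct hit_counter {
 *		unsigned long hits;
 *	};
 *
 *	struct hit_counter __percpu *ctr;
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	ctr = alloc_percpu(struct hit_counter);
 *	if (!ctr)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(ctr->hits);
 *
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(ctr, cpu)->hits;
 *
 *	free_percpu(ctr);
 */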