/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGE_EXT_H
#define __LINUX_PAGE_EXT_H

#include <linux/types.h>
#include <linux/mmzone.h>
#include <linux/stacktrace.h>

struct pglist_data;

#ifdef CONFIG_PAGE_EXTENSION
/**
 * struct page_ext_operations - per page_ext client operations
 * @offset: Offset to the client's data within page_ext. Offset is returned to
 * the client by page_ext_init.
 * @size: The size of the client data within page_ext.
 * @need: Function that returns true if the client requires page_ext.
 * @init: (optional) Called to initialize the client once page_exts are
 * allocated.
 * @need_shared_flags: True when the client uses the shared page_ext->flags
 * field.
 *
 * Each Page Extension client must define a page_ext_operations entry in the
 * page_ext_ops array.
 */
struct page_ext_operations {
	size_t offset;
	size_t size;
	bool (*need)(void);
	void (*init)(void);
	bool need_shared_flags;
};

/*
 * Clients that use the page_ext_flags bits below must set need_shared_flags
 * to true.
 */
enum page_ext_flags {
	PAGE_EXT_OWNER,
	PAGE_EXT_OWNER_ALLOCATED,
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	PAGE_EXT_YOUNG,
	PAGE_EXT_IDLE,
#endif
};

/*
 * Page Extension can be considered as an extended mem_map.
 * A struct page_ext is associated with every page descriptor and lets
 * us attach extra information to the page. All page_ext structs are
 * allocated at boot or on a memory hotplug event, so the page_ext for
 * a pfn always exists.
 */
struct page_ext {
	unsigned long flags;
};

extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);

static inline bool early_page_ext_enabled(void)
{
	return early_page_ext;
}

#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}

static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
{
	/*
	 * page_ext is allocated per memory section. Once we cross a
	 * memory section, we have to fetch the new pointer. A non-zero
	 * remainder means @next_pfn is still within the current section,
	 * so the next page_ext can be reached by pointer arithmetic alone.
	 */
	return next_pfn % PAGES_PER_SECTION;
}
#else
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}

static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
{
	return true;
}
#endif

extern struct page_ext *page_ext_get(const struct page *page);
extern void page_ext_put(struct page_ext *page_ext);
extern struct page_ext *page_ext_lookup(unsigned long pfn);

/* Return the client's data, located @ops->offset bytes into @page_ext. */
static inline void *page_ext_data(struct page_ext *page_ext,
				  struct page_ext_operations *ops)
{
	return (void *)(page_ext) + ops->offset;
}

/* Step to the adjacent page_ext; entries are page_ext_size bytes apart. */
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
	void *next = curr;
	next += page_ext_size;
	return next;
}

struct page_ext_iter {
	unsigned long index;
	unsigned long start_pfn;
	struct page_ext *page_ext;
};

/**
 * page_ext_iter_begin() - Prepare for iterating through page extensions.
 * @iter: page extension iterator.
 * @pfn: PFN of the page we're interested in.
 *
 * Must be called with the RCU read lock held.
 *
 * Return: NULL if no page_ext exists for this page.
 */
static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter,
						   unsigned long pfn)
{
	iter->index = 0;
	iter->start_pfn = pfn;
	iter->page_ext = page_ext_lookup(pfn);

	return iter->page_ext;
}

/**
 * page_ext_iter_next() - Get the next page extension.
 * @iter: page extension iterator.
 *
 * Must be called with the RCU read lock held.
 *
 * Return: NULL if no next page_ext exists.
 */
static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter)
{
	unsigned long pfn;

	if (WARN_ON_ONCE(!iter->page_ext))
		return NULL;

	iter->index++;
	pfn = iter->start_pfn + iter->index;

	if (page_ext_iter_next_fast_possible(pfn))
		iter->page_ext = page_ext_next(iter->page_ext);
	else
		iter->page_ext = page_ext_lookup(pfn);

	return iter->page_ext;
}

/**
 * page_ext_iter_get() - Get the current page extension.
 * @iter: page extension iterator.
 *
 * Return: NULL if no page_ext exists for this iterator.
 */
static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter)
{
	return iter->page_ext;
}

/**
 * for_each_page_ext(): iterate through page_ext objects.
 * @__page: the page we're interested in
 * @__pgcount: how many pages to iterate through
 * @__page_ext: struct page_ext pointer where the current page_ext
 * object is returned
 * @__iter: struct page_ext_iter object (defined on the stack)
 *
 * IMPORTANT: must be called with the RCU read lock held.
 */
#define for_each_page_ext(__page, __pgcount, __page_ext, __iter)	\
	for (__page_ext = page_ext_iter_begin(&__iter, page_to_pfn(__page));\
	     __page_ext && __iter.index < __pgcount;			\
	     __page_ext = page_ext_iter_next(&__iter))
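
/*
 * Example usage: an illustrative sketch, not part of this header's API.
 * "my_ops" and "struct my_data" are placeholders for a client's own
 * page_ext_operations entry in the page_ext_ops array and the data it
 * reserves within page_ext.
 *
 * For a single page, page_ext_get()/page_ext_put() bracket the access
 * and take the RCU read lock internally:
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *
 *	if (page_ext) {
 *		struct my_data *data = page_ext_data(page_ext, &my_ops);
 *
 *		... read or update the client's per-page data ...
 *		page_ext_put(page_ext);
 *	}
 *
 * To visit every page of an order-N allocation, hold the RCU read lock
 * across the walk and use for_each_page_ext():
 *
 *	struct page_ext_iter iter;
 *	struct page_ext *page_ext;
 *
 *	rcu_read_lock();
 *	for_each_page_ext(page, 1 << order, page_ext, iter) {
 *		struct my_data *data = page_ext_data(page_ext, &my_ops);
 *
 *		... per-page work ...
 *	}
 *	rcu_read_unlock();
 */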

#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;

static inline bool early_page_ext_enabled(void)
{
	return false;
}

static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

static inline void page_ext_init(void)
{
}

static inline void page_ext_init_flatmem_late(void)
{
}

static inline void page_ext_init_flatmem(void)
{
}

static inline struct page_ext *page_ext_get(const struct page *page)
{
	return NULL;
}

static inline void page_ext_put(struct page_ext *page_ext)
{
}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */