/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

static inline int page_ref_count(const struct page *page)
{
	return atomic_read(&page->_refcount);
}

/**
 * folio_ref_count - The reference count on this folio.
 * @folio: The folio.
 *
 * The refcount is usually incremented by calls to folio_get() and
 * decremented by calls to folio_put(). Some typical users of the
 * folio refcount:
 *
 * - Each reference from a page table
 * - The page cache
 * - Filesystem private data
 * - The LRU list
 * - Pipes
 * - Direct IO which references this page in the process address space
 *
 * Return: The number of references to this folio.
 */
static inline int folio_ref_count(const struct folio *folio)
{
	return page_ref_count(&folio->page);
}
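/*
 * Usage sketch (illustrative; inspect_folio() stands in for whatever
 * the caller does while it holds the reference):
 *
 *	folio_get(folio);
 *	inspect_folio(folio);
 *	folio_put(folio);
 *
 * Between folio_get() and folio_put() the folio cannot be freed; the
 * final folio_put() may free it.  folio_ref_count() itself is mostly
 * useful for assertions and debugging, since the value it returns may
 * change as soon as it has been read unless the folio is frozen or
 * otherwise known to be stable.
 */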
static inline int page_count(const struct page *page)
{
	return folio_ref_count(page_folio(page));
}

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(page_ref_set))
		__page_ref_set(page, v);
}

static inline void folio_set_count(struct folio *folio, int v)
{
	set_page_count(&folio->page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void folio_ref_add(struct folio *folio, int nr)
{
	page_ref_add(&folio->page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void folio_ref_sub(struct folio *folio, int nr)
{
	page_ref_sub(&folio->page, nr);
}

static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
	int ret = atomic_sub_return(nr, &folio->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(&folio->page, -nr, ret);
	return ret;
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void folio_ref_inc(struct folio *folio)
{
	page_ref_inc(&folio->page);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -1);
}

static inline void folio_ref_dec(struct folio *folio)
{
	page_ref_dec(&folio->page);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
{
	return page_ref_sub_and_test(&folio->page, nr);
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int folio_ref_inc_return(struct folio *folio)
{
	return page_ref_inc_return(&folio->page);
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_and_test(struct folio *folio)
{
	return page_ref_dec_and_test(&folio->page);
}

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_return(struct folio *folio)
{
	return page_ref_dec_return(&folio->page);
}

static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
	bool ret = false;

	rcu_read_lock();
	/* avoid writing to the vmemmap area being remapped */
	if (page_count_writable(page, u))
		ret = atomic_add_unless(&page->_refcount, nr, u);
	rcu_read_unlock();

	if (page_ref_tracepoint_active(page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
	return page_ref_add_unless(&folio->page, nr, u);
}

/**
 * folio_try_get - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * If you do not already have a reference to a folio, you can attempt to
 * get one using this function. It may fail if, for example, the folio
 * has been freed since you found a pointer to it, or it is frozen for
 * the purposes of splitting or migration.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get(struct folio *folio)
{
	return folio_ref_add_unless(folio, 1, 0);
}

static inline bool folio_ref_try_add(struct folio *folio, int count)
{
	return folio_ref_add_unless(folio, count, 0);
}
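/*
 * Sketch of the lockless lookup pattern folio_try_get() is meant for.
 * lookup_thing() and use_folio() stand in for caller-specific code;
 * real callers usually also re-check, after taking the reference, that
 * the object they looked up still refers to this folio:
 *
 *	rcu_read_lock();
 *	folio = lookup_thing(...);
 *	if (folio && !folio_try_get(folio))
 *		folio = NULL;
 *	rcu_read_unlock();
 *	if (folio) {
 *		use_folio(folio);
 *		folio_put(folio);
 *	}
 */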
static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline int folio_ref_freeze(struct folio *folio, int count)
{
	return page_ref_freeze(&folio->page, count);
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

static inline void folio_ref_unfreeze(struct folio *folio, int count)
{
	page_ref_unfreeze(&folio->page, count);
}
#endif
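/*
 * Sketch of how the freeze/unfreeze pair is typically used, for example
 * around folio splitting or migration.  expected_refs and
 * do_exclusive_work() stand in for the caller's known reference count
 * and the work done while the folio is frozen:
 *
 *	if (folio_ref_freeze(folio, expected_refs)) {
 *		do_exclusive_work(folio);
 *		folio_ref_unfreeze(folio, expected_refs);
 *	}
 *
 * While the refcount is frozen at zero, folio_try_get() fails, so no
 * new references can be taken until the folio is unfrozen.
 */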