include/linux/page_ref.h at v4.12
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

static inline int page_ref_count(struct page *page)
{
	return atomic_read(&page->_refcount);
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_refcount);
}

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}

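/*
 * Every helper in this header follows the same pattern: perform the
 * atomic operation on page->_refcount, then call the out-of-line
 * __page_ref_*() hook only when CONFIG_DEBUG_PAGE_REF has enabled the
 * corresponding tracepoint's static key.  With the option disabled,
 * page_ref_tracepoint_active() is the compile-time constant false and
 * the hook calls compile away to nothing.
 */
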
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	int ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_refcount, count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

#endif
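
For context, here is a minimal sketch of how callers typically use these helpers. The functions example_try_get_page() and example_freeze_section() are hypothetical names for illustration; only the page_ref_*() calls come from the header above. page_ref_add_unless(page, 1, 0) is the same operation that get_page_unless_zero() in include/linux/mm.h wraps.

#include <linux/page_ref.h>
#include <linux/types.h>

/*
 * Illustrative only -- not part of page_ref.h.
 *
 * Lockless lookup pattern: take a reference only if the page is not
 * already free or frozen (refcount 0).  Returns non-zero on success.
 */
static inline int example_try_get_page(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

/*
 * Exclusive-access pattern: atomically replace an expected refcount
 * with 0 so that concurrent example_try_get_page() calls fail, do the
 * work that requires exclusion, then restore the count.
 */
static inline bool example_freeze_section(struct page *page, int expected)
{
	if (!page_ref_freeze(page, expected))
		return false;	/* other references exist; back off */

	/* ... page is frozen: no new speculative references can succeed ... */

	page_ref_unfreeze(page, expected);
	return true;
}

The freeze/unfreeze pair is what makes the _unless variant safe: because page_ref_freeze() only succeeds when it observes exactly the expected count, and page_ref_add_unless() refuses to resurrect a zero count, the two sides cannot both win.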