Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/page_ref: Convert the open coded tracepoint enabled to the new helper

As more use cases of checking if a tracepoint is enabled in a header are
coming to fruition, a helper macro, tracepoint_enabled(), has been added to
check if a tracepoint is enabled or not, and can be used with minimal header
requirements (avoid "include hell"). Convert the page_ref logic over to the
new helper macro.

Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

+21 -21
include/linux/page_ref.h
··· 7 7 #include <linux/page-flags.h> 8 8 #include <linux/tracepoint-defs.h> 9 9 10 - extern struct tracepoint __tracepoint_page_ref_set; 11 - extern struct tracepoint __tracepoint_page_ref_mod; 12 - extern struct tracepoint __tracepoint_page_ref_mod_and_test; 13 - extern struct tracepoint __tracepoint_page_ref_mod_and_return; 14 - extern struct tracepoint __tracepoint_page_ref_mod_unless; 15 - extern struct tracepoint __tracepoint_page_ref_freeze; 16 - extern struct tracepoint __tracepoint_page_ref_unfreeze; 10 + DECLARE_TRACEPOINT(page_ref_set); 11 + DECLARE_TRACEPOINT(page_ref_mod); 12 + DECLARE_TRACEPOINT(page_ref_mod_and_test); 13 + DECLARE_TRACEPOINT(page_ref_mod_and_return); 14 + DECLARE_TRACEPOINT(page_ref_mod_unless); 15 + DECLARE_TRACEPOINT(page_ref_freeze); 16 + DECLARE_TRACEPOINT(page_ref_unfreeze); 17 17 18 18 #ifdef CONFIG_DEBUG_PAGE_REF 19 19 ··· 24 24 * 25 25 * See trace_##name##_enabled(void) in include/linux/tracepoint.h 26 26 */ 27 - #define page_ref_tracepoint_active(t) static_key_false(&(t).key) 27 + #define page_ref_tracepoint_active(t) tracepoint_enabled(t) 28 28 29 29 extern void __page_ref_set(struct page *page, int v); 30 30 extern void __page_ref_mod(struct page *page, int v); ··· 75 75 static inline void set_page_count(struct page *page, int v) 76 76 { 77 77 atomic_set(&page->_refcount, v); 78 - if (page_ref_tracepoint_active(__tracepoint_page_ref_set)) 78 + if (page_ref_tracepoint_active(page_ref_set)) 79 79 __page_ref_set(page, v); 80 80 } 81 81 ··· 91 91 static inline void page_ref_add(struct page *page, int nr) 92 92 { 93 93 atomic_add(nr, &page->_refcount); 94 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) 94 + if (page_ref_tracepoint_active(page_ref_mod)) 95 95 __page_ref_mod(page, nr); 96 96 } 97 97 98 98 static inline void page_ref_sub(struct page *page, int nr) 99 99 { 100 100 atomic_sub(nr, &page->_refcount); 101 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) 101 + if (page_ref_tracepoint_active(page_ref_mod)) 102 102 __page_ref_mod(page, -nr); 103 103 } 104 104 ··· 106 106 { 107 107 int ret = atomic_sub_return(nr, &page->_refcount); 108 108 109 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) 109 + if (page_ref_tracepoint_active(page_ref_mod_and_return)) 110 110 __page_ref_mod_and_return(page, -nr, ret); 111 111 return ret; 112 112 } ··· 114 114 static inline void page_ref_inc(struct page *page) 115 115 { 116 116 atomic_inc(&page->_refcount); 117 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) 117 + if (page_ref_tracepoint_active(page_ref_mod)) 118 118 __page_ref_mod(page, 1); 119 119 } 120 120 121 121 static inline void page_ref_dec(struct page *page) 122 122 { 123 123 atomic_dec(&page->_refcount); 124 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod)) 124 + if (page_ref_tracepoint_active(page_ref_mod)) 125 125 __page_ref_mod(page, -1); 126 126 } 127 127 ··· 129 129 { 130 130 int ret = atomic_sub_and_test(nr, &page->_refcount); 131 131 132 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) 132 + if (page_ref_tracepoint_active(page_ref_mod_and_test)) 133 133 __page_ref_mod_and_test(page, -nr, ret); 134 134 return ret; 135 135 } ··· 138 138 { 139 139 int ret = atomic_inc_return(&page->_refcount); 140 140 141 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) 141 + if (page_ref_tracepoint_active(page_ref_mod_and_return)) 142 142 __page_ref_mod_and_return(page, 1, ret); 143 143 return ret; 144 144 } ··· 147 147 { 148 148 int ret = atomic_dec_and_test(&page->_refcount); 149 149 150 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test)) 150 + if (page_ref_tracepoint_active(page_ref_mod_and_test)) 151 151 __page_ref_mod_and_test(page, -1, ret); 152 152 return ret; 153 153 } ··· 156 156 { 157 157 int ret = atomic_dec_return(&page->_refcount); 158 158 159 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) 159 + if (page_ref_tracepoint_active(page_ref_mod_and_return)) 160 160 __page_ref_mod_and_return(page, -1, ret); 161 161 return ret; 162 162 } ··· 165 165 { 166 166 int ret = atomic_add_unless(&page->_refcount, nr, u); 167 167 168 - if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless)) 168 + if (page_ref_tracepoint_active(page_ref_mod_unless)) 169 169 __page_ref_mod_unless(page, nr, ret); 170 170 return ret; 171 171 } ··· 174 174 { 175 175 int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count); 176 176 177 - if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze)) 177 + if (page_ref_tracepoint_active(page_ref_freeze)) 178 178 __page_ref_freeze(page, count, ret); 179 179 return ret; 180 180 } ··· 185 185 VM_BUG_ON(count == 0); 186 186 187 187 atomic_set_release(&page->_refcount, count); 188 - if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) 188 + if (page_ref_tracepoint_active(page_ref_unfreeze)) 189 189 __page_ref_unfreeze(page, count); 190 190