
mm/compaction: add tracepoint to observe behaviour of compaction defer

The compaction deferring logic is a heavy hammer that blocks the way to
compaction. It doesn't consider overall system state, so it can falsely
prevent the user from compacting. In other words, even if the system has
a large enough range of memory to compact, compaction would be skipped
because of the deferring logic. This patch adds new tracepoints to help
understand how the deferring logic works. They will also help to check
compaction success and failure.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
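
To make the behaviour being instrumented concrete, here is a minimal
userspace C model of the deferral logic (struct zone_model, the starting
values and the main() driver are invented for this sketch; only the field
names and the update rules mirror the kernel code touched by this patch):
each failure widens the window of subsequent attempts that get skipped,
doubling up to 1 << COMPACT_MAX_DEFER_SHIFT = 64, and compact_considered,
compact_defer_shift and compact_order_failed are exactly the fields the
new tracepoints report.

/*
 * Minimal userspace sketch of the deferral logic instrumented by this
 * patch -- NOT kernel code.  struct zone_model, the starting values and
 * main() are invented for illustration; the field names and update rules
 * mirror defer_compaction()/compaction_deferred() in the diff below.
 */
#include <stdbool.h>
#include <stdio.h>

/* Do not skip compaction more than 64 times (same cap as the kernel). */
#define COMPACT_MAX_DEFER_SHIFT 6

struct zone_model {
	unsigned int compact_considered;	/* attempts seen while deferred */
	unsigned int compact_defer_shift;	/* log2 of the skip window */
	int compact_order_failed;		/* lowest order that failed */
};

/* Called after a failed compaction: widen the skip window exponentially. */
static void defer_compaction(struct zone_model *z, int order)
{
	z->compact_considered = 0;
	z->compact_defer_shift++;

	if (order < z->compact_order_failed)
		z->compact_order_failed = order;

	if (z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
	/* the patched kernel fires trace_mm_compaction_defer_compaction() here */
}

/* Returns true if this compaction attempt should be skipped. */
static bool compaction_deferred(struct zone_model *z, int order)
{
	unsigned long defer_limit = 1UL << z->compact_defer_shift;

	if (order < z->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++z->compact_considered > defer_limit)
		z->compact_considered = defer_limit;

	if (z->compact_considered >= defer_limit)
		return false;

	/* the patched kernel fires trace_mm_compaction_deferred() here */
	return true;
}

int main(void)
{
	/* Pretend an order-9 (huge-page sized) compaction already failed once. */
	struct zone_model zone = { .compact_order_failed = 9 };
	int i;

	defer_compaction(&zone, 9);

	for (i = 0; i < 4; i++)
		printf("attempt %d: %s (considered=%u limit=%lu)\n", i,
		       compaction_deferred(&zone, 9) ? "deferred" : "allowed",
		       zone.compact_considered,
		       1UL << zone.compact_defer_shift);

	return 0;
}

Run as is, the sketch prints that the first attempt after the simulated
failure is deferred and the later ones are allowed once compact_considered
catches up with the limit.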

Authored by Joonsoo Kim, committed by Linus Torvalds
24e2716f 837d026d

Total: +132 -60
include/linux/compaction.h  +5 -60
···
 extern unsigned long compaction_suitable(struct zone *zone, int order,
 					int alloc_flags, int classzone_idx);
 
-/* Do not skip compaction more than 64 times */
-#define COMPACT_MAX_DEFER_SHIFT 6
-
-/*
- * Compaction is deferred when compaction fails to result in a page
- * allocation success. 1 << compact_defer_limit compactions are skipped up
- * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
- */
-static inline void defer_compaction(struct zone *zone, int order)
-{
-	zone->compact_considered = 0;
-	zone->compact_defer_shift++;
-
-	if (order < zone->compact_order_failed)
-		zone->compact_order_failed = order;
-
-	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
-		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
-}
-
-/* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone, int order)
-{
-	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
-
-	if (order < zone->compact_order_failed)
-		return false;
-
-	/* Avoid possible overflow */
-	if (++zone->compact_considered > defer_limit)
-		zone->compact_considered = defer_limit;
-
-	return zone->compact_considered < defer_limit;
-}
-
-/*
- * Update defer tracking counters after successful compaction of given order,
- * which means an allocation either succeeded (alloc_success == true) or is
- * expected to succeed.
- */
-static inline void compaction_defer_reset(struct zone *zone, int order,
-		bool alloc_success)
-{
-	if (alloc_success) {
-		zone->compact_considered = 0;
-		zone->compact_defer_shift = 0;
-	}
-	if (order >= zone->compact_order_failed)
-		zone->compact_order_failed = order + 1;
-}
-
-/* Returns true if restarting compaction after many failures */
-static inline bool compaction_restarting(struct zone *zone, int order)
-{
-	if (order < zone->compact_order_failed)
-		return false;
-
-	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
-			zone->compact_considered >= 1UL << zone->compact_defer_shift;
-}
+extern void defer_compaction(struct zone *zone, int order);
+extern bool compaction_deferred(struct zone *zone, int order);
+extern void compaction_defer_reset(struct zone *zone, int order,
+		bool alloc_success);
+extern bool compaction_restarting(struct zone *zone, int order);
 
 #else
 static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
include/trace/events/compaction.h  +56
···
 	TP_ARGS(zone, order, ret)
 );
 
+#ifdef CONFIG_COMPACTION
+DECLARE_EVENT_CLASS(mm_compaction_defer_template,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order),
+
+	TP_STRUCT__entry(
+		__field(int, nid)
+		__field(char *, name)
+		__field(int, order)
+		__field(unsigned int, considered)
+		__field(unsigned int, defer_shift)
+		__field(int, order_failed)
+	),
+
+	TP_fast_assign(
+		__entry->nid = zone_to_nid(zone);
+		__entry->name = (char *)zone->name;
+		__entry->order = order;
+		__entry->considered = zone->compact_considered;
+		__entry->defer_shift = zone->compact_defer_shift;
+		__entry->order_failed = zone->compact_order_failed;
+	),
+
+	TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu",
+		__entry->nid,
+		__entry->name,
+		__entry->order,
+		__entry->order_failed,
+		__entry->considered,
+		1UL << __entry->defer_shift)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_deferred,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_compaction,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order)
+);
+#endif
+
 #endif /* _TRACE_COMPACTION_H */
 
 /* This part must be outside protection */
mm/compaction.c  +71
···
 }
 
 #ifdef CONFIG_COMPACTION
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_limit compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ */
+void defer_compaction(struct zone *zone, int order)
+{
+	zone->compact_considered = 0;
+	zone->compact_defer_shift++;
+
+	if (order < zone->compact_order_failed)
+		zone->compact_order_failed = order;
+
+	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+
+	trace_mm_compaction_defer_compaction(zone, order);
+}
+
+/* Returns true if compaction should be skipped this time */
+bool compaction_deferred(struct zone *zone, int order)
+{
+	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+	if (order < zone->compact_order_failed)
+		return false;
+
+	/* Avoid possible overflow */
+	if (++zone->compact_considered > defer_limit)
+		zone->compact_considered = defer_limit;
+
+	if (zone->compact_considered >= defer_limit)
+		return false;
+
+	trace_mm_compaction_deferred(zone, order);
+
+	return true;
+}
+
+/*
+ * Update defer tracking counters after successful compaction of given order,
+ * which means an allocation either succeeded (alloc_success == true) or is
+ * expected to succeed.
+ */
+void compaction_defer_reset(struct zone *zone, int order,
+		bool alloc_success)
+{
+	if (alloc_success) {
+		zone->compact_considered = 0;
+		zone->compact_defer_shift = 0;
+	}
+	if (order >= zone->compact_order_failed)
+		zone->compact_order_failed = order + 1;
+
+	trace_mm_compaction_defer_reset(zone, order);
+}
+
+/* Returns true if restarting compaction after many failures */
+bool compaction_restarting(struct zone *zone, int order)
+{
+	if (order < zone->compact_order_failed)
+		return false;
+
+	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
+			zone->compact_considered >= 1UL << zone->compact_defer_shift;
+}
+
 /* Returns true if the pageblock should be scanned for pages to isolate. */
 static inline bool isolation_suitable(struct compact_control *cc,
 				struct page *page)
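
With the patch applied, the three new events should show up alongside the
existing compaction tracepoints in tracefs (typically under
/sys/kernel/debug/tracing/events/compaction/ or
/sys/kernel/tracing/events/compaction/) and can be enabled like any other
event. Going by the TP_printk format above, a single mm_compaction_deferred
record would look roughly like the following, with purely illustrative
values:

    node=0 zone=Normal   order=9 order_failed=9 consider=2 limit=4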