Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/compaction: more trace to understand when/why compaction start/finish

It is not well understood when or why compaction starts and finishes,
or why it fails to finish.  With these new tracepoints, we can learn much
more about the start/finish reasons of compaction.  I found the following
bug with these tracepoints.

http://www.spinics.net/lists/linux-mm/msg81582.html

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Joonsoo Kim and committed by
Linus Torvalds
837d026d e34d85f0

+111 -4
+3
include/linux/compaction.h
··· 12 12 #define COMPACT_PARTIAL 3 13 13 /* The full zone was compacted */ 14 14 #define COMPACT_COMPLETE 4 15 + /* For more detailed tracepoint output */ 16 + #define COMPACT_NO_SUITABLE_PAGE 5 17 + #define COMPACT_NOT_SUITABLE_ZONE 6 15 18 /* When adding new state, please change compaction_status_string, too */ 16 19 17 20 /* Used to signal whether compaction detected need_sched() or lock contention */
+74
include/trace/events/compaction.h
··· 164 164 compaction_status_string[__entry->status]) 165 165 ); 166 166 167 + TRACE_EVENT(mm_compaction_try_to_compact_pages, 168 + 169 + TP_PROTO( 170 + int order, 171 + gfp_t gfp_mask, 172 + enum migrate_mode mode), 173 + 174 + TP_ARGS(order, gfp_mask, mode), 175 + 176 + TP_STRUCT__entry( 177 + __field(int, order) 178 + __field(gfp_t, gfp_mask) 179 + __field(enum migrate_mode, mode) 180 + ), 181 + 182 + TP_fast_assign( 183 + __entry->order = order; 184 + __entry->gfp_mask = gfp_mask; 185 + __entry->mode = mode; 186 + ), 187 + 188 + TP_printk("order=%d gfp_mask=0x%x mode=%d", 189 + __entry->order, 190 + __entry->gfp_mask, 191 + (int)__entry->mode) 192 + ); 193 + 194 + DECLARE_EVENT_CLASS(mm_compaction_suitable_template, 195 + 196 + TP_PROTO(struct zone *zone, 197 + int order, 198 + int ret), 199 + 200 + TP_ARGS(zone, order, ret), 201 + 202 + TP_STRUCT__entry( 203 + __field(int, nid) 204 + __field(char *, name) 205 + __field(int, order) 206 + __field(int, ret) 207 + ), 208 + 209 + TP_fast_assign( 210 + __entry->nid = zone_to_nid(zone); 211 + __entry->name = (char *)zone->name; 212 + __entry->order = order; 213 + __entry->ret = ret; 214 + ), 215 + 216 + TP_printk("node=%d zone=%-8s order=%d ret=%s", 217 + __entry->nid, 218 + __entry->name, 219 + __entry->order, 220 + compaction_status_string[__entry->ret]) 221 + ); 222 + 223 + DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_finished, 224 + 225 + TP_PROTO(struct zone *zone, 226 + int order, 227 + int ret), 228 + 229 + TP_ARGS(zone, order, ret) 230 + ); 231 + 232 + DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable, 233 + 234 + TP_PROTO(struct zone *zone, 235 + int order, 236 + int ret), 237 + 238 + TP_ARGS(zone, order, ret) 239 + ); 240 + 167 241 #endif /* _TRACE_COMPACTION_H */ 168 242 169 243 /* This part must be outside protection */
+34 -4
mm/compaction.c
··· 41 41 "continue", 42 42 "partial", 43 43 "complete", 44 + "no_suitable_page", 45 + "not_suitable_zone", 44 46 }; 45 47 #endif 46 48 ··· 1051 1049 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1052 1050 } 1053 1051 1054 - static int compact_finished(struct zone *zone, struct compact_control *cc, 1052 + static int __compact_finished(struct zone *zone, struct compact_control *cc, 1055 1053 const int migratetype) 1056 1054 { 1057 1055 unsigned int order; ··· 1106 1104 return COMPACT_PARTIAL; 1107 1105 } 1108 1106 1109 - return COMPACT_CONTINUE; 1107 + return COMPACT_NO_SUITABLE_PAGE; 1108 + } 1109 + 1110 + static int compact_finished(struct zone *zone, struct compact_control *cc, 1111 + const int migratetype) 1112 + { 1113 + int ret; 1114 + 1115 + ret = __compact_finished(zone, cc, migratetype); 1116 + trace_mm_compaction_finished(zone, cc->order, ret); 1117 + if (ret == COMPACT_NO_SUITABLE_PAGE) 1118 + ret = COMPACT_CONTINUE; 1119 + 1120 + return ret; 1110 1121 } 1111 1122 1112 1123 /* ··· 1129 1114 * COMPACT_PARTIAL - If the allocation would succeed without compaction 1130 1115 * COMPACT_CONTINUE - If compaction should run now 1131 1116 */ 1132 - unsigned long compaction_suitable(struct zone *zone, int order, 1117 + static unsigned long __compaction_suitable(struct zone *zone, int order, 1133 1118 int alloc_flags, int classzone_idx) 1134 1119 { 1135 1120 int fragindex; ··· 1173 1158 */ 1174 1159 fragindex = fragmentation_index(zone, order); 1175 1160 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 1176 - return COMPACT_SKIPPED; 1161 + return COMPACT_NOT_SUITABLE_ZONE; 1177 1162 1178 1163 return COMPACT_CONTINUE; 1164 + } 1165 + 1166 + unsigned long compaction_suitable(struct zone *zone, int order, 1167 + int alloc_flags, int classzone_idx) 1168 + { 1169 + unsigned long ret; 1170 + 1171 + ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx); 1172 + trace_mm_compaction_suitable(zone, order, ret); 1173 + if (ret == 
COMPACT_NOT_SUITABLE_ZONE) 1174 + ret = COMPACT_SKIPPED; 1175 + 1176 + return ret; 1179 1177 } 1180 1178 1181 1179 static int compact_zone(struct zone *zone, struct compact_control *cc) ··· 1403 1375 /* Check if the GFP flags allow compaction */ 1404 1376 if (!order || !may_enter_fs || !may_perform_io) 1405 1377 return COMPACT_SKIPPED; 1378 + 1379 + trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); 1406 1380 1407 1381 /* Compact each zone in the list */ 1408 1382 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,