Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/page_owner: don't define fields on struct page_ext by hard-coding

There is a memory waste problem if we define fields on struct page_ext by
hard-coding. The entry size of struct page_ext includes the size of those
fields even if they are disabled at runtime. Now that extra memory can be
requested at runtime, page_owner doesn't need to define its own fields
by hard-coding.

This patch removes the hard-coded fields and uses extra memory for storing
page_owner information in page_owner. Most of the code changes are just
mechanical.

Link: http://lkml.kernel.org/r/1471315879-32294-7-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Joonsoo Kim and committed by
Linus Torvalds
9300d8df 980ac167

+58 -31
-6
include/linux/page_ext.h
··· 44 44 */ 45 45 struct page_ext { 46 46 unsigned long flags; 47 - #ifdef CONFIG_PAGE_OWNER 48 - unsigned int order; 49 - gfp_t gfp_mask; 50 - int last_migrate_reason; 51 - depot_stack_handle_t handle; 52 - #endif 53 47 }; 54 48 55 49 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+58 -25
mm/page_owner.c
··· 18 18 */ 19 19 #define PAGE_OWNER_STACK_DEPTH (16) 20 20 21 + struct page_owner { 22 + unsigned int order; 23 + gfp_t gfp_mask; 24 + int last_migrate_reason; 25 + depot_stack_handle_t handle; 26 + }; 27 + 21 28 static bool page_owner_disabled = true; 22 29 DEFINE_STATIC_KEY_FALSE(page_owner_inited); 23 30 ··· 93 86 } 94 87 95 88 struct page_ext_operations page_owner_ops = { 89 + .size = sizeof(struct page_owner), 96 90 .need = need_page_owner, 97 91 .init = init_page_owner, 98 92 }; 93 + 94 + static inline struct page_owner *get_page_owner(struct page_ext *page_ext) 95 + { 96 + return (void *)page_ext + page_owner_ops.offset; 97 + } 99 98 100 99 void __reset_page_owner(struct page *page, unsigned int order) 101 100 { ··· 169 156 gfp_t gfp_mask) 170 157 { 171 158 struct page_ext *page_ext = lookup_page_ext(page); 159 + struct page_owner *page_owner; 172 160 173 161 if (unlikely(!page_ext)) 174 162 return; 175 163 176 - page_ext->handle = save_stack(gfp_mask); 177 - page_ext->order = order; 178 - page_ext->gfp_mask = gfp_mask; 179 - page_ext->last_migrate_reason = -1; 164 + page_owner = get_page_owner(page_ext); 165 + page_owner->handle = save_stack(gfp_mask); 166 + page_owner->order = order; 167 + page_owner->gfp_mask = gfp_mask; 168 + page_owner->last_migrate_reason = -1; 180 169 181 170 __set_bit(PAGE_EXT_OWNER, &page_ext->flags); 182 171 } ··· 186 171 void __set_page_owner_migrate_reason(struct page *page, int reason) 187 172 { 188 173 struct page_ext *page_ext = lookup_page_ext(page); 174 + struct page_owner *page_owner; 175 + 189 176 if (unlikely(!page_ext)) 190 177 return; 191 178 192 - page_ext->last_migrate_reason = reason; 179 + page_owner = get_page_owner(page_ext); 180 + page_owner->last_migrate_reason = reason; 193 181 } 194 182 195 183 void __split_page_owner(struct page *page, unsigned int order) 196 184 { 197 185 int i; 198 186 struct page_ext *page_ext = lookup_page_ext(page); 187 + struct page_owner *page_owner; 199 188 200 189 if 
(unlikely(!page_ext)) 201 190 return; 202 191 203 - page_ext->order = 0; 192 + page_owner = get_page_owner(page_ext); 193 + page_owner->order = 0; 204 194 for (i = 1; i < (1 << order); i++) 205 195 __copy_page_owner(page, page + i); 206 196 } ··· 214 194 { 215 195 struct page_ext *old_ext = lookup_page_ext(oldpage); 216 196 struct page_ext *new_ext = lookup_page_ext(newpage); 197 + struct page_owner *old_page_owner, *new_page_owner; 217 198 218 199 if (unlikely(!old_ext || !new_ext)) 219 200 return; 220 201 221 - new_ext->order = old_ext->order; 222 - new_ext->gfp_mask = old_ext->gfp_mask; 223 - new_ext->last_migrate_reason = old_ext->last_migrate_reason; 224 - new_ext->handle = old_ext->handle; 202 + old_page_owner = get_page_owner(old_ext); 203 + new_page_owner = get_page_owner(new_ext); 204 + new_page_owner->order = old_page_owner->order; 205 + new_page_owner->gfp_mask = old_page_owner->gfp_mask; 206 + new_page_owner->last_migrate_reason = 207 + old_page_owner->last_migrate_reason; 208 + new_page_owner->handle = old_page_owner->handle; 225 209 226 210 /* 227 211 * We don't clear the bit on the oldpage as it's going to be freed ··· 244 220 { 245 221 struct page *page; 246 222 struct page_ext *page_ext; 223 + struct page_owner *page_owner; 247 224 unsigned long pfn = zone->zone_start_pfn, block_end_pfn; 248 225 unsigned long end_pfn = pfn + zone->spanned_pages; 249 226 unsigned long count[MIGRATE_TYPES] = { 0, }; ··· 295 270 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) 296 271 continue; 297 272 298 - page_mt = gfpflags_to_migratetype(page_ext->gfp_mask); 273 + page_owner = get_page_owner(page_ext); 274 + page_mt = gfpflags_to_migratetype( 275 + page_owner->gfp_mask); 299 276 if (pageblock_mt != page_mt) { 300 277 if (is_migrate_cma(pageblock_mt)) 301 278 count[MIGRATE_MOVABLE]++; ··· 307 280 pfn = block_end_pfn; 308 281 break; 309 282 } 310 - pfn += (1UL << page_ext->order) - 1; 283 + pfn += (1UL << page_owner->order) - 1; 311 284 } 312 285 } 313 286 ··· 320 
293 321 294 static ssize_t 322 295 print_page_owner(char __user *buf, size_t count, unsigned long pfn, 323 - struct page *page, struct page_ext *page_ext, 296 + struct page *page, struct page_owner *page_owner, 324 297 depot_stack_handle_t handle) 325 298 { 326 299 int ret; ··· 340 313 341 314 ret = snprintf(kbuf, count, 342 315 "Page allocated via order %u, mask %#x(%pGg)\n", 343 - page_ext->order, page_ext->gfp_mask, 344 - &page_ext->gfp_mask); 316 + page_owner->order, page_owner->gfp_mask, 317 + &page_owner->gfp_mask); 345 318 346 319 if (ret >= count) 347 320 goto err; 348 321 349 322 /* Print information relevant to grouping pages by mobility */ 350 323 pageblock_mt = get_pageblock_migratetype(page); 351 - page_mt = gfpflags_to_migratetype(page_ext->gfp_mask); 324 + page_mt = gfpflags_to_migratetype(page_owner->gfp_mask); 352 325 ret += snprintf(kbuf + ret, count - ret, 353 326 "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n", 354 327 pfn, ··· 365 338 if (ret >= count) 366 339 goto err; 367 340 368 - if (page_ext->last_migrate_reason != -1) { 341 + if (page_owner->last_migrate_reason != -1) { 369 342 ret += snprintf(kbuf + ret, count - ret, 370 343 "Page has been migrated, last migrate reason: %s\n", 371 - migrate_reason_names[page_ext->last_migrate_reason]); 344 + migrate_reason_names[page_owner->last_migrate_reason]); 372 345 if (ret >= count) 373 346 goto err; 374 347 } ··· 391 364 void __dump_page_owner(struct page *page) 392 365 { 393 366 struct page_ext *page_ext = lookup_page_ext(page); 367 + struct page_owner *page_owner; 394 368 unsigned long entries[PAGE_OWNER_STACK_DEPTH]; 395 369 struct stack_trace trace = { 396 370 .nr_entries = 0, ··· 407 379 pr_alert("There is not page extension available.\n"); 408 380 return; 409 381 } 410 - gfp_mask = page_ext->gfp_mask; 382 + 383 + page_owner = get_page_owner(page_ext); 384 + gfp_mask = page_owner->gfp_mask; 411 385 mt = gfpflags_to_migratetype(gfp_mask); 412 386 413 387 if (!test_bit(PAGE_EXT_OWNER, 
&page_ext->flags)) { ··· 417 387 return; 418 388 } 419 389 420 - handle = READ_ONCE(page_ext->handle); 390 + handle = READ_ONCE(page_owner->handle); 421 391 if (!handle) { 422 392 pr_alert("page_owner info is not active (free page?)\n"); 423 393 return; ··· 425 395 426 396 depot_fetch_stack(handle, &trace); 427 397 pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n", 428 - page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask); 398 + page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask); 429 399 print_stack_trace(&trace, 0); 430 400 431 - if (page_ext->last_migrate_reason != -1) 401 + if (page_owner->last_migrate_reason != -1) 432 402 pr_alert("page has been migrated, last migrate reason: %s\n", 433 - migrate_reason_names[page_ext->last_migrate_reason]); 403 + migrate_reason_names[page_owner->last_migrate_reason]); 434 404 } 435 405 436 406 static ssize_t ··· 439 409 unsigned long pfn; 440 410 struct page *page; 441 411 struct page_ext *page_ext; 412 + struct page_owner *page_owner; 442 413 depot_stack_handle_t handle; 443 414 444 415 if (!static_branch_unlikely(&page_owner_inited)) ··· 489 458 if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) 490 459 continue; 491 460 461 + page_owner = get_page_owner(page_ext); 462 + 492 463 /* 493 464 * Access to page_ext->handle isn't synchronous so we should 494 465 * be careful to access it. 495 466 */ 496 - handle = READ_ONCE(page_ext->handle); 467 + handle = READ_ONCE(page_owner->handle); 497 468 if (!handle) 498 469 continue; 499 470 ··· 503 470 *ppos = (pfn - min_low_pfn) + 1; 504 471 505 472 return print_page_owner(buf, count, pfn, page, 506 - page_ext, handle); 473 + page_owner, handle); 507 474 } 508 475 509 476 return 0;