Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: compaction: export some of the functions

This commit exports some of the functions from the compaction.c file
by adding their declarations to the internal.h header file, so that
other mm-related code can use them.

This forced compaction.c to always be compiled (as opposed to being
compiled only if CONFIG_COMPACTION is defined), but to avoid
introducing code that the user did not ask for, part of compaction.c
is now wrapped in an #ifdef.

Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>

authored by

Michal Nazarewicz and committed by
Marek Szyprowski
ff9543fd 85aa125f

+193 -175
+1 -2
mm/Makefile
··· 13 13 readahead.o swap.o truncate.o vmscan.o shmem.o \ 14 14 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ 15 15 page_isolation.o mm_init.o mmu_context.o percpu.o \ 16 - $(mmu-y) 16 + compaction.o $(mmu-y) 17 17 obj-y += init-mm.o 18 18 19 19 ifdef CONFIG_NO_BOOTMEM ··· 32 32 obj-$(CONFIG_SPARSEMEM) += sparse.o 33 33 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o 34 34 obj-$(CONFIG_SLOB) += slob.o 35 - obj-$(CONFIG_COMPACTION) += compaction.o 36 35 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o 37 36 obj-$(CONFIG_KSM) += ksm.o 38 37 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
+159 -173
mm/compaction.c
··· 16 16 #include <linux/sysfs.h> 17 17 #include "internal.h" 18 18 19 + #if defined CONFIG_COMPACTION || defined CONFIG_CMA 20 + 19 21 #define CREATE_TRACE_POINTS 20 22 #include <trace/events/compaction.h> 21 - 22 - /* 23 - * compact_control is used to track pages being migrated and the free pages 24 - * they are being migrated to during memory compaction. The free_pfn starts 25 - * at the end of a zone and migrate_pfn begins at the start. Movable pages 26 - * are moved to the end of a zone during a compaction run and the run 27 - * completes when free_pfn <= migrate_pfn 28 - */ 29 - struct compact_control { 30 - struct list_head freepages; /* List of free pages to migrate to */ 31 - struct list_head migratepages; /* List of pages being migrated */ 32 - unsigned long nr_freepages; /* Number of isolated free pages */ 33 - unsigned long nr_migratepages; /* Number of pages to migrate */ 34 - unsigned long free_pfn; /* isolate_freepages search base */ 35 - unsigned long migrate_pfn; /* isolate_migratepages search base */ 36 - bool sync; /* Synchronous migration */ 37 - 38 - int order; /* order a direct compactor needs */ 39 - int migratetype; /* MOVABLE, RECLAIMABLE etc */ 40 - struct zone *zone; 41 - }; 42 23 43 24 static unsigned long release_freepages(struct list_head *freelist) 44 25 { ··· 33 52 } 34 53 35 54 return count; 55 + } 56 + 57 + static void map_pages(struct list_head *list) 58 + { 59 + struct page *page; 60 + 61 + list_for_each_entry(page, list, lru) { 62 + arch_alloc_page(page, 0); 63 + kernel_map_pages(page, 1, 1); 64 + } 36 65 } 37 66 38 67 /* ··· 113 122 * (which may be greater then end_pfn if end fell in a middle of 114 123 * a free page). 
115 124 */ 116 - static unsigned long 125 + unsigned long 117 126 isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn) 118 127 { 119 128 unsigned long isolated, pfn, block_end_pfn, flags; ··· 167 176 return pfn; 168 177 } 169 178 170 - /* Returns true if the page is within a block suitable for migration to */ 171 - static bool suitable_migration_target(struct page *page) 172 - { 173 - 174 - int migratetype = get_pageblock_migratetype(page); 175 - 176 - /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ 177 - if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) 178 - return false; 179 - 180 - /* If the page is a large free page, then allow migration */ 181 - if (PageBuddy(page) && page_order(page) >= pageblock_order) 182 - return true; 183 - 184 - /* If the block is MIGRATE_MOVABLE, allow migration */ 185 - if (migratetype == MIGRATE_MOVABLE) 186 - return true; 187 - 188 - /* Otherwise skip the block */ 189 - return false; 190 - } 191 - 192 - static void map_pages(struct list_head *list) 193 - { 194 - struct page *page; 195 - 196 - list_for_each_entry(page, list, lru) { 197 - arch_alloc_page(page, 0); 198 - kernel_map_pages(page, 1, 1); 199 - } 200 - } 201 - 202 - /* 203 - * Based on information in the current compact_control, find blocks 204 - * suitable for isolating free pages from and then isolate them. 205 - */ 206 - static void isolate_freepages(struct zone *zone, 207 - struct compact_control *cc) 208 - { 209 - struct page *page; 210 - unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn; 211 - unsigned long flags; 212 - int nr_freepages = cc->nr_freepages; 213 - struct list_head *freelist = &cc->freepages; 214 - 215 - /* 216 - * Initialise the free scanner. The starting point is where we last 217 - * scanned from (or the end of the zone if starting). The low point 218 - * is the end of the pageblock the migration scanner is using. 
219 - */ 220 - pfn = cc->free_pfn; 221 - low_pfn = cc->migrate_pfn + pageblock_nr_pages; 222 - 223 - /* 224 - * Take care that if the migration scanner is at the end of the zone 225 - * that the free scanner does not accidentally move to the next zone 226 - * in the next isolation cycle. 227 - */ 228 - high_pfn = min(low_pfn, pfn); 229 - 230 - zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; 231 - 232 - /* 233 - * Isolate free pages until enough are available to migrate the 234 - * pages on cc->migratepages. We stop searching if the migrate 235 - * and free page scanners meet or enough free pages are isolated. 236 - */ 237 - for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; 238 - pfn -= pageblock_nr_pages) { 239 - unsigned long isolated; 240 - 241 - if (!pfn_valid(pfn)) 242 - continue; 243 - 244 - /* 245 - * Check for overlapping nodes/zones. It's possible on some 246 - * configurations to have a setup like 247 - * node0 node1 node0 248 - * i.e. it's possible that all pages within a zones range of 249 - * pages do not belong to a single zone. 250 - */ 251 - page = pfn_to_page(pfn); 252 - if (page_zone(page) != zone) 253 - continue; 254 - 255 - /* Check the block is suitable for migration */ 256 - if (!suitable_migration_target(page)) 257 - continue; 258 - 259 - /* 260 - * Found a block suitable for isolating free pages from. Now 261 - * we disabled interrupts, double check things are ok and 262 - * isolate the pages. This is to minimise the time IRQs 263 - * are disabled 264 - */ 265 - isolated = 0; 266 - spin_lock_irqsave(&zone->lock, flags); 267 - if (suitable_migration_target(page)) { 268 - end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); 269 - isolated = isolate_freepages_block(pfn, end_pfn, 270 - freelist, false); 271 - nr_freepages += isolated; 272 - } 273 - spin_unlock_irqrestore(&zone->lock, flags); 274 - 275 - /* 276 - * Record the highest PFN we isolated pages from. 
When next 277 - * looking for free pages, the search will restart here as 278 - * page migration may have returned some pages to the allocator 279 - */ 280 - if (isolated) 281 - high_pfn = max(high_pfn, pfn); 282 - } 283 - 284 - /* split_free_page does not map the pages */ 285 - map_pages(freelist); 286 - 287 - cc->free_pfn = high_pfn; 288 - cc->nr_freepages = nr_freepages; 289 - } 290 - 291 179 /* Update the number of anon and file isolated pages in the zone */ 292 180 static void acct_isolated(struct zone *zone, struct compact_control *cc) 293 181 { ··· 195 325 return isolated > (inactive + active) / 2; 196 326 } 197 327 198 - /* possible outcome of isolate_migratepages */ 199 - typedef enum { 200 - ISOLATE_ABORT, /* Abort compaction now */ 201 - ISOLATE_NONE, /* No pages isolated, continue scanning */ 202 - ISOLATE_SUCCESS, /* Pages isolated, migrate */ 203 - } isolate_migrate_t; 204 - 205 328 /** 206 329 * isolate_migratepages_range() - isolate all migrate-able pages in range. 207 330 * @zone: Zone pages are in. ··· 214 351 * does not modify any cc's fields, in particular it does not modify 215 352 * (or read for that matter) cc->migrate_pfn. 216 353 */ 217 - static unsigned long 354 + unsigned long 218 355 isolate_migratepages_range(struct zone *zone, struct compact_control *cc, 219 356 unsigned long low_pfn, unsigned long end_pfn) 220 357 { ··· 350 487 return low_pfn; 351 488 } 352 489 353 - /* 354 - * Isolate all pages that can be migrated from the block pointed to by 355 - * the migrate scanner within compact_control. 
356 - */ 357 - static isolate_migrate_t isolate_migratepages(struct zone *zone, 358 - struct compact_control *cc) 490 + #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 491 + #ifdef CONFIG_COMPACTION 492 + 493 + /* Returns true if the page is within a block suitable for migration to */ 494 + static bool suitable_migration_target(struct page *page) 359 495 { 360 - unsigned long low_pfn, end_pfn; 361 496 362 - /* Do not scan outside zone boundaries */ 363 - low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn); 497 + int migratetype = get_pageblock_migratetype(page); 364 498 365 - /* Only scan within a pageblock boundary */ 366 - end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages); 499 + /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ 500 + if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) 501 + return false; 367 502 368 - /* Do not cross the free scanner or scan within a memory hole */ 369 - if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { 370 - cc->migrate_pfn = end_pfn; 371 - return ISOLATE_NONE; 503 + /* If the page is a large free page, then allow migration */ 504 + if (PageBuddy(page) && page_order(page) >= pageblock_order) 505 + return true; 506 + 507 + /* If the block is MIGRATE_MOVABLE, allow migration */ 508 + if (migratetype == MIGRATE_MOVABLE) 509 + return true; 510 + 511 + /* Otherwise skip the block */ 512 + return false; 513 + } 514 + 515 + /* 516 + * Based on information in the current compact_control, find blocks 517 + * suitable for isolating free pages from and then isolate them. 518 + */ 519 + static void isolate_freepages(struct zone *zone, 520 + struct compact_control *cc) 521 + { 522 + struct page *page; 523 + unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn; 524 + unsigned long flags; 525 + int nr_freepages = cc->nr_freepages; 526 + struct list_head *freelist = &cc->freepages; 527 + 528 + /* 529 + * Initialise the free scanner. 
The starting point is where we last 530 + * scanned from (or the end of the zone if starting). The low point 531 + * is the end of the pageblock the migration scanner is using. 532 + */ 533 + pfn = cc->free_pfn; 534 + low_pfn = cc->migrate_pfn + pageblock_nr_pages; 535 + 536 + /* 537 + * Take care that if the migration scanner is at the end of the zone 538 + * that the free scanner does not accidentally move to the next zone 539 + * in the next isolation cycle. 540 + */ 541 + high_pfn = min(low_pfn, pfn); 542 + 543 + zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; 544 + 545 + /* 546 + * Isolate free pages until enough are available to migrate the 547 + * pages on cc->migratepages. We stop searching if the migrate 548 + * and free page scanners meet or enough free pages are isolated. 549 + */ 550 + for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; 551 + pfn -= pageblock_nr_pages) { 552 + unsigned long isolated; 553 + 554 + if (!pfn_valid(pfn)) 555 + continue; 556 + 557 + /* 558 + * Check for overlapping nodes/zones. It's possible on some 559 + * configurations to have a setup like 560 + * node0 node1 node0 561 + * i.e. it's possible that all pages within a zones range of 562 + * pages do not belong to a single zone. 563 + */ 564 + page = pfn_to_page(pfn); 565 + if (page_zone(page) != zone) 566 + continue; 567 + 568 + /* Check the block is suitable for migration */ 569 + if (!suitable_migration_target(page)) 570 + continue; 571 + 572 + /* 573 + * Found a block suitable for isolating free pages from. Now 574 + * we disabled interrupts, double check things are ok and 575 + * isolate the pages. 
This is to minimise the time IRQs 576 + * are disabled 577 + */ 578 + isolated = 0; 579 + spin_lock_irqsave(&zone->lock, flags); 580 + if (suitable_migration_target(page)) { 581 + end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); 582 + isolated = isolate_freepages_block(pfn, end_pfn, 583 + freelist, false); 584 + nr_freepages += isolated; 585 + } 586 + spin_unlock_irqrestore(&zone->lock, flags); 587 + 588 + /* 589 + * Record the highest PFN we isolated pages from. When next 590 + * looking for free pages, the search will restart here as 591 + * page migration may have returned some pages to the allocator 592 + */ 593 + if (isolated) 594 + high_pfn = max(high_pfn, pfn); 372 595 } 373 596 374 - /* Perform the isolation */ 375 - low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn); 376 - if (!low_pfn) 377 - return ISOLATE_ABORT; 597 + /* split_free_page does not map the pages */ 598 + map_pages(freelist); 378 599 379 - cc->migrate_pfn = low_pfn; 380 - 381 - return ISOLATE_SUCCESS; 600 + cc->free_pfn = high_pfn; 601 + cc->nr_freepages = nr_freepages; 382 602 } 383 603 384 604 /* ··· 508 562 509 563 cc->nr_migratepages = nr_migratepages; 510 564 cc->nr_freepages = nr_freepages; 565 + } 566 + 567 + /* possible outcome of isolate_migratepages */ 568 + typedef enum { 569 + ISOLATE_ABORT, /* Abort compaction now */ 570 + ISOLATE_NONE, /* No pages isolated, continue scanning */ 571 + ISOLATE_SUCCESS, /* Pages isolated, migrate */ 572 + } isolate_migrate_t; 573 + 574 + /* 575 + * Isolate all pages that can be migrated from the block pointed to by 576 + * the migrate scanner within compact_control. 
577 + */ 578 + static isolate_migrate_t isolate_migratepages(struct zone *zone, 579 + struct compact_control *cc) 580 + { 581 + unsigned long low_pfn, end_pfn; 582 + 583 + /* Do not scan outside zone boundaries */ 584 + low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn); 585 + 586 + /* Only scan within a pageblock boundary */ 587 + end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages); 588 + 589 + /* Do not cross the free scanner or scan within a memory hole */ 590 + if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { 591 + cc->migrate_pfn = end_pfn; 592 + return ISOLATE_NONE; 593 + } 594 + 595 + /* Perform the isolation */ 596 + low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn); 597 + if (!low_pfn) 598 + return ISOLATE_ABORT; 599 + 600 + cc->migrate_pfn = low_pfn; 601 + 602 + return ISOLATE_SUCCESS; 511 603 } 512 604 513 605 static int compact_finished(struct zone *zone, ··· 894 910 return device_remove_file(&node->dev, &dev_attr_compact); 895 911 } 896 912 #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 913 + 914 + #endif /* CONFIG_COMPACTION */
+33
mm/internal.h
··· 100 100 extern bool is_free_buddy_page(struct page *page); 101 101 #endif 102 102 103 + #if defined CONFIG_COMPACTION || defined CONFIG_CMA 104 + 105 + /* 106 + * in mm/compaction.c 107 + */ 108 + /* 109 + * compact_control is used to track pages being migrated and the free pages 110 + * they are being migrated to during memory compaction. The free_pfn starts 111 + * at the end of a zone and migrate_pfn begins at the start. Movable pages 112 + * are moved to the end of a zone during a compaction run and the run 113 + * completes when free_pfn <= migrate_pfn 114 + */ 115 + struct compact_control { 116 + struct list_head freepages; /* List of free pages to migrate to */ 117 + struct list_head migratepages; /* List of pages being migrated */ 118 + unsigned long nr_freepages; /* Number of isolated free pages */ 119 + unsigned long nr_migratepages; /* Number of pages to migrate */ 120 + unsigned long free_pfn; /* isolate_freepages search base */ 121 + unsigned long migrate_pfn; /* isolate_migratepages search base */ 122 + bool sync; /* Synchronous migration */ 123 + 124 + int order; /* order a direct compactor needs */ 125 + int migratetype; /* MOVABLE, RECLAIMABLE etc */ 126 + struct zone *zone; 127 + }; 128 + 129 + unsigned long 130 + isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn); 131 + unsigned long 132 + isolate_migratepages_range(struct zone *zone, struct compact_control *cc, 133 + unsigned long low_pfn, unsigned long end_pfn); 134 + 135 + #endif 103 136 104 137 /* 105 138 * function for dealing with page's order in buddy system.