[PATCH] unpaged: PG_reserved bad_page

It used to be the case that PG_reserved pages were silently never freed, but
in 2.6.15-rc1 they may be freed with a "Bad page state" message. We should
work through such cases as they appear, fixing the code; but for now it's
safer to issue the message without freeing the page, leaving PG_reserved set.
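
Concretely, the patch makes free_pages_check() and prep_new_page() return whether PG_reserved was found set, so their callers can back out instead of recycling the page. A condensed sketch of the resulting flow, with everything except the new PG_reserved handling elided (names and logic taken from the hunks below):

	/* free path: report a reserved page, but leave it alone */
	static inline int free_pages_check(const char *function, struct page *page)
	{
		/* ... existing "Bad page state" checks, unchanged ... */
		if (PageDirty(page))
			__ClearPageDirty(page);
		return PageReserved(page);	/* PG_reserved stays set; caller skips the free */
	}

	/* allocation path: refuse to hand out a reserved page */
	static int prep_new_page(struct page *page, int order)
	{
		/* ... existing "Bad page state" checks, unchanged ... */
		if (PageReserved(page))
			return 1;		/* buffered_rmqueue() retries via "goto again" */
		/* ... normal flag clearing, set_page_refs(), kernel_map_pages() ... */
		return 0;
	}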

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Hugh Dickins and committed by Linus Torvalds (689bcebf, f57e88a8)

 mm/page_alloc.c | 46 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 34 insertions(+), 12 deletions(-)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
···
 			1 << PG_reclaim |
 			1 << PG_slab |
 			1 << PG_swapcache |
-			1 << PG_writeback |
-			1 << PG_reserved );
+			1 << PG_writeback );
 	set_page_count(page, 0);
 	reset_page_mapcount(page);
 	page->mapping = NULL;
···
 	zone->free_area[order].nr_free++;
 }
 
-static inline void free_pages_check(const char *function, struct page *page)
+static inline int free_pages_check(const char *function, struct page *page)
 {
 	if (	page_mapcount(page) ||
 		page->mapping != NULL ||
···
 		bad_page(function, page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
+	/*
+	 * For now, we report if PG_reserved was found set, but do not
+	 * clear it, and do not free the page.  But we shall soon need
+	 * to do more, for when the ZERO_PAGE count wraps negative.
+	 */
+	return PageReserved(page);
 }
 
 /*
···
 {
 	LIST_HEAD(list);
 	int i;
+	int reserved = 0;
 
 	arch_free_page(page, order);
-
-	mod_page_state(pgfree, 1 << order);
 
 #ifndef CONFIG_MMU
 	if (order > 0)
···
 #endif
 
 	for (i = 0 ; i < (1 << order) ; ++i)
-		free_pages_check(__FUNCTION__, page + i);
+		reserved += free_pages_check(__FUNCTION__, page + i);
+	if (reserved)
+		return;
+
 	list_add(&page->lru, &list);
+	mod_page_state(pgfree, 1 << order);
 	kernel_map_pages(page, 1<<order, 0);
 	free_pages_bulk(page_zone(page), 1, &list, order);
 }
···
 /*
  * This page is about to be returned from the page allocator
  */
-static void prep_new_page(struct page *page, int order)
+static int prep_new_page(struct page *page, int order)
 {
 	if (	page_mapcount(page) ||
 		page->mapping != NULL ||
···
 			1 << PG_reserved )))
 		bad_page(__FUNCTION__, page);
 
+	/*
+	 * For now, we report if PG_reserved was found set, but do not
+	 * clear it, and do not allocate the page: as a safety net.
+	 */
+	if (PageReserved(page))
+		return 1;
+
 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
 			1 << PG_referenced | 1 << PG_arch_1 |
 			1 << PG_checked | 1 << PG_mappedtodisk);
 	set_page_private(page, 0);
 	set_page_refs(page, order);
 	kernel_map_pages(page, 1 << order, 1);
+	return 0;
 }
 
 /*
···
 
 	arch_free_page(page, 0);
 
-	kernel_map_pages(page, 1, 0);
-	inc_page_state(pgfree);
 	if (PageAnon(page))
 		page->mapping = NULL;
-	free_pages_check(__FUNCTION__, page);
+	if (free_pages_check(__FUNCTION__, page))
+		return;
+
+	inc_page_state(pgfree);
+	kernel_map_pages(page, 1, 0);
+
 	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 	local_irq_save(flags);
 	list_add(&page->lru, &pcp->list);
···
 buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 {
 	unsigned long flags;
-	struct page *page = NULL;
+	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
 
+again:
 	if (order == 0) {
 		struct per_cpu_pages *pcp;
 
+		page = NULL;
 		pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 		local_irq_save(flags);
 		if (pcp->count <= pcp->low)
···
 	if (page != NULL) {
 		BUG_ON(bad_range(zone, page));
 		mod_page_state_zone(zone, pgalloc, 1 << order);
-		prep_new_page(page, order);
+		if (prep_new_page(page, order))
+			goto again;
 
 		if (gfp_flags & __GFP_ZERO)
 			prep_zero_page(page, order, gfp_flags);