[PATCH] unpaged: PG_reserved bad_page

It used to be the case that PG_reserved pages were silently never freed, but
in 2.6.15-rc1 they may be freed with a "Bad page state" message. We should
work through such cases as they appear, fixing the code; but for now it's
safer to issue the message without freeing the page, leaving PG_reserved set.
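
For illustration, the freeing side of this change amounts to the control flow sketched below. This is a minimal userspace model, not kernel code: the fake_page struct, the FAKE_PG_* bits and the fake_* helpers are stand-in names chosen only to show the "report but do not free" idea.

/*
 * Minimal userspace sketch of "report but do not free" for reserved pages.
 * All names here are stand-ins for illustration; see the real patch below.
 */
#include <stdio.h>

#define FAKE_PG_reserved	(1u << 0)	/* stand-in flag bits */
#define FAKE_PG_dirty		(1u << 1)

struct fake_page {
	unsigned int flags;
	int mapcount;
};

static void fake_bad_page(const char *function, struct fake_page *page)
{
	fprintf(stderr, "Bad page state in %s (flags=%#x)\n",
		function, page->flags);
}

/*
 * Mirrors the shape of the patched free_pages_check(): report problems,
 * clear what is safe to clear, and return nonzero when the reserved bit
 * is set so the caller can skip freeing the page altogether.
 */
static int fake_free_pages_check(const char *function, struct fake_page *page)
{
	if (page->mapcount ||
	    (page->flags & (FAKE_PG_reserved | FAKE_PG_dirty)))
		fake_bad_page(function, page);
	if (page->flags & FAKE_PG_dirty)
		page->flags &= ~FAKE_PG_dirty;
	/* The reserved bit is deliberately left set. */
	return !!(page->flags & FAKE_PG_reserved);
}

static void fake_free_page(struct fake_page *page)
{
	if (fake_free_pages_check(__func__, page))
		return;				/* leave the page alone */
	printf("page freed normally\n");
}

int main(void)
{
	struct fake_page ok = { .flags = 0, .mapcount = 0 };
	struct fake_page reserved = { .flags = FAKE_PG_reserved, .mapcount = 0 };

	fake_free_page(&ok);		/* freed */
	fake_free_page(&reserved);	/* reported, not freed */
	return 0;
}

The allocation side gets the same safety net in the patch itself: prep_new_page() now returns nonzero for a PG_reserved page, and buffered_rmqueue() retries with another page instead of handing it out.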

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Hugh Dickins and committed by Linus Torvalds (689bcebf, f57e88a8)

 mm/page_alloc.c | 46 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 34 insertions(+), 12 deletions(-)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -140,8 +140,7 @@
 			1 << PG_reclaim |
 			1 << PG_slab |
 			1 << PG_swapcache |
-			1 << PG_writeback |
-			1 << PG_reserved );
+			1 << PG_writeback );
 	set_page_count(page, 0);
 	reset_page_mapcount(page);
 	page->mapping = NULL;
@@ -334,7 +335,7 @@
 	zone->free_area[order].nr_free++;
 }
 
-static inline void free_pages_check(const char *function, struct page *page)
+static inline int free_pages_check(const char *function, struct page *page)
 {
 	if (	page_mapcount(page) ||
 		page->mapping != NULL ||
@@ -352,6 +353,12 @@
 		bad_page(function, page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
+	/*
+	 * For now, we report if PG_reserved was found set, but do not
+	 * clear it, and do not free the page.  But we shall soon need
+	 * to do more, for when the ZERO_PAGE count wraps negative.
+	 */
+	return PageReserved(page);
 }
 
 /*
@@ -397,10 +392,9 @@
 {
 	LIST_HEAD(list);
 	int i;
+	int reserved = 0;
 
 	arch_free_page(page, order);
-
-	mod_page_state(pgfree, 1 << order);
 
 #ifndef CONFIG_MMU
 	if (order > 0)
@@ -408,8 +404,12 @@
 #endif
 
 	for (i = 0 ; i < (1 << order) ; ++i)
-		free_pages_check(__FUNCTION__, page + i);
+		reserved += free_pages_check(__FUNCTION__, page + i);
+	if (reserved)
+		return;
+
 	list_add(&page->lru, &list);
+	mod_page_state(pgfree, 1 << order);
 	kernel_map_pages(page, 1<<order, 0);
 	free_pages_bulk(page_zone(page), 1, &list, order);
 }
@@ -471,7 +463,7 @@
 /*
  * This page is about to be returned from the page allocator
  */
-static void prep_new_page(struct page *page, int order)
+static int prep_new_page(struct page *page, int order)
 {
 	if (	page_mapcount(page) ||
 		page->mapping != NULL ||
@@ -489,12 +481,20 @@
 			1 << PG_reserved )))
 		bad_page(__FUNCTION__, page);
 
+	/*
+	 * For now, we report if PG_reserved was found set, but do not
+	 * clear it, and do not allocate the page: as a safety net.
+	 */
+	if (PageReserved(page))
+		return 1;
+
 	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
 			1 << PG_referenced | 1 << PG_arch_1 |
 			1 << PG_checked | 1 << PG_mappedtodisk);
 	set_page_private(page, 0);
 	set_page_refs(page, order);
 	kernel_map_pages(page, 1 << order, 1);
+	return 0;
 }
 
 /*
@@ -685,11 +669,14 @@
 
 	arch_free_page(page, 0);
 
-	kernel_map_pages(page, 1, 0);
-	inc_page_state(pgfree);
 	if (PageAnon(page))
 		page->mapping = NULL;
-	free_pages_check(__FUNCTION__, page);
+	if (free_pages_check(__FUNCTION__, page))
+		return;
+
+	inc_page_state(pgfree);
+	kernel_map_pages(page, 1, 0);
+
 	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 	local_irq_save(flags);
 	list_add(&page->lru, &pcp->list);
@@ -731,12 +712,14 @@
 buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 {
 	unsigned long flags;
-	struct page *page = NULL;
+	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
 
+again:
 	if (order == 0) {
 		struct per_cpu_pages *pcp;
 
+		page = NULL;
 		pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 		local_irq_save(flags);
 		if (pcp->count <= pcp->low)
@@ -760,7 +739,8 @@
 	if (page != NULL) {
 		BUG_ON(bad_range(zone, page));
 		mod_page_state_zone(zone, pgalloc, 1 << order);
-		prep_new_page(page, order);
+		if (prep_new_page(page, order))
+			goto again;
 
 		if (gfp_flags & __GFP_ZERO)
 			prep_zero_page(page, order, gfp_flags);