Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE

Most of the VM_BUG_ON assertions are performed on a page. Usually, when
one of these assertions fails we get a BUG with only a call stack and
the registers.

Based on recent requests to add a small piece of code that dumps the
page at various VM_BUG_ON sites, I've noticed that the page dump is
quite useful to people debugging issues in mm.

This patch adds VM_BUG_ON_PAGE(cond, page), which, beyond doing what
VM_BUG_ON() does, also dumps the page before executing the actual
BUG().
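
For illustration (not part of the original commit message), a typical conversion at a call site looks like this; the check is one that appears in the hunks below:

        /* Before: a failure gives only a BUG with a stack trace and registers. */
        VM_BUG_ON(!PageLocked(page));

        /* After: the page's state is printed via dump_page(page, NULL)
         * before the BUG fires. */
        VM_BUG_ON_PAGE(!PageLocked(page), page);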

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Sasha Levin, committed by Linus Torvalds
309381fe e3bba3c3

+181 -170
+4 -4
arch/x86/mm/gup.c
@@ -108,8 +108,8 @@

 static inline void get_head_page_multiple(struct page *page, int nr)
 {
-        VM_BUG_ON(page != compound_head(page));
-        VM_BUG_ON(page_count(page) == 0);
+        VM_BUG_ON_PAGE(page != compound_head(page), page);
+        VM_BUG_ON_PAGE(page_count(page) == 0, page);
         atomic_add(nr, &page->_count);
         SetPageReferenced(page);
 }
@@ -135,7 +135,7 @@
         head = pte_page(pte);
         page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
         do {
-                VM_BUG_ON(compound_head(page) != head);
+                VM_BUG_ON_PAGE(compound_head(page) != head, page);
                 pages[*nr] = page;
                 if (PageTail(page))
                         get_huge_page_tail(page);
@@ -212,7 +212,7 @@
         head = pte_page(pte);
         page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
         do {
-                VM_BUG_ON(compound_head(page) != head);
+                VM_BUG_ON_PAGE(compound_head(page) != head, page);
                 pages[*nr] = page;
                 if (PageTail(page))
                         get_huge_page_tail(page);
+1
include/linux/gfp.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_GFP_H
 #define __LINUX_GFP_H

+#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/stddef.h>
 #include <linux/linkage.h>
+2 -1
include/linux/hugetlb.h
@@ -2,6 +2,7 @@
 #define _LINUX_HUGETLB_H

 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
 #include <linux/cgroup.h>
@@ -355,7 +356,7 @@

 static inline struct hstate *page_hstate(struct page *page)
 {
-        VM_BUG_ON(!PageHuge(page));
+        VM_BUG_ON_PAGE(!PageHuge(page), page);
         return size_to_hstate(PAGE_SIZE << compound_order(page));
 }

+3 -2
include/linux/hugetlb_cgroup.h
@@ -15,6 +15,7 @@
 #ifndef _LINUX_HUGETLB_CGROUP_H
 #define _LINUX_HUGETLB_CGROUP_H

+#include <linux/mmdebug.h>
 #include <linux/res_counter.h>

 struct hugetlb_cgroup;
@@ -29,7 +30,7 @@

 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 {
-        VM_BUG_ON(!PageHuge(page));
+        VM_BUG_ON_PAGE(!PageHuge(page), page);

         if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                 return NULL;
@@ -39,7 +40,7 @@
 static inline
 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
 {
-        VM_BUG_ON(!PageHuge(page));
+        VM_BUG_ON_PAGE(!PageHuge(page), page);

         if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                 return -1;
+13 -16
include/linux/mm.h
@@ -5,6 +5,7 @@

 #ifdef __KERNEL__

+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
@@ -304,7 +305,7 @@
  */
 static inline int put_page_testzero(struct page *page)
 {
-        VM_BUG_ON(atomic_read(&page->_count) == 0);
+        VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
         return atomic_dec_and_test(&page->_count);
 }

@@ -365,7 +366,7 @@
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        VM_BUG_ON(PageSlab(page));
+        VM_BUG_ON_PAGE(PageSlab(page), page);
         bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -373,7 +374,7 @@
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        VM_BUG_ON(PageSlab(page));
+        VM_BUG_ON_PAGE(PageSlab(page), page);
         bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -448,7 +449,7 @@
  */
 static inline bool compound_tail_refcounted(struct page *page)
 {
-        VM_BUG_ON(!PageHead(page));
+        VM_BUG_ON_PAGE(!PageHead(page), page);
         return __compound_tail_refcounted(page);
 }

@@ -457,9 +458,9 @@
         /*
          * __split_huge_page_refcount() cannot run from under us.
          */
-        VM_BUG_ON(!PageTail(page));
-        VM_BUG_ON(page_mapcount(page) < 0);
-        VM_BUG_ON(atomic_read(&page->_count) != 0);
+        VM_BUG_ON_PAGE(!PageTail(page), page);
+        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+        VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
         if (compound_tail_refcounted(page->first_page))
                 atomic_inc(&page->_mapcount);
 }
@@ -475,7 +476,7 @@
          * Getting a normal page or the head of a compound page
          * requires to already have an elevated page->_count.
          */
-        VM_BUG_ON(atomic_read(&page->_count) <= 0);
+        VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
         atomic_inc(&page->_count);
 }

@@ -512,13 +513,13 @@

 static inline void __SetPageBuddy(struct page *page)
 {
-        VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
         atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }

 static inline void __ClearPageBuddy(struct page *page)
 {
-        VM_BUG_ON(!PageBuddy(page));
+        VM_BUG_ON_PAGE(!PageBuddy(page), page);
         atomic_set(&page->_mapcount, -1);
 }

@@ -1402,7 +1403,7 @@
          * slab code uses page->slab_cache and page->first_page (for tail
          * pages), which share storage with page->ptl.
          */
-        VM_BUG_ON(*(unsigned long *)&page->ptl);
+        VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
         if (!ptlock_alloc(page))
                 return false;
         spin_lock_init(ptlock_ptr(page));
@@ -1493,7 +1494,7 @@
 static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        VM_BUG_ON(page->pmd_huge_pte);
+        VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
 #endif
         ptlock_free(page);
 }
@@ -2029,10 +2030,6 @@
 extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
-
-extern void dump_page(struct page *page, char *reason);
-extern void dump_page_badflags(struct page *page, char *reason,
-                               unsigned long badflags);

 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
+9
include/linux/mmdebug.h
@@ -1,10 +1,19 @@
 #ifndef LINUX_MM_DEBUG_H
 #define LINUX_MM_DEBUG_H 1

+struct page;
+
+extern void dump_page(struct page *page, char *reason);
+extern void dump_page_badflags(struct page *page, char *reason,
+                               unsigned long badflags);
+
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
+#define VM_BUG_ON_PAGE(cond, page) \
+        do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
 #endif

 #ifdef CONFIG_DEBUG_VIRTUAL
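
The fallback branch above matters for production builds: with CONFIG_DEBUG_VM disabled, VM_BUG_ON_PAGE(cond, page) collapses to VM_BUG_ON(cond), i.e. BUILD_BUG_ON_INVALID(cond), so the condition is still type-checked at compile time but emits no object code. A sketch of what the debug expansion does at one of the converted call sites (illustrative only, using the page_count() check from the gup.c hunk):

        /* CONFIG_DEBUG_VM=y: dump the offending page, then BUG. */
        if (unlikely(page_count(page) == 0)) {
                dump_page(page, NULL);
                BUG();
        }
        /* CONFIG_DEBUG_VM=n: the expression is validated at compile time
         * via BUILD_BUG_ON_INVALID() but no code is generated. */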
+5 -5
include/linux/page-flags.h
@@ -412,7 +412,7 @@
  */
 static inline int PageTransHuge(struct page *page)
 {
-        VM_BUG_ON(PageTail(page));
+        VM_BUG_ON_PAGE(PageTail(page), page);
         return PageHead(page);
 }

@@ -460,25 +460,25 @@
  */
 static inline int PageSlabPfmemalloc(struct page *page)
 {
-        VM_BUG_ON(!PageSlab(page));
+        VM_BUG_ON_PAGE(!PageSlab(page), page);
         return PageActive(page);
 }

 static inline void SetPageSlabPfmemalloc(struct page *page)
 {
-        VM_BUG_ON(!PageSlab(page));
+        VM_BUG_ON_PAGE(!PageSlab(page), page);
         SetPageActive(page);
 }

 static inline void __ClearPageSlabPfmemalloc(struct page *page)
 {
-        VM_BUG_ON(!PageSlab(page));
+        VM_BUG_ON_PAGE(!PageSlab(page), page);
         __ClearPageActive(page);
 }

 static inline void ClearPageSlabPfmemalloc(struct page *page)
 {
-        VM_BUG_ON(!PageSlab(page));
+        VM_BUG_ON_PAGE(!PageSlab(page), page);
         ClearPageActive(page);
 }

+5 -5
include/linux/pagemap.h
@@ -162,7 +162,7 @@
          * disabling preempt, and hence no need for the "speculative get" that
          * SMP requires.
          */
-        VM_BUG_ON(page_count(page) == 0);
+        VM_BUG_ON_PAGE(page_count(page) == 0, page);
         atomic_inc(&page->_count);

 #else
@@ -175,7 +175,7 @@
                 return 0;
         }
 #endif
-        VM_BUG_ON(PageTail(page));
+        VM_BUG_ON_PAGE(PageTail(page), page);

         return 1;
 }
@@ -191,14 +191,14 @@
 # ifdef CONFIG_PREEMPT_COUNT
         VM_BUG_ON(!in_atomic());
 # endif
-        VM_BUG_ON(page_count(page) == 0);
+        VM_BUG_ON_PAGE(page_count(page) == 0, page);
         atomic_add(count, &page->_count);

 #else
         if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
                 return 0;
 #endif
-        VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

         return 1;
 }
@@ -210,7 +210,7 @@

 static inline void page_unfreeze_refs(struct page *page, int count)
 {
-        VM_BUG_ON(page_count(page) != 0);
+        VM_BUG_ON_PAGE(page_count(page) != 0, page);
         VM_BUG_ON(count == 0);

         atomic_set(&page->_count, count);
+1
include/linux/percpu.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H

+#include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
+3 -3
mm/cleancache.c
@@ -237,7 +237,7 @@
                 goto out;
         }

-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
         fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
         if (fake_pool_id < 0)
                 goto out;
@@ -279,7 +279,7 @@
                 return;
         }

-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
         fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
         if (fake_pool_id < 0)
                 return;
@@ -318,7 +318,7 @@
         if (pool_id < 0)
                 return;

-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
         if (cleancache_get_key(mapping->host, &key) >= 0) {
                 cleancache_ops->invalidate_page(pool_id,
                                 key, page->index);
+1 -1
mm/compaction.c
@@ -601,7 +601,7 @@
                 if (__isolate_lru_page(page, mode) != 0)
                         continue;

-                VM_BUG_ON(PageTransCompound(page));
+                VM_BUG_ON_PAGE(PageTransCompound(page), page);

                 /* Successfully isolated */
                 cc->finished_update_migrate = true;
+8 -8
mm/filemap.c
@@ -409,9 +409,9 @@
 {
         int error;

-        VM_BUG_ON(!PageLocked(old));
-        VM_BUG_ON(!PageLocked(new));
-        VM_BUG_ON(new->mapping);
+        VM_BUG_ON_PAGE(!PageLocked(old), old);
+        VM_BUG_ON_PAGE(!PageLocked(new), new);
+        VM_BUG_ON_PAGE(new->mapping, new);

         error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
         if (!error) {
@@ -461,8 +461,8 @@
 {
         int error;

-        VM_BUG_ON(!PageLocked(page));
-        VM_BUG_ON(PageSwapBacked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
+        VM_BUG_ON_PAGE(PageSwapBacked(page), page);

         error = mem_cgroup_cache_charge(page, current->mm,
                                         gfp_mask & GFP_RECLAIM_MASK);
@@ -607,7 +607,7 @@
  */
 void unlock_page(struct page *page)
 {
-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
         clear_bit_unlock(PG_locked, &page->flags);
         smp_mb__after_clear_bit();
         wake_up_page(page, PG_locked);
@@ -760,7 +760,7 @@
                         page_cache_release(page);
                         goto repeat;
                 }
-                VM_BUG_ON(page->index != offset);
+                VM_BUG_ON_PAGE(page->index != offset, page);
         }
         return page;
 }
@@ -1656,7 +1656,7 @@
                 put_page(page);
                 goto retry_find;
         }
-        VM_BUG_ON(page->index != offset);
+        VM_BUG_ON_PAGE(page->index != offset, page);

         /*
          * We have a locked page in the page cache, now we need to check
+18 -18
mm/huge_memory.c
@@ -712,7 +712,7 @@
         pgtable_t pgtable;
         spinlock_t *ptl;

-        VM_BUG_ON(!PageCompound(page));
+        VM_BUG_ON_PAGE(!PageCompound(page), page);
         pgtable = pte_alloc_one(mm, haddr);
         if (unlikely(!pgtable))
                 return VM_FAULT_OOM;
@@ -893,7 +893,7 @@
                 goto out;
         }
         src_page = pmd_page(pmd);
-        VM_BUG_ON(!PageHead(src_page));
+        VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
         get_page(src_page);
         page_dup_rmap(src_page);
         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
@@ -1067,7 +1067,7 @@
         ptl = pmd_lock(mm, pmd);
         if (unlikely(!pmd_same(*pmd, orig_pmd)))
                 goto out_free_pages;
-        VM_BUG_ON(!PageHead(page));
+        VM_BUG_ON_PAGE(!PageHead(page), page);

         pmdp_clear_flush(vma, haddr, pmd);
         /* leave pmd empty until pte is filled */
@@ -1133,7 +1133,7 @@
                 goto out_unlock;

         page = pmd_page(orig_pmd);
-        VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+        VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
         if (page_mapcount(page) == 1) {
                 pmd_t entry;
                 entry = pmd_mkyoung(orig_pmd);
@@ -1211,7 +1211,7 @@
                 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                 put_huge_zero_page();
         } else {
-                VM_BUG_ON(!PageHead(page));
+                VM_BUG_ON_PAGE(!PageHead(page), page);
                 page_remove_rmap(page);
                 put_page(page);
         }
@@ -1249,7 +1249,7 @@
                 goto out;

         page = pmd_page(*pmd);
-        VM_BUG_ON(!PageHead(page));
+        VM_BUG_ON_PAGE(!PageHead(page), page);
         if (flags & FOLL_TOUCH) {
                 pmd_t _pmd;
                 /*
@@ -1274,7 +1274,7 @@
                 }
         }
         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-        VM_BUG_ON(!PageCompound(page));
+        VM_BUG_ON_PAGE(!PageCompound(page), page);
         if (flags & FOLL_GET)
                 get_page_foll(page);

@@ -1432,9 +1432,9 @@
         } else {
                 page = pmd_page(orig_pmd);
                 page_remove_rmap(page);
-                VM_BUG_ON(page_mapcount(page) < 0);
+                VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-                VM_BUG_ON(!PageHead(page));
+                VM_BUG_ON_PAGE(!PageHead(page), page);
                 atomic_long_dec(&tlb->mm->nr_ptes);
                 spin_unlock(ptl);
                 tlb_remove_page(tlb, page);
@@ -2176,9 +2176,9 @@
                 if (unlikely(!page))
                         goto out;

-                VM_BUG_ON(PageCompound(page));
-                BUG_ON(!PageAnon(page));
-                VM_BUG_ON(!PageSwapBacked(page));
+                VM_BUG_ON_PAGE(PageCompound(page), page);
+                VM_BUG_ON_PAGE(!PageAnon(page), page);
+                VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

                 /* cannot use mapcount: can't collapse if there's a gup pin */
                 if (page_count(page) != 1)
@@ -2201,8 +2201,8 @@
                 }
                 /* 0 stands for page_is_file_cache(page) == false */
                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
-                VM_BUG_ON(!PageLocked(page));
-                VM_BUG_ON(PageLRU(page));
+                VM_BUG_ON_PAGE(!PageLocked(page), page);
+                VM_BUG_ON_PAGE(PageLRU(page), page);

                 /* If there is no mapped pte young don't collapse the page */
                 if (pte_young(pteval) || PageReferenced(page) ||
@@ -2232,7 +2232,7 @@
                 } else {
                         src_page = pte_page(pteval);
                         copy_user_highpage(page, src_page, address, vma);
-                        VM_BUG_ON(page_mapcount(src_page) != 1);
+                        VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
                         release_pte_page(src_page);
                         /*
                          * ptl mostly unnecessary, but preempt has to
@@ -2311,7 +2311,7 @@
                        struct vm_area_struct *vma, unsigned long address,
                        int node)
 {
-        VM_BUG_ON(*hpage);
+        VM_BUG_ON_PAGE(*hpage, *hpage);
         /*
          * Allocate the page while the vma is still valid and under
          * the mmap_sem read mode so there is no memory allocation
@@ -2580,7 +2580,7 @@
                  */
                 node = page_to_nid(page);
                 khugepaged_node_load[node]++;
-                VM_BUG_ON(PageCompound(page));
+                VM_BUG_ON_PAGE(PageCompound(page), page);
                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                         goto out_unmap;
                 /* cannot use mapcount: can't collapse if there's a gup pin */
@@ -2876,7 +2876,7 @@
                 return;
         }
         page = pmd_page(*pmd);
-        VM_BUG_ON(!page_count(page));
+        VM_BUG_ON_PAGE(!page_count(page), page);
         get_page(page);
         spin_unlock(ptl);
         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+5 -5
mm/hugetlb.c
@@ -584,7 +584,7 @@
                                 1 << PG_active | 1 << PG_reserved |
                                 1 << PG_private | 1 << PG_writeback);
         }
-        VM_BUG_ON(hugetlb_cgroup_from_page(page));
+        VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
         set_compound_page_dtor(page, NULL);
         set_page_refcounted(page);
         arch_release_hugepage(page);
@@ -1089,7 +1089,7 @@
                  * no users -- drop the buddy allocator's reference.
                  */
                 put_page_testzero(page);
-                VM_BUG_ON(page_count(page));
+                VM_BUG_ON_PAGE(page_count(page), page);
                 enqueue_huge_page(h, page);
         }
 free:
@@ -3503,7 +3503,7 @@

 bool isolate_huge_page(struct page *page, struct list_head *list)
 {
-        VM_BUG_ON(!PageHead(page));
+        VM_BUG_ON_PAGE(!PageHead(page), page);
         if (!get_page_unless_zero(page))
                 return false;
         spin_lock(&hugetlb_lock);
@@ -3514,7 +3514,7 @@

 void putback_active_hugepage(struct page *page)
 {
-        VM_BUG_ON(!PageHead(page));
+        VM_BUG_ON_PAGE(!PageHead(page), page);
         spin_lock(&hugetlb_lock);
         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
         spin_unlock(&hugetlb_lock);
@@ -3523,7 +3523,7 @@

 bool is_hugepage_active(struct page *page)
 {
-        VM_BUG_ON(!PageHuge(page));
+        VM_BUG_ON_PAGE(!PageHuge(page), page);
         /*
          * This function can be called for a tail page because the caller,
          * scan_movable_pages, scans through a given pfn-range which typically
+1 -1
mm/hugetlb_cgroup.c
@@ -390,7 +390,7 @@
         if (hugetlb_cgroup_disabled())
                 return;

-        VM_BUG_ON(!PageHuge(oldhpage));
+        VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
         spin_lock(&hugetlb_lock);
         h_cg = hugetlb_cgroup_from_page(oldhpage);
         set_hugetlb_cgroup(oldhpage, NULL);
+5 -5
mm/internal.h
@@ -27,8 +27,8 @@
  */
 static inline void set_page_refcounted(struct page *page)
 {
-        VM_BUG_ON(PageTail(page));
-        VM_BUG_ON(atomic_read(&page->_count));
+        VM_BUG_ON_PAGE(PageTail(page), page);
+        VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
         set_page_count(page, 1);
 }

@@ -46,7 +46,7 @@
          * speculative page access (like in
          * page_cache_get_speculative()) on tail pages.
          */
-        VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
+        VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
         if (get_page_head)
                 atomic_inc(&page->first_page->_count);
         get_huge_page_tail(page);
@@ -71,7 +71,7 @@
                  * Getting a normal page or the head of a compound page
                  * requires to already have an elevated page->_count.
                  */
-                VM_BUG_ON(atomic_read(&page->_count) <= 0);
+                VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
                 atomic_inc(&page->_count);
         }
 }
@@ -173,7 +173,7 @@
 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
                                       struct page *page)
 {
-        VM_BUG_ON(PageLRU(page));
+        VM_BUG_ON_PAGE(PageLRU(page), page);

         if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
                 return 0;
+6 -6
mm/ksm.c
@@ -1898,13 +1898,13 @@
         int ret = SWAP_AGAIN;
         int search_new_forks = 0;

-        VM_BUG_ON(!PageKsm(page));
+        VM_BUG_ON_PAGE(!PageKsm(page), page);

         /*
          * Rely on the page lock to protect against concurrent modifications
          * to that page's node of the stable tree.
          */
-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);

         stable_node = page_stable_node(page);
         if (!stable_node)
@@ -1958,13 +1958,13 @@
 {
         struct stable_node *stable_node;

-        VM_BUG_ON(!PageLocked(oldpage));
-        VM_BUG_ON(!PageLocked(newpage));
-        VM_BUG_ON(newpage->mapping != oldpage->mapping);
+        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
+        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+        VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);

         stable_node = page_stable_node(newpage);
         if (stable_node) {
-                VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+                VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
                 stable_node->kpfn = page_to_pfn(newpage);
                 /*
                  * newpage->mapping was set in advance; now we need smp_wmb()
+14 -14
mm/memcontrol.c
@@ -2897,7 +2897,7 @@
         unsigned short id;
         swp_entry_t ent;

-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);

         pc = lookup_page_cgroup(page);
         lock_page_cgroup(pc);
@@ -2931,7 +2931,7 @@
         bool anon;

         lock_page_cgroup(pc);
-        VM_BUG_ON(PageCgroupUsed(pc));
+        VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
         /*
          * we don't need page_cgroup_lock about tail pages, becase they are not
          * accessed by any other context at this point.
@@ -2966,7 +2966,7 @@
         if (lrucare) {
                 if (was_on_lru) {
                         lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
-                        VM_BUG_ON(PageLRU(page));
+                        VM_BUG_ON_PAGE(PageLRU(page), page);
                         SetPageLRU(page);
                         add_page_to_lru_list(page, lruvec, page_lru(page));
                 }
@@ -3780,7 +3780,7 @@
         if (!memcg)
                 return;

-        VM_BUG_ON(mem_cgroup_is_root(memcg));
+        VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
         memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
 }
 #else
@@ -3859,7 +3859,7 @@
         bool anon = PageAnon(page);

         VM_BUG_ON(from == to);
-        VM_BUG_ON(PageLRU(page));
+        VM_BUG_ON_PAGE(PageLRU(page), page);
         /*
          * The page is isolated from LRU. So, collapse function
          * will not handle this page. But page splitting can happen.
@@ -3952,7 +3952,7 @@
                 parent = root_mem_cgroup;

         if (nr_pages > 1) {
-                VM_BUG_ON(!PageTransHuge(page));
+                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                 flags = compound_lock_irqsave(page);
         }

@@ -3986,7 +3986,7 @@

         if (PageTransHuge(page)) {
                 nr_pages <<= compound_order(page);
-                VM_BUG_ON(!PageTransHuge(page));
+                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                 /*
                  * Never OOM-kill a process for a huge page. The
                  * fault handler will fall back to regular pages.
@@ -4006,8 +4006,8 @@
 {
         if (mem_cgroup_disabled())
                 return 0;
-        VM_BUG_ON(page_mapped(page));
-        VM_BUG_ON(page->mapping && !PageAnon(page));
+        VM_BUG_ON_PAGE(page_mapped(page), page);
+        VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
         VM_BUG_ON(!mm);
         return mem_cgroup_charge_common(page, mm, gfp_mask,
                                         MEM_CGROUP_CHARGE_TYPE_ANON);
@@ -4211,7 +4211,7 @@

         if (PageTransHuge(page)) {
                 nr_pages <<= compound_order(page);
-                VM_BUG_ON(!PageTransHuge(page));
+                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
         }
         /*
          * Check if our page_cgroup is valid
@@ -4303,7 +4303,7 @@
         /* early check. */
         if (page_mapped(page))
                 return;
-        VM_BUG_ON(page->mapping && !PageAnon(page));
+        VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
         /*
          * If the page is in swap cache, uncharge should be deferred
          * to the swap path, which also properly accounts swap usage
@@ -4323,8 +4323,8 @@

 void mem_cgroup_uncharge_cache_page(struct page *page)
 {
-        VM_BUG_ON(page_mapped(page));
-        VM_BUG_ON(page->mapping);
+        VM_BUG_ON_PAGE(page_mapped(page), page);
+        VM_BUG_ON_PAGE(page->mapping, page);
         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
 }

@@ -6880,7 +6880,7 @@
         enum mc_target_type ret = MC_TARGET_NONE;

         page = pmd_page(pmd);
-        VM_BUG_ON(!page || !PageHead(page));
+        VM_BUG_ON_PAGE(!page || !PageHead(page), page);
         if (!move_anon())
                 return ret;
         pc = lookup_page_cgroup(page);
+4 -4
mm/memory.c
@@ -289,7 +289,7 @@
                         return 0;
                 batch = tlb->active;
         }
-        VM_BUG_ON(batch->nr > batch->max);
+        VM_BUG_ON_PAGE(batch->nr > batch->max, page);

         return batch->max - batch->nr;
 }
@@ -2702,7 +2702,7 @@
                                         goto unwritable_page;
                                 }
                         } else
                                 VM_BUG_ON_PAGE(!PageLocked(old_page), old_page);

                         /*
                          * Since we dropped the lock we need to revalidate
@@ -3358,7 +3358,7 @@
         if (unlikely(!(ret & VM_FAULT_LOCKED)))
                 lock_page(vmf.page);
         else
-                VM_BUG_ON(!PageLocked(vmf.page));
+                VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);

         /*
          * Should we do an early C-O-W break?
@@ -3395,7 +3395,7 @@
                                         goto unwritable_page;
                                 }
                         } else
-                                VM_BUG_ON(!PageLocked(page));
+                                VM_BUG_ON_PAGE(!PageLocked(page), page);
                         page_mkwrite = 1;
                 }
         }
+3 -3
mm/migrate.c
@@ -499,7 +499,7 @@
         if (PageUptodate(page))
                 SetPageUptodate(newpage);
         if (TestClearPageActive(page)) {
-                VM_BUG_ON(PageUnevictable(page));
+                VM_BUG_ON_PAGE(PageUnevictable(page), page);
                 SetPageActive(newpage);
         } else if (TestClearPageUnevictable(page))
                 SetPageUnevictable(newpage);
@@ -871,7 +871,7 @@
          * free the metadata, so the page can be freed.
          */
         if (!page->mapping) {
-                VM_BUG_ON(PageAnon(page));
+                VM_BUG_ON_PAGE(PageAnon(page), page);
                 if (page_has_private(page)) {
                         try_to_free_buffers(page);
                         goto uncharge;
@@ -1618,7 +1618,7 @@
 {
         int page_lru;

-        VM_BUG_ON(compound_order(page) && !PageTransHuge(page));
+        VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

         /* Avoid migrating to a node that is nearly full */
         if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
+2 -2
mm/mlock.c
@@ -279,8 +279,8 @@
 static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
                 int *pgrescued)
 {
-        VM_BUG_ON(PageLRU(page));
-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(PageLRU(page), page);
+        VM_BUG_ON_PAGE(!PageLocked(page), page);

         if (page_mapcount(page) <= 1 && page_evictable(page)) {
                 pagevec_add(pvec, page);
+11 -10
mm/page_alloc.c
@@ -509,12 +509,12 @@
                 return 0;

         if (page_is_guard(buddy) && page_order(buddy) == order) {
-                VM_BUG_ON(page_count(buddy) != 0);
+                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
                 return 1;
         }

         if (PageBuddy(buddy) && page_order(buddy) == order) {
-                VM_BUG_ON(page_count(buddy) != 0);
+                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
                 return 1;
         }
         return 0;
@@ -564,8 +564,8 @@

         page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

-        VM_BUG_ON(page_idx & ((1 << order) - 1));
-        VM_BUG_ON(bad_range(zone, page));
+        VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+        VM_BUG_ON_PAGE(bad_range(zone, page), page);

         while (order < MAX_ORDER-1) {
                 buddy_idx = __find_buddy_index(page_idx, order);
@@ -827,7 +827,7 @@
                 area--;
                 high--;
                 size >>= 1;
-                VM_BUG_ON(bad_range(zone, &page[size]));
+                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

 #ifdef CONFIG_DEBUG_PAGEALLOC
                 if (high < debug_guardpage_minorder()) {
@@ -980,7 +980,7 @@

         for (page = start_page; page <= end_page;) {
                 /* Make sure we are not inadvertently changing nodes */
-                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

                 if (!pfn_valid_within(page_to_pfn(page))) {
                         page++;
@@ -1429,8 +1429,8 @@
 {
         int i;

-        VM_BUG_ON(PageCompound(page));
-        VM_BUG_ON(!page_count(page));
+        VM_BUG_ON_PAGE(PageCompound(page), page);
+        VM_BUG_ON_PAGE(!page_count(page), page);

 #ifdef CONFIG_KMEMCHECK
         /*
@@ -1577,7 +1577,7 @@
         zone_statistics(preferred_zone, zone, gfp_flags);
         local_irq_restore(flags);

-        VM_BUG_ON(bad_range(zone, page));
+        VM_BUG_ON_PAGE(bad_range(zone, page), page);
         if (prep_new_page(page, order, gfp_flags))
                 goto again;
         return page;
@@ -6021,7 +6021,7 @@
         pfn = page_to_pfn(page);
         bitmap = get_pageblock_bitmap(zone, pfn);
         bitidx = pfn_to_bitidx(zone, pfn);
-        VM_BUG_ON(!zone_spans_pfn(zone, pfn));
+        VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);

         for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                 if (flags & value)
@@ -6539,3 +6539,4 @@
 {
         dump_page_badflags(page, reason, 0);
 }
+EXPORT_SYMBOL_GPL(dump_page);
+2 -2
mm/page_io.c
@@ -320,8 +320,8 @@
         int ret = 0;
         struct swap_info_struct *sis = page_swap_info(page);

-        VM_BUG_ON(!PageLocked(page));
-        VM_BUG_ON(PageUptodate(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
+        VM_BUG_ON_PAGE(PageUptodate(page), page);
         if (frontswap_load(page) == 0) {
                 SetPageUptodate(page);
                 unlock_page(page);
+5 -5
mm/rmap.c
@@ -894,9 +894,9 @@
 {
         struct anon_vma *anon_vma = vma->anon_vma;

-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
         VM_BUG_ON(!anon_vma);
-        VM_BUG_ON(page->index != linear_page_index(vma, address));
+        VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);

         anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
         page->mapping = (struct address_space *) anon_vma;
@@ -995,7 +995,7 @@
         if (unlikely(PageKsm(page)))
                 return;

-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
         /* address might be in next vma when migration races vma_adjust */
         if (first)
                 __page_set_anon_rmap(page, vma, address, exclusive);
@@ -1481,7 +1481,7 @@
                 .anon_lock = page_lock_anon_vma_read,
         };

-        VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
+        VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);

         /*
          * During exec, a temporary VMA is setup and later moved.
@@ -1533,7 +1533,7 @@

         };

-        VM_BUG_ON(!PageLocked(page) || PageLRU(page));
+        VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);

         ret = rmap_walk(page, &rwc);
         return ret;
+4 -4
mm/shmem.c
@@ -285,8 +285,8 @@
 {
         int error;

-        VM_BUG_ON(!PageLocked(page));
-        VM_BUG_ON(!PageSwapBacked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
+        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

         page_cache_get(page);
         page->mapping = mapping;
@@ -491,7 +491,7 @@
                                 continue;
                         if (!unfalloc || !PageUptodate(page)) {
                                 if (page->mapping == mapping) {
-                                        VM_BUG_ON(PageWriteback(page));
+                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                         truncate_inode_page(mapping, page);
                                 }
                         }
@@ -568,7 +568,7 @@
                         lock_page(page);
                         if (!unfalloc || !PageUptodate(page)) {
                                 if (page->mapping == mapping) {
-                                        VM_BUG_ON(PageWriteback(page));
+                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                         truncate_inode_page(mapping, page);
                                 }
                         }
+6 -6
mm/slub.c
@@ -1559,7 +1559,7 @@
                         new.freelist = freelist;
         }

-        VM_BUG_ON(new.frozen);
+        VM_BUG_ON_PAGE(new.frozen, &new);
         new.frozen = 1;

         if (!__cmpxchg_double_slab(s, page,
@@ -1812,7 +1812,7 @@
                         set_freepointer(s, freelist, prior);
                 new.counters = counters;
                 new.inuse--;
-                VM_BUG_ON(!new.frozen);
+                VM_BUG_ON_PAGE(!new.frozen, &new);

         } while (!__cmpxchg_double_slab(s, page,
                 prior, counters,
@@ -1840,7 +1840,7 @@

         old.freelist = page->freelist;
         old.counters = page->counters;
-        VM_BUG_ON(!old.frozen);
+        VM_BUG_ON_PAGE(!old.frozen, &old);

         /* Determine target state of the slab */
         new.counters = old.counters;
@@ -1952,7 +1952,7 @@

                 old.freelist = page->freelist;
                 old.counters = page->counters;
-                VM_BUG_ON(!old.frozen);
+                VM_BUG_ON_PAGE(!old.frozen, &old);

                 new.counters = old.counters;
                 new.freelist = old.freelist;
@@ -2225,7 +2225,7 @@
                 counters = page->counters;

                 new.counters = counters;
-                VM_BUG_ON(!new.frozen);
+                VM_BUG_ON_PAGE(!new.frozen, &new);

                 new.inuse = page->objects;
                 new.frozen = freelist != NULL;
@@ -2319,7 +2319,7 @@
          * page is pointing to the page from which the objects are obtained.
          * That page must be frozen for per cpu allocations to work.
          */
-        VM_BUG_ON(!c->page->frozen);
+        VM_BUG_ON_PAGE(!c->page->frozen, c->page);
         c->freelist = get_freepointer(s, freelist);
         c->tid = next_tid(c->tid);
         local_irq_restore(flags);
+18 -18
mm/swap.c
@@ -57,7 +57,7 @@

                 spin_lock_irqsave(&zone->lru_lock, flags);
                 lruvec = mem_cgroup_page_lruvec(page, zone);
-                VM_BUG_ON(!PageLRU(page));
+                VM_BUG_ON_PAGE(!PageLRU(page), page);
                 __ClearPageLRU(page);
                 del_page_from_lru_list(page, lruvec, page_off_lru(page));
                 spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -130,8 +130,8 @@
                          * __split_huge_page_refcount cannot race
                          * here.
                          */
-                        VM_BUG_ON(!PageHead(page_head));
-                        VM_BUG_ON(page_mapcount(page) != 0);
+                        VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
+                        VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
                         if (put_page_testzero(page_head)) {
                                 /*
                                  * If this is the tail of a slab
@@ -148,7 +148,7 @@
                                  * the compound page enters the buddy
                                  * allocator.
                                  */
-                                VM_BUG_ON(PageSlab(page_head));
+                                VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
                                 __put_compound_page(page_head);
                         }
                         return;
@@ -199,7 +199,7 @@
                                 __put_single_page(page);
                         return;
                 }
-                VM_BUG_ON(page_head != page->first_page);
+                VM_BUG_ON_PAGE(page_head != page->first_page, page);
                 /*
                  * We can release the refcount taken by
                  * get_page_unless_zero() now that
@@ -207,12 +207,12 @@
                  * compound_lock.
                  */
                 if (put_page_testzero(page_head))
-                        VM_BUG_ON(1);
+                        VM_BUG_ON_PAGE(1, page_head);
                 /* __split_huge_page_refcount will wait now */
-                VM_BUG_ON(page_mapcount(page) <= 0);
+                VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
                 atomic_dec(&page->_mapcount);
-                VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
-                VM_BUG_ON(atomic_read(&page->_count) != 0);
+                VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
+                VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
                 compound_unlock_irqrestore(page_head, flags);

                 if (put_page_testzero(page_head)) {
@@ -223,7 +223,7 @@
                 }
         } else {
                 /* page_head is a dangling pointer */
-                VM_BUG_ON(PageTail(page));
+                VM_BUG_ON_PAGE(PageTail(page), page);
                 goto out_put_single;
         }
 }
@@ -264,7 +264,7 @@
                          * page. __split_huge_page_refcount
                          * cannot race here.
                          */
-                        VM_BUG_ON(!PageHead(page_head));
+                        VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
                         __get_page_tail_foll(page, true);
                         return true;
                 } else {
@@ -604,8 +604,8 @@
  */
 void lru_cache_add(struct page *page)
 {
-        VM_BUG_ON(PageActive(page) && PageUnevictable(page));
-        VM_BUG_ON(PageLRU(page));
+        VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+        VM_BUG_ON_PAGE(PageLRU(page), page);
         __lru_cache_add(page);
 }

@@ -846,7 +846,7 @@
                 }

                 lruvec = mem_cgroup_page_lruvec(page, zone);
-                VM_BUG_ON(!PageLRU(page));
+                VM_BUG_ON_PAGE(!PageLRU(page), page);
                 __ClearPageLRU(page);
                 del_page_from_lru_list(page, lruvec, page_off_lru(page));
         }
@@ -888,9 +888,9 @@
 {
         const int file = 0;

-        VM_BUG_ON(!PageHead(page));
-        VM_BUG_ON(PageCompound(page_tail));
-        VM_BUG_ON(PageLRU(page_tail));
+        VM_BUG_ON_PAGE(!PageHead(page), page);
+        VM_BUG_ON_PAGE(PageCompound(page_tail), page);
+        VM_BUG_ON_PAGE(PageLRU(page_tail), page);
         VM_BUG_ON(NR_CPUS != 1 &&
                   !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

@@ -929,7 +929,7 @@
         int active = PageActive(page);
         enum lru_list lru = page_lru(page);

-        VM_BUG_ON(PageLRU(page));
+        VM_BUG_ON_PAGE(PageLRU(page), page);

         SetPageLRU(page);
         add_page_to_lru_list(page, lruvec, lru);
+8 -8
mm/swap_state.c
@@ -83,9 +83,9 @@
         int error;
         struct address_space *address_space;

-        VM_BUG_ON(!PageLocked(page));
-        VM_BUG_ON(PageSwapCache(page));
-        VM_BUG_ON(!PageSwapBacked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
+        VM_BUG_ON_PAGE(PageSwapCache(page), page);
+        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

         page_cache_get(page);
         SetPageSwapCache(page);
@@ -139,9 +139,9 @@
         swp_entry_t entry;
         struct address_space *address_space;

-        VM_BUG_ON(!PageLocked(page));
-        VM_BUG_ON(!PageSwapCache(page));
-        VM_BUG_ON(PageWriteback(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
+        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+        VM_BUG_ON_PAGE(PageWriteback(page), page);

         entry.val = page_private(page);
         address_space = swap_address_space(entry);
@@ -165,8 +165,8 @@
         swp_entry_t entry;
         int err;

-        VM_BUG_ON(!PageLocked(page));
-        VM_BUG_ON(!PageUptodate(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
+        VM_BUG_ON_PAGE(!PageUptodate(page), page);

         entry = get_swap_page();
         if (!entry.val)
+4 -4
mm/swapfile.c
@@ -906,7 +906,7 @@
 {
         int count;

-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);
         if (unlikely(PageKsm(page)))
                 return 0;
         count = page_mapcount(page);
@@ -926,7 +926,7 @@
  */
 int try_to_free_swap(struct page *page)
 {
-        VM_BUG_ON(!PageLocked(page));
+        VM_BUG_ON_PAGE(!PageLocked(page), page);

         if (!PageSwapCache(page))
                 return 0;
@@ -2714,7 +2714,7 @@
  */
 struct address_space *__page_file_mapping(struct page *page)
 {
-        VM_BUG_ON(!PageSwapCache(page));
+        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
         return page_swap_info(page)->swap_file->f_mapping;
 }
 EXPORT_SYMBOL_GPL(__page_file_mapping);
@@ -2722,7 +2722,7 @@
 pgoff_t __page_file_index(struct page *page)
 {
         swp_entry_t swap = { .val = page_private(page) };
-        VM_BUG_ON(!PageSwapCache(page));
+        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
         return swp_offset(swap);
 }
 EXPORT_SYMBOL_GPL(__page_file_index);
+10 -10
mm/vmscan.c
@@ -603,7 +603,7 @@
         bool is_unevictable;
         int was_unevictable = PageUnevictable(page);

-        VM_BUG_ON(PageLRU(page));
+        VM_BUG_ON_PAGE(PageLRU(page), page);

 redo:
         ClearPageUnevictable(page);
@@ -794,8 +794,8 @@
                 if (!trylock_page(page))
                         goto keep;

-                VM_BUG_ON(PageActive(page));
-                VM_BUG_ON(page_zone(page) != zone);
+                VM_BUG_ON_PAGE(PageActive(page), page);
+                VM_BUG_ON_PAGE(page_zone(page) != zone, page);

                 sc->nr_scanned++;

@@ -1079,14 +1079,14 @@
                 /* Not a candidate for swapping, so reclaim swap space. */
                 if (PageSwapCache(page) && vm_swap_full())
                         try_to_free_swap(page);
-                VM_BUG_ON(PageActive(page));
+                VM_BUG_ON_PAGE(PageActive(page), page);
                 SetPageActive(page);
                 pgactivate++;
 keep_locked:
                 unlock_page(page);
 keep:
                 list_add(&page->lru, &ret_pages);
-                VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
+                VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
         }

         free_hot_cold_page_list(&free_pages, 1);
@@ -1240,7 +1240,7 @@
                 page = lru_to_page(src);
                 prefetchw_prev_lru_page(page, src, flags);

-                VM_BUG_ON(!PageLRU(page));
+                VM_BUG_ON_PAGE(!PageLRU(page), page);

                 switch (__isolate_lru_page(page, mode)) {
                 case 0:
@@ -1295,7 +1295,7 @@
 {
         int ret = -EBUSY;

-        VM_BUG_ON(!page_count(page));
+        VM_BUG_ON_PAGE(!page_count(page), page);

         if (PageLRU(page)) {
                 struct zone *zone = page_zone(page);
@@ -1366,7 +1366,7 @@
                 struct page *page = lru_to_page(page_list);
                 int lru;

-                VM_BUG_ON(PageLRU(page));
+                VM_BUG_ON_PAGE(PageLRU(page), page);
                 list_del(&page->lru);
                 if (unlikely(!page_evictable(page))) {
                         spin_unlock_irq(&zone->lru_lock);
@@ -1586,7 +1586,7 @@
                 page = lru_to_page(list);
                 lruvec = mem_cgroup_page_lruvec(page, zone);

-                VM_BUG_ON(PageLRU(page));
+                VM_BUG_ON_PAGE(PageLRU(page), page);
                 SetPageLRU(page);

                 nr_pages = hpage_nr_pages(page);
@@ -3701,7 +3701,7 @@
                 if (page_evictable(page)) {
                         enum lru_list lru = page_lru_base_type(page);

-                        VM_BUG_ON(PageActive(page));
+                        VM_BUG_ON_PAGE(PageActive(page), page);
                         ClearPageUnevictable(page);
                         del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
                         add_page_to_lru_list(page, lruvec, lru);