Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/gup: remove (VM_)BUG_ONs

Especially once we hit one of the assertions in
sanity_check_pinned_pages(), observing follow-up assertions failing in
other code can give good clues about what went wrong, so use
VM_WARN_ON_ONCE instead.

While at it, let's just convert all VM_BUG_ON to VM_WARN_ON_ONCE as well.
Add one comment for the pfn_valid() check.

We have to introduce VM_WARN_ON_ONCE_VMA() to make that fly.

Drop the BUG_ON after mmap_read_lock_killable(), if that ever returns
something > 0 we're in bigger trouble. Convert the other BUG_ONs into
VM_WARN_ON_ONCE as well, they are in a similar domain "should never
happen", but more reasonable to check for during early testing.

[david@redhat.com: use the _FOLIO variant where possible, per Lorenzo]
Link: https://lkml.kernel.org/r/844bd929-a551-48e3-a12e-285cd65ba580@redhat.com
Link: https://lkml.kernel.org/r/20250604140544.688711-1-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

David Hildenbrand and committed by
Andrew Morton
792b429d 7c33c6c4

+31 -22
+12
include/linux/mmdebug.h
··· 89 89 } \ 90 90 unlikely(__ret_warn_once); \ 91 91 }) 92 + #define VM_WARN_ON_ONCE_VMA(cond, vma) ({ \ 93 + static bool __section(".data..once") __warned; \ 94 + int __ret_warn_once = !!(cond); \ 95 + \ 96 + if (unlikely(__ret_warn_once && !__warned)) { \ 97 + dump_vma(vma); \ 98 + __warned = true; \ 99 + WARN_ON(1); \ 100 + } \ 101 + unlikely(__ret_warn_once); \ 102 + }) 92 103 #define VM_WARN_ON_VMG(cond, vmg) ({ \ 93 104 int __ret_warn = !!(cond); \ 94 105 \ ··· 126 115 #define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) 127 116 #define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) 128 117 #define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond) 118 + #define VM_WARN_ON_ONCE_VMA(cond, vma) BUILD_BUG_ON_INVALID(cond) 129 119 #define VM_WARN_ON_VMG(cond, vmg) BUILD_BUG_ON_INVALID(cond) 130 120 #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) 131 121 #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
+19 -22
mm/gup.c
··· 64 64 !folio_test_anon(folio)) 65 65 continue; 66 66 if (!folio_test_large(folio) || folio_test_hugetlb(folio)) 67 - VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page); 67 + VM_WARN_ON_ONCE_FOLIO(!PageAnonExclusive(&folio->page), folio); 68 68 else 69 69 /* Either a PTE-mapped or a PMD-mapped THP. */ 70 - VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) && 71 - !PageAnonExclusive(page), page); 70 + VM_WARN_ON_ONCE_PAGE(!PageAnonExclusive(&folio->page) && 71 + !PageAnonExclusive(page), page); 72 72 } 73 73 } 74 74 ··· 760 760 if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page)) 761 761 return ERR_PTR(-EMLINK); 762 762 763 - VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 764 - !PageAnonExclusive(page), page); 763 + VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) && 764 + !PageAnonExclusive(page), page); 765 765 766 766 ret = try_grab_folio(page_folio(page), 1, flags); 767 767 if (ret) ··· 899 899 goto out; 900 900 } 901 901 902 - VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 903 - !PageAnonExclusive(page), page); 902 + VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) && 903 + !PageAnonExclusive(page), page); 904 904 905 905 /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. 
*/ 906 906 ret = try_grab_folio(folio, 1, flags); ··· 1180 1180 if (unshare) { 1181 1181 fault_flags |= FAULT_FLAG_UNSHARE; 1182 1182 /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */ 1183 - VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE); 1183 + VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_WRITE); 1184 1184 } 1185 1185 1186 1186 ret = handle_mm_fault(vma, address, fault_flags, NULL); ··· 1760 1760 } 1761 1761 1762 1762 /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */ 1763 - if (!*locked) { 1764 - BUG_ON(ret < 0); 1765 - BUG_ON(ret >= nr_pages); 1766 - } 1763 + VM_WARN_ON_ONCE(!*locked && (ret < 0 || ret >= nr_pages)); 1767 1764 1768 1765 if (ret > 0) { 1769 1766 nr_pages -= ret; ··· 1805 1808 1806 1809 ret = mmap_read_lock_killable(mm); 1807 1810 if (ret) { 1808 - BUG_ON(ret > 0); 1809 1811 if (!pages_done) 1810 1812 pages_done = ret; 1811 1813 break; ··· 1815 1819 pages, locked); 1816 1820 if (!*locked) { 1817 1821 /* Continue to retry until we succeeded */ 1818 - BUG_ON(ret != 0); 1822 + VM_WARN_ON_ONCE(ret != 0); 1819 1823 goto retry; 1820 1824 } 1821 1825 if (ret != 1) { 1822 - BUG_ON(ret > 1); 1826 + VM_WARN_ON_ONCE(ret > 1); 1823 1827 if (!pages_done) 1824 1828 pages_done = ret; 1825 1829 break; ··· 1881 1885 int gup_flags; 1882 1886 long ret; 1883 1887 1884 - VM_BUG_ON(!PAGE_ALIGNED(start)); 1885 - VM_BUG_ON(!PAGE_ALIGNED(end)); 1886 - VM_BUG_ON_VMA(start < vma->vm_start, vma); 1887 - VM_BUG_ON_VMA(end > vma->vm_end, vma); 1888 + VM_WARN_ON_ONCE(!PAGE_ALIGNED(start)); 1889 + VM_WARN_ON_ONCE(!PAGE_ALIGNED(end)); 1890 + VM_WARN_ON_ONCE_VMA(start < vma->vm_start, vma); 1891 + VM_WARN_ON_ONCE_VMA(end > vma->vm_end, vma); 1888 1892 mmap_assert_locked(mm); 1889 1893 1890 1894 /* ··· 1953 1957 int gup_flags; 1954 1958 long ret; 1955 1959 1956 - VM_BUG_ON(!PAGE_ALIGNED(start)); 1957 - VM_BUG_ON(!PAGE_ALIGNED(end)); 1960 + VM_WARN_ON_ONCE(!PAGE_ALIGNED(start)); 1961 + VM_WARN_ON_ONCE(!PAGE_ALIGNED(end)); 1958 1962 mmap_assert_locked(mm); 1959 1963 
1960 1964 /* ··· 2910 2914 } else if (pte_special(pte)) 2911 2915 goto pte_unmap; 2912 2916 2913 - VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 2917 + /* If it's not marked as special it must have a valid memmap. */ 2918 + VM_WARN_ON_ONCE(!pfn_valid(pte_pfn(pte))); 2914 2919 page = pte_page(pte); 2915 2920 2916 2921 folio = try_grab_folio_fast(page, 1, flags);