Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: convert DAX lock/unlock page to lock/unlock folio

The one caller of DAX lock/unlock page already calls compound_head(), so
use page_folio() instead, then use a folio throughout the DAX code to
remove uses of page->mapping and page->index.

[jane.chu@oracle.com: add comment to mf_generic_kill_procs(), simplify mf_generic_kill_procs:folio initialization]
Link: https://lkml.kernel.org/r/20230908222336.186313-1-jane.chu@oracle.com
Link: https://lkml.kernel.org/r/20230822231314.349200-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Jane Chu <jane.chu@oracle.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jane Chu <jane.chu@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Matthew Wilcox (Oracle) and committed by
Andrew Morton
91e79d22 bc0c3357

+33 -30
+12 -12
fs/dax.c
··· 412 412 return NULL; 413 413 } 414 414 415 - /* 416 - * dax_lock_page - Lock the DAX entry corresponding to a page 417 - * @page: The page whose entry we want to lock 415 + /** 416 + * dax_lock_folio - Lock the DAX entry corresponding to a folio 417 + * @folio: The folio whose entry we want to lock 418 418 * 419 419 * Context: Process context. 420 - * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could 420 + * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could 421 421 * not be locked. 422 422 */ 423 - dax_entry_t dax_lock_page(struct page *page) 423 + dax_entry_t dax_lock_folio(struct folio *folio) 424 424 { 425 425 XA_STATE(xas, NULL, 0); 426 426 void *entry; 427 427 428 - /* Ensure page->mapping isn't freed while we look at it */ 428 + /* Ensure folio->mapping isn't freed while we look at it */ 429 429 rcu_read_lock(); 430 430 for (;;) { 431 - struct address_space *mapping = READ_ONCE(page->mapping); 431 + struct address_space *mapping = READ_ONCE(folio->mapping); 432 432 433 433 entry = NULL; 434 434 if (!mapping || !dax_mapping(mapping)) ··· 447 447 448 448 xas.xa = &mapping->i_pages; 449 449 xas_lock_irq(&xas); 450 - if (mapping != page->mapping) { 450 + if (mapping != folio->mapping) { 451 451 xas_unlock_irq(&xas); 452 452 continue; 453 453 } 454 - xas_set(&xas, page->index); 454 + xas_set(&xas, folio->index); 455 455 entry = xas_load(&xas); 456 456 if (dax_is_locked(entry)) { 457 457 rcu_read_unlock(); ··· 467 467 return (dax_entry_t)entry; 468 468 } 469 469 470 - void dax_unlock_page(struct page *page, dax_entry_t cookie) 470 + void dax_unlock_folio(struct folio *folio, dax_entry_t cookie) 471 471 { 472 - struct address_space *mapping = page->mapping; 473 - XA_STATE(xas, &mapping->i_pages, page->index); 472 + struct address_space *mapping = folio->mapping; 473 + XA_STATE(xas, &mapping->i_pages, folio->index); 474 474 475 475 if (S_ISCHR(mapping->host->i_mode)) 476 476 return;
+5 -5
include/linux/dax.h
··· 159 159 160 160 struct page *dax_layout_busy_page(struct address_space *mapping); 161 161 struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end); 162 - dax_entry_t dax_lock_page(struct page *page); 163 - void dax_unlock_page(struct page *page, dax_entry_t cookie); 162 + dax_entry_t dax_lock_folio(struct folio *folio); 163 + void dax_unlock_folio(struct folio *folio, dax_entry_t cookie); 164 164 dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, 165 165 unsigned long index, struct page **page); 166 166 void dax_unlock_mapping_entry(struct address_space *mapping, ··· 182 182 return -EOPNOTSUPP; 183 183 } 184 184 185 - static inline dax_entry_t dax_lock_page(struct page *page) 185 + static inline dax_entry_t dax_lock_folio(struct folio *folio) 186 186 { 187 - if (IS_DAX(page->mapping->host)) 187 + if (IS_DAX(folio->mapping->host)) 188 188 return ~0UL; 189 189 return 0; 190 190 } 191 191 192 - static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) 192 + static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie) 193 193 { 194 194 } 195 195
+16 -13
mm/memory-failure.c
··· 1713 1713 kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags); 1714 1714 } 1715 1715 1716 + /* 1717 + * Only dev_pagemap pages get here, such as fsdax when the filesystem 1718 + * either do not claim or fails to claim a hwpoison event, or devdax. 1719 + * The fsdax pages are initialized per base page, and the devdax pages 1720 + * could be initialized either as base pages, or as compound pages with 1721 + * vmemmap optimization enabled. Devdax is simplistic in its dealing with 1722 + * hwpoison, such that, if a subpage of a compound page is poisoned, 1723 + * simply mark the compound head page is by far sufficient. 1724 + */ 1716 1725 static int mf_generic_kill_procs(unsigned long long pfn, int flags, 1717 1726 struct dev_pagemap *pgmap) 1718 1727 { 1719 - struct page *page = pfn_to_page(pfn); 1728 + struct folio *folio = pfn_folio(pfn); 1720 1729 LIST_HEAD(to_kill); 1721 1730 dax_entry_t cookie; 1722 1731 int rc = 0; 1723 - 1724 - /* 1725 - * Pages instantiated by device-dax (not filesystem-dax) 1726 - * may be compound pages. 1727 - */ 1728 - page = compound_head(page); 1729 1732 1730 1733 /* 1731 1734 * Prevent the inode from being freed while we are interrogating ··· 1737 1734 * also prevents changes to the mapping of this pfn until 1738 1735 * poison signaling is complete. 1739 1736 */ 1740 - cookie = dax_lock_page(page); 1737 + cookie = dax_lock_folio(folio); 1741 1738 if (!cookie) 1742 1739 return -EBUSY; 1743 1740 1744 - if (hwpoison_filter(page)) { 1741 + if (hwpoison_filter(&folio->page)) { 1745 1742 rc = -EOPNOTSUPP; 1746 1743 goto unlock; 1747 1744 } ··· 1763 1760 * Use this flag as an indication that the dax page has been 1764 1761 * remapped UC to prevent speculative consumption of poison. 1765 1762 */ 1766 - SetPageHWPoison(page); 1763 + SetPageHWPoison(&folio->page); 1767 1764 1768 1765 /* 1769 1766 * Unlike System-RAM there is no possibility to swap in a ··· 1772 1769 * SIGBUS (i.e. 
MF_MUST_KILL) 1773 1770 */ 1774 1771 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; 1775 - collect_procs(page, &to_kill, true); 1772 + collect_procs(&folio->page, &to_kill, true); 1776 1773 1777 - unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags); 1774 + unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags); 1778 1775 unlock: 1779 - dax_unlock_page(page, cookie); 1776 + dax_unlock_folio(folio, cookie); 1780 1777 return rc; 1781 1778 } 1782 1779