mm: add new 'read_cache_page_gfp()' helper function

It's a simplified 'read_cache_page()' which takes a page allocation
flag, so that different paths can control how aggressive the memory
allocations are that populate an address space.

In particular, the intel GPU object mapping code wants to be able to do
a certain amount of its own internal memory management by automatically
shrinking the address space when memory starts getting tight. This
allows it to dynamically use different memory allocation policies on a
per-allocation basis, rather than depend on the (static) address space
gfp policy.

The actual new function is a one-liner, but re-organizing the helper
functions to the point where you can do this with a single line of code
is what most of the patch is all about.

Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

+70 -32
+2
include/linux/pagemap.h
··· 253 extern struct page * read_cache_page(struct address_space *mapping, 254 pgoff_t index, filler_t *filler, 255 void *data); 256 extern int read_cache_pages(struct address_space *mapping, 257 struct list_head *pages, filler_t *filler, void *data); 258
··· 253 extern struct page * read_cache_page(struct address_space *mapping, 254 pgoff_t index, filler_t *filler, 255 void *data); 256 + extern struct page * read_cache_page_gfp(struct address_space *mapping, 257 + pgoff_t index, gfp_t gfp_mask); 258 extern int read_cache_pages(struct address_space *mapping, 259 struct list_head *pages, filler_t *filler, void *data); 260
+68 -32
mm/filemap.c
··· 1634 static struct page *__read_cache_page(struct address_space *mapping, 1635 pgoff_t index, 1636 int (*filler)(void *,struct page*), 1637 - void *data) 1638 { 1639 struct page *page; 1640 int err; 1641 repeat: 1642 page = find_get_page(mapping, index); 1643 if (!page) { 1644 - page = page_cache_alloc_cold(mapping); 1645 if (!page) 1646 return ERR_PTR(-ENOMEM); 1647 err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); ··· 1662 return page; 1663 } 1664 1665 - /** 1666 - * read_cache_page_async - read into page cache, fill it if needed 1667 - * @mapping: the page's address_space 1668 - * @index: the page index 1669 - * @filler: function to perform the read 1670 - * @data: destination for read data 1671 - * 1672 - * Same as read_cache_page, but don't wait for page to become unlocked 1673 - * after submitting it to the filler. 1674 - * 1675 - * Read into the page cache. If a page already exists, and PageUptodate() is 1676 - * not set, try to fill the page but don't wait for it to become unlocked. 1677 - * 1678 - * If the page does not get brought uptodate, return -EIO. 
1679 - */ 1680 - struct page *read_cache_page_async(struct address_space *mapping, 1681 pgoff_t index, 1682 int (*filler)(void *,struct page*), 1683 - void *data) 1684 { 1685 struct page *page; 1686 int err; 1687 1688 retry: 1689 - page = __read_cache_page(mapping, index, filler, data); 1690 if (IS_ERR(page)) 1691 return page; 1692 if (PageUptodate(page)) ··· 1698 mark_page_accessed(page); 1699 return page; 1700 } 1701 EXPORT_SYMBOL(read_cache_page_async); 1702 1703 /** 1704 * read_cache_page - read into page cache, fill it if needed ··· 1776 int (*filler)(void *,struct page*), 1777 void *data) 1778 { 1779 - struct page *page; 1780 - 1781 - page = read_cache_page_async(mapping, index, filler, data); 1782 - if (IS_ERR(page)) 1783 - goto out; 1784 - wait_on_page_locked(page); 1785 - if (!PageUptodate(page)) { 1786 - page_cache_release(page); 1787 - page = ERR_PTR(-EIO); 1788 - } 1789 - out: 1790 - return page; 1791 } 1792 EXPORT_SYMBOL(read_cache_page); 1793
··· 1634 static struct page *__read_cache_page(struct address_space *mapping, 1635 pgoff_t index, 1636 int (*filler)(void *,struct page*), 1637 + void *data, 1638 + gfp_t gfp) 1639 { 1640 struct page *page; 1641 int err; 1642 repeat: 1643 page = find_get_page(mapping, index); 1644 if (!page) { 1645 + page = __page_cache_alloc(gfp | __GFP_COLD); 1646 if (!page) 1647 return ERR_PTR(-ENOMEM); 1648 err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); ··· 1661 return page; 1662 } 1663 1664 + static struct page *do_read_cache_page(struct address_space *mapping, 1665 pgoff_t index, 1666 int (*filler)(void *,struct page*), 1667 + void *data, 1668 + gfp_t gfp) 1669 + 1670 { 1671 struct page *page; 1672 int err; 1673 1674 retry: 1675 + page = __read_cache_page(mapping, index, filler, data, gfp); 1676 if (IS_ERR(page)) 1677 return page; 1678 if (PageUptodate(page)) ··· 1710 mark_page_accessed(page); 1711 return page; 1712 } 1713 + 1714 + /** 1715 + * read_cache_page_async - read into page cache, fill it if needed 1716 + * @mapping: the page's address_space 1717 + * @index: the page index 1718 + * @filler: function to perform the read 1719 + * @data: destination for read data 1720 + * 1721 + * Same as read_cache_page, but don't wait for page to become unlocked 1722 + * after submitting it to the filler. 1723 + * 1724 + * Read into the page cache. If a page already exists, and PageUptodate() is 1725 + * not set, try to fill the page but don't wait for it to become unlocked. 1726 + * 1727 + * If the page does not get brought uptodate, return -EIO. 
1728 + */ 1729 + struct page *read_cache_page_async(struct address_space *mapping, 1730 + pgoff_t index, 1731 + int (*filler)(void *,struct page*), 1732 + void *data) 1733 + { 1734 + return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); 1735 + } 1736 EXPORT_SYMBOL(read_cache_page_async); 1737 + 1738 + static struct page *wait_on_page_read(struct page *page) 1739 + { 1740 + if (!IS_ERR(page)) { 1741 + wait_on_page_locked(page); 1742 + if (!PageUptodate(page)) { 1743 + page_cache_release(page); 1744 + page = ERR_PTR(-EIO); 1745 + } 1746 + } 1747 + return page; 1748 + } 1749 + 1750 + /** 1751 + * read_cache_page_gfp - read into page cache, using specified page allocation flags. 1752 + * @mapping: the page's address_space 1753 + * @index: the page index 1754 + * @gfp: the page allocator flags to use if allocating 1755 + * 1756 + * This is the same as "read_mapping_page(mapping, index, NULL)", but with 1757 + * any new page allocations done using the specified allocation flags. Note 1758 + * that the Radix tree operations will still use GFP_KERNEL, so you can't 1759 + * expect to do this atomically or anything like that - but you can pass in 1760 + * other page requirements. 1761 + * 1762 + * If the page does not get brought uptodate, return -EIO. 1763 + */ 1764 + struct page *read_cache_page_gfp(struct address_space *mapping, 1765 + pgoff_t index, 1766 + gfp_t gfp) 1767 + { 1768 + filler_t *filler = (filler_t *)mapping->a_ops->readpage; 1769 + 1770 + return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp)); 1771 + } 1772 + EXPORT_SYMBOL(read_cache_page_gfp); 1773 1774 /** 1775 * read_cache_page - read into page cache, fill it if needed ··· 1729 int (*filler)(void *,struct page*), 1730 void *data) 1731 { 1732 + return wait_on_page_read(read_cache_page_async(mapping, index, filler, data)); 1733 } 1734 EXPORT_SYMBOL(read_cache_page); 1735