Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/gup: change write parameter to flags in fast walk

In order to support more options in the GUP fast walk, change the write
parameter to flags throughout the call stack.

This patch does not change functionality and passes FOLL_WRITE where write
was previously used.

Link: http://lkml.kernel.org/r/20190328084422.29911-3-ira.weiny@intel.com
Link: http://lkml.kernel.org/r/20190317183438.2057-3-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Mike Marshall <hubcap@omnibond.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Ira Weiny; committed by Linus Torvalds.
Commit IDs: b798bec4 932f4a63

Diffstat: +26 -26
mm/gup.c
··· 1615 1615 1616 1616 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL 1617 1617 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, 1618 - int write, struct page **pages, int *nr) 1618 + unsigned int flags, struct page **pages, int *nr) 1619 1619 { 1620 1620 struct dev_pagemap *pgmap = NULL; 1621 1621 int nr_start = *nr, ret = 0; ··· 1633 1633 if (pte_protnone(pte)) 1634 1634 goto pte_unmap; 1635 1635 1636 - if (!pte_access_permitted(pte, write)) 1636 + if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 1637 1637 goto pte_unmap; 1638 1638 1639 1639 if (pte_devmap(pte)) { ··· 1685 1685 * useful to have gup_huge_pmd even if we can't operate on ptes. 1686 1686 */ 1687 1687 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, 1688 - int write, struct page **pages, int *nr) 1688 + unsigned int flags, struct page **pages, int *nr) 1689 1689 { 1690 1690 return 0; 1691 1691 } ··· 1768 1768 #endif 1769 1769 1770 1770 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, 1771 - unsigned long end, int write, struct page **pages, int *nr) 1771 + unsigned long end, unsigned int flags, struct page **pages, int *nr) 1772 1772 { 1773 1773 struct page *head, *page; 1774 1774 int refs; 1775 1775 1776 - if (!pmd_access_permitted(orig, write)) 1776 + if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) 1777 1777 return 0; 1778 1778 1779 1779 if (pmd_devmap(orig)) ··· 1806 1806 } 1807 1807 1808 1808 static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, 1809 - unsigned long end, int write, struct page **pages, int *nr) 1809 + unsigned long end, unsigned int flags, struct page **pages, int *nr) 1810 1810 { 1811 1811 struct page *head, *page; 1812 1812 int refs; 1813 1813 1814 - if (!pud_access_permitted(orig, write)) 1814 + if (!pud_access_permitted(orig, flags & FOLL_WRITE)) 1815 1815 return 0; 1816 1816 1817 1817 if (pud_devmap(orig)) ··· 1844 1844 } 1845 1845 1846 1846 static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, 
unsigned long addr, 1847 - unsigned long end, int write, 1847 + unsigned long end, unsigned int flags, 1848 1848 struct page **pages, int *nr) 1849 1849 { 1850 1850 int refs; 1851 1851 struct page *head, *page; 1852 1852 1853 - if (!pgd_access_permitted(orig, write)) 1853 + if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) 1854 1854 return 0; 1855 1855 1856 1856 BUILD_BUG_ON(pgd_devmap(orig)); ··· 1881 1881 } 1882 1882 1883 1883 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, 1884 - int write, struct page **pages, int *nr) 1884 + unsigned int flags, struct page **pages, int *nr) 1885 1885 { 1886 1886 unsigned long next; 1887 1887 pmd_t *pmdp; ··· 1904 1904 if (pmd_protnone(pmd)) 1905 1905 return 0; 1906 1906 1907 - if (!gup_huge_pmd(pmd, pmdp, addr, next, write, 1907 + if (!gup_huge_pmd(pmd, pmdp, addr, next, flags, 1908 1908 pages, nr)) 1909 1909 return 0; 1910 1910 ··· 1914 1914 * pmd format and THP pmd format 1915 1915 */ 1916 1916 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, 1917 - PMD_SHIFT, next, write, pages, nr)) 1917 + PMD_SHIFT, next, flags, pages, nr)) 1918 1918 return 0; 1919 - } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) 1919 + } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) 1920 1920 return 0; 1921 1921 } while (pmdp++, addr = next, addr != end); 1922 1922 ··· 1924 1924 } 1925 1925 1926 1926 static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, 1927 - int write, struct page **pages, int *nr) 1927 + unsigned int flags, struct page **pages, int *nr) 1928 1928 { 1929 1929 unsigned long next; 1930 1930 pud_t *pudp; ··· 1937 1937 if (pud_none(pud)) 1938 1938 return 0; 1939 1939 if (unlikely(pud_huge(pud))) { 1940 - if (!gup_huge_pud(pud, pudp, addr, next, write, 1940 + if (!gup_huge_pud(pud, pudp, addr, next, flags, 1941 1941 pages, nr)) 1942 1942 return 0; 1943 1943 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { 1944 1944 if (!gup_huge_pd(__hugepd(pud_val(pud)), 
addr, 1945 - PUD_SHIFT, next, write, pages, nr)) 1945 + PUD_SHIFT, next, flags, pages, nr)) 1946 1946 return 0; 1947 - } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) 1947 + } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr)) 1948 1948 return 0; 1949 1949 } while (pudp++, addr = next, addr != end); 1950 1950 ··· 1952 1952 } 1953 1953 1954 1954 static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, 1955 - int write, struct page **pages, int *nr) 1955 + unsigned int flags, struct page **pages, int *nr) 1956 1956 { 1957 1957 unsigned long next; 1958 1958 p4d_t *p4dp; ··· 1967 1967 BUILD_BUG_ON(p4d_huge(p4d)); 1968 1968 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { 1969 1969 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, 1970 - P4D_SHIFT, next, write, pages, nr)) 1970 + P4D_SHIFT, next, flags, pages, nr)) 1971 1971 return 0; 1972 - } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) 1972 + } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr)) 1973 1973 return 0; 1974 1974 } while (p4dp++, addr = next, addr != end); 1975 1975 ··· 1977 1977 } 1978 1978 1979 1979 static void gup_pgd_range(unsigned long addr, unsigned long end, 1980 - int write, struct page **pages, int *nr) 1980 + unsigned int flags, struct page **pages, int *nr) 1981 1981 { 1982 1982 unsigned long next; 1983 1983 pgd_t *pgdp; ··· 1990 1990 if (pgd_none(pgd)) 1991 1991 return; 1992 1992 if (unlikely(pgd_huge(pgd))) { 1993 - if (!gup_huge_pgd(pgd, pgdp, addr, next, write, 1993 + if (!gup_huge_pgd(pgd, pgdp, addr, next, flags, 1994 1994 pages, nr)) 1995 1995 return; 1996 1996 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { 1997 1997 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, 1998 - PGDIR_SHIFT, next, write, pages, nr)) 1998 + PGDIR_SHIFT, next, flags, pages, nr)) 1999 1999 return; 2000 - } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) 2000 + } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr)) 2001 2001 
return; 2002 2002 } while (pgdp++, addr = next, addr != end); 2003 2003 } ··· 2051 2051 2052 2052 if (gup_fast_permitted(start, nr_pages)) { 2053 2053 local_irq_save(flags); 2054 - gup_pgd_range(start, end, write, pages, &nr); 2054 + gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr); 2055 2055 local_irq_restore(flags); 2056 2056 } 2057 2057 ··· 2093 2093 2094 2094 if (gup_fast_permitted(start, nr_pages)) { 2095 2095 local_irq_disable(); 2096 - gup_pgd_range(addr, end, write, pages, &nr); 2096 + gup_pgd_range(addr, end, write ? FOLL_WRITE : 0, pages, &nr); 2097 2097 local_irq_enable(); 2098 2098 ret = nr; 2099 2099 }