Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/gup: Overload get_user_pages() functions

The concept here was a suggestion from Ingo. The implementation
horrors are all mine.

This allows get_user_pages(), get_user_pages_unlocked(), and
get_user_pages_locked() to be called with or without the
leading tsk/mm arguments. We will give a compile-time warning
about the old style being __deprecated and we will also
WARN_ON() if the non-remote version is used for a remote-style
access.

Doing this, folks will get nice warnings and will not break the
build. This should be nice for -next and will hopefully let
developers fix up their own code instead of maintainers needing
to do it at merge time.

The way we do this is hideous. It uses the __VA_ARGS__ macro
functionality to call different functions based on the number
of arguments passed to the macro.

There's an additional hack to ensure that our EXPORT_SYMBOL()
of the deprecated symbols doesn't trigger a warning.

We should be able to remove this mess as soon as -rc1 hits in
the release after this is merged.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Dominik Dingel <dingel@linux.vnet.ibm.com>
Cc: Geliang Tang <geliangtang@163.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Leon Romanovsky <leon@leon.nu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Mateusz Guzik <mguzik@redhat.com>
Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xie XiuQi <xiexiuqi@huawei.com>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210155.73222EE1@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Dave Hansen; committed by Ingo Molnar.
Commit: cde70140 (parent: 1e987790)

+158 -46
+64 -10
include/linux/mm.h
··· 1229 1229 unsigned long start, unsigned long nr_pages, 1230 1230 int write, int force, struct page **pages, 1231 1231 struct vm_area_struct **vmas); 1232 - long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1233 - unsigned long start, unsigned long nr_pages, 1234 - int write, int force, struct page **pages, 1235 - struct vm_area_struct **vmas); 1236 - long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, 1237 - unsigned long start, unsigned long nr_pages, 1238 - int write, int force, struct page **pages, 1239 - int *locked); 1232 + long get_user_pages6(unsigned long start, unsigned long nr_pages, 1233 + int write, int force, struct page **pages, 1234 + struct vm_area_struct **vmas); 1235 + long get_user_pages_locked6(unsigned long start, unsigned long nr_pages, 1236 + int write, int force, struct page **pages, int *locked); 1240 1237 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 1241 1238 unsigned long start, unsigned long nr_pages, 1242 1239 int write, int force, struct page **pages, 1243 1240 unsigned int gup_flags); 1244 - long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 1245 - unsigned long start, unsigned long nr_pages, 1241 + long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages, 1246 1242 int write, int force, struct page **pages); 1247 1243 int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1248 1244 struct page **pages); 1245 + 1246 + /* suppress warnings from use in EXPORT_SYMBOL() */ 1247 + #ifndef __DISABLE_GUP_DEPRECATED 1248 + #define __gup_deprecated __deprecated 1249 + #else 1250 + #define __gup_deprecated 1251 + #endif 1252 + /* 1253 + * These macros provide backward-compatibility with the old 1254 + * get_user_pages() variants which took tsk/mm. These 1255 + * functions/macros provide both compile-time __deprecated so we 1256 + * can catch old-style use and not break the build. 
The actual 1257 + * functions also have WARN_ON()s to let us know at runtime if 1258 + * the get_user_pages() should have been the "remote" variant. 1259 + * 1260 + * These are hideous, but temporary. 1261 + * 1262 + * If you run into one of these __deprecated warnings, look 1263 + * at how you are calling get_user_pages(). If you are calling 1264 + * it with current/current->mm as the first two arguments, 1265 + * simply remove those arguments. The behavior will be the same 1266 + * as it is now. If you are calling it on another task, use 1267 + * get_user_pages_remote() instead. 1268 + * 1269 + * Any questions? Ask Dave Hansen <dave@sr71.net> 1270 + */ 1271 + long 1272 + __gup_deprecated 1273 + get_user_pages8(struct task_struct *tsk, struct mm_struct *mm, 1274 + unsigned long start, unsigned long nr_pages, 1275 + int write, int force, struct page **pages, 1276 + struct vm_area_struct **vmas); 1277 + #define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...) \ 1278 + get_user_pages 1279 + #define get_user_pages(...) GUP_MACRO(__VA_ARGS__, \ 1280 + get_user_pages8, x, \ 1281 + get_user_pages6, x, x, x, x, x)(__VA_ARGS__) 1282 + 1283 + __gup_deprecated 1284 + long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm, 1285 + unsigned long start, unsigned long nr_pages, 1286 + int write, int force, struct page **pages, 1287 + int *locked); 1288 + #define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \ 1289 + get_user_pages_locked 1290 + #define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__, \ 1291 + get_user_pages_locked8, x, \ 1292 + get_user_pages_locked6, x, x, x, x)(__VA_ARGS__) 1293 + 1294 + __gup_deprecated 1295 + long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm, 1296 + unsigned long start, unsigned long nr_pages, 1297 + int write, int force, struct page **pages); 1298 + #define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...) 
\ 1299 + get_user_pages_unlocked 1300 + #define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__, \ 1301 + get_user_pages_unlocked7, x, \ 1302 + get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__) 1249 1303 1250 1304 /* Container for pinned pfns / pages */ 1251 1305 struct frame_vector {
+48 -14
mm/gup.c
··· 1 + #define __DISABLE_GUP_DEPRECATED 1 1 2 #include <linux/kernel.h> 2 3 #include <linux/errno.h> 3 4 #include <linux/err.h> ··· 808 807 * if (locked) 809 808 * up_read(&mm->mmap_sem); 810 809 */ 811 - long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, 812 - unsigned long start, unsigned long nr_pages, 810 + long get_user_pages_locked6(unsigned long start, unsigned long nr_pages, 813 811 int write, int force, struct page **pages, 814 812 int *locked) 815 813 { 816 - return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, 817 - pages, NULL, locked, true, FOLL_TOUCH); 814 + return __get_user_pages_locked(current, current->mm, start, nr_pages, 815 + write, force, pages, NULL, locked, true, 816 + FOLL_TOUCH); 818 817 } 819 - EXPORT_SYMBOL(get_user_pages_locked); 818 + EXPORT_SYMBOL(get_user_pages_locked6); 820 819 821 820 /* 822 821 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to ··· 861 860 * or if "force" shall be set to 1 (get_user_pages_fast misses the 862 861 * "force" parameter). 863 862 */ 864 - long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 865 - unsigned long start, unsigned long nr_pages, 863 + long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages, 866 864 int write, int force, struct page **pages) 867 865 { 868 - return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write, 869 - force, pages, FOLL_TOUCH); 866 + return __get_user_pages_unlocked(current, current->mm, start, nr_pages, 867 + write, force, pages, FOLL_TOUCH); 870 868 } 871 - EXPORT_SYMBOL(get_user_pages_unlocked); 869 + EXPORT_SYMBOL(get_user_pages_unlocked5); 872 870 873 871 /* 874 872 * get_user_pages_remote() - pin user pages in memory ··· 939 939 * This is the same as get_user_pages_remote() for the time 940 940 * being. 
941 941 */ 942 - long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 943 - unsigned long start, unsigned long nr_pages, 942 + long get_user_pages6(unsigned long start, unsigned long nr_pages, 944 943 int write, int force, struct page **pages, 945 944 struct vm_area_struct **vmas) 946 945 { 947 - return __get_user_pages_locked(tsk, mm, start, nr_pages, 946 + return __get_user_pages_locked(current, current->mm, start, nr_pages, 948 947 write, force, pages, vmas, NULL, false, 949 948 FOLL_TOUCH); 950 949 } 951 - EXPORT_SYMBOL(get_user_pages); 950 + EXPORT_SYMBOL(get_user_pages6); 952 951 953 952 /** 954 953 * populate_vma_page_range() - populate a range of pages in the vma. ··· 1483 1484 } 1484 1485 1485 1486 #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */ 1487 + 1488 + long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm, 1489 + unsigned long start, unsigned long nr_pages, 1490 + int write, int force, struct page **pages, 1491 + struct vm_area_struct **vmas) 1492 + { 1493 + WARN_ONCE(tsk != current, "get_user_pages() called on remote task"); 1494 + WARN_ONCE(mm != current->mm, "get_user_pages() called on remote mm"); 1495 + 1496 + return get_user_pages6(start, nr_pages, write, force, pages, vmas); 1497 + } 1498 + EXPORT_SYMBOL(get_user_pages8); 1499 + 1500 + long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm, 1501 + unsigned long start, unsigned long nr_pages, 1502 + int write, int force, struct page **pages, int *locked) 1503 + { 1504 + WARN_ONCE(tsk != current, "get_user_pages_locked() called on remote task"); 1505 + WARN_ONCE(mm != current->mm, "get_user_pages_locked() called on remote mm"); 1506 + 1507 + return get_user_pages_locked6(start, nr_pages, write, force, pages, locked); 1508 + } 1509 + EXPORT_SYMBOL(get_user_pages_locked8); 1510 + 1511 + long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm, 1512 + unsigned long start, unsigned long nr_pages, 1513 + int write, int force, struct page 
**pages) 1514 + { 1515 + WARN_ONCE(tsk != current, "get_user_pages_unlocked() called on remote task"); 1516 + WARN_ONCE(mm != current->mm, "get_user_pages_unlocked() called on remote mm"); 1517 + 1518 + return get_user_pages_unlocked5(start, nr_pages, write, force, pages); 1519 + } 1520 + EXPORT_SYMBOL(get_user_pages_unlocked7); 1521 +
+45 -19
mm/nommu.c
··· 15 15 16 16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 17 18 + #define __DISABLE_GUP_DEPRECATED 19 + 18 20 #include <linux/export.h> 19 21 #include <linux/mm.h> 20 22 #include <linux/vmacache.h> ··· 184 182 * slab page or a secondary page from a compound page 185 183 * - don't permit access to VMAs that don't support it, such as I/O mappings 186 184 */ 187 - long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 188 - unsigned long start, unsigned long nr_pages, 185 + long get_user_pages6(unsigned long start, unsigned long nr_pages, 189 186 int write, int force, struct page **pages, 190 187 struct vm_area_struct **vmas) 191 188 { ··· 195 194 if (force) 196 195 flags |= FOLL_FORCE; 197 196 198 - return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, 199 - NULL); 197 + return __get_user_pages(current, current->mm, start, nr_pages, flags, 198 + pages, vmas, NULL); 200 199 } 201 - EXPORT_SYMBOL(get_user_pages); 200 + EXPORT_SYMBOL(get_user_pages6); 202 201 203 - long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, 204 - unsigned long start, unsigned long nr_pages, 205 - int write, int force, struct page **pages, 206 - int *locked) 202 + long get_user_pages_locked6(unsigned long start, unsigned long nr_pages, 203 + int write, int force, struct page **pages, 204 + int *locked) 207 205 { 208 - return get_user_pages(tsk, mm, start, nr_pages, write, force, 209 - pages, NULL); 206 + return get_user_pages6(start, nr_pages, write, force, pages, NULL); 210 207 } 211 - EXPORT_SYMBOL(get_user_pages_locked); 208 + EXPORT_SYMBOL(get_user_pages_locked6); 212 209 213 210 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 214 211 unsigned long start, unsigned long nr_pages, ··· 215 216 { 216 217 long ret; 217 218 down_read(&mm->mmap_sem); 218 - ret = get_user_pages(tsk, mm, start, nr_pages, write, force, 219 - pages, NULL); 219 + ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages, 220 + 
NULL, NULL); 220 221 up_read(&mm->mmap_sem); 221 222 return ret; 222 223 } 223 224 EXPORT_SYMBOL(__get_user_pages_unlocked); 224 225 225 - long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, 226 - unsigned long start, unsigned long nr_pages, 226 + long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages, 227 227 int write, int force, struct page **pages) 228 228 { 229 - return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write, 230 - force, pages, 0); 229 + return __get_user_pages_unlocked(current, current->mm, start, nr_pages, 230 + write, force, pages, 0); 231 231 } 232 - EXPORT_SYMBOL(get_user_pages_unlocked); 232 + EXPORT_SYMBOL(get_user_pages_unlocked5); 233 233 234 234 /** 235 235 * follow_pfn - look up PFN at a user virtual address ··· 2106 2108 return 0; 2107 2109 } 2108 2110 subsys_initcall(init_admin_reserve); 2111 + 2112 + long get_user_pages8(struct task_struct *tsk, struct mm_struct *mm, 2113 + unsigned long start, unsigned long nr_pages, 2114 + int write, int force, struct page **pages, 2115 + struct vm_area_struct **vmas) 2116 + { 2117 + return get_user_pages6(start, nr_pages, write, force, pages, vmas); 2118 + } 2119 + EXPORT_SYMBOL(get_user_pages8); 2120 + 2121 + long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm, 2122 + unsigned long start, unsigned long nr_pages, 2123 + int write, int force, struct page **pages, 2124 + int *locked) 2125 + { 2126 + return get_user_pages_locked6(start, nr_pages, write, 2127 + force, pages, locked); 2128 + } 2129 + EXPORT_SYMBOL(get_user_pages_locked8); 2130 + 2131 + long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm, 2132 + unsigned long start, unsigned long nr_pages, 2133 + int write, int force, struct page **pages) 2134 + { 2135 + return get_user_pages_unlocked5(start, nr_pages, write, force, pages); 2136 + } 2137 + EXPORT_SYMBOL(get_user_pages_unlocked7); 2138 +
+1 -3
mm/util.c
··· 283 283 int __weak get_user_pages_fast(unsigned long start, 284 284 int nr_pages, int write, struct page **pages) 285 285 { 286 - struct mm_struct *mm = current->mm; 287 - return get_user_pages_unlocked(current, mm, start, nr_pages, 288 - write, 0, pages); 286 + return get_user_pages_unlocked(start, nr_pages, write, 0, pages); 289 287 } 290 288 EXPORT_SYMBOL_GPL(get_user_pages_fast); 291 289