Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmap locking API: convert mmap_sem API comments

Convert comments that reference old mmap_sem APIs to reference
corresponding new mmap locking APIs instead.

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-12-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Michel Lespinasse and committed by
Linus Torvalds
3e4e28c5 da1c55f1

+43 -43
+3 -3
Documentation/vm/hmm.rst
··· 191 191 192 192 again: 193 193 range.notifier_seq = mmu_interval_read_begin(&interval_sub); 194 - down_read(&mm->mmap_sem); 194 + mmap_read_lock(mm); 195 195 ret = hmm_range_fault(&range); 196 196 if (ret) { 197 - up_read(&mm->mmap_sem); 197 + mmap_read_unlock(mm); 198 198 if (ret == -EBUSY) 199 199 goto again; 200 200 return ret; 201 201 } 202 - up_read(&mm->mmap_sem); 202 + mmap_read_unlock(mm); 203 203 204 204 take_lock(driver->update); 205 205 if (mmu_interval_read_retry(&ni, range.notifier_seq)) {
+1 -1
arch/alpha/mm/fault.c
··· 171 171 if (fault & VM_FAULT_RETRY) { 172 172 flags |= FAULT_FLAG_TRIED; 173 173 174 - /* No need to up_read(&mm->mmap_sem) as we would 174 + /* No need to mmap_read_unlock(mm) as we would 175 175 * have already released it in __lock_page_or_retry 176 176 * in mm/filemap.c. 177 177 */
+1 -1
arch/ia64/mm/fault.c
··· 173 173 if (fault & VM_FAULT_RETRY) { 174 174 flags |= FAULT_FLAG_TRIED; 175 175 176 - /* No need to up_read(&mm->mmap_sem) as we would 176 + /* No need to mmap_read_unlock(mm) as we would 177 177 * have already released it in __lock_page_or_retry 178 178 * in mm/filemap.c. 179 179 */
+1 -1
arch/m68k/mm/fault.c
··· 165 165 flags |= FAULT_FLAG_TRIED; 166 166 167 167 /* 168 - * No need to up_read(&mm->mmap_sem) as we would 168 + * No need to mmap_read_unlock(mm) as we would 169 169 * have already released it in __lock_page_or_retry 170 170 * in mm/filemap.c. 171 171 */
+1 -1
arch/microblaze/mm/fault.c
··· 238 238 flags |= FAULT_FLAG_TRIED; 239 239 240 240 /* 241 - * No need to up_read(&mm->mmap_sem) as we would 241 + * No need to mmap_read_unlock(mm) as we would 242 242 * have already released it in __lock_page_or_retry 243 243 * in mm/filemap.c. 244 244 */
+1 -1
arch/mips/mm/fault.c
··· 181 181 flags |= FAULT_FLAG_TRIED; 182 182 183 183 /* 184 - * No need to up_read(&mm->mmap_sem) as we would 184 + * No need to mmap_read_unlock(mm) as we would 185 185 * have already released it in __lock_page_or_retry 186 186 * in mm/filemap.c. 187 187 */
+1 -1
arch/nds32/mm/fault.c
··· 247 247 if (fault & VM_FAULT_RETRY) { 248 248 flags |= FAULT_FLAG_TRIED; 249 249 250 - /* No need to up_read(&mm->mmap_sem) as we would 250 + /* No need to mmap_read_unlock(mm) as we would 251 251 * have already released it in __lock_page_or_retry 252 252 * in mm/filemap.c. 253 253 */
+1 -1
arch/nios2/mm/fault.c
··· 160 160 flags |= FAULT_FLAG_TRIED; 161 161 162 162 /* 163 - * No need to up_read(&mm->mmap_sem) as we would 163 + * No need to mmap_read_unlock(mm) as we would 164 164 * have already released it in __lock_page_or_retry 165 165 * in mm/filemap.c. 166 166 */
+1 -1
arch/openrisc/mm/fault.c
··· 183 183 if (fault & VM_FAULT_RETRY) { 184 184 flags |= FAULT_FLAG_TRIED; 185 185 186 - /* No need to up_read(&mm->mmap_sem) as we would 186 + /* No need to mmap_read_unlock(mm) as we would 187 187 * have already released it in __lock_page_or_retry 188 188 * in mm/filemap.c. 189 189 */
+1 -1
arch/parisc/mm/fault.c
··· 329 329 current->min_flt++; 330 330 if (fault & VM_FAULT_RETRY) { 331 331 /* 332 - * No need to up_read(&mm->mmap_sem) as we would 332 + * No need to mmap_read_unlock(mm) as we would 333 333 * have already released it in __lock_page_or_retry 334 334 * in mm/filemap.c. 335 335 */
+1 -1
arch/riscv/mm/fault.c
··· 147 147 flags |= FAULT_FLAG_TRIED; 148 148 149 149 /* 150 - * No need to up_read(&mm->mmap_sem) as we would 150 + * No need to mmap_read_unlock(mm) as we would 151 151 * have already released it in __lock_page_or_retry 152 152 * in mm/filemap.c. 153 153 */
+1 -1
arch/sh/mm/fault.c
··· 502 502 flags |= FAULT_FLAG_TRIED; 503 503 504 504 /* 505 - * No need to up_read(&mm->mmap_sem) as we would 505 + * No need to mmap_read_unlock(mm) as we would 506 506 * have already released it in __lock_page_or_retry 507 507 * in mm/filemap.c. 508 508 */
+1 -1
arch/sparc/mm/fault_32.c
··· 262 262 if (fault & VM_FAULT_RETRY) { 263 263 flags |= FAULT_FLAG_TRIED; 264 264 265 - /* No need to up_read(&mm->mmap_sem) as we would 265 + /* No need to mmap_read_unlock(mm) as we would 266 266 * have already released it in __lock_page_or_retry 267 267 * in mm/filemap.c. 268 268 */
+1 -1
arch/sparc/mm/fault_64.c
··· 450 450 if (fault & VM_FAULT_RETRY) { 451 451 flags |= FAULT_FLAG_TRIED; 452 452 453 - /* No need to up_read(&mm->mmap_sem) as we would 453 + /* No need to mmap_read_unlock(mm) as we would 454 454 * have already released it in __lock_page_or_retry 455 455 * in mm/filemap.c. 456 456 */
+1 -1
arch/xtensa/mm/fault.c
··· 130 130 if (fault & VM_FAULT_RETRY) { 131 131 flags |= FAULT_FLAG_TRIED; 132 132 133 - /* No need to up_read(&mm->mmap_sem) as we would 133 + /* No need to mmap_read_unlock(mm) as we would 134 134 * have already released it in __lock_page_or_retry 135 135 * in mm/filemap.c. 136 136 */
+2 -2
drivers/android/binder_alloc.c
··· 933 933 if (!mmget_not_zero(mm)) 934 934 goto err_mmget; 935 935 if (!mmap_read_trylock(mm)) 936 - goto err_down_read_mmap_sem_failed; 936 + goto err_mmap_read_lock_failed; 937 937 vma = binder_alloc_get_vma(alloc); 938 938 939 939 list_lru_isolate(lru, item); ··· 960 960 mutex_unlock(&alloc->mutex); 961 961 return LRU_REMOVED_RETRY; 962 962 963 - err_down_read_mmap_sem_failed: 963 + err_mmap_read_lock_failed: 964 964 mmput_async(mm); 965 965 err_mmget: 966 966 err_page_already_freed:
+1 -1
fs/hugetlbfs/inode.c
··· 187 187 } 188 188 189 189 /* 190 - * Called under down_write(mmap_sem). 190 + * Called under mmap_write_lock(mm). 191 191 */ 192 192 193 193 #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+1 -1
fs/userfaultfd.c
··· 1248 1248 /* 1249 1249 * To be sure waitqueue_active() is not reordered by the CPU 1250 1250 * before the pagetable update, use an explicit SMP memory 1251 - * barrier here. PT lock release or up_read(mmap_sem) still 1251 + * barrier here. PT lock release or mmap_read_unlock(mm) still 1252 1252 * have release semantics that can allow the 1253 1253 * waitqueue_active() to be reordered before the pte update. 1254 1254 */
+1 -1
mm/filemap.c
··· 1373 1373 * Return values: 1374 1374 * 1 - page is locked; mmap_sem is still held. 1375 1375 * 0 - page is not locked. 1376 - * mmap_sem has been released (up_read()), unless flags had both 1376 + * mmap_lock has been released (mmap_read_unlock()), unless flags had both 1377 1377 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in 1378 1378 * which case mmap_sem is still held. 1379 1379 *
+6 -6
mm/gup.c
··· 1993 1993 /** 1994 1994 * get_user_pages_locked() is suitable to replace the form: 1995 1995 * 1996 - * down_read(&mm->mmap_sem); 1996 + * mmap_read_lock(mm); 1997 1997 * do_something() 1998 1998 * get_user_pages(tsk, mm, ..., pages, NULL); 1999 - * up_read(&mm->mmap_sem); 1999 + * mmap_read_unlock(mm); 2000 2000 * 2001 2001 * to: 2002 2002 * 2003 2003 * int locked = 1; 2004 - * down_read(&mm->mmap_sem); 2004 + * mmap_read_lock(mm); 2005 2005 * do_something() 2006 2006 * get_user_pages_locked(tsk, mm, ..., pages, &locked); 2007 2007 * if (locked) 2008 - * up_read(&mm->mmap_sem); 2008 + * mmap_read_unlock(mm); 2009 2009 * 2010 2010 * @start: starting user address 2011 2011 * @nr_pages: number of pages from start to pin ··· 2050 2050 /* 2051 2051 * get_user_pages_unlocked() is suitable to replace the form: 2052 2052 * 2053 - * down_read(&mm->mmap_sem); 2053 + * mmap_read_lock(mm); 2054 2054 * get_user_pages(tsk, mm, ..., pages, NULL); 2055 - * up_read(&mm->mmap_sem); 2055 + * mmap_read_unlock(mm); 2056 2056 * 2057 2057 * with: 2058 2058 *
+2 -2
mm/huge_memory.c
··· 1833 1833 goto unlock; 1834 1834 1835 1835 /* 1836 - * In case prot_numa, we are under down_read(mmap_sem). It's critical 1836 + * In case prot_numa, we are under mmap_read_lock(mm). It's critical 1837 1837 * to not clear pmd intermittently to avoid race with MADV_DONTNEED 1838 - * which is also under down_read(mmap_sem): 1838 + * which is also under mmap_read_lock(mm): 1839 1839 * 1840 1840 * CPU0: CPU1: 1841 1841 * change_huge_pmd(prot_numa=1)
+1 -1
mm/khugepaged.c
··· 1543 1543 /* 1544 1544 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that 1545 1545 * got written to. These VMAs are likely not worth investing 1546 - * down_write(mmap_sem) as PMD-mapping is likely to be split 1546 + * mmap_write_lock(mm) as PMD-mapping is likely to be split 1547 1547 * later. 1548 1548 * 1549 1549 * Note that vma->anon_vma check is racy: it can be set up after
+1 -1
mm/ksm.c
··· 2362 2362 } else { 2363 2363 mmap_read_unlock(mm); 2364 2364 /* 2365 - * up_read(&mm->mmap_sem) first because after 2365 + * mmap_read_unlock(mm) first because after 2366 2366 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may 2367 2367 * already have been freed under us by __ksm_exit() 2368 2368 * because the "mm_slot" is still hashed and
+2 -2
mm/memory.c
··· 3323 3323 * pte_offset_map() on pmds where a huge pmd might be created 3324 3324 * from a different thread. 3325 3325 * 3326 - * pte_alloc_map() is safe to use under down_write(mmap_sem) or when 3326 + * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when 3327 3327 * parallel threads are excluded by other means. 3328 3328 * 3329 - * Here we only have down_read(mmap_sem). 3329 + * Here we only have mmap_read_lock(mm). 3330 3330 */ 3331 3331 if (pte_alloc(vma->vm_mm, vmf->pmd)) 3332 3332 return VM_FAULT_OOM;
+1 -1
mm/mempolicy.c
··· 2185 2185 * 2186 2186 * This function allocates a page from the kernel page pool and applies 2187 2187 * a NUMA policy associated with the VMA or the current process. 2188 - * When VMA is not NULL caller must hold down_read on the mmap_sem of the 2188 + * When VMA is not NULL caller must read-lock the mmap_lock of the 2189 2189 * mm_struct of the VMA to prevent it from going away. Should be used for 2190 2190 * all allocations for pages that will be mapped into user space. Returns 2191 2191 * NULL when no page can be allocated.
+2 -2
mm/migrate.c
··· 2772 2772 * pte_offset_map() on pmds where a huge pmd might be created 2773 2773 * from a different thread. 2774 2774 * 2775 - * pte_alloc_map() is safe to use under down_write(mmap_sem) or when 2775 + * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when 2776 2776 * parallel threads are excluded by other means. 2777 2777 * 2778 - * Here we only have down_read(mmap_sem). 2778 + * Here we only have mmap_read_lock(mm). 2779 2779 */ 2780 2780 if (pte_alloc(mm, pmdp)) 2781 2781 goto abort;
+1 -1
mm/mmap.c
··· 1361 1361 } 1362 1362 1363 1363 /* 1364 - * The caller must hold down_write(&current->mm->mmap_sem). 1364 + * The caller must write-lock current->mm->mmap_lock. 1365 1365 */ 1366 1366 unsigned long do_mmap(struct file *file, unsigned long addr, 1367 1367 unsigned long len, unsigned long prot,
+4 -4
mm/oom_kill.c
··· 577 577 /* 578 578 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't 579 579 * work on the mm anymore. The check for MMF_OOM_SKIP must run 580 - * under mmap_sem for reading because it serializes against the 581 - * down_write();up_write() cycle in exit_mmap(). 580 + * under mmap_lock for reading because it serializes against the 581 + * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap(). 582 582 */ 583 583 if (test_bit(MMF_OOM_SKIP, &mm->flags)) { 584 584 trace_skip_task_reaping(tsk->pid); ··· 611 611 int attempts = 0; 612 612 struct mm_struct *mm = tsk->signal->oom_mm; 613 613 614 - /* Retry the down_read_trylock(mmap_sem) a few times */ 614 + /* Retry the mmap_read_trylock(mm) a few times */ 615 615 while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm)) 616 616 schedule_timeout_idle(HZ/10); 617 617 ··· 629 629 630 630 /* 631 631 * Hide this mm from OOM killer because it has been either reaped or 632 - * somebody can't call up_write(mmap_sem). 632 + * somebody can't call mmap_write_unlock(mm). 633 633 */ 634 634 set_bit(MMF_OOM_SKIP, &mm->flags); 635 635
+1 -1
net/ipv4/tcp.c
··· 1734 1734 return -EPERM; 1735 1735 vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC); 1736 1736 1737 - /* Instruct vm_insert_page() to not down_read(mmap_sem) */ 1737 + /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ 1738 1738 vma->vm_flags |= VM_MIXEDMAP; 1739 1739 1740 1740 vma->vm_ops = &tcp_vm_ops;