Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: convert anon_vma->lock to a mutex

Straightforward conversion of anon_vma->lock to a mutex.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Peter Zijlstra and committed by Linus Torvalds.
2b575eb6 746b18d4

+21 -25
+2 -6
include/linux/huge_mm.h
··· 92 92 #define wait_split_huge_page(__anon_vma, __pmd) \ 93 93 do { \ 94 94 pmd_t *____pmd = (__pmd); \ 95 - spin_unlock_wait(&(__anon_vma)->root->lock); \ 96 - /* \ 97 - * spin_unlock_wait() is just a loop in C and so the \ 98 - * CPU can reorder anything around it. \ 99 - */ \ 100 - smp_mb(); \ 95 + anon_vma_lock(__anon_vma); \ 96 + anon_vma_unlock(__anon_vma); \ 101 97 BUG_ON(pmd_trans_splitting(*____pmd) || \ 102 98 pmd_trans_huge(*____pmd)); \ 103 99 } while (0)
+1 -1
include/linux/mmu_notifier.h
··· 150 150 * Therefore notifier chains can only be traversed when either 151 151 * 152 152 * 1. mmap_sem is held. 153 - * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->lock). 153 + * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex). 154 154 * 3. No other concurrent thread can access the list (release) 155 155 */ 156 156 struct mmu_notifier {
+7 -7
include/linux/rmap.h
··· 7 7 #include <linux/list.h> 8 8 #include <linux/slab.h> 9 9 #include <linux/mm.h> 10 - #include <linux/spinlock.h> 10 + #include <linux/mutex.h> 11 11 #include <linux/memcontrol.h> 12 12 13 13 /* ··· 26 26 */ 27 27 struct anon_vma { 28 28 struct anon_vma *root; /* Root of this anon_vma tree */ 29 - spinlock_t lock; /* Serialize access to vma list */ 29 + struct mutex mutex; /* Serialize access to vma list */ 30 30 /* 31 31 * The refcount is taken on an anon_vma when there is no 32 32 * guarantee that the vma of page tables will exist for ··· 64 64 struct vm_area_struct *vma; 65 65 struct anon_vma *anon_vma; 66 66 struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ 67 - struct list_head same_anon_vma; /* locked by anon_vma->lock */ 67 + struct list_head same_anon_vma; /* locked by anon_vma->mutex */ 68 68 }; 69 69 70 70 #ifdef CONFIG_MMU ··· 93 93 { 94 94 struct anon_vma *anon_vma = vma->anon_vma; 95 95 if (anon_vma) 96 - spin_lock(&anon_vma->root->lock); 96 + mutex_lock(&anon_vma->root->mutex); 97 97 } 98 98 99 99 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma) 100 100 { 101 101 struct anon_vma *anon_vma = vma->anon_vma; 102 102 if (anon_vma) 103 - spin_unlock(&anon_vma->root->lock); 103 + mutex_unlock(&anon_vma->root->mutex); 104 104 } 105 105 106 106 static inline void anon_vma_lock(struct anon_vma *anon_vma) 107 107 { 108 - spin_lock(&anon_vma->root->lock); 108 + mutex_lock(&anon_vma->root->mutex); 109 109 } 110 110 111 111 static inline void anon_vma_unlock(struct anon_vma *anon_vma) 112 112 { 113 - spin_unlock(&anon_vma->root->lock); 113 + mutex_unlock(&anon_vma->root->mutex); 114 114 } 115 115 116 116 /*
+2 -2
mm/huge_memory.c
··· 1139 1139 * We can't temporarily set the pmd to null in order 1140 1140 * to split it, the pmd must remain marked huge at all 1141 1141 * times or the VM won't take the pmd_trans_huge paths 1142 - * and it won't wait on the anon_vma->root->lock to 1142 + * and it won't wait on the anon_vma->root->mutex to 1143 1143 * serialize against split_huge_page*. 1144 1144 */ 1145 1145 pmdp_splitting_flush_notify(vma, address, pmd); ··· 1333 1333 return ret; 1334 1334 } 1335 1335 1336 - /* must be called with anon_vma->root->lock hold */ 1336 + /* must be called with anon_vma->root->mutex hold */ 1337 1337 static void __split_huge_page(struct page *page, 1338 1338 struct anon_vma *anon_vma) 1339 1339 {
+5 -5
mm/mmap.c
··· 2502 2502 * The LSB of head.next can't change from under us 2503 2503 * because we hold the mm_all_locks_mutex. 2504 2504 */ 2505 - spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem); 2505 + mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem); 2506 2506 /* 2507 2507 * We can safely modify head.next after taking the 2508 - * anon_vma->root->lock. If some other vma in this mm shares 2508 + * anon_vma->root->mutex. If some other vma in this mm shares 2509 2509 * the same anon_vma we won't take it again. 2510 2510 * 2511 2511 * No need of atomic instructions here, head.next 2512 2512 * can't change from under us thanks to the 2513 - * anon_vma->root->lock. 2513 + * anon_vma->root->mutex. 2514 2514 */ 2515 2515 if (__test_and_set_bit(0, (unsigned long *) 2516 2516 &anon_vma->root->head.next)) ··· 2559 2559 * vma in this mm is backed by the same anon_vma or address_space. 2560 2560 * 2561 2561 * We can take all the locks in random order because the VM code 2562 - * taking i_mmap_mutex or anon_vma->lock outside the mmap_sem never 2562 + * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never 2563 2563 * takes more than one of them in a row. Secondly we're protected 2564 2564 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. 2565 2565 * ··· 2615 2615 * 2616 2616 * No need of atomic instructions here, head.next 2617 2617 * can't change from under us until we release the 2618 - * anon_vma->root->lock. 2618 + * anon_vma->root->mutex. 2619 2619 */ 2620 2620 if (!__test_and_clear_bit(0, (unsigned long *) 2621 2621 &anon_vma->root->head.next))
+4 -4
mm/rmap.c
··· 25 25 * mm->mmap_sem 26 26 * page->flags PG_locked (lock_page) 27 27 * mapping->i_mmap_mutex 28 - * anon_vma->lock 28 + * anon_vma->mutex 29 29 * mm->page_table_lock or pte_lock 30 30 * zone->lru_lock (in mark_page_accessed, isolate_lru_page) 31 31 * swap_lock (in swap_duplicate, swap_info_get) ··· 40 40 * 41 41 * (code doesn't rely on that order so it could be switched around) 42 42 * ->tasklist_lock 43 - * anon_vma->lock (memory_failure, collect_procs_anon) 43 + * anon_vma->mutex (memory_failure, collect_procs_anon) 44 44 * pte map lock 45 45 */ 46 46 ··· 307 307 { 308 308 struct anon_vma *anon_vma = data; 309 309 310 - spin_lock_init(&anon_vma->lock); 310 + mutex_init(&anon_vma->mutex); 311 311 atomic_set(&anon_vma->refcount, 0); 312 312 INIT_LIST_HEAD(&anon_vma->head); 313 313 } ··· 1143 1143 /* 1144 1144 * We need mmap_sem locking, Otherwise VM_LOCKED check makes 1145 1145 * unstable result and race. Plus, We can't wait here because 1146 - * we now hold anon_vma->lock or mapping->i_mmap_mutex. 1146 + * we now hold anon_vma->mutex or mapping->i_mmap_mutex. 1147 1147 * if trylock failed, the page remain in evictable lru and later 1148 1148 * vmscan could retry to move the page to unevictable lru if the 1149 1149 * page is actually mlocked.