Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 6464/2: fix spinlock recursion in adjust_pte()

When running the following code on a machine which has VIVT caches and
USE_SPLIT_PTLOCKS is not defined:

fd = open("/etc/passwd", O_RDONLY);
addr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
addr2 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

v = *((int *)addr);

we will hang in spinlock recursion in the page fault handler:

BUG: spinlock recursion on CPU#0, mmap_test/717
lock: c5e295d8, .magic: dead4ead, .owner: mmap_test/717,
.owner_cpu: 0
[<c0026604>] (unwind_backtrace+0x0/0xec)
[<c014ee48>] (do_raw_spin_lock+0x40/0x140)
[<c0027f68>] (update_mmu_cache+0x208/0x250)
[<c0079db4>] (__do_fault+0x320/0x3ec)
[<c007af7c>] (handle_mm_fault+0x2f0/0x6d8)
[<c0027834>] (do_page_fault+0xdc/0x1cc)
[<c00202d0>] (do_DataAbort+0x34/0x94)

This comes from the fact that when USE_SPLIT_PTLOCKS is not defined,
the only lock protecting the page tables is mm->page_table_lock
which is already locked before update_mmu_cache() is called.

Signed-off-by: Mika Westerberg <mika.westerberg@iki.fi>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Mika Westerberg and committed by Russell King.
4e54d93d 4e929d2b

+26 -2
+26 -2
arch/arm/mm/fault-armv.c
···
  66   66	return ret;
  67   67 }
  68   68
       69 +#if USE_SPLIT_PTLOCKS
       70 +/*
       71 + * If we are using split PTE locks, then we need to take the page
       72 + * lock here.  Otherwise we are using shared mm->page_table_lock
       73 + * which is already locked, thus cannot take it.
       74 + */
       75 +static inline void do_pte_lock(spinlock_t *ptl)
       76 +{
       77 +	/*
       78 +	 * Use nested version here to indicate that we are already
       79 +	 * holding one similar spinlock.
       80 +	 */
       81 +	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
       82 +}
       83 +
       84 +static inline void do_pte_unlock(spinlock_t *ptl)
       85 +{
       86 +	spin_unlock(ptl);
       87 +}
       88 +#else /* !USE_SPLIT_PTLOCKS */
       89 +static inline void do_pte_lock(spinlock_t *ptl) {}
       90 +static inline void do_pte_unlock(spinlock_t *ptl) {}
       91 +#endif /* USE_SPLIT_PTLOCKS */
       92 +
  69   93 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
  70   94 	unsigned long pfn)
  71   95 {
···
 114   90	 */
 115   91	ptl = pte_lockptr(vma->vm_mm, pmd);
 116   92	pte = pte_offset_map(pmd, address);
 117      -	spin_lock(ptl);
       93 +	do_pte_lock(ptl);
 118   94
 119   95	ret = do_adjust_pte(vma, address, pfn, pte);
 120   96
 121      -	spin_unlock(ptl);
       97 +	do_pte_unlock(ptl);
 122   98	pte_unmap(pte);
 123   99
 124  100	return ret;