Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: spinlock tree_lock

mapping->tree_lock has no read lockers. Convert the lock from an rwlock
to a spinlock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Reviewed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Nick Piggin and committed by Linus Torvalds.
19fd6231 a60637c8

+38 -39
+2 -2
fs/buffer.c
··· 706 706 if (TestSetPageDirty(page)) 707 707 return 0; 708 708 709 - write_lock_irq(&mapping->tree_lock); 709 + spin_lock_irq(&mapping->tree_lock); 710 710 if (page->mapping) { /* Race with truncate? */ 711 711 WARN_ON_ONCE(warn && !PageUptodate(page)); 712 712 ··· 719 719 radix_tree_tag_set(&mapping->page_tree, 720 720 page_index(page), PAGECACHE_TAG_DIRTY); 721 721 } 722 - write_unlock_irq(&mapping->tree_lock); 722 + spin_unlock_irq(&mapping->tree_lock); 723 723 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 724 724 725 725 return 1;
+1 -1
fs/inode.c
··· 209 209 INIT_LIST_HEAD(&inode->i_dentry); 210 210 INIT_LIST_HEAD(&inode->i_devices); 211 211 INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); 212 - rwlock_init(&inode->i_data.tree_lock); 212 + spin_lock_init(&inode->i_data.tree_lock); 213 213 spin_lock_init(&inode->i_data.i_mmap_lock); 214 214 INIT_LIST_HEAD(&inode->i_data.private_list); 215 215 spin_lock_init(&inode->i_data.private_lock);
+2 -2
include/asm-arm/cacheflush.h
··· 424 424 } 425 425 426 426 #define flush_dcache_mmap_lock(mapping) \ 427 - write_lock_irq(&(mapping)->tree_lock) 427 + spin_lock_irq(&(mapping)->tree_lock) 428 428 #define flush_dcache_mmap_unlock(mapping) \ 429 - write_unlock_irq(&(mapping)->tree_lock) 429 + spin_unlock_irq(&(mapping)->tree_lock) 430 430 431 431 #define flush_icache_user_range(vma,page,addr,len) \ 432 432 flush_dcache_page(page)
+2 -2
include/asm-parisc/cacheflush.h
··· 45 45 extern void flush_dcache_page(struct page *page); 46 46 47 47 #define flush_dcache_mmap_lock(mapping) \ 48 - write_lock_irq(&(mapping)->tree_lock) 48 + spin_lock_irq(&(mapping)->tree_lock) 49 49 #define flush_dcache_mmap_unlock(mapping) \ 50 - write_unlock_irq(&(mapping)->tree_lock) 50 + spin_unlock_irq(&(mapping)->tree_lock) 51 51 52 52 #define flush_icache_page(vma,page) do { \ 53 53 flush_kernel_dcache_page(page); \
+1 -1
include/linux/fs.h
··· 499 499 struct address_space { 500 500 struct inode *host; /* owner: inode, block_device */ 501 501 struct radix_tree_root page_tree; /* radix tree of all pages */ 502 - rwlock_t tree_lock; /* and rwlock protecting it */ 502 + spinlock_t tree_lock; /* and lock protecting it */ 503 503 unsigned int i_mmap_writable;/* count VM_SHARED mappings */ 504 504 struct prio_tree_root i_mmap; /* tree of private and shared mappings */ 505 505 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
+5 -5
mm/filemap.c
··· 109 109 /* 110 110 * Remove a page from the page cache and free it. Caller has to make 111 111 * sure the page is locked and that nobody else uses it - or that usage 112 - * is safe. The caller must hold a write_lock on the mapping's tree_lock. 112 + * is safe. The caller must hold the mapping's tree_lock. 113 113 */ 114 114 void __remove_from_page_cache(struct page *page) 115 115 { ··· 141 141 142 142 BUG_ON(!PageLocked(page)); 143 143 144 - write_lock_irq(&mapping->tree_lock); 144 + spin_lock_irq(&mapping->tree_lock); 145 145 __remove_from_page_cache(page); 146 - write_unlock_irq(&mapping->tree_lock); 146 + spin_unlock_irq(&mapping->tree_lock); 147 147 } 148 148 149 149 static int sync_page(void *word) ··· 469 469 page->mapping = mapping; 470 470 page->index = offset; 471 471 472 - write_lock_irq(&mapping->tree_lock); 472 + spin_lock_irq(&mapping->tree_lock); 473 473 error = radix_tree_insert(&mapping->page_tree, offset, page); 474 474 if (likely(!error)) { 475 475 mapping->nrpages++; ··· 480 480 page_cache_release(page); 481 481 } 482 482 483 - write_unlock_irq(&mapping->tree_lock); 483 + spin_unlock_irq(&mapping->tree_lock); 484 484 radix_tree_preload_end(); 485 485 } else 486 486 mem_cgroup_uncharge_cache_page(page);
+5 -6
mm/migrate.c
··· 323 323 return 0; 324 324 } 325 325 326 - write_lock_irq(&mapping->tree_lock); 326 + spin_lock_irq(&mapping->tree_lock); 327 327 328 328 pslot = radix_tree_lookup_slot(&mapping->page_tree, 329 329 page_index(page)); ··· 331 331 expected_count = 2 + !!PagePrivate(page); 332 332 if (page_count(page) != expected_count || 333 333 (struct page *)radix_tree_deref_slot(pslot) != page) { 334 - write_unlock_irq(&mapping->tree_lock); 334 + spin_unlock_irq(&mapping->tree_lock); 335 335 return -EAGAIN; 336 336 } 337 337 338 338 if (!page_freeze_refs(page, expected_count)) { 339 - write_unlock_irq(&mapping->tree_lock); 339 + spin_unlock_irq(&mapping->tree_lock); 340 340 return -EAGAIN; 341 341 } 342 342 ··· 373 373 __dec_zone_page_state(page, NR_FILE_PAGES); 374 374 __inc_zone_page_state(newpage, NR_FILE_PAGES); 375 375 376 - write_unlock_irq(&mapping->tree_lock); 377 - if (!PageSwapCache(newpage)) { 376 + spin_unlock_irq(&mapping->tree_lock); 377 + if (!PageSwapCache(newpage)) 378 378 mem_cgroup_uncharge_cache_page(page); 379 - } 380 379 381 380 return 0; 382 381 }
+6 -6
mm/page-writeback.c
··· 1088 1088 if (!mapping) 1089 1089 return 1; 1090 1090 1091 - write_lock_irq(&mapping->tree_lock); 1091 + spin_lock_irq(&mapping->tree_lock); 1092 1092 mapping2 = page_mapping(page); 1093 1093 if (mapping2) { /* Race with truncate? */ 1094 1094 BUG_ON(mapping2 != mapping); ··· 1102 1102 radix_tree_tag_set(&mapping->page_tree, 1103 1103 page_index(page), PAGECACHE_TAG_DIRTY); 1104 1104 } 1105 - write_unlock_irq(&mapping->tree_lock); 1105 + spin_unlock_irq(&mapping->tree_lock); 1106 1106 if (mapping->host) { 1107 1107 /* !PageAnon && !swapper_space */ 1108 1108 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); ··· 1258 1258 struct backing_dev_info *bdi = mapping->backing_dev_info; 1259 1259 unsigned long flags; 1260 1260 1261 - write_lock_irqsave(&mapping->tree_lock, flags); 1261 + spin_lock_irqsave(&mapping->tree_lock, flags); 1262 1262 ret = TestClearPageWriteback(page); 1263 1263 if (ret) { 1264 1264 radix_tree_tag_clear(&mapping->page_tree, ··· 1269 1269 __bdi_writeout_inc(bdi); 1270 1270 } 1271 1271 } 1272 - write_unlock_irqrestore(&mapping->tree_lock, flags); 1272 + spin_unlock_irqrestore(&mapping->tree_lock, flags); 1273 1273 } else { 1274 1274 ret = TestClearPageWriteback(page); 1275 1275 } ··· 1287 1287 struct backing_dev_info *bdi = mapping->backing_dev_info; 1288 1288 unsigned long flags; 1289 1289 1290 - write_lock_irqsave(&mapping->tree_lock, flags); 1290 + spin_lock_irqsave(&mapping->tree_lock, flags); 1291 1291 ret = TestSetPageWriteback(page); 1292 1292 if (!ret) { 1293 1293 radix_tree_tag_set(&mapping->page_tree, ··· 1300 1300 radix_tree_tag_clear(&mapping->page_tree, 1301 1301 page_index(page), 1302 1302 PAGECACHE_TAG_DIRTY); 1303 - write_unlock_irqrestore(&mapping->tree_lock, flags); 1303 + spin_unlock_irqrestore(&mapping->tree_lock, flags); 1304 1304 } else { 1305 1305 ret = TestSetPageWriteback(page); 1306 1306 }
+5 -5
mm/swap_state.c
··· 39 39 40 40 struct address_space swapper_space = { 41 41 .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), 42 - .tree_lock = __RW_LOCK_UNLOCKED(swapper_space.tree_lock), 42 + .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock), 43 43 .a_ops = &swap_aops, 44 44 .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear), 45 45 .backing_dev_info = &swap_backing_dev_info, ··· 80 80 SetPageSwapCache(page); 81 81 set_page_private(page, entry.val); 82 82 83 - write_lock_irq(&swapper_space.tree_lock); 83 + spin_lock_irq(&swapper_space.tree_lock); 84 84 error = radix_tree_insert(&swapper_space.page_tree, 85 85 entry.val, page); 86 86 if (likely(!error)) { ··· 88 88 __inc_zone_page_state(page, NR_FILE_PAGES); 89 89 INC_CACHE_INFO(add_total); 90 90 } 91 - write_unlock_irq(&swapper_space.tree_lock); 91 + spin_unlock_irq(&swapper_space.tree_lock); 92 92 radix_tree_preload_end(); 93 93 94 94 if (unlikely(error)) { ··· 182 182 183 183 entry.val = page_private(page); 184 184 185 - write_lock_irq(&swapper_space.tree_lock); 185 + spin_lock_irq(&swapper_space.tree_lock); 186 186 __delete_from_swap_cache(page); 187 - write_unlock_irq(&swapper_space.tree_lock); 187 + spin_unlock_irq(&swapper_space.tree_lock); 188 188 189 189 swap_free(entry); 190 190 page_cache_release(page);
+2 -2
mm/swapfile.c
··· 369 369 retval = 0; 370 370 if (p->swap_map[swp_offset(entry)] == 1) { 371 371 /* Recheck the page count with the swapcache lock held.. */ 372 - write_lock_irq(&swapper_space.tree_lock); 372 + spin_lock_irq(&swapper_space.tree_lock); 373 373 if ((page_count(page) == 2) && !PageWriteback(page)) { 374 374 __delete_from_swap_cache(page); 375 375 SetPageDirty(page); 376 376 retval = 1; 377 377 } 378 - write_unlock_irq(&swapper_space.tree_lock); 378 + spin_unlock_irq(&swapper_space.tree_lock); 379 379 } 380 380 spin_unlock(&swap_lock); 381 381
+3 -3
mm/truncate.c
··· 349 349 if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL)) 350 350 return 0; 351 351 352 - write_lock_irq(&mapping->tree_lock); 352 + spin_lock_irq(&mapping->tree_lock); 353 353 if (PageDirty(page)) 354 354 goto failed; 355 355 356 356 BUG_ON(PagePrivate(page)); 357 357 __remove_from_page_cache(page); 358 - write_unlock_irq(&mapping->tree_lock); 358 + spin_unlock_irq(&mapping->tree_lock); 359 359 ClearPageUptodate(page); 360 360 page_cache_release(page); /* pagecache ref */ 361 361 return 1; 362 362 failed: 363 - write_unlock_irq(&mapping->tree_lock); 363 + spin_unlock_irq(&mapping->tree_lock); 364 364 return 0; 365 365 } 366 366
+4 -4
mm/vmscan.c
··· 399 399 BUG_ON(!PageLocked(page)); 400 400 BUG_ON(mapping != page_mapping(page)); 401 401 402 - write_lock_irq(&mapping->tree_lock); 402 + spin_lock_irq(&mapping->tree_lock); 403 403 /* 404 404 * The non racy check for a busy page. 405 405 * ··· 436 436 if (PageSwapCache(page)) { 437 437 swp_entry_t swap = { .val = page_private(page) }; 438 438 __delete_from_swap_cache(page); 439 - write_unlock_irq(&mapping->tree_lock); 439 + spin_unlock_irq(&mapping->tree_lock); 440 440 swap_free(swap); 441 441 } else { 442 442 __remove_from_page_cache(page); 443 - write_unlock_irq(&mapping->tree_lock); 443 + spin_unlock_irq(&mapping->tree_lock); 444 444 } 445 445 446 446 return 1; 447 447 448 448 cannot_free: 449 - write_unlock_irq(&mapping->tree_lock); 449 + spin_unlock_irq(&mapping->tree_lock); 450 450 return 0; 451 451 } 452 452