Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'vm-cleanups' (unmap_vma() interface cleanup)

This series sanitizes the interface to unmap_vma(). The crazy interface
annoyed me no end when I was looking at unmap_single_vma(), which we can
spend quite a lot of time in (especially with loads that have a lot of
small fork/exec's: shell scripts, etc.).

Moving the nr_accounted calculations to where they belong at least
clarifies things a little. I hope to come back to look at the
performance of this later, but if/when I get back to it I at least don't
have to see the crazy interfaces any more.

* vm-cleanups:
vm: remove 'nr_accounted' calculations from the unmap_vmas() interfaces
vm: simplify unmap_vmas() calling convention

+25 -27
+2 -4
include/linux/mm.h
··· 896 896 unsigned long size); 897 897 void zap_page_range(struct vm_area_struct *vma, unsigned long address, 898 898 unsigned long size, struct zap_details *); 899 - void unmap_vmas(struct mmu_gather *tlb, 900 - struct vm_area_struct *start_vma, unsigned long start_addr, 901 - unsigned long end_addr, unsigned long *nr_accounted, 902 - struct zap_details *); 899 + void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, 900 + unsigned long start, unsigned long end); 903 901 904 902 /** 905 903 * mm_walk - callbacks for walk_page_range
+11 -17
mm/memory.c
··· 1295 1295 1296 1296 static void unmap_single_vma(struct mmu_gather *tlb, 1297 1297 struct vm_area_struct *vma, unsigned long start_addr, 1298 - unsigned long end_addr, unsigned long *nr_accounted, 1298 + unsigned long end_addr, 1299 1299 struct zap_details *details) 1300 1300 { 1301 1301 unsigned long start = max(vma->vm_start, start_addr); ··· 1306 1306 end = min(vma->vm_end, end_addr); 1307 1307 if (end <= vma->vm_start) 1308 1308 return; 1309 - 1310 - if (vma->vm_flags & VM_ACCOUNT) 1311 - *nr_accounted += (end - start) >> PAGE_SHIFT; 1312 1309 1313 1310 if (unlikely(is_pfn_mapping(vma))) 1314 1311 untrack_pfn_vma(vma, 0, 0); ··· 1336 1339 * @vma: the starting vma 1337 1340 * @start_addr: virtual address at which to start unmapping 1338 1341 * @end_addr: virtual address at which to end unmapping 1339 - * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here 1340 - * @details: details of nonlinear truncation or shared cache invalidation 1341 1342 * 1342 1343 * Unmap all pages in the vma list. 
1343 1344 * ··· 1350 1355 */ 1351 1356 void unmap_vmas(struct mmu_gather *tlb, 1352 1357 struct vm_area_struct *vma, unsigned long start_addr, 1353 - unsigned long end_addr, unsigned long *nr_accounted, 1354 - struct zap_details *details) 1358 + unsigned long end_addr) 1355 1359 { 1356 1360 struct mm_struct *mm = vma->vm_mm; 1357 1361 1358 1362 mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); 1359 1363 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) 1360 - unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted, 1361 - details); 1364 + unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); 1362 1365 mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); 1363 1366 } 1364 1367 ··· 1369 1376 * 1370 1377 * Caller must protect the VMA list 1371 1378 */ 1372 - void zap_page_range(struct vm_area_struct *vma, unsigned long address, 1379 + void zap_page_range(struct vm_area_struct *vma, unsigned long start, 1373 1380 unsigned long size, struct zap_details *details) 1374 1381 { 1375 1382 struct mm_struct *mm = vma->vm_mm; 1376 1383 struct mmu_gather tlb; 1377 - unsigned long end = address + size; 1378 - unsigned long nr_accounted = 0; 1384 + unsigned long end = start + size; 1379 1385 1380 1386 lru_add_drain(); 1381 1387 tlb_gather_mmu(&tlb, mm, 0); 1382 1388 update_hiwater_rss(mm); 1383 - unmap_vmas(&tlb, vma, address, end, &nr_accounted, details); 1384 - tlb_finish_mmu(&tlb, address, end); 1389 + mmu_notifier_invalidate_range_start(mm, start, end); 1390 + for ( ; vma && vma->vm_start < end; vma = vma->vm_next) 1391 + unmap_single_vma(&tlb, vma, start, end, details); 1392 + mmu_notifier_invalidate_range_end(mm, start, end); 1393 + tlb_finish_mmu(&tlb, start, end); 1385 1394 } 1386 1395 1387 1396 /** ··· 1401 1406 struct mm_struct *mm = vma->vm_mm; 1402 1407 struct mmu_gather tlb; 1403 1408 unsigned long end = address + size; 1404 - unsigned long nr_accounted = 0; 1405 1409 1406 1410 lru_add_drain(); 1407 1411 tlb_gather_mmu(&tlb, mm, 
0); 1408 1412 update_hiwater_rss(mm); 1409 1413 mmu_notifier_invalidate_range_start(mm, address, end); 1410 - unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details); 1414 + unmap_single_vma(&tlb, vma, address, end, details); 1411 1415 mmu_notifier_invalidate_range_end(mm, address, end); 1412 1416 tlb_finish_mmu(&tlb, address, end); 1413 1417 }
+12 -6
mm/mmap.c
··· 1889 1889 */ 1890 1890 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) 1891 1891 { 1892 + unsigned long nr_accounted = 0; 1893 + 1892 1894 /* Update high watermark before we lower total_vm */ 1893 1895 update_hiwater_vm(mm); 1894 1896 do { 1895 1897 long nrpages = vma_pages(vma); 1896 1898 1899 + if (vma->vm_flags & VM_ACCOUNT) 1900 + nr_accounted += nrpages; 1897 1901 mm->total_vm -= nrpages; 1898 1902 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); 1899 1903 vma = remove_vma(vma); 1900 1904 } while (vma); 1905 + vm_unacct_memory(nr_accounted); 1901 1906 validate_mm(mm); 1902 1907 } 1903 1908 ··· 1917 1912 { 1918 1913 struct vm_area_struct *next = prev? prev->vm_next: mm->mmap; 1919 1914 struct mmu_gather tlb; 1920 - unsigned long nr_accounted = 0; 1921 1915 1922 1916 lru_add_drain(); 1923 1917 tlb_gather_mmu(&tlb, mm, 0); 1924 1918 update_hiwater_rss(mm); 1925 - unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); 1926 - vm_unacct_memory(nr_accounted); 1919 + unmap_vmas(&tlb, vma, start, end); 1927 1920 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 1928 1921 next ? next->vm_start : 0); 1929 1922 tlb_finish_mmu(&tlb, start, end); ··· 2308 2305 tlb_gather_mmu(&tlb, mm, 1); 2309 2306 /* update_hiwater_rss(mm) here? but nobody should be looking */ 2310 2307 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 2311 - unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); 2312 - vm_unacct_memory(nr_accounted); 2308 + unmap_vmas(&tlb, vma, 0, -1); 2313 2309 2314 2310 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); 2315 2311 tlb_finish_mmu(&tlb, 0, -1); ··· 2317 2315 * Walk the list again, actually closing and freeing it, 2318 2316 * with preemption enabled, without holding any MM locks. 
2319 2317 */ 2320 - while (vma) 2318 + while (vma) { 2319 + if (vma->vm_flags & VM_ACCOUNT) 2320 + nr_accounted += vma_pages(vma); 2321 2321 vma = remove_vma(vma); 2322 + } 2323 + vm_unacct_memory(nr_accounted); 2322 2324 2323 2325 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); 2324 2326 }