Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vma_page_offset() has no callers: drop it

Hugh adds: vma_pagecache_offset() has a dangerously misleading name, since
it's using hugepage units: rename it to vma_hugecache_offset().

[apw@shadowen.org: restack onto fixed MAP_PRIVATE reservations]
[akpm@linux-foundation.org: vma_split conversion]
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Johannes Weiner and committed by Linus Torvalds
a858f7b2 84afd99b

+9 -20
mm/hugetlb.c
··· 201 201 202 202 /* 203 203 * Convert the address within this vma to the page offset within 204 - * the mapping, in base page units. 205 - */ 206 - static pgoff_t vma_page_offset(struct vm_area_struct *vma, 207 - unsigned long address) 208 - { 209 - return ((address - vma->vm_start) >> PAGE_SHIFT) + 210 - (vma->vm_pgoff >> PAGE_SHIFT); 211 - } 212 - 213 - /* 214 - * Convert the address within this vma to the page offset within 215 204 * the mapping, in pagecache page units; huge pages here. 216 205 */ 217 - static pgoff_t vma_pagecache_offset(struct vm_area_struct *vma, 206 + static pgoff_t vma_hugecache_offset(struct vm_area_struct *vma, 218 207 unsigned long address) 219 208 { 220 209 return ((address - vma->vm_start) >> HPAGE_SHIFT) + ··· 795 806 struct inode *inode = mapping->host; 796 807 797 808 if (vma->vm_flags & VM_SHARED) { 798 - pgoff_t idx = vma_pagecache_offset(vma, addr); 809 + pgoff_t idx = vma_hugecache_offset(vma, addr); 799 810 return region_chg(&inode->i_mapping->private_list, 800 811 idx, idx + 1); 801 812 ··· 804 815 805 816 } else { 806 817 int err; 807 - pgoff_t idx = vma_pagecache_offset(vma, addr); 818 + pgoff_t idx = vma_hugecache_offset(vma, addr); 808 819 struct resv_map *reservations = vma_resv_map(vma); 809 820 810 821 err = region_chg(&reservations->regions, idx, idx + 1); ··· 820 831 struct inode *inode = mapping->host; 821 832 822 833 if (vma->vm_flags & VM_SHARED) { 823 - pgoff_t idx = vma_pagecache_offset(vma, addr); 834 + pgoff_t idx = vma_hugecache_offset(vma, addr); 824 835 region_add(&inode->i_mapping->private_list, idx, idx + 1); 825 836 826 837 } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 827 - pgoff_t idx = vma_pagecache_offset(vma, addr); 838 + pgoff_t idx = vma_hugecache_offset(vma, addr); 828 839 struct resv_map *reservations = vma_resv_map(vma); 829 840 830 841 /* Mark this page used in the map. 
*/ ··· 1142 1153 unsigned long end; 1143 1154 1144 1155 if (reservations) { 1145 - start = vma_pagecache_offset(vma, vma->vm_start); 1146 - end = vma_pagecache_offset(vma, vma->vm_end); 1156 + start = vma_hugecache_offset(vma, vma->vm_start); 1157 + end = vma_hugecache_offset(vma, vma->vm_end); 1147 1158 1148 1159 reserve = (end - start) - 1149 1160 region_count(&reservations->regions, start, end); ··· 1460 1471 pgoff_t idx; 1461 1472 1462 1473 mapping = vma->vm_file->f_mapping; 1463 - idx = vma_pagecache_offset(vma, address); 1474 + idx = vma_hugecache_offset(vma, address); 1464 1475 1465 1476 return find_lock_page(mapping, idx); 1466 1477 } ··· 1488 1499 } 1489 1500 1490 1501 mapping = vma->vm_file->f_mapping; 1491 - idx = vma_pagecache_offset(vma, address); 1502 + idx = vma_hugecache_offset(vma, address); 1492 1503 1493 1504 /* 1494 1505 * Use page lock to guard against racing truncation