[PATCH] freepgt: hugetlb_free_pgd_range

ia64 and ppc64 had hugetlb_free_pgtables functions which were no longer being
called, and it wasn't obvious what to do about them.

The ppc64 case turns out to be easy: the associated tables are noted elsewhere
and freed later, so it is safe either to skip its hugetlb areas or to go
through the motions of freeing nothing. Since ia64 does need a special case,
restore to ppc64 the special case of skipping them.

The ia64 hugetlb case has been broken since pgd_addr_end went in, though it
probably appeared to work okay if you just had one such area; in fact it's
been broken much longer if you consider a long munmap spanning from another
region into the hugetlb region.

In the ia64 hugetlb region, more virtual address bits are available than in
the other regions, yet the page tables are structured the same way: the page
at the bottom is larger. Here we need to scale down each addr before passing
it to the standard free_pgd_range. I was about to write a hugely_scaled_down
macro, but found that htlbpage_to_page already exists for just this purpose.
Also fixed an off-by-one error in ia64's is_hugepage_only_range.

Uninline free_pgd_range to make it available to ia64. Make sure the
vma-gathering loop in free_pgtables cannot join a hugepage_only_range to any
other range (joining huge ranges to each other would probably be safe, but we
don't bother).

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Hugh Dickins and committed by Linus Torvalds 3bf5ee95 ee39b37b

+65 -38
+23 -6
arch/ia64/mm/hugetlbpage.c
··· 186 return NULL; 187 } 188 189 - /* 190 - * Do nothing, until we've worked out what to do! To allow build, we 191 - * must remove reference to clear_page_range since it no longer exists. 192 - */ 193 - void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev, 194 - unsigned long start, unsigned long end) 195 { 196 } 197 198 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
··· 186 return NULL; 187 } 188 189 + void hugetlb_free_pgd_range(struct mmu_gather **tlb, 190 + unsigned long addr, unsigned long end, 191 + unsigned long floor, unsigned long ceiling) 192 { 193 + /* 194 + * This is called only when is_hugepage_only_range(addr,), 195 + * and it follows that is_hugepage_only_range(end,) also. 196 + * 197 + * The offset of these addresses from the base of the hugetlb 198 + * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that 199 + * the standard free_pgd_range will free the right page tables. 200 + * 201 + * If floor and ceiling are also in the hugetlb region, they 202 + * must likewise be scaled down; but if outside, left unchanged. 203 + */ 204 + 205 + addr = htlbpage_to_page(addr); 206 + end = htlbpage_to_page(end); 207 + if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE)) 208 + floor = htlbpage_to_page(floor); 209 + if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE)) 210 + ceiling = htlbpage_to_page(ceiling); 211 + 212 + free_pgd_range(tlb, addr, end, floor, ceiling); 213 } 214 215 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-10
arch/ppc64/mm/hugetlbpage.c
··· 430 flush_tlb_pending(); 431 } 432 433 - void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev, 434 - unsigned long start, unsigned long end) 435 - { 436 - /* Because the huge pgtables are only 2 level, they can take 437 - * at most around 4M, much less than one hugepage which the 438 - * process is presumably entitled to use. So we don't bother 439 - * freeing up the pagetables on unmap, and wait until 440 - * destroy_context() to clean up the lot. */ 441 - } 442 - 443 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) 444 { 445 struct mm_struct *mm = current->mm;
··· 430 flush_tlb_pending(); 431 } 432 433 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) 434 { 435 struct mm_struct *mm = current->mm;
+1 -1
include/asm-ia64/page.h
··· 139 # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 140 # define is_hugepage_only_range(mm, addr, len) \ 141 (REGION_NUMBER(addr) == REGION_HPAGE && \ 142 - REGION_NUMBER((addr)+(len)) == REGION_HPAGE) 143 extern unsigned int hpage_shift; 144 #endif 145
··· 139 # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 140 # define is_hugepage_only_range(mm, addr, len) \ 141 (REGION_NUMBER(addr) == REGION_HPAGE && \ 142 + REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE) 143 extern unsigned int hpage_shift; 144 #endif 145
+2 -2
include/asm-ia64/pgtable.h
··· 472 #define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT) 473 #define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1)) 474 struct mmu_gather; 475 - extern void hugetlb_free_pgtables(struct mmu_gather *tlb, 476 - struct vm_area_struct * prev, unsigned long start, unsigned long end); 477 #endif 478 479 /*
··· 472 #define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT) 473 #define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1)) 474 struct mmu_gather; 475 + void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, 476 + unsigned long end, unsigned long floor, unsigned long ceiling); 477 #endif 478 479 /*
+9 -3
include/asm-ppc64/pgtable.h
··· 500 501 extern void paging_init(void); 502 503 - struct mmu_gather; 504 - void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev, 505 - unsigned long start, unsigned long end); 506 507 /* 508 * This gets called at the end of handling a page fault, when
··· 500 501 extern void paging_init(void); 502 503 + /* 504 + * Because the huge pgtables are only 2 level, they can take 505 + * at most around 4M, much less than one hugepage which the 506 + * process is presumably entitled to use. So we don't bother 507 + * freeing up the pagetables on unmap, and wait until 508 + * destroy_context() to clean up the lot. 509 + */ 510 + #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ 511 + do { } while (0) 512 513 /* 514 * This gets called at the end of handling a page fault, when
+4 -2
include/linux/hugetlb.h
··· 37 38 #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE 39 #define is_hugepage_only_range(mm, addr, len) 0 40 - #define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0) 41 #endif 42 43 #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE ··· 73 #define prepare_hugepage_range(addr, len) (-EINVAL) 74 #define pmd_huge(x) 0 75 #define is_hugepage_only_range(mm, addr, len) 0 76 - #define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0) 77 #define alloc_huge_page() ({ NULL; }) 78 #define free_huge_page(p) ({ (void)(p); BUG(); }) 79
··· 37 38 #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE 39 #define is_hugepage_only_range(mm, addr, len) 0 40 + #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ 41 + do { } while (0) 42 #endif 43 44 #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE ··· 72 #define prepare_hugepage_range(addr, len) (-EINVAL) 73 #define pmd_huge(x) 0 74 #define is_hugepage_only_range(mm, addr, len) 0 75 + #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ 76 + do { } while (0) 77 #define alloc_huge_page() ({ NULL; }) 78 #define free_huge_page(p) ({ (void)(p); BUG(); }) 79
+3 -1
include/linux/mm.h
··· 587 struct vm_area_struct *start_vma, unsigned long start_addr, 588 unsigned long end_addr, unsigned long *nr_accounted, 589 struct zap_details *); 590 - void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma, 591 unsigned long floor, unsigned long ceiling); 592 int copy_page_range(struct mm_struct *dst, struct mm_struct *src, 593 struct vm_area_struct *vma);
··· 587 struct vm_area_struct *start_vma, unsigned long start_addr, 588 unsigned long end_addr, unsigned long *nr_accounted, 589 struct zap_details *); 590 + void free_pgd_range(struct mmu_gather **tlb, unsigned long addr, 591 + unsigned long end, unsigned long floor, unsigned long ceiling); 592 + void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma, 593 unsigned long floor, unsigned long ceiling); 594 int copy_page_range(struct mm_struct *dst, struct mm_struct *src, 595 struct vm_area_struct *vma);
+23 -13
mm/memory.c
··· 190 * 191 * Must be called with pagetable lock held. 192 */ 193 - static inline void free_pgd_range(struct mmu_gather *tlb, 194 unsigned long addr, unsigned long end, 195 unsigned long floor, unsigned long ceiling) 196 { ··· 241 return; 242 243 start = addr; 244 - pgd = pgd_offset(tlb->mm, addr); 245 do { 246 next = pgd_addr_end(addr, end); 247 if (pgd_none_or_clear_bad(pgd)) 248 continue; 249 - free_pud_range(tlb, pgd, addr, next, floor, ceiling); 250 } while (pgd++, addr = next, addr != end); 251 252 - if (!tlb_is_full_mm(tlb)) 253 - flush_tlb_pgtables(tlb->mm, start, end); 254 } 255 256 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma, 257 - unsigned long floor, unsigned long ceiling) 258 { 259 while (vma) { 260 struct vm_area_struct *next = vma->vm_next; 261 unsigned long addr = vma->vm_start; 262 263 - /* Optimization: gather nearby vmas into a single call down */ 264 - while (next && next->vm_start <= vma->vm_end + PMD_SIZE) { 265 - vma = next; 266 - next = vma->vm_next; 267 - } 268 - free_pgd_range(*tlb, addr, vma->vm_end, 269 floor, next? next->vm_start: ceiling); 270 vma = next; 271 } 272 } 273 274 - pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address) 275 { 276 if (!pmd_present(*pmd)) { 277 struct page *new;
··· 190 * 191 * Must be called with pagetable lock held. 192 */ 193 + void free_pgd_range(struct mmu_gather **tlb, 194 unsigned long addr, unsigned long end, 195 unsigned long floor, unsigned long ceiling) 196 { ··· 241 return; 242 243 start = addr; 244 + pgd = pgd_offset((*tlb)->mm, addr); 245 do { 246 next = pgd_addr_end(addr, end); 247 if (pgd_none_or_clear_bad(pgd)) 248 continue; 249 + free_pud_range(*tlb, pgd, addr, next, floor, ceiling); 250 } while (pgd++, addr = next, addr != end); 251 252 + if (!tlb_is_full_mm(*tlb)) 253 + flush_tlb_pgtables((*tlb)->mm, start, end); 254 } 255 256 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma, 257 + unsigned long floor, unsigned long ceiling) 258 { 259 while (vma) { 260 struct vm_area_struct *next = vma->vm_next; 261 unsigned long addr = vma->vm_start; 262 263 + if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) { 264 + hugetlb_free_pgd_range(tlb, addr, vma->vm_end, 265 floor, next? next->vm_start: ceiling); 266 + } else { 267 + /* 268 + * Optimization: gather nearby vmas into one call down 269 + */ 270 + while (next && next->vm_start <= vma->vm_end + PMD_SIZE 271 + && !is_hugepage_only_range(vma->vm_mm, next->vm_start, 272 + HPAGE_SIZE)) { 273 + vma = next; 274 + next = vma->vm_next; 275 + } 276 + free_pgd_range(tlb, addr, vma->vm_end, 277 + floor, next? next->vm_start: ceiling); 278 + } 279 vma = next; 280 } 281 } 282 283 + pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, 284 + unsigned long address) 285 { 286 if (!pmd_present(*pmd)) { 287 struct page *new;