Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] mm: sh64 hugetlbpage.c

The sh64 hugetlbpage.c seems to be erroneous, left over from a bygone age,
clashing with the common hugetlb.c. Replace it by a copy of the sh
hugetlbpage.c — except that the mk_pte_huge macro, which neither file uses, is deleted.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by

Hugh Dickins and committed by
Linus Torvalds
147efea8 7ee78232

+12 -178
-2
arch/sh/mm/hugetlbpage.c
··· 54 54 return pte; 55 55 } 56 56 57 - #define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0) 58 - 59 57 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 60 58 pte_t *ptep, pte_t entry) 61 59 {
+12 -176
arch/sh64/mm/hugetlbpage.c
··· 54 54 return pte; 55 55 } 56 56 57 - #define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0) 58 - 59 - static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, 60 - struct page *page, pte_t * page_table, int write_access) 57 + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 58 + pte_t *ptep, pte_t entry) 61 59 { 62 - unsigned long i; 63 - pte_t entry; 64 - 65 - add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); 66 - 67 - if (write_access) 68 - entry = pte_mkwrite(pte_mkdirty(mk_pte(page, 69 - vma->vm_page_prot))); 70 - else 71 - entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 72 - entry = pte_mkyoung(entry); 73 - mk_pte_huge(entry); 60 + int i; 74 61 75 62 for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { 76 - set_pte(page_table, entry); 77 - page_table++; 78 - 63 + set_pte_at(mm, addr, ptep, entry); 64 + ptep++; 65 + addr += PAGE_SIZE; 79 66 pte_val(entry) += PAGE_SIZE; 80 67 } 81 68 } 82 69 83 - pte_t huge_ptep_get_and_clear(pte_t *ptep) 70 + pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 71 + pte_t *ptep) 84 72 { 85 73 pte_t entry; 74 + int i; 86 75 87 76 entry = *ptep; 88 77 89 78 for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { 90 - pte_clear(pte); 91 - pte++; 79 + pte_clear(mm, addr, ptep); 80 + addr += PAGE_SIZE; 81 + ptep++; 92 82 } 93 83 94 84 return entry; ··· 96 106 return 0; 97 107 } 98 108 99 - int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 100 - struct vm_area_struct *vma) 101 - { 102 - pte_t *src_pte, *dst_pte, entry; 103 - struct page *ptepage; 104 - unsigned long addr = vma->vm_start; 105 - unsigned long end = vma->vm_end; 106 - int i; 107 - 108 - while (addr < end) { 109 - dst_pte = huge_pte_alloc(dst, addr); 110 - if (!dst_pte) 111 - goto nomem; 112 - src_pte = huge_pte_offset(src, addr); 113 - BUG_ON(!src_pte || pte_none(*src_pte)); 114 - entry = *src_pte; 115 - ptepage = pte_page(entry); 116 - get_page(ptepage); 117 - for (i = 0; 
i < (1 << HUGETLB_PAGE_ORDER); i++) { 118 - set_pte(dst_pte, entry); 119 - pte_val(entry) += PAGE_SIZE; 120 - dst_pte++; 121 - } 122 - add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); 123 - addr += HPAGE_SIZE; 124 - } 125 - return 0; 126 - 127 - nomem: 128 - return -ENOMEM; 129 - } 130 - 131 - int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 132 - struct page **pages, struct vm_area_struct **vmas, 133 - unsigned long *position, int *length, int i) 134 - { 135 - unsigned long vaddr = *position; 136 - int remainder = *length; 137 - 138 - WARN_ON(!is_vm_hugetlb_page(vma)); 139 - 140 - while (vaddr < vma->vm_end && remainder) { 141 - if (pages) { 142 - pte_t *pte; 143 - struct page *page; 144 - 145 - pte = huge_pte_offset(mm, vaddr); 146 - 147 - /* hugetlb should be locked, and hence, prefaulted */ 148 - BUG_ON(!pte || pte_none(*pte)); 149 - 150 - page = pte_page(*pte); 151 - 152 - WARN_ON(!PageCompound(page)); 153 - 154 - get_page(page); 155 - pages[i] = page; 156 - } 157 - 158 - if (vmas) 159 - vmas[i] = vma; 160 - 161 - vaddr += PAGE_SIZE; 162 - --remainder; 163 - ++i; 164 - } 165 - 166 - *length = remainder; 167 - *position = vaddr; 168 - 169 - return i; 170 - } 171 - 172 109 struct page *follow_huge_addr(struct mm_struct *mm, 173 110 unsigned long address, int write) 174 111 { ··· 111 194 pmd_t *pmd, int write) 112 195 { 113 196 return NULL; 114 - } 115 - 116 - void unmap_hugepage_range(struct vm_area_struct *vma, 117 - unsigned long start, unsigned long end) 118 - { 119 - struct mm_struct *mm = vma->vm_mm; 120 - unsigned long address; 121 - pte_t *pte; 122 - struct page *page; 123 - int i; 124 - 125 - BUG_ON(start & (HPAGE_SIZE - 1)); 126 - BUG_ON(end & (HPAGE_SIZE - 1)); 127 - 128 - for (address = start; address < end; address += HPAGE_SIZE) { 129 - pte = huge_pte_offset(mm, address); 130 - BUG_ON(!pte); 131 - if (pte_none(*pte)) 132 - continue; 133 - page = pte_page(*pte); 134 - put_page(page); 135 - for (i = 0; i < (1 << 
HUGETLB_PAGE_ORDER); i++) { 136 - pte_clear(mm, address+(i*PAGE_SIZE), pte); 137 - pte++; 138 - } 139 - } 140 - add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT)); 141 - flush_tlb_range(vma, start, end); 142 - } 143 - 144 - int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) 145 - { 146 - struct mm_struct *mm = current->mm; 147 - unsigned long addr; 148 - int ret = 0; 149 - 150 - BUG_ON(vma->vm_start & ~HPAGE_MASK); 151 - BUG_ON(vma->vm_end & ~HPAGE_MASK); 152 - 153 - spin_lock(&mm->page_table_lock); 154 - for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { 155 - unsigned long idx; 156 - pte_t *pte = huge_pte_alloc(mm, addr); 157 - struct page *page; 158 - 159 - if (!pte) { 160 - ret = -ENOMEM; 161 - goto out; 162 - } 163 - if (!pte_none(*pte)) 164 - continue; 165 - 166 - idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) 167 - + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); 168 - page = find_get_page(mapping, idx); 169 - if (!page) { 170 - /* charge the fs quota first */ 171 - if (hugetlb_get_quota(mapping)) { 172 - ret = -ENOMEM; 173 - goto out; 174 - } 175 - page = alloc_huge_page(); 176 - if (!page) { 177 - hugetlb_put_quota(mapping); 178 - ret = -ENOMEM; 179 - goto out; 180 - } 181 - ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); 182 - if (! ret) { 183 - unlock_page(page); 184 - } else { 185 - hugetlb_put_quota(mapping); 186 - free_huge_page(page); 187 - goto out; 188 - } 189 - } 190 - set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE); 191 - } 192 - out: 193 - spin_unlock(&mm->page_table_lock); 194 - return ret; 195 197 }