Support strange discontiguous PFN remappings

These are created by some drivers that don't really want a PFN
remapping at all, and would mostly prefer to just map the pages
they've allocated individually instead.

For now, create a helper function that turns such an incomplete PFN
remapping call into a loop that maps the pages explicitly, one at a
time. In the long run we almost certainly want to export a totally
different interface for that, though.
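
To illustrate the kind of call this fallback catches, consider a
driver mmap() handler that remaps only part of its vma (hypothetical
sketch; foo_mmap() and foo_page are made-up names, not from any real
driver):

	static struct page *foo_page;	/* hypothetical driver-owned, reserved page */

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long pfn = page_to_pfn(foo_page);

		/*
		 * Remaps a single page in the middle of the vma rather
		 * than the whole vm_start..vm_end range, so the addr/end
		 * check in remap_pfn_range() now diverts this into
		 * incomplete_pfn_remap().
		 */
		return remap_pfn_range(vma, vma->vm_start + PAGE_SIZE, pfn,
				       PAGE_SIZE, vma->vm_page_prot);
	}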

Signed-off-by: Linus Torvalds <torvalds@osdl.org>

---
 include/linux/mm.h |  1 +
 mm/memory.c        | 92 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 93 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -163,6 +163,7 @@
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#define VM_INCOMPLETE	0x02000000	/* Strange partial PFN mapping marker */
 
 #ifndef VM_STACK_DEFAULT_FLAGS	/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1147,6 +1147,95 @@
 }
 
 /*
+ * This is the old fallback for page remapping.
+ *
+ * For historical reasons, it only allows reserved pages. Only
+ * old drivers should use this, and they needed to mark their
+ * pages reserved for the old functions anyway.
+ */
+static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+{
+	int retval;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	retval = -EINVAL;
+	if (PageAnon(page) || !PageReserved(page))
+		goto out;
+	retval = -ENOMEM;
+	flush_dcache_page(page);
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
+		goto out;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+	if (!pte)
+		goto out;
+	retval = -EBUSY;
+	if (!pte_none(*pte))
+		goto out_unlock;
+
+	/* Ok, finally just insert the thing.. */
+	get_page(page);
+	inc_mm_counter(mm, file_rss);
+	page_add_file_rmap(page);
+	set_pte_at(mm, addr, pte, mk_pte(page, prot));
+
+	retval = 0;
+out_unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
+	return retval;
+}
+
+/*
+ * Somebody does a pfn remapping that doesn't actually work as a vma.
+ *
+ * Do it as individual pages instead, and warn about it. It's bad form,
+ * and very inefficient.
+ */
+static int incomplete_pfn_remap(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end,
+		unsigned long pfn, pgprot_t prot)
+{
+	static int warn = 10;
+	struct page *page;
+	int retval;
+
+	if (!(vma->vm_flags & VM_INCOMPLETE)) {
+		if (warn) {
+			warn--;
+			printk("%s does an incomplete pfn remapping\n", current->comm);
+			dump_stack();
+		}
+	}
+	vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
+
+	if (start < vma->vm_start || end > vma->vm_end)
+		return -EINVAL;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
+
+	retval = 0;
+	page = pfn_to_page(pfn);
+	while (start < end) {
+		retval = insert_page(vma->vm_mm, start, page, prot);
+		if (retval < 0)
+			break;
+		start += PAGE_SIZE;
+		page++;
+	}
+	return retval;
+}
+
+/*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
  * in null mappings (currently treated as "copy-on-access")
@@ -1219,6 +1308,9 @@
 	unsigned long end = addr + PAGE_ALIGN(size);
 	struct mm_struct *mm = vma->vm_mm;
 	int err;
+
+	if (addr != vma->vm_start || end != vma->vm_end)
+		return incomplete_pfn_remap(vma, addr, end, pfn, prot);
 
 	/*
 	 * Physically remapped pages are special. Tell the
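
Note that insert_page() only accepts reserved, non-anonymous pages,
so a driver's pages still have to be set up the way the old remapping
interfaces wanted. A minimal sketch of the allocation side (assuming
plain alloc_page()-backed pages; not part of this patch):

	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	/*
	 * Satisfies the PageAnon()/!PageReserved() check in
	 * insert_page() -- the same marking the old functions
	 * required anyway.
	 */
	SetPageReserved(page);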