Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] NOMMU: Order the per-mm_struct VMA list

Order the per-mm_struct VMA list by address so that searching it can be cut
short when the appropriate address has been exceeded.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by David Howells and committed by Linus Torvalds.
3034097a dbf8685c

+72 -32
mm/nommu.c
··· 310 310 } 311 311 #endif /* DEBUG */ 312 312 313 + /* 314 + * add a VMA into a process's mm_struct in the appropriate place in the list 315 + * - should be called with mm->mmap_sem held writelocked 316 + */ 317 + static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml) 318 + { 319 + struct vm_list_struct **ppv; 320 + 321 + for (ppv = &current->mm->context.vmlist; *ppv; ppv = &(*ppv)->next) 322 + if ((*ppv)->vma->vm_start > vml->vma->vm_start) 323 + break; 324 + 325 + vml->next = *ppv; 326 + *ppv = vml; 327 + } 328 + 329 + /* 330 + * look up the first VMA in which addr resides, NULL if none 331 + * - should be called with mm->mmap_sem at least held readlocked 332 + */ 333 + struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 334 + { 335 + struct vm_list_struct *loop, *vml; 336 + 337 + /* search the vm_start ordered list */ 338 + vml = NULL; 339 + for (loop = mm->context.vmlist; loop; loop = loop->next) { 340 + if (loop->vma->vm_start > addr) 341 + break; 342 + vml = loop; 343 + } 344 + 345 + if (vml && vml->vma->vm_end > addr) 346 + return vml->vma; 347 + 348 + return NULL; 349 + } 350 + EXPORT_SYMBOL(find_vma); 351 + 352 + /* 353 + * find a VMA in the global tree 354 + */ 313 355 static inline struct vm_area_struct *find_nommu_vma(unsigned long start) 314 356 { 315 357 struct vm_area_struct *vma; ··· 371 329 return NULL; 372 330 } 373 331 332 + /* 333 + * add a VMA in the global tree 334 + */ 374 335 static void add_nommu_vma(struct vm_area_struct *vma) 375 336 { 376 337 struct vm_area_struct *pvma; ··· 420 375 rb_insert_color(&vma->vm_rb, &nommu_vma_tree); 421 376 } 422 377 378 + /* 379 + * delete a VMA from the global list 380 + */ 423 381 static void delete_nommu_vma(struct vm_area_struct *vma) 424 382 { 425 383 struct address_space *mapping; ··· 900 852 realalloc += kobjsize(vml); 901 853 askedalloc += sizeof(*vml); 902 854 903 - vml->next = current->mm->context.vmlist; 904 - current->mm->context.vmlist = vml; 855 + 
add_vma_to_mm(current->mm, vml); 905 856 906 857 up_write(&nommu_vma_sem); 907 858 ··· 979 932 } 980 933 } 981 934 935 + /* 936 + * release a mapping 937 + * - under NOMMU conditions the parameters must match exactly to the mapping to 938 + * be removed 939 + */ 982 940 int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) 983 941 { 984 942 struct vm_list_struct *vml, **parent; ··· 993 941 printk("do_munmap:\n"); 994 942 #endif 995 943 996 - for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) 944 + for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) { 945 + if ((*parent)->vma->vm_start > addr) 946 + break; 997 947 if ((*parent)->vma->vm_start == addr && 998 948 ((len == 0) || ((*parent)->vma->vm_end == end))) 999 949 goto found; 950 + } 1000 951 1001 952 printk("munmap of non-mmaped memory by process %d (%s): %p\n", 1002 953 current->pid, current->comm, (void *) addr); ··· 1025 970 return 0; 1026 971 } 1027 972 1028 - /* Release all mmaps. 
*/ 973 + asmlinkage long sys_munmap(unsigned long addr, size_t len) 974 + { 975 + int ret; 976 + struct mm_struct *mm = current->mm; 977 + 978 + down_write(&mm->mmap_sem); 979 + ret = do_munmap(mm, addr, len); 980 + up_write(&mm->mmap_sem); 981 + return ret; 982 + } 983 + 984 + /* 985 + * Release all mappings 986 + */ 1029 987 void exit_mmap(struct mm_struct * mm) 1030 988 { 1031 989 struct vm_list_struct *tmp; ··· 1063 995 show_process_blocks(); 1064 996 #endif 1065 997 } 1066 - } 1067 - 1068 - asmlinkage long sys_munmap(unsigned long addr, size_t len) 1069 - { 1070 - int ret; 1071 - struct mm_struct *mm = current->mm; 1072 - 1073 - down_write(&mm->mmap_sem); 1074 - ret = do_munmap(mm, addr, len); 1075 - up_write(&mm->mmap_sem); 1076 - return ret; 1077 998 } 1078 999 1079 1000 unsigned long do_brk(unsigned long addr, unsigned long len) ··· 1117 1060 1118 1061 return vml->vma->vm_start; 1119 1062 } 1120 - 1121 - /* 1122 - * Look up the first VMA which satisfies addr < vm_end, NULL if none 1123 - * - should be called with mm->mmap_sem at least readlocked 1124 - */ 1125 - struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 1126 - { 1127 - struct vm_list_struct *vml; 1128 - 1129 - for (vml = mm->context.vmlist; vml; vml = vml->next) 1130 - if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end) 1131 - return vml->vma; 1132 - 1133 - return NULL; 1134 - } 1135 - 1136 - EXPORT_SYMBOL(find_vma); 1137 1063 1138 1064 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, 1139 1065 unsigned int foll_flags)