Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

um: remove force_flush_all from fork_handler

There should be no need for this. It may be that this used to work
around another issue, where the MM was in a bad state after a clone.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Link: https://patch.msgid.link/20240703134536.1161108-11-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>

Authored by Benjamin Berg and committed by Johannes Berg
ef714f15 5168f6b4

+15 -33
-2
arch/um/include/asm/mmu_context.h
··· 13 13 #include <asm/mm_hooks.h> 14 14 #include <asm/mmu.h> 15 15 16 - extern void force_flush_all(void); 17 - 18 16 #define activate_mm activate_mm 19 17 static inline void activate_mm(struct mm_struct *old, struct mm_struct *new) 20 18 {
-2
arch/um/kernel/process.c
··· 122 122 /* Called magically, see new_thread_handler above */ 123 123 static void fork_handler(void) 124 124 { 125 - force_flush_all(); 126 - 127 125 schedule_tail(current->thread.prev_sched); 128 126 129 127 /*
+15 -29
arch/um/kernel/tlb.c
··· 41 41 int index; 42 42 struct mm_struct *mm; 43 43 void *data; 44 - int force; 45 44 }; 46 45 47 - #define INIT_HVC(mm, force, userspace) \ 46 + #define INIT_HVC(mm, userspace) \ 48 47 ((struct host_vm_change) \ 49 48 { .ops = { { .type = NONE } }, \ 50 49 .mm = mm, \ 51 50 .data = NULL, \ 52 51 .userspace = userspace, \ 53 - .index = 0, \ 54 - .force = force }) 52 + .index = 0 }) 55 53 56 54 void report_enomem(void) 57 55 { ··· 233 235 234 236 prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) | 235 237 (x ? UM_PROT_EXEC : 0)); 236 - if (hvc->force || pte_newpage(*pte)) { 238 + if (pte_newpage(*pte)) { 237 239 if (pte_present(*pte)) { 238 240 if (pte_newpage(*pte)) 239 241 ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK, ··· 259 261 do { 260 262 next = pmd_addr_end(addr, end); 261 263 if (!pmd_present(*pmd)) { 262 - if (hvc->force || pmd_newpage(*pmd)) { 264 + if (pmd_newpage(*pmd)) { 263 265 ret = add_munmap(addr, next - addr, hvc); 264 266 pmd_mkuptodate(*pmd); 265 267 } ··· 281 283 do { 282 284 next = pud_addr_end(addr, end); 283 285 if (!pud_present(*pud)) { 284 - if (hvc->force || pud_newpage(*pud)) { 286 + if (pud_newpage(*pud)) { 285 287 ret = add_munmap(addr, next - addr, hvc); 286 288 pud_mkuptodate(*pud); 287 289 } ··· 303 305 do { 304 306 next = p4d_addr_end(addr, end); 305 307 if (!p4d_present(*p4d)) { 306 - if (hvc->force || p4d_newpage(*p4d)) { 308 + if (p4d_newpage(*p4d)) { 307 309 ret = add_munmap(addr, next - addr, hvc); 308 310 p4d_mkuptodate(*p4d); 309 311 } ··· 314 316 } 315 317 316 318 static void fix_range_common(struct mm_struct *mm, unsigned long start_addr, 317 - unsigned long end_addr, int force) 319 + unsigned long end_addr) 318 320 { 319 321 pgd_t *pgd; 320 322 struct host_vm_change hvc; 321 323 unsigned long addr = start_addr, next; 322 324 int ret = 0, userspace = 1; 323 325 324 - hvc = INIT_HVC(mm, force, userspace); 326 + hvc = INIT_HVC(mm, userspace); 325 327 pgd = pgd_offset(mm, addr); 326 328 do { 327 329 next = 
pgd_addr_end(addr, end_addr); 328 330 if (!pgd_present(*pgd)) { 329 - if (force || pgd_newpage(*pgd)) { 331 + if (pgd_newpage(*pgd)) { 330 332 ret = add_munmap(addr, next - addr, &hvc); 331 333 pgd_mkuptodate(*pgd); 332 334 } ··· 347 349 pmd_t *pmd; 348 350 pte_t *pte; 349 351 unsigned long addr, last; 350 - int updated = 0, err = 0, force = 0, userspace = 0; 352 + int updated = 0, err = 0, userspace = 0; 351 353 struct host_vm_change hvc; 352 354 353 355 mm = &init_mm; 354 - hvc = INIT_HVC(mm, force, userspace); 356 + hvc = INIT_HVC(mm, userspace); 355 357 for (addr = start; addr < end;) { 356 358 pgd = pgd_offset(mm, addr); 357 359 if (!pgd_present(*pgd)) { ··· 535 537 } 536 538 537 539 static void fix_range(struct mm_struct *mm, unsigned long start_addr, 538 - unsigned long end_addr, int force) 540 + unsigned long end_addr) 539 541 { 540 542 /* 541 543 * Don't bother flushing if this address space is about to be ··· 544 546 if (atomic_read(&mm->mm_users) == 0) 545 547 return; 546 548 547 - fix_range_common(mm, start_addr, end_addr, force); 549 + fix_range_common(mm, start_addr, end_addr); 548 550 } 549 551 550 552 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, ··· 552 554 { 553 555 if (vma->vm_mm == NULL) 554 556 flush_tlb_kernel_range_common(start, end); 555 - else fix_range(vma->vm_mm, start, end, 0); 557 + else fix_range(vma->vm_mm, start, end); 556 558 } 557 559 EXPORT_SYMBOL(flush_tlb_range); 558 560 ··· 562 564 VMA_ITERATOR(vmi, mm, 0); 563 565 564 566 for_each_vma(vmi, vma) 565 - fix_range(mm, vma->vm_start, vma->vm_end, 0); 566 - } 567 - 568 - void force_flush_all(void) 569 - { 570 - struct mm_struct *mm = current->mm; 571 - struct vm_area_struct *vma; 572 - VMA_ITERATOR(vmi, mm, 0); 573 - 574 - mmap_read_lock(mm); 575 - for_each_vma(vmi, vma) 576 - fix_range(mm, vma->vm_start, vma->vm_end, 1); 577 - mmap_read_unlock(mm); 567 + fix_range(mm, vma->vm_start, vma->vm_end); 578 568 }