mm/vmacache.c at v3.18
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	/*
	 * Single threaded tasks need not iterate the entire
	 * list of processes. We can avoid the flushing as well
	 * since the mm's seqnum was increased and we don't have
	 * to worry about other threads' seqnums. Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}

/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (i.e., its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

/*
 * Cache the newly looked-up vma in the slot selected by its address,
 * provided the cache is usable for this task's own mm.
 */
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache[VMACACHE_HASH(addr)] = newvma;
}

static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache_seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

/*
 * Look up @addr in the current task's vma cache. Returns the cached
 * vma containing @addr, or NULL on a miss or if the cache is not
 * valid for @mm.
 */
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}

#ifndef CONFIG_MMU
/*
 * nommu variant: only an exact [start, end) match counts as a hit.
 */
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif
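
For context, the typical consumer of this cache is the VMA lookup path: it probes
vmacache_find() first and, only on a miss, walks the mm's rbtree and then records
the result with vmacache_update() so the next nearby lookup can hit the cache.
The sketch below is an abridged, illustrative caller modeled on find_vma() in
mm/mmap.c around this kernel version; it is not part of this file and details may
differ from the exact upstream source.

/* Abridged sketch of a caller, modeled on find_vma() in mm/mmap.c. */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct rb_node *rb_node;
	struct vm_area_struct *vma;

	/* Check the per-thread vma cache first. */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* Cache miss: fall back to the rbtree walk. */
	rb_node = mm->mm_rb.rb_node;
	vma = NULL;
	while (rb_node) {
		struct vm_area_struct *tmp;

		tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
		if (tmp->vm_end > addr) {
			vma = tmp;
			if (tmp->vm_start <= addr)
				break;
			rb_node = rb_node->rb_left;
		} else
			rb_node = rb_node->rb_right;
	}

	/* Remember the result so a subsequent lookup can hit the cache. */
	if (vma)
		vmacache_update(addr, vma);
	return vma;
}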