Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm/radix: Pick the address layout for radix config

Hash needs special get_unmapped_area() handling because of limitations
around base page size, so we have to set HAVE_ARCH_UNMAPPED_AREA.

With radix we don't have such restrictions, so we could use the generic
code. But because we've set HAVE_ARCH_UNMAPPED_AREA (for hash), we have
to re-implement the same logic as the generic code.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Aneesh Kumar K.V; committed by Michael Ellerman.
7a0eedee 177ba7c6

+109
+109
arch/powerpc/mm/mmap.c
··· 27 27 #include <linux/random.h> 28 28 #include <linux/sched.h> 29 29 #include <linux/elf-randomize.h> 30 + #include <linux/security.h> 31 + #include <linux/mman.h> 30 32 31 33 /* 32 34 * Top of mmap area (just below the process stack). ··· 81 79 return PAGE_ALIGN(TASK_SIZE - gap - rnd); 82 80 } 83 81 82 + #ifdef CONFIG_PPC_RADIX_MMU 83 + /* 84 + * Same function as generic code used only for radix, because we don't need to overload 85 + * the generic one. But we will have to duplicate, because hash select 86 + * HAVE_ARCH_UNMAPPED_AREA 87 + */ 88 + static unsigned long 89 + radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, 90 + unsigned long len, unsigned long pgoff, 91 + unsigned long flags) 92 + { 93 + struct mm_struct *mm = current->mm; 94 + struct vm_area_struct *vma; 95 + struct vm_unmapped_area_info info; 96 + 97 + if (len > TASK_SIZE - mmap_min_addr) 98 + return -ENOMEM; 99 + 100 + if (flags & MAP_FIXED) 101 + return addr; 102 + 103 + if (addr) { 104 + addr = PAGE_ALIGN(addr); 105 + vma = find_vma(mm, addr); 106 + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 107 + (!vma || addr + len <= vma->vm_start)) 108 + return addr; 109 + } 110 + 111 + info.flags = 0; 112 + info.length = len; 113 + info.low_limit = mm->mmap_base; 114 + info.high_limit = TASK_SIZE; 115 + info.align_mask = 0; 116 + return vm_unmapped_area(&info); 117 + } 118 + 119 + static unsigned long 120 + radix__arch_get_unmapped_area_topdown(struct file *filp, 121 + const unsigned long addr0, 122 + const unsigned long len, 123 + const unsigned long pgoff, 124 + const unsigned long flags) 125 + { 126 + struct vm_area_struct *vma; 127 + struct mm_struct *mm = current->mm; 128 + unsigned long addr = addr0; 129 + struct vm_unmapped_area_info info; 130 + 131 + /* requested length too big for entire address space */ 132 + if (len > TASK_SIZE - mmap_min_addr) 133 + return -ENOMEM; 134 + 135 + if (flags & MAP_FIXED) 136 + return addr; 137 + 138 + /* requesting a specific 
address */ 139 + if (addr) { 140 + addr = PAGE_ALIGN(addr); 141 + vma = find_vma(mm, addr); 142 + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 143 + (!vma || addr + len <= vma->vm_start)) 144 + return addr; 145 + } 146 + 147 + info.flags = VM_UNMAPPED_AREA_TOPDOWN; 148 + info.length = len; 149 + info.low_limit = max(PAGE_SIZE, mmap_min_addr); 150 + info.high_limit = mm->mmap_base; 151 + info.align_mask = 0; 152 + addr = vm_unmapped_area(&info); 153 + 154 + /* 155 + * A failed mmap() very likely causes application failure, 156 + * so fall back to the bottom-up function here. This scenario 157 + * can happen with large stack limits and large mmap() 158 + * allocations. 159 + */ 160 + if (addr & ~PAGE_MASK) { 161 + VM_BUG_ON(addr != -ENOMEM); 162 + info.flags = 0; 163 + info.low_limit = TASK_UNMAPPED_BASE; 164 + info.high_limit = TASK_SIZE; 165 + addr = vm_unmapped_area(&info); 166 + } 167 + 168 + return addr; 169 + } 170 + 171 + static void radix__arch_pick_mmap_layout(struct mm_struct *mm, 172 + unsigned long random_factor) 173 + { 174 + if (mmap_is_legacy()) { 175 + mm->mmap_base = TASK_UNMAPPED_BASE; 176 + mm->get_unmapped_area = radix__arch_get_unmapped_area; 177 + } else { 178 + mm->mmap_base = mmap_base(random_factor); 179 + mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown; 180 + } 181 + } 182 + #else 183 + /* dummy */ 184 + extern void radix__arch_pick_mmap_layout(struct mm_struct *mm, 185 + unsigned long random_factor); 186 + #endif 84 187 /* 85 188 * This function, called very early during the creation of a new 86 189 * process VM image, sets up which VM layout function to use: ··· 197 90 if (current->flags & PF_RANDOMIZE) 198 91 random_factor = arch_mmap_rnd(); 199 92 93 + if (radix_enabled()) 94 + return radix__arch_pick_mmap_layout(mm, random_factor); 200 95 /* 201 96 * Fall back to the standard layout if the personality 202 97 * bit is set, or if the expected stack growth is unlimited: