/* mm/util.c, Linux v3.4 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len+1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause pagefault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;
        size_t ks = 0;

        if (unlikely(!new_size))
                return ZERO_SIZE_PTR;

        if (p)
                ks = ksize(p);

        if (ks >= new_size)
                return (void *)p;

        ret = kmalloc_track_caller(new_size, flags);
        if (ret && p)
                memcpy(ret, p, ks);

        return ret;
}
EXPORT_SYMBOL(__krealloc);
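
/*
 * Illustrative sketch (not part of mm/util.c): a hypothetical helper,
 * example_copy_config(), showing how a caller of memdup_user() above is
 * expected to handle failure: the result is an ERR_PTR()-encoded error, so
 * it must be checked with IS_ERR()/PTR_ERR() rather than compared against
 * NULL, and the duplicate must be kfree()d when done. The function name and
 * the 256-byte limit are invented for the example.
 */
static int example_copy_config(const void __user *ubuf, size_t len)
{
        void *kbuf;

        if (len == 0 || len > 256)
                return -EINVAL;

        kbuf = memdup_user(ubuf, len);  /* GFP_KERNEL allocation + copy_from_user() */
        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);   /* -ENOMEM or -EFAULT, never NULL */

        /* ... use the kernel-space copy here ... */

        kfree(kbuf);
        return 0;
}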

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;

        if (unlikely(!new_size)) {
                kfree(p);
                return ZERO_SIZE_PTR;
        }

        ret = __krealloc(p, new_size, flags);
        if (ret && p != ret)
                kfree(p);

        return ret;
}
EXPORT_SYMBOL(krealloc);

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
        size_t ks;
        void *mem = (void *)p;

        if (unlikely(ZERO_OR_NULL_PTR(mem)))
                return;
        ks = ksize(mem);
        memset(mem, 0, ks);
        kfree(mem);
}
EXPORT_SYMBOL(kzfree);

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
                                struct vm_area_struct *vma)
{
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is non-zero, check in the entire thread group or else
 * just check in the current task. Returns the pid of the task that
 * the vma is stack for.
 */
pid_t vm_is_stack(struct task_struct *task,
                  struct vm_area_struct *vma, int in_group)
{
        pid_t ret = 0;

        if (vm_is_stack_for_task(task, vma))
                return task->pid;

        if (in_group) {
                struct task_struct *t;
                rcu_read_lock();
                if (!pid_alive(task))
                        goto done;

                t = task;
                do {
                        if (vm_is_stack_for_task(t, vma)) {
                                ret = t->pid;
                                goto done;
                        }
                } while_each_thread(task, t);
done:
                rcu_read_unlock();
        }

        return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}
#endif
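
/*
 * Illustrative sketch (not part of mm/util.c): a hypothetical
 * example_append() helper showing the usual calling pattern for krealloc()
 * above. On failure krealloc() returns NULL and leaves the original buffer
 * allocated, so the caller must hold on to the old pointer until the call
 * has succeeded. The structure and field names are invented for the example.
 */
struct example_buf {
        char    *data;
        size_t  len;
};

static int example_append(struct example_buf *buf, const char *src, size_t n)
{
        char *tmp;

        /* Grow the buffer; existing contents are preserved. */
        tmp = krealloc(buf->data, buf->len + n, GFP_KERNEL);
        if (!tmp)
                return -ENOMEM; /* buf->data is still valid and still owned */

        buf->data = tmp;
        memcpy(buf->data + buf->len, src, n);
        buf->len += n;
        return 0;
}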

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:    starting user address
 * @nr_pages: number of pages from start to pin
 * @write:    whether pages will be written to
 * @pages:    array that receives pointers to the pages pinned.
 *            Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        struct mm_struct *mm = current->mm;
        int ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, start, nr_pages,
                                        write, 0, pages, NULL);
        up_read(&mm->mmap_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
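
/*
 * Illustrative sketch (not part of mm/util.c): a hypothetical
 * example_pin_pages() function showing the calling convention documented for
 * get_user_pages_fast() above. The return value may be fewer pages than
 * requested, so only the pages actually pinned are touched, and each one is
 * released with put_page() (marked dirty first when it was written to).
 * The function name is invented for the example.
 */
static int example_pin_pages(unsigned long start, int nr_pages, int write)
{
        struct page **pages;
        int pinned, i;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = get_user_pages_fast(start, nr_pages, write, pages);
        if (pinned < 0) {               /* no pages pinned at all: -errno */
                kfree(pages);
                return pinned;
        }

        /* ... operate on pages[0..pinned-1] here ... */

        for (i = 0; i < pinned; i++) {
                if (write)
                        set_page_dirty_lock(pages[i]);
                put_page(pages[i]);
        }
        kfree(pages);
        return pinned;
}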