/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	The entry in the LRU list
 * @unpinned:	The entry in its area's unpinned list
 * @asma:	The associated anonymous shared memory area
 * @pgstart:	The starting page (inclusive)
 * @pgend:	The ending page (inclusive)
 * @purged:	The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;
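/*
 * Illustrative sketch (not part of the original file): how the name buffer
 * of an ashmem_area is laid out.  The open() handler below copies in
 * ASHMEM_NAME_PREFIX, and ASHMEM_SET_NAME appends the user-supplied name,
 * so a region named "my-region" appears in /proc/<pid>/maps as
 * "dev/ashmem/my-region":
 *
 *	char name[ASHMEM_FULL_NAME_LEN];
 *	memcpy(name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
 *	strcpy(name + ASHMEM_NAME_PREFIX_LEN, "my-region");
 */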
/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

static inline int page_range_subsumes_range(struct ashmem_range *range,
					    size_t start, size_t end)
{
	return (((range)->pgstart >= (start)) && ((range)->pgend <= (end)));
}

static inline int page_range_subsumed_by_range(struct ashmem_range *range,
					       size_t start, size_t end)
{
	return (((range)->pgstart <= (start)) && ((range)->pgend >= (end)));
}

static inline int page_in_range(struct ashmem_range *range, size_t page)
{
	return (((range)->pgstart <= (page)) && ((range)->pgend >= (page)));
}

static inline int page_range_in_range(struct ashmem_range *range,
				      size_t start, size_t end)
{
	return (page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end));
}

static inline int range_before_page(struct ashmem_range *range, size_t page)
{
	return ((range)->pgend < (page));
}

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:	The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:	The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}
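/*
 * Worked example (hypothetical ranges, not part of the original file): an
 * area whose pages 1-3 and 7 are unpinned is described by two ashmem_range
 * entries, { .pgstart = 1, .pgend = 3 } and { .pgstart = 7, .pgend = 7 }.
 * Both bounds are inclusive, so range_size() yields 3 and 1 respectively,
 * and after lru_add() has run for both ranges, lru_count == 4.
 */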
/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	The associated ashmem_area
 * @prev_range:	The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	The starting page (inclusive)
 * @end:	The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	The associated ashmem_range being shrunk
 * @start:	The starting page (inclusive) of the new range
 * @end:	The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
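/*
 * Worked example (hypothetical values): shrinking an unpinned range that
 * covers pages [2, 9] down to [2, 4] with range_shrink(range, 2, 4) leaves
 * the page contents alone and only updates the bookkeeping: pre == 8 and
 * range_size() == 3 afterwards, so lru_count drops by 5.
 */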
/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	The inode of the backing file
 * @file:	The backing file
 *
 * Please note that the ashmem_area is not returned by this function - it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	The inode of the backing file - it is ignored here.
 * @file:	The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

/**
 * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
 * @file:	The associated backing file.
 * @buf:	The user buffer into which the data is read
 * @len:	The maximum number of bytes to read
 * @pos:	The position of the first byte to read.
 *
 * Return: The number of bytes read, 0 at end of file, or a negative error
 * code on failure.
 */
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	ssize_t ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	mutex_unlock(&ashmem_mutex);

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	ret = __vfs_read(asma->file, buf, len, pos);
	if (ret >= 0)
		/* Update backing file pos, since f_ops->read() doesn't */
		asma->file->f_pos = *pos;
	return ret;

out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
		     calc_vm_prot_bits(PROT_MASK, 0))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
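/*
 * Worked example (hypothetical flow): after
 * ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ), asma->prot_mask == PROT_READ.
 * A later mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0) is
 * rejected above with -EPERM, because the VMA carries VM_WRITE while
 * calc_vm_prot_bits(asma->prot_mask, 0) allows only VM_READ.  A plain
 * PROT_READ mapping succeeds, but its VM_MAYWRITE/VM_MAYEXEC bits are
 * cleared, so a later mprotect() cannot widen the mapping either.
 */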
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		vfs_fallocate(range->asma->file,
			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need such
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};
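/*
 * Worked example (hypothetical range, 4 KiB pages): for an unpinned range
 * with pgstart == 2 and pgend == 4, ashmem_shrink_scan() computes
 *
 *	start = 2 * 4096       = 8192
 *	end   = (4 + 1) * 4096 = 20480
 *
 * and punches a 12288-byte hole in the backing shmem file, crediting
 * 'freed' with range_size() == 3 pages.
 */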
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * a deadlock. We'll release the mutex and copy the name into a local
	 * variable that needs no protection, then copy the local variable
	 * into the structure member with the lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Use a local variable into which we copy the name from asma with
	 * the lock held. Later we can copy it to user space safely without
	 * holding any locks, so even if we block waiting for mmap_sem no
	 * deadlock can occur.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland;
	 * no lock held
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
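/*
 * Worked example of case #4 (hypothetical values): pinning pages [4, 5] out
 * of an unpinned range covering [2, 9] first allocates a new range for the
 * tail via range_alloc(asma, range, range->purged, 6, 9), then shrinks the
 * original to [2, 3].  The pinned hole [4, 5] is thereafter absent from the
 * area's unpinned list.
 */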
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32)-1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
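/*
 * Worked example (hypothetical values, 4 KiB pages): an ASHMEM_PIN request
 * with pin.offset == 4096 and pin.len == 8192 covers pages 1 and 2:
 *
 *	pgstart = 4096 / 4096           = 1
 *	pgend   = 1 + (8192 / 4096) - 1 = 2
 *
 * Passing pin.len == 0 is, per the comment above, shorthand for "from
 * offset to the end of the page-aligned region".
 */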
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

/* support for 32-bit userspace on 64-bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;

out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);
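/*
 * Userspace usage sketch (illustrative only; assumes the uapi ioctl
 * definitions are available as <linux/ashmem.h> and that the misc device
 * appears as /dev/ashmem):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *	#include <linux/ashmem.h>
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-region");   // optional, before mmap
 *	ioctl(fd, ASHMEM_SET_SIZE, 4 * 4096);      // required, before mmap
 *	char *p = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);             // page 0 becomes evictable
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED) {
 *		// contents of page 0 were reclaimed; reinitialize them
 *	}
 *
 *	munmap(p, 4 * 4096);
 *	close(fd);
 */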