mm/ashmem.c at v4.13-rc4
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'
 *
 * Warning: Mappings do NOT pin this structure; It dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	The entry in the LRU list
 * @unpinned:	The entry in its area's unpinned list
 * @asma:	The associated anonymous shared memory area.
 * @pgstart:	The starting page (inclusive)
 * @pgend:	The ending page (inclusive)
 * @purged:	The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

static inline unsigned long range_size(struct ashmem_range *range)
{
	return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
	return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
					     size_t start, size_t end)
{
	return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
						size_t start, size_t end)
{
	return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
	return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
				       size_t start, size_t end)
{
	return page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range, size_t page)
{
	return range->pgend < page;
}

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:	The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:	The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	The associated ashmem_area
 * @prev_range:	The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	The starting page (inclusive)
 * @end:	The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	The associated ashmem_range being shrunk
 * @start:	The starting byte of the new range
 * @end:	The ending byte of the new range
 *
 * This does not modify the data inside the existing range in any way - It
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	The backing file's index node(?)
 * @file:	The backing file
 *
 * Please note that the ashmem_area is not returned by this function - It is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	The backing file's Index Node(?) - It is ignored here.
 * @file:	The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

/**
 * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
 * @file:	The associated backing file.
 * @buf:	The buffer of data being written to
 * @len:	The number of bytes being read
 * @pos:	The position of the first byte to read.
 *
 * Return: 0 if successful, or another return code if not.
 */
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	mutex_unlock(&ashmem_mutex);

	/*
	 * asma and asma->file are used outside the lock here.  We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	ret = __vfs_read(asma->file, buf, len, pos);
	if (ret >= 0)
		/** Update backing file pos, since f_ops->read() doesn't */
		asma->file->f_pos = *pos;
	return ret;

out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/** Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
		     calc_vm_prot_bits(PROT_MASK, 0))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		vfs_fallocate(range->asma->file,
			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort, which would try to access mmap_sem.  If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock.  We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held.  Later we can copy this to the user
	 * space safely without holding any locks.  So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland
	 * No lock held
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy.  Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle.  A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32)-1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};
			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;

out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);
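What follows is not part of the kernel file above, but an illustrative sketch of how a userspace client might exercise the ioctl interface this driver implements: open the misc device, set a name and size, mmap the region, then unpin and re-pin a page range. The /dev/ashmem path and the <linux/ashmem.h> include are assumptions (the path follows from ashmem_misc.name; the ASHMEM_* ioctls and struct ashmem_pin come from the driver's "ashmem.h" UAPI header, whose install location varies by system image), so adapt both as needed.

/*
 * Usage sketch, not part of the driver.  Assumes the ashmem UAPI
 * definitions are visible as <linux/ashmem.h> and the device node is
 * /dev/ashmem; both are assumptions, not guarantees.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>	/* assumed install location of the UAPI header */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t size = 4 * page;
	struct ashmem_pin pin;
	void *addr;
	int fd;

	fd = open("/dev/ashmem", O_RDWR);
	if (fd < 0) {
		perror("open /dev/ashmem");
		return 1;
	}

	/* Name and size must be set before the first mmap() (see ashmem_mmap). */
	ioctl(fd, ASHMEM_SET_NAME, "example-region");
	ioctl(fd, ASHMEM_SET_SIZE, size);

	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	memset(addr, 0xab, size);

	/* Unpin the last two pages: they become reclaimable by ashmem_shrink_scan(). */
	pin.offset = 2 * page;
	pin.len = 2 * page;
	ioctl(fd, ASHMEM_UNPIN, &pin);

	/* Re-pin before reuse; ASHMEM_WAS_PURGED means the contents were discarded. */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		fprintf(stderr, "range was purged while unpinned\n");

	munmap(addr, size);
	close(fd);
	return 0;
}

Note how the flow mirrors the driver's checks: ASHMEM_SET_SIZE must precede mmap() (ashmem_mmap returns -EINVAL otherwise), and pin/unpin requests must be page-aligned and within the region (ashmem_pin_unpin validates offset and len against PAGE_MASK and the aligned size).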