// SPDX-License-Identifier: GPL-2.0
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name: The optional name in /proc/pid/maps
 * @unpinned_list: The list of this area's unpinned ranges
 * @file: The shmem-based backing file
 * @size: The size of the mapping, in bytes
 * @prot_mask: The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru: The entry in the LRU list
 * @unpinned: The entry in its area's unpinned list
 * @asma: The associated anonymous shared memory area
 * @pgstart: The starting page (inclusive)
 * @pgend: The ending page (inclusive)
 * @purged: The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};
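
/*
 * Worked example (illustrative, not from the original source): for an
 * area whose pages [4, 9] are unpinned, a single ashmem_range with
 * pgstart = 4 and pgend = 9 sits on both the area's unpinned_list and
 * the global LRU list. Pinning pages [6, 7] later splits it into two
 * ranges, [4, 5] and [8, 9].
 */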

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

static inline unsigned long range_size(struct ashmem_range *range)
{
	return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
	return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
					     size_t start, size_t end)
{
	return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
						size_t start, size_t end)
{
	return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
	return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
				       size_t start, size_t end)
{
	return page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range, size_t page)
{
	return range->pgend < page;
}

#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range: The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range: The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}
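
/*
 * Worked example (illustrative, not from the original source) for the
 * interval helpers above, with a range covering pages [4, 9]:
 *
 *	range_size(range)                          == 6 (bounds are inclusive)
 *	page_in_range(range, 6)                    -> true
 *	page_range_subsumes_range(range, 2, 11)    -> true ([4,9] lies inside [2,11])
 *	page_range_subsumed_by_range(range, 5, 7)  -> true ([5,7] lies inside [4,9])
 *	page_range_in_range(range, 8, 12)          -> true (overlaps at pages 8-9)
 *	range_before_page(range, 10)               -> true (9 < 10)
 */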

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma: The associated ashmem_area
 * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
 * @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start: The starting page (inclusive)
 * @end: The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range: The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range: The associated ashmem_range being shrunk
 * @start: The starting page (inclusive) of the new range
 * @end: The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - It
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode: The backing file's index node (inode)
 * @file: The backing file
 *
 * Please note that the ashmem_area is not returned by this function - It is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}
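
#if 0
/*
 * Illustrative userspace sketch (not part of this driver): the minimal
 * create/size/map sequence the driver expects, assuming the ASHMEM_*
 * ioctl definitions from the UAPI ashmem.h are available to userspace.
 * Error handling is elided; the function name is hypothetical.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *ashmem_create_region_example(const char *name, size_t size)
{
	int fd = open("/dev/ashmem", O_RDWR);

	ioctl(fd, ASHMEM_SET_NAME, name);	/* optional, before mmap */
	ioctl(fd, ASHMEM_SET_SIZE, size);	/* mandatory, before mmap */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
#endif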

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored: The backing file's inode; it is ignored here.
 * @file: The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ashmem_area *asma = iocb->ki_filp->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	mutex_unlock(&ashmem_mutex);
	ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
	mutex_lock(&ashmem_mutex);
	if (ret > 0)
		asma->file->f_pos = iocb->ki_pos;
out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from the backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
}
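
/*
 * For example (illustrative, not from the original source):
 * calc_vm_may_flags(PROT_READ | PROT_WRITE) evaluates to
 * VM_MAYREAD | VM_MAYWRITE. ashmem_mmap() below passes the complement of
 * the allowed mask, so &= ~calc_vm_may_flags(~asma->prot_mask) clears
 * exactly the VM_MAY* bits for the protections the area forbids.
 */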

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
		     calc_vm_prot_bits(PROT_MASK, 0))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of pages freed; SHRINK_STOP or -1 is
 * returned if we cannot proceed without risk of deadlock (due to
 * gfp_mask or lock contention).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		vfs_fallocate(range->asma->file,
			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
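
/*
 * For example (illustrative, not from the original source): after a
 * writer fills the region and calls
 * ioctl(fd, ASHMEM_SET_PROT_MASK, PROT_READ), any later mmap() of the fd
 * that requests PROT_WRITE fails in ashmem_mmap() with -EPERM, and the
 * mask can only ever be narrowed further, never widened again.
 */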

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland;
	 * no lock held.
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}
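
/*
 * For example (illustrative, not from the original source): after
 * ioctl(fd, ASHMEM_SET_NAME, "my-heap"), ioctl(fd, ASHMEM_GET_NAME, buf)
 * copies back "my-heap", while the backing file created in ashmem_mmap()
 * is named "dev/ashmem/my-heap" and shows up as such in /proc/<pid>/maps.
 */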

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	mutex_lock(&ashmem_mutex);

	if (unlikely(!asma->file))
		goto out_unlock;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
		ret = -EFAULT;
		goto out_unlock;
	}

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		goto out_unlock;

	if (unlikely(((__u32)-1) - pin.offset < pin.len))
		goto out_unlock;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		goto out_unlock;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

out_unlock:
	mutex_unlock(&ashmem_mutex);

	return ret;
}
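
#if 0
/*
 * Illustrative userspace sketch (not part of this driver): the canonical
 * unpin/repin protocol, assuming struct ashmem_pin and the ASHMEM_*
 * ioctls from the UAPI ashmem.h. Offsets and lengths are in bytes and
 * must be page-aligned; the function names are hypothetical.
 */
#include <sys/ioctl.h>

static int cache_release_example(int fd, __u32 off, __u32 len)
{
	struct ashmem_pin pin = { .offset = off, .len = len };

	return ioctl(fd, ASHMEM_UNPIN, &pin);	/* now reclaimable */
}

static int cache_reacquire_example(int fd, __u32 off, __u32 len)
{
	struct ashmem_pin pin = { .offset = off, .len = len };

	/*
	 * ASHMEM_NOT_PURGED means the contents survived; ASHMEM_WAS_PURGED
	 * means the shrinker punched them out and the caller must
	 * regenerate the data.
	 */
	return ioctl(fd, ASHMEM_PIN, &pin);
}
#endif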

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		mutex_lock(&ashmem_mutex);
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};

			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif
#ifdef CONFIG_PROC_FS
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct ashmem_area *asma = file->private_data;

	mutex_lock(&ashmem_mutex);

	if (asma->file)
		seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
		seq_printf(m, "name:\t%s\n",
			   asma->name + ASHMEM_NAME_PREFIX_LEN);

	mutex_unlock(&ashmem_mutex);
}
#endif
static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read_iter = ashmem_read_iter,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = ashmem_show_fdinfo,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	ret = register_shrinker(&ashmem_shrinker);
	if (ret) {
		pr_err("failed to register shrinker!\n");
		goto out_demisc;
	}

	pr_info("initialized\n");

	return 0;

out_demisc:
	misc_deregister(&ashmem_misc);
out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);
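
/*
 * For example (illustrative, not from the original source): with
 * CONFIG_PROC_FS enabled, the fdinfo hook above makes a named, mapped
 * region self-describing through /proc/<pid>/fdinfo/<fd>, which gains
 * two driver-specific lines:
 *
 *	inode:	<inode number of the backing shmem file>
 *	name:	my-heap
 */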