// SPDX-License-Identifier: GPL-2.0
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of all ashmem areas
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'
 *
 * Warning: Mappings do NOT pin this structure; It dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	The entry in the LRU list
 * @unpinned:	The entry in its area's unpinned list
 * @asma:	The associated anonymous shared memory area.
 * @pgstart:	The starting page (inclusive)
 * @pgend:	The ending page (inclusive)
 * @purged:	The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

static inline unsigned long range_size(struct ashmem_range *range)
{
	return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
	return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
					     size_t start, size_t end)
{
	return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
						size_t start, size_t end)
{
	return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
	return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
				       size_t start, size_t end)
{
	return page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range,
				     size_t page)
{
	return range->pgend < page;
}

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:	The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:	The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	The associated ashmem_area
 * @prev_range:	The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	The starting page (inclusive)
 * @end:	The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 */
static void range_alloc(struct ashmem_area *asma,
			struct ashmem_range *prev_range, unsigned int purged,
			size_t start, size_t end,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range = *new_range;

	*new_range = NULL;
	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	The associated ashmem_range being shrunk
 * @start:	The starting byte of the new range
 * @end:	The ending byte of the new range
 *
 * This does not modify the data inside the existing range in any way - It
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	The backing file's index node(?)
 * @file:	The backing file
 *
 * Please note that the ashmem_area is not returned by this function - It is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (ret)
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (!asma)
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	The backing file's Index Node(?) - It is ignored here.
 * @file:	The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ashmem_area *asma = iocb->ki_filp->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	mutex_unlock(&ashmem_mutex);
	ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
	mutex_lock(&ashmem_mutex);
	if (ret > 0)
		asma->file->f_pos = iocb->ki_pos;
out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		mutex_unlock(&ashmem_mutex);
		return -EINVAL;
	}

	if (!asma->file) {
		mutex_unlock(&ashmem_mutex);
		return -EBADF;
	}

	mutex_unlock(&ashmem_mutex);

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		return ret;

	/** Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (!asma->size) {
		ret = -EINVAL;
		goto out;
	}

	/* requested mapping size larger than object size */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
	    calc_vm_prot_bits(PROT_MASK, 0)) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	} else {
		vma_set_anonymous(vma);
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	while (!list_empty(&ashmem_lru_list)) {
		struct ashmem_range *range =
			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;
		struct file *f = range->asma->file;

		get_file(f);
		atomic_inc(&ashmem_shrink_inflight);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		mutex_unlock(&ashmem_mutex);
		f->f_op->fallocate(f,
				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				   start, end - start);
		fput(f);
		if (atomic_dec_and_test(&ashmem_shrink_inflight))
			wake_up_all(&ashmem_shrink_wait);
		if (!mutex_trylock(&ashmem_mutex))
			goto out;
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
out:
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return the
	 * number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need such
	 * significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if ((asma->prot_mask & prot) != prot) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (asma->file)
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland
	 * No lock held
	 */
	if (copy_to_user(name, local_name, len))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
		      struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend, new_range);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
			struct ashmem_range **new_range)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	range_alloc(asma, range, purged, pgstart, pgend, new_range);
	return 0;
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;
	struct ashmem_range *range = NULL;

	if (copy_from_user(&pin, p, sizeof(pin)))
		return -EFAULT;

	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
		if (!range)
			return -ENOMEM;
	}

	mutex_lock(&ashmem_mutex);
	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

	if (!asma->file)
		goto out_unlock;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if ((pin.offset | pin.len) & ~PAGE_MASK)
		goto out_unlock;

	if (((__u32)-1) - pin.offset < pin.len)
		goto out_unlock;

	if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
		goto out_unlock;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend, &range);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

out_unlock:
	mutex_unlock(&ashmem_mutex);
	if (range)
		kmem_cache_free(ashmem_range_cachep, range);

	return ret;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		mutex_lock(&ashmem_mutex);
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};
			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif
#ifdef CONFIG_PROC_FS
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct ashmem_area *asma = file->private_data;

	mutex_lock(&ashmem_mutex);

	if (asma->file)
		seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
		seq_printf(m, "name:\t%s\n",
			   asma->name + ASHMEM_NAME_PREFIX_LEN);

	mutex_unlock(&ashmem_mutex);
}
#endif
static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read_iter = ashmem_read_iter,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = ashmem_show_fdinfo,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (!ashmem_area_cachep) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (!ashmem_range_cachep) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (ret) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	ret = register_shrinker(&ashmem_shrinker);
	if (ret) {
		pr_err("failed to register shrinker!\n");
		goto out_demisc;
	}

	pr_info("initialized\n");

	return 0;

out_demisc:
	misc_deregister(&ashmem_misc);
out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);
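
/*
 * Illustrative userspace sketch (not part of the driver): one plausible way a
 * client might drive the ioctls implemented above, namely create a region,
 * map it, unpin a page so the shrinker may purge it, then pin it again and
 * check whether the contents survived. The <linux/ashmem.h> include path,
 * the 4096-byte page size, and the minimal error handling are assumptions of
 * the example; it is a hedged sketch, not a reference client.
 */
#if 0	/* example only, never built as part of this file */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>	/* assumed location of the ashmem UAPI header */

int main(void)
{
	const size_t size = 4 * 4096;		/* must be a multiple of the page size */
	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
	void *p;
	int fd;

	fd = open("/dev/ashmem", O_RDWR);
	if (fd < 0)
		return 1;

	/* Name and size can only be set before the backing file exists. */
	ioctl(fd, ASHMEM_SET_NAME, "example-region");
	ioctl(fd, ASHMEM_SET_SIZE, size);

	/* mmap() reaches ashmem_mmap(), which creates the shmem backing file. */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Unpinned pages go on the LRU list and may be purged under pressure. */
	ioctl(fd, ASHMEM_UNPIN, &pin);

	/* Pinning reports whether the range was purged in the meantime. */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		memset(p, 0, pin.len);	/* contents were discarded; reinitialize */

	munmap(p, size);
	close(fd);
	return 0;
}
#endif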