Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.18-rc5 967 lines 25 kB view raw
1/* Storage object read/write 2 * 3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 * Written by David Howells (dhowells@redhat.com) 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public Licence 8 * as published by the Free Software Foundation; either version 9 * 2 of the Licence, or (at your option) any later version. 10 */ 11 12#include <linux/mount.h> 13#include <linux/slab.h> 14#include <linux/file.h> 15#include <linux/swap.h> 16#include "internal.h" 17 18/* 19 * detect wake up events generated by the unlocking of pages in which we're 20 * interested 21 * - we use this to detect read completion of backing pages 22 * - the caller holds the waitqueue lock 23 */ 24static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, 25 int sync, void *_key) 26{ 27 struct cachefiles_one_read *monitor = 28 container_of(wait, struct cachefiles_one_read, monitor); 29 struct cachefiles_object *object; 30 struct wait_bit_key *key = _key; 31 struct page *page = wait->private; 32 33 ASSERT(key); 34 35 _enter("{%lu},%u,%d,{%p,%u}", 36 monitor->netfs_page->index, mode, sync, 37 key->flags, key->bit_nr); 38 39 if (key->flags != &page->flags || 40 key->bit_nr != PG_locked) 41 return 0; 42 43 _debug("--- monitor %p %lx ---", page, page->flags); 44 45 if (!PageUptodate(page) && !PageError(page)) { 46 /* unlocked, not uptodate and not erronous? */ 47 _debug("page probably truncated"); 48 } 49 50 /* remove from the waitqueue */ 51 list_del(&wait->entry); 52 53 /* move onto the action list and queue for FS-Cache thread pool */ 54 ASSERT(monitor->op); 55 56 object = container_of(monitor->op->op.object, 57 struct cachefiles_object, fscache); 58 59 spin_lock(&object->work_lock); 60 list_add_tail(&monitor->op_link, &monitor->op->to_do); 61 spin_unlock(&object->work_lock); 62 63 fscache_enqueue_retrieval(monitor->op); 64 return 0; 65} 66 67/* 68 * handle a probably truncated page 69 * - check to see if the page is still relevant and reissue the read if 70 * possible 71 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we 72 * must wait again and 0 if successful 73 */ 74static int cachefiles_read_reissue(struct cachefiles_object *object, 75 struct cachefiles_one_read *monitor) 76{ 77 struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping; 78 struct page *backpage = monitor->back_page, *backpage2; 79 int ret; 80 81 _enter("{ino=%lx},{%lx,%lx}", 82 d_backing_inode(object->backer)->i_ino, 83 backpage->index, backpage->flags); 84 85 /* skip if the page was truncated away completely */ 86 if (backpage->mapping != bmapping) { 87 _leave(" = -ENODATA [mapping]"); 88 return -ENODATA; 89 } 90 91 backpage2 = find_get_page(bmapping, backpage->index); 92 if (!backpage2) { 93 _leave(" = -ENODATA [gone]"); 94 return -ENODATA; 95 } 96 97 if (backpage != backpage2) { 98 put_page(backpage2); 99 _leave(" = -ENODATA [different]"); 100 return -ENODATA; 101 } 102 103 /* the page is still there and we already have a ref on it, so we don't 104 * need a second */ 105 put_page(backpage2); 106 107 INIT_LIST_HEAD(&monitor->op_link); 108 add_page_wait_queue(backpage, &monitor->monitor); 109 110 if (trylock_page(backpage)) { 111 ret = -EIO; 112 if (PageError(backpage)) 113 goto unlock_discard; 114 ret = 0; 115 if (PageUptodate(backpage)) 116 goto unlock_discard; 117 118 _debug("reissue read"); 119 ret = bmapping->a_ops->readpage(NULL, backpage); 120 if (ret < 0) 121 goto unlock_discard; 122 } 123 124 /* but the page may have been read before the monitor was installed, so 125 * the monitor may miss the event - so we have to ensure that we do get 126 * one in such a case */ 127 if (trylock_page(backpage)) { 128 _debug("jumpstart %p {%lx}", backpage, backpage->flags); 129 unlock_page(backpage); 130 } 131 132 /* it'll reappear on the todo list */ 133 _leave(" = -EINPROGRESS"); 134 return -EINPROGRESS; 135 136unlock_discard: 137 unlock_page(backpage); 138 spin_lock_irq(&object->work_lock); 139 list_del(&monitor->op_link); 140 spin_unlock_irq(&object->work_lock); 141 _leave(" = %d", ret); 142 return ret; 143} 144 145/* 146 * copy data from backing pages to netfs pages to complete a read operation 147 * - driven by FS-Cache's thread pool 148 */ 149static void cachefiles_read_copier(struct fscache_operation *_op) 150{ 151 struct cachefiles_one_read *monitor; 152 struct cachefiles_object *object; 153 struct fscache_retrieval *op; 154 int error, max; 155 156 op = container_of(_op, struct fscache_retrieval, op); 157 object = container_of(op->op.object, 158 struct cachefiles_object, fscache); 159 160 _enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino); 161 162 max = 8; 163 spin_lock_irq(&object->work_lock); 164 165 while (!list_empty(&op->to_do)) { 166 monitor = list_entry(op->to_do.next, 167 struct cachefiles_one_read, op_link); 168 list_del(&monitor->op_link); 169 170 spin_unlock_irq(&object->work_lock); 171 172 _debug("- copy {%lu}", monitor->back_page->index); 173 174 recheck: 175 if (test_bit(FSCACHE_COOKIE_INVALIDATING, 176 &object->fscache.cookie->flags)) { 177 error = -ESTALE; 178 } else if (PageUptodate(monitor->back_page)) { 179 copy_highpage(monitor->netfs_page, monitor->back_page); 180 fscache_mark_page_cached(monitor->op, 181 monitor->netfs_page); 182 error = 0; 183 } else if (!PageError(monitor->back_page)) { 184 /* the page has probably been truncated */ 185 error = cachefiles_read_reissue(object, monitor); 186 if (error == -EINPROGRESS) 187 goto next; 188 goto recheck; 189 } else { 190 cachefiles_io_error_obj( 191 object, 192 "Readpage failed on backing file %lx", 193 (unsigned long) monitor->back_page->flags); 194 error = -EIO; 195 } 196 197 put_page(monitor->back_page); 198 199 fscache_end_io(op, monitor->netfs_page, error); 200 put_page(monitor->netfs_page); 201 fscache_retrieval_complete(op, 1); 202 fscache_put_retrieval(op); 203 kfree(monitor); 204 205 next: 206 /* let the thread pool have some air occasionally */ 207 max--; 208 if (max < 0 || need_resched()) { 209 if (!list_empty(&op->to_do)) 210 fscache_enqueue_retrieval(op); 211 _leave(" [maxed out]"); 212 return; 213 } 214 215 spin_lock_irq(&object->work_lock); 216 } 217 218 spin_unlock_irq(&object->work_lock); 219 _leave(""); 220} 221 222/* 223 * read the corresponding page to the given set from the backing file 224 * - an uncertain page is simply discarded, to be tried again another time 225 */ 226static int cachefiles_read_backing_file_one(struct cachefiles_object *object, 227 struct fscache_retrieval *op, 228 struct page *netpage) 229{ 230 struct cachefiles_one_read *monitor; 231 struct address_space *bmapping; 232 struct page *newpage, *backpage; 233 int ret; 234 235 _enter(""); 236 237 _debug("read back %p{%lu,%d}", 238 netpage, netpage->index, page_count(netpage)); 239 240 monitor = kzalloc(sizeof(*monitor), cachefiles_gfp); 241 if (!monitor) 242 goto nomem; 243 244 monitor->netfs_page = netpage; 245 monitor->op = fscache_get_retrieval(op); 246 247 init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter); 248 249 /* attempt to get hold of the backing page */ 250 bmapping = d_backing_inode(object->backer)->i_mapping; 251 newpage = NULL; 252 253 for (;;) { 254 backpage = find_get_page(bmapping, netpage->index); 255 if (backpage) 256 goto backing_page_already_present; 257 258 if (!newpage) { 259 newpage = __page_cache_alloc(cachefiles_gfp); 260 if (!newpage) 261 goto nomem_monitor; 262 } 263 264 ret = add_to_page_cache_lru(newpage, bmapping, 265 netpage->index, cachefiles_gfp); 266 if (ret == 0) 267 goto installed_new_backing_page; 268 if (ret != -EEXIST) 269 goto nomem_page; 270 } 271 272 /* we've installed a new backing page, so now we need to start 273 * it reading */ 274installed_new_backing_page: 275 _debug("- new %p", newpage); 276 277 backpage = newpage; 278 newpage = NULL; 279 280read_backing_page: 281 ret = bmapping->a_ops->readpage(NULL, backpage); 282 if (ret < 0) 283 goto read_error; 284 285 /* set the monitor to transfer the data across */ 286monitor_backing_page: 287 _debug("- monitor add"); 288 289 /* install the monitor */ 290 get_page(monitor->netfs_page); 291 get_page(backpage); 292 monitor->back_page = backpage; 293 monitor->monitor.private = backpage; 294 add_page_wait_queue(backpage, &monitor->monitor); 295 monitor = NULL; 296 297 /* but the page may have been read before the monitor was installed, so 298 * the monitor may miss the event - so we have to ensure that we do get 299 * one in such a case */ 300 if (trylock_page(backpage)) { 301 _debug("jumpstart %p {%lx}", backpage, backpage->flags); 302 unlock_page(backpage); 303 } 304 goto success; 305 306 /* if the backing page is already present, it can be in one of 307 * three states: read in progress, read failed or read okay */ 308backing_page_already_present: 309 _debug("- present"); 310 311 if (newpage) { 312 put_page(newpage); 313 newpage = NULL; 314 } 315 316 if (PageError(backpage)) 317 goto io_error; 318 319 if (PageUptodate(backpage)) 320 goto backing_page_already_uptodate; 321 322 if (!trylock_page(backpage)) 323 goto monitor_backing_page; 324 _debug("read %p {%lx}", backpage, backpage->flags); 325 goto read_backing_page; 326 327 /* the backing page is already up to date, attach the netfs 328 * page to the pagecache and LRU and copy the data across */ 329backing_page_already_uptodate: 330 _debug("- uptodate"); 331 332 fscache_mark_page_cached(op, netpage); 333 334 copy_highpage(netpage, backpage); 335 fscache_end_io(op, netpage, 0); 336 fscache_retrieval_complete(op, 1); 337 338success: 339 _debug("success"); 340 ret = 0; 341 342out: 343 if (backpage) 344 put_page(backpage); 345 if (monitor) { 346 fscache_put_retrieval(monitor->op); 347 kfree(monitor); 348 } 349 _leave(" = %d", ret); 350 return ret; 351 352read_error: 353 _debug("read error %d", ret); 354 if (ret == -ENOMEM) { 355 fscache_retrieval_complete(op, 1); 356 goto out; 357 } 358io_error: 359 cachefiles_io_error_obj(object, "Page read error on backing file"); 360 fscache_retrieval_complete(op, 1); 361 ret = -ENOBUFS; 362 goto out; 363 364nomem_page: 365 put_page(newpage); 366nomem_monitor: 367 fscache_put_retrieval(monitor->op); 368 kfree(monitor); 369nomem: 370 fscache_retrieval_complete(op, 1); 371 _leave(" = -ENOMEM"); 372 return -ENOMEM; 373} 374 375/* 376 * read a page from the cache or allocate a block in which to store it 377 * - cache withdrawal is prevented by the caller 378 * - returns -EINTR if interrupted 379 * - returns -ENOMEM if ran out of memory 380 * - returns -ENOBUFS if no buffers can be made available 381 * - returns -ENOBUFS if page is beyond EOF 382 * - if the page is backed by a block in the cache: 383 * - a read will be started which will call the callback on completion 384 * - 0 will be returned 385 * - else if the page is unbacked: 386 * - the metadata will be retained 387 * - -ENODATA will be returned 388 */ 389int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, 390 struct page *page, 391 gfp_t gfp) 392{ 393 struct cachefiles_object *object; 394 struct cachefiles_cache *cache; 395 struct inode *inode; 396 sector_t block0, block; 397 unsigned shift; 398 int ret; 399 400 object = container_of(op->op.object, 401 struct cachefiles_object, fscache); 402 cache = container_of(object->fscache.cache, 403 struct cachefiles_cache, cache); 404 405 _enter("{%p},{%lx},,,", object, page->index); 406 407 if (!object->backer) 408 goto enobufs; 409 410 inode = d_backing_inode(object->backer); 411 ASSERT(S_ISREG(inode->i_mode)); 412 ASSERT(inode->i_mapping->a_ops->bmap); 413 ASSERT(inode->i_mapping->a_ops->readpages); 414 415 /* calculate the shift required to use bmap */ 416 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 417 418 op->op.flags &= FSCACHE_OP_KEEP_FLAGS; 419 op->op.flags |= FSCACHE_OP_ASYNC; 420 op->op.processor = cachefiles_read_copier; 421 422 /* we assume the absence or presence of the first block is a good 423 * enough indication for the page as a whole 424 * - TODO: don't use bmap() for this as it is _not_ actually good 425 * enough for this as it doesn't indicate errors, but it's all we've 426 * got for the moment 427 */ 428 block0 = page->index; 429 block0 <<= shift; 430 431 block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0); 432 _debug("%llx -> %llx", 433 (unsigned long long) block0, 434 (unsigned long long) block); 435 436 if (block) { 437 /* submit the apparently valid page to the backing fs to be 438 * read from disk */ 439 ret = cachefiles_read_backing_file_one(object, op, page); 440 } else if (cachefiles_has_space(cache, 0, 1) == 0) { 441 /* there's space in the cache we can use */ 442 fscache_mark_page_cached(op, page); 443 fscache_retrieval_complete(op, 1); 444 ret = -ENODATA; 445 } else { 446 goto enobufs; 447 } 448 449 _leave(" = %d", ret); 450 return ret; 451 452enobufs: 453 fscache_retrieval_complete(op, 1); 454 _leave(" = -ENOBUFS"); 455 return -ENOBUFS; 456} 457 458/* 459 * read the corresponding pages to the given set from the backing file 460 * - any uncertain pages are simply discarded, to be tried again another time 461 */ 462static int cachefiles_read_backing_file(struct cachefiles_object *object, 463 struct fscache_retrieval *op, 464 struct list_head *list) 465{ 466 struct cachefiles_one_read *monitor = NULL; 467 struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping; 468 struct page *newpage = NULL, *netpage, *_n, *backpage = NULL; 469 int ret = 0; 470 471 _enter(""); 472 473 list_for_each_entry_safe(netpage, _n, list, lru) { 474 list_del(&netpage->lru); 475 476 _debug("read back %p{%lu,%d}", 477 netpage, netpage->index, page_count(netpage)); 478 479 if (!monitor) { 480 monitor = kzalloc(sizeof(*monitor), cachefiles_gfp); 481 if (!monitor) 482 goto nomem; 483 484 monitor->op = fscache_get_retrieval(op); 485 init_waitqueue_func_entry(&monitor->monitor, 486 cachefiles_read_waiter); 487 } 488 489 for (;;) { 490 backpage = find_get_page(bmapping, netpage->index); 491 if (backpage) 492 goto backing_page_already_present; 493 494 if (!newpage) { 495 newpage = __page_cache_alloc(cachefiles_gfp); 496 if (!newpage) 497 goto nomem; 498 } 499 500 ret = add_to_page_cache_lru(newpage, bmapping, 501 netpage->index, 502 cachefiles_gfp); 503 if (ret == 0) 504 goto installed_new_backing_page; 505 if (ret != -EEXIST) 506 goto nomem; 507 } 508 509 /* we've installed a new backing page, so now we need 510 * to start it reading */ 511 installed_new_backing_page: 512 _debug("- new %p", newpage); 513 514 backpage = newpage; 515 newpage = NULL; 516 517 reread_backing_page: 518 ret = bmapping->a_ops->readpage(NULL, backpage); 519 if (ret < 0) 520 goto read_error; 521 522 /* add the netfs page to the pagecache and LRU, and set the 523 * monitor to transfer the data across */ 524 monitor_backing_page: 525 _debug("- monitor add"); 526 527 ret = add_to_page_cache_lru(netpage, op->mapping, 528 netpage->index, cachefiles_gfp); 529 if (ret < 0) { 530 if (ret == -EEXIST) { 531 put_page(netpage); 532 fscache_retrieval_complete(op, 1); 533 continue; 534 } 535 goto nomem; 536 } 537 538 /* install a monitor */ 539 get_page(netpage); 540 monitor->netfs_page = netpage; 541 542 get_page(backpage); 543 monitor->back_page = backpage; 544 monitor->monitor.private = backpage; 545 add_page_wait_queue(backpage, &monitor->monitor); 546 monitor = NULL; 547 548 /* but the page may have been read before the monitor was 549 * installed, so the monitor may miss the event - so we have to 550 * ensure that we do get one in such a case */ 551 if (trylock_page(backpage)) { 552 _debug("2unlock %p {%lx}", backpage, backpage->flags); 553 unlock_page(backpage); 554 } 555 556 put_page(backpage); 557 backpage = NULL; 558 559 put_page(netpage); 560 netpage = NULL; 561 continue; 562 563 /* if the backing page is already present, it can be in one of 564 * three states: read in progress, read failed or read okay */ 565 backing_page_already_present: 566 _debug("- present %p", backpage); 567 568 if (PageError(backpage)) 569 goto io_error; 570 571 if (PageUptodate(backpage)) 572 goto backing_page_already_uptodate; 573 574 _debug("- not ready %p{%lx}", backpage, backpage->flags); 575 576 if (!trylock_page(backpage)) 577 goto monitor_backing_page; 578 579 if (PageError(backpage)) { 580 _debug("error %lx", backpage->flags); 581 unlock_page(backpage); 582 goto io_error; 583 } 584 585 if (PageUptodate(backpage)) 586 goto backing_page_already_uptodate_unlock; 587 588 /* we've locked a page that's neither up to date nor erroneous, 589 * so we need to attempt to read it again */ 590 goto reread_backing_page; 591 592 /* the backing page is already up to date, attach the netfs 593 * page to the pagecache and LRU and copy the data across */ 594 backing_page_already_uptodate_unlock: 595 _debug("uptodate %lx", backpage->flags); 596 unlock_page(backpage); 597 backing_page_already_uptodate: 598 _debug("- uptodate"); 599 600 ret = add_to_page_cache_lru(netpage, op->mapping, 601 netpage->index, cachefiles_gfp); 602 if (ret < 0) { 603 if (ret == -EEXIST) { 604 put_page(netpage); 605 fscache_retrieval_complete(op, 1); 606 continue; 607 } 608 goto nomem; 609 } 610 611 copy_highpage(netpage, backpage); 612 613 put_page(backpage); 614 backpage = NULL; 615 616 fscache_mark_page_cached(op, netpage); 617 618 /* the netpage is unlocked and marked up to date here */ 619 fscache_end_io(op, netpage, 0); 620 put_page(netpage); 621 netpage = NULL; 622 fscache_retrieval_complete(op, 1); 623 continue; 624 } 625 626 netpage = NULL; 627 628 _debug("out"); 629 630out: 631 /* tidy up */ 632 if (newpage) 633 put_page(newpage); 634 if (netpage) 635 put_page(netpage); 636 if (backpage) 637 put_page(backpage); 638 if (monitor) { 639 fscache_put_retrieval(op); 640 kfree(monitor); 641 } 642 643 list_for_each_entry_safe(netpage, _n, list, lru) { 644 list_del(&netpage->lru); 645 put_page(netpage); 646 fscache_retrieval_complete(op, 1); 647 } 648 649 _leave(" = %d", ret); 650 return ret; 651 652nomem: 653 _debug("nomem"); 654 ret = -ENOMEM; 655 goto record_page_complete; 656 657read_error: 658 _debug("read error %d", ret); 659 if (ret == -ENOMEM) 660 goto record_page_complete; 661io_error: 662 cachefiles_io_error_obj(object, "Page read error on backing file"); 663 ret = -ENOBUFS; 664record_page_complete: 665 fscache_retrieval_complete(op, 1); 666 goto out; 667} 668 669/* 670 * read a list of pages from the cache or allocate blocks in which to store 671 * them 672 */ 673int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, 674 struct list_head *pages, 675 unsigned *nr_pages, 676 gfp_t gfp) 677{ 678 struct cachefiles_object *object; 679 struct cachefiles_cache *cache; 680 struct list_head backpages; 681 struct pagevec pagevec; 682 struct inode *inode; 683 struct page *page, *_n; 684 unsigned shift, nrbackpages; 685 int ret, ret2, space; 686 687 object = container_of(op->op.object, 688 struct cachefiles_object, fscache); 689 cache = container_of(object->fscache.cache, 690 struct cachefiles_cache, cache); 691 692 _enter("{OBJ%x,%d},,%d,,", 693 object->fscache.debug_id, atomic_read(&op->op.usage), 694 *nr_pages); 695 696 if (!object->backer) 697 goto all_enobufs; 698 699 space = 1; 700 if (cachefiles_has_space(cache, 0, *nr_pages) < 0) 701 space = 0; 702 703 inode = d_backing_inode(object->backer); 704 ASSERT(S_ISREG(inode->i_mode)); 705 ASSERT(inode->i_mapping->a_ops->bmap); 706 ASSERT(inode->i_mapping->a_ops->readpages); 707 708 /* calculate the shift required to use bmap */ 709 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 710 711 pagevec_init(&pagevec); 712 713 op->op.flags &= FSCACHE_OP_KEEP_FLAGS; 714 op->op.flags |= FSCACHE_OP_ASYNC; 715 op->op.processor = cachefiles_read_copier; 716 717 INIT_LIST_HEAD(&backpages); 718 nrbackpages = 0; 719 720 ret = space ? -ENODATA : -ENOBUFS; 721 list_for_each_entry_safe(page, _n, pages, lru) { 722 sector_t block0, block; 723 724 /* we assume the absence or presence of the first block is a 725 * good enough indication for the page as a whole 726 * - TODO: don't use bmap() for this as it is _not_ actually 727 * good enough for this as it doesn't indicate errors, but 728 * it's all we've got for the moment 729 */ 730 block0 = page->index; 731 block0 <<= shift; 732 733 block = inode->i_mapping->a_ops->bmap(inode->i_mapping, 734 block0); 735 _debug("%llx -> %llx", 736 (unsigned long long) block0, 737 (unsigned long long) block); 738 739 if (block) { 740 /* we have data - add it to the list to give to the 741 * backing fs */ 742 list_move(&page->lru, &backpages); 743 (*nr_pages)--; 744 nrbackpages++; 745 } else if (space && pagevec_add(&pagevec, page) == 0) { 746 fscache_mark_pages_cached(op, &pagevec); 747 fscache_retrieval_complete(op, 1); 748 ret = -ENODATA; 749 } else { 750 fscache_retrieval_complete(op, 1); 751 } 752 } 753 754 if (pagevec_count(&pagevec) > 0) 755 fscache_mark_pages_cached(op, &pagevec); 756 757 if (list_empty(pages)) 758 ret = 0; 759 760 /* submit the apparently valid pages to the backing fs to be read from 761 * disk */ 762 if (nrbackpages > 0) { 763 ret2 = cachefiles_read_backing_file(object, op, &backpages); 764 if (ret2 == -ENOMEM || ret2 == -EINTR) 765 ret = ret2; 766 } 767 768 _leave(" = %d [nr=%u%s]", 769 ret, *nr_pages, list_empty(pages) ? " empty" : ""); 770 return ret; 771 772all_enobufs: 773 fscache_retrieval_complete(op, *nr_pages); 774 return -ENOBUFS; 775} 776 777/* 778 * allocate a block in the cache in which to store a page 779 * - cache withdrawal is prevented by the caller 780 * - returns -EINTR if interrupted 781 * - returns -ENOMEM if ran out of memory 782 * - returns -ENOBUFS if no buffers can be made available 783 * - returns -ENOBUFS if page is beyond EOF 784 * - otherwise: 785 * - the metadata will be retained 786 * - 0 will be returned 787 */ 788int cachefiles_allocate_page(struct fscache_retrieval *op, 789 struct page *page, 790 gfp_t gfp) 791{ 792 struct cachefiles_object *object; 793 struct cachefiles_cache *cache; 794 int ret; 795 796 object = container_of(op->op.object, 797 struct cachefiles_object, fscache); 798 cache = container_of(object->fscache.cache, 799 struct cachefiles_cache, cache); 800 801 _enter("%p,{%lx},", object, page->index); 802 803 ret = cachefiles_has_space(cache, 0, 1); 804 if (ret == 0) 805 fscache_mark_page_cached(op, page); 806 else 807 ret = -ENOBUFS; 808 809 fscache_retrieval_complete(op, 1); 810 _leave(" = %d", ret); 811 return ret; 812} 813 814/* 815 * allocate blocks in the cache in which to store a set of pages 816 * - cache withdrawal is prevented by the caller 817 * - returns -EINTR if interrupted 818 * - returns -ENOMEM if ran out of memory 819 * - returns -ENOBUFS if some buffers couldn't be made available 820 * - returns -ENOBUFS if some pages are beyond EOF 821 * - otherwise: 822 * - -ENODATA will be returned 823 * - metadata will be retained for any page marked 824 */ 825int cachefiles_allocate_pages(struct fscache_retrieval *op, 826 struct list_head *pages, 827 unsigned *nr_pages, 828 gfp_t gfp) 829{ 830 struct cachefiles_object *object; 831 struct cachefiles_cache *cache; 832 struct pagevec pagevec; 833 struct page *page; 834 int ret; 835 836 object = container_of(op->op.object, 837 struct cachefiles_object, fscache); 838 cache = container_of(object->fscache.cache, 839 struct cachefiles_cache, cache); 840 841 _enter("%p,,,%d,", object, *nr_pages); 842 843 ret = cachefiles_has_space(cache, 0, *nr_pages); 844 if (ret == 0) { 845 pagevec_init(&pagevec); 846 847 list_for_each_entry(page, pages, lru) { 848 if (pagevec_add(&pagevec, page) == 0) 849 fscache_mark_pages_cached(op, &pagevec); 850 } 851 852 if (pagevec_count(&pagevec) > 0) 853 fscache_mark_pages_cached(op, &pagevec); 854 ret = -ENODATA; 855 } else { 856 ret = -ENOBUFS; 857 } 858 859 fscache_retrieval_complete(op, *nr_pages); 860 _leave(" = %d", ret); 861 return ret; 862} 863 864/* 865 * request a page be stored in the cache 866 * - cache withdrawal is prevented by the caller 867 * - this request may be ignored if there's no cache block available, in which 868 * case -ENOBUFS will be returned 869 * - if the op is in progress, 0 will be returned 870 */ 871int cachefiles_write_page(struct fscache_storage *op, struct page *page) 872{ 873 struct cachefiles_object *object; 874 struct cachefiles_cache *cache; 875 struct file *file; 876 struct path path; 877 loff_t pos, eof; 878 size_t len; 879 void *data; 880 int ret = -ENOBUFS; 881 882 ASSERT(op != NULL); 883 ASSERT(page != NULL); 884 885 object = container_of(op->op.object, 886 struct cachefiles_object, fscache); 887 888 _enter("%p,%p{%lx},,,", object, page, page->index); 889 890 if (!object->backer) { 891 _leave(" = -ENOBUFS"); 892 return -ENOBUFS; 893 } 894 895 ASSERT(d_is_reg(object->backer)); 896 897 cache = container_of(object->fscache.cache, 898 struct cachefiles_cache, cache); 899 900 pos = (loff_t)page->index << PAGE_SHIFT; 901 902 /* We mustn't write more data than we have, so we have to beware of a 903 * partial page at EOF. 904 */ 905 eof = object->fscache.store_limit_l; 906 if (pos >= eof) 907 goto error; 908 909 /* write the page to the backing filesystem and let it store it in its 910 * own time */ 911 path.mnt = cache->mnt; 912 path.dentry = object->backer; 913 file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred); 914 if (IS_ERR(file)) { 915 ret = PTR_ERR(file); 916 goto error_2; 917 } 918 919 len = PAGE_SIZE; 920 if (eof & ~PAGE_MASK) { 921 if (eof - pos < PAGE_SIZE) { 922 _debug("cut short %llx to %llx", 923 pos, eof); 924 len = eof - pos; 925 ASSERTCMP(pos + len, ==, eof); 926 } 927 } 928 929 data = kmap(page); 930 ret = __kernel_write(file, data, len, &pos); 931 kunmap(page); 932 fput(file); 933 if (ret != len) 934 goto error_eio; 935 936 _leave(" = 0"); 937 return 0; 938 939error_eio: 940 ret = -EIO; 941error_2: 942 if (ret == -EIO) 943 cachefiles_io_error_obj(object, 944 "Write page to backing file failed"); 945error: 946 _leave(" = -ENOBUFS [%d]", ret); 947 return -ENOBUFS; 948} 949 950/* 951 * detach a backing block from a page 952 * - cache withdrawal is prevented by the caller 953 */ 954void cachefiles_uncache_page(struct fscache_object *_object, struct page *page) 955 __releases(&object->fscache.cookie->lock) 956{ 957 struct cachefiles_object *object; 958 struct cachefiles_cache *cache; 959 960 object = container_of(_object, struct cachefiles_object, fscache); 961 cache = container_of(object->fscache.cache, 962 struct cachefiles_cache, cache); 963 964 _enter("%p,{%lu}", object, page->index); 965 966 spin_unlock(&object->fscache.cookie->lock); 967}