Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.8-rc1 (974 lines, 25 kB)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct fscache_retrieval *op = monitor->op;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erronous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->entry);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(op);

	/* We need to temporarily bump the usage count as we don't own a ref
	 * here otherwise cachefiles_read_copier() may free the op between the
	 * monitor being enqueued on the op->to_do list and the op getting
	 * enqueued on the work queue.
	 */
	fscache_get_retrieval(op);

	object = container_of(op->op.object, struct cachefiles_object, fscache);
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &op->to_do);
	fscache_enqueue_retrieval(op);
	spin_unlock(&object->work_lock);

	fscache_put_retrieval(op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       d_backing_inode(object->backer)->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
			     &object->fscache.cookie->flags)) {
			error = -ESTALE;
		} else if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);
			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		put_page(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		put_page(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = d_backing_inode(object->backer)->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache_lru(newpage, bmapping,
					    netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to start
	 * it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	get_page(monitor->netfs_page);
	get_page(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		put_page(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM) {
		fscache_retrieval_complete(op, 1);
		goto out;
	}
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	put_page(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct inode *inode;
	sector_t block;
	unsigned shift;
	int ret, ret2;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block = page->index;
	block <<= shift;

	ret2 = bmap(inode, &block);
	ASSERT(ret2 == 0);

	_debug("%llx -> %llx",
	       (unsigned long long) (page->index << shift),
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache_lru(newpage, bmapping,
						    netpage->index,
						    cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need
		 * to start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		/* install a monitor */
		get_page(netpage);
		monitor->netfs_page = netpage;

		get_page(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		put_page(backpage);
		backpage = NULL;

		put_page(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache_lru(netpage, op->mapping,
					    netpage->index, cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				put_page(backpage);
				backpage = NULL;
				put_page(netpage);
				netpage = NULL;
				fscache_retrieval_complete(op, 1);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		put_page(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		put_page(netpage);
		netpage = NULL;
		fscache_retrieval_complete(op, 1);
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	if (newpage)
		put_page(newpage);
	if (netpage)
		put_page(netpage);
	if (backpage)
		put_page(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		put_page(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto record_page_complete;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto record_page_complete;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
record_page_complete:
	fscache_retrieval_complete(op, 1);
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = d_backing_inode(object->backer);
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block = page->index;
		block <<= shift;

		ret2 = bmap(inode, &block);
		ASSERT(ret2 == 0);

		_debug("%llx -> %llx",
		       (unsigned long long) (page->index << shift),
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret = -ENOBUFS;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(d_is_reg(object->backer));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	pos = (loff_t)page->index << PAGE_SHIFT;

	/* We mustn't write more data than we have, so we have to beware of a
	 * partial page at EOF.
	 */
	eof = object->fscache.store_limit_l;
	if (pos >= eof)
		goto error;

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto error_2;
	}

	len = PAGE_SIZE;
	if (eof & ~PAGE_MASK) {
		if (eof - pos < PAGE_SIZE) {
			_debug("cut short %llx to %llx",
			       pos, eof);
			len = eof - pos;
			ASSERTCMP(pos + len, ==, eof);
		}
	}

	data = kmap(page);
	ret = __kernel_write(file, data, len, &pos);
	kunmap(page);
	fput(file);
	if (ret != len)
		goto error_eio;

	_leave(" = 0");
	return 0;

error_eio:
	ret = -EIO;
error_2:
	if (ret == -EIO)
		cachefiles_io_error_obj(object,
					"Write page to backing file failed");
error:
	_leave(" = -ENOBUFS [%d]", ret);
	return -ENOBUFS;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
	__releases(&object->fscache.cookie->lock)
{
	struct cachefiles_object *object;

	object = container_of(_object, struct cachefiles_object, fscache);

	_enter("%p,{%lu}", object, page->index);

	spin_unlock(&object->fscache.cookie->lock);
}