Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
fs/fscache/page.c at v2.6.34-rc4 (993 lines, 26 kB)

/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the slow-work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	fscache_stat(&fscache_n_store_vmscan_busy);
	return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
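
/*
 * Usage sketch: how a netfs ->releasepage() might let FS-Cache veto the
 * reclaim of a page that is still being stored into the cache.
 * fscache_maybe_release_page() is the netfs-facing wrapper declared in
 * include/linux/fscache.h; the mynetfs_*()/MYNETFS_I() names and the
 * cookie field are hypothetical, netfs-specific details.
 */
#if 0
static int mynetfs_release_page(struct page *page, gfp_t gfp)
{
	struct fscache_cookie *cookie = MYNETFS_I(page->mapping->host)->cookie;

	/* returns false if the page is still busy in the cache */
	if (!fscache_maybe_release_page(cookie, page, gfp))
		return 0;
	return 1;
}
#endif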

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_set_op_state(op, "CallFS");
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		fscache_set_op_state(op, "Done");
		if (ret < 0)
			fscache_abort_object(object);
	}

	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, NULL);
	fscache_operation_init_slow(op, fscache_attr_changed_op);
	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
	fscache_set_op_name(op, "Attr");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
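
/*
 * Usage sketch: a netfs would call fscache_attr_changed() (the wrapper in
 * include/linux/fscache.h) when an object's attributes change, for example
 * from its ->setattr() after a size change.  The mynetfs_*() names and the
 * cookie field are hypothetical.
 */
#if 0
static int mynetfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int ret;

	ret = mynetfs_do_setattr(inode, attr);	/* hypothetical server op */
	if (ret == 0 && (attr->ia_valid & ATTR_SIZE))
		fscache_attr_changed(MYNETFS_I(inode)->cookie);
	return ret;
}
#endif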

/*
 * handle secondary execution given to a retrieval op on behalf of the
 * cache
 */
static void fscache_retrieval_work(struct work_struct *work)
{
	struct fscache_retrieval *op =
		container_of(work, struct fscache_retrieval, op.fast_work);
	unsigned long start;

	_enter("{OP%x}", op->op.debug_id);

	start = jiffies;
	op->op.processor(&op->op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(&op->op);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
	INIT_LIST_HEAD(&op->to_do);
	fscache_set_op_name(&op->op, "Retr");
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	_debug(">>> WT");
	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		ret = fscache_cancel_op(&op->op);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (unlikely(fscache_object_is_dead(object))) {
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	fscache_set_op_name(&op->op, "RetrRA1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
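
/*
 * Usage sketch: a netfs ->readpage() trying the cache first via
 * fscache_read_or_alloc_page() (the wrapper in include/linux/fscache.h)
 * and falling back to a server read on -ENODATA or -ENOBUFS.  The
 * completion callback matches fscache_rw_complete_t; the mynetfs_*()
 * names are hypothetical.
 */
#if 0
static void mynetfs_readpage_from_fscache_complete(struct page *page,
						   void *context, int error)
{
	/* called for each page once the cache read completes */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

static int mynetfs_readpage(struct file *file, struct page *page)
{
	struct fscache_cookie *cookie = MYNETFS_I(page->mapping->host)->cookie;

	switch (fscache_read_or_alloc_page(cookie, page,
					   mynetfs_readpage_from_fscache_complete,
					   NULL, GFP_KERNEL)) {
	case 0:		/* read dispatched; the callback completes the page */
		return 0;
	case -ENODATA:	/* block allocated but empty: fetch from the server */
	case -ENOBUFS:	/* no cache available: fetch from the server */
	default:
		return mynetfs_readpage_from_server(file, page);
	}
}
#endif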

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrRAN");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	fscache_set_op_name(&op->op, "RetrAL1");

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	fscache_set_op_state(&op->op, "GetPage");

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object) || !cookie) {
		spin_unlock(&object->lock);
		_leave("");
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	if (page) {
		radix_tree_tag_set(&cookie->stores, page->index,
				   FSCACHE_COOKIE_STORING_TAG);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_PENDING_TAG);
	}

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	if (page) {
		fscache_set_op_state(&op->op, "Store");
		fscache_stat(&fscache_n_store_pages);
		fscache_stat(&fscache_n_cop_write_page);
		ret = object->cache->ops->write_page(op, page);
		fscache_stat_d(&fscache_n_cop_write_page);
		fscache_set_op_state(&op->op, "EndWrite");
		fscache_end_page_write(object, page);
		if (ret < 0) {
			fscache_set_op_state(&op->op, "Abort");
			fscache_abort_object(object);
		} else {
			fscache_enqueue_operation(&op->op);
		}
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_release_write_op);
	fscache_operation_init_slow(&op->op, fscache_write_op);
	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
	fscache_set_op_name(&op->op, "Write1");

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the slow work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
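
/*
 * Usage sketch: after a successful server read, a netfs can push the page
 * into the cache with fscache_write_page() (the wrapper in
 * include/linux/fscache.h); if the write cannot be queued, the page must
 * be uncached so the PG_fscache mark does not leak.  The mynetfs_*()
 * names are hypothetical.
 */
#if 0
static void mynetfs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	struct fscache_cookie *cookie = MYNETFS_I(inode)->cookie;

	if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
		fscache_uncache_page(cookie, page);
}
#endif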

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	struct fscache_cookie *cookie = op->op.object->cookie;
	unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
	atomic_add(pagevec->nr, &fscache_n_marks);
#endif

	for (loop = 0; loop < pagevec->nr; loop++) {
		struct page *page = pagevec->pages[loop];

		_debug("- mark %p{%lx}", page, page->index);
		if (TestSetPageFsCache(page)) {
			static bool once_only;
			if (!once_only) {
				once_only = true;
				printk(KERN_WARNING "FS-Cache:"
				       " Cookie type %s marked page %lx"
				       " multiple times\n",
				       cookie->def->name, page->index);
			}
		}
	}

	if (cookie->def->mark_pages_cached)
		cookie->def->mark_pages_cached(cookie->netfs_data,
					       op->mapping, pagevec);
	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
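
/*
 * Usage sketch: on ->invalidatepage(), a netfs should wait for any store
 * to the cache to finish and then drop the page's cache mark, using the
 * wait/uncache primitives exported above (via the wrappers in
 * include/linux/fscache.h).  The mynetfs_*() names are hypothetical.
 */
#if 0
static void mynetfs_invalidate_page(struct page *page, unsigned long offset)
{
	if (offset == 0 && PageFsCache(page)) {
		struct fscache_cookie *cookie =
			MYNETFS_I(page->mapping->host)->cookie;

		fscache_wait_on_page_write(cookie, page);
		fscache_uncache_page(cookie, page);
	}
}
#endif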