/* FS-Cache object state machine handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/object.txt for a description of the
 * object state machine and the in-kernel representations.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include "internal.h"

static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);

#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state. Work states are execution states. No event processing
 * is performed by them. The function attached to a work state returns a
 * pointer indicating the next state to which the state machine should
 * transition. Returning NO_TRANSIT repeats the current state, but goes back
 * to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
        const struct fscache_state __STATE_NAME(n) = { \
                .name = #n, \
                .short_name = sn, \
                .work = f \
        }

/*
 * Returns from work states.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })

#define NO_TRANSIT ((struct fscache_state *)NULL)

/*
 * Define a wait state. Wait states are event processing states. No execution
 * is performed by them. Wait states are just tables of "if event X occurs,
 * clear it and transition to state Y". The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.
 */
#define WAIT_STATE(n, sn, ...) \
        const struct fscache_state __STATE_NAME(n) = { \
                .name = #n, \
                .short_name = sn, \
                .work = NULL, \
                .transitions = { __VA_ARGS__, { 0, NULL } } \
        }

#define TRANSIT_TO(state, emask) \
        { .events = (emask), .transit_to = STATE(state) }

/*
 * The object state machine.
 */
static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);

static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);

static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
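/* OBJECT_DEAD is terminal: rather than a work function, it carries the magic
 * pointer (void *)2UL, which the dispatcher below recognises and, on seeing
 * it, returns without executing or transiting any further.
 */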
static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL);

static WAIT_STATE(WAIT_FOR_INIT, "?INI",
                  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
                  TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));

static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
                  TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
                  TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
                  TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
                  TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));

/*
 * Out-of-band event transition tables. These are for handling unexpected
 * events, such as an I/O error. If an OOB event occurs, the state machine
 * clears and disables the event and forces a transition to the nominated work
 * state (a currently executing work state will complete first).
 *
 * In such a situation, object->state remembers the state the machine should
 * have been in/gone to and returning NO_TRANSIT returns to that.
 */
static const struct fscache_transition fscache_osm_init_oob[] = {
        TRANSIT_TO(ABORT_INIT,
                   (1 << FSCACHE_OBJECT_EV_ERROR) |
                   (1 << FSCACHE_OBJECT_EV_KILL)),
        { 0, NULL }
};

static const struct fscache_transition fscache_osm_lookup_oob[] = {
        TRANSIT_TO(LOOKUP_FAILURE,
                   (1 << FSCACHE_OBJECT_EV_ERROR) |
                   (1 << FSCACHE_OBJECT_EV_KILL)),
        { 0, NULL }
};

static const struct fscache_transition fscache_osm_run_oob[] = {
        TRANSIT_TO(KILL_OBJECT,
                   (1 << FSCACHE_OBJECT_EV_ERROR) |
                   (1 << FSCACHE_OBJECT_EV_KILL)),
        { 0, NULL }
};

static int  fscache_get_object(struct fscache_object *);
static void fscache_put_object(struct fscache_object *);
static bool fscache_enqueue_dependents(struct fscache_object *, int);
static void fscache_dequeue_object(struct fscache_object *);

/*
 * we need to notify the parent when an op completes that we had outstanding
 * upon it
 */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
        struct fscache_object *parent = object->parent;

        _enter("OBJ%x {OBJ%x,%x}",
               object->debug_id, parent->debug_id, parent->n_ops);

        spin_lock_nested(&parent->lock, 1);
        parent->n_obj_ops--;
        parent->n_ops--;
        if (parent->n_ops == 0)
                fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
        spin_unlock(&parent->lock);
}

/*
 * Object state machine dispatcher.
 */
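/*
 * Note on event masking: on entry the dispatcher zeroes object->event_mask to
 * mask normal event handling while it runs. Before returning, the mask is
 * rebuilt from the OOB table plus the transition table of whatever wait state
 * was reached, and after an smp_mb() object->events is re-tested so that an
 * event raised whilst the mask was clear triggers a restart rather than being
 * left stranded.
 */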
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
        const struct fscache_transition *t;
        const struct fscache_state *state, *new_state;
        unsigned long events, event_mask;
        int event = -1;

        ASSERT(object != NULL);

        _enter("{OBJ%x,%s,%lx}",
               object->debug_id, object->state->name, object->events);

        event_mask = object->event_mask;
restart:
        object->event_mask = 0; /* Mask normal event handling */
        state = object->state;
restart_masked:
        events = object->events;

        /* Handle any out-of-band events (typically an error) */
        if (events & object->oob_event_mask) {
                _debug("{OBJ%x} oob %lx",
                       object->debug_id, events & object->oob_event_mask);
                for (t = object->oob_table; t->events; t++) {
                        if (events & t->events) {
                                state = t->transit_to;
                                ASSERT(state->work != NULL);
                                event = fls(events & t->events) - 1;
                                __clear_bit(event, &object->oob_event_mask);
                                clear_bit(event, &object->events);
                                goto execute_work_state;
                        }
                }
        }

        /* Wait states are just transition tables */
        if (!state->work) {
                if (events & event_mask) {
                        for (t = state->transitions; t->events; t++) {
                                if (events & t->events) {
                                        new_state = t->transit_to;
                                        event = fls(events & t->events) - 1;
                                        clear_bit(event, &object->events);
                                        _debug("{OBJ%x} ev %d: %s -> %s",
                                               object->debug_id, event,
                                               state->name, new_state->name);
                                        object->state = state = new_state;
                                        goto execute_work_state;
                                }
                        }

                        /* The event mask didn't include all the tabled bits */
                        BUG();
                }
                /* Randomly woke up */
                goto unmask_events;
        }

execute_work_state:
        _debug("{OBJ%x} exec %s", object->debug_id, state->name);

        new_state = state->work(object, event);
        event = -1;
        if (new_state == NO_TRANSIT) {
                _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
                fscache_enqueue_object(object);
                event_mask = object->oob_event_mask;
                goto unmask_events;
        }

        _debug("{OBJ%x} %s -> %s",
               object->debug_id, state->name, new_state->name);
        object->state = state = new_state;

        if (state->work) {
                if (unlikely(state->work == ((void *)2UL))) {
                        _leave(" [dead]");
                        return;
                }
                goto restart_masked;
        }

        /* Transited to wait state */
        event_mask = object->oob_event_mask;
        for (t = state->transitions; t->events; t++)
                event_mask |= t->events;

unmask_events:
        object->event_mask = event_mask;
        smp_mb();
        events = object->events;
        if (events & event_mask)
                goto restart;
        _leave(" [msk %lx]", event_mask);
}

/*
 * execute an object
 */
static void fscache_object_work_func(struct work_struct *work)
{
        struct fscache_object *object =
                container_of(work, struct fscache_object, work);
        unsigned long start;

        _enter("{OBJ%x}", object->debug_id);

        start = jiffies;
        fscache_object_sm_dispatcher(object);
        fscache_hist(fscache_objs_histogram, start);
        fscache_put_object(object);
}

/**
 * fscache_object_init - Initialise a cache object description
 * @object: Object description
 * @cookie: Cookie object will be attached to
 * @cache: Cache in which backing object will be found
 *
 * Initialise a cache object description to its basic values.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_object_init(struct fscache_object *object,
                         struct fscache_cookie *cookie,
                         struct fscache_cache *cache)
{
        const struct fscache_transition *t;

        atomic_inc(&cache->object_count);

        object->state = STATE(WAIT_FOR_INIT);
        object->oob_table = fscache_osm_init_oob;
        object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
        spin_lock_init(&object->lock);
        INIT_LIST_HEAD(&object->cache_link);
        INIT_HLIST_NODE(&object->cookie_link);
        INIT_WORK(&object->work, fscache_object_work_func);
        INIT_LIST_HEAD(&object->dependents);
        INIT_LIST_HEAD(&object->dep_link);
        INIT_LIST_HEAD(&object->pending_ops);
        object->n_children = 0;
        object->n_ops = object->n_in_progress = object->n_exclusive = 0;
        object->events = 0;
        object->store_limit = 0;
        object->store_limit_l = 0;
        object->cache = cache;
        object->cookie = cookie;
        object->parent = NULL;

        object->oob_event_mask = 0;
        for (t = object->oob_table; t->events; t++)
                object->oob_event_mask |= t->events;
        object->event_mask = object->oob_event_mask;
        for (t = object->state->transitions; t->events; t++)
                object->event_mask |= t->events;
}
EXPORT_SYMBOL(fscache_object_init);

/*
 * Abort object initialisation before we start it.
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
                                                                int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        object->oob_event_mask = 0;
        fscache_dequeue_object(object);
        return transit_to(KILL_OBJECT);
}

/*
 * initialise an object
 * - check the specified object's parent to see if we can make use of it
 *   immediately to do a creation
 * - we may need to start the process of creating a parent and we need to wait
 *   for the parent's lookup and creation to complete if it's not there yet
 */
static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
                                                             int event)
{
        struct fscache_object *parent;
        bool success;

        _enter("{OBJ%x},%d", object->debug_id, event);

        ASSERT(list_empty(&object->dep_link));

        parent = object->parent;
        if (!parent) {
                _leave(" [no parent]");
                return transit_to(DROP_OBJECT);
        }

        _debug("parent: %s of:%lx", parent->state->name, parent->flags);

        if (fscache_object_is_dying(parent)) {
                _leave(" [bad parent]");
                return transit_to(DROP_OBJECT);
        }

        if (fscache_object_is_available(parent)) {
                _leave(" [ready]");
                return transit_to(PARENT_READY);
        }

        _debug("wait");

        spin_lock(&parent->lock);
        fscache_stat(&fscache_n_cop_grab_object);
        success = false;
        if (fscache_object_is_live(parent) &&
            object->cache->ops->grab_object(object)) {
                list_add(&object->dep_link, &parent->dependents);
                success = true;
        }
        fscache_stat_d(&fscache_n_cop_grab_object);
        spin_unlock(&parent->lock);
        if (!success) {
                _leave(" [grab failed]");
                return transit_to(DROP_OBJECT);
        }

        /* fscache_acquire_non_index_cookie() uses this
         * to wake the chain up */
        fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
        _leave(" [wait]");
        return transit_to(WAIT_FOR_PARENT);
}
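
/*
 * A child that cannot proceed yet parks itself on its parent's ->dependents
 * list (see above) and sits in WAIT_FOR_PARENT; when the parent becomes
 * available, fscache_jumpstart_dependents() raises
 * FSCACHE_OBJECT_EV_PARENT_READY on each dependent, which brings it here.
 */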

/*
 * Once the parent object is ready, we should kick off our lookup op.
 */
static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
                                                        int event)
{
        struct fscache_object *parent = object->parent;

        _enter("{OBJ%x},%d", object->debug_id, event);

        ASSERT(parent != NULL);

        spin_lock(&parent->lock);
        parent->n_ops++;
        parent->n_obj_ops++;
        object->lookup_jif = jiffies;
        spin_unlock(&parent->lock);

        _leave("");
        return transit_to(LOOK_UP_OBJECT);
}

/*
 * look an object up in the cache from which it was allocated
 * - we hold an "access lock" on the parent object, so the parent object cannot
 *   be withdrawn by either party till we've finished
 */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
                                                          int event)
{
        struct fscache_cookie *cookie = object->cookie;
        struct fscache_object *parent = object->parent;
        int ret;

        _enter("{OBJ%x},%d", object->debug_id, event);

        object->oob_table = fscache_osm_lookup_oob;

        ASSERT(parent != NULL);
        ASSERTCMP(parent->n_ops, >, 0);
        ASSERTCMP(parent->n_obj_ops, >, 0);

        /* make sure the parent is still available */
        ASSERT(fscache_object_is_available(parent));

        if (fscache_object_is_dying(parent) ||
            test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
            !fscache_use_cookie(object)) {
                _leave(" [unavailable]");
                return transit_to(LOOKUP_FAILURE);
        }

        _debug("LOOKUP \"%s\" in \"%s\"",
               cookie->def->name, object->cache->tag->name);

        fscache_stat(&fscache_n_object_lookups);
        fscache_stat(&fscache_n_cop_lookup_object);
        ret = object->cache->ops->lookup_object(object);
        fscache_stat_d(&fscache_n_cop_lookup_object);

        fscache_unuse_cookie(object);

        if (ret == -ETIMEDOUT) {
                /* probably stuck behind another object, so move this one to
                 * the back of the queue */
                fscache_stat(&fscache_n_object_lookups_timed_out);
                _leave(" [timeout]");
                return NO_TRANSIT;
        }

        if (ret < 0) {
                _leave(" [error]");
                return transit_to(LOOKUP_FAILURE);
        }

        _leave(" [ok]");
        return transit_to(OBJECT_AVAILABLE);
}

/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
        struct fscache_cookie *cookie = object->cookie;

        _enter("{OBJ%x,%s}", object->debug_id, object->state->name);

        if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                fscache_stat(&fscache_n_object_lookups_negative);

                /* Allow write requests to begin stacking up and read requests to
                 * begin returning ENODATA.
                 */
                set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

                _debug("wake up lookup %p", &cookie->flags);
                clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
        }
        _leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);

/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
        struct fscache_cookie *cookie = object->cookie;

        _enter("{OBJ%x,%s}", object->debug_id, object->state->name);

        /* if we were still looking up, then we must have a positive lookup
         * result, in which case there may be data available */
        if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                fscache_stat(&fscache_n_object_lookups_positive);

                /* We do (presumably) have data */
                clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

                /* Allow write requests to begin stacking up and read requests
                 * to begin shovelling data.
                 */
                clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
        } else {
                fscache_stat(&fscache_n_object_created);
        }

        set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
        _leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);

/*
 * handle an object that has just become available
 */
static const struct fscache_state *fscache_object_available(struct fscache_object *object,
                                                            int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        object->oob_table = fscache_osm_run_oob;

        spin_lock(&object->lock);

        fscache_done_parent_op(object);
        if (object->n_in_progress == 0) {
                if (object->n_ops > 0) {
                        ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
                        fscache_start_operations(object);
                } else {
                        ASSERT(list_empty(&object->pending_ops));
                }
        }
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_cop_lookup_complete);
        object->cache->ops->lookup_complete(object);
        fscache_stat_d(&fscache_n_cop_lookup_complete);

        fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
        fscache_stat(&fscache_n_object_avail);

        _leave("");
        return transit_to(JUMPSTART_DEPS);
}

/*
 * Wake up this object's dependent objects now that we've become available.
 */
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
                                                                int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
                return NO_TRANSIT; /* Not finished; requeue */
        return transit_to(WAIT_FOR_CMD);
}

/*
 * Handle lookup or creation failure.
 */
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
                                                          int event)
{
        struct fscache_cookie *cookie;

        _enter("{OBJ%x},%d", object->debug_id, event);

        object->oob_event_mask = 0;

        fscache_stat(&fscache_n_cop_lookup_complete);
        object->cache->ops->lookup_complete(object);
        fscache_stat_d(&fscache_n_cop_lookup_complete);

        cookie = object->cookie;
        set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
        if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

        fscache_done_parent_op(object);
        return transit_to(KILL_OBJECT);
}
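
/*
 * Note: FSCACHE_OBJECT_EV_CLEARED is raised against an object by
 * fscache_done_parent_op() when a child's outstanding op against it completes
 * and its op count falls to zero, and by fscache_drop_object() when its last
 * child goes away; WAIT_FOR_CLEARANCE uses that event to re-run KILL_OBJECT
 * and re-test the counters.
 */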

/*
 * Wait for completion of all active operations on this object and the death of
 * all child objects of this object.
 */
static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
                                                       int event)
{
        _enter("{OBJ%x,%d,%d},%d",
               object->debug_id, object->n_ops, object->n_children, event);

        clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
        object->oob_event_mask = 0;

        if (list_empty(&object->dependents) &&
            object->n_ops == 0 &&
            object->n_children == 0)
                return transit_to(DROP_OBJECT);

        if (object->n_in_progress == 0) {
                spin_lock(&object->lock);
                if (object->n_ops > 0 && object->n_in_progress == 0)
                        fscache_start_operations(object);
                spin_unlock(&object->lock);
        }

        if (!list_empty(&object->dependents))
                return transit_to(KILL_DEPENDENTS);

        return transit_to(WAIT_FOR_CLEARANCE);
}

/*
 * Kill dependent objects.
 */
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
                                                           int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
                return NO_TRANSIT; /* Not finished */
        return transit_to(WAIT_FOR_CLEARANCE);
}

/*
 * Drop an object's attachments
 */
static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
                                                       int event)
{
        struct fscache_object *parent = object->parent;
        struct fscache_cookie *cookie = object->cookie;
        struct fscache_cache *cache = object->cache;
        bool awaken = false;

        _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);

        ASSERT(cookie != NULL);
        ASSERT(!hlist_unhashed(&object->cookie_link));

        /* Make sure the cookie no longer points here and that the netfs isn't
         * waiting for us.
         */
        spin_lock(&cookie->lock);
        hlist_del_init(&object->cookie_link);
        if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
                awaken = true;
        spin_unlock(&cookie->lock);

        if (awaken)
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);

        /* Prevent a race with our last child, which has to signal EV_CLEARED
         * before dropping our spinlock.
         */
        spin_lock(&object->lock);
        spin_unlock(&object->lock);

        /* Discard from the cache's collection of objects */
        spin_lock(&cache->object_list_lock);
        list_del_init(&object->cache_link);
        spin_unlock(&cache->object_list_lock);

        fscache_stat(&fscache_n_cop_drop_object);
        cache->ops->drop_object(object);
        fscache_stat_d(&fscache_n_cop_drop_object);

        /* The parent object wants to know when all its dependents have gone */
        if (parent) {
                _debug("release parent OBJ%x {%d}",
                       parent->debug_id, parent->n_children);

                spin_lock(&parent->lock);
                parent->n_children--;
                if (parent->n_children == 0)
                        fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
                spin_unlock(&parent->lock);
                object->parent = NULL;
        }

        /* this just shifts the object release to the work processor */
        fscache_put_object(object);
        fscache_stat(&fscache_n_object_dead);

        _leave("");
        return transit_to(OBJECT_DEAD);
}
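
/*
 * Object refcounting is delegated to the cache backend: fscache_get_object()
 * and fscache_put_object() wrap the backend's grab_object and put_object
 * methods. fscache_enqueue_object() takes such a ref before queueing the
 * object and fscache_object_work_func() drops it once the dispatcher has run.
 */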

/*
 * get a ref on an object
 */
static int fscache_get_object(struct fscache_object *object)
{
        int ret;

        fscache_stat(&fscache_n_cop_grab_object);
        ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
        fscache_stat_d(&fscache_n_cop_grab_object);
        return ret;
}

/*
 * Discard a ref on an object
 */
static void fscache_put_object(struct fscache_object *object)
{
        fscache_stat(&fscache_n_cop_put_object);
        object->cache->ops->put_object(object);
        fscache_stat_d(&fscache_n_cop_put_object);
}

/**
 * fscache_object_destroy - Note that a cache object is about to be destroyed
 * @object: The object to be destroyed
 *
 * Note the imminent destruction and deallocation of a cache object record.
 */
void fscache_object_destroy(struct fscache_object *object)
{
        fscache_objlist_remove(object);

        /* We can get rid of the cookie now */
        fscache_cookie_put(object->cookie);
        object->cookie = NULL;
}
EXPORT_SYMBOL(fscache_object_destroy);

/*
 * enqueue an object for metadata-type processing
 */
void fscache_enqueue_object(struct fscache_object *object)
{
        _enter("{OBJ%x}", object->debug_id);

        if (fscache_get_object(object) >= 0) {
                wait_queue_head_t *cong_wq =
                        &get_cpu_var(fscache_object_cong_wait);

                if (queue_work(fscache_object_wq, &object->work)) {
                        if (fscache_object_congested())
                                wake_up(cong_wq);
                } else
                        fscache_put_object(object);

                put_cpu_var(fscache_object_cong_wait);
        }
}

/**
 * fscache_object_sleep_till_congested - Sleep until object wq is congested
 * @timeoutp: Scheduler sleep timeout
 *
 * Allow an object handler to sleep until the object workqueue is congested.
 *
 * The caller must set up a wake up event before calling this and must have set
 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
 * condition before calling this function as no test is made here.
 *
 * %true is returned if the object wq is congested, %false otherwise.
 */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
        wait_queue_head_t *cong_wq = &__get_cpu_var(fscache_object_cong_wait);
        DEFINE_WAIT(wait);

        if (fscache_object_congested())
                return true;

        add_wait_queue_exclusive(cong_wq, &wait);
        if (!fscache_object_congested())
                *timeoutp = schedule_timeout(*timeoutp);
        finish_wait(cong_wq, &wait);

        return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);

/*
 * Enqueue the dependents of an object for metadata-type processing.
 *
 * If we don't manage to finish the list before the scheduler wants to run
 * again then return false immediately. We return true if the list was
 * cleared.
 */
static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
{
        struct fscache_object *dep;
        bool ret = true;

        _enter("{OBJ%x}", object->debug_id);

        if (list_empty(&object->dependents))
                return true;

        spin_lock(&object->lock);

        while (!list_empty(&object->dependents)) {
                dep = list_entry(object->dependents.next,
                                 struct fscache_object, dep_link);
                list_del_init(&dep->dep_link);

                fscache_raise_event(dep, event);
                fscache_put_object(dep);

                if (!list_empty(&object->dependents) && need_resched()) {
                        ret = false;
                        break;
                }
        }

        spin_unlock(&object->lock);
        return ret;
}

/*
 * remove an object from whatever queue it's waiting on
 */
static void fscache_dequeue_object(struct fscache_object *object)
{
        _enter("{OBJ%x}", object->debug_id);

        if (!list_empty(&object->dep_link)) {
                spin_lock(&object->parent->lock);
                list_del_init(&object->dep_link);
                spin_unlock(&object->parent->lock);
        }

        _leave("");
}

/**
 * fscache_check_aux - Ask the netfs whether an object on disk is still valid
 * @object: The object to ask about
 * @data: The auxiliary data for the object
 * @datalen: The size of the auxiliary data
 *
 * This function consults the netfs about the coherency state of an object.
 * The caller must be holding a ref on cookie->n_active (held by
 * fscache_look_up_object() on behalf of the cache backend during object lookup
 * and creation).
 */
enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
                                        const void *data, uint16_t datalen)
{
        enum fscache_checkaux result;

        if (!object->cookie->def->check_aux) {
                fscache_stat(&fscache_n_checkaux_none);
                return FSCACHE_CHECKAUX_OKAY;
        }

        result = object->cookie->def->check_aux(object->cookie->netfs_data,
                                                data, datalen);
        switch (result) {
                /* entry okay as is */
        case FSCACHE_CHECKAUX_OKAY:
                fscache_stat(&fscache_n_checkaux_okay);
                break;

                /* entry requires update */
        case FSCACHE_CHECKAUX_NEEDS_UPDATE:
                fscache_stat(&fscache_n_checkaux_update);
                break;

                /* entry requires deletion */
        case FSCACHE_CHECKAUX_OBSOLETE:
                fscache_stat(&fscache_n_checkaux_obsolete);
                break;

        default:
                BUG();
        }

        return result;
}
EXPORT_SYMBOL(fscache_check_aux);
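
/*
 * Note: invalidation is effected by submitting an exclusive, asynchronous
 * operation that runs the backend's ->invalidate_object() method. Pending
 * reads and writes are cancelled first; once the op is queued, netfs I/O is
 * allowed in again and simply queues up behind it, and the state machine
 * moves on to UPDATE_OBJECT. The fscache_invalidate_object() wrapper below
 * just brackets the work with stat accounting.
 */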

/*
 * Asynchronously invalidate an object.
 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
                                                              int event)
{
        struct fscache_operation *op;
        struct fscache_cookie *cookie = object->cookie;

        _enter("{OBJ%x},%d", object->debug_id, event);

        /* We're going to need the cookie. If the cookie is not available then
         * retire the object instead.
         */
        if (!fscache_use_cookie(object)) {
                ASSERT(object->cookie->stores.rnode == NULL);
                set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
                _leave(" [no cookie]");
                return transit_to(KILL_OBJECT);
        }

        /* Reject any new read/write ops and abort any that are pending. */
        fscache_invalidate_writes(cookie);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        fscache_cancel_all_ops(object);

        /* Now we have to wait for in-progress reads and writes */
        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op)
                goto nomem;

        fscache_operation_init(op, object->cache->ops->invalidate_object, NULL);
        op->flags = FSCACHE_OP_ASYNC |
                (1 << FSCACHE_OP_EXCLUSIVE) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);

        spin_lock(&cookie->lock);
        if (fscache_submit_exclusive_op(object, op) < 0)
                goto submit_op_failed;
        spin_unlock(&cookie->lock);
        fscache_put_operation(op);

        /* Once we've completed the invalidation, we know there will be no data
         * stored in the cache and thus we can reinstate the data-check-skip
         * optimisation.
         */
        set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* We can allow read and write requests to come in once again. They'll
         * queue up behind our exclusive invalidation operation.
         */
        if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
        _leave(" [ok]");
        return transit_to(UPDATE_OBJECT);

nomem:
        clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
        fscache_unuse_cookie(object);
        _leave(" [ENOMEM]");
        return transit_to(KILL_OBJECT);

submit_op_failed:
        clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
        spin_unlock(&cookie->lock);
        kfree(op);
        _leave(" [EIO]");
        return transit_to(KILL_OBJECT);
}

static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
                                                             int event)
{
        const struct fscache_state *s;

        fscache_stat(&fscache_n_invalidates_run);
        fscache_stat(&fscache_n_cop_invalidate_object);
        s = _fscache_invalidate_object(object, event);
        fscache_stat_d(&fscache_n_cop_invalidate_object);
        return s;
}

/*
 * Asynchronously update an object.
 */
static const struct fscache_state *fscache_update_object(struct fscache_object *object,
                                                         int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        fscache_stat(&fscache_n_updates_run);
        fscache_stat(&fscache_n_cop_update_object);
        object->cache->ops->update_object(object);
        fscache_stat_d(&fscache_n_cop_update_object);

        _leave("");
        return transit_to(WAIT_FOR_CMD);
}