Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.0-rc5, 637 lines
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

static void fscache_operation_dummy_cancel(struct fscache_operation *op)
{
}

/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @cookie: The cookie the operation is associated with (used for tracing)
 * @op: The operation to initialise
 * @processor: The function that will process the operation (may be NULL)
 * @cancel: The cancellation function to assign (a dummy is used if NULL)
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set flags
 * and the object if needed.
 */
void fscache_operation_init(struct fscache_cookie *cookie,
			    struct fscache_operation *op,
			    fscache_operation_processor_t processor,
			    fscache_operation_cancel_t cancel,
			    fscache_operation_release_t release)
{
	INIT_WORK(&op->work, fscache_op_work_func);
	atomic_set(&op->usage, 1);
	op->state = FSCACHE_OP_ST_INITIALISED;
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->processor = processor;
	op->cancel = cancel ?: fscache_operation_dummy_cancel;
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
	fscache_stat(&fscache_n_op_initialised);
	trace_fscache_op(cookie, op, fscache_op_init);
}
EXPORT_SYMBOL(fscache_operation_init);
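
/* Illustrative sketch only (hypothetical caller, not part of this file):
 * a typical user allocates an op, initialises it and picks its flags before
 * submission.  my_op_processor and my_op_release are made-up names; the
 * FSCACHE_OP_* flags are the real ones used elsewhere in fscache:
 *
 *	struct fscache_operation *op;
 *
 *	op = kzalloc(sizeof(*op), GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *	fscache_operation_init(cookie, op, my_op_processor, NULL, my_op_release);
 *	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_UNUSE_COOKIE);
 *
 * The op is then submitted with fscache_submit_op() or
 * fscache_submit_exclusive_op() below, typically under the cookie's lock.
 */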

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	struct fscache_cookie *cookie = op->object->cookie;

	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		trace_fscache_op(cookie, op, fscache_op_enqueue_async);
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		trace_fscache_op(cookie, op, fscache_op_enqueue_mythread);
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	else
		trace_fscache_op(object->cookie, op, fscache_op_run);
	fscache_stat(&fscache_n_op_run);
}
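
/* Illustrative sketch only: callers that run the op themselves
 * (FSCACHE_OP_MYTHREAD) usually set FSCACHE_OP_WAITING before submitting and
 * then sleep until fscache_run_op() clears the bit and wakes them; the core
 * handshake is roughly:
 *
 *	set_bit(FSCACHE_OP_WAITING, &op->flags);
 *	if (fscache_submit_op(object, op) == 0)
 *		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 *			    TASK_UNINTERRUPTIBLE);
 *
 * Real callers additionally handle cancellation and object death whilst
 * waiting; this is only the basic pattern.
 */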

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", op->processor, op->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_submit_ex);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
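
/* Summary (descriptive note, derived from the submission paths above and
 * below): the outcome of submitting an op depends on the object's flags:
 *  - FSCACHE_OBJECT_IS_LIVE clear: rejected, op cancelled, -ENOBUFS
 *  - cache marked broken: op cancelled, -EIO
 *  - FSCACHE_OBJECT_IS_AVAILABLE: run immediately or queued on pending_ops
 *  - FSCACHE_OBJECT_IS_LOOKED_UP (not yet available): queued on pending_ops
 *  - FSCACHE_OBJECT_KILLED_BY_CACHE: op cancelled, -ENOBUFS
 *  - anything else: unexpected submission is reported, op cancelled, -ENOBUFS
 */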

/*
 * submit an operation for an object
 * - objects may be submitted only in the following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(object->cookie, op, fscache_op_submit);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the op */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	trace_fscache_op(object->cookie, op, fscache_op_cancel);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		ASSERTCMP(object->n_in_progress, >, 0);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		fscache_stat(&fscache_n_op_cancelled);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
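
/* Illustrative sketch only: a caller that was waiting for an op (on the
 * FSCACHE_OP_WAITING bit) and receives a fatal signal will typically try to
 * cancel, falling back to waiting if the op has already started; roughly:
 *
 *	if (fscache_cancel_op(op, false) == 0)
 *		return -ERESTARTSYS;	// cancelled whilst still pending
 *	// on -EBUSY the op is already in progress (or complete), so the
 *	// caller keeps waiting for it instead
 */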

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		trace_fscache_op(object->cookie, op, fscache_op_cancel_all);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}

/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	if (!cancelled) {
		trace_fscache_op(object->cookie, op, fscache_op_completed);
		op->state = FSCACHE_OP_ST_COMPLETE;
	} else {
		op->cancel(op);
		trace_fscache_op(object->cookie, op, fscache_op_cancelled);
		op->state = FSCACHE_OP_ST_CANCELLED;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
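
/* Note (descriptive only): fscache_op_complete() updates the op's state and
 * the object's in-progress/exclusive accounting, but it does not drop any
 * reference - op->usage is untouched.  A holder must still call
 * fscache_put_operation(), as the async work function at the bottom of this
 * file does for the ops it runs.
 */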

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object ? op->object->debug_id : 0,
	       op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	trace_fscache_op(op->object ? op->object->cookie : NULL, op, fscache_op_put);

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
		    op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}
	op->state = FSCACHE_OP_ST_DEAD;

	object = op->object;
	if (likely(object)) {
		if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
			atomic_dec(&object->n_reads);
		if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
			fscache_unuse_cookie(object);

		/* now... we may get called with the object spinlock held, so we
		 * complete the cleanup here only if we can immediately acquire the
		 * lock, and defer it otherwise */
		if (!spin_trylock(&object->lock)) {
			_debug("defer put");
			fscache_stat(&fscache_n_op_deferred_release);

			cache = object->cache;
			spin_lock(&cache->op_gc_list_lock);
			list_add_tail(&op->pend_link, &cache->op_gc_list);
			spin_unlock(&cache->op_gc_list_lock);
			schedule_work(&cache->op_gc);
			_leave(" [defer]");
			return;
		}

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
	}

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		trace_fscache_op(object->cookie, op, fscache_op_gc);

		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	trace_fscache_op(op->object->cookie, op, fscache_op_work);

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}
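
/* Illustrative sketch only: a minimal processor callback run from the work
 * function above might look like this (my_op_processor and do_the_work are
 * hypothetical names):
 *
 *	static void my_op_processor(struct fscache_operation *op)
 *	{
 *		do_the_work(op);
 *		fscache_op_complete(op, false);
 *	}
 *
 * fscache_op_work_func() drops the workqueue's reference afterwards, so the
 * processor does not need to call fscache_put_operation() for that ref
 * itself.
 */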