Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove per-queue plugging

Code has been converted over to the new explicit on-stack plugging,
and delay users have been converted to use the new API for that.
So lets kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

+151 -1269
-5
Documentation/block/biodoc.txt
··· 963 963 964 964 elevator_add_req_fn* called to add a new request into the scheduler 965 965 966 - elevator_queue_empty_fn returns true if the merge queue is empty. 967 - Drivers shouldn't use this, but rather check 968 - if elv_next_request is NULL (without losing the 969 - request if one exists!) 970 - 971 966 elevator_former_req_fn 972 967 elevator_latter_req_fn These return the request before or after the 973 968 one specified in disk sort order. Used by the
+20 -153
block/blk-core.c
··· 198 198 } 199 199 EXPORT_SYMBOL(blk_dump_rq_flags); 200 200 201 + /* 202 + * Make sure that plugs that were pending when this function was entered, 203 + * are now complete and requests pushed to the queue. 204 + */ 205 + static inline void queue_sync_plugs(struct request_queue *q) 206 + { 207 + /* 208 + * If the current process is plugged and has barriers submitted, 209 + * we will livelock if we don't unplug first. 210 + */ 211 + blk_flush_plug(current); 212 + } 213 + 201 214 static void blk_delay_work(struct work_struct *work) 202 215 { 203 216 struct request_queue *q; ··· 236 223 schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs)); 237 224 } 238 225 EXPORT_SYMBOL(blk_delay_queue); 239 - 240 - /* 241 - * "plug" the device if there are no outstanding requests: this will 242 - * force the transfer to start only after we have put all the requests 243 - * on the list. 244 - * 245 - * This is called with interrupts off and no requests on the queue and 246 - * with the queue lock held. 247 - */ 248 - void blk_plug_device(struct request_queue *q) 249 - { 250 - WARN_ON(!irqs_disabled()); 251 - 252 - /* 253 - * don't plug a stopped queue, it must be paired with blk_start_queue() 254 - * which will restart the queueing 255 - */ 256 - if (blk_queue_stopped(q)) 257 - return; 258 - 259 - if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) { 260 - mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); 261 - trace_block_plug(q); 262 - } 263 - } 264 - EXPORT_SYMBOL(blk_plug_device); 265 - 266 - /** 267 - * blk_plug_device_unlocked - plug a device without queue lock held 268 - * @q: The &struct request_queue to plug 269 - * 270 - * Description: 271 - * Like @blk_plug_device(), but grabs the queue lock and disables 272 - * interrupts. 
273 - **/ 274 - void blk_plug_device_unlocked(struct request_queue *q) 275 - { 276 - unsigned long flags; 277 - 278 - spin_lock_irqsave(q->queue_lock, flags); 279 - blk_plug_device(q); 280 - spin_unlock_irqrestore(q->queue_lock, flags); 281 - } 282 - EXPORT_SYMBOL(blk_plug_device_unlocked); 283 - 284 - /* 285 - * remove the queue from the plugged list, if present. called with 286 - * queue lock held and interrupts disabled. 287 - */ 288 - int blk_remove_plug(struct request_queue *q) 289 - { 290 - WARN_ON(!irqs_disabled()); 291 - 292 - if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q)) 293 - return 0; 294 - 295 - del_timer(&q->unplug_timer); 296 - return 1; 297 - } 298 - EXPORT_SYMBOL(blk_remove_plug); 299 - 300 - /* 301 - * remove the plug and let it rip.. 302 - */ 303 - void __generic_unplug_device(struct request_queue *q) 304 - { 305 - if (unlikely(blk_queue_stopped(q))) 306 - return; 307 - if (!blk_remove_plug(q) && !blk_queue_nonrot(q)) 308 - return; 309 - 310 - q->request_fn(q); 311 - } 312 - 313 - /** 314 - * generic_unplug_device - fire a request queue 315 - * @q: The &struct request_queue in question 316 - * 317 - * Description: 318 - * Linux uses plugging to build bigger requests queues before letting 319 - * the device have at them. If a queue is plugged, the I/O scheduler 320 - * is still adding and merging requests on the queue. Once the queue 321 - * gets unplugged, the request_fn defined for the queue is invoked and 322 - * transfers started. 
323 - **/ 324 - void generic_unplug_device(struct request_queue *q) 325 - { 326 - if (blk_queue_plugged(q)) { 327 - spin_lock_irq(q->queue_lock); 328 - __generic_unplug_device(q); 329 - spin_unlock_irq(q->queue_lock); 330 - } 331 - } 332 - EXPORT_SYMBOL(generic_unplug_device); 333 - 334 - static void blk_backing_dev_unplug(struct backing_dev_info *bdi, 335 - struct page *page) 336 - { 337 - struct request_queue *q = bdi->unplug_io_data; 338 - 339 - blk_unplug(q); 340 - } 341 - 342 - void blk_unplug_work(struct work_struct *work) 343 - { 344 - struct request_queue *q = 345 - container_of(work, struct request_queue, unplug_work); 346 - 347 - trace_block_unplug_io(q); 348 - q->unplug_fn(q); 349 - } 350 - 351 - void blk_unplug_timeout(unsigned long data) 352 - { 353 - struct request_queue *q = (struct request_queue *)data; 354 - 355 - trace_block_unplug_timer(q); 356 - kblockd_schedule_work(q, &q->unplug_work); 357 - } 358 - 359 - void blk_unplug(struct request_queue *q) 360 - { 361 - /* 362 - * devices don't necessarily have an ->unplug_fn defined 363 - */ 364 - if (q->unplug_fn) { 365 - trace_block_unplug_io(q); 366 - q->unplug_fn(q); 367 - } 368 - } 369 - EXPORT_SYMBOL(blk_unplug); 370 226 371 227 /** 372 228 * blk_start_queue - restart a previously stopped queue ··· 271 389 **/ 272 390 void blk_stop_queue(struct request_queue *q) 273 391 { 274 - blk_remove_plug(q); 275 392 cancel_delayed_work(&q->delay_work); 276 393 queue_flag_set(QUEUE_FLAG_STOPPED, q); 277 394 } ··· 292 411 */ 293 412 void blk_sync_queue(struct request_queue *q) 294 413 { 295 - del_timer_sync(&q->unplug_timer); 296 414 del_timer_sync(&q->timeout); 297 - cancel_work_sync(&q->unplug_work); 298 415 throtl_shutdown_timer_wq(q); 299 416 cancel_delayed_work_sync(&q->delay_work); 417 + queue_sync_plugs(q); 300 418 } 301 419 EXPORT_SYMBOL(blk_sync_queue); 302 420 ··· 310 430 */ 311 431 void __blk_run_queue(struct request_queue *q) 312 432 { 313 - blk_remove_plug(q); 314 - 315 433 if 
(unlikely(blk_queue_stopped(q))) 316 - return; 317 - 318 - if (elv_queue_empty(q)) 319 434 return; 320 435 321 436 /* ··· 320 445 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 321 446 q->request_fn(q); 322 447 queue_flag_clear(QUEUE_FLAG_REENTER, q); 323 - } else { 324 - queue_flag_set(QUEUE_FLAG_PLUGGED, q); 325 - kblockd_schedule_work(q, &q->unplug_work); 326 - } 448 + } else 449 + queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); 327 450 } 328 451 EXPORT_SYMBOL(__blk_run_queue); 329 452 ··· 408 535 if (!q) 409 536 return NULL; 410 537 411 - q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; 412 - q->backing_dev_info.unplug_io_data = q; 413 538 q->backing_dev_info.ra_pages = 414 539 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 415 540 q->backing_dev_info.state = 0; ··· 427 556 428 557 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, 429 558 laptop_mode_timer_fn, (unsigned long) q); 430 - init_timer(&q->unplug_timer); 431 559 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); 432 560 INIT_LIST_HEAD(&q->timeout_list); 433 561 INIT_LIST_HEAD(&q->flush_queue[0]); 434 562 INIT_LIST_HEAD(&q->flush_queue[1]); 435 563 INIT_LIST_HEAD(&q->flush_data_in_flight); 436 - INIT_WORK(&q->unplug_work, blk_unplug_work); 437 564 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work); 438 565 439 566 kobject_init(&q->kobj, &blk_queue_ktype); ··· 521 652 q->request_fn = rfn; 522 653 q->prep_rq_fn = NULL; 523 654 q->unprep_rq_fn = NULL; 524 - q->unplug_fn = generic_unplug_device; 525 655 q->queue_flags = QUEUE_FLAG_DEFAULT; 526 656 q->queue_lock = lock; 527 657 ··· 778 910 } 779 911 780 912 /* 781 - * No available requests for this queue, unplug the device and wait for some 782 - * requests to become available. 913 + * No available requests for this queue, wait for some requests to become 914 + * available. 783 915 * 784 916 * Called with q->queue_lock held, and returns with it unlocked. 
785 917 */ ··· 800 932 801 933 trace_block_sleeprq(q, bio, rw_flags & 1); 802 934 803 - __generic_unplug_device(q); 804 935 spin_unlock_irq(q->queue_lock); 805 936 io_schedule(); 806 937 ··· 925 1058 int where) 926 1059 { 927 1060 drive_stat_acct(rq, 1); 928 - __elv_add_request(q, rq, where, 0); 1061 + __elv_add_request(q, rq, where); 929 1062 } 930 1063 931 1064 /** ··· 2665 2798 /* 2666 2799 * rq is already accounted, so use raw insert 2667 2800 */ 2668 - __elv_add_request(q, rq, ELEVATOR_INSERT_SORT, 0); 2801 + __elv_add_request(q, rq, ELEVATOR_INSERT_SORT); 2669 2802 } 2670 2803 2671 2804 if (q) {
+2 -2
block/blk-exec.c
··· 54 54 rq->end_io = done; 55 55 WARN_ON(irqs_disabled()); 56 56 spin_lock_irq(q->queue_lock); 57 - __elv_add_request(q, rq, where, 1); 58 - __generic_unplug_device(q); 57 + __elv_add_request(q, rq, where); 58 + __blk_run_queue(q); 59 59 /* the queue is stopped so it won't be plugged+unplugged */ 60 60 if (rq->cmd_type == REQ_TYPE_PM_RESUME) 61 61 q->request_fn(q);
+1 -2
block/blk-flush.c
··· 194 194 { 195 195 struct request_queue *q = flush_rq->q; 196 196 struct list_head *running = &q->flush_queue[q->flush_running_idx]; 197 - bool was_empty = elv_queue_empty(q); 198 197 bool queued = false; 199 198 struct request *rq, *n; 200 199 ··· 212 213 } 213 214 214 215 /* after populating an empty queue, kick it to avoid stall */ 215 - if (queued && was_empty) 216 + if (queued) 216 217 __blk_run_queue(q); 217 218 } 218 219
-8
block/blk-settings.c
··· 164 164 blk_queue_congestion_threshold(q); 165 165 q->nr_batching = BLK_BATCH_REQ; 166 166 167 - q->unplug_thresh = 4; /* hmm */ 168 - q->unplug_delay = msecs_to_jiffies(3); /* 3 milliseconds */ 169 - if (q->unplug_delay == 0) 170 - q->unplug_delay = 1; 171 - 172 - q->unplug_timer.function = blk_unplug_timeout; 173 - q->unplug_timer.data = (unsigned long)q; 174 - 175 167 blk_set_default_limits(&q->limits); 176 168 blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); 177 169
-1
block/blk-throttle.c
··· 800 800 if (nr_disp) { 801 801 while((bio = bio_list_pop(&bio_list_on_stack))) 802 802 generic_make_request(bio); 803 - blk_unplug(q); 804 803 } 805 804 return nr_disp; 806 805 }
-2
block/blk.h
··· 18 18 void blk_dequeue_request(struct request *rq); 19 19 void __blk_queue_free_tags(struct request_queue *q); 20 20 21 - void blk_unplug_work(struct work_struct *work); 22 - void blk_unplug_timeout(unsigned long data); 23 21 void blk_rq_timed_out_timer(unsigned long data); 24 22 void blk_delete_timer(struct request *); 25 23 void blk_add_timer(struct request *);
-8
block/cfq-iosched.c
··· 499 499 } 500 500 } 501 501 502 - static int cfq_queue_empty(struct request_queue *q) 503 - { 504 - struct cfq_data *cfqd = q->elevator->elevator_data; 505 - 506 - return !cfqd->rq_queued; 507 - } 508 - 509 502 /* 510 503 * Scale schedule slice based on io priority. Use the sync time slice only 511 504 * if a queue is marked sync and has sync io queued. A sync queue with async ··· 4054 4061 .elevator_add_req_fn = cfq_insert_request, 4055 4062 .elevator_activate_req_fn = cfq_activate_request, 4056 4063 .elevator_deactivate_req_fn = cfq_deactivate_request, 4057 - .elevator_queue_empty_fn = cfq_queue_empty, 4058 4064 .elevator_completed_req_fn = cfq_completed_request, 4059 4065 .elevator_former_req_fn = elv_rb_former_request, 4060 4066 .elevator_latter_req_fn = elv_rb_latter_request,
-9
block/deadline-iosched.c
··· 326 326 return 1; 327 327 } 328 328 329 - static int deadline_queue_empty(struct request_queue *q) 330 - { 331 - struct deadline_data *dd = q->elevator->elevator_data; 332 - 333 - return list_empty(&dd->fifo_list[WRITE]) 334 - && list_empty(&dd->fifo_list[READ]); 335 - } 336 - 337 329 static void deadline_exit_queue(struct elevator_queue *e) 338 330 { 339 331 struct deadline_data *dd = e->elevator_data; ··· 437 445 .elevator_merge_req_fn = deadline_merged_requests, 438 446 .elevator_dispatch_fn = deadline_dispatch_requests, 439 447 .elevator_add_req_fn = deadline_add_request, 440 - .elevator_queue_empty_fn = deadline_queue_empty, 441 448 .elevator_former_req_fn = elv_rb_former_request, 442 449 .elevator_latter_req_fn = elv_rb_latter_request, 443 450 .elevator_init_fn = deadline_init_queue,
+3 -40
block/elevator.c
··· 619 619 620 620 void elv_insert(struct request_queue *q, struct request *rq, int where) 621 621 { 622 - int unplug_it = 1; 623 - 624 622 trace_block_rq_insert(q, rq); 625 623 626 624 rq->q = q; 627 625 628 626 switch (where) { 629 627 case ELEVATOR_INSERT_REQUEUE: 630 - /* 631 - * Most requeues happen because of a busy condition, 632 - * don't force unplug of the queue for that case. 633 - * Clear unplug_it and fall through. 634 - */ 635 - unplug_it = 0; 636 - 637 628 case ELEVATOR_INSERT_FRONT: 638 629 rq->cmd_flags |= REQ_SOFTBARRIER; 639 630 list_add(&rq->queuelist, &q->queue_head); ··· 670 679 rq->cmd_flags |= REQ_SOFTBARRIER; 671 680 blk_insert_flush(rq); 672 681 break; 673 - 674 682 default: 675 683 printk(KERN_ERR "%s: bad insertion point %d\n", 676 684 __func__, where); 677 685 BUG(); 678 686 } 679 - 680 - if (unplug_it && blk_queue_plugged(q)) { 681 - int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC] 682 - - queue_in_flight(q); 683 - 684 - if (nrq >= q->unplug_thresh) 685 - __generic_unplug_device(q); 686 - } 687 687 } 688 688 689 - void __elv_add_request(struct request_queue *q, struct request *rq, int where, 690 - int plug) 689 + void __elv_add_request(struct request_queue *q, struct request *rq, int where) 691 690 { 692 691 BUG_ON(rq->cmd_flags & REQ_ON_PLUG); 693 692 ··· 692 711 where == ELEVATOR_INSERT_SORT) 693 712 where = ELEVATOR_INSERT_BACK; 694 713 695 - if (plug) 696 - blk_plug_device(q); 697 - 698 714 elv_insert(q, rq, where); 699 715 } 700 716 EXPORT_SYMBOL(__elv_add_request); 701 717 702 - void elv_add_request(struct request_queue *q, struct request *rq, int where, 703 - int plug) 718 + void elv_add_request(struct request_queue *q, struct request *rq, int where) 704 719 { 705 720 unsigned long flags; 706 721 707 722 spin_lock_irqsave(q->queue_lock, flags); 708 - __elv_add_request(q, rq, where, plug); 723 + __elv_add_request(q, rq, where); 709 724 spin_unlock_irqrestore(q->queue_lock, flags); 710 725 } 711 726 
EXPORT_SYMBOL(elv_add_request); 712 - 713 - int elv_queue_empty(struct request_queue *q) 714 - { 715 - struct elevator_queue *e = q->elevator; 716 - 717 - if (!list_empty(&q->queue_head)) 718 - return 0; 719 - 720 - if (e->ops->elevator_queue_empty_fn) 721 - return e->ops->elevator_queue_empty_fn(q); 722 - 723 - return 1; 724 - } 725 - EXPORT_SYMBOL(elv_queue_empty); 726 727 727 728 struct request *elv_latter_request(struct request_queue *q, struct request *rq) 728 729 {
-8
block/noop-iosched.c
··· 39 39 list_add_tail(&rq->queuelist, &nd->queue); 40 40 } 41 41 42 - static int noop_queue_empty(struct request_queue *q) 43 - { 44 - struct noop_data *nd = q->elevator->elevator_data; 45 - 46 - return list_empty(&nd->queue); 47 - } 48 - 49 42 static struct request * 50 43 noop_former_request(struct request_queue *q, struct request *rq) 51 44 { ··· 83 90 .elevator_merge_req_fn = noop_merged_requests, 84 91 .elevator_dispatch_fn = noop_dispatch, 85 92 .elevator_add_req_fn = noop_add_request, 86 - .elevator_queue_empty_fn = noop_queue_empty, 87 93 .elevator_former_req_fn = noop_former_request, 88 94 .elevator_latter_req_fn = noop_latter_request, 89 95 .elevator_init_fn = noop_init_queue,
-6
drivers/block/cciss.c
··· 3170 3170 int sg_index = 0; 3171 3171 int chained = 0; 3172 3172 3173 - /* We call start_io here in case there is a command waiting on the 3174 - * queue that has not been sent. 3175 - */ 3176 - if (blk_queue_plugged(q)) 3177 - goto startio; 3178 - 3179 3173 queue: 3180 3174 creq = blk_peek_request(q); 3181 3175 if (!creq)
-3
drivers/block/cpqarray.c
··· 911 911 struct scatterlist tmp_sg[SG_MAX]; 912 912 int i, dir, seg; 913 913 914 - if (blk_queue_plugged(q)) 915 - goto startio; 916 - 917 914 queue_next: 918 915 creq = blk_peek_request(q); 919 916 if (!creq)
-2
drivers/block/drbd/drbd_actlog.c
··· 689 689 } 690 690 } 691 691 692 - drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev)); 693 - 694 692 /* always (try to) flush bitmap to stable storage */ 695 693 drbd_md_flush(mdev); 696 694
-1
drivers/block/drbd/drbd_bitmap.c
··· 840 840 for (i = 0; i < num_pages; i++) 841 841 bm_page_io_async(mdev, b, i, rw); 842 842 843 - drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev)); 844 843 wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0); 845 844 846 845 if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
-14
drivers/block/drbd/drbd_int.h
··· 2382 2382 return QUEUE_ORDERED_NONE; 2383 2383 } 2384 2384 2385 - static inline void drbd_blk_run_queue(struct request_queue *q) 2386 - { 2387 - if (q && q->unplug_fn) 2388 - q->unplug_fn(q); 2389 - } 2390 - 2391 - static inline void drbd_kick_lo(struct drbd_conf *mdev) 2392 - { 2393 - if (get_ldev(mdev)) { 2394 - drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev)); 2395 - put_ldev(mdev); 2396 - } 2397 - } 2398 - 2399 2385 static inline void drbd_md_flush(struct drbd_conf *mdev) 2400 2386 { 2401 2387 int r;
+1 -32
drivers/block/drbd/drbd_main.c
··· 2719 2719 return 0; 2720 2720 } 2721 2721 2722 - static void drbd_unplug_fn(struct request_queue *q) 2723 - { 2724 - struct drbd_conf *mdev = q->queuedata; 2725 - 2726 - /* unplug FIRST */ 2727 - spin_lock_irq(q->queue_lock); 2728 - blk_remove_plug(q); 2729 - spin_unlock_irq(q->queue_lock); 2730 - 2731 - /* only if connected */ 2732 - spin_lock_irq(&mdev->req_lock); 2733 - if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) { 2734 - D_ASSERT(mdev->state.role == R_PRIMARY); 2735 - if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) { 2736 - /* add to the data.work queue, 2737 - * unless already queued. 2738 - * XXX this might be a good addition to drbd_queue_work 2739 - * anyways, to detect "double queuing" ... */ 2740 - if (list_empty(&mdev->unplug_work.list)) 2741 - drbd_queue_work(&mdev->data.work, 2742 - &mdev->unplug_work); 2743 - } 2744 - } 2745 - spin_unlock_irq(&mdev->req_lock); 2746 - 2747 - if (mdev->state.disk >= D_INCONSISTENT) 2748 - drbd_kick_lo(mdev); 2749 - } 2750 - 2751 2722 static void drbd_set_defaults(struct drbd_conf *mdev) 2752 2723 { 2753 2724 /* This way we get a compile error when sync_conf grows, ··· 3193 3222 blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); 3194 3223 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); 3195 3224 blk_queue_merge_bvec(q, drbd_merge_bvec); 3196 - q->queue_lock = &mdev->req_lock; /* needed since we use */ 3197 - /* plugging on a queue, that actually has no requests! */ 3198 - q->unplug_fn = drbd_unplug_fn; 3225 + q->queue_lock = &mdev->req_lock; 3199 3226 3200 3227 mdev->md_io_page = alloc_page(GFP_KERNEL); 3201 3228 if (!mdev->md_io_page)
+1 -19
drivers/block/drbd/drbd_receiver.c
··· 187 187 return NULL; 188 188 } 189 189 190 - /* kick lower level device, if we have more than (arbitrary number) 191 - * reference counts on it, which typically are locally submitted io 192 - * requests. don't use unacked_cnt, so we speed up proto A and B, too. */ 193 - static void maybe_kick_lo(struct drbd_conf *mdev) 194 - { 195 - if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark) 196 - drbd_kick_lo(mdev); 197 - } 198 - 199 190 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed) 200 191 { 201 192 struct drbd_epoch_entry *e; ··· 210 219 LIST_HEAD(reclaimed); 211 220 struct drbd_epoch_entry *e, *t; 212 221 213 - maybe_kick_lo(mdev); 214 222 spin_lock_irq(&mdev->req_lock); 215 223 reclaim_net_ee(mdev, &reclaimed); 216 224 spin_unlock_irq(&mdev->req_lock); ··· 426 436 while (!list_empty(head)) { 427 437 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE); 428 438 spin_unlock_irq(&mdev->req_lock); 429 - drbd_kick_lo(mdev); 430 - schedule(); 439 + io_schedule(); 431 440 finish_wait(&mdev->ee_wait, &wait); 432 441 spin_lock_irq(&mdev->req_lock); 433 442 } ··· 1136 1147 1137 1148 drbd_generic_make_request(mdev, fault_type, bio); 1138 1149 } while (bios); 1139 - maybe_kick_lo(mdev); 1140 1150 return 0; 1141 1151 1142 1152 fail: ··· 1154 1166 struct drbd_epoch *epoch; 1155 1167 1156 1168 inc_unacked(mdev); 1157 - 1158 - if (mdev->net_conf->wire_protocol != DRBD_PROT_C) 1159 - drbd_kick_lo(mdev); 1160 1169 1161 1170 mdev->current_epoch->barrier_nr = p->barrier; 1162 1171 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR); ··· 3541 3556 3542 3557 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3543 3558 { 3544 - if (mdev->state.disk >= D_INCONSISTENT) 3545 - drbd_kick_lo(mdev); 3546 - 3547 3559 /* Make sure we've acked all the TCP data associated 3548 3560 * with the data requests being unplugged */ 3549 3561 
drbd_tcp_quickack(mdev->data.socket);
-4
drivers/block/drbd/drbd_req.c
··· 960 960 bio_endio(req->private_bio, -EIO); 961 961 } 962 962 963 - /* we need to plug ALWAYS since we possibly need to kick lo_dev. 964 - * we plug after submit, so we won't miss an unplug event */ 965 - drbd_plug_device(mdev); 966 - 967 963 return 0; 968 964 969 965 fail_conflicting:
-1
drivers/block/drbd/drbd_worker.c
··· 792 792 * queue (or even the read operations for those packets 793 793 * is not finished by now). Retry in 100ms. */ 794 794 795 - drbd_kick_lo(mdev); 796 795 __set_current_state(TASK_INTERRUPTIBLE); 797 796 schedule_timeout(HZ / 10); 798 797 w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
-18
drivers/block/drbd/drbd_wrappers.h
··· 45 45 generic_make_request(bio); 46 46 } 47 47 48 - static inline void drbd_plug_device(struct drbd_conf *mdev) 49 - { 50 - struct request_queue *q; 51 - q = bdev_get_queue(mdev->this_bdev); 52 - 53 - spin_lock_irq(q->queue_lock); 54 - 55 - /* XXX the check on !blk_queue_plugged is redundant, 56 - * implicitly checked in blk_plug_device */ 57 - 58 - if (!blk_queue_plugged(q)) { 59 - blk_plug_device(q); 60 - del_timer(&q->unplug_timer); 61 - /* unplugging should not happen automatically... */ 62 - } 63 - spin_unlock_irq(q->queue_lock); 64 - } 65 - 66 48 static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm) 67 49 { 68 50 return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
-1
drivers/block/floppy.c
··· 3837 3837 bio.bi_end_io = floppy_rb0_complete; 3838 3838 3839 3839 submit_bio(READ, &bio); 3840 - generic_unplug_device(bdev_get_queue(bdev)); 3841 3840 process_fd_request(); 3842 3841 wait_for_completion(&complete); 3843 3842
-13
drivers/block/loop.c
··· 541 541 return 0; 542 542 } 543 543 544 - /* 545 - * kick off io on the underlying address space 546 - */ 547 - static void loop_unplug(struct request_queue *q) 548 - { 549 - struct loop_device *lo = q->queuedata; 550 - 551 - queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q); 552 - blk_run_address_space(lo->lo_backing_file->f_mapping); 553 - } 554 - 555 544 struct switch_request { 556 545 struct file *file; 557 546 struct completion wait; ··· 907 918 */ 908 919 blk_queue_make_request(lo->lo_queue, loop_make_request); 909 920 lo->lo_queue->queuedata = lo; 910 - lo->lo_queue->unplug_fn = loop_unplug; 911 921 912 922 if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) 913 923 blk_queue_flush(lo->lo_queue, REQ_FLUSH); ··· 1008 1020 1009 1021 kthread_stop(lo->lo_thread); 1010 1022 1011 - lo->lo_queue->unplug_fn = NULL; 1012 1023 lo->lo_backing_file = NULL; 1013 1024 1014 1025 loop_release_xfer(lo);
-2
drivers/block/pktcdvd.c
··· 1606 1606 min_sleep_time = pkt->sleep_time; 1607 1607 } 1608 1608 1609 - generic_unplug_device(bdev_get_queue(pd->bdev)); 1610 - 1611 1609 VPRINTK("kcdrwd: sleeping\n"); 1612 1610 residue = schedule_timeout(min_sleep_time); 1613 1611 VPRINTK("kcdrwd: wake up\n");
+1 -15
drivers/block/umem.c
··· 241 241 * 242 242 * Whenever IO on the active page completes, the Ready page is activated 243 243 * and the ex-Active page is clean out and made Ready. 244 - * Otherwise the Ready page is only activated when it becomes full, or 245 - * when mm_unplug_device is called via the unplug_io_fn. 244 + * Otherwise the Ready page is only activated when it becomes full. 246 245 * 247 246 * If a request arrives while both pages a full, it is queued, and b_rdev is 248 247 * overloaded to record whether it was a read or a write. ··· 330 331 page->headcnt = 0; 331 332 page->bio = NULL; 332 333 page->biotail = &page->bio; 333 - } 334 - 335 - static void mm_unplug_device(struct request_queue *q) 336 - { 337 - struct cardinfo *card = q->queuedata; 338 - unsigned long flags; 339 - 340 - spin_lock_irqsave(&card->lock, flags); 341 - if (blk_remove_plug(q)) 342 - activate(card); 343 - spin_unlock_irqrestore(&card->lock, flags); 344 334 } 345 335 346 336 /* ··· 523 535 *card->biotail = bio; 524 536 bio->bi_next = NULL; 525 537 card->biotail = &bio->bi_next; 526 - blk_plug_device(q); 527 538 spin_unlock_irq(&card->lock); 528 539 529 540 return 0; ··· 894 907 blk_queue_make_request(card->queue, mm_make_request); 895 908 card->queue->queue_lock = &card->lock; 896 909 card->queue->queuedata = card; 897 - card->queue->unplug_fn = mm_unplug_device; 898 910 899 911 tasklet_init(&card->tasklet, process_page, (unsigned long)card); 900 912
+1 -2
drivers/ide/ide-atapi.c
··· 233 233 234 234 drive->hwif->rq = NULL; 235 235 236 - elv_add_request(drive->queue, &drive->sense_rq, 237 - ELEVATOR_INSERT_FRONT, 0); 236 + elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT); 238 237 return 0; 239 238 } 240 239 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
-4
drivers/ide/ide-io.c
··· 549 549 550 550 if (rq) 551 551 blk_requeue_request(q, rq); 552 - if (!elv_queue_empty(q)) 553 - blk_plug_device(q); 554 552 } 555 553 556 554 void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) ··· 560 562 561 563 if (rq) 562 564 blk_requeue_request(q, rq); 563 - if (!elv_queue_empty(q)) 564 - blk_plug_device(q); 565 565 566 566 spin_unlock_irqrestore(q->queue_lock, flags); 567 567 }
+1 -1
drivers/ide/ide-park.c
··· 52 52 rq->cmd[0] = REQ_UNPARK_HEADS; 53 53 rq->cmd_len = 1; 54 54 rq->cmd_type = REQ_TYPE_SPECIAL; 55 - elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1); 55 + elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); 56 56 57 57 out: 58 58 return;
+1 -2
drivers/md/bitmap.c
··· 1339 1339 prepare_to_wait(&bitmap->overflow_wait, &__wait, 1340 1340 TASK_UNINTERRUPTIBLE); 1341 1341 spin_unlock_irq(&bitmap->lock); 1342 - md_unplug(bitmap->mddev); 1343 - schedule(); 1342 + io_schedule(); 1344 1343 finish_wait(&bitmap->overflow_wait, &__wait); 1345 1344 continue; 1346 1345 }
+1 -8
drivers/md/dm-crypt.c
··· 991 991 clone->bi_destructor = dm_crypt_bio_destructor; 992 992 } 993 993 994 - static void kcryptd_unplug(struct crypt_config *cc) 995 - { 996 - blk_unplug(bdev_get_queue(cc->dev->bdev)); 997 - } 998 - 999 994 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) 1000 995 { 1001 996 struct crypt_config *cc = io->target->private; ··· 1003 1008 * one in order to decrypt the whole bio data *afterwards*. 1004 1009 */ 1005 1010 clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs); 1006 - if (!clone) { 1007 - kcryptd_unplug(cc); 1011 + if (!clone) 1008 1012 return 1; 1009 - } 1010 1013 1011 1014 crypt_inc_pending(io); 1012 1015
+5 -47
drivers/md/dm-kcopyd.c
··· 37 37 unsigned int nr_pages; 38 38 unsigned int nr_free_pages; 39 39 40 - /* 41 - * Block devices to unplug. 42 - * Non-NULL pointer means that a block device has some pending requests 43 - * and needs to be unplugged. 44 - */ 45 - struct block_device *unplug[2]; 46 - 47 40 struct dm_io_client *io_client; 48 41 49 42 wait_queue_head_t destroyq; ··· 308 315 return 0; 309 316 } 310 317 311 - /* 312 - * Unplug the block device at the specified index. 313 - */ 314 - static void unplug(struct dm_kcopyd_client *kc, int rw) 315 - { 316 - if (kc->unplug[rw] != NULL) { 317 - blk_unplug(bdev_get_queue(kc->unplug[rw])); 318 - kc->unplug[rw] = NULL; 319 - } 320 - } 321 - 322 - /* 323 - * Prepare block device unplug. If there's another device 324 - * to be unplugged at the same array index, we unplug that 325 - * device first. 326 - */ 327 - static void prepare_unplug(struct dm_kcopyd_client *kc, int rw, 328 - struct block_device *bdev) 329 - { 330 - if (likely(kc->unplug[rw] == bdev)) 331 - return; 332 - unplug(kc, rw); 333 - kc->unplug[rw] = bdev; 334 - } 335 - 336 318 static void complete_io(unsigned long error, void *context) 337 319 { 338 320 struct kcopyd_job *job = (struct kcopyd_job *) context; ··· 354 386 .client = job->kc->io_client, 355 387 }; 356 388 357 - if (job->rw == READ) { 389 + if (job->rw == READ) 358 390 r = dm_io(&io_req, 1, &job->source, NULL); 359 - prepare_unplug(job->kc, READ, job->source.bdev); 360 - } else { 391 + else { 361 392 if (job->num_dests > 1) 362 393 io_req.bi_rw |= REQ_UNPLUG; 363 394 r = dm_io(&io_req, job->num_dests, job->dests, NULL); 364 - if (!(io_req.bi_rw & REQ_UNPLUG)) 365 - prepare_unplug(job->kc, WRITE, job->dests[0].bdev); 366 395 } 367 396 368 397 return r; ··· 431 466 { 432 467 struct dm_kcopyd_client *kc = container_of(work, 433 468 struct dm_kcopyd_client, kcopyd_work); 469 + struct blk_plug plug; 434 470 435 471 /* 436 472 * The order that these are called is *very* important. 
··· 439 473 * Pages jobs when successful will jump onto the io jobs 440 474 * list. io jobs call wake when they complete and it all 441 475 * starts again. 442 - * 443 - * Note that io_jobs add block devices to the unplug array, 444 - * this array is cleared with "unplug" calls. It is thus 445 - * forbidden to run complete_jobs after io_jobs and before 446 - * unplug because the block device could be destroyed in 447 - * job completion callback. 448 476 */ 477 + blk_start_plug(&plug); 449 478 process_jobs(&kc->complete_jobs, kc, run_complete_job); 450 479 process_jobs(&kc->pages_jobs, kc, run_pages_job); 451 480 process_jobs(&kc->io_jobs, kc, run_io_job); 452 - unplug(kc, READ); 453 - unplug(kc, WRITE); 481 + blk_finish_plug(&plug); 454 482 } 455 483 456 484 /* ··· 624 664 INIT_LIST_HEAD(&kc->complete_jobs); 625 665 INIT_LIST_HEAD(&kc->io_jobs); 626 666 INIT_LIST_HEAD(&kc->pages_jobs); 627 - 628 - memset(kc->unplug, 0, sizeof(kc->unplug)); 629 667 630 668 kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); 631 669 if (!kc->job_pool)
+1 -1
drivers/md/dm-raid.c
··· 394 394 { 395 395 struct raid_set *rs = container_of(cb, struct raid_set, callbacks); 396 396 397 - md_raid5_unplug_device(rs->md.private); 397 + md_raid5_kick_device(rs->md.private); 398 398 } 399 399 400 400 /*
-2
drivers/md/dm-raid1.c
··· 842 842 do_reads(ms, &reads); 843 843 do_writes(ms, &writes); 844 844 do_failures(ms, &failures); 845 - 846 - dm_table_unplug_all(ms->ti->table); 847 845 } 848 846 849 847 /*-----------------------------------------------------------------
-24
drivers/md/dm-table.c
··· 1275 1275 return 0; 1276 1276 } 1277 1277 1278 - void dm_table_unplug_all(struct dm_table *t) 1279 - { 1280 - struct dm_dev_internal *dd; 1281 - struct list_head *devices = dm_table_get_devices(t); 1282 - struct dm_target_callbacks *cb; 1283 - 1284 - list_for_each_entry(dd, devices, list) { 1285 - struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); 1286 - char b[BDEVNAME_SIZE]; 1287 - 1288 - if (likely(q)) 1289 - blk_unplug(q); 1290 - else 1291 - DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s", 1292 - dm_device_name(t->md), 1293 - bdevname(dd->dm_dev.bdev, b)); 1294 - } 1295 - 1296 - list_for_each_entry(cb, &t->target_callbacks, list) 1297 - if (cb->unplug_fn) 1298 - cb->unplug_fn(cb); 1299 - } 1300 - 1301 1278 struct mapped_device *dm_table_get_md(struct dm_table *t) 1302 1279 { 1303 1280 return t->md; ··· 1322 1345 EXPORT_SYMBOL(dm_table_get_md); 1323 1346 EXPORT_SYMBOL(dm_table_put); 1324 1347 EXPORT_SYMBOL(dm_table_get); 1325 - EXPORT_SYMBOL(dm_table_unplug_all);
+5 -28
drivers/md/dm.c
··· 807 807 dm_unprep_request(rq); 808 808 809 809 spin_lock_irqsave(q->queue_lock, flags); 810 - if (elv_queue_empty(q)) 811 - blk_plug_device(q); 812 810 blk_requeue_request(q, rq); 813 811 spin_unlock_irqrestore(q->queue_lock, flags); 814 812 ··· 1611 1613 * number of in-flight I/Os after the queue is stopped in 1612 1614 * dm_suspend(). 1613 1615 */ 1614 - while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) { 1616 + while (!blk_queue_stopped(q)) { 1615 1617 rq = blk_peek_request(q); 1616 1618 if (!rq) 1617 - goto plug_and_out; 1619 + goto delay_and_out; 1618 1620 1619 1621 /* always use block 0 to find the target for flushes for now */ 1620 1622 pos = 0; ··· 1625 1627 BUG_ON(!dm_target_is_valid(ti)); 1626 1628 1627 1629 if (ti->type->busy && ti->type->busy(ti)) 1628 - goto plug_and_out; 1630 + goto delay_and_out; 1629 1631 1630 1632 blk_start_request(rq); 1631 1633 clone = rq->special; ··· 1645 1647 BUG_ON(!irqs_disabled()); 1646 1648 spin_lock(q->queue_lock); 1647 1649 1648 - plug_and_out: 1649 - if (!elv_queue_empty(q)) 1650 - /* Some requests still remain, retry later */ 1651 - blk_plug_device(q); 1652 - 1650 + delay_and_out: 1651 + blk_delay_queue(q, HZ / 10); 1653 1652 out: 1654 1653 dm_table_put(map); 1655 1654 ··· 1673 1678 dm_table_put(map); 1674 1679 1675 1680 return r; 1676 - } 1677 - 1678 - static void dm_unplug_all(struct request_queue *q) 1679 - { 1680 - struct mapped_device *md = q->queuedata; 1681 - struct dm_table *map = dm_get_live_table(md); 1682 - 1683 - if (map) { 1684 - if (dm_request_based(md)) 1685 - generic_unplug_device(q); 1686 - 1687 - dm_table_unplug_all(map); 1688 - dm_table_put(map); 1689 - } 1690 1681 } 1691 1682 1692 1683 static int dm_any_congested(void *congested_data, int bdi_bits) ··· 1798 1817 md->queue->backing_dev_info.congested_data = md; 1799 1818 blk_queue_make_request(md->queue, dm_request); 1800 1819 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 1801 - md->queue->unplug_fn = dm_unplug_all; 1802 1820 
blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1803 1821 blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); 1804 1822 } ··· 2243 2263 int r = 0; 2244 2264 DECLARE_WAITQUEUE(wait, current); 2245 2265 2246 - dm_unplug_all(md->queue); 2247 - 2248 2266 add_wait_queue(&md->wait, &wait); 2249 2267 2250 2268 while (1) { ··· 2517 2539 2518 2540 clear_bit(DMF_SUSPENDED, &md->flags); 2519 2541 2520 - dm_table_unplug_all(map); 2521 2542 r = 0; 2522 2543 out: 2523 2544 dm_table_put(map);
-17
drivers/md/linear.c
··· 87 87 return maxsectors << 9; 88 88 } 89 89 90 - static void linear_unplug(struct request_queue *q) 91 - { 92 - mddev_t *mddev = q->queuedata; 93 - linear_conf_t *conf; 94 - int i; 95 - 96 - rcu_read_lock(); 97 - conf = rcu_dereference(mddev->private); 98 - 99 - for (i=0; i < mddev->raid_disks; i++) { 100 - struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev); 101 - blk_unplug(r_queue); 102 - } 103 - rcu_read_unlock(); 104 - } 105 - 106 90 static int linear_congested(void *data, int bits) 107 91 { 108 92 mddev_t *mddev = data; ··· 209 225 md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); 210 226 211 227 blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec); 212 - mddev->queue->unplug_fn = linear_unplug; 213 228 mddev->queue->backing_dev_info.congested_fn = linear_congested; 214 229 mddev->queue->backing_dev_info.congested_data = mddev; 215 230 md_integrity_register(mddev);
-7
drivers/md/md.c
··· 4812 4812 __md_stop_writes(mddev); 4813 4813 md_stop(mddev); 4814 4814 mddev->queue->merge_bvec_fn = NULL; 4815 - mddev->queue->unplug_fn = NULL; 4816 4815 mddev->queue->backing_dev_info.congested_fn = NULL; 4817 4816 4818 4817 /* tell userspace to handle 'inactive' */ ··· 6668 6669 6669 6670 void md_unplug(mddev_t *mddev) 6670 6671 { 6671 - if (mddev->queue) 6672 - blk_unplug(mddev->queue); 6673 6672 if (mddev->plug) 6674 6673 mddev->plug->unplug_fn(mddev->plug); 6675 6674 } ··· 6850 6853 >= mddev->resync_max - mddev->curr_resync_completed 6851 6854 )) { 6852 6855 /* time to update curr_resync_completed */ 6853 - md_unplug(mddev); 6854 6856 wait_event(mddev->recovery_wait, 6855 6857 atomic_read(&mddev->recovery_active) == 0); 6856 6858 mddev->curr_resync_completed = j; ··· 6925 6929 * about not overloading the IO subsystem. (things like an 6926 6930 * e2fsck being done on the RAID array should execute fast) 6927 6931 */ 6928 - md_unplug(mddev); 6929 6932 cond_resched(); 6930 6933 6931 6934 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 ··· 6943 6948 * this also signals 'finished resyncing' to md_stop 6944 6949 */ 6945 6950 out: 6946 - md_unplug(mddev); 6947 - 6948 6951 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 6949 6952 6950 6953 /* tell personality that we are finished */
-31
drivers/md/multipath.c
··· 106 106 rdev_dec_pending(rdev, conf->mddev); 107 107 } 108 108 109 - static void unplug_slaves(mddev_t *mddev) 110 - { 111 - multipath_conf_t *conf = mddev->private; 112 - int i; 113 - 114 - rcu_read_lock(); 115 - for (i=0; i<mddev->raid_disks; i++) { 116 - mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); 117 - if (rdev && !test_bit(Faulty, &rdev->flags) 118 - && atomic_read(&rdev->nr_pending)) { 119 - struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 120 - 121 - atomic_inc(&rdev->nr_pending); 122 - rcu_read_unlock(); 123 - 124 - blk_unplug(r_queue); 125 - 126 - rdev_dec_pending(rdev, mddev); 127 - rcu_read_lock(); 128 - } 129 - } 130 - rcu_read_unlock(); 131 - } 132 - 133 - static void multipath_unplug(struct request_queue *q) 134 - { 135 - unplug_slaves(q->queuedata); 136 - } 137 - 138 - 139 109 static int multipath_make_request(mddev_t *mddev, struct bio * bio) 140 110 { 141 111 multipath_conf_t *conf = mddev->private; ··· 488 518 */ 489 519 md_set_array_sectors(mddev, multipath_size(mddev, 0, 0)); 490 520 491 - mddev->queue->unplug_fn = multipath_unplug; 492 521 mddev->queue->backing_dev_info.congested_fn = multipath_congested; 493 522 mddev->queue->backing_dev_info.congested_data = mddev; 494 523 md_integrity_register(mddev);
-16
drivers/md/raid0.c
··· 25 25 #include "raid0.h" 26 26 #include "raid5.h" 27 27 28 - static void raid0_unplug(struct request_queue *q) 29 - { 30 - mddev_t *mddev = q->queuedata; 31 - raid0_conf_t *conf = mddev->private; 32 - mdk_rdev_t **devlist = conf->devlist; 33 - int raid_disks = conf->strip_zone[0].nb_dev; 34 - int i; 35 - 36 - for (i=0; i < raid_disks; i++) { 37 - struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev); 38 - 39 - blk_unplug(r_queue); 40 - } 41 - } 42 - 43 28 static int raid0_congested(void *data, int bits) 44 29 { 45 30 mddev_t *mddev = data; ··· 257 272 mdname(mddev), 258 273 (unsigned long long)smallest->sectors); 259 274 } 260 - mddev->queue->unplug_fn = raid0_unplug; 261 275 mddev->queue->backing_dev_info.congested_fn = raid0_congested; 262 276 mddev->queue->backing_dev_info.congested_data = mddev; 263 277
+17 -66
drivers/md/raid1.c
··· 52 52 #define NR_RAID1_BIOS 256 53 53 54 54 55 - static void unplug_slaves(mddev_t *mddev); 56 - 57 55 static void allow_barrier(conf_t *conf); 58 56 static void lower_barrier(conf_t *conf); 59 57 60 58 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 61 59 { 62 60 struct pool_info *pi = data; 63 - r1bio_t *r1_bio; 64 61 int size = offsetof(r1bio_t, bios[pi->raid_disks]); 65 62 66 63 /* allocate a r1bio with room for raid_disks entries in the bios array */ 67 - r1_bio = kzalloc(size, gfp_flags); 68 - if (!r1_bio && pi->mddev) 69 - unplug_slaves(pi->mddev); 70 - 71 - return r1_bio; 64 + return kzalloc(size, gfp_flags); 72 65 } 73 66 74 67 static void r1bio_pool_free(void *r1_bio, void *data) ··· 84 91 int i, j; 85 92 86 93 r1_bio = r1bio_pool_alloc(gfp_flags, pi); 87 - if (!r1_bio) { 88 - unplug_slaves(pi->mddev); 94 + if (!r1_bio) 89 95 return NULL; 90 - } 91 96 92 97 /* 93 98 * Allocate bios : 1 for reading, n-1 for writing ··· 511 520 return new_disk; 512 521 } 513 522 514 - static void unplug_slaves(mddev_t *mddev) 515 - { 516 - conf_t *conf = mddev->private; 517 - int i; 518 - 519 - rcu_read_lock(); 520 - for (i=0; i<mddev->raid_disks; i++) { 521 - mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 522 - if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 523 - struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 524 - 525 - atomic_inc(&rdev->nr_pending); 526 - rcu_read_unlock(); 527 - 528 - blk_unplug(r_queue); 529 - 530 - rdev_dec_pending(rdev, mddev); 531 - rcu_read_lock(); 532 - } 533 - } 534 - rcu_read_unlock(); 535 - } 536 - 537 - static void raid1_unplug(struct request_queue *q) 538 - { 539 - mddev_t *mddev = q->queuedata; 540 - 541 - unplug_slaves(mddev); 542 - md_wakeup_thread(mddev->thread); 543 - } 544 - 545 523 static int raid1_congested(void *data, int bits) 546 524 { 547 525 mddev_t *mddev = data; ··· 540 580 } 541 581 542 582 543 - static int flush_pending_writes(conf_t *conf) 583 + 
static void flush_pending_writes(conf_t *conf) 544 584 { 545 585 /* Any writes that have been queued but are awaiting 546 586 * bitmap updates get flushed here. 547 - * We return 1 if any requests were actually submitted. 548 587 */ 549 - int rv = 0; 550 - 551 588 spin_lock_irq(&conf->device_lock); 552 589 553 590 if (conf->pending_bio_list.head) { 554 591 struct bio *bio; 555 592 bio = bio_list_get(&conf->pending_bio_list); 556 - blk_remove_plug(conf->mddev->queue); 557 593 spin_unlock_irq(&conf->device_lock); 558 594 /* flush any pending bitmap writes to 559 595 * disk before proceeding w/ I/O */ ··· 561 605 generic_make_request(bio); 562 606 bio = next; 563 607 } 564 - rv = 1; 565 608 } else 566 609 spin_unlock_irq(&conf->device_lock); 567 - return rv; 610 + } 611 + 612 + static void md_kick_device(mddev_t *mddev) 613 + { 614 + blk_flush_plug(current); 615 + md_wakeup_thread(mddev->thread); 568 616 } 569 617 570 618 /* Barriers.... ··· 600 640 601 641 /* Wait until no block IO is waiting */ 602 642 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, 603 - conf->resync_lock, 604 - raid1_unplug(conf->mddev->queue)); 643 + conf->resync_lock, md_kick_device(conf->mddev)); 605 644 606 645 /* block any new IO from starting */ 607 646 conf->barrier++; ··· 608 649 /* Now wait for all pending IO to complete */ 609 650 wait_event_lock_irq(conf->wait_barrier, 610 651 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 611 - conf->resync_lock, 612 - raid1_unplug(conf->mddev->queue)); 652 + conf->resync_lock, md_kick_device(conf->mddev)); 613 653 614 654 spin_unlock_irq(&conf->resync_lock); 615 655 } ··· 630 672 conf->nr_waiting++; 631 673 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 632 674 conf->resync_lock, 633 - raid1_unplug(conf->mddev->queue)); 675 + md_kick_device(conf->mddev)); 634 676 conf->nr_waiting--; 635 677 } 636 678 conf->nr_pending++; ··· 667 709 conf->nr_pending == conf->nr_queued+1, 668 710 conf->resync_lock, 669 711 ({ 
flush_pending_writes(conf); 670 - raid1_unplug(conf->mddev->queue); })); 712 + md_kick_device(conf->mddev); })); 671 713 spin_unlock_irq(&conf->resync_lock); 672 714 } 673 715 static void unfreeze_array(conf_t *conf) ··· 917 959 atomic_inc(&r1_bio->remaining); 918 960 spin_lock_irqsave(&conf->device_lock, flags); 919 961 bio_list_add(&conf->pending_bio_list, mbio); 920 - blk_plug_device(mddev->queue); 921 962 spin_unlock_irqrestore(&conf->device_lock, flags); 922 963 } 923 964 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); ··· 925 968 /* In case raid1d snuck in to freeze_array */ 926 969 wake_up(&conf->wait_barrier); 927 970 928 - if (do_sync) 971 + if (do_sync || !bitmap) 929 972 md_wakeup_thread(mddev->thread); 930 973 931 974 return 0; ··· 1515 1558 unsigned long flags; 1516 1559 conf_t *conf = mddev->private; 1517 1560 struct list_head *head = &conf->retry_list; 1518 - int unplug=0; 1519 1561 mdk_rdev_t *rdev; 1520 1562 1521 1563 md_check_recovery(mddev); ··· 1522 1566 for (;;) { 1523 1567 char b[BDEVNAME_SIZE]; 1524 1568 1525 - unplug += flush_pending_writes(conf); 1569 + flush_pending_writes(conf); 1526 1570 1527 1571 spin_lock_irqsave(&conf->device_lock, flags); 1528 1572 if (list_empty(head)) { ··· 1536 1580 1537 1581 mddev = r1_bio->mddev; 1538 1582 conf = mddev->private; 1539 - if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 1583 + if (test_bit(R1BIO_IsSync, &r1_bio->state)) 1540 1584 sync_request_write(mddev, r1_bio); 1541 - unplug = 1; 1542 - } else { 1585 + else { 1543 1586 int disk; 1544 1587 1545 1588 /* we got a read error. Maybe the drive is bad. 
Maybe just ··· 1588 1633 bio->bi_end_io = raid1_end_read_request; 1589 1634 bio->bi_rw = READ | do_sync; 1590 1635 bio->bi_private = r1_bio; 1591 - unplug = 1; 1592 1636 generic_make_request(bio); 1593 1637 } 1594 1638 } 1595 1639 cond_resched(); 1596 1640 } 1597 - if (unplug) 1598 - unplug_slaves(mddev); 1599 1641 } 1600 1642 1601 1643 ··· 2016 2064 2017 2065 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 2018 2066 2019 - mddev->queue->unplug_fn = raid1_unplug; 2020 2067 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2021 2068 mddev->queue->backing_dev_info.congested_data = mddev; 2022 2069 md_integrity_register(mddev);
+19 -68
drivers/md/raid10.c
··· 57 57 */ 58 58 #define NR_RAID10_BIOS 256 59 59 60 - static void unplug_slaves(mddev_t *mddev); 61 - 62 60 static void allow_barrier(conf_t *conf); 63 61 static void lower_barrier(conf_t *conf); 64 62 65 63 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 66 64 { 67 65 conf_t *conf = data; 68 - r10bio_t *r10_bio; 69 66 int size = offsetof(struct r10bio_s, devs[conf->copies]); 70 67 71 68 /* allocate a r10bio with room for raid_disks entries in the bios array */ 72 - r10_bio = kzalloc(size, gfp_flags); 73 - if (!r10_bio && conf->mddev) 74 - unplug_slaves(conf->mddev); 75 - 76 - return r10_bio; 69 + return kzalloc(size, gfp_flags); 77 70 } 78 71 79 72 static void r10bio_pool_free(void *r10_bio, void *data) ··· 99 106 int nalloc; 100 107 101 108 r10_bio = r10bio_pool_alloc(gfp_flags, conf); 102 - if (!r10_bio) { 103 - unplug_slaves(conf->mddev); 109 + if (!r10_bio) 104 110 return NULL; 105 - } 106 111 107 112 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) 108 113 nalloc = conf->copies; /* resync */ ··· 588 597 return disk; 589 598 } 590 599 591 - static void unplug_slaves(mddev_t *mddev) 592 - { 593 - conf_t *conf = mddev->private; 594 - int i; 595 - 596 - rcu_read_lock(); 597 - for (i=0; i < conf->raid_disks; i++) { 598 - mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 599 - if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 600 - struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 601 - 602 - atomic_inc(&rdev->nr_pending); 603 - rcu_read_unlock(); 604 - 605 - blk_unplug(r_queue); 606 - 607 - rdev_dec_pending(rdev, mddev); 608 - rcu_read_lock(); 609 - } 610 - } 611 - rcu_read_unlock(); 612 - } 613 - 614 - static void raid10_unplug(struct request_queue *q) 615 - { 616 - mddev_t *mddev = q->queuedata; 617 - 618 - unplug_slaves(q->queuedata); 619 - md_wakeup_thread(mddev->thread); 620 - } 621 - 622 600 static int raid10_congested(void *data, int bits) 623 601 { 624 602 mddev_t *mddev = data; ··· 
609 649 return ret; 610 650 } 611 651 612 - static int flush_pending_writes(conf_t *conf) 652 + static void flush_pending_writes(conf_t *conf) 613 653 { 614 654 /* Any writes that have been queued but are awaiting 615 655 * bitmap updates get flushed here. 616 - * We return 1 if any requests were actually submitted. 617 656 */ 618 - int rv = 0; 619 - 620 657 spin_lock_irq(&conf->device_lock); 621 658 622 659 if (conf->pending_bio_list.head) { 623 660 struct bio *bio; 624 661 bio = bio_list_get(&conf->pending_bio_list); 625 - blk_remove_plug(conf->mddev->queue); 626 662 spin_unlock_irq(&conf->device_lock); 627 663 /* flush any pending bitmap writes to disk 628 664 * before proceeding w/ I/O */ ··· 630 674 generic_make_request(bio); 631 675 bio = next; 632 676 } 633 - rv = 1; 634 677 } else 635 678 spin_unlock_irq(&conf->device_lock); 636 - return rv; 637 679 } 680 + 681 + static void md_kick_device(mddev_t *mddev) 682 + { 683 + blk_flush_plug(current); 684 + md_wakeup_thread(mddev->thread); 685 + } 686 + 638 687 /* Barriers.... 639 688 * Sometimes we need to suspend IO while we do something else, 640 689 * either some resync/recovery, or reconfigure the array. 
··· 669 708 670 709 /* Wait until no block IO is waiting (unless 'force') */ 671 710 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting, 672 - conf->resync_lock, 673 - raid10_unplug(conf->mddev->queue)); 711 + conf->resync_lock, md_kick_device(conf->mddev)); 674 712 675 713 /* block any new IO from starting */ 676 714 conf->barrier++; ··· 677 717 /* No wait for all pending IO to complete */ 678 718 wait_event_lock_irq(conf->wait_barrier, 679 719 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 680 - conf->resync_lock, 681 - raid10_unplug(conf->mddev->queue)); 720 + conf->resync_lock, md_kick_device(conf->mddev)); 682 721 683 722 spin_unlock_irq(&conf->resync_lock); 684 723 } ··· 698 739 conf->nr_waiting++; 699 740 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 700 741 conf->resync_lock, 701 - raid10_unplug(conf->mddev->queue)); 742 + md_kick_device(conf->mddev)); 702 743 conf->nr_waiting--; 703 744 } 704 745 conf->nr_pending++; ··· 735 776 conf->nr_pending == conf->nr_queued+1, 736 777 conf->resync_lock, 737 778 ({ flush_pending_writes(conf); 738 - raid10_unplug(conf->mddev->queue); })); 779 + md_kick_device(conf->mddev); })); 739 780 spin_unlock_irq(&conf->resync_lock); 740 781 } 741 782 ··· 930 971 atomic_inc(&r10_bio->remaining); 931 972 spin_lock_irqsave(&conf->device_lock, flags); 932 973 bio_list_add(&conf->pending_bio_list, mbio); 933 - blk_plug_device(mddev->queue); 934 974 spin_unlock_irqrestore(&conf->device_lock, flags); 935 975 } 936 976 ··· 946 988 /* In case raid10d snuck in to freeze_array */ 947 989 wake_up(&conf->wait_barrier); 948 990 949 - if (do_sync) 991 + if (do_sync || !mddev->bitmap) 950 992 md_wakeup_thread(mddev->thread); 951 993 952 994 return 0; ··· 1639 1681 unsigned long flags; 1640 1682 conf_t *conf = mddev->private; 1641 1683 struct list_head *head = &conf->retry_list; 1642 - int unplug=0; 1643 1684 mdk_rdev_t *rdev; 1644 1685 1645 1686 md_check_recovery(mddev); ··· 1646 1689 for (;;) { 1647 1690 char 
b[BDEVNAME_SIZE]; 1648 1691 1649 - unplug += flush_pending_writes(conf); 1692 + flush_pending_writes(conf); 1650 1693 1651 1694 spin_lock_irqsave(&conf->device_lock, flags); 1652 1695 if (list_empty(head)) { ··· 1660 1703 1661 1704 mddev = r10_bio->mddev; 1662 1705 conf = mddev->private; 1663 - if (test_bit(R10BIO_IsSync, &r10_bio->state)) { 1706 + if (test_bit(R10BIO_IsSync, &r10_bio->state)) 1664 1707 sync_request_write(mddev, r10_bio); 1665 - unplug = 1; 1666 - } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) { 1708 + else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) 1667 1709 recovery_request_write(mddev, r10_bio); 1668 - unplug = 1; 1669 - } else { 1710 + else { 1670 1711 int mirror; 1671 1712 /* we got a read error. Maybe the drive is bad. Maybe just 1672 1713 * the block and we can fix it. ··· 1711 1756 bio->bi_rw = READ | do_sync; 1712 1757 bio->bi_private = r10_bio; 1713 1758 bio->bi_end_io = raid10_end_read_request; 1714 - unplug = 1; 1715 1759 generic_make_request(bio); 1716 1760 } 1717 1761 } 1718 1762 cond_resched(); 1719 1763 } 1720 - if (unplug) 1721 - unplug_slaves(mddev); 1722 1764 } 1723 1765 1724 1766 ··· 2328 2376 md_set_array_sectors(mddev, size); 2329 2377 mddev->resync_max_sectors = size; 2330 2378 2331 - mddev->queue->unplug_fn = raid10_unplug; 2332 2379 mddev->queue->backing_dev_info.congested_fn = raid10_congested; 2333 2380 mddev->queue->backing_dev_info.congested_data = mddev; 2334 2381
+8 -54
drivers/md/raid5.c
··· 433 433 return 0; 434 434 } 435 435 436 - static void unplug_slaves(mddev_t *mddev); 437 - 438 436 static struct stripe_head * 439 437 get_active_stripe(raid5_conf_t *conf, sector_t sector, 440 438 int previous, int noblock, int noquiesce) ··· 461 463 < (conf->max_nr_stripes *3/4) 462 464 || !conf->inactive_blocked), 463 465 conf->device_lock, 464 - md_raid5_unplug_device(conf) 465 - ); 466 + md_raid5_kick_device(conf)); 466 467 conf->inactive_blocked = 0; 467 468 } else 468 469 init_stripe(sh, sector, previous); ··· 1470 1473 wait_event_lock_irq(conf->wait_for_stripe, 1471 1474 !list_empty(&conf->inactive_list), 1472 1475 conf->device_lock, 1473 - unplug_slaves(conf->mddev) 1474 - ); 1476 + blk_flush_plug(current)); 1475 1477 osh = get_free_stripe(conf); 1476 1478 spin_unlock_irq(&conf->device_lock); 1477 1479 atomic_set(&nsh->count, 1); ··· 3641 3645 } 3642 3646 } 3643 3647 3644 - static void unplug_slaves(mddev_t *mddev) 3648 + void md_raid5_kick_device(raid5_conf_t *conf) 3645 3649 { 3646 - raid5_conf_t *conf = mddev->private; 3647 - int i; 3648 - int devs = max(conf->raid_disks, conf->previous_raid_disks); 3649 - 3650 - rcu_read_lock(); 3651 - for (i = 0; i < devs; i++) { 3652 - mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3653 - if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3654 - struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3655 - 3656 - atomic_inc(&rdev->nr_pending); 3657 - rcu_read_unlock(); 3658 - 3659 - blk_unplug(r_queue); 3660 - 3661 - rdev_dec_pending(rdev, mddev); 3662 - rcu_read_lock(); 3663 - } 3664 - } 3665 - rcu_read_unlock(); 3666 - } 3667 - 3668 - void md_raid5_unplug_device(raid5_conf_t *conf) 3669 - { 3670 - unsigned long flags; 3671 - 3672 - spin_lock_irqsave(&conf->device_lock, flags); 3673 - 3674 - if (plugger_remove_plug(&conf->plug)) { 3675 - conf->seq_flush++; 3676 - raid5_activate_delayed(conf); 3677 - } 3650 + blk_flush_plug(current); 3651 + 
raid5_activate_delayed(conf); 3678 3652 md_wakeup_thread(conf->mddev->thread); 3679 - 3680 - spin_unlock_irqrestore(&conf->device_lock, flags); 3681 - 3682 - unplug_slaves(conf->mddev); 3683 3653 } 3684 - EXPORT_SYMBOL_GPL(md_raid5_unplug_device); 3654 + EXPORT_SYMBOL_GPL(md_raid5_kick_device); 3685 3655 3686 3656 static void raid5_unplug(struct plug_handle *plug) 3687 3657 { 3688 3658 raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug); 3689 - md_raid5_unplug_device(conf); 3690 - } 3691 3659 3692 - static void raid5_unplug_queue(struct request_queue *q) 3693 - { 3694 - mddev_t *mddev = q->queuedata; 3695 - md_raid5_unplug_device(mddev->private); 3660 + md_raid5_kick_device(conf); 3696 3661 } 3697 3662 3698 3663 int md_raid5_congested(mddev_t *mddev, int bits) ··· 4057 4100 * add failed due to overlap. Flush everything 4058 4101 * and wait a while 4059 4102 */ 4060 - md_raid5_unplug_device(conf); 4103 + md_raid5_kick_device(conf); 4061 4104 release_stripe(sh); 4062 4105 schedule(); 4063 4106 goto retry; ··· 4322 4365 4323 4366 if (sector_nr >= max_sector) { 4324 4367 /* just being told to finish up .. nothing much to do */ 4325 - unplug_slaves(mddev); 4326 4368 4327 4369 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 4328 4370 end_reshape(conf); ··· 4525 4569 spin_unlock_irq(&conf->device_lock); 4526 4570 4527 4571 async_tx_issue_pending_all(); 4528 - unplug_slaves(mddev); 4529 4572 4530 4573 pr_debug("--- raid5d inactive\n"); 4531 4574 } ··· 5160 5205 mddev->queue->backing_dev_info.congested_data = mddev; 5161 5206 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 5162 5207 mddev->queue->queue_lock = &conf->device_lock; 5163 - mddev->queue->unplug_fn = raid5_unplug_queue; 5164 5208 5165 5209 chunk_size = mddev->chunk_sectors << 9; 5166 5210 blk_queue_io_min(mddev->queue, chunk_size);
+1 -1
drivers/md/raid5.h
··· 503 503 } 504 504 505 505 extern int md_raid5_congested(mddev_t *mddev, int bits); 506 - extern void md_raid5_unplug_device(raid5_conf_t *conf); 506 + extern void md_raid5_kick_device(raid5_conf_t *conf); 507 507 extern int raid5_set_cache_size(mddev_t *mddev, int size); 508 508 #endif
+1 -5
drivers/message/i2o/i2o_block.c
··· 895 895 { 896 896 struct request *req; 897 897 898 - while (!blk_queue_plugged(q)) { 899 - req = blk_peek_request(q); 900 - if (!req) 901 - break; 902 - 898 + while ((req = blk_peek_request(q)) != NULL) { 903 899 if (req->cmd_type == REQ_TYPE_FS) { 904 900 struct i2o_block_delayed_request *dreq; 905 901 struct i2o_block_request *ireq = req->special;
+1 -2
drivers/mmc/card/queue.c
··· 55 55 56 56 spin_lock_irq(q->queue_lock); 57 57 set_current_state(TASK_INTERRUPTIBLE); 58 - if (!blk_queue_plugged(q)) 59 - req = blk_fetch_request(q); 58 + req = blk_fetch_request(q); 60 59 mq->req = req; 61 60 spin_unlock_irq(q->queue_lock); 62 61
+1 -1
drivers/s390/block/dasd.c
··· 1917 1917 return; 1918 1918 } 1919 1919 /* Now we try to fetch requests from the request queue */ 1920 - while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { 1920 + while ((req = blk_peek_request(queue))) { 1921 1921 if (basedev->features & DASD_FEATURE_READONLY && 1922 1922 rq_data_dir(req) == WRITE) { 1923 1923 DBF_DEV_EVENT(DBF_ERR, basedev,
-1
drivers/s390/char/tape_block.c
··· 161 161 162 162 spin_lock_irq(&device->blk_data.request_queue_lock); 163 163 while ( 164 - !blk_queue_plugged(queue) && 165 164 blk_peek_request(queue) && 166 165 nr_queued < TAPEBLOCK_MIN_REQUEUE 167 166 ) {
+1 -1
drivers/scsi/scsi_transport_fc.c
··· 3913 3913 if (!get_device(dev)) 3914 3914 return; 3915 3915 3916 - while (!blk_queue_plugged(q)) { 3916 + while (1) { 3917 3917 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) && 3918 3918 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) 3919 3919 break;
+1 -5
drivers/scsi/scsi_transport_sas.c
··· 173 173 int ret; 174 174 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 175 175 176 - while (!blk_queue_plugged(q)) { 177 - req = blk_fetch_request(q); 178 - if (!req) 179 - break; 180 - 176 + while ((req = blk_fetch_request(q)) != NULL) { 181 177 spin_unlock_irq(q->queue_lock); 182 178 183 179 handler = to_sas_internal(shost->transportt)->f->smp_handler;
+3 -4
drivers/target/target_core_iblock.c
··· 392 392 { 393 393 struct se_device *dev = task->task_se_cmd->se_dev; 394 394 struct iblock_req *req = IBLOCK_REQ(task); 395 - struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev; 396 - struct request_queue *q = bdev_get_queue(ibd->ibd_bd); 397 395 struct bio *bio = req->ib_bio, *nbio = NULL; 396 + struct blk_plug plug; 398 397 int rw; 399 398 400 399 if (task->task_data_direction == DMA_TO_DEVICE) { ··· 411 412 rw = READ; 412 413 } 413 414 415 + blk_start_plug(&plug); 414 416 while (bio) { 415 417 nbio = bio->bi_next; 416 418 bio->bi_next = NULL; ··· 421 421 submit_bio(rw, bio); 422 422 bio = nbio; 423 423 } 424 + blk_finish_plug(&plug); 424 425 425 - if (q->unplug_fn) 426 - q->unplug_fn(q); 427 426 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 428 427 } 429 428
-1
fs/adfs/inode.c
··· 73 73 static const struct address_space_operations adfs_aops = { 74 74 .readpage = adfs_readpage, 75 75 .writepage = adfs_writepage, 76 - .sync_page = block_sync_page, 77 76 .write_begin = adfs_write_begin, 78 77 .write_end = generic_write_end, 79 78 .bmap = _adfs_bmap
-2
fs/affs/file.c
··· 429 429 const struct address_space_operations affs_aops = { 430 430 .readpage = affs_readpage, 431 431 .writepage = affs_writepage, 432 - .sync_page = block_sync_page, 433 432 .write_begin = affs_write_begin, 434 433 .write_end = generic_write_end, 435 434 .bmap = _affs_bmap ··· 785 786 const struct address_space_operations affs_aops_ofs = { 786 787 .readpage = affs_readpage_ofs, 787 788 //.writepage = affs_writepage_ofs, 788 - //.sync_page = affs_sync_page_ofs, 789 789 .write_begin = affs_write_begin_ofs, 790 790 .write_end = affs_write_end_ofs 791 791 };
+3 -1
fs/aio.c
··· 1550 1550 struct hlist_node *pos, *n; 1551 1551 int i; 1552 1552 1553 + /* 1554 + * TODO: kill this 1555 + */ 1553 1556 for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) { 1554 1557 hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) { 1555 - blk_run_address_space(abe->mapping); 1556 1558 iput(abe->mapping->host); 1557 1559 hlist_del(&abe->list); 1558 1560 mempool_free(abe, abe_pool);
-1
fs/befs/linuxvfs.c
··· 75 75 76 76 static const struct address_space_operations befs_aops = { 77 77 .readpage = befs_readpage, 78 - .sync_page = block_sync_page, 79 78 .bmap = befs_bmap, 80 79 }; 81 80
-1
fs/bfs/file.c
··· 186 186 const struct address_space_operations bfs_aops = { 187 187 .readpage = bfs_readpage, 188 188 .writepage = bfs_writepage, 189 - .sync_page = block_sync_page, 190 189 .write_begin = bfs_write_begin, 191 190 .write_end = generic_write_end, 192 191 .bmap = bfs_bmap,
-1
fs/block_dev.c
··· 1520 1520 static const struct address_space_operations def_blk_aops = { 1521 1521 .readpage = blkdev_readpage, 1522 1522 .writepage = blkdev_writepage, 1523 - .sync_page = block_sync_page, 1524 1523 .write_begin = blkdev_write_begin, 1525 1524 .write_end = blkdev_write_end, 1526 1525 .writepages = generic_writepages,
-79
fs/btrfs/disk-io.c
··· 847 847 .writepages = btree_writepages, 848 848 .releasepage = btree_releasepage, 849 849 .invalidatepage = btree_invalidatepage, 850 - .sync_page = block_sync_page, 851 850 #ifdef CONFIG_MIGRATION 852 851 .migratepage = btree_migratepage, 853 852 #endif ··· 1330 1331 } 1331 1332 1332 1333 /* 1333 - * this unplugs every device on the box, and it is only used when page 1334 - * is null 1335 - */ 1336 - static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) 1337 - { 1338 - struct btrfs_device *device; 1339 - struct btrfs_fs_info *info; 1340 - 1341 - info = (struct btrfs_fs_info *)bdi->unplug_io_data; 1342 - list_for_each_entry(device, &info->fs_devices->devices, dev_list) { 1343 - if (!device->bdev) 1344 - continue; 1345 - 1346 - bdi = blk_get_backing_dev_info(device->bdev); 1347 - if (bdi->unplug_io_fn) 1348 - bdi->unplug_io_fn(bdi, page); 1349 - } 1350 - } 1351 - 1352 - static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) 1353 - { 1354 - struct inode *inode; 1355 - struct extent_map_tree *em_tree; 1356 - struct extent_map *em; 1357 - struct address_space *mapping; 1358 - u64 offset; 1359 - 1360 - /* the generic O_DIRECT read code does this */ 1361 - if (1 || !page) { 1362 - __unplug_io_fn(bdi, page); 1363 - return; 1364 - } 1365 - 1366 - /* 1367 - * page->mapping may change at any time. 
Get a consistent copy 1368 - * and use that for everything below 1369 - */ 1370 - smp_mb(); 1371 - mapping = page->mapping; 1372 - if (!mapping) 1373 - return; 1374 - 1375 - inode = mapping->host; 1376 - 1377 - /* 1378 - * don't do the expensive searching for a small number of 1379 - * devices 1380 - */ 1381 - if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) { 1382 - __unplug_io_fn(bdi, page); 1383 - return; 1384 - } 1385 - 1386 - offset = page_offset(page); 1387 - 1388 - em_tree = &BTRFS_I(inode)->extent_tree; 1389 - read_lock(&em_tree->lock); 1390 - em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); 1391 - read_unlock(&em_tree->lock); 1392 - if (!em) { 1393 - __unplug_io_fn(bdi, page); 1394 - return; 1395 - } 1396 - 1397 - if (em->block_start >= EXTENT_MAP_LAST_BYTE) { 1398 - free_extent_map(em); 1399 - __unplug_io_fn(bdi, page); 1400 - return; 1401 - } 1402 - offset = offset - em->start; 1403 - btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree, 1404 - em->block_start + offset, page); 1405 - free_extent_map(em); 1406 - } 1407 - 1408 - /* 1409 1334 * If this fails, caller must call bdi_destroy() to get rid of the 1410 1335 * bdi again. 1411 1336 */ ··· 1343 1420 return err; 1344 1421 1345 1422 bdi->ra_pages = default_backing_dev_info.ra_pages; 1346 - bdi->unplug_io_fn = btrfs_unplug_io_fn; 1347 - bdi->unplug_io_data = info; 1348 1423 bdi->congested_fn = btrfs_congested_fn; 1349 1424 bdi->congested_data = info; 1350 1425 return 0;
-1
fs/btrfs/inode.c
··· 7218 7218 .writepage = btrfs_writepage, 7219 7219 .writepages = btrfs_writepages, 7220 7220 .readpages = btrfs_readpages, 7221 - .sync_page = block_sync_page, 7222 7221 .direct_IO = btrfs_direct_IO, 7223 7222 .invalidatepage = btrfs_invalidatepage, 7224 7223 .releasepage = btrfs_releasepage,
+11 -80
fs/btrfs/volumes.c
··· 162 162 struct bio *cur; 163 163 int again = 0; 164 164 unsigned long num_run; 165 - unsigned long num_sync_run; 166 165 unsigned long batch_run = 0; 167 166 unsigned long limit; 168 167 unsigned long last_waited = 0; ··· 171 172 fs_info = device->dev_root->fs_info; 172 173 limit = btrfs_async_submit_limit(fs_info); 173 174 limit = limit * 2 / 3; 174 - 175 - /* we want to make sure that every time we switch from the sync 176 - * list to the normal list, we unplug 177 - */ 178 - num_sync_run = 0; 179 175 180 176 loop: 181 177 spin_lock(&device->io_lock); ··· 217 223 218 224 spin_unlock(&device->io_lock); 219 225 220 - /* 221 - * if we're doing the regular priority list, make sure we unplug 222 - * for any high prio bios we've sent down 223 - */ 224 - if (pending_bios == &device->pending_bios && num_sync_run > 0) { 225 - num_sync_run = 0; 226 - blk_run_backing_dev(bdi, NULL); 227 - } 228 - 229 226 while (pending) { 230 227 231 228 rmb(); ··· 244 259 245 260 BUG_ON(atomic_read(&cur->bi_cnt) == 0); 246 261 247 - if (cur->bi_rw & REQ_SYNC) 248 - num_sync_run++; 249 - 250 262 submit_bio(cur->bi_rw, cur); 251 263 num_run++; 252 264 batch_run++; 253 - if (need_resched()) { 254 - if (num_sync_run) { 255 - blk_run_backing_dev(bdi, NULL); 256 - num_sync_run = 0; 257 - } 265 + if (need_resched()) 258 266 cond_resched(); 259 - } 260 267 261 268 /* 262 269 * we made progress, there is more work to do and the bdi ··· 281 304 * against it before looping 282 305 */ 283 306 last_waited = ioc->last_waited; 284 - if (need_resched()) { 285 - if (num_sync_run) { 286 - blk_run_backing_dev(bdi, NULL); 287 - num_sync_run = 0; 288 - } 307 + if (need_resched()) 289 308 cond_resched(); 290 - } 291 309 continue; 292 310 } 293 311 spin_lock(&device->io_lock); ··· 294 322 goto done; 295 323 } 296 324 } 297 - 298 - if (num_sync_run) { 299 - num_sync_run = 0; 300 - blk_run_backing_dev(bdi, NULL); 301 - } 302 - /* 303 - * IO has already been through a long path to get here. 
Checksumming, 304 - * async helper threads, perhaps compression. We've done a pretty 305 - * good job of collecting a batch of IO and should just unplug 306 - * the device right away. 307 - * 308 - * This will help anyone who is waiting on the IO, they might have 309 - * already unplugged, but managed to do so before the bio they 310 - * cared about found its way down here. 311 - */ 312 - blk_run_backing_dev(bdi, NULL); 313 325 314 326 cond_resched(); 315 327 if (again) ··· 2904 2948 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, 2905 2949 u64 logical, u64 *length, 2906 2950 struct btrfs_multi_bio **multi_ret, 2907 - int mirror_num, struct page *unplug_page) 2951 + int mirror_num) 2908 2952 { 2909 2953 struct extent_map *em; 2910 2954 struct map_lookup *map; ··· 2935 2979 read_lock(&em_tree->lock); 2936 2980 em = lookup_extent_mapping(em_tree, logical, *length); 2937 2981 read_unlock(&em_tree->lock); 2938 - 2939 - if (!em && unplug_page) { 2940 - kfree(multi); 2941 - return 0; 2942 - } 2943 2982 2944 2983 if (!em) { 2945 2984 printk(KERN_CRIT "unable to find logical %llu len %llu\n", ··· 2991 3040 *length = em->len - offset; 2992 3041 } 2993 3042 2994 - if (!multi_ret && !unplug_page) 3043 + if (!multi_ret) 2995 3044 goto out; 2996 3045 2997 3046 num_stripes = 1; 2998 3047 stripe_index = 0; 2999 3048 if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 3000 - if (unplug_page || (rw & REQ_WRITE)) 3049 + if (rw & REQ_WRITE) 3001 3050 num_stripes = map->num_stripes; 3002 3051 else if (mirror_num) 3003 3052 stripe_index = mirror_num - 1; ··· 3019 3068 stripe_index = do_div(stripe_nr, factor); 3020 3069 stripe_index *= map->sub_stripes; 3021 3070 3022 - if (unplug_page || (rw & REQ_WRITE)) 3071 + if (rw & REQ_WRITE) 3023 3072 num_stripes = map->sub_stripes; 3024 3073 else if (mirror_num) 3025 3074 stripe_index += mirror_num - 1; ··· 3039 3088 BUG_ON(stripe_index >= map->num_stripes); 3040 3089 3041 3090 for (i = 0; i < num_stripes; i++) { 3042 - if 
(unplug_page) { 3043 - struct btrfs_device *device; 3044 - struct backing_dev_info *bdi; 3045 - 3046 - device = map->stripes[stripe_index].dev; 3047 - if (device->bdev) { 3048 - bdi = blk_get_backing_dev_info(device->bdev); 3049 - if (bdi->unplug_io_fn) 3050 - bdi->unplug_io_fn(bdi, unplug_page); 3051 - } 3052 - } else { 3053 - multi->stripes[i].physical = 3054 - map->stripes[stripe_index].physical + 3055 - stripe_offset + stripe_nr * map->stripe_len; 3056 - multi->stripes[i].dev = map->stripes[stripe_index].dev; 3057 - } 3091 + multi->stripes[i].physical = 3092 + map->stripes[stripe_index].physical + 3093 + stripe_offset + stripe_nr * map->stripe_len; 3094 + multi->stripes[i].dev = map->stripes[stripe_index].dev; 3058 3095 stripe_index++; 3059 3096 } 3060 3097 if (multi_ret) { ··· 3060 3121 struct btrfs_multi_bio **multi_ret, int mirror_num) 3061 3122 { 3062 3123 return __btrfs_map_block(map_tree, rw, logical, length, multi_ret, 3063 - mirror_num, NULL); 3124 + mirror_num); 3064 3125 } 3065 3126 3066 3127 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, ··· 3126 3187 3127 3188 free_extent_map(em); 3128 3189 return 0; 3129 - } 3130 - 3131 - int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree, 3132 - u64 logical, struct page *page) 3133 - { 3134 - u64 length = PAGE_CACHE_SIZE; 3135 - return __btrfs_map_block(map_tree, READ, logical, &length, 3136 - NULL, 0, page); 3137 3190 } 3138 3191 3139 3192 static void end_bio_multi_stripe(struct bio *bio, int err)
+4 -27
fs/buffer.c
··· 54 54 } 55 55 EXPORT_SYMBOL(init_buffer); 56 56 57 - static int sync_buffer(void *word) 57 + static int sleep_on_buffer(void *word) 58 58 { 59 - struct block_device *bd; 60 - struct buffer_head *bh 61 - = container_of(word, struct buffer_head, b_state); 62 - 63 - smp_mb(); 64 - bd = bh->b_bdev; 65 - if (bd) 66 - blk_run_address_space(bd->bd_inode->i_mapping); 67 59 io_schedule(); 68 60 return 0; 69 61 } 70 62 71 63 void __lock_buffer(struct buffer_head *bh) 72 64 { 73 - wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, 65 + wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer, 74 66 TASK_UNINTERRUPTIBLE); 75 67 } 76 68 EXPORT_SYMBOL(__lock_buffer); ··· 82 90 */ 83 91 void __wait_on_buffer(struct buffer_head * bh) 84 92 { 85 - wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); 93 + wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE); 86 94 } 87 95 EXPORT_SYMBOL(__wait_on_buffer); 88 96 ··· 741 749 { 742 750 struct buffer_head *bh; 743 751 struct list_head tmp; 744 - struct address_space *mapping, *prev_mapping = NULL; 752 + struct address_space *mapping; 745 753 int err = 0, err2; 746 754 747 755 INIT_LIST_HEAD(&tmp); ··· 775 783 * wait_on_buffer() will do that for us 776 784 * through sync_buffer(). 777 785 */ 778 - if (prev_mapping && prev_mapping != mapping) 779 - blk_run_address_space(prev_mapping); 780 - prev_mapping = mapping; 781 - 782 786 brelse(bh); 783 787 spin_lock(lock); 784 788 } ··· 3125 3137 return ret; 3126 3138 } 3127 3139 EXPORT_SYMBOL(try_to_free_buffers); 3128 - 3129 - void block_sync_page(struct page *page) 3130 - { 3131 - struct address_space *mapping; 3132 - 3133 - smp_mb(); 3134 - mapping = page_mapping(page); 3135 - if (mapping) 3136 - blk_run_backing_dev(mapping->backing_dev_info, page); 3137 - } 3138 - EXPORT_SYMBOL(block_sync_page); 3139 3140 3140 3141 /* 3141 3142 * There are no bdflush tunables left. But distributions are
-30
fs/cifs/file.c
··· 1569 1569 return rc; 1570 1570 } 1571 1571 1572 - /* static void cifs_sync_page(struct page *page) 1573 - { 1574 - struct address_space *mapping; 1575 - struct inode *inode; 1576 - unsigned long index = page->index; 1577 - unsigned int rpages = 0; 1578 - int rc = 0; 1579 - 1580 - cFYI(1, "sync page %p", page); 1581 - mapping = page->mapping; 1582 - if (!mapping) 1583 - return 0; 1584 - inode = mapping->host; 1585 - if (!inode) 1586 - return; */ 1587 - 1588 - /* fill in rpages then 1589 - result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ 1590 - 1591 - /* cFYI(1, "rpages is %d for sync page of Index %ld", rpages, index); 1592 - 1593 - #if 0 1594 - if (rc < 0) 1595 - return rc; 1596 - return 0; 1597 - #endif 1598 - } */ 1599 - 1600 1572 /* 1601 1573 * As file closes, flush all cached write data for this inode checking 1602 1574 * for write behind errors. ··· 2482 2510 .set_page_dirty = __set_page_dirty_nobuffers, 2483 2511 .releasepage = cifs_release_page, 2484 2512 .invalidatepage = cifs_invalidate_page, 2485 - /* .sync_page = cifs_sync_page, */ 2486 2513 /* .direct_IO = */ 2487 2514 }; 2488 2515 ··· 2499 2528 .set_page_dirty = __set_page_dirty_nobuffers, 2500 2529 .releasepage = cifs_release_page, 2501 2530 .invalidatepage = cifs_invalidate_page, 2502 - /* .sync_page = cifs_sync_page, */ 2503 2531 /* .direct_IO = */ 2504 2532 };
+1 -4
fs/direct-io.c
··· 1110 1110 ((rw & READ) || (dio->result == dio->size))) 1111 1111 ret = -EIOCBQUEUED; 1112 1112 1113 - if (ret != -EIOCBQUEUED) { 1114 - /* All IO is now issued, send it on its way */ 1115 - blk_run_address_space(inode->i_mapping); 1113 + if (ret != -EIOCBQUEUED) 1116 1114 dio_await_completion(dio); 1117 - } 1118 1115 1119 1116 /* 1120 1117 * Sync will always be dropping the final ref and completing the
-1
fs/efs/inode.c
··· 23 23 } 24 24 static const struct address_space_operations efs_aops = { 25 25 .readpage = efs_readpage, 26 - .sync_page = block_sync_page, 27 26 .bmap = _efs_bmap 28 27 }; 29 28
-1
fs/exofs/inode.c
··· 795 795 .direct_IO = NULL, /* TODO: Should be trivial to do */ 796 796 797 797 /* With these NULL has special meaning or default is not exported */ 798 - .sync_page = NULL, 799 798 .get_xip_mem = NULL, 800 799 .migratepage = NULL, 801 800 .launder_page = NULL,
-2
fs/ext2/inode.c
··· 860 860 .readpage = ext2_readpage, 861 861 .readpages = ext2_readpages, 862 862 .writepage = ext2_writepage, 863 - .sync_page = block_sync_page, 864 863 .write_begin = ext2_write_begin, 865 864 .write_end = ext2_write_end, 866 865 .bmap = ext2_bmap, ··· 879 880 .readpage = ext2_readpage, 880 881 .readpages = ext2_readpages, 881 882 .writepage = ext2_nobh_writepage, 882 - .sync_page = block_sync_page, 883 883 .write_begin = ext2_nobh_write_begin, 884 884 .write_end = nobh_write_end, 885 885 .bmap = ext2_bmap,
-3
fs/ext3/inode.c
··· 1894 1894 .readpage = ext3_readpage, 1895 1895 .readpages = ext3_readpages, 1896 1896 .writepage = ext3_ordered_writepage, 1897 - .sync_page = block_sync_page, 1898 1897 .write_begin = ext3_write_begin, 1899 1898 .write_end = ext3_ordered_write_end, 1900 1899 .bmap = ext3_bmap, ··· 1909 1910 .readpage = ext3_readpage, 1910 1911 .readpages = ext3_readpages, 1911 1912 .writepage = ext3_writeback_writepage, 1912 - .sync_page = block_sync_page, 1913 1913 .write_begin = ext3_write_begin, 1914 1914 .write_end = ext3_writeback_write_end, 1915 1915 .bmap = ext3_bmap, ··· 1924 1926 .readpage = ext3_readpage, 1925 1927 .readpages = ext3_readpages, 1926 1928 .writepage = ext3_journalled_writepage, 1927 - .sync_page = block_sync_page, 1928 1929 .write_begin = ext3_write_begin, 1929 1930 .write_end = ext3_journalled_write_end, 1930 1931 .set_page_dirty = ext3_journalled_set_page_dirty,
-4
fs/ext4/inode.c
··· 3903 3903 .readpage = ext4_readpage, 3904 3904 .readpages = ext4_readpages, 3905 3905 .writepage = ext4_writepage, 3906 - .sync_page = block_sync_page, 3907 3906 .write_begin = ext4_write_begin, 3908 3907 .write_end = ext4_ordered_write_end, 3909 3908 .bmap = ext4_bmap, ··· 3918 3919 .readpage = ext4_readpage, 3919 3920 .readpages = ext4_readpages, 3920 3921 .writepage = ext4_writepage, 3921 - .sync_page = block_sync_page, 3922 3922 .write_begin = ext4_write_begin, 3923 3923 .write_end = ext4_writeback_write_end, 3924 3924 .bmap = ext4_bmap, ··· 3933 3935 .readpage = ext4_readpage, 3934 3936 .readpages = ext4_readpages, 3935 3937 .writepage = ext4_writepage, 3936 - .sync_page = block_sync_page, 3937 3938 .write_begin = ext4_write_begin, 3938 3939 .write_end = ext4_journalled_write_end, 3939 3940 .set_page_dirty = ext4_journalled_set_page_dirty, ··· 3948 3951 .readpages = ext4_readpages, 3949 3952 .writepage = ext4_writepage, 3950 3953 .writepages = ext4_da_writepages, 3951 - .sync_page = block_sync_page, 3952 3954 .write_begin = ext4_da_write_begin, 3953 3955 .write_end = ext4_da_write_end, 3954 3956 .bmap = ext4_bmap,
-1
fs/fat/inode.c
··· 236 236 .readpages = fat_readpages, 237 237 .writepage = fat_writepage, 238 238 .writepages = fat_writepages, 239 - .sync_page = block_sync_page, 240 239 .write_begin = fat_write_begin, 241 240 .write_end = fat_write_end, 242 241 .direct_IO = fat_direct_IO,
-1
fs/freevxfs/vxfs_subr.c
··· 44 44 const struct address_space_operations vxfs_aops = { 45 45 .readpage = vxfs_readpage, 46 46 .bmap = vxfs_bmap, 47 - .sync_page = block_sync_page, 48 47 }; 49 48 50 49 inline void
-1
fs/fuse/inode.c
··· 868 868 869 869 fc->bdi.name = "fuse"; 870 870 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 871 - fc->bdi.unplug_io_fn = default_unplug_io_fn; 872 871 /* fuse does it's own writeback accounting */ 873 872 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; 874 873
-3
fs/gfs2/aops.c
··· 1116 1116 .writepages = gfs2_writeback_writepages, 1117 1117 .readpage = gfs2_readpage, 1118 1118 .readpages = gfs2_readpages, 1119 - .sync_page = block_sync_page, 1120 1119 .write_begin = gfs2_write_begin, 1121 1120 .write_end = gfs2_write_end, 1122 1121 .bmap = gfs2_bmap, ··· 1131 1132 .writepage = gfs2_ordered_writepage, 1132 1133 .readpage = gfs2_readpage, 1133 1134 .readpages = gfs2_readpages, 1134 - .sync_page = block_sync_page, 1135 1135 .write_begin = gfs2_write_begin, 1136 1136 .write_end = gfs2_write_end, 1137 1137 .set_page_dirty = gfs2_set_page_dirty, ··· 1148 1150 .writepages = gfs2_jdata_writepages, 1149 1151 .readpage = gfs2_readpage, 1150 1152 .readpages = gfs2_readpages, 1151 - .sync_page = block_sync_page, 1152 1153 .write_begin = gfs2_write_begin, 1153 1154 .write_end = gfs2_write_end, 1154 1155 .set_page_dirty = gfs2_set_page_dirty,
-1
fs/gfs2/meta_io.c
··· 94 94 const struct address_space_operations gfs2_meta_aops = { 95 95 .writepage = gfs2_aspace_writepage, 96 96 .releasepage = gfs2_releasepage, 97 - .sync_page = block_sync_page, 98 97 }; 99 98 100 99 /**
-2
fs/hfs/inode.c
··· 150 150 const struct address_space_operations hfs_btree_aops = { 151 151 .readpage = hfs_readpage, 152 152 .writepage = hfs_writepage, 153 - .sync_page = block_sync_page, 154 153 .write_begin = hfs_write_begin, 155 154 .write_end = generic_write_end, 156 155 .bmap = hfs_bmap, ··· 159 160 const struct address_space_operations hfs_aops = { 160 161 .readpage = hfs_readpage, 161 162 .writepage = hfs_writepage, 162 - .sync_page = block_sync_page, 163 163 .write_begin = hfs_write_begin, 164 164 .write_end = generic_write_end, 165 165 .bmap = hfs_bmap,
-2
fs/hfsplus/inode.c
··· 146 146 const struct address_space_operations hfsplus_btree_aops = { 147 147 .readpage = hfsplus_readpage, 148 148 .writepage = hfsplus_writepage, 149 - .sync_page = block_sync_page, 150 149 .write_begin = hfsplus_write_begin, 151 150 .write_end = generic_write_end, 152 151 .bmap = hfsplus_bmap, ··· 155 156 const struct address_space_operations hfsplus_aops = { 156 157 .readpage = hfsplus_readpage, 157 158 .writepage = hfsplus_writepage, 158 - .sync_page = block_sync_page, 159 159 .write_begin = hfsplus_write_begin, 160 160 .write_end = generic_write_end, 161 161 .bmap = hfsplus_bmap,
-1
fs/hpfs/file.c
··· 120 120 const struct address_space_operations hpfs_aops = { 121 121 .readpage = hpfs_readpage, 122 122 .writepage = hpfs_writepage, 123 - .sync_page = block_sync_page, 124 123 .write_begin = hpfs_write_begin, 125 124 .write_end = generic_write_end, 126 125 .bmap = _hpfs_bmap
-1
fs/isofs/inode.c
··· 1158 1158 1159 1159 static const struct address_space_operations isofs_aops = { 1160 1160 .readpage = isofs_readpage, 1161 - .sync_page = block_sync_page, 1162 1161 .bmap = _isofs_bmap 1163 1162 }; 1164 1163
-1
fs/jfs/inode.c
··· 352 352 .readpages = jfs_readpages, 353 353 .writepage = jfs_writepage, 354 354 .writepages = jfs_writepages, 355 - .sync_page = block_sync_page, 356 355 .write_begin = jfs_write_begin, 357 356 .write_end = nobh_write_end, 358 357 .bmap = jfs_bmap,
-1
fs/jfs/jfs_metapage.c
··· 583 583 const struct address_space_operations jfs_metapage_aops = { 584 584 .readpage = metapage_readpage, 585 585 .writepage = metapage_writepage, 586 - .sync_page = block_sync_page, 587 586 .releasepage = metapage_releasepage, 588 587 .invalidatepage = metapage_invalidatepage, 589 588 .set_page_dirty = __set_page_dirty_nobuffers,
-2
fs/logfs/dev_bdev.c
··· 39 39 bio.bi_end_io = request_complete; 40 40 41 41 submit_bio(rw, &bio); 42 - generic_unplug_device(bdev_get_queue(bdev)); 43 42 wait_for_completion(&complete); 44 43 return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO; 45 44 } ··· 167 168 } 168 169 len = PAGE_ALIGN(len); 169 170 __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); 170 - generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev)); 171 171 } 172 172 173 173
-1
fs/minix/inode.c
··· 399 399 static const struct address_space_operations minix_aops = { 400 400 .readpage = minix_readpage, 401 401 .writepage = minix_writepage, 402 - .sync_page = block_sync_page, 403 402 .write_begin = minix_write_begin, 404 403 .write_end = generic_write_end, 405 404 .bmap = minix_bmap
+1 -5
fs/nilfs2/btnode.c
··· 40 40 nilfs_mapping_init_once(btnc); 41 41 } 42 42 43 - static const struct address_space_operations def_btnode_aops = { 44 - .sync_page = block_sync_page, 45 - }; 46 - 47 43 void nilfs_btnode_cache_init(struct address_space *btnc, 48 44 struct backing_dev_info *bdi) 49 45 { 50 - nilfs_mapping_init(btnc, bdi, &def_btnode_aops); 46 + nilfs_mapping_init(btnc, bdi); 51 47 } 52 48 53 49 void nilfs_btnode_cache_clear(struct address_space *btnc)
-1
fs/nilfs2/gcinode.c
··· 49 49 #include "ifile.h" 50 50 51 51 static const struct address_space_operations def_gcinode_aops = { 52 - .sync_page = block_sync_page, 53 52 }; 54 53 55 54 /*
-1
fs/nilfs2/inode.c
··· 262 262 const struct address_space_operations nilfs_aops = { 263 263 .writepage = nilfs_writepage, 264 264 .readpage = nilfs_readpage, 265 - .sync_page = block_sync_page, 266 265 .writepages = nilfs_writepages, 267 266 .set_page_dirty = nilfs_set_page_dirty, 268 267 .readpages = nilfs_readpages,
+2 -7
fs/nilfs2/mdt.c
··· 399 399 400 400 static const struct address_space_operations def_mdt_aops = { 401 401 .writepage = nilfs_mdt_write_page, 402 - .sync_page = block_sync_page, 403 402 }; 404 403 405 404 static const struct inode_operations def_mdt_iops; ··· 437 438 mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size); 438 439 } 439 440 440 - static const struct address_space_operations shadow_map_aops = { 441 - .sync_page = block_sync_page, 442 - }; 443 - 444 441 /** 445 442 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file 446 443 * @inode: inode of the metadata file ··· 450 455 451 456 INIT_LIST_HEAD(&shadow->frozen_buffers); 452 457 nilfs_mapping_init_once(&shadow->frozen_data); 453 - nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); 458 + nilfs_mapping_init(&shadow->frozen_data, bdi); 454 459 nilfs_mapping_init_once(&shadow->frozen_btnodes); 455 - nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); 460 + nilfs_mapping_init(&shadow->frozen_btnodes, bdi); 456 461 mi->mi_shadow = shadow; 457 462 return 0; 458 463 }
+2 -3
fs/nilfs2/page.c
··· 506 506 } 507 507 508 508 void nilfs_mapping_init(struct address_space *mapping, 509 - struct backing_dev_info *bdi, 510 - const struct address_space_operations *aops) 509 + struct backing_dev_info *bdi) 511 510 { 512 511 mapping->host = NULL; 513 512 mapping->flags = 0; 514 513 mapping_set_gfp_mask(mapping, GFP_NOFS); 515 514 mapping->assoc_mapping = NULL; 516 515 mapping->backing_dev_info = bdi; 517 - mapping->a_ops = aops; 516 + mapping->a_ops = NULL; 518 517 } 519 518 520 519 /*
+1 -2
fs/nilfs2/page.h
··· 63 63 void nilfs_clear_dirty_pages(struct address_space *); 64 64 void nilfs_mapping_init_once(struct address_space *mapping); 65 65 void nilfs_mapping_init(struct address_space *mapping, 66 - struct backing_dev_info *bdi, 67 - const struct address_space_operations *aops); 66 + struct backing_dev_info *bdi); 68 67 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); 69 68 unsigned long nilfs_find_uncommitted_extent(struct inode *inode, 70 69 sector_t start_blk,
-4
fs/ntfs/aops.c
··· 1543 1543 */ 1544 1544 const struct address_space_operations ntfs_aops = { 1545 1545 .readpage = ntfs_readpage, /* Fill page with data. */ 1546 - .sync_page = block_sync_page, /* Currently, just unplugs the 1547 - disk request queue. */ 1548 1546 #ifdef NTFS_RW 1549 1547 .writepage = ntfs_writepage, /* Write dirty page to disk. */ 1550 1548 #endif /* NTFS_RW */ ··· 1558 1560 */ 1559 1561 const struct address_space_operations ntfs_mst_aops = { 1560 1562 .readpage = ntfs_readpage, /* Fill page with data. */ 1561 - .sync_page = block_sync_page, /* Currently, just unplugs the 1562 - disk request queue. */ 1563 1563 #ifdef NTFS_RW 1564 1564 .writepage = ntfs_writepage, /* Write dirty page to disk. */ 1565 1565 .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
+1 -2
fs/ntfs/compress.c
··· 698 698 "uptodate! Unplugging the disk queue " 699 699 "and rescheduling."); 700 700 get_bh(tbh); 701 - blk_run_address_space(mapping); 702 - schedule(); 701 + io_schedule(); 703 702 put_bh(tbh); 704 703 if (unlikely(!buffer_uptodate(tbh))) 705 704 goto read_err;
-1
fs/ocfs2/aops.c
··· 2043 2043 .write_begin = ocfs2_write_begin, 2044 2044 .write_end = ocfs2_write_end, 2045 2045 .bmap = ocfs2_bmap, 2046 - .sync_page = block_sync_page, 2047 2046 .direct_IO = ocfs2_direct_IO, 2048 2047 .invalidatepage = ocfs2_invalidatepage, 2049 2048 .releasepage = ocfs2_releasepage,
-4
fs/ocfs2/cluster/heartbeat.c
··· 367 367 static void o2hb_wait_on_io(struct o2hb_region *reg, 368 368 struct o2hb_bio_wait_ctxt *wc) 369 369 { 370 - struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping; 371 - 372 - blk_run_address_space(mapping); 373 370 o2hb_bio_wait_dec(wc, 1); 374 - 375 371 wait_for_completion(&wc->wc_io_complete); 376 372 } 377 373
-1
fs/omfs/file.c
··· 372 372 .readpages = omfs_readpages, 373 373 .writepage = omfs_writepage, 374 374 .writepages = omfs_writepages, 375 - .sync_page = block_sync_page, 376 375 .write_begin = omfs_write_begin, 377 376 .write_end = generic_write_end, 378 377 .bmap = omfs_bmap,
-1
fs/qnx4/inode.c
··· 335 335 static const struct address_space_operations qnx4_aops = { 336 336 .readpage = qnx4_readpage, 337 337 .writepage = qnx4_writepage, 338 - .sync_page = block_sync_page, 339 338 .write_begin = qnx4_write_begin, 340 339 .write_end = generic_write_end, 341 340 .bmap = qnx4_bmap
-1
fs/reiserfs/inode.c
··· 3212 3212 .readpages = reiserfs_readpages, 3213 3213 .releasepage = reiserfs_releasepage, 3214 3214 .invalidatepage = reiserfs_invalidatepage, 3215 - .sync_page = block_sync_page, 3216 3215 .write_begin = reiserfs_write_begin, 3217 3216 .write_end = reiserfs_write_end, 3218 3217 .bmap = reiserfs_aop_bmap,
-1
fs/sysv/itree.c
··· 488 488 const struct address_space_operations sysv_aops = { 489 489 .readpage = sysv_readpage, 490 490 .writepage = sysv_writepage, 491 - .sync_page = block_sync_page, 492 491 .write_begin = sysv_write_begin, 493 492 .write_end = generic_write_end, 494 493 .bmap = sysv_bmap
-1
fs/ubifs/super.c
··· 1979 1979 */ 1980 1980 c->bdi.name = "ubifs", 1981 1981 c->bdi.capabilities = BDI_CAP_MAP_COPY; 1982 - c->bdi.unplug_io_fn = default_unplug_io_fn; 1983 1982 err = bdi_init(&c->bdi); 1984 1983 if (err) 1985 1984 goto out_close;
-1
fs/udf/file.c
··· 98 98 const struct address_space_operations udf_adinicb_aops = { 99 99 .readpage = udf_adinicb_readpage, 100 100 .writepage = udf_adinicb_writepage, 101 - .sync_page = block_sync_page, 102 101 .write_begin = simple_write_begin, 103 102 .write_end = udf_adinicb_write_end, 104 103 };
-1
fs/udf/inode.c
··· 133 133 const struct address_space_operations udf_aops = { 134 134 .readpage = udf_readpage, 135 135 .writepage = udf_writepage, 136 - .sync_page = block_sync_page, 137 136 .write_begin = udf_write_begin, 138 137 .write_end = generic_write_end, 139 138 .bmap = udf_bmap,
-1
fs/ufs/inode.c
··· 588 588 const struct address_space_operations ufs_aops = { 589 589 .readpage = ufs_readpage, 590 590 .writepage = ufs_writepage, 591 - .sync_page = block_sync_page, 592 591 .write_begin = ufs_write_begin, 593 592 .write_end = generic_write_end, 594 593 .bmap = ufs_bmap
+1 -1
fs/ufs/truncate.c
··· 481 481 break; 482 482 if (IS_SYNC(inode) && (inode->i_state & I_DIRTY)) 483 483 ufs_sync_inode (inode); 484 - blk_run_address_space(inode->i_mapping); 484 + blk_flush_plug(current); 485 485 yield(); 486 486 } 487 487
-1
fs/xfs/linux-2.6/xfs_aops.c
··· 1495 1495 .readpages = xfs_vm_readpages, 1496 1496 .writepage = xfs_vm_writepage, 1497 1497 .writepages = xfs_vm_writepages, 1498 - .sync_page = block_sync_page, 1499 1498 .releasepage = xfs_vm_releasepage, 1500 1499 .invalidatepage = xfs_vm_invalidatepage, 1501 1500 .write_begin = xfs_vm_write_begin,
+5 -8
fs/xfs/linux-2.6/xfs_buf.c
··· 991 991 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) 992 992 xfs_log_force(bp->b_target->bt_mount, 0); 993 993 if (atomic_read(&bp->b_io_remaining)) 994 - blk_run_address_space(bp->b_target->bt_mapping); 994 + blk_flush_plug(current); 995 995 down(&bp->b_sema); 996 996 XB_SET_OWNER(bp); 997 997 ··· 1035 1035 set_current_state(TASK_UNINTERRUPTIBLE); 1036 1036 if (atomic_read(&bp->b_pin_count) == 0) 1037 1037 break; 1038 - if (atomic_read(&bp->b_io_remaining)) 1039 - blk_run_address_space(bp->b_target->bt_mapping); 1040 - schedule(); 1038 + io_schedule(); 1041 1039 } 1042 1040 remove_wait_queue(&bp->b_waiters, &wait); 1043 1041 set_current_state(TASK_RUNNING); ··· 1441 1443 trace_xfs_buf_iowait(bp, _RET_IP_); 1442 1444 1443 1445 if (atomic_read(&bp->b_io_remaining)) 1444 - blk_run_address_space(bp->b_target->bt_mapping); 1446 + blk_flush_plug(current); 1445 1447 wait_for_completion(&bp->b_iowait); 1446 1448 1447 1449 trace_xfs_buf_iowait_done(bp, _RET_IP_); ··· 1665 1667 struct inode *inode; 1666 1668 struct address_space *mapping; 1667 1669 static const struct address_space_operations mapping_aops = { 1668 - .sync_page = block_sync_page, 1669 1670 .migratepage = fail_migrate_page, 1670 1671 }; 1671 1672 ··· 1945 1948 count++; 1946 1949 } 1947 1950 if (count) 1948 - blk_run_address_space(target->bt_mapping); 1951 + blk_flush_plug(current); 1949 1952 1950 1953 } while (!kthread_should_stop()); 1951 1954 ··· 1993 1996 1994 1997 if (wait) { 1995 1998 /* Expedite and wait for IO to complete. */ 1996 - blk_run_address_space(target->bt_mapping); 1999 + blk_flush_plug(current); 1997 2000 while (!list_empty(&wait_list)) { 1998 2001 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); 1999 2002
-16
include/linux/backing-dev.h
··· 66 66 unsigned int capabilities; /* Device capabilities */ 67 67 congested_fn *congested_fn; /* Function pointer if device is md/dm */ 68 68 void *congested_data; /* Pointer to aux data for congested func */ 69 - void (*unplug_io_fn)(struct backing_dev_info *, struct page *); 70 - void *unplug_io_data; 71 69 72 70 char *name; 73 71 ··· 249 251 250 252 extern struct backing_dev_info default_backing_dev_info; 251 253 extern struct backing_dev_info noop_backing_dev_info; 252 - void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page); 253 254 254 255 int writeback_in_progress(struct backing_dev_info *bdi); 255 256 ··· 331 334 { 332 335 schedule(); 333 336 return 0; 334 - } 335 - 336 - static inline void blk_run_backing_dev(struct backing_dev_info *bdi, 337 - struct page *page) 338 - { 339 - if (bdi && bdi->unplug_io_fn) 340 - bdi->unplug_io_fn(bdi, page); 341 - } 342 - 343 - static inline void blk_run_address_space(struct address_space *mapping) 344 - { 345 - if (mapping) 346 - blk_run_backing_dev(mapping->backing_dev_info, NULL); 347 337 } 348 338 349 339 #endif /* _LINUX_BACKING_DEV_H */
+7 -24
include/linux/blkdev.h
··· 196 196 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); 197 197 typedef int (prep_rq_fn) (struct request_queue *, struct request *); 198 198 typedef void (unprep_rq_fn) (struct request_queue *, struct request *); 199 - typedef void (unplug_fn) (struct request_queue *); 200 199 201 200 struct bio_vec; 202 201 struct bvec_merge_data { ··· 278 279 make_request_fn *make_request_fn; 279 280 prep_rq_fn *prep_rq_fn; 280 281 unprep_rq_fn *unprep_rq_fn; 281 - unplug_fn *unplug_fn; 282 282 merge_bvec_fn *merge_bvec_fn; 283 283 softirq_done_fn *softirq_done_fn; 284 284 rq_timed_out_fn *rq_timed_out_fn; ··· 289 291 */ 290 292 sector_t end_sector; 291 293 struct request *boundary_rq; 292 - 293 - /* 294 - * Auto-unplugging state 295 - */ 296 - struct timer_list unplug_timer; 297 - int unplug_thresh; /* After this many requests */ 298 - unsigned long unplug_delay; /* After this many jiffies */ 299 - struct work_struct unplug_work; 300 294 301 295 /* 302 296 * Delayed queue handling ··· 389 399 #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ 390 400 #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ 391 401 #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ 392 - #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ 393 - #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ 394 - #define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ 395 - #define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ 396 - #define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */ 397 - #define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */ 398 - #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ 399 - #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ 402 + #define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */ 403 + #define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */ 404 + #define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */ 405 + #define QUEUE_FLAG_SAME_COMP 10 /* 
force complete on same CPU */ 406 + #define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */ 407 + #define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */ 408 + #define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */ 400 409 #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 401 410 #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 402 411 #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ ··· 473 484 __clear_bit(flag, &q->queue_flags); 474 485 } 475 486 476 - #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) 477 487 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 478 488 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 479 489 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) ··· 667 679 extern void blk_rq_unprep_clone(struct request *rq); 668 680 extern int blk_insert_cloned_request(struct request_queue *q, 669 681 struct request *rq); 670 - extern void blk_plug_device(struct request_queue *); 671 - extern void blk_plug_device_unlocked(struct request_queue *); 672 - extern int blk_remove_plug(struct request_queue *); 673 682 extern void blk_delay_queue(struct request_queue *, unsigned long); 674 683 extern void blk_recount_segments(struct request_queue *, struct bio *); 675 684 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, ··· 711 726 struct request *, int); 712 727 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 713 728 struct request *, int, rq_end_io_fn *); 714 - extern void blk_unplug(struct request_queue *q); 715 729 716 730 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 717 731 { ··· 847 863 848 864 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 849 865 extern void blk_dump_rq_flags(struct request *, char *); 850 - extern void generic_unplug_device(struct request_queue *); 851 866 extern long 
nr_blockdev_pages(void); 852 867 853 868 int blk_get_queue(struct request_queue *);
-1
include/linux/buffer_head.h
··· 219 219 int block_commit_write(struct page *page, unsigned from, unsigned to); 220 220 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 221 221 get_block_t get_block); 222 - void block_sync_page(struct page *); 223 222 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); 224 223 int block_truncate_page(struct address_space *, loff_t, get_block_t *); 225 224 int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
-5
include/linux/device-mapper.h
··· 286 286 int dm_table_complete(struct dm_table *t); 287 287 288 288 /* 289 - * Unplug all devices in a table. 290 - */ 291 - void dm_table_unplug_all(struct dm_table *t); 292 - 293 - /* 294 289 * Table reference counting. 295 290 */ 296 291 struct dm_table *dm_get_live_table(struct mapped_device *md);
+2 -5
include/linux/elevator.h
··· 20 20 typedef int (elevator_dispatch_fn) (struct request_queue *, int);
21 21
22 22 typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
23 - typedef int (elevator_queue_empty_fn) (struct request_queue *);
24 23 typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
25 24 typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
26 25 typedef int (elevator_may_queue_fn) (struct request_queue *, int);
··· 45 46 elevator_activate_req_fn *elevator_activate_req_fn;
46 47 elevator_deactivate_req_fn *elevator_deactivate_req_fn;
47 48
48 - elevator_queue_empty_fn *elevator_queue_empty_fn;
49 49 elevator_completed_req_fn *elevator_completed_req_fn;
50 50
51 51 elevator_request_list_fn *elevator_former_req_fn;
··· 99 101 */
100 102 extern void elv_dispatch_sort(struct request_queue *, struct request *);
101 103 extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
102 - extern void elv_add_request(struct request_queue *, struct request *, int, int);
103 - extern void __elv_add_request(struct request_queue *, struct request *, int, int);
104 + extern void elv_add_request(struct request_queue *, struct request *, int);
105 + extern void __elv_add_request(struct request_queue *, struct request *, int);
104 106 extern void elv_insert(struct request_queue *, struct request *, int);
105 107 extern int elv_merge(struct request_queue *, struct request **, struct bio *);
106 108 extern int elv_try_merge(struct request *, struct bio *);
··· 110 112 extern void elv_bio_merged(struct request_queue *q, struct request *,
111 113 struct bio *);
112 114 extern void elv_requeue_request(struct request_queue *, struct request *);
113 - extern int elv_queue_empty(struct request_queue *);
114 115 extern struct request *elv_former_request(struct request_queue *, struct request *);
115 116 extern struct request *elv_latter_request(struct request_queue *, struct request *);
116 117 extern int elv_register_queue(struct request_queue *q);
-1
include/linux/fs.h
··· 583 583 struct address_space_operations { 584 584 int (*writepage)(struct page *page, struct writeback_control *wbc); 585 585 int (*readpage)(struct file *, struct page *); 586 - void (*sync_page)(struct page *); 587 586 588 587 /* Write back some dirty pages from this mapping. */ 589 588 int (*writepages)(struct address_space *, struct writeback_control *);
-12
include/linux/pagemap.h
··· 298 298 299 299 extern void __lock_page(struct page *page); 300 300 extern int __lock_page_killable(struct page *page); 301 - extern void __lock_page_nosync(struct page *page); 302 301 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, 303 302 unsigned int flags); 304 303 extern void unlock_page(struct page *page); ··· 340 341 return 0; 341 342 } 342 343 343 - /* 344 - * lock_page_nosync should only be used if we can't pin the page's inode. 345 - * Doesn't play quite so well with block device plugging. 346 - */ 347 - static inline void lock_page_nosync(struct page *page) 348 - { 349 - might_sleep(); 350 - if (!trylock_page(page)) 351 - __lock_page_nosync(page); 352 - } 353 - 354 344 /* 355 345 * lock_page_or_retry - Lock the page, unless this would block and the 356 346 * caller indicated that it can handle a retry.
-2
include/linux/swap.h
··· 299 299 struct page **pagep, swp_entry_t *ent); 300 300 #endif 301 301 302 - extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); 303 - 304 302 #ifdef CONFIG_SWAP 305 303 /* linux/mm/page_io.c */ 306 304 extern int swap_readpage(struct page *);
-6
mm/backing-dev.c
··· 14 14 15 15 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); 16 16 17 - void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) 18 - { 19 - } 20 - EXPORT_SYMBOL(default_unplug_io_fn); 21 - 22 17 struct backing_dev_info default_backing_dev_info = { 23 18 .name = "default", 24 19 .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE, 25 20 .state = 0, 26 21 .capabilities = BDI_CAP_MAP_COPY, 27 - .unplug_io_fn = default_unplug_io_fn, 28 22 }; 29 23 EXPORT_SYMBOL_GPL(default_backing_dev_info); 30 24
+6 -61
mm/filemap.c
··· 155 155 }
156 156 EXPORT_SYMBOL(remove_from_page_cache);
157 157
158 - static int sync_page(void *word)
158 + static int sleep_on_page(void *word)
159 159 {
160 - struct address_space *mapping;
161 - struct page *page;
162 -
163 - page = container_of((unsigned long *)word, struct page, flags);
164 -
165 - /*
166 - * page_mapping() is being called without PG_locked held.
167 - * Some knowledge of the state and use of the page is used to
168 - * reduce the requirements down to a memory barrier.
169 - * The danger here is of a stale page_mapping() return value
170 - * indicating a struct address_space different from the one it's
171 - * associated with when it is associated with one.
172 - * After smp_mb(), it's either the correct page_mapping() for
173 - * the page, or an old page_mapping() and the page's own
174 - * page_mapping() has gone NULL.
175 - * The ->sync_page() address_space operation must tolerate
176 - * page_mapping() going NULL. By an amazing coincidence,
177 - * this comes about because none of the users of the page
178 - * in the ->sync_page() methods make essential use of the
179 - * page_mapping(), merely passing the page down to the backing
180 - * device's unplug functions when it's non-NULL, which in turn
181 - * ignore it for all cases but swap, where only page_private(page) is
182 - * of interest. When page_mapping() does go NULL, the entire
183 - * call stack gracefully ignores the page and returns.
184 - * -- wli
185 - */
186 - smp_mb();
187 - mapping = page_mapping(page);
188 - if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
189 - mapping->a_ops->sync_page(page);
190 160 io_schedule();
191 161 return 0;
192 162 }
193 163
194 - static int sync_page_killable(void *word)
164 + static int sleep_on_page_killable(void *word)
195 165 {
196 - sync_page(word);
166 + sleep_on_page(word);
197 167 return fatal_signal_pending(current) ? -EINTR : 0;
198 168 }
199 169
··· 449 479 EXPORT_SYMBOL(__page_cache_alloc);
450 480 #endif
451 481
452 - static int __sleep_on_page_lock(void *word)
453 - {
454 - io_schedule();
455 - return 0;
456 - }
457 -
458 482 /*
459 483 * In order to wait for pages to become available there must be
460 484 * waitqueues associated with pages. By using a hash table of
··· 476 512 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
477 513
478 514 if (test_bit(bit_nr, &page->flags))
479 - __wait_on_bit(page_waitqueue(page), &wait, sync_page,
515 + __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
480 516 TASK_UNINTERRUPTIBLE);
481 517 }
482 518 EXPORT_SYMBOL(wait_on_page_bit);
··· 540 576 /**
541 577 * __lock_page - get a lock on the page, assuming we need to sleep to get it
542 578 * @page: the page to lock
543 - *
544 - * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
545 - * random driver's requestfn sets TASK_RUNNING, we could busywait. However
546 - * chances are that on the second loop, the block layer's plug list is empty,
547 - * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
548 579 */ 549 580 void __lock_page(struct page *page) 550 581 { 551 582 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); 552 583 553 - __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page, 584 + __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page, 554 585 TASK_UNINTERRUPTIBLE); 555 586 } 556 587 EXPORT_SYMBOL(__lock_page); ··· 555 596 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); 556 597 557 598 return __wait_on_bit_lock(page_waitqueue(page), &wait, 558 - sync_page_killable, TASK_KILLABLE); 599 + sleep_on_page_killable, TASK_KILLABLE); 559 600 } 560 601 EXPORT_SYMBOL_GPL(__lock_page_killable); 561 - 562 - /** 563 - * __lock_page_nosync - get a lock on the page, without calling sync_page() 564 - * @page: the page to lock 565 - * 566 - * Variant of lock_page that does not require the caller to hold a reference 567 - * on the page's mapping. 568 - */ 569 - void __lock_page_nosync(struct page *page) 570 - { 571 - DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); 572 - __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock, 573 - TASK_UNINTERRUPTIBLE); 574 - } 575 602 576 603 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, 577 604 unsigned int flags)
+4 -4
mm/memory-failure.c
··· 945 945 collect_procs(ppage, &tokill); 946 946 947 947 if (hpage != ppage) 948 - lock_page_nosync(ppage); 948 + lock_page(ppage); 949 949 950 950 ret = try_to_unmap(ppage, ttu); 951 951 if (ret != SWAP_SUCCESS) ··· 1038 1038 * Check "just unpoisoned", "filter hit", and 1039 1039 * "race with other subpage." 1040 1040 */ 1041 - lock_page_nosync(hpage); 1041 + lock_page(hpage); 1042 1042 if (!PageHWPoison(hpage) 1043 1043 || (hwpoison_filter(p) && TestClearPageHWPoison(p)) 1044 1044 || (p != hpage && TestSetPageHWPoison(hpage))) { ··· 1088 1088 * It's very difficult to mess with pages currently under IO 1089 1089 * and in many cases impossible, so we just avoid it here. 1090 1090 */ 1091 - lock_page_nosync(hpage); 1091 + lock_page(hpage); 1092 1092 1093 1093 /* 1094 1094 * unpoison always clear PG_hwpoison inside page lock ··· 1231 1231 return 0; 1232 1232 } 1233 1233 1234 - lock_page_nosync(page); 1234 + lock_page(page); 1235 1235 /* 1236 1236 * This test is racy because PG_hwpoison is set outside of page lock. 1237 1237 * That's acceptable because that won't trigger kernel panic. Instead,
-4
mm/nommu.c
··· 1842 1842 } 1843 1843 EXPORT_SYMBOL(remap_vmalloc_range); 1844 1844 1845 - void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) 1846 - { 1847 - } 1848 - 1849 1845 unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr, 1850 1846 unsigned long len, unsigned long pgoff, unsigned long flags) 1851 1847 {
+1 -1
mm/page-writeback.c
··· 1239 1239 { 1240 1240 int ret; 1241 1241 1242 - lock_page_nosync(page); 1242 + lock_page(page); 1243 1243 ret = set_page_dirty(page); 1244 1244 unlock_page(page); 1245 1245 return ret;
-12
mm/readahead.c
··· 554 554 555 555 /* do read-ahead */ 556 556 ondemand_readahead(mapping, ra, filp, true, offset, req_size); 557 - 558 - #ifdef CONFIG_BLOCK 559 - /* 560 - * Normally the current page is !uptodate and lock_page() will be 561 - * immediately called to implicitly unplug the device. However this 562 - * is not always true for RAID conifgurations, where data arrives 563 - * not strictly in their submission order. In this case we need to 564 - * explicitly kick off the IO. 565 - */ 566 - if (PageUptodate(page)) 567 - blk_run_backing_dev(mapping->backing_dev_info, NULL); 568 - #endif 569 557 } 570 558 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
-1
mm/shmem.c
··· 224 224 static struct backing_dev_info shmem_backing_dev_info __read_mostly = { 225 225 .ra_pages = 0, /* No readahead */ 226 226 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, 227 - .unplug_io_fn = default_unplug_io_fn, 228 227 }; 229 228 230 229 static LIST_HEAD(shmem_swaplist);
+1 -4
mm/swap_state.c
··· 24 24 25 25 /* 26 26 * swapper_space is a fiction, retained to simplify the path through 27 - * vmscan's shrink_page_list, to make sync_page look nicer, and to allow 28 - * future use of radix_tree tags in the swap cache. 27 + * vmscan's shrink_page_list. 29 28 */ 30 29 static const struct address_space_operations swap_aops = { 31 30 .writepage = swap_writepage, 32 - .sync_page = block_sync_page, 33 31 .set_page_dirty = __set_page_dirty_nobuffers, 34 32 .migratepage = migrate_page, 35 33 }; ··· 35 37 static struct backing_dev_info swap_backing_dev_info = { 36 38 .name = "swap", 37 39 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, 38 - .unplug_io_fn = swap_unplug_io_fn, 39 40 }; 40 41 41 42 struct address_space swapper_space = {
-37
mm/swapfile.c
··· 95 95 } 96 96 97 97 /* 98 - * We need this because the bdev->unplug_fn can sleep and we cannot 99 - * hold swap_lock while calling the unplug_fn. And swap_lock 100 - * cannot be turned into a mutex. 101 - */ 102 - static DECLARE_RWSEM(swap_unplug_sem); 103 - 104 - void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) 105 - { 106 - swp_entry_t entry; 107 - 108 - down_read(&swap_unplug_sem); 109 - entry.val = page_private(page); 110 - if (PageSwapCache(page)) { 111 - struct block_device *bdev = swap_info[swp_type(entry)]->bdev; 112 - struct backing_dev_info *bdi; 113 - 114 - /* 115 - * If the page is removed from swapcache from under us (with a 116 - * racy try_to_unuse/swapoff) we need an additional reference 117 - * count to avoid reading garbage from page_private(page) above. 118 - * If the WARN_ON triggers during a swapoff it maybe the race 119 - * condition and it's harmless. However if it triggers without 120 - * swapoff it signals a problem. 121 - */ 122 - WARN_ON(page_count(page) <= 1); 123 - 124 - bdi = bdev->bd_inode->i_mapping->backing_dev_info; 125 - blk_run_backing_dev(bdi, page); 126 - } 127 - up_read(&swap_unplug_sem); 128 - } 129 - 130 - /* 131 98 * swapon tell device that all the old swap contents can be discarded, 132 99 * to allow the swap device to optimize its wear-levelling. 133 100 */ ··· 1609 1642 spin_unlock(&swap_lock); 1610 1643 goto out_dput; 1611 1644 } 1612 - 1613 - /* wait for any unplug function to finish */ 1614 - down_write(&swap_unplug_sem); 1615 - up_write(&swap_unplug_sem); 1616 1645 1617 1646 destroy_swap_extents(p); 1618 1647 if (p->flags & SWP_CONTINUED)
+1 -1
mm/vmscan.c
··· 358 358 static void handle_write_error(struct address_space *mapping, 359 359 struct page *page, int error) 360 360 { 361 - lock_page_nosync(page); 361 + lock_page(page); 362 362 if (page_mapping(page) == mapping) 363 363 mapping_set_error(mapping, error); 364 364 unlock_page(page);