Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
cfq-iosched: limit coop preemption
cfq-iosched: fix bad return value cfq_should_preempt()
backing-dev: bdi sb prune should be in the unregister path, not destroy
Fix bio_alloc() and bio_kmalloc() documentation
bio_put(): add bio_clone() to the list of functions in the comment

 3 files changed, 32 insertions(+), 18 deletions(-)
block/cfq-iosched.c  +16 -3
···
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* has done a coop jump of the queue */
+	CFQ_CFQQ_FLAG_coop_preempt,	/* coop preempt */
 };
 
 #define CFQ_CFQQ_FNS(name) \
···
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(coop_preempt);
 #undef CFQ_CFQQ_FNS
 
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
···
 {
 	if (!cfqq) {
 		cfqq = cfq_get_next_queue(cfqd);
-		if (cfqq)
+		if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
 			cfq_clear_cfqq_coop(cfqq);
 	}
+
+	if (cfqq)
+		cfq_clear_cfqq_coop_preempt(cfqq);
 
 	__cfq_set_active_queue(cfqd, cfqq);
 	return cfqq;
···
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
-		return false;
+		return true;
 
 	/*
 	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
···
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
-	if (cfq_rq_close(cfqd, rq))
+	if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+	    cfqd->busy_queues == 1)) {
+		/*
+		 * Mark new queue coop_preempt, so its coop flag will not be
+		 * cleared when new queue gets scheduled at the very first time
+		 */
+		cfq_mark_cfqq_coop_preempt(new_cfqq);
+		cfq_mark_cfqq_coop(new_cfqq);
 		return true;
+	}
 
 	return false;
 }
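For readability, here is the queue-selection helper that the third hunk modifies, reassembled into its post-patch form. The enclosing signature (presumably cfq_set_active_queue()) is not part of the hunk shown and is reproduced here from context; the comments are added for this summary and are not in the patch.

static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq) {
		cfqq = cfq_get_next_queue(cfqd);
		/* a queue that preempted as a cooperator keeps its coop flag */
		if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
			cfq_clear_cfqq_coop(cfqq);
	}

	/* the preempt marker only needs to survive until the first schedule */
	if (cfqq)
		cfq_clear_cfqq_coop_preempt(cfqq);

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

Together with the cfq_should_preempt() hunks, the effect is that a close request preempts only when the preempting queue is not already a cooperator (or is the only busy queue), and the coop_preempt marker keeps that queue's coop state alive across its first selection.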
fs/bio.c  +14 -14
···
  * @gfp_mask: allocation mask to use
  * @nr_iovecs: number of iovecs
  *
- * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask
- * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ * bio_alloc will allocate a bio and associated bio_vec array that can hold
+ * at least @nr_iovecs entries. Allocations will be done from the
+ * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
+ *
+ * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
+ * a bio. This is due to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool. Callers
+ * that need to allocate more than 1 bio must always submit the previously
+ * allocated bio for IO before attempting to allocate a new one. Failure to
+ * do so can cause livelocks under memory pressure.
  *
  * RETURNS:
  * Pointer to new bio on success, NULL on failure.
···
 }
 
 /**
- * bio_alloc - allocate a bio for I/O
+ * bio_kmalloc - allocate a bio for I/O using kmalloc()
  * @gfp_mask: the GFP_ mask given to the slab allocator
  * @nr_iovecs: number of iovecs to pre-allocate
  *
  * Description:
- *   bio_alloc will allocate a bio and associated bio_vec array that can hold
- *   at least @nr_iovecs entries. Allocations will be done from the
- *   fs_bio_set. Also see @bio_alloc_bioset.
- *
- *   If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- *   a bio. This is due to the mempool guarantees. To make this work, callers
- *   must never allocate more than 1 bio at a time from this pool. Callers
- *   that need to allocate more than 1 bio must always submit the previously
- *   allocated bio for IO before attempting to allocate a new one. Failure to
- *   do so can cause livelocks under memory pressure.
+ *   Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
+ *   %__GFP_WAIT, the allocation is guaranteed to succeed.
  *
  **/
 struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
···
  *
  * Description:
  *   Put a reference to a &struct bio, either one you have gotten with
- *   bio_alloc or bio_get. The last put of a bio will free it.
+ *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
  **/
 void bio_put(struct bio *bio)
 {
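To illustrate the constraint the corrected bio_alloc() comment describes, here is a minimal, hypothetical sketch: submit_one_page() and my_end_io() are illustrative names, not kernel API. It allocates a single bio from the fs_bio_set mempool, fills it, and submits it before any further bio_alloc() call, which is exactly the usage the comment now requires.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>

static void my_end_io(struct bio *bio, int error)
{
	/* drop the reference taken at allocation time */
	bio_put(bio);
}

static int submit_one_page(struct block_device *bdev, sector_t sector,
			   struct page *page)
{
	/* GFP_NOIO includes __GFP_WAIT, so per the fixed comment this succeeds */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = my_end_io;

	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
		bio_put(bio);
		return -EIO;
	}

	/*
	 * Submit before allocating another bio from the same mempool;
	 * holding several unsubmitted mempool-backed bios can livelock
	 * under memory pressure, as the comment now explains.
	 */
	submit_bio(WRITE, bio);
	return 0;
}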
mm/backing-dev.c  +2 -1
···
 void bdi_unregister(struct backing_dev_info *bdi)
 {
 	if (bdi->dev) {
+		bdi_prune_sb(bdi);
+
 		if (!bdi_cap_flush_forker(bdi))
 			bdi_wb_shutdown(bdi);
 		bdi_debug_unregister(bdi);
···
 		spin_unlock(&inode_lock);
 	}
 
-	bdi_prune_sb(bdi);
 	bdi_unregister(bdi);
 
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
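For context, a minimal sketch of how a driver that embeds a backing_dev_info might set it up and tear it down; struct my_device, the function names and the "my_dev" device name are hypothetical. The point of the hunks above is that bdi_unregister() now prunes sb->s_bdi references itself, before the flusher task is shut down, rather than leaving that to bdi_destroy() afterwards.

#include <linux/backing-dev.h>

struct my_device {
	struct backing_dev_info bdi;	/* illustrative embedding */
};

static int my_device_setup(struct my_device *dev)
{
	int err;

	err = bdi_init(&dev->bdi);
	if (err)
		return err;

	err = bdi_register(&dev->bdi, NULL, "my_dev");
	if (err)
		bdi_destroy(&dev->bdi);
	return err;
}

static void my_device_teardown(struct my_device *dev)
{
	/*
	 * bdi_destroy() calls bdi_unregister(), which after this change
	 * detaches any superblock still pointing at this bdi and shuts
	 * down its writeback task before the device goes away.
	 */
	bdi_destroy(&dev->bdi);
}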