Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: Consolidate bio_alloc_bioset(), bio_kmalloc()

Previously, bio_kmalloc() and bio_alloc_bioset() behaved slightly
differently because there was some almost-duplicated code - this fixes
some of that.

The important change is that previously bio_kmalloc() always set
bi_io_vec = bi_inline_vecs, even if nr_iovecs == 0 - unlike
bio_alloc_bioset(). This would cause bio_has_data() to return true; I
don't know if this resulted in any actual bugs but it was certainly
wrong.

bio_kmalloc() and bio_alloc_bioset() also have different arbitrary
limits on nr_iovecs - 1024 (UIO_MAXIOV) for bio_kmalloc(), 256
(BIO_MAX_PAGES) for bio_alloc_bioset(). This patch doesn't fix that, but
at least they're enforced closer together and hopefully they will be
fixed in a later patch.

This'll also help with some future cleanups - there are a fair number of
functions that allocate bios (e.g. bio_clone()), and now they don't have
to be duplicated for bio_alloc(), bio_alloc_bioset(), and bio_kmalloc().

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
v7: Re-add dropped comments, improve patch description
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Kent Overstreet and committed by
Jens Axboe
3f86a82a 4254bba1

+49 -77
+37 -73
fs/bio.c
··· 55 55 * IO code that does not need private memory pools. 56 56 */ 57 57 struct bio_set *fs_bio_set; 58 + EXPORT_SYMBOL(fs_bio_set); 58 59 59 60 /* 60 61 * Our slab pool management ··· 302 301 * @bs: the bio_set to allocate from. 303 302 * 304 303 * Description: 305 - * bio_alloc_bioset will try its own mempool to satisfy the allocation. 306 - * If %__GFP_WAIT is set then we will block on the internal pool waiting 307 - * for a &struct bio to become free. 308 - **/ 304 + * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is 305 + * backed by the @bs's mempool. 306 + * 307 + * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be 308 + * able to allocate a bio. This is due to the mempool guarantees. To make this 309 + * work, callers must never allocate more than 1 bio at a time from this pool. 310 + * Callers that need to allocate more than 1 bio must always submit the 311 + * previously allocated bio for IO before attempting to allocate a new one. 312 + * Failure to do so can cause deadlocks under memory pressure. 313 + * 314 + * RETURNS: 315 + * Pointer to new bio on success, NULL on failure. 
316 + */ 309 317 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) 310 318 { 319 + unsigned front_pad; 320 + unsigned inline_vecs; 311 321 unsigned long idx = BIO_POOL_NONE; 312 322 struct bio_vec *bvl = NULL; 313 323 struct bio *bio; 314 324 void *p; 315 325 316 - p = mempool_alloc(bs->bio_pool, gfp_mask); 326 + if (!bs) { 327 + if (nr_iovecs > UIO_MAXIOV) 328 + return NULL; 329 + 330 + p = kmalloc(sizeof(struct bio) + 331 + nr_iovecs * sizeof(struct bio_vec), 332 + gfp_mask); 333 + front_pad = 0; 334 + inline_vecs = nr_iovecs; 335 + } else { 336 + p = mempool_alloc(bs->bio_pool, gfp_mask); 337 + front_pad = bs->front_pad; 338 + inline_vecs = BIO_INLINE_VECS; 339 + } 340 + 317 341 if (unlikely(!p)) 318 342 return NULL; 319 - bio = p + bs->front_pad; 320 343 344 + bio = p + front_pad; 321 345 bio_init(bio); 322 - bio->bi_pool = bs; 323 346 324 - if (unlikely(!nr_iovecs)) 325 - goto out_set; 326 - 327 - if (nr_iovecs <= BIO_INLINE_VECS) { 328 - bvl = bio->bi_inline_vecs; 329 - nr_iovecs = BIO_INLINE_VECS; 330 - } else { 347 + if (nr_iovecs > inline_vecs) { 331 348 bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); 332 349 if (unlikely(!bvl)) 333 350 goto err_free; 334 - 335 - nr_iovecs = bvec_nr_vecs(idx); 351 + } else if (nr_iovecs) { 352 + bvl = bio->bi_inline_vecs; 336 353 } 337 - out_set: 354 + 355 + bio->bi_pool = bs; 338 356 bio->bi_flags |= idx << BIO_POOL_OFFSET; 339 357 bio->bi_max_vecs = nr_iovecs; 340 358 bio->bi_io_vec = bvl; ··· 364 344 return NULL; 365 345 } 366 346 EXPORT_SYMBOL(bio_alloc_bioset); 367 - 368 - /** 369 - * bio_alloc - allocate a new bio, memory pool backed 370 - * @gfp_mask: allocation mask to use 371 - * @nr_iovecs: number of iovecs 372 - * 373 - * bio_alloc will allocate a bio and associated bio_vec array that can hold 374 - * at least @nr_iovecs entries. Allocations will be done from the 375 - * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc. 
376 - * 377 - * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate 378 - * a bio. This is due to the mempool guarantees. To make this work, callers 379 - * must never allocate more than 1 bio at a time from this pool. Callers 380 - * that need to allocate more than 1 bio must always submit the previously 381 - * allocated bio for IO before attempting to allocate a new one. Failure to 382 - * do so can cause livelocks under memory pressure. 383 - * 384 - * RETURNS: 385 - * Pointer to new bio on success, NULL on failure. 386 - */ 387 - struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) 388 - { 389 - return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 390 - } 391 - EXPORT_SYMBOL(bio_alloc); 392 - 393 - /** 394 - * bio_kmalloc - allocate a bio for I/O using kmalloc() 395 - * @gfp_mask: the GFP_ mask given to the slab allocator 396 - * @nr_iovecs: number of iovecs to pre-allocate 397 - * 398 - * Description: 399 - * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains 400 - * %__GFP_WAIT, the allocation is guaranteed to succeed. 401 - * 402 - **/ 403 - struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) 404 - { 405 - struct bio *bio; 406 - 407 - if (nr_iovecs > UIO_MAXIOV) 408 - return NULL; 409 - 410 - bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec), 411 - gfp_mask); 412 - if (unlikely(!bio)) 413 - return NULL; 414 - 415 - bio_init(bio); 416 - bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET; 417 - bio->bi_max_vecs = nr_iovecs; 418 - bio->bi_io_vec = bio->bi_inline_vecs; 419 - 420 - return bio; 421 - } 422 - EXPORT_SYMBOL(bio_kmalloc); 423 347 424 348 void zero_fill_bio(struct bio *bio) 425 349 {
+12 -4
include/linux/bio.h
··· 212 212 extern struct bio_set *bioset_create(unsigned int, unsigned int); 213 213 extern void bioset_free(struct bio_set *); 214 214 215 - extern struct bio *bio_alloc(gfp_t, unsigned int); 216 - extern struct bio *bio_kmalloc(gfp_t, unsigned int); 217 215 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); 218 216 extern void bio_put(struct bio *); 217 + 218 + extern struct bio_set *fs_bio_set; 219 + 220 + static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) 221 + { 222 + return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 223 + } 224 + 225 + static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) 226 + { 227 + return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); 228 + } 219 229 220 230 extern void bio_endio(struct bio *, int); 221 231 struct request_queue; ··· 313 303 char *name; 314 304 struct kmem_cache *slab; 315 305 }; 316 - 317 - extern struct bio_set *fs_bio_set; 318 306 319 307 /* 320 308 * a small number of entries is fine, not going to be performance critical.