Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bounce: Refactor __blk_queue_bounce to not use bi_io_vec

A bunch of what __blk_queue_bounce() was doing was problematic for the
immutable bvec work; this cleans that up and the code is quite a bit
smaller, too.

The __bio_for_each_segment() in copy_to_high_bio_irq() was changed
because that one's looping over the original bio, not the bounce bio -
a later patch renames __bio_for_each_segment() ->
bio_for_each_segment_all(), and documents that
bio_for_each_segment_all() is only for code that owns the bio.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>

+19 -54
mm/bounce.c
··· 101 101 	struct bio_vec *tovec, *fromvec;
102 102 	int i;
103 103
104     - 	__bio_for_each_segment(tovec, to, i, 0) {
    104 + 	bio_for_each_segment(tovec, to, i) {
105 105 		fromvec = from->bi_io_vec + i;
106 106
107 107 		/*
··· 218 218 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
219 219 			       mempool_t *pool, int force)
220 220 {
221     - 	struct page *page;
222     - 	struct bio *bio = NULL;
223     - 	int i, rw = bio_data_dir(*bio_orig);
    221 + 	struct bio *bio;
    222 + 	int rw = bio_data_dir(*bio_orig);
224 223 	struct bio_vec *to, *from;
    224 + 	unsigned i;
225 225
226     - 	bio_for_each_segment(from, *bio_orig, i) {
227     - 		page = from->bv_page;
    226 + 	bio_for_each_segment(from, *bio_orig, i)
    227 + 		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
    228 + 			goto bounce;
228 229
229     - 		/*
230     - 		 * is destination page below bounce pfn?
231     - 		 */
    230 + 	return;
    231 + bounce:
    232 + 	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
    233 +
    234 + 	bio_for_each_segment(to, bio, i) {
    235 + 		struct page *page = to->bv_page;
    236 +
232 237 		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
233 238 			continue;
234 239
235     - 		/*
236     - 		 * irk, bounce it
237     - 		 */
238     - 		if (!bio) {
239     - 			unsigned int cnt = (*bio_orig)->bi_vcnt;
240     -
241     - 			bio = bio_alloc(GFP_NOIO, cnt);
242     - 			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
243     - 		}
244     -
245     -
246     - 		to = bio->bi_io_vec + i;
247     -
248     - 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
249     - 		to->bv_len = from->bv_len;
250     - 		to->bv_offset = from->bv_offset;
251 240 		inc_zone_page_state(to->bv_page, NR_BOUNCE);
    241 + 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
252 242
253 243 		if (rw == WRITE) {
254 244 			char *vto, *vfrom;
255 245
256     - 			flush_dcache_page(from->bv_page);
    246 + 			flush_dcache_page(page);
    247 +
257 248 			vto = page_address(to->bv_page) + to->bv_offset;
258     - 			vfrom = kmap(from->bv_page) + from->bv_offset;
    249 + 			vfrom = kmap_atomic(page) + to->bv_offset;
259 250 			memcpy(vto, vfrom, to->bv_len);
260     - 			kunmap(from->bv_page);
    251 + 			kunmap_atomic(vfrom);
261 252 		}
262 253 	}
263     -
264     - 	/*
265     - 	 * no pages bounced
266     - 	 */
267     - 	if (!bio)
268     - 		return;
269 254
270 255 	trace_block_bio_bounce(q, *bio_orig);
271 256
272     - 	/*
273     - 	 * at least one page was bounced, fill in possible non-highmem
274     - 	 * pages
275     - 	 */
276     - 	__bio_for_each_segment(from, *bio_orig, i, 0) {
277     - 		to = bio_iovec_idx(bio, i);
278     - 		if (!to->bv_page) {
279     - 			to->bv_page = from->bv_page;
280     - 			to->bv_len = from->bv_len;
281     - 			to->bv_offset = from->bv_offset;
282     - 		}
283     - 	}
284     -
285     - 	bio->bi_bdev = (*bio_orig)->bi_bdev;
286 257 	bio->bi_flags |= (1 << BIO_BOUNCED);
287     - 	bio->bi_sector = (*bio_orig)->bi_sector;
288     - 	bio->bi_rw = (*bio_orig)->bi_rw;
289     -
290     - 	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
291     - 	bio->bi_idx = (*bio_orig)->bi_idx;
292     - 	bio->bi_size = (*bio_orig)->bi_size;
293 258
294 259 	if (pool == page_pool) {
295 260 		bio->bi_end_io = bounce_end_io_write;