Repository: Linux kernel mirror (for testing), git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

block: add dma alignment and padding support to blk_rq_map_kern

This patch adds bio_copy_kern, similar to bio_copy_user. blk_rq_map_kern
uses bio_copy_kern instead of bio_map_kern when the kernel buffer or its
length violates the queue's DMA alignment or padding constraints.

bio_copy_kern uses temporary pages, and the bi_end_io callback frees
these pages. bio_copy_kern saves the original kernel buffer in
bio->bi_private, so it doesn't need something like struct bio_map_data
to store information about the caller.
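
As a rough illustration of when the new copy path is taken, here is a minimal, hypothetical caller sketch. The queue q, the 515-byte buffer, and the surrounding request handling are assumptions made for illustration; they are not part of this patch.

	/*
	 * Hypothetical caller: a driver issuing a command whose payload
	 * sits in an oddly sized kernel buffer.  Command setup, submission
	 * (e.g. blk_execute_rq) and error handling are omitted.
	 */
	struct request *rq;
	void *kbuf;
	int err;

	kbuf = kmalloc(515, GFP_KERNEL);	/* length not DMA aligned */
	rq = blk_get_request(q, READ, __GFP_WAIT);
	err = blk_rq_map_kern(q, rq, kbuf, 515, GFP_KERNEL);
	/*
	 * If kbuf or its length violates queue_dma_alignment(q) or
	 * q->dma_pad_mask, the request is now backed by bounce pages via
	 * bio_copy_kern(), and completion copies the data back into kbuf
	 * for reads.
	 */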

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

Authored by FUJITA Tomonori, committed by Jens Axboe
68154e90 657e93be

3 files changed, +112 -1

block/blk-map.c: +20 -1
···
  * @kbuf:	the kernel buffer
  * @len:	length of user data
  * @gfp_mask:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer is used.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
+	unsigned long kaddr;
+	unsigned int alignment;
+	int reading = rq_data_dir(rq) == READ;
+	int do_copy = 0;
 	struct bio *bio;
 
 	if (len > (q->max_hw_sectors << 9))
···
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	kaddr = (unsigned long)kbuf;
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	do_copy = ((kaddr & alignment) || (len & alignment));
+
+	if (do_copy)
+		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+	else
+		bio = bio_map_kern(q, kbuf, len, gfp_mask);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
 		bio->bi_rw |= (1 << BIO_RW);
+
+	if (do_copy)
+		rq->cmd_flags |= REQ_COPY_USER;
 
 	blk_rq_bio_prep(q, rq, bio);
 	blk_queue_bounce(q, &rq->bio);
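
To make the new check concrete, a small worked example with assumed values follows; the alignment mask, padding mask and buffer address are illustrative, not taken from a real queue.

	/* Assume queue_dma_alignment(q) == 511 and q->dma_pad_mask == 7. */
	unsigned long kaddr     = 0xffff880012345604UL;	/* buffer address */
	unsigned int  len       = 515;
	unsigned int  alignment = 511 | 7;		/* == 0x1ff */
	int do_copy = (kaddr & alignment) || (len & alignment);
	/*
	 * kaddr & 0x1ff == 0x004 and len & 0x1ff == 0x003, so do_copy is
	 * true: blk_rq_map_kern() takes the bio_copy_kern() bounce path
	 * and sets REQ_COPY_USER on the request.
	 */
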
fs/bio.c: +90
···
 	return ERR_PTR(-EINVAL);
 }
 
+static void bio_copy_kern_endio(struct bio *bio, int err)
+{
+	struct bio_vec *bvec;
+	const int read = bio_data_dir(bio) == READ;
+	char *p = bio->bi_private;
+	int i;
+
+	__bio_for_each_segment(bvec, bio, i, 0) {
+		char *addr = page_address(bvec->bv_page);
+
+		if (read && !err)
+			memcpy(p, addr, bvec->bv_len);
+
+		__free_page(bvec->bv_page);
+		p += bvec->bv_len;
+	}
+
+	bio_put(bio);
+}
+
+/**
+ *	bio_copy_kern	-	copy kernel address into bio
+ *	@q: the struct request_queue for the bio
+ *	@data: pointer to buffer to copy
+ *	@len: length in bytes
+ *	@gfp_mask: allocation flags for bio and page allocation
+ *
+ *	copy the kernel address into a bio suitable for io to a block
+ *	device. Returns an error pointer in case of error.
+ */
+struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+			  gfp_t gfp_mask, int reading)
+{
+	unsigned long kaddr = (unsigned long)data;
+	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = kaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	struct bio *bio;
+	struct bio_vec *bvec;
+	int i, ret;
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	while (len) {
+		struct page *page;
+		unsigned int bytes = PAGE_SIZE;
+
+		if (bytes > len)
+			bytes = len;
+
+		page = alloc_page(q->bounce_gfp | gfp_mask);
+		if (!page) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+
+		len -= bytes;
+	}
+
+	if (!reading) {
+		void *p = data;
+
+		bio_for_each_segment(bvec, bio, i) {
+			char *addr = page_address(bvec->bv_page);
+
+			memcpy(addr, p, bvec->bv_len);
+			p += bvec->bv_len;
+		}
+	}
+
+	bio->bi_private = data;
+	bio->bi_end_io = bio_copy_kern_endio;
+	return bio;
+cleanup:
+	bio_for_each_segment(bvec, bio, i)
+		__free_page(bvec->bv_page);
+
+	bio_put(bio);
+
+	return ERR_PTR(ret);
+}
+
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  * for performing direct-IO in BIOs.
···
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
 EXPORT_SYMBOL(bio_map_kern);
+EXPORT_SYMBOL(bio_copy_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
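
As a side note on the allocation sizing in bio_copy_kern(), here is a quick worked example; PAGE_SIZE and the buffer address are assumed values for illustration.

	/* Assume PAGE_SIZE == 4096 (PAGE_SHIFT == 12), data == 0x1000f00, len == 8192. */
	unsigned long kaddr = 0x1000f00;
	unsigned long end   = (kaddr + 8192 + 4096 - 1) >> 12;	/* 0x1003 */
	unsigned long start = kaddr >> 12;			/* 0x1000 */
	int nr_pages = end - start;				/* 3 */
	/*
	 * nr_pages sizes the bio's vector for the pages spanned by the
	 * source buffer, while the while (len) loop only allocates
	 * ceil(len / PAGE_SIZE) == 2 bounce pages, each filled from
	 * offset 0; any extra slot simply goes unused.
	 */
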
include/linux/bio.h: +2
···
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
 				gfp_t);
+extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
+				 gfp_t, int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);