Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

fix mismerge in ll_rw_blk.c

7 files changed: +400 -104
+163 -29
drivers/block/ll_rw_blk.c
···
         rq->special = NULL;
         rq->data_len = 0;
         rq->data = NULL;
+        rq->nr_phys_segments = 0;
         rq->sense = NULL;
         rq->end_io = NULL;
         rq->end_io_data = NULL;
···
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q: request queue where request should be inserted
- * @rw: READ or WRITE data
+ * @rq: request structure to fill
  * @ubuf: the user buffer
  * @len: length of user data
  *
···
  * original bio must be passed back in to blk_rq_unmap_user() for proper
  * unmapping.
  */
-struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
-                                unsigned int len)
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+                    unsigned int len)
 {
         unsigned long uaddr;
-        struct request *rq;
         struct bio *bio;
+        int reading;
 
         if (len > (q->max_sectors << 9))
-                return ERR_PTR(-EINVAL);
-        if ((!len && ubuf) || (len && !ubuf))
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
+        if (!len || !ubuf)
+                return -EINVAL;
 
-        rq = blk_get_request(q, rw, __GFP_WAIT);
-        if (!rq)
-                return ERR_PTR(-ENOMEM);
+        reading = rq_data_dir(rq) == READ;
 
         /*
          * if alignment requirement is satisfied, map in user pages for
···
          */
         uaddr = (unsigned long) ubuf;
         if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-                bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+                bio = bio_map_user(q, NULL, uaddr, len, reading);
         else
-                bio = bio_copy_user(q, uaddr, len, rw == READ);
+                bio = bio_copy_user(q, uaddr, len, reading);
 
         if (!IS_ERR(bio)) {
                 rq->bio = rq->biotail = bio;
···
 
                 rq->buffer = rq->data = NULL;
                 rq->data_len = len;
-                return rq;
+                return 0;
         }
 
         /*
          * bio is the err-ptr
          */
-        blk_put_request(rq);
-        return (struct request *) bio;
+        return PTR_ERR(bio);
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to map data to
+ * @iov: pointer to the iovec
+ * @iov_count: number of elements in the iovec
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the callers responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+                        struct sg_iovec *iov, int iov_count)
+{
+        struct bio *bio;
+
+        if (!iov || iov_count <= 0)
+                return -EINVAL;
+
+        /* we don't allow misaligned data like bio_map_user() does. If the
+         * user is using sg, they're expected to know the alignment constraints
+         * and respect them accordingly */
+        bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
+        if (IS_ERR(bio))
+                return PTR_ERR(bio);
+
+        rq->bio = rq->biotail = bio;
+        blk_rq_bio_prep(q, rq, bio);
+        rq->buffer = rq->data = NULL;
+        rq->data_len = bio->bi_size;
+        return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
+/**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq: request to be unmapped
- * @bio: bio for the request
+ * @bio: bio to be unmapped
  * @ulen: length of user buffer
  *
  * Description:
- *    Unmap a request previously mapped by blk_rq_map_user().
+ *    Unmap a bio previously mapped by blk_rq_map_user().
  */
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
 {
         int ret = 0;
 
···
                 ret = bio_uncopy_user(bio);
         }
 
-        blk_put_request(rq);
-        return ret;
+        return 0;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
+
+/**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q: request queue where request should be inserted
+ * @rq: request to fill
+ * @kbuf: the kernel buffer
+ * @len: length of user data
+ * @gfp_mask: memory allocation flags
+ */
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+                    unsigned int len, unsigned int gfp_mask)
+{
+        struct bio *bio;
+
+        if (len > (q->max_sectors << 9))
+                return -EINVAL;
+        if (!len || !kbuf)
+                return -EINVAL;
+
+        bio = bio_map_kern(q, kbuf, len, gfp_mask);
+        if (IS_ERR(bio))
+                return PTR_ERR(bio);
+
+        if (rq_data_dir(rq) == WRITE)
+                bio->bi_rw |= (1 << BIO_RW);
+
+        rq->bio = rq->biotail = bio;
+        blk_rq_bio_prep(q, rq, bio);
+
+        rq->buffer = rq->data = NULL;
+        rq->data_len = len;
+        return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_kern);
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ * @done: I/O completion handler
+ *
+ * Description:
+ *    Insert a fully prepared request at the back of the io scheduler queue
+ *    for execution. Don't wait for completion.
+ */
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+                           struct request *rq, int at_head,
+                           void (*done)(struct request *))
+{
+        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+        rq->rq_disk = bd_disk;
+        rq->flags |= REQ_NOMERGE;
+        rq->end_io = done;
+        elv_add_request(q, rq, where, 1);
+        generic_unplug_device(q);
+}
 
 /**
  * blk_execute_rq - insert a request into queue for execution
  * @q: queue to insert the request in
  * @bd_disk: matching gendisk
  * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
  *
  * Description:
  *    Insert a fully prepared request at the back of the io scheduler queue
- *    for execution.
+ *    for execution and wait for completion.
  */
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
-                   struct request *rq)
+                   struct request *rq, int at_head)
 {
         DECLARE_COMPLETION(wait);
         char sense[SCSI_SENSE_BUFFERSIZE];
         int err = 0;
-
-        rq->rq_disk = bd_disk;
 
         /*
          * we need an extra reference to the request, so we can look at
···
                 rq->sense_len = 0;
         }
 
-        rq->flags |= REQ_NOMERGE;
         rq->waiting = &wait;
-        rq->end_io = blk_end_sync_rq;
-        elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-        generic_unplug_device(q);
+        blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
         wait_for_completion(&wait);
         rq->waiting = NULL;
 
···
 }
 
 EXPORT_SYMBOL(blkdev_issue_flush);
+
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q: device queue
+ * @disk: gendisk
+ * @error_sector: error offset
+ *
+ * Description:
+ *    Devices understanding the SCSI command set, can use this function as
+ *    a helper for issuing a cache flush. Note: driver is required to store
+ *    the error offset (in case of error flushing) in ->sector of struct
+ *    request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+                               sector_t *error_sector)
+{
+        struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+        int ret;
+
+        rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+        rq->sector = 0;
+        memset(rq->cmd, 0, sizeof(rq->cmd));
+        rq->cmd[0] = 0x35;
+        rq->cmd_len = 12;
+        rq->data = NULL;
+        rq->data_len = 0;
+        rq->timeout = 60 * HZ;
+
+        ret = blk_execute_rq(q, disk, rq, 0);
+
+        if (ret && error_sector)
+                *error_sector = rq->sector;
+
+        blk_put_request(rq);
+        return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
 
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
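The net effect of the ll_rw_blk.c rework is a change of calling convention: the caller now allocates and frees the request itself, blk_rq_map_user() and blk_rq_map_kern() return 0 or a negative errno instead of a request pointer, blk_rq_unmap_user() takes the bio rather than the request, and blk_execute_rq() grows an at_head argument. A minimal sketch of that lifecycle follows; example_send_user_cmd() is a hypothetical helper that is not part of this patch, and the CDB handling and timeout are illustrative only.

#include <linux/blkdev.h>
#include <linux/string.h>

/* Sketch: issue a packet command whose data buffer lives in user space,
 * using the reworked request-based mapping interface. */
static int example_send_user_cmd(request_queue_t *q, struct gendisk *disk,
                                 unsigned char *cdb, unsigned int cdb_len,
                                 void __user *ubuf, unsigned int len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, WRITE, __GFP_WAIT);     /* caller owns the request now */
        if (!rq)
                return -ENOMEM;

        ret = blk_rq_map_user(q, rq, ubuf, len);        /* returns 0 or -errno, no ERR_PTR */
        if (ret) {
                blk_put_request(rq);
                return ret;
        }
        bio = rq->bio;                                  /* keep the bio for unmapping later */

        memcpy(rq->cmd, cdb, cdb_len);
        rq->cmd_len = cdb_len;
        rq->flags |= REQ_BLOCK_PC;
        rq->timeout = 60 * HZ;

        ret = blk_execute_rq(q, disk, rq, 0);           /* 0 = queue at tail, wait for completion */

        if (blk_rq_unmap_user(bio, len))                /* unmap takes the bio, not the request */
                ret = -EFAULT;

        blk_put_request(rq);                            /* caller also frees the request */
        return ret;
}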
+36 -24
drivers/block/scsi_ioctl.c
···
                    struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
         unsigned long start_time;
-        int reading, writing;
+        int writing = 0, ret = 0;
         struct request *rq;
         struct bio *bio;
         char sense[SCSI_SENSE_BUFFERSIZE];
···
         if (verify_command(file, cmd))
                 return -EPERM;
 
-        /*
-         * we'll do that later
-         */
-        if (hdr->iovec_count)
-                return -EOPNOTSUPP;
-
         if (hdr->dxfer_len > (q->max_sectors << 9))
                 return -EIO;
 
-        reading = writing = 0;
-        if (hdr->dxfer_len) {
+        if (hdr->dxfer_len)
                 switch (hdr->dxfer_direction) {
                 default:
                         return -EINVAL;
                 case SG_DXFER_TO_FROM_DEV:
-                        reading = 1;
-                        /* fall through */
                 case SG_DXFER_TO_DEV:
                         writing = 1;
                         break;
                 case SG_DXFER_FROM_DEV:
-                        reading = 1;
                         break;
                 }
 
-                rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
-                                     hdr->dxfer_len);
+        rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+        if (!rq)
+                return -ENOMEM;
 
-                if (IS_ERR(rq))
-                        return PTR_ERR(rq);
-        } else
-                rq = blk_get_request(q, READ, __GFP_WAIT);
+        if (hdr->iovec_count) {
+                const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+                struct sg_iovec *iov;
+
+                iov = kmalloc(size, GFP_KERNEL);
+                if (!iov) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
+
+                if (copy_from_user(iov, hdr->dxferp, size)) {
+                        kfree(iov);
+                        ret = -EFAULT;
+                        goto out;
+                }
+
+                ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+                kfree(iov);
+        } else if (hdr->dxfer_len)
+                ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+
+        if (ret)
+                goto out;
 
         /*
          * fill in request structure
···
          * (if he doesn't check that is his problem).
          * N.B. a non-zero SCSI status is _not_ necessarily an error.
          */
-        blk_execute_rq(q, bd_disk, rq);
+        blk_execute_rq(q, bd_disk, rq, 0);
 
         /* write to all output members */
         hdr->status = 0xff & rq->errors;
···
                 hdr->sb_len_wr = len;
         }
 
-        if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
-                return -EFAULT;
+        if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+                ret = -EFAULT;
 
         /* may not have succeeded, but output values written to control
          * structure (struct sg_io_hdr). */
-        return 0;
+out:
+        blk_put_request(rq);
+        return ret;
 }
 
 #define OMAX_SB_LEN 16          /* For backward compatibility */
···
         rq->data_len = bytes;
         rq->flags |= REQ_BLOCK_PC;
 
-        blk_execute_rq(q, bd_disk, rq);
+        blk_execute_rq(q, bd_disk, rq, 0);
         err = rq->errors & 0xff;        /* only 8 bit SCSI status */
         if (err) {
                 if (rq->sense_len && rq->sense) {
···
                 rq->cmd[0] = GPCMD_START_STOP_UNIT;
                 rq->cmd[4] = 0x02 + (close != 0);
                 rq->cmd_len = 6;
-                err = blk_execute_rq(q, bd_disk, rq);
+                err = blk_execute_rq(q, bd_disk, rq, 0);
                 blk_put_request(rq);
                 break;
         default:
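Because the scsi_ioctl.c path no longer rejects hdr->iovec_count with -EOPNOTSUPP, SG_IO callers can now hand the block layer a scatter list. A hedged userspace sketch is below; read_two_segments() is a hypothetical helper, the READ(10) CDB, buffer sizes and 512-byte alignment are illustrative, and each segment is assumed to satisfy the queue's DMA alignment as required by blk_rq_map_user_iov().

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* Sketch: SG_IO data-in transfer split across two buffers via iovec_count. */
static int read_two_segments(int fd)
{
        unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0, 8, 0 }; /* READ(10), 8 x 512-byte blocks at LBA 0 */
        unsigned char sense[32];
        static unsigned char buf0[2048] __attribute__((aligned(512)));
        static unsigned char buf1[2048] __attribute__((aligned(512)));
        struct sg_iovec iov[2] = {
                { .iov_base = buf0, .iov_len = sizeof(buf0) },
                { .iov_base = buf1, .iov_len = sizeof(buf1) },
        };
        struct sg_io_hdr hdr;

        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.cmd_len = sizeof(cdb);
        hdr.cmdp = cdb;
        hdr.iovec_count = 2;            /* dxferp points at the sg_iovec array */
        hdr.dxferp = iov;
        hdr.dxfer_len = sizeof(buf0) + sizeof(buf1);
        hdr.sbp = sense;
        hdr.mx_sb_len = sizeof(sense);
        hdr.timeout = 60000;            /* milliseconds */

        return ioctl(fd, SG_IO, &hdr);
}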
+10 -5
drivers/cdrom/cdrom.c
···
         if (!q)
                 return -ENXIO;
 
+        rq = blk_get_request(q, READ, GFP_KERNEL);
+        if (!rq)
+                return -ENOMEM;
+
         cdi->last_sense = 0;
 
         while (nframes) {
···
 
                 len = nr * CD_FRAMESIZE_RAW;
 
-                rq = blk_rq_map_user(q, READ, ubuf, len);
-                if (IS_ERR(rq))
-                        return PTR_ERR(rq);
+                ret = blk_rq_map_user(q, rq, ubuf, len);
+                if (ret)
+                        break;
 
                 memset(rq->cmd, 0, sizeof(rq->cmd));
                 rq->cmd[0] = GPCMD_READ_CD;
···
                 if (rq->bio)
                         blk_queue_bounce(q, &rq->bio);
 
-                if (blk_execute_rq(q, cdi->disk, rq)) {
+                if (blk_execute_rq(q, cdi->disk, rq, 0)) {
                         struct request_sense *s = rq->sense;
                         ret = -EIO;
                         cdi->last_sense = s->sense_key;
                 }
 
-                if (blk_rq_unmap_user(rq, bio, len))
+                if (blk_rq_unmap_user(bio, len))
                         ret = -EFAULT;
 
                 if (ret)
···
                 ubuf += len;
         }
 
+        blk_put_request(rq);
         return ret;
 }
+1 -1
drivers/ide/ide-disk.c
···
 
         idedisk_prepare_flush(q, rq);
 
-        ret = blk_execute_rq(q, disk, rq);
+        ret = blk_execute_rq(q, disk, rq, 0);
 
         /*
          * if we failed and caller wants error offset, get it
+178 -41
fs/bio.c
···
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <scsi/sg.h>            /* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
 
···
         return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
-                                  unsigned long uaddr, unsigned int len,
-                                  int write_to_vm)
+static struct bio *__bio_map_user_iov(request_queue_t *q,
+                                      struct block_device *bdev,
+                                      struct sg_iovec *iov, int iov_count,
+                                      int write_to_vm)
 {
-        unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        unsigned long start = uaddr >> PAGE_SHIFT;
-        const int nr_pages = end - start;
-        int ret, offset, i;
+        int i, j;
+        int nr_pages = 0;
         struct page **pages;
         struct bio *bio;
+        int cur_page = 0;
+        int ret, offset;
 
-        /*
-         * transfer and buffer must be aligned to at least hardsector
-         * size for now, in the future we can relax this restriction
-         */
-        if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+        for (i = 0; i < iov_count; i++) {
+                unsigned long uaddr = (unsigned long)iov[i].iov_base;
+                unsigned long len = iov[i].iov_len;
+                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                unsigned long start = uaddr >> PAGE_SHIFT;
+
+                nr_pages += end - start;
+                /*
+                 * transfer and buffer must be aligned to at least hardsector
+                 * size for now, in the future we can relax this restriction
+                 */
+                if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+                        return ERR_PTR(-EINVAL);
+        }
+
+        if (!nr_pages)
                 return ERR_PTR(-EINVAL);
 
         bio = bio_alloc(GFP_KERNEL, nr_pages);
···
         if (!pages)
                 goto out;
 
-        down_read(&current->mm->mmap_sem);
-        ret = get_user_pages(current, current->mm, uaddr, nr_pages,
-                             write_to_vm, 0, pages, NULL);
-        up_read(&current->mm->mmap_sem);
-
-        if (ret < nr_pages)
-                goto out;
-
-        bio->bi_bdev = bdev;
-
-        offset = uaddr & ~PAGE_MASK;
-        for (i = 0; i < nr_pages; i++) {
-                unsigned int bytes = PAGE_SIZE - offset;
-
-                if (len <= 0)
-                        break;
-
-                if (bytes > len)
-                        bytes = len;
-
-                /*
-                 * sorry...
-                 */
-                if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
-                        break;
-
-                len -= bytes;
-                offset = 0;
-        }
-
-        /*
-         * release the pages we didn't map into the bio, if any
-         */
-        while (i < nr_pages)
-                page_cache_release(pages[i++]);
+        memset(pages, 0, nr_pages * sizeof(struct page *));
+
+        for (i = 0; i < iov_count; i++) {
+                unsigned long uaddr = (unsigned long)iov[i].iov_base;
+                unsigned long len = iov[i].iov_len;
+                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                unsigned long start = uaddr >> PAGE_SHIFT;
+                const int local_nr_pages = end - start;
+                const int page_limit = cur_page + local_nr_pages;
+
+                down_read(&current->mm->mmap_sem);
+                ret = get_user_pages(current, current->mm, uaddr,
+                                     local_nr_pages,
+                                     write_to_vm, 0, &pages[cur_page], NULL);
+                up_read(&current->mm->mmap_sem);
+
+                if (ret < local_nr_pages)
+                        goto out_unmap;
+
+                offset = uaddr & ~PAGE_MASK;
+                for (j = cur_page; j < page_limit; j++) {
+                        unsigned int bytes = PAGE_SIZE - offset;
+
+                        if (len <= 0)
+                                break;
+
+                        if (bytes > len)
+                                bytes = len;
+
+                        /*
+                         * sorry...
+                         */
+                        if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+                                break;
+
+                        len -= bytes;
+                        offset = 0;
+                }
+
+                cur_page = j;
+                /*
+                 * release the pages we didn't map into the bio, if any
+                 */
+                while (j < page_limit)
+                        page_cache_release(pages[j++]);
+        }
 
         kfree(pages);
 
···
         if (!write_to_vm)
                 bio->bi_rw |= (1 << BIO_RW);
 
+        bio->bi_bdev = bdev;
         bio->bi_flags |= (1 << BIO_USER_MAPPED);
         return bio;
-out:
+
+ out_unmap:
+        for (i = 0; i < nr_pages; i++) {
+                if(!pages[i])
+                        break;
+                page_cache_release(pages[i]);
+        }
+ out:
         kfree(pages);
         bio_put(bio);
         return ERR_PTR(ret);
···
 struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
                          unsigned long uaddr, unsigned int len, int write_to_vm)
 {
-        struct bio *bio;
+        struct sg_iovec iov;
 
-        bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+        iov.iov_base = (__user void *)uaddr;
+        iov.iov_len = len;
+
+        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+}
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+                             struct sg_iovec *iov, int iov_count,
+                             int write_to_vm)
+{
+        struct bio *bio;
+        int len = 0, i;
+
+        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
         if (IS_ERR(bio))
                 return bio;
···
          * reference to it
          */
         bio_get(bio);
+
+        for (i = 0; i < iov_count; i++)
+                len += iov[i].iov_len;
 
         if (bio->bi_size == len)
                 return bio;
···
 {
         __bio_unmap_user(bio);
         bio_put(bio);
+}
+
+static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+{
+        if (bio->bi_size)
+                return 1;
+
+        bio_put(bio);
+        return 0;
+}
+
+
+static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+                                  unsigned int len, unsigned int gfp_mask)
+{
+        unsigned long kaddr = (unsigned long)data;
+        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        unsigned long start = kaddr >> PAGE_SHIFT;
+        const int nr_pages = end - start;
+        int offset, i;
+        struct bio *bio;
+
+        bio = bio_alloc(gfp_mask, nr_pages);
+        if (!bio)
+                return ERR_PTR(-ENOMEM);
+
+        offset = offset_in_page(kaddr);
+        for (i = 0; i < nr_pages; i++) {
+                unsigned int bytes = PAGE_SIZE - offset;
+
+                if (len <= 0)
+                        break;
+
+                if (bytes > len)
+                        bytes = len;
+
+                if (__bio_add_page(q, bio, virt_to_page(data), bytes,
+                                   offset) < bytes)
+                        break;
+
+                data += bytes;
+                len -= bytes;
+                offset = 0;
+        }
+
+        bio->bi_end_io = bio_map_kern_endio;
+        return bio;
+}
+
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the request_queue_t for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+                         unsigned int gfp_mask)
+{
+        struct bio *bio;
+
+        bio = __bio_map_kern(q, data, len, gfp_mask);
+        if (IS_ERR(bio))
+                return bio;
+
+        if (bio->bi_size == len)
+                return bio;
+
+        /*
+         * Don't support partial mappings.
+         */
+        bio_put(bio);
+        return ERR_PTR(-EINVAL);
 }
 
 /*
···
 EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
+EXPORT_SYMBOL(bio_map_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
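bio_map_kern() added above is the low-level backend for the new blk_rq_map_kern() export in ll_rw_blk.c. A short hedged sketch of how a driver might use that pair for a command whose payload sits in a kernel buffer; example_send_kernel_buf() is a hypothetical helper, and the sizes, flags and CDB handling are illustrative.

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: issue a data-in packet command from a kmalloc'ed buffer. */
static int example_send_kernel_buf(request_queue_t *q, struct gendisk *disk,
                                   unsigned char *cdb, unsigned int cdb_len,
                                   unsigned int len)
{
        struct request *rq;
        void *buf;
        int ret;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        rq = blk_get_request(q, READ, __GFP_WAIT);      /* READ: data flows from device to buffer */
        if (!rq) {
                kfree(buf);
                return -ENOMEM;
        }

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);     /* builds the bio via bio_map_kern() */
        if (!ret) {
                memcpy(rq->cmd, cdb, cdb_len);
                rq->cmd_len = cdb_len;
                rq->flags |= REQ_BLOCK_PC;
                rq->timeout = 60 * HZ;
                ret = blk_execute_rq(q, disk, rq, 0);
        }

        blk_put_request(rq);
        kfree(buf);
        return ret;
}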
+6
include/linux/bio.h
···
 extern int bio_get_nr_vecs(struct block_device *);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
                                 unsigned long, unsigned int, int);
+struct sg_iovec;
+extern struct bio *bio_map_user_iov(struct request_queue *,
+                                    struct block_device *,
+                                    struct sg_iovec *, int, int);
 extern void bio_unmap_user(struct bio *);
+extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
+                                unsigned int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
+6 -4
include/linux/blkdev.h
···
 extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
-extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *);
-
+extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
+extern int blk_rq_unmap_user(struct bio *, unsigned int);
+extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int);
+extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
+extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+                          struct request *, int);
 static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
         return bdev->bd_disk->queue;