drivers/block/ll_rw_blk.c

···
 	rq->special = NULL;
 	rq->data_len = 0;
 	rq->data = NULL;
+	rq->nr_phys_segments = 0;
 	rq->sense = NULL;
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
···
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:		request queue where request should be inserted
- * @rw:		READ or WRITE data
+ * @rq:		request structure to fill
  * @ubuf:	the user buffer
  * @len:	length of user data
  *
···
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
-struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
-				unsigned int len)
+int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
+		    unsigned int len)
 {
 	unsigned long uaddr;
-	struct request *rq;
 	struct bio *bio;
+	int reading;
 
 	if (len > (q->max_sectors << 9))
-		return ERR_PTR(-EINVAL);
-	if ((!len && ubuf) || (len && !ubuf))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
+	if (!len || !ubuf)
+		return -EINVAL;
 
-	rq = blk_get_request(q, rw, __GFP_WAIT);
-	if (!rq)
-		return ERR_PTR(-ENOMEM);
+	reading = rq_data_dir(rq) == READ;
 
 	/*
 	 * if alignment requirement is satisfied, map in user pages for
···
 	 */
 	uaddr = (unsigned long) ubuf;
 	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-		bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
-		bio = bio_copy_user(q, uaddr, len, rw == READ);
+		bio = bio_copy_user(q, uaddr, len, reading);
 
 	if (!IS_ERR(bio)) {
 		rq->bio = rq->biotail = bio;
···
 
 		rq->buffer = rq->data = NULL;
 		rq->data_len = len;
-		return rq;
+		return 0;
 	}
 
 	/*
 	 * bio is the err-ptr
 	 */
-	blk_put_request(rq);
-	return (struct request *) bio;
+	return PTR_ERR(bio);
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
+ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * @q:		request queue where request should be inserted
+ * @rq:		request to map data to
+ * @iov:	pointer to the iovec
+ * @iov_count:	number of elements in the iovec
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the callers responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+			struct sg_iovec *iov, int iov_count)
+{
+	struct bio *bio;
+
+	if (!iov || iov_count <= 0)
+		return -EINVAL;
+
+	/* we don't allow misaligned data like bio_map_user() does. If the
+	 * user is using sg, they're expected to know the alignment constraints
+	 * and respect them accordingly */
+	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	rq->bio = rq->biotail = bio;
+	blk_rq_bio_prep(q, rq, bio);
+	rq->buffer = rq->data = NULL;
+	rq->data_len = bio->bi_size;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user_iov);
+
+/**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		request to be unmapped
- * @bio:	bio for the request
+ * @bio:	bio to be unmapped
  * @ulen:	length of user buffer
  *
  * Description:
- *    Unmap a request previously mapped by blk_rq_map_user().
+ *    Unmap a bio previously mapped by blk_rq_map_user().
  */
-int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
 {
 	int ret = 0;
 
···
 		ret = bio_uncopy_user(bio);
 	}
 
-	blk_put_request(rq);
-	return ret;
+	return 0;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
+
+/**
+ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * @q:		request queue where request should be inserted
+ * @rq:		request to fill
+ * @kbuf:	the kernel buffer
+ * @len:	length of user data
+ * @gfp_mask:	memory allocation flags
+ */
+int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+		    unsigned int len, unsigned int gfp_mask)
+{
+	struct bio *bio;
+
+	if (len > (q->max_sectors << 9))
+		return -EINVAL;
+	if (!len || !kbuf)
+		return -EINVAL;
+
+	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	if (rq_data_dir(rq) == WRITE)
+		bio->bi_rw |= (1 << BIO_RW);
+
+	rq->bio = rq->biotail = bio;
+	blk_rq_bio_prep(q, rq, bio);
+
+	rq->buffer = rq->data = NULL;
+	rq->data_len = len;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_rq_map_kern);
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q:		queue to insert the request in
+ * @bd_disk:	matching gendisk
+ * @rq:		request to insert
+ * @at_head:	insert request at head or tail of queue
+ * @done:	I/O completion handler
+ *
+ * Description:
+ *    Insert a fully prepared request at the back of the io scheduler queue
+ *    for execution.  Don't wait for completion.
+ */
+void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+			   struct request *rq, int at_head,
+			   void (*done)(struct request *))
+{
+	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+	rq->rq_disk = bd_disk;
+	rq->flags |= REQ_NOMERGE;
+	rq->end_io = done;
+	elv_add_request(q, rq, where, 1);
+	generic_unplug_device(q);
+}
 
 /**
  * blk_execute_rq - insert a request into queue for execution
  * @q:		queue to insert the request in
  * @bd_disk:	matching gendisk
  * @rq:		request to insert
+ * @at_head:	insert request at head or tail of queue
  *
  * Description:
  *    Insert a fully prepared request at the back of the io scheduler queue
- *    for execution.
+ *    for execution and wait for completion.
  */
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
-		   struct request *rq)
+		   struct request *rq, int at_head)
 {
 	DECLARE_COMPLETION(wait);
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	int err = 0;
-
-	rq->rq_disk = bd_disk;
 
 	/*
 	 * we need an extra reference to the request, so we can look at
···
 		rq->sense_len = 0;
 	}
 
-	rq->flags |= REQ_NOMERGE;
 	rq->waiting = &wait;
-	rq->end_io = blk_end_sync_rq;
-	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-	generic_unplug_device(q);
+	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
 	wait_for_completion(&wait);
 	rq->waiting = NULL;
 
···
 }
 
 EXPORT_SYMBOL(blkdev_issue_flush);
+
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q:		device queue
+ * @disk:	gendisk
+ * @error_sector:	error offset
+ *
+ * Description:
+ *    Devices understanding the SCSI command set, can use this function as
+ *    a helper for issuing a cache flush. Note: driver is required to store
+ *    the error offset (in case of error flushing) in ->sector of struct
+ *    request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+			       sector_t *error_sector)
+{
+	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+	int ret;
+
+	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+	rq->sector = 0;
+	memset(rq->cmd, 0, sizeof(rq->cmd));
+	rq->cmd[0] = 0x35;
+	rq->cmd_len = 12;
+	rq->data = NULL;
+	rq->data_len = 0;
+	rq->timeout = 60 * HZ;
+
+	ret = blk_execute_rq(q, disk, rq, 0);
+
+	if (ret && error_sector)
+		*error_sector = rq->sector;
+
+	blk_put_request(rq);
+	return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
 
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
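With these changes the caller, not blk_rq_map_user(), owns the request: the helper only maps the buffer into an existing request, and blk_execute_rq() gains an at_head flag instead of hard-coding ELEVATOR_INSERT_BACK. A minimal sketch of the new calling convention (the helper name send_user_read and the command setup are illustrative, not part of the patch):

	/* sketch: read `len` bytes into user memory via a packet command,
	 * using the reworked map/execute/unmap API */
	static int send_user_read(request_queue_t *q, struct gendisk *disk,
				  void __user *ubuf, unsigned int len)
	{
		struct request *rq;
		struct bio *bio;
		int ret;

		rq = blk_get_request(q, READ, __GFP_WAIT);	/* caller allocates rq now */
		if (!rq)
			return -ENOMEM;

		ret = blk_rq_map_user(q, rq, ubuf, len);	/* helper only maps */
		if (ret)
			goto out;

		bio = rq->bio;			/* keep for blk_rq_unmap_user() */
		rq->flags |= REQ_BLOCK_PC;
		/* fill in rq->cmd[], rq->cmd_len, rq->timeout here ... */

		ret = blk_execute_rq(q, disk, rq, 0);	/* at_head == 0: tail insert */

		if (blk_rq_unmap_user(bio, len))
			ret = -EFAULT;
	out:
		blk_put_request(rq);
		return ret;
	}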
drivers/block/scsi_ioctl.c | +36 -24
···
 		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
 	unsigned long start_time;
-	int reading, writing;
+	int writing = 0, ret = 0;
 	struct request *rq;
 	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
···
 	if (verify_command(file, cmd))
 		return -EPERM;
 
-	/*
-	 * we'll do that later
-	 */
-	if (hdr->iovec_count)
-		return -EOPNOTSUPP;
-
 	if (hdr->dxfer_len > (q->max_sectors << 9))
 		return -EIO;
 
-	reading = writing = 0;
-	if (hdr->dxfer_len) {
+	if (hdr->dxfer_len)
 		switch (hdr->dxfer_direction) {
 		default:
 			return -EINVAL;
 		case SG_DXFER_TO_FROM_DEV:
-			reading = 1;
-			/* fall through */
 		case SG_DXFER_TO_DEV:
 			writing = 1;
 			break;
 		case SG_DXFER_FROM_DEV:
-			reading = 1;
 			break;
 		}
 
-		rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
-				     hdr->dxfer_len);
+	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+	if (!rq)
+		return -ENOMEM;
 
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
-	} else
-		rq = blk_get_request(q, READ, __GFP_WAIT);
+	if (hdr->iovec_count) {
+		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+		struct sg_iovec *iov;
+
+		iov = kmalloc(size, GFP_KERNEL);
+		if (!iov) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (copy_from_user(iov, hdr->dxferp, size)) {
+			kfree(iov);
+			ret = -EFAULT;
+			goto out;
+		}
+
+		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
+		kfree(iov);
+	} else if (hdr->dxfer_len)
+		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+
+	if (ret)
+		goto out;
 
 	/*
 	 * fill in request structure
···
 	 * (if he doesn't check that is his problem).
 	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
 	 */
-	blk_execute_rq(q, bd_disk, rq);
+	blk_execute_rq(q, bd_disk, rq, 0);
 
 	/* write to all output members */
 	hdr->status = 0xff & rq->errors;
···
 		hdr->sb_len_wr = len;
 	}
 
-	if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
-		return -EFAULT;
+	if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
 	 * structure (struct sg_io_hdr). */
-	return 0;
+out:
+	blk_put_request(rq);
+	return ret;
 }
 
 #define OMAX_SB_LEN 16		/* For backward compatibility */
···
 	rq->data_len = bytes;
 	rq->flags |= REQ_BLOCK_PC;
 
-	blk_execute_rq(q, bd_disk, rq);
+	blk_execute_rq(q, bd_disk, rq, 0);
 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
 	if (err) {
 		if (rq->sense_len && rq->sense) {
···
 		rq->cmd[0] = GPCMD_START_STOP_UNIT;
 		rq->cmd[4] = 0x02 + (close != 0);
 		rq->cmd_len = 6;
-		err = blk_execute_rq(q, bd_disk, rq);
+		err = blk_execute_rq(q, bd_disk, rq, 0);
 		blk_put_request(rq);
 		break;
 	default:
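The sg_io() rework above is what lifts the old -EOPNOTSUPP on hdr->iovec_count: the iovec table is copied in from user space and handed to blk_rq_map_user_iov(). From user space that looks roughly like the following sketch (device, command and sizes are made up for illustration; note that bio_map_user_iov() rejects segments violating the queue's DMA alignment, hence the aligned buffers):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>

	/* sketch: scattered READ(10) into two buffers via one SG_IO call */
	static int scattered_read(int fd)
	{
		static unsigned char buf0[2048] __attribute__((aligned(512)));
		static unsigned char buf1[2048] __attribute__((aligned(512)));
		unsigned char cdb[10] = { 0x28, };	/* READ(10), lba 0 */
		struct sg_iovec iov[2] = {
			{ .iov_base = buf0, .iov_len = sizeof(buf0) },
			{ .iov_base = buf1, .iov_len = sizeof(buf1) },
		};
		struct sg_io_hdr hdr;

		memset(&hdr, 0, sizeof(hdr));
		hdr.interface_id = 'S';
		hdr.dxfer_direction = SG_DXFER_FROM_DEV;
		hdr.cmd_len = sizeof(cdb);
		hdr.cmdp = cdb;
		hdr.iovec_count = 2;	/* used to return -EOPNOTSUPP */
		hdr.dxferp = iov;	/* dxferp points at the iovec table */
		hdr.dxfer_len = sizeof(buf0) + sizeof(buf1);
		hdr.timeout = 60000;	/* milliseconds */

		cdb[8] = hdr.dxfer_len >> 9;	/* blocks, assuming 512b sectors */
		return ioctl(fd, SG_IO, &hdr);
	}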
drivers/ide/ide-disk.c

···
 
 	idedisk_prepare_flush(q, rq);
 
-	ret = blk_execute_rq(q, disk, rq);
+	ret = blk_execute_rq(q, disk, rq, 0);
 
 	/*
 	 * if we failed and caller wants error offset, get it
fs/bio.c | +178 -41
···
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
 
···
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
-				  unsigned long uaddr, unsigned int len,
-				  int write_to_vm)
+static struct bio *__bio_map_user_iov(request_queue_t *q,
+				      struct block_device *bdev,
+				      struct sg_iovec *iov, int iov_count,
+				      int write_to_vm)
 {
-	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned long start = uaddr >> PAGE_SHIFT;
-	const int nr_pages = end - start;
-	int ret, offset, i;
+	int i, j;
+	int nr_pages = 0;
 	struct page **pages;
 	struct bio *bio;
+	int cur_page = 0;
+	int ret, offset;
 
-	/*
-	 * transfer and buffer must be aligned to at least hardsector
-	 * size for now, in the future we can relax this restriction
-	 */
-	if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+
+		nr_pages += end - start;
+		/*
+		 * transfer and buffer must be aligned to at least hardsector
+		 * size for now, in the future we can relax this restriction
+		 */
+		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+			return ERR_PTR(-EINVAL);
+	}
+
+	if (!nr_pages)
 		return ERR_PTR(-EINVAL);
 
 	bio = bio_alloc(GFP_KERNEL, nr_pages);
···
 	if (!pages)
 		goto out;
 
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, uaddr, nr_pages,
-			     write_to_vm, 0, pages, NULL);
-	up_read(&current->mm->mmap_sem);
+	memset(pages, 0, nr_pages * sizeof(struct page *));
 
-	if (ret < nr_pages)
-		goto out;
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+		unsigned long len = iov[i].iov_len;
+		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long start = uaddr >> PAGE_SHIFT;
+		const int local_nr_pages = end - start;
+		const int page_limit = cur_page + local_nr_pages;
+
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, uaddr,
+				     local_nr_pages,
+				     write_to_vm, 0, &pages[cur_page], NULL);
+		up_read(&current->mm->mmap_sem);
 
-	bio->bi_bdev = bdev;
+		if (ret < local_nr_pages)
+			goto out_unmap;
 
-	offset = uaddr & ~PAGE_MASK;
-	for (i = 0; i < nr_pages; i++) {
-		unsigned int bytes = PAGE_SIZE - offset;
-
-		if (len <= 0)
-			break;
+		offset = uaddr & ~PAGE_MASK;
+		for (j = cur_page; j < page_limit; j++) {
+			unsigned int bytes = PAGE_SIZE - offset;
 
-		if (bytes > len)
-			bytes = len;
+			if (len <= 0)
+				break;
+
+			if (bytes > len)
+				bytes = len;
 
+			/*
+			 * sorry...
+			 */
+			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+				break;
+
+			len -= bytes;
+			offset = 0;
+		}
+
+		cur_page = j;
 		/*
-		 * sorry...
+		 * release the pages we didn't map into the bio, if any
 		 */
-		if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
-			break;
-
-		len -= bytes;
-		offset = 0;
+		while (j < page_limit)
+			page_cache_release(pages[j++]);
 	}
-
-	/*
-	 * release the pages we didn't map into the bio, if any
-	 */
-	while (i < nr_pages)
-		page_cache_release(pages[i++]);
 
 	kfree(pages);
 
···
 	if (!write_to_vm)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	bio->bi_bdev = bdev;
 	bio->bi_flags |= (1 << BIO_USER_MAPPED);
 	return bio;
-out:
+
+ out_unmap:
+	for (i = 0; i < nr_pages; i++) {
+		if (!pages[i])
+			break;
+		page_cache_release(pages[i]);
+	}
+ out:
 	kfree(pages);
 	bio_put(bio);
 	return ERR_PTR(ret);
···
 struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
-	struct bio *bio;
+	struct sg_iovec iov;
 
-	bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+	iov.iov_base = (__user void *)uaddr;
+	iov.iov_len = len;
+
+	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+}
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+			     struct sg_iovec *iov, int iov_count,
+			     int write_to_vm)
+{
+	struct bio *bio;
+	int len = 0, i;
+
+	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
 	if (IS_ERR(bio))
 		return bio;
···
 	 * reference to it
 	 */
 	bio_get(bio);
+
+	for (i = 0; i < iov_count; i++)
+		len += iov[i].iov_len;
 
 	if (bio->bi_size == len)
 		return bio;
···
 {
 	__bio_unmap_user(bio);
 	bio_put(bio);
+}
+
+static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	bio_put(bio);
+	return 0;
+}
+
+
+static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+				  unsigned int len, unsigned int gfp_mask)
+{
+	unsigned long kaddr = (unsigned long)data;
+	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long start = kaddr >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	int offset, i;
+	struct bio *bio;
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return ERR_PTR(-ENOMEM);
+
+	offset = offset_in_page(kaddr);
+	for (i = 0; i < nr_pages; i++) {
+		unsigned int bytes = PAGE_SIZE - offset;
+
+		if (len <= 0)
+			break;
+
+		if (bytes > len)
+			bytes = len;
+
+		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
+				   offset) < bytes)
+			break;
+
+		data += bytes;
+		len -= bytes;
+		offset = 0;
+	}
+
+	bio->bi_end_io = bio_map_kern_endio;
+	return bio;
+}
+
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the request_queue_t for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+			 unsigned int gfp_mask)
+{
+	struct bio *bio;
+
+	bio = __bio_map_kern(q, data, len, gfp_mask);
+	if (IS_ERR(bio))
+		return bio;
+
+	if (bio->bi_size == len)
+		return bio;
+
+	/*
+	 * Don't support partial mappings.
+	 */
+	bio_put(bio);
+	return ERR_PTR(-EINVAL);
 }
 
 /*
···
 EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
+EXPORT_SYMBOL(bio_map_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
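bio_map_kern() is the backend for the new blk_rq_map_kern() in ll_rw_blk.c; a driver that wants to fire a packet command at a kernel buffer would normally go through that wrapper rather than build the bio by hand. A hedged sketch (the MODE SENSE command choice and the helper name kernel_mode_sense are illustrative only):

	/* sketch: issue MODE SENSE(10) into a kernel buffer */
	static int kernel_mode_sense(request_queue_t *q, struct gendisk *disk,
				     void *buf, unsigned int len)
	{
		struct request *rq;
		int ret;

		rq = blk_get_request(q, READ, __GFP_WAIT);
		if (!rq)
			return -ENOMEM;

		/* bio_map_kern() builds the bio, blk_rq_map_kern() preps rq */
		ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
		if (ret)
			goto out;

		rq->flags |= REQ_BLOCK_PC;
		memset(rq->cmd, 0, sizeof(rq->cmd));
		rq->cmd[0] = 0x5a;		/* MODE SENSE(10) */
		rq->cmd[7] = (len >> 8) & 0xff;	/* allocation length */
		rq->cmd[8] = len & 0xff;
		rq->cmd_len = 10;
		rq->timeout = 60 * HZ;

		ret = blk_execute_rq(q, disk, rq, 0);
	out:
		blk_put_request(rq);
		return ret;
	}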
include/linux/bio.h | +6
···
 extern int bio_get_nr_vecs(struct block_device *);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
 				unsigned long, unsigned int, int);
+struct sg_iovec;
+extern struct bio *bio_map_user_iov(struct request_queue *,
+				    struct block_device *,
+				    struct sg_iovec *, int, int);
 extern void bio_unmap_user(struct bio *);
+extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
+				unsigned int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
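Taken together, the split also enables fully asynchronous command execution: blk_execute_rq() is now just blk_execute_rq_nowait() plus a completion wait. A sketch of the async form under stated assumptions (struct my_cmd, my_cmd_done and the deferred-cleanup scheme are hypothetical; the caller is assumed to have allocated cmd->rq and filled rq->cmd[]):

	#include <linux/blkdev.h>
	#include <linux/workqueue.h>
	#include <linux/slab.h>

	/* hypothetical per-command state for the async sketch */
	struct my_cmd {
		struct request *rq;
		int result;
		struct work_struct cleanup;
	};

	static void my_cmd_cleanup(void *data)
	{
		struct my_cmd *cmd = data;

		blk_put_request(cmd->rq);	/* safe in process context */
		kfree(cmd);
	}

	/* rq->end_io runs on command completion, typically in interrupt
	 * context, so only record the result and defer the teardown */
	static void my_cmd_done(struct request *rq)
	{
		struct my_cmd *cmd = rq->end_io_data;

		cmd->result = rq->errors;
		schedule_work(&cmd->cleanup);
	}

	static void send_async_cmd(request_queue_t *q, struct gendisk *disk,
				   struct my_cmd *cmd)
	{
		struct request *rq = cmd->rq;

		INIT_WORK(&cmd->cleanup, my_cmd_cleanup, cmd);
		rq->flags |= REQ_BLOCK_PC;
		rq->timeout = 60 * HZ;
		rq->end_io_data = cmd;
		blk_execute_rq_nowait(q, disk, rq, 0, my_cmd_done);
	}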