Merge branch 'request-queue-t' of git://git.kernel.dk/linux-2.6-block
* 'request-queue-t' of git://git.kernel.dk/linux-2.6-block:
  [BLOCK] Add request_queue_t and mark it deprecated
  [BLOCK] Get rid of request_queue_t typedef
Documentation/block/barrier.txt
···
 used to indicate the whole sequence of performing barrier requests
 including draining and flushing.

-typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq);
+typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq);

-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 		      prepare_flush_fn *prepare_flush_fn);

 @q			: the queue in question
···
 For example, SCSI disk driver's prepare_flush_fn looks like the
 following.

-static void sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
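The two converted signatures above are the whole barrier-registration API; nothing else changes for drivers. A minimal sketch of how a driver might wire this up against the 2.6.22-era interface (the mydrv_* names and the SYNCHRONIZE_CACHE flush command are illustrative, not part of this patch):

static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 60 * HZ;
	rq->cmd[0] = SYNCHRONIZE_CACHE;		/* 0x35, from <scsi/scsi.h> */
	rq->cmd_len = 10;
}

static int mydrv_enable_barriers(struct request_queue *q)
{
	/* QUEUE_ORDERED_DRAIN_FLUSH: drain the queue, then send the flush
	 * built by the prepare_flush_fn above. */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				 mydrv_prepare_flush);
}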
+5-5
Documentation/block/biodoc.txt
···
 queueing (typically known as tagged command queueing), ie manage more than
 one outstanding command on a queue at any given time.

-	blk_queue_init_tags(request_queue_t *q, int depth)
+	blk_queue_init_tags(struct request_queue *q, int depth)

 	Initialize internal command tagging structures for a maximum
 	depth of 'depth'.

-	blk_queue_free_tags((request_queue_t *q)
+	blk_queue_free_tags((struct request_queue *q)

 	Teardown tag info associated with the queue. This will be done
 	automatically by block if blk_queue_cleanup() is called on a queue
···
 The above are initialization and exit management, the main helpers during
 normal operations are:

-	blk_queue_start_tag(request_queue_t *q, struct request *rq)
+	blk_queue_start_tag(struct request_queue *q, struct request *rq)

 	Start tagged operation for this request. A free tag number between
 	0 and 'depth' is assigned to the request (rq->tag holds this number),
···
 	for this queue is already achieved (or if the tag wasn't started for
 	some other reason), 1 is returned. Otherwise 0 is returned.

-	blk_queue_end_tag(request_queue_t *q, struct request *rq)
+	blk_queue_end_tag(struct request_queue *q, struct request *rq)

 	End tagged operation on this request. 'rq' is removed from the internal
 	book keeping structures.
···
 the hardware and software block queue and enable the driver to sanely restart
 all the outstanding requests. There's a third helper to do that:

-	blk_queue_invalidate_tags(request_queue_t *q)
+	blk_queue_invalidate_tags(struct request_queue *q)

 	Clear the internal block tag queue and re-add all the pending requests
 	to the request queue. The driver will receive them again on the
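The helpers above pair up naturally in a driver's request and completion paths. A hedged sketch of that lifecycle against the 2.6.22-era API (mydrv_* names are illustrative; the data-stage completion via end_that_request_first() is elided):

/* Setup: blk_queue_init_tags(q, 64, NULL) during probe. */

static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		/* Assigns rq->tag and dequeues rq; returns 1 when the tag
		 * depth is exhausted, so stop and wait for a completion
		 * to free a tag. */
		if (blk_queue_start_tag(q, rq))
			break;
		mydrv_issue_to_hardware(rq);	/* hypothetical helper */
	}
}

/* Completion path, called with the queue lock held. */
static void mydrv_complete(struct request_queue *q, struct request *rq)
{
	blk_queue_end_tag(q, rq);	/* release rq->tag */
	end_that_request_last(rq, 1);
}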
+1-1
Documentation/block/request.txt
···

 struct bio *biotail		DBI	Last bio in request

-request_queue_t *q		DB	Request queue this request belongs to
+struct request_queue *q	DB	Request queue this request belongs to

 struct request_list *rl	B	Request list this request came from
+1-1
Documentation/iostats.txt
···
     measured from __make_request() to end_that_request_last()).
 Field 9 -- # of I/Os currently in progress
     The only field that should go to zero. Incremented as requests are
-    given to appropriate request_queue_t and decremented as they finish.
+    given to appropriate struct request_queue and decremented as they finish.
 Field 10 -- # of milliseconds spent doing I/Os
     This field increases so long as field 9 is nonzero.
 Field 11 -- weighted # of milliseconds spent doing I/Os
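Field 9's "goes to zero" property is easy to sanity-check from userspace. A small sketch that prints fields 9-11 for one disk (the device name is an assumption; partitions report fewer fields on these kernels, which the column count filters out):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256], name[32];
	unsigned long long f[11];	/* the 11 value fields per disk */
	FILE *fp = fopen("/proc/diskstats", "r");

	if (!fp)
		return 1;
	while (fgets(line, sizeof(line), fp)) {
		int n = sscanf(line,
		    "%*d %*d %31s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
		    name, &f[0], &f[1], &f[2], &f[3], &f[4], &f[5],
		    &f[6], &f[7], &f[8], &f[9], &f[10]);
		if (n == 12 && !strcmp(name, "sda"))
			printf("in flight %llu, io ms %llu, weighted ms %llu\n",
			       f[8], f[9], f[10]);	/* fields 9, 10, 11 */
	}
	fclose(fp);
	return 0;
}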
···469469" Change the ubd device name to \"hd\".\n\n"470470);471471472472-static void do_ubd_request(request_queue_t * q);472472+static void do_ubd_request(struct request_queue * q);473473474474/* Only changed by ubd_init, which is an initcall. */475475int thread_fd = -1;···10811081}1082108210831083/* Called with dev->lock held */10841084-static void do_ubd_request(request_queue_t *q)10841084+static void do_ubd_request(struct request_queue *q)10851085{10861086 struct io_thread_req *io_req;10871087 struct request *req;
+14-12
block/as-iosched.c
···
  * as_completed_request is to be called when a request has completed and
  * returned something to the requesting process, be it an error or data.
  */
-static void as_completed_request(request_queue_t *q, struct request *rq)
+static void as_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct as_data *ad = q->elevator->elevator_data;

···
  * reference unless it replaces the request at somepart of the elevator
  * (ie. the dispatch queue)
  */
-static void as_remove_queued_request(request_queue_t *q, struct request *rq)
+static void as_remove_queued_request(struct request_queue *q,
+				     struct request *rq)
 {
 	const int data_dir = rq_is_sync(rq);
 	struct as_data *ad = q->elevator->elevator_data;
···
  * read/write expire, batch expire, etc, and moves it to the dispatch
  * queue. Returns 1 if a request was found, 0 otherwise.
  */
-static int as_dispatch_request(request_queue_t *q, int force)
+static int as_dispatch_request(struct request_queue *q, int force)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
···
 /*
  * add rq to rbtree and fifo
  */
-static void as_add_request(request_queue_t *q, struct request *rq)
+static void as_add_request(struct request_queue *q, struct request *rq)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	int data_dir;
···
 	RQ_SET_STATE(rq, AS_RQ_QUEUED);
 }

-static void as_activate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(struct request_queue *q, struct request *rq)
 {
 	WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
 	RQ_SET_STATE(rq, AS_RQ_REMOVED);
···
 	atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
 }

-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_deactivate_request(struct request_queue *q, struct request *rq)
 {
 	WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
 	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
···
  * is not empty - it is used in the block layer to check for plugging and
  * merging opportunities
  */
-static int as_queue_empty(request_queue_t *q)
+static int as_queue_empty(struct request_queue *q)
 {
 	struct as_data *ad = q->elevator->elevator_data;

···
 }

 static int
-as_merge(request_queue_t *q, struct request **req, struct bio *bio)
+as_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
···
 	return ELEVATOR_NO_MERGE;
 }

-static void as_merged_request(request_queue_t *q, struct request *req, int type)
+static void as_merged_request(struct request_queue *q, struct request *req,
+			      int type)
 {
 	struct as_data *ad = q->elevator->elevator_data;

···
 	}
 }

-static void as_merged_requests(request_queue_t *q, struct request *req,
+static void as_merged_requests(struct request_queue *q, struct request *req,
 			       struct request *next)
 {
 	/*
···
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }

-static int as_may_queue(request_queue_t *q, int rw)
+static int as_may_queue(struct request_queue *q, int rw)
 {
 	int ret = ELV_MQUEUE_MAY;
 	struct as_data *ad = q->elevator->elevator_data;
···
 /*
  * initialize elevator private data (as_data).
  */
-static void *as_init_queue(request_queue_t *q)
+static void *as_init_queue(struct request_queue *q)
 {
 	struct as_data *ad;
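All of the converted hooks above reach the block layer through one table. For reference, this is (abbreviated) how as-iosched.c registers them; elv_register(&iosched_as) at module init makes the scheduler selectable:

static struct elevator_type iosched_as = {
	.ops = {
		.elevator_merge_fn =		as_merge,
		.elevator_merged_fn =		as_merged_request,
		.elevator_merge_req_fn =	as_merged_requests,
		.elevator_dispatch_fn =		as_dispatch_request,
		.elevator_add_req_fn =		as_add_request,
		.elevator_activate_req_fn =	as_activate_request,
		.elevator_deactivate_req_fn =	as_deactivate_request,
		.elevator_queue_empty_fn =	as_queue_empty,
		.elevator_completed_req_fn =	as_completed_request,
		.elevator_may_queue_fn =	as_may_queue,
		.elevator_init_fn =		as_init_queue,
		/* former/latter request and io-context hooks elided */
	},
	.elevator_name = "anticipatory",
	.elevator_owner = THIS_MODULE,
};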
+5-5
block/blktrace.c
···
 	kfree(bt);
 }

-static int blk_trace_remove(request_queue_t *q)
+static int blk_trace_remove(struct request_queue *q)
 {
 	struct blk_trace *bt;

···
 /*
  * Setup everything required to start tracing
  */
-static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 			   char __user *arg)
 {
 	struct blk_user_trace_setup buts;
···
 	return ret;
 }

-static int blk_trace_startstop(request_queue_t *q, int start)
+static int blk_trace_startstop(struct request_queue *q, int start)
 {
 	struct blk_trace *bt;
 	int ret;
···
  **/
 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	int ret, start = 0;

 	q = bdev_get_queue(bdev);
···
  * @q:   the request queue associated with the device
  *
  **/
-void blk_trace_shutdown(request_queue_t *q)
+void blk_trace_shutdown(struct request_queue *q)
 {
 	if (q->blk_trace) {
 		blk_trace_startstop(q, 0);
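blk_trace_ioctl() above is reached from userspace through four ioctls on the block device node. A hedged sketch of arming a trace (buffer sizes are arbitrary; the trace records are then read from the relay files under debugfs, which is what blktrace(8) does):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>

int trace_on(const char *dev)
{
	struct blk_user_trace_setup buts;
	int fd = open(dev, O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return -1;
	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;	/* relay sub-buffer size */
	buts.buf_nr = 4;		/* number of sub-buffers */
	buts.act_mask = 0xffff;		/* all action types */

	if (ioctl(fd, BLKTRACESETUP, &buts) < 0 ||
	    ioctl(fd, BLKTRACESTART) < 0)
		return -1;
	return fd;	/* later: BLKTRACESTOP, then BLKTRACETEARDOWN */
}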
+6-6
block/bsg.c
···
 #define BSG_VERSION	"0.4"

 struct bsg_device {
-	request_queue_t *queue;
+	struct request_queue *queue;
 	spinlock_t lock;
 	struct list_head busy_list;
 	struct list_head done_list;
···
 	return ret;
 }

-static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 				struct sg_io_v4 *hdr, int has_write_perm)
 {
 	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
···
  * Check if sg_io_v4 from user is allowed and valid
  */
 static int
-bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
 {
 	int ret = 0;

···
 static struct request *
 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
 {
-	request_queue_t *q = bd->queue;
+	struct request_queue *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
 	int ret, rw;
 	unsigned int dxfer_len;
···
  * do final setup of a 'bc' and submit the matching 'rq' to the block
  * layer for io
  */
-static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
+static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
 			    struct bsg_command *bc, struct request *rq)
 {
 	rq->sense = bc->sense;
···
 	bc = NULL;
 	ret = 0;
 	while (nr_commands) {
-		request_queue_t *q = bd->queue;
+		struct request_queue *q = bd->queue;

 		bc = bsg_alloc_command(bd);
 		if (IS_ERR(bc)) {
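The sg_io_v4 header that bsg_validate_sgv4_hdr() checks comes straight from userspace via an SG_IO ioctl on a bsg node. A hedged sketch of issuing a SCSI INQUIRY that way (the device path is an assumption; error handling trimmed):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>

int bsg_inquiry(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char data[96], sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (unsigned long)cdb;
	hdr.request_len = sizeof(cdb);
	hdr.din_xferp = (unsigned long)data;
	hdr.din_xfer_len = sizeof(data);
	hdr.response = (unsigned long)sense;
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 30000;		/* milliseconds */

	return fd < 0 ? -1 : ioctl(fd, SG_IO, &hdr);
}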
block/ll_rw_blk.c
···
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
-static int __make_request(request_queue_t *q, struct bio *bio);
+static int __make_request(struct request_queue *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);

 /*
···
 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
 	struct backing_dev_info *ret = NULL;
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);

 	if (q)
 		ret = &q->backing_dev_info;
···
  * cdb from the request data for instance.
  *
  */
-void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
+void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
 	q->prep_rq_fn = pfn;
 }
···
  * no merge_bvec_fn is defined for a queue, and only the fixed limits are
  * honored.
  */
-void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
+void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
 	q->merge_bvec_fn = mbfn;
 }

 EXPORT_SYMBOL(blk_queue_merge_bvec);

-void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
 }
···
  * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  * blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 {
 	/*
 	 * set defaults
···

 EXPORT_SYMBOL(blk_queue_make_request);

-static void rq_init(request_queue_t *q, struct request *rq)
+static void rq_init(struct request_queue *q, struct request *rq)
 {
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->donelist);
···
  * feature should call this function and indicate so.
  *
  **/
-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 		      prepare_flush_fn *prepare_flush_fn)
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
···
  * to the block layer by defining it through this call.
  *
  **/
-void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
+void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
 {
 	q->issue_flush_fn = iff;
 }
···
 /*
  * Cache flushing for ordered writes handling
  */
-inline unsigned blk_ordered_cur_seq(request_queue_t *q)
+inline unsigned blk_ordered_cur_seq(struct request_queue *q)
 {
 	if (!q->ordseq)
 		return 0;
···

 unsigned blk_ordered_req_seq(struct request *rq)
 {
-	request_queue_t *q = rq->q;
+	struct request_queue *q = rq->q;

 	BUG_ON(q->ordseq == 0);

···
 	return QUEUE_ORDSEQ_DONE;
 }

-void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
+void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
 	struct request *rq;
 	int uptodate;
···
 	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
 }

-static void queue_flush(request_queue_t *q, unsigned which)
+static void queue_flush(struct request_queue *q, unsigned which)
 {
 	struct request *rq;
 	rq_end_io_fn *end_io;
···
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }

-static inline struct request *start_ordered(request_queue_t *q,
+static inline struct request *start_ordered(struct request_queue *q,
 					    struct request *rq)
 {
 	q->bi_size = 0;
···
 	return rq;
 }

-int blk_do_ordered(request_queue_t *q, struct request **rqp)
+int blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
 	int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
···

 static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
 {
-	request_queue_t *q = bio->bi_private;
+	struct request_queue *q = bio->bi_private;

 	/*
 	 * This is dry run, restore bio_sector and size. We'll finish
···
 static int ordered_bio_endio(struct request *rq, struct bio *bio,
 			     unsigned int nbytes, int error)
 {
-	request_queue_t *q = rq->q;
+	struct request_queue *q = rq->q;
 	bio_end_io_t *endio;
 	void *private;

···
  * blk_queue_bounce_limit to have lower memory pages allocated as bounce
  * buffers for doing I/O to pages residing above @page.
  **/
-void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
 	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
 	int dma = 0;
···
  * Enables a low level driver to set an upper limit on the size of
  * received requests.
  **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
+void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
···
  * physical data segments in a request. This would be the largest sized
  * scatter list the driver could handle.
  **/
-void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_phys_segments(struct request_queue *q,
+				 unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
···
  * address/length pairs the host adapter can actually give as once
  * to the device.
  **/
-void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_hw_segments(struct request_queue *q,
+			       unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
···
  * Enables a low level driver to set an upper limit on the size of a
  * coalesced segment
  **/
-void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
+void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
···
  * even internal read-modify-write operations). Usually the default
  * of 512 covers most hardware.
  **/
-void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
+void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
 	q->hardsect_size = size;
 }
···
  * @t:	the stacking driver (top)
  * @b:  the underlying device (bottom)
  **/
-void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
+void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	/* zero is "infinity" */
 	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
···
  * @q:  the request queue for the device
  * @mask:  the memory boundary mask
  **/
-void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
+void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
···
  * this is used when buiding direct io requests for the queue.
  *
  **/
-void blk_queue_dma_alignment(request_queue_t *q, int mask)
+void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
 	q->dma_alignment = mask;
 }
···
  *
  * no locks need be held.
  **/
-struct request *blk_queue_find_tag(request_queue_t *q, int tag)
+struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 {
 	return blk_map_queue_find_tag(q->queue_tags, tag);
 }
···
  * blk_cleanup_queue() will take care of calling this function, if tagging
  * has been used. So there's no need to call this directly.
  **/
-static void __blk_queue_free_tags(request_queue_t *q)
+static void __blk_queue_free_tags(struct request_queue *q)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;

···
  * This is used to disabled tagged queuing to a device, yet leave
  * queue in function.
  **/
-void blk_queue_free_tags(request_queue_t *q)
+void blk_queue_free_tags(struct request_queue *q)
 {
 	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
···
 EXPORT_SYMBOL(blk_queue_free_tags);

 static int
-init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
+init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
 {
 	struct request **tag_index;
 	unsigned long *tag_map;
···
  * @depth:  the maximum queue depth supported
  * @tags: the tag to use
  **/
-int blk_queue_init_tags(request_queue_t *q, int depth,
+int blk_queue_init_tags(struct request_queue *q, int depth,
 			struct blk_queue_tag *tags)
 {
 	int rc;
···
  * Notes:
  *    Must be called with the queue lock held.
  **/
-int blk_queue_resize_tags(request_queue_t *q, int new_depth)
+int blk_queue_resize_tags(struct request_queue *q, int new_depth)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 	struct request **tag_index;
···
  * Notes:
  *   queue lock must be held.
  **/
-void blk_queue_end_tag(request_queue_t *q, struct request *rq)
+void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 	int tag = rq->tag;
···
  * Notes:
  *  queue lock must be held.
  **/
-int blk_queue_start_tag(request_queue_t *q, struct request *rq)
+int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 	int tag;
···
  * Notes:
  *   queue lock must be held.
  **/
-void blk_queue_invalidate_tags(request_queue_t *q)
+void blk_queue_invalidate_tags(struct request_queue *q)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 	struct list_head *tmp, *n;
···

 EXPORT_SYMBOL(blk_dump_rq_flags);

-void blk_recount_segments(request_queue_t *q, struct bio *bio)
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
 	struct bio_vec *bv, *bvprv = NULL;
 	int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
···
 }
 EXPORT_SYMBOL(blk_recount_segments);

-static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
 	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
···
 	return 0;
 }

-static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
 				 struct bio *nxt)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
···
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
  */
-int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+		  struct scatterlist *sg)
 {
 	struct bio_vec *bvec, *bvprv;
 	struct bio *bio;
···
  * specific ones if so desired
  */

-static inline int ll_new_mergeable(request_queue_t *q,
+static inline int ll_new_mergeable(struct request_queue *q,
 				   struct request *req,
 				   struct bio *bio)
 {
···
 	return 1;
 }

-static inline int ll_new_hw_segment(request_queue_t *q,
+static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
 {
···
 	return 1;
 }

-int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
+int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
···
 }
 EXPORT_SYMBOL(ll_back_merge_fn);

-static int ll_front_merge_fn(request_queue_t *q, struct request *req,
+static int ll_front_merge_fn(struct request_queue *q, struct request *req,
 			     struct bio *bio)
 {
 	unsigned short max_sectors;
···
 	return ll_new_hw_segment(q, req, bio);
 }

-static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
+static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
 	int total_phys_segments;
···
  * This is called with interrupts off and no requests on the queue and
  * with the queue lock held.
  */
-void blk_plug_device(request_queue_t *q)
+void blk_plug_device(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());

···
  * remove the queue from the plugged list, if present. called with
  * queue lock held and interrupts disabled.
  */
-int blk_remove_plug(request_queue_t *q)
+int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());

···
 /*
  * remove the plug and let it rip..
  */
-void __generic_unplug_device(request_queue_t *q)
+void __generic_unplug_device(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
···

 /**
  * generic_unplug_device - fire a request queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
  *
  * Description:
  *   Linux uses plugging to build bigger requests queues before letting
···
  *   gets unplugged, the request_fn defined for the queue is invoked and
  *   transfers started.
  **/
-void generic_unplug_device(request_queue_t *q)
+void generic_unplug_device(struct request_queue *q)
 {
 	spin_lock_irq(q->queue_lock);
 	__generic_unplug_device(q);
···
 static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 				   struct page *page)
 {
-	request_queue_t *q = bdi->unplug_io_data;
+	struct request_queue *q = bdi->unplug_io_data;

 	/*
 	 * devices don't necessarily have an ->unplug_fn defined
···

 static void blk_unplug_work(struct work_struct *work)
 {
-	request_queue_t *q = container_of(work, request_queue_t, unplug_work);
+	struct request_queue *q =
+		container_of(work, struct request_queue, unplug_work);

 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
 				q->rq.count[READ] + q->rq.count[WRITE]);
···

 static void blk_unplug_timeout(unsigned long data)
 {
-	request_queue_t *q = (request_queue_t *)data;
+	struct request_queue *q = (struct request_queue *)data;

 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
 				q->rq.count[READ] + q->rq.count[WRITE]);
···

 /**
  * blk_start_queue - restart a previously stopped queue
- * @q:    The &request_queue_t in question
+ * @q:    The &struct request_queue in question
  *
  * Description:
  *   blk_start_queue() will clear the stop flag on the queue, and call
  *   the request_fn for the queue if it was in a stopped state when
  *   entered. Also see blk_stop_queue(). Queue lock must be held.
  **/
-void blk_start_queue(request_queue_t *q)
+void blk_start_queue(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());

···

 /**
  * blk_stop_queue - stop a queue
- * @q:    The &request_queue_t in question
+ * @q:    The &struct request_queue in question
  *
  * Description:
  *   The Linux block layer assumes that a block driver will consume all
···
  *   the driver has signalled it's ready to go again. This happens by calling
  *   blk_start_queue() to restart queue operations. Queue lock must be held.
  **/
-void blk_stop_queue(request_queue_t *q)
+void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
 	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
···
 EXPORT_SYMBOL(blk_run_queue);

 /**
- * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
+ * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
  * @kobj:    the kobj belonging of the request queue to be released
  *
  * Description:
···
  **/
 static void blk_release_queue(struct kobject *kobj)
 {
-	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
 	struct request_list *rl = &q->rq;

 	blk_sync_queue(q);
···
 	kmem_cache_free(requestq_cachep, q);
 }

-void blk_put_queue(request_queue_t *q)
+void blk_put_queue(struct request_queue *q)
 {
 	kobject_put(&q->kobj);
 }
 EXPORT_SYMBOL(blk_put_queue);

-void blk_cleanup_queue(request_queue_t * q)
+void blk_cleanup_queue(struct request_queue * q)
 {
 	mutex_lock(&q->sysfs_lock);
 	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
···

 EXPORT_SYMBOL(blk_cleanup_queue);

-static int blk_init_free_list(request_queue_t *q)
+static int blk_init_free_list(struct request_queue *q)
 {
 	struct request_list *rl = &q->rq;

···
 	return 0;
 }

-request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
+struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
 	return blk_alloc_queue_node(gfp_mask, -1);
 }
···

 static struct kobj_type queue_ktype;

-request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
-	request_queue_t *q;
+	struct request_queue *q;

 	q = kmem_cache_alloc_node(requestq_cachep,
 				gfp_mask | __GFP_ZERO, node_id);
···
  *    when the block device is deactivated (such as at module unload).
  **/

-request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 {
 	return blk_init_queue_node(rfn, lock, -1);
 }
 EXPORT_SYMBOL(blk_init_queue);

-request_queue_t *
+struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-	request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

 	if (!q)
 		return NULL;
···
 }
 EXPORT_SYMBOL(blk_init_queue_node);

-int blk_get_queue(request_queue_t *q)
+int blk_get_queue(struct request_queue *q)
 {
 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
 		kobject_get(&q->kobj);
···

 EXPORT_SYMBOL(blk_get_queue);

-static inline void blk_free_request(request_queue_t *q, struct request *rq)
+static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
 	if (rq->cmd_flags & REQ_ELVPRIV)
 		elv_put_request(q, rq);
···
 }

 static struct request *
-blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

···
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
  */
-static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
 {
 	if (!ioc)
 		return 0;
···
  * is the behaviour we want though - once it gets a wakeup it should be given
  * a nice run.
  */
-static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 {
 	if (!ioc || ioc_batching(q, ioc))
 		return;
···
 	ioc->last_waited = jiffies;
 }

-static void __freed_request(request_queue_t *q, int rw)
+static void __freed_request(struct request_queue *q, int rw)
 {
 	struct request_list *rl = &q->rq;

···
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(request_queue_t *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int rw, int priv)
 {
 	struct request_list *rl = &q->rq;

···
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw_flags,
+static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
···
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw_flags,
+static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 					struct bio *bio)
 {
 	const int rw = rw_flags & 0x01;
···
 	return rq;
 }

-struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	struct request *rq;

···
  *
  * The queue lock must be held with interrupts disabled.
  */
-void blk_start_queueing(request_queue_t *q)
+void blk_start_queueing(struct request_queue *q)
 {
 	if (!blk_queue_plugged(q))
 		q->request_fn(q);
···
  *    more, when that condition happens we need to put the request back
  *    on the queue. Must be called with queue lock held.
  */
-void blk_requeue_request(request_queue_t *q, struct request *rq)
+void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
 	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);

···
  *    of the queue for things like a QUEUE_FULL message from a device, or a
  *    host that is unable to accept a particular command.
  */
-void blk_insert_request(request_queue_t *q, struct request *rq,
+void blk_insert_request(struct request_queue *q, struct request *rq,
 			int at_head, void *data)
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
···
 	return ret;
 }

-static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
 	unsigned long uaddr;
···
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
-int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-		    unsigned long len)
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+		    void __user *ubuf, unsigned long len)
 {
 	unsigned long bytes_read = 0;
 	struct bio *bio = NULL;
···
  *    original bio must be passed back in to blk_rq_unmap_user() for proper
  *    unmapping.
  */
-int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct sg_iovec *iov, int iov_count, unsigned int len)
 {
 	struct bio *bio;
···
  * @len: length of user data
  * @gfp_mask: memory allocation flags
  */
-int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
 	struct bio *bio;
···
  *    Insert a fully prepared request at the back of the io scheduler queue
  *    for execution. Don't wait for completion.
  */
-void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 			   struct request *rq, int at_head,
 			   rq_end_io_fn *done)
 {
···
  *    Insert a fully prepared request at the back of the io scheduler queue
  *    for execution and wait for completion.
  */
-int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
+int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 		   struct request *rq, int at_head)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
···
  */
 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 {
-	request_queue_t *q;
+	struct request_queue *q;

 	if (bdev->bd_disk == NULL)
 		return -ENXIO;
···
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(request_queue_t * q, struct request * req)
+static inline void add_request(struct request_queue * q, struct request * req)
 {
 	drive_stat_acct(req, req->nr_sectors, 1);

···
 /*
  * queue lock must be held
  */
-void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(struct request_queue *q, struct request *req)
 {
 	if (unlikely(!q))
 		return;
···
 void blk_put_request(struct request *req)
 {
 	unsigned long flags;
-	request_queue_t *q = req->q;
+	struct request_queue *q = req->q;

 	/*
 	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
···
 /*
  * Has to be called with the request spinlock acquired
  */
-static int attempt_merge(request_queue_t *q, struct request *req,
+static int attempt_merge(struct request_queue *q, struct request *req,
 			  struct request *next)
 {
 	if (!rq_mergeable(req) || !rq_mergeable(next))
···
 	return 1;
 }

-static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_back_merge(struct request_queue *q,
+				     struct request *rq)
 {
 	struct request *next = elv_latter_request(q, rq);

···
 	return 0;
 }

-static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_front_merge(struct request_queue *q,
+				      struct request *rq)
 {
 	struct request *prev = elv_former_request(q, rq);

···
 	req->start_time = jiffies;
 }

-static int __make_request(request_queue_t *q, struct bio *bio)
+static int __make_request(struct request_queue *q, struct bio *bio)
 {
 	struct request *req;
 	int el_ret, nr_sectors, barrier, err;
···
  */
 static inline void __generic_make_request(struct bio *bio)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	sector_t maxsector;
 	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
···
 	struct bio *bio, *prevbio = NULL;
 	int nr_phys_segs, nr_hw_segs;
 	unsigned int phys_size, hw_size;
-	request_queue_t *q = rq->q;
+	struct request_queue *q = rq->q;

 	if (!rq->bio)
 		return;
···

 EXPORT_SYMBOL(end_request);

-void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+		     struct bio *bio)
 {
 	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
 	rq->cmd_flags |= (bio->bi_rw & 3);
···
 			sizeof(struct request), 0, SLAB_PANIC, NULL);

 	requestq_cachep = kmem_cache_create("blkdev_queue",
-			sizeof(request_queue_t), 0, SLAB_PANIC, NULL);
+			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
···
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q =
+		container_of(kobj, struct request_queue, kobj);
 	ssize_t res;

 	if (!entry->show)
···
 		    const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q = container_of(kobj, struct request_queue, kobj);

 	ssize_t res;

···
 {
 	int ret;

-	request_queue_t *q = disk->queue;
+	struct request_queue *q = disk->queue;

 	if (!q || !q->request_fn)
 		return -ENXIO;
···

 void blk_unregister_queue(struct gendisk *disk)
 {
-	request_queue_t *q = disk->queue;
+	struct request_queue *q = disk->queue;

 	if (q && q->request_fn) {
 		elv_unregister_queue(q);
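Most of the converted functions in this file are the setup helpers a request_fn-style driver calls once at probe time. A compressed sketch of how they string together after the rename (driver name, limits and lock are illustrative, not from this patch):

static DEFINE_SPINLOCK(mydrv_lock);

static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		/* hand rq to the hardware; complete from the IRQ path */
	}
}

static struct request_queue *mydrv_setup_queue(void)
{
	struct request_queue *q = blk_init_queue(mydrv_request_fn, &mydrv_lock);

	if (!q)
		return NULL;
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);	/* bounce highmem pages */
	blk_queue_max_sectors(q, 128);			/* 64KB per request */
	blk_queue_max_phys_segments(q, 32);
	blk_queue_max_hw_segments(q, 32);
	blk_queue_max_segment_size(q, 65536);
	blk_queue_hardsect_size(q, 512);
	return q;
}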
block/scsi_ioctl.c
···
 	return put_user(sg_version_num, p);
 }

-static int scsi_get_idlun(request_queue_t *q, int __user *p)
+static int scsi_get_idlun(struct request_queue *q, int __user *p)
 {
 	return put_user(0, p);
 }

-static int scsi_get_bus(request_queue_t *q, int __user *p)
+static int scsi_get_bus(struct request_queue *q, int __user *p)
 {
 	return put_user(0, p);
 }

-static int sg_get_timeout(request_queue_t *q)
+static int sg_get_timeout(struct request_queue *q)
 {
 	return q->sg_timeout / (HZ / USER_HZ);
 }

-static int sg_set_timeout(request_queue_t *q, int __user *p)
+static int sg_set_timeout(struct request_queue *q, int __user *p)
 {
 	int timeout, err = get_user(timeout, p);

···
 	return err;
 }

-static int sg_get_reserved_size(request_queue_t *q, int __user *p)
+static int sg_get_reserved_size(struct request_queue *q, int __user *p)
 {
 	unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);

 	return put_user(val, p);
 }

-static int sg_set_reserved_size(request_queue_t *q, int __user *p)
+static int sg_set_reserved_size(struct request_queue *q, int __user *p)
 {
 	int size, err = get_user(size, p);

···
  * will always return that we are ATAPI even for a real SCSI drive, I'm not
  * so sure this is worth doing anything about (why would you care??)
  */
-static int sg_emulated_host(request_queue_t *q, int __user *p)
+static int sg_emulated_host(struct request_queue *q, int __user *p)
 {
 	return put_user(1, p);
 }
···
 }
 EXPORT_SYMBOL_GPL(blk_verify_command);

-static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 			     struct sg_io_hdr *hdr, int has_write_perm)
 {
 	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
···
 	return r;
 }

-static int sg_io(struct file *file, request_queue_t *q,
+static int sg_io(struct file *file, struct request_queue *q,
 		 struct gendisk *bd_disk, struct sg_io_hdr *hdr)
 {
 	unsigned long start_time;
···
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);

 /* Send basic block requests */
-static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
+static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
+			      int cmd, int data)
 {
 	struct request *rq;
 	int err;
···
 	return err;
 }

-static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_disk, int data)
+static inline int blk_send_start_stop(struct request_queue *q,
+				      struct gendisk *bd_disk, int data)
 {
 	return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
 }
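sg_io() above is the kernel end of the classic SG_IO v3 interface; for contrast with the bsg sg_io_v4 example earlier, this is the userspace side. A sketch sending TEST UNIT READY (returns the SCSI status byte; any sg-v3-capable block node works):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int test_unit_ready(const char *dev)
{
	unsigned char cdb[6] = { 0 };	/* TEST UNIT READY is all zeroes */
	unsigned char sense[32];
	struct sg_io_hdr hdr;
	int fd = open(dev, O_RDONLY | O_NONBLOCK);

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_NONE;	/* no data stage */
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 20000;			/* milliseconds */

	if (fd < 0 || ioctl(fd, SG_IO, &hdr) < 0)
		return -1;
	return hdr.status;
}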
+2-2
drivers/acorn/block/fd1772.c
···
 static void config_types(void);
 static int floppy_open(struct inode *inode, struct file *filp);
 static int floppy_release(struct inode *inode, struct file *filp);
-static void do_fd_request(request_queue_t *);
+static void do_fd_request(struct request_queue *);

 /************************* End of Prototypes **************************/

···
 	}
 }

-static void do_fd_request(request_queue_t* q)
+static void do_fd_request(struct request_queue* q)
 {
 	unsigned long flags;

+1-1
drivers/acorn/block/mfmhd.c
···
 	DBG("mfm_request: Dropping out bottom\n");
 }

-static void do_mfm_request(request_queue_t *q)
+static void do_mfm_request(struct request_queue *q)
 {
 	DBG("do_mfm_request: about to mfm_request\n");
 	mfm_request();
drivers/block/cciss.c
···

 static ctlr_info_t *hba[MAX_CTLR];

-static void do_cciss_request(request_queue_t *q);
+static void do_cciss_request(struct request_queue *q);
 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
 static int cciss_open(struct inode *inode, struct file *filep);
 static int cciss_release(struct inode *inode, struct file *filep);
···
 	 */
 	if (h->gendisk[0] != disk) {
 		if (disk) {
-			request_queue_t *q = disk->queue;
+			struct request_queue *q = disk->queue;
 			if (disk->flags & GENHD_FL_UP)
 				del_gendisk(disk);
 			if (q) {
···
 /*
  * Get a request and submit it to the controller.
  */
-static void do_cciss_request(request_queue_t *q)
+static void do_cciss_request(struct request_queue *q)
 {
 	ctlr_info_t *h = q->queuedata;
 	CommandList_struct *c;
···
 	do {
 		drive_info_struct *drv = &(hba[i]->drv[j]);
 		struct gendisk *disk = hba[i]->gendisk[j];
-		request_queue_t *q;
+		struct request_queue *q;

 		/* Check if the disk was allocated already */
 		if (!disk){
···
 	for (j = 0; j < CISS_MAX_LUN; j++) {
 		struct gendisk *disk = hba[i]->gendisk[j];
 		if (disk) {
-			request_queue_t *q = disk->queue;
+			struct request_queue *q = disk->queue;

 			if (disk->flags & GENHD_FL_UP)
 				del_gendisk(disk);
+3-3
drivers/block/cpqarray.c
···
 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);

-static void do_ida_request(request_queue_t *q);
+static void do_ida_request(struct request_queue *q);
 static void start_io(ctlr_info_t *h);

 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
···
 /* pdev is NULL for eisa */
 static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 {
-	request_queue_t *q;
+	struct request_queue *q;
 	int j;

 	/*
···
  * are in here (either via the dummy do_ida_request functions or by being
  * called from the interrupt handler
  */
-static void do_ida_request(request_queue_t *q)
+static void do_ida_request(struct request_queue *q)
 {
 	ctlr_info_t *h = q->queuedata;
 	cmdlist_t *c;
drivers/block/paride/pd.c
···

 /* end of io request engine */

-static void do_pd_request(request_queue_t * q)
+static void do_pd_request(struct request_queue * q)
 {
 	if (pd_req)
 		return;
+2-2
drivers/block/paride/pf.c
···
 #define ATAPI_WRITE_10		0x2a

 static int pf_open(struct inode *inode, struct file *file);
-static void do_pf_request(request_queue_t * q);
+static void do_pf_request(struct request_queue * q);
 static int pf_ioctl(struct inode *inode, struct file *file,
 		    unsigned int cmd, unsigned long arg);
 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
···
 	}
 }

-static void do_pf_request(request_queue_t * q)
+static void do_pf_request(struct request_queue * q)
 {
 	if (pf_busy)
 		return;
+6-6
drivers/block/pktcdvd.c
···
  */
 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
 {
-	request_queue_t *q = bdev_get_queue(pd->bdev);
+	struct request_queue *q = bdev_get_queue(pd->bdev);
 	struct request *rq;
 	int ret = 0;

···
  * Special care is needed if the underlying block device has a small
  * max_phys_segments value.
  */
-static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
 	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
 		/*
···
 {
 	int ret;
 	long lba;
-	request_queue_t *q;
+	struct request_queue *q;

 	/*
 	 * We need to re-open the cdrom device without O_NONBLOCK to be able
···
 	return 0;
 }

-static int pkt_make_request(request_queue_t *q, struct bio *bio)
+static int pkt_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct pktcdvd_device *pd;
 	char b[BDEVNAME_SIZE];
···

-static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
+static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
 {
 	struct pktcdvd_device *pd = q->queuedata;
 	sector_t zone = ZONE(bio->bi_sector, pd);
···

 static void pkt_init_queue(struct pktcdvd_device *pd)
 {
-	request_queue_t *q = pd->disk->queue;
+	struct request_queue *q = pd->disk->queue;

 	blk_queue_make_request(q, pkt_make_request);
 	blk_queue_hardsect_size(q, CD_FRAMESIZE);
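pkt_merge_bvec() is a merge_bvec_fn, the hook a bio-based driver registers with blk_queue_merge_bvec() to cap how much the block layer may add to a bio at a given sector. A hedged sketch of the calling convention (the 64KB zone is an illustrative constraint, not pktcdvd's packet-size logic):

static int mydrv_merge_bvec(struct request_queue *q, struct bio *bio,
			    struct bio_vec *bvec)
{
	/* Never let a bio cross a 64KB (128-sector) boundary. */
	sector_t next = (bio->bi_sector + 128) & ~((sector_t)127);
	int remaining = (next - bio->bi_sector) * 512 - bio->bi_size;

	if (remaining < 0)
		remaining = 0;
	/* Return how many bytes of *bvec may be appended; 0 forces a new bio. */
	return (unsigned int)remaining < bvec->bv_len ? remaining : bvec->bv_len;
}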
+2-2
drivers/block/ps2esdi.c
···

 static int ps2esdi_geninit(void);

-static void do_ps2esdi_request(request_queue_t * q);
+static void do_ps2esdi_request(struct request_queue * q);

 static void ps2esdi_readwrite(int cmd, struct request *req);

···
 }

 /* strategy routine that handles most of the IO requests */
-static void do_ps2esdi_request(request_queue_t * q)
+static void do_ps2esdi_request(struct request_queue * q)
 {
 	struct request *req;
 	/* since, this routine is called with interrupts cleared - they
drivers/md/dm-table.c
···

 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);
 	struct io_restrictions *rs = &ti->limits;

 	/*
···
 	devices = dm_table_get_devices(t);
 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
 	}

···

 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);

 		if (q->unplug_fn)
 			q->unplug_fn(q);
···

 	for (d = devices->next; d != devices; d = d->next) {
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-		request_queue_t *q = bdev_get_queue(dd->bdev);
+		struct request_queue *q = bdev_get_queue(dd->bdev);
 		int err;

 		if (!q->issue_flush_fn)
+5-5
drivers/md/dm.c
···

 	unsigned long flags;

-	request_queue_t *queue;
+	struct request_queue *queue;
 	struct gendisk *disk;
 	char name[16];

···
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static int dm_request(request_queue_t *q, struct bio *bio)
+static int dm_request(struct request_queue *q, struct bio *bio)
 {
 	int r;
 	int rw = bio_data_dir(bio);
···
 	return 0;
 }

-static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
+static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
 			sector_t *error_sector)
 {
 	struct mapped_device *md = q->queuedata;
···
 	return ret;
 }

-static void dm_unplug_all(request_queue_t *q)
+static void dm_unplug_all(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
 	struct dm_table *map = dm_get_table(md);
···

 static int __bind(struct mapped_device *md, struct dm_table *t)
 {
-	request_queue_t *q = md->queue;
+	struct request_queue *q = md->queue;
 	sector_t size;

 	size = dm_table_get_size(t);
+1-1
drivers/md/faulty.c
···
 	conf->nfaults = n+1;
 }

-static int make_request(request_queue_t *q, struct bio *bio)
+static int make_request(struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = (conf_t*)mddev->private;
+7-7
drivers/md/linear.c
···
  *
  *	Return amount of bytes we can take at this offset
  */
-static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	dev_info_t *dev0;
···
 	return maxsectors << 9;
 }

-static void linear_unplug(request_queue_t *q)
+static void linear_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	linear_conf_t *conf = mddev_to_conf(mddev);
 	int i;

 	for (i=0; i < mddev->raid_disks; i++) {
-		request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
+		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
 		if (r_queue->unplug_fn)
 			r_queue->unplug_fn(r_queue);
 	}
 }

-static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
 			      sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
···

 	for (i=0; i < mddev->raid_disks && ret == 0; i++) {
 		struct block_device *bdev = conf->disks[i].rdev->bdev;
-		request_queue_t *r_queue = bdev_get_queue(bdev);
+		struct request_queue *r_queue = bdev_get_queue(bdev);

 		if (!r_queue->issue_flush_fn)
 			ret = -EOPNOTSUPP;
···
 	int i, ret = 0;

 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
-		request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
 		ret |= bdi_congested(&q->backing_dev_info, bits);
 	}
 	return ret;
···
 	return 0;
 }

-static int linear_make_request (request_queue_t *q, struct bio *bio)
+static int linear_make_request (struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
+1-1
drivers/md/md.c
···
 	)


-static int md_fail_request (request_queue_t *q, struct bio *bio)
+static int md_fail_request (struct request_queue *q, struct bio *bio)
 {
 	bio_io_error(bio, bio->bi_size);
 	return 0;
+6-6
drivers/md/multipath.c
···
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)
 		    && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
···
 	rcu_read_unlock();
 }

-static void multipath_unplug(request_queue_t *q)
+static void multipath_unplug(struct request_queue *q)
 {
 	unplug_slaves(q->queuedata);
 }


-static int multipath_make_request (request_queue_t *q, struct bio * bio)
+static int multipath_make_request (struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	multipath_conf_t *conf = mddev_to_conf(mddev);
···
 	seq_printf (seq, "]");
 }

-static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
 				 sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
···
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);

 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
···
 	for (i = 0; i < mddev->raid_disks ; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);

 			ret |= bdi_congested(&q->backing_dev_info, bits);
 			/* Just like multipath_map, we just check the
+7-7
drivers/md/raid0.c
···
 #define MD_DRIVER
 #define MD_PERSONALITY

-static void raid0_unplug(request_queue_t *q)
+static void raid0_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
···
 	int i;

 	for (i=0; i<mddev->raid_disks; i++) {
-		request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);
+		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

 		if (r_queue->unplug_fn)
 			r_queue->unplug_fn(r_queue);
 	}
 }

-static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
···

 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
 		struct block_device *bdev = devlist[i]->bdev;
-		request_queue_t *r_queue = bdev_get_queue(bdev);
+		struct request_queue *r_queue = bdev_get_queue(bdev);

 		if (!r_queue->issue_flush_fn)
 			ret = -EOPNOTSUPP;
···
 	int i, ret = 0;

 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
-		request_queue_t *q = bdev_get_queue(devlist[i]->bdev);
+		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

 		ret |= bdi_congested(&q->backing_dev_info, bits);
 	}
···
  *
  * Return amount of bytes we can accept at this offset
  */
-static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
···
 	return 0;
 }

-static int raid0_make_request (request_queue_t *q, struct bio *bio)
+static int raid0_make_request (struct request_queue *q, struct bio *bio)
 {
 	mddev_t *mddev = q->queuedata;
 	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
+6-6
drivers/md/raid1.c
···
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
···
 	rcu_read_unlock();
 }

-static void raid1_unplug(request_queue_t *q)
+static void raid1_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;

···
 	md_wakeup_thread(mddev->thread);
 }

-static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
···
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);

 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
···
 	for (i = 0; i < mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);

 			/* Note the '|| 1' - when read_balance prefers
 			 * non-congested targets, it can be removed
···
 	return NULL;
 }

-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = mddev_to_conf(mddev);
+7-7
drivers/md/raid10.c
···
  * If near_copies == raid_disk, there are no striping issues,
  * but in that case, the function isn't called at all.
  */
-static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
+static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
 				 struct bio_vec *bio_vec)
 {
 	mddev_t *mddev = q->queuedata;
···
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
···
 	rcu_read_unlock();
 }

-static void raid10_unplug(request_queue_t *q)
+static void raid10_unplug(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;

···
 	md_wakeup_thread(mddev->thread);
 }

-static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
 			      sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
···
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);

 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
···
 	for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			request_queue_t *q = bdev_get_queue(rdev->bdev);
+			struct request_queue *q = bdev_get_queue(rdev->bdev);

 			ret |= bdi_congested(&q->backing_dev_info, bits);
 		}
···
 	spin_unlock_irq(&conf->resync_lock);
 }

-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
 {
 	mddev_t *mddev = q->queuedata;
 	conf_t *conf = mddev_to_conf(mddev);
+9-9
drivers/md/raid5.c
···
 }

 static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);

 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
 	int pd_idx, int noblock)
···
 	for (i=0; i<mddev->raid_disks; i++) {
 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
···
 	rcu_read_unlock();
 }

-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
···
 	unplug_slaves(mddev);
 }

-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
 			     sector_t *error_sector)
 {
 	mddev_t *mddev = q->queuedata;
···
 		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
-			request_queue_t *r_queue = bdev_get_queue(bdev);
+			struct request_queue *r_queue = bdev_get_queue(bdev);

 			if (!r_queue->issue_flush_fn)
 				ret = -EOPNOTSUPP;
···
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
  */
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
···

 static int bio_fits_rdev(struct bio *bi)
 {
-	request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

 	if ((bi->bi_size>>9) > q->max_sectors)
 		return 0;
···
 }


-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
···
 }


-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
 {
 	mddev_t *mddev = q->queuedata;
 	raid5_conf_t *conf = mddev_to_conf(mddev);
+2-2
drivers/message/i2o/i2o_block.c
···
  * Returns 0 on success or negative error code on failure.
  */

-static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
+static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
 				 sector_t * error_sector)
 {
 	struct i2o_block_device *i2o_blk_dev = queue->queuedata;
···
 {
 	struct i2o_block_request *ireq = req->special;
 	struct i2o_block_device *dev = ireq->i2o_blk_dev;
-	request_queue_t *q = req->q;
+	struct request_queue *q = req->q;
 	unsigned long flags;

 	if (end_that_request_chunk(req, uptodate, nr_bytes)) {
+4-4
drivers/mmc/card/queue.c
···
  * on any queue on this host, and attempt to issue it. This may
  * not be the queue we were asked to process.
  */
-static void mmc_request(request_queue_t *q)
+static void mmc_request(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
···

 void mmc_cleanup_queue(struct mmc_queue *mq)
 {
-	request_queue_t *q = mq->queue;
+	struct request_queue *q = mq->queue;
 	unsigned long flags;

 	/* Mark that we should start throwing out stragglers */
···
  */
 void mmc_queue_suspend(struct mmc_queue *mq)
 {
-	request_queue_t *q = mq->queue;
+	struct request_queue *q = mq->queue;
 	unsigned long flags;

 	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
···
  */
 void mmc_queue_resume(struct mmc_queue *mq)
 {
-	request_queue_t *q = mq->queue;
+	struct request_queue *q = mq->queue;
 	unsigned long flags;

 	if (mq->flags & MMC_QUEUE_SUSPENDED) {
+1-1
drivers/scsi/sr.c
···
 	unsigned char *buffer;
 	int the_result, retries = 3;
 	int sector_size;
-	request_queue_t *queue;
+	struct request_queue *queue;

 	buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
 	if (!buffer)
+15-15
fs/bio.c
···
 	}
 }

-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
···
 	return bio->bi_phys_segments;
 }

-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
···
  */
 void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
-	request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+	struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);

 	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
 		bio_src->bi_max_vecs * sizeof(struct bio_vec));
···
  */
 int bio_get_nr_vecs(struct block_device *bdev)
 {
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);
 	int nr_pages;

 	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
···
 	return nr_pages;
 }

-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 			  *page, unsigned int len, unsigned int offset,
 			  unsigned short max_sectors)
 {
···
  * smaller than PAGE_SIZE, so it is always possible to add a single
  * page to an empty bio. This should only be used by REQ_PC bios.
  */
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
 	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
···
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
 			  unsigned int len, int write_to_vm)
 {
 	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
···
 	return ERR_PTR(ret);
 }

-static struct bio *__bio_map_user_iov(request_queue_t *q,
+static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
 				      struct sg_iovec *iov, int iov_count,
 				      int write_to_vm)
···

 /**
  * bio_map_user - map user address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
  * @bdev: destination block device
  * @uaddr: start of user address
  * @len: length in bytes
···
  * Map the user space address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
 	struct sg_iovec iov;
···

 /**
  * bio_map_user_iov - map user sg_iovec table into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
  * @bdev: destination block device
  * @iov: the iovec.
  * @iov_count: number of elements in the iovec
···
  * Map the user space address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
 			     struct sg_iovec *iov, int iov_count,
 			     int write_to_vm)
 {
···
 }


-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
 				  unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long kaddr = (unsigned long)data;
···

 /**
  * bio_map_kern - map kernel address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
  * @data: pointer to buffer to map
  * @len: length in bytes
  * @gfp_mask: allocation flags for bio allocation
···
  * Map the kernel address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
 			 gfp_t gfp_mask)
 {
 	struct bio *bio;