Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[BLOCK] Get rid of request_queue_t typedef

Some of the code has been gradually transitioned to using the proper
struct request_queue, but there's lots left. So do a full sweep of
the kernel and get rid of this typedef and replace its uses with
the proper type.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
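
The conversion is mechanical: every declaration, definition and local variable spelled request_queue_t becomes struct request_queue, with lines re-wrapped where the longer spelling pushes past 80 columns. A minimal sketch of the pattern (the hook names below are illustrative, not taken from the patch):

	/* before: the typedef hides the underlying struct */
	static void example_request_fn(request_queue_t *q);
	static int example_merge_fn(request_queue_t *q, struct request **req,
				    struct bio *bio);

	/* after: the same hooks written against the real type */
	static void example_request_fn(struct request_queue *q);
	static int example_merge_fn(struct request_queue *q, struct request **req,
				    struct bio *bio);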

+529 -510
+3 -3
Documentation/block/barrier.txt
··· 79 79 used to indicate the whole sequence of performing barrier requests 80 80 including draining and flushing. 81 81 82 - typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq); 82 + typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq); 83 83 84 - int blk_queue_ordered(request_queue_t *q, unsigned ordered, 84 + int blk_queue_ordered(struct request_queue *q, unsigned ordered, 85 85 prepare_flush_fn *prepare_flush_fn); 86 86 87 87 @q : the queue in question ··· 92 92 For example, SCSI disk driver's prepare_flush_fn looks like the 93 93 following. 94 94 95 - static void sd_prepare_flush(request_queue_t *q, struct request *rq) 95 + static void sd_prepare_flush(struct request_queue *q, struct request *rq) 96 96 { 97 97 memset(rq->cmd, 0, sizeof(rq->cmd)); 98 98 rq->cmd_type = REQ_TYPE_BLOCK_PC;
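
Callers of blk_queue_ordered() see the same one-word change in the callback they register. A minimal registration sketch (my_prepare_flush and my_driver_init_queue are hypothetical; QUEUE_ORDERED_DRAIN_FLUSH is one of the ordered modes barrier.txt documents):

	static void my_prepare_flush(struct request_queue *q, struct request *rq)
	{
		/* set rq up as a cache-flush command for the device, as
		 * sd_prepare_flush() does above for SCSI disks */
	}

	static int my_driver_init_queue(struct request_queue *q)
	{
		/* drain the queue and issue pre/post flushes around barriers */
		return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
					 my_prepare_flush);
	}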
+5 -5
Documentation/block/biodoc.txt
··· 740 740 queueing (typically known as tagged command queueing), ie manage more than 741 741 one outstanding command on a queue at any given time. 742 742 743 - blk_queue_init_tags(request_queue_t *q, int depth) 743 + blk_queue_init_tags(struct request_queue *q, int depth) 744 744 745 745 Initialize internal command tagging structures for a maximum 746 746 depth of 'depth'. 747 747 748 - blk_queue_free_tags((request_queue_t *q) 748 + blk_queue_free_tags((struct request_queue *q) 749 749 750 750 Teardown tag info associated with the queue. This will be done 751 751 automatically by block if blk_queue_cleanup() is called on a queue ··· 754 754 The above are initialization and exit management, the main helpers during 755 755 normal operations are: 756 756 757 - blk_queue_start_tag(request_queue_t *q, struct request *rq) 757 + blk_queue_start_tag(struct request_queue *q, struct request *rq) 758 758 759 759 Start tagged operation for this request. A free tag number between 760 760 0 and 'depth' is assigned to the request (rq->tag holds this number), ··· 762 762 for this queue is already achieved (or if the tag wasn't started for 763 763 some other reason), 1 is returned. Otherwise 0 is returned. 764 764 765 - blk_queue_end_tag(request_queue_t *q, struct request *rq) 765 + blk_queue_end_tag(struct request_queue *q, struct request *rq) 766 766 767 767 End tagged operation on this request. 'rq' is removed from the internal 768 768 book keeping structures. ··· 781 781 the hardware and software block queue and enable the driver to sanely restart 782 782 all the outstanding requests. There's a third helper to do that: 783 783 784 - blk_queue_invalidate_tags(request_queue_t *q) 784 + blk_queue_invalidate_tags(struct request_queue *q) 785 785 786 786 Clear the internal block tag queue and re-add all the pending requests 787 787 to the request queue. The driver will receive them again on the
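
The tagging helpers keep their semantics and simply take struct request_queue now. A sketch of how a driver's request_fn and completion path might use them after this patch (my_request_fn, my_hw_submit and my_complete are hypothetical driver functions):

	static void my_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = elv_next_request(q)) != NULL) {
			/* returns 1 if the tag depth is already reached */
			if (blk_queue_start_tag(q, rq))
				break;

			/* rq->tag now holds the assigned tag number */
			my_hw_submit(rq);
		}
	}

	/* completion path, called with q->queue_lock held */
	static void my_complete(struct request_queue *q, struct request *rq)
	{
		blk_queue_end_tag(q, rq);
		/* then finish rq with the usual end_that_request_* helpers */
	}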
+1 -1
Documentation/block/request.txt
··· 83 83 84 84 struct bio *biotail DBI Last bio in request 85 85 86 - request_queue_t *q DB Request queue this request belongs to 86 + struct request_queue *q DB Request queue this request belongs to 87 87 88 88 struct request_list *rl B Request list this request came from
+1 -1
Documentation/iostats.txt
··· 79 79 measured from __make_request() to end_that_request_last()). 80 80 Field 9 -- # of I/Os currently in progress 81 81 The only field that should go to zero. Incremented as requests are 82 - given to appropriate request_queue_t and decremented as they finish. 82 + given to appropriate struct request_queue and decremented as they finish. 83 83 Field 10 -- # of milliseconds spent doing I/Os 84 84 This field is increases so long as field 9 is nonzero. 85 85 Field 11 -- weighted # of milliseconds spent doing I/Os
+4 -4
arch/arm/plat-omap/mailbox.c
··· 161 161 /* 162 162 * Mailbox interrupt handler 163 163 */ 164 - static void mbox_txq_fn(request_queue_t * q) 164 + static void mbox_txq_fn(struct request_queue * q) 165 165 { 166 166 } 167 167 168 - static void mbox_rxq_fn(request_queue_t * q) 168 + static void mbox_rxq_fn(struct request_queue * q) 169 169 { 170 170 } 171 171 ··· 180 180 { 181 181 struct request *rq; 182 182 mbox_msg_t msg; 183 - request_queue_t *q = mbox->rxq->queue; 183 + struct request_queue *q = mbox->rxq->queue; 184 184 185 185 disable_mbox_irq(mbox, IRQ_RX); 186 186 ··· 297 297 request_fn_proc * proc, 298 298 void (*work) (struct work_struct *)) 299 299 { 300 - request_queue_t *q; 300 + struct request_queue *q; 301 301 struct omap_mbox_queue *mq; 302 302 303 303 mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
+2 -2
arch/um/drivers/ubd_kern.c
··· 469 469 " Change the ubd device name to \"hd\".\n\n" 470 470 ); 471 471 472 - static void do_ubd_request(request_queue_t * q); 472 + static void do_ubd_request(struct request_queue * q); 473 473 474 474 /* Only changed by ubd_init, which is an initcall. */ 475 475 int thread_fd = -1; ··· 1081 1081 } 1082 1082 1083 1083 /* Called with dev->lock held */ 1084 - static void do_ubd_request(request_queue_t *q) 1084 + static void do_ubd_request(struct request_queue *q) 1085 1085 { 1086 1086 struct io_thread_req *io_req; 1087 1087 struct request *req;
+14 -12
block/as-iosched.c
··· 796 796 * as_completed_request is to be called when a request has completed and 797 797 * returned something to the requesting process, be it an error or data. 798 798 */ 799 - static void as_completed_request(request_queue_t *q, struct request *rq) 799 + static void as_completed_request(struct request_queue *q, struct request *rq) 800 800 { 801 801 struct as_data *ad = q->elevator->elevator_data; 802 802 ··· 853 853 * reference unless it replaces the request at somepart of the elevator 854 854 * (ie. the dispatch queue) 855 855 */ 856 - static void as_remove_queued_request(request_queue_t *q, struct request *rq) 856 + static void as_remove_queued_request(struct request_queue *q, 857 + struct request *rq) 857 858 { 858 859 const int data_dir = rq_is_sync(rq); 859 860 struct as_data *ad = q->elevator->elevator_data; ··· 979 978 * read/write expire, batch expire, etc, and moves it to the dispatch 980 979 * queue. Returns 1 if a request was found, 0 otherwise. 981 980 */ 982 - static int as_dispatch_request(request_queue_t *q, int force) 981 + static int as_dispatch_request(struct request_queue *q, int force) 983 982 { 984 983 struct as_data *ad = q->elevator->elevator_data; 985 984 const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); ··· 1140 1139 /* 1141 1140 * add rq to rbtree and fifo 1142 1141 */ 1143 - static void as_add_request(request_queue_t *q, struct request *rq) 1142 + static void as_add_request(struct request_queue *q, struct request *rq) 1144 1143 { 1145 1144 struct as_data *ad = q->elevator->elevator_data; 1146 1145 int data_dir; ··· 1168 1167 RQ_SET_STATE(rq, AS_RQ_QUEUED); 1169 1168 } 1170 1169 1171 - static void as_activate_request(request_queue_t *q, struct request *rq) 1170 + static void as_activate_request(struct request_queue *q, struct request *rq) 1172 1171 { 1173 1172 WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED); 1174 1173 RQ_SET_STATE(rq, AS_RQ_REMOVED); ··· 1176 1175 atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched); 1177 1176 } 1178 1177 1179 - static void as_deactivate_request(request_queue_t *q, struct request *rq) 1178 + static void as_deactivate_request(struct request_queue *q, struct request *rq) 1180 1179 { 1181 1180 WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED); 1182 1181 RQ_SET_STATE(rq, AS_RQ_DISPATCHED); ··· 1190 1189 * is not empty - it is used in the block layer to check for plugging and 1191 1190 * merging opportunities 1192 1191 */ 1193 - static int as_queue_empty(request_queue_t *q) 1192 + static int as_queue_empty(struct request_queue *q) 1194 1193 { 1195 1194 struct as_data *ad = q->elevator->elevator_data; 1196 1195 ··· 1199 1198 } 1200 1199 1201 1200 static int 1202 - as_merge(request_queue_t *q, struct request **req, struct bio *bio) 1201 + as_merge(struct request_queue *q, struct request **req, struct bio *bio) 1203 1202 { 1204 1203 struct as_data *ad = q->elevator->elevator_data; 1205 1204 sector_t rb_key = bio->bi_sector + bio_sectors(bio); ··· 1217 1216 return ELEVATOR_NO_MERGE; 1218 1217 } 1219 1218 1220 - static void as_merged_request(request_queue_t *q, struct request *req, int type) 1219 + static void as_merged_request(struct request_queue *q, struct request *req, 1220 + int type) 1221 1221 { 1222 1222 struct as_data *ad = q->elevator->elevator_data; 1223 1223 ··· 1236 1234 } 1237 1235 } 1238 1236 1239 - static void as_merged_requests(request_queue_t *q, struct request *req, 1237 + static void as_merged_requests(struct request_queue *q, struct request *req, 1240 1238 struct request *next) 1241 1239 { 1242 1240 /* ··· 1287 1285 
spin_unlock_irqrestore(q->queue_lock, flags); 1288 1286 } 1289 1287 1290 - static int as_may_queue(request_queue_t *q, int rw) 1288 + static int as_may_queue(struct request_queue *q, int rw) 1291 1289 { 1292 1290 int ret = ELV_MQUEUE_MAY; 1293 1291 struct as_data *ad = q->elevator->elevator_data; ··· 1320 1318 /* 1321 1319 * initialize elevator private data (as_data). 1322 1320 */ 1323 - static void *as_init_queue(request_queue_t *q) 1321 + static void *as_init_queue(struct request_queue *q) 1324 1322 { 1325 1323 struct as_data *ad; 1326 1324
+5 -5
block/blktrace.c
··· 231 231 kfree(bt); 232 232 } 233 233 234 - static int blk_trace_remove(request_queue_t *q) 234 + static int blk_trace_remove(struct request_queue *q) 235 235 { 236 236 struct blk_trace *bt; 237 237 ··· 312 312 /* 313 313 * Setup everything required to start tracing 314 314 */ 315 - static int blk_trace_setup(request_queue_t *q, struct block_device *bdev, 315 + static int blk_trace_setup(struct request_queue *q, struct block_device *bdev, 316 316 char __user *arg) 317 317 { 318 318 struct blk_user_trace_setup buts; ··· 401 401 return ret; 402 402 } 403 403 404 - static int blk_trace_startstop(request_queue_t *q, int start) 404 + static int blk_trace_startstop(struct request_queue *q, int start) 405 405 { 406 406 struct blk_trace *bt; 407 407 int ret; ··· 444 444 **/ 445 445 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) 446 446 { 447 - request_queue_t *q; 447 + struct request_queue *q; 448 448 int ret, start = 0; 449 449 450 450 q = bdev_get_queue(bdev); ··· 479 479 * @q: the request queue associated with the device 480 480 * 481 481 **/ 482 - void blk_trace_shutdown(request_queue_t *q) 482 + void blk_trace_shutdown(struct request_queue *q) 483 483 { 484 484 if (q->blk_trace) { 485 485 blk_trace_startstop(q, 0);
+6 -6
block/bsg.c
··· 37 37 #define BSG_VERSION "0.4" 38 38 39 39 struct bsg_device { 40 - request_queue_t *queue; 40 + struct request_queue *queue; 41 41 spinlock_t lock; 42 42 struct list_head busy_list; 43 43 struct list_head done_list; ··· 180 180 return ret; 181 181 } 182 182 183 - static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq, 183 + static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, 184 184 struct sg_io_v4 *hdr, int has_write_perm) 185 185 { 186 186 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ ··· 214 214 * Check if sg_io_v4 from user is allowed and valid 215 215 */ 216 216 static int 217 - bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw) 217 + bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) 218 218 { 219 219 int ret = 0; 220 220 ··· 250 250 static struct request * 251 251 bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr) 252 252 { 253 - request_queue_t *q = bd->queue; 253 + struct request_queue *q = bd->queue; 254 254 struct request *rq, *next_rq = NULL; 255 255 int ret, rw; 256 256 unsigned int dxfer_len; ··· 345 345 * do final setup of a 'bc' and submit the matching 'rq' to the block 346 346 * layer for io 347 347 */ 348 - static void bsg_add_command(struct bsg_device *bd, request_queue_t *q, 348 + static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, 349 349 struct bsg_command *bc, struct request *rq) 350 350 { 351 351 rq->sense = bc->sense; ··· 611 611 bc = NULL; 612 612 ret = 0; 613 613 while (nr_commands) { 614 - request_queue_t *q = bd->queue; 614 + struct request_queue *q = bd->queue; 615 615 616 616 bc = bsg_alloc_command(bd); 617 617 if (IS_ERR(bc)) {
+20 -19
block/cfq-iosched.c
··· 71 71 * Per block device queue structure 72 72 */ 73 73 struct cfq_data { 74 - request_queue_t *queue; 74 + struct request_queue *queue; 75 75 76 76 /* 77 77 * rr list of queues with requests and the count of them ··· 197 197 CFQ_CFQQ_FNS(sync); 198 198 #undef CFQ_CFQQ_FNS 199 199 200 - static void cfq_dispatch_insert(request_queue_t *, struct request *); 200 + static void cfq_dispatch_insert(struct request_queue *, struct request *); 201 201 static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, 202 202 struct task_struct *, gfp_t); 203 203 static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *, ··· 237 237 kblockd_schedule_work(&cfqd->unplug_work); 238 238 } 239 239 240 - static int cfq_queue_empty(request_queue_t *q) 240 + static int cfq_queue_empty(struct request_queue *q) 241 241 { 242 242 struct cfq_data *cfqd = q->elevator->elevator_data; 243 243 ··· 623 623 return NULL; 624 624 } 625 625 626 - static void cfq_activate_request(request_queue_t *q, struct request *rq) 626 + static void cfq_activate_request(struct request_queue *q, struct request *rq) 627 627 { 628 628 struct cfq_data *cfqd = q->elevator->elevator_data; 629 629 ··· 641 641 cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; 642 642 } 643 643 644 - static void cfq_deactivate_request(request_queue_t *q, struct request *rq) 644 + static void cfq_deactivate_request(struct request_queue *q, struct request *rq) 645 645 { 646 646 struct cfq_data *cfqd = q->elevator->elevator_data; 647 647 ··· 665 665 } 666 666 } 667 667 668 - static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) 668 + static int cfq_merge(struct request_queue *q, struct request **req, 669 + struct bio *bio) 669 670 { 670 671 struct cfq_data *cfqd = q->elevator->elevator_data; 671 672 struct request *__rq; ··· 680 679 return ELEVATOR_NO_MERGE; 681 680 } 682 681 683 - static void cfq_merged_request(request_queue_t *q, struct request *req, 682 + static void cfq_merged_request(struct request_queue *q, struct request *req, 684 683 int type) 685 684 { 686 685 if (type == ELEVATOR_FRONT_MERGE) { ··· 691 690 } 692 691 693 692 static void 694 - cfq_merged_requests(request_queue_t *q, struct request *rq, 693 + cfq_merged_requests(struct request_queue *q, struct request *rq, 695 694 struct request *next) 696 695 { 697 696 /* ··· 704 703 cfq_remove_request(next); 705 704 } 706 705 707 - static int cfq_allow_merge(request_queue_t *q, struct request *rq, 706 + static int cfq_allow_merge(struct request_queue *q, struct request *rq, 708 707 struct bio *bio) 709 708 { 710 709 struct cfq_data *cfqd = q->elevator->elevator_data; ··· 914 913 /* 915 914 * Move request from internal lists to the request queue dispatch list. 
916 915 */ 917 - static void cfq_dispatch_insert(request_queue_t *q, struct request *rq) 916 + static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) 918 917 { 919 918 struct cfq_data *cfqd = q->elevator->elevator_data; 920 919 struct cfq_queue *cfqq = RQ_CFQQ(rq); ··· 1094 1093 return dispatched; 1095 1094 } 1096 1095 1097 - static int cfq_dispatch_requests(request_queue_t *q, int force) 1096 + static int cfq_dispatch_requests(struct request_queue *q, int force) 1098 1097 { 1099 1098 struct cfq_data *cfqd = q->elevator->elevator_data; 1100 1099 struct cfq_queue *cfqq; ··· 1215 1214 struct cfq_data *cfqd = cic->key; 1216 1215 1217 1216 if (cfqd) { 1218 - request_queue_t *q = cfqd->queue; 1217 + struct request_queue *q = cfqd->queue; 1219 1218 1220 1219 spin_lock_irq(q->queue_lock); 1221 1220 __cfq_exit_single_io_context(cfqd, cic); ··· 1776 1775 } 1777 1776 } 1778 1777 1779 - static void cfq_insert_request(request_queue_t *q, struct request *rq) 1778 + static void cfq_insert_request(struct request_queue *q, struct request *rq) 1780 1779 { 1781 1780 struct cfq_data *cfqd = q->elevator->elevator_data; 1782 1781 struct cfq_queue *cfqq = RQ_CFQQ(rq); ··· 1790 1789 cfq_rq_enqueued(cfqd, cfqq, rq); 1791 1790 } 1792 1791 1793 - static void cfq_completed_request(request_queue_t *q, struct request *rq) 1792 + static void cfq_completed_request(struct request_queue *q, struct request *rq) 1794 1793 { 1795 1794 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1796 1795 struct cfq_data *cfqd = cfqq->cfqd; ··· 1869 1868 return ELV_MQUEUE_MAY; 1870 1869 } 1871 1870 1872 - static int cfq_may_queue(request_queue_t *q, int rw) 1871 + static int cfq_may_queue(struct request_queue *q, int rw) 1873 1872 { 1874 1873 struct cfq_data *cfqd = q->elevator->elevator_data; 1875 1874 struct task_struct *tsk = current; ··· 1923 1922 * Allocate cfq data structures associated with this request. 1924 1923 */ 1925 1924 static int 1926 - cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask) 1925 + cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) 1927 1926 { 1928 1927 struct cfq_data *cfqd = q->elevator->elevator_data; 1929 1928 struct task_struct *tsk = current; ··· 1975 1974 { 1976 1975 struct cfq_data *cfqd = 1977 1976 container_of(work, struct cfq_data, unplug_work); 1978 - request_queue_t *q = cfqd->queue; 1977 + struct request_queue *q = cfqd->queue; 1979 1978 unsigned long flags; 1980 1979 1981 1980 spin_lock_irqsave(q->queue_lock, flags); ··· 2073 2072 static void cfq_exit_queue(elevator_t *e) 2074 2073 { 2075 2074 struct cfq_data *cfqd = e->elevator_data; 2076 - request_queue_t *q = cfqd->queue; 2075 + struct request_queue *q = cfqd->queue; 2077 2076 2078 2077 cfq_shutdown_timer_wq(cfqd); 2079 2078 ··· 2099 2098 kfree(cfqd); 2100 2099 } 2101 2100 2102 - static void *cfq_init_queue(request_queue_t *q) 2101 + static void *cfq_init_queue(struct request_queue *q) 2103 2102 { 2104 2103 struct cfq_data *cfqd; 2105 2104
+9 -9
block/deadline-iosched.c
··· 106 106 /* 107 107 * remove rq from rbtree and fifo. 108 108 */ 109 - static void deadline_remove_request(request_queue_t *q, struct request *rq) 109 + static void deadline_remove_request(struct request_queue *q, struct request *rq) 110 110 { 111 111 struct deadline_data *dd = q->elevator->elevator_data; 112 112 ··· 115 115 } 116 116 117 117 static int 118 - deadline_merge(request_queue_t *q, struct request **req, struct bio *bio) 118 + deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) 119 119 { 120 120 struct deadline_data *dd = q->elevator->elevator_data; 121 121 struct request *__rq; ··· 144 144 return ret; 145 145 } 146 146 147 - static void deadline_merged_request(request_queue_t *q, struct request *req, 148 - int type) 147 + static void deadline_merged_request(struct request_queue *q, 148 + struct request *req, int type) 149 149 { 150 150 struct deadline_data *dd = q->elevator->elevator_data; 151 151 ··· 159 159 } 160 160 161 161 static void 162 - deadline_merged_requests(request_queue_t *q, struct request *req, 162 + deadline_merged_requests(struct request_queue *q, struct request *req, 163 163 struct request *next) 164 164 { 165 165 /* ··· 185 185 static inline void 186 186 deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq) 187 187 { 188 - request_queue_t *q = rq->q; 188 + struct request_queue *q = rq->q; 189 189 190 190 deadline_remove_request(q, rq); 191 191 elv_dispatch_add_tail(q, rq); ··· 236 236 * deadline_dispatch_requests selects the best request according to 237 237 * read/write expire, fifo_batch, etc 238 238 */ 239 - static int deadline_dispatch_requests(request_queue_t *q, int force) 239 + static int deadline_dispatch_requests(struct request_queue *q, int force) 240 240 { 241 241 struct deadline_data *dd = q->elevator->elevator_data; 242 242 const int reads = !list_empty(&dd->fifo_list[READ]); ··· 335 335 return 1; 336 336 } 337 337 338 - static int deadline_queue_empty(request_queue_t *q) 338 + static int deadline_queue_empty(struct request_queue *q) 339 339 { 340 340 struct deadline_data *dd = q->elevator->elevator_data; 341 341 ··· 356 356 /* 357 357 * initialize elevator private data (deadline_data). 358 358 */ 359 - static void *deadline_init_queue(request_queue_t *q) 359 + static void *deadline_init_queue(struct request_queue *q) 360 360 { 361 361 struct deadline_data *dd; 362 362
+40 -35
block/elevator.c
··· 56 56 */ 57 57 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) 58 58 { 59 - request_queue_t *q = rq->q; 59 + struct request_queue *q = rq->q; 60 60 elevator_t *e = q->elevator; 61 61 62 62 if (e->ops->elevator_allow_merge_fn) ··· 141 141 return e; 142 142 } 143 143 144 - static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq) 144 + static void *elevator_init_queue(struct request_queue *q, 145 + struct elevator_queue *eq) 145 146 { 146 147 return eq->ops->elevator_init_fn(q); 147 148 } 148 149 149 - static void elevator_attach(request_queue_t *q, struct elevator_queue *eq, 150 + static void elevator_attach(struct request_queue *q, struct elevator_queue *eq, 150 151 void *data) 151 152 { 152 153 q->elevator = eq; ··· 173 172 174 173 static struct kobj_type elv_ktype; 175 174 176 - static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e) 175 + static elevator_t *elevator_alloc(struct request_queue *q, 176 + struct elevator_type *e) 177 177 { 178 178 elevator_t *eq; 179 179 int i; ··· 214 212 kfree(e); 215 213 } 216 214 217 - int elevator_init(request_queue_t *q, char *name) 215 + int elevator_init(struct request_queue *q, char *name) 218 216 { 219 217 struct elevator_type *e = NULL; 220 218 struct elevator_queue *eq; ··· 266 264 267 265 EXPORT_SYMBOL(elevator_exit); 268 266 269 - static void elv_activate_rq(request_queue_t *q, struct request *rq) 267 + static void elv_activate_rq(struct request_queue *q, struct request *rq) 270 268 { 271 269 elevator_t *e = q->elevator; 272 270 ··· 274 272 e->ops->elevator_activate_req_fn(q, rq); 275 273 } 276 274 277 - static void elv_deactivate_rq(request_queue_t *q, struct request *rq) 275 + static void elv_deactivate_rq(struct request_queue *q, struct request *rq) 278 276 { 279 277 elevator_t *e = q->elevator; 280 278 ··· 287 285 hlist_del_init(&rq->hash); 288 286 } 289 287 290 - static void elv_rqhash_del(request_queue_t *q, struct request *rq) 288 + static void elv_rqhash_del(struct request_queue *q, struct request *rq) 291 289 { 292 290 if (ELV_ON_HASH(rq)) 293 291 __elv_rqhash_del(rq); 294 292 } 295 293 296 - static void elv_rqhash_add(request_queue_t *q, struct request *rq) 294 + static void elv_rqhash_add(struct request_queue *q, struct request *rq) 297 295 { 298 296 elevator_t *e = q->elevator; 299 297 ··· 301 299 hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); 302 300 } 303 301 304 - static void elv_rqhash_reposition(request_queue_t *q, struct request *rq) 302 + static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) 305 303 { 306 304 __elv_rqhash_del(rq); 307 305 elv_rqhash_add(q, rq); 308 306 } 309 307 310 - static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset) 308 + static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) 311 309 { 312 310 elevator_t *e = q->elevator; 313 311 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; ··· 393 391 * entry. rq is sort insted into the dispatch queue. To be used by 394 392 * specific elevators. 
395 393 */ 396 - void elv_dispatch_sort(request_queue_t *q, struct request *rq) 394 + void elv_dispatch_sort(struct request_queue *q, struct request *rq) 397 395 { 398 396 sector_t boundary; 399 397 struct list_head *entry; ··· 451 449 452 450 EXPORT_SYMBOL(elv_dispatch_add_tail); 453 451 454 - int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) 452 + int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) 455 453 { 456 454 elevator_t *e = q->elevator; 457 455 struct request *__rq; ··· 483 481 return ELEVATOR_NO_MERGE; 484 482 } 485 483 486 - void elv_merged_request(request_queue_t *q, struct request *rq, int type) 484 + void elv_merged_request(struct request_queue *q, struct request *rq, int type) 487 485 { 488 486 elevator_t *e = q->elevator; 489 487 ··· 496 494 q->last_merge = rq; 497 495 } 498 496 499 - void elv_merge_requests(request_queue_t *q, struct request *rq, 497 + void elv_merge_requests(struct request_queue *q, struct request *rq, 500 498 struct request *next) 501 499 { 502 500 elevator_t *e = q->elevator; ··· 511 509 q->last_merge = rq; 512 510 } 513 511 514 - void elv_requeue_request(request_queue_t *q, struct request *rq) 512 + void elv_requeue_request(struct request_queue *q, struct request *rq) 515 513 { 516 514 /* 517 515 * it already went through dequeue, we need to decrement the ··· 528 526 elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); 529 527 } 530 528 531 - static void elv_drain_elevator(request_queue_t *q) 529 + static void elv_drain_elevator(struct request_queue *q) 532 530 { 533 531 static int printed; 534 532 while (q->elevator->ops->elevator_dispatch_fn(q, 1)) ··· 542 540 } 543 541 } 544 542 545 - void elv_insert(request_queue_t *q, struct request *rq, int where) 543 + void elv_insert(struct request_queue *q, struct request *rq, int where) 546 544 { 547 545 struct list_head *pos; 548 546 unsigned ordseq; ··· 640 638 } 641 639 } 642 640 643 - void __elv_add_request(request_queue_t *q, struct request *rq, int where, 641 + void __elv_add_request(struct request_queue *q, struct request *rq, int where, 644 642 int plug) 645 643 { 646 644 if (q->ordcolor) ··· 678 676 679 677 EXPORT_SYMBOL(__elv_add_request); 680 678 681 - void elv_add_request(request_queue_t *q, struct request *rq, int where, 679 + void elv_add_request(struct request_queue *q, struct request *rq, int where, 682 680 int plug) 683 681 { 684 682 unsigned long flags; ··· 690 688 691 689 EXPORT_SYMBOL(elv_add_request); 692 690 693 - static inline struct request *__elv_next_request(request_queue_t *q) 691 + static inline struct request *__elv_next_request(struct request_queue *q) 694 692 { 695 693 struct request *rq; 696 694 ··· 706 704 } 707 705 } 708 706 709 - struct request *elv_next_request(request_queue_t *q) 707 + struct request *elv_next_request(struct request_queue *q) 710 708 { 711 709 struct request *rq; 712 710 int ret; ··· 772 770 773 771 EXPORT_SYMBOL(elv_next_request); 774 772 775 - void elv_dequeue_request(request_queue_t *q, struct request *rq) 773 + void elv_dequeue_request(struct request_queue *q, struct request *rq) 776 774 { 777 775 BUG_ON(list_empty(&rq->queuelist)); 778 776 BUG_ON(ELV_ON_HASH(rq)); ··· 790 788 791 789 EXPORT_SYMBOL(elv_dequeue_request); 792 790 793 - int elv_queue_empty(request_queue_t *q) 791 + int elv_queue_empty(struct request_queue *q) 794 792 { 795 793 elevator_t *e = q->elevator; 796 794 ··· 805 803 806 804 EXPORT_SYMBOL(elv_queue_empty); 807 805 808 - struct request *elv_latter_request(request_queue_t *q, struct request 
*rq) 806 + struct request *elv_latter_request(struct request_queue *q, struct request *rq) 809 807 { 810 808 elevator_t *e = q->elevator; 811 809 ··· 814 812 return NULL; 815 813 } 816 814 817 - struct request *elv_former_request(request_queue_t *q, struct request *rq) 815 + struct request *elv_former_request(struct request_queue *q, struct request *rq) 818 816 { 819 817 elevator_t *e = q->elevator; 820 818 ··· 823 821 return NULL; 824 822 } 825 823 826 - int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask) 824 + int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) 827 825 { 828 826 elevator_t *e = q->elevator; 829 827 ··· 834 832 return 0; 835 833 } 836 834 837 - void elv_put_request(request_queue_t *q, struct request *rq) 835 + void elv_put_request(struct request_queue *q, struct request *rq) 838 836 { 839 837 elevator_t *e = q->elevator; 840 838 ··· 842 840 e->ops->elevator_put_req_fn(rq); 843 841 } 844 842 845 - int elv_may_queue(request_queue_t *q, int rw) 843 + int elv_may_queue(struct request_queue *q, int rw) 846 844 { 847 845 elevator_t *e = q->elevator; 848 846 ··· 852 850 return ELV_MQUEUE_MAY; 853 851 } 854 852 855 - void elv_completed_request(request_queue_t *q, struct request *rq) 853 + void elv_completed_request(struct request_queue *q, struct request *rq) 856 854 { 857 855 elevator_t *e = q->elevator; 858 856 ··· 1008 1006 * need for the new one. this way we have a chance of going back to the old 1009 1007 * one, if the new one fails init for some reason. 1010 1008 */ 1011 - static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) 1009 + static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) 1012 1010 { 1013 1011 elevator_t *old_elevator, *e; 1014 1012 void *data; ··· 1080 1078 return 0; 1081 1079 } 1082 1080 1083 - ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) 1081 + ssize_t elv_iosched_store(struct request_queue *q, const char *name, 1082 + size_t count) 1084 1083 { 1085 1084 char elevator_name[ELV_NAME_MAX]; 1086 1085 size_t len; ··· 1110 1107 return count; 1111 1108 } 1112 1109 1113 - ssize_t elv_iosched_show(request_queue_t *q, char *name) 1110 + ssize_t elv_iosched_show(struct request_queue *q, char *name) 1114 1111 { 1115 1112 elevator_t *e = q->elevator; 1116 1113 struct elevator_type *elv = e->elevator_type; ··· 1130 1127 return len; 1131 1128 } 1132 1129 1133 - struct request *elv_rb_former_request(request_queue_t *q, struct request *rq) 1130 + struct request *elv_rb_former_request(struct request_queue *q, 1131 + struct request *rq) 1134 1132 { 1135 1133 struct rb_node *rbprev = rb_prev(&rq->rb_node); 1136 1134 ··· 1143 1139 1144 1140 EXPORT_SYMBOL(elv_rb_former_request); 1145 1141 1146 - struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq) 1142 + struct request *elv_rb_latter_request(struct request_queue *q, 1143 + struct request *rq) 1147 1144 { 1148 1145 struct rb_node *rbnext = rb_next(&rq->rb_node); 1149 1146
+112 -103
block/ll_rw_blk.c
··· 40 40 static void blk_unplug_timeout(unsigned long data); 41 41 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); 42 42 static void init_request_from_bio(struct request *req, struct bio *bio); 43 - static int __make_request(request_queue_t *q, struct bio *bio); 43 + static int __make_request(struct request_queue *q, struct bio *bio); 44 44 static struct io_context *current_io_context(gfp_t gfp_flags, int node); 45 45 46 46 /* ··· 121 121 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) 122 122 { 123 123 struct backing_dev_info *ret = NULL; 124 - request_queue_t *q = bdev_get_queue(bdev); 124 + struct request_queue *q = bdev_get_queue(bdev); 125 125 126 126 if (q) 127 127 ret = &q->backing_dev_info; ··· 140 140 * cdb from the request data for instance. 141 141 * 142 142 */ 143 - void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn) 143 + void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) 144 144 { 145 145 q->prep_rq_fn = pfn; 146 146 } ··· 163 163 * no merge_bvec_fn is defined for a queue, and only the fixed limits are 164 164 * honored. 165 165 */ 166 - void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn) 166 + void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) 167 167 { 168 168 q->merge_bvec_fn = mbfn; 169 169 } 170 170 171 171 EXPORT_SYMBOL(blk_queue_merge_bvec); 172 172 173 - void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn) 173 + void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) 174 174 { 175 175 q->softirq_done_fn = fn; 176 176 } ··· 199 199 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling 200 200 * blk_queue_bounce() to create a buffer in normal memory. 201 201 **/ 202 - void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) 202 + void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) 203 203 { 204 204 /* 205 205 * set defaults ··· 235 235 236 236 EXPORT_SYMBOL(blk_queue_make_request); 237 237 238 - static void rq_init(request_queue_t *q, struct request *rq) 238 + static void rq_init(struct request_queue *q, struct request *rq) 239 239 { 240 240 INIT_LIST_HEAD(&rq->queuelist); 241 241 INIT_LIST_HEAD(&rq->donelist); ··· 272 272 * feature should call this function and indicate so. 273 273 * 274 274 **/ 275 - int blk_queue_ordered(request_queue_t *q, unsigned ordered, 275 + int blk_queue_ordered(struct request_queue *q, unsigned ordered, 276 276 prepare_flush_fn *prepare_flush_fn) 277 277 { 278 278 if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && ··· 311 311 * to the block layer by defining it through this call. 
312 312 * 313 313 **/ 314 - void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff) 314 + void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff) 315 315 { 316 316 q->issue_flush_fn = iff; 317 317 } ··· 321 321 /* 322 322 * Cache flushing for ordered writes handling 323 323 */ 324 - inline unsigned blk_ordered_cur_seq(request_queue_t *q) 324 + inline unsigned blk_ordered_cur_seq(struct request_queue *q) 325 325 { 326 326 if (!q->ordseq) 327 327 return 0; ··· 330 330 331 331 unsigned blk_ordered_req_seq(struct request *rq) 332 332 { 333 - request_queue_t *q = rq->q; 333 + struct request_queue *q = rq->q; 334 334 335 335 BUG_ON(q->ordseq == 0); 336 336 ··· 357 357 return QUEUE_ORDSEQ_DONE; 358 358 } 359 359 360 - void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error) 360 + void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) 361 361 { 362 362 struct request *rq; 363 363 int uptodate; ··· 401 401 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); 402 402 } 403 403 404 - static void queue_flush(request_queue_t *q, unsigned which) 404 + static void queue_flush(struct request_queue *q, unsigned which) 405 405 { 406 406 struct request *rq; 407 407 rq_end_io_fn *end_io; ··· 425 425 elv_insert(q, rq, ELEVATOR_INSERT_FRONT); 426 426 } 427 427 428 - static inline struct request *start_ordered(request_queue_t *q, 428 + static inline struct request *start_ordered(struct request_queue *q, 429 429 struct request *rq) 430 430 { 431 431 q->bi_size = 0; ··· 476 476 return rq; 477 477 } 478 478 479 - int blk_do_ordered(request_queue_t *q, struct request **rqp) 479 + int blk_do_ordered(struct request_queue *q, struct request **rqp) 480 480 { 481 481 struct request *rq = *rqp; 482 482 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); ··· 527 527 528 528 static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) 529 529 { 530 - request_queue_t *q = bio->bi_private; 530 + struct request_queue *q = bio->bi_private; 531 531 532 532 /* 533 533 * This is dry run, restore bio_sector and size. We'll finish ··· 551 551 static int ordered_bio_endio(struct request *rq, struct bio *bio, 552 552 unsigned int nbytes, int error) 553 553 { 554 - request_queue_t *q = rq->q; 554 + struct request_queue *q = rq->q; 555 555 bio_end_io_t *endio; 556 556 void *private; 557 557 ··· 588 588 * blk_queue_bounce_limit to have lower memory pages allocated as bounce 589 589 * buffers for doing I/O to pages residing above @page. 590 590 **/ 591 - void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) 591 + void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) 592 592 { 593 593 unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; 594 594 int dma = 0; ··· 624 624 * Enables a low level driver to set an upper limit on the size of 625 625 * received requests. 626 626 **/ 627 - void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors) 627 + void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) 628 628 { 629 629 if ((max_sectors << 9) < PAGE_CACHE_SIZE) { 630 630 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); ··· 651 651 * physical data segments in a request. This would be the largest sized 652 652 * scatter list the driver could handle. 
653 653 **/ 654 - void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments) 654 + void blk_queue_max_phys_segments(struct request_queue *q, 655 + unsigned short max_segments) 655 656 { 656 657 if (!max_segments) { 657 658 max_segments = 1; ··· 675 674 * address/length pairs the host adapter can actually give as once 676 675 * to the device. 677 676 **/ 678 - void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments) 677 + void blk_queue_max_hw_segments(struct request_queue *q, 678 + unsigned short max_segments) 679 679 { 680 680 if (!max_segments) { 681 681 max_segments = 1; ··· 697 695 * Enables a low level driver to set an upper limit on the size of a 698 696 * coalesced segment 699 697 **/ 700 - void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size) 698 + void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) 701 699 { 702 700 if (max_size < PAGE_CACHE_SIZE) { 703 701 max_size = PAGE_CACHE_SIZE; ··· 720 718 * even internal read-modify-write operations). Usually the default 721 719 * of 512 covers most hardware. 722 720 **/ 723 - void blk_queue_hardsect_size(request_queue_t *q, unsigned short size) 721 + void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) 724 722 { 725 723 q->hardsect_size = size; 726 724 } ··· 737 735 * @t: the stacking driver (top) 738 736 * @b: the underlying device (bottom) 739 737 **/ 740 - void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b) 738 + void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) 741 739 { 742 740 /* zero is "infinity" */ 743 741 t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); ··· 758 756 * @q: the request queue for the device 759 757 * @mask: the memory boundary mask 760 758 **/ 761 - void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask) 759 + void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) 762 760 { 763 761 if (mask < PAGE_CACHE_SIZE - 1) { 764 762 mask = PAGE_CACHE_SIZE - 1; ··· 780 778 * this is used when buiding direct io requests for the queue. 781 779 * 782 780 **/ 783 - void blk_queue_dma_alignment(request_queue_t *q, int mask) 781 + void blk_queue_dma_alignment(struct request_queue *q, int mask) 784 782 { 785 783 q->dma_alignment = mask; 786 784 } ··· 798 796 * 799 797 * no locks need be held. 800 798 **/ 801 - struct request *blk_queue_find_tag(request_queue_t *q, int tag) 799 + struct request *blk_queue_find_tag(struct request_queue *q, int tag) 802 800 { 803 801 return blk_map_queue_find_tag(q->queue_tags, tag); 804 802 } ··· 842 840 * blk_cleanup_queue() will take care of calling this function, if tagging 843 841 * has been used. So there's no need to call this directly. 844 842 **/ 845 - static void __blk_queue_free_tags(request_queue_t *q) 843 + static void __blk_queue_free_tags(struct request_queue *q) 846 844 { 847 845 struct blk_queue_tag *bqt = q->queue_tags; 848 846 ··· 879 877 * This is used to disabled tagged queuing to a device, yet leave 880 878 * queue in function. 
881 879 **/ 882 - void blk_queue_free_tags(request_queue_t *q) 880 + void blk_queue_free_tags(struct request_queue *q) 883 881 { 884 882 clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 885 883 } ··· 887 885 EXPORT_SYMBOL(blk_queue_free_tags); 888 886 889 887 static int 890 - init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) 888 + init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) 891 889 { 892 890 struct request **tag_index; 893 891 unsigned long *tag_map; ··· 957 955 * @depth: the maximum queue depth supported 958 956 * @tags: the tag to use 959 957 **/ 960 - int blk_queue_init_tags(request_queue_t *q, int depth, 958 + int blk_queue_init_tags(struct request_queue *q, int depth, 961 959 struct blk_queue_tag *tags) 962 960 { 963 961 int rc; ··· 998 996 * Notes: 999 997 * Must be called with the queue lock held. 1000 998 **/ 1001 - int blk_queue_resize_tags(request_queue_t *q, int new_depth) 999 + int blk_queue_resize_tags(struct request_queue *q, int new_depth) 1002 1000 { 1003 1001 struct blk_queue_tag *bqt = q->queue_tags; 1004 1002 struct request **tag_index; ··· 1061 1059 * Notes: 1062 1060 * queue lock must be held. 1063 1061 **/ 1064 - void blk_queue_end_tag(request_queue_t *q, struct request *rq) 1062 + void blk_queue_end_tag(struct request_queue *q, struct request *rq) 1065 1063 { 1066 1064 struct blk_queue_tag *bqt = q->queue_tags; 1067 1065 int tag = rq->tag; ··· 1113 1111 * Notes: 1114 1112 * queue lock must be held. 1115 1113 **/ 1116 - int blk_queue_start_tag(request_queue_t *q, struct request *rq) 1114 + int blk_queue_start_tag(struct request_queue *q, struct request *rq) 1117 1115 { 1118 1116 struct blk_queue_tag *bqt = q->queue_tags; 1119 1117 int tag; ··· 1160 1158 * Notes: 1161 1159 * queue lock must be held. 1162 1160 **/ 1163 - void blk_queue_invalidate_tags(request_queue_t *q) 1161 + void blk_queue_invalidate_tags(struct request_queue *q) 1164 1162 { 1165 1163 struct blk_queue_tag *bqt = q->queue_tags; 1166 1164 struct list_head *tmp, *n; ··· 1207 1205 1208 1206 EXPORT_SYMBOL(blk_dump_rq_flags); 1209 1207 1210 - void blk_recount_segments(request_queue_t *q, struct bio *bio) 1208 + void blk_recount_segments(struct request_queue *q, struct bio *bio) 1211 1209 { 1212 1210 struct bio_vec *bv, *bvprv = NULL; 1213 1211 int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster; ··· 1269 1267 } 1270 1268 EXPORT_SYMBOL(blk_recount_segments); 1271 1269 1272 - static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, 1270 + static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, 1273 1271 struct bio *nxt) 1274 1272 { 1275 1273 if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) ··· 1290 1288 return 0; 1291 1289 } 1292 1290 1293 - static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, 1291 + static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio, 1294 1292 struct bio *nxt) 1295 1293 { 1296 1294 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) ··· 1310 1308 * map a request to scatterlist, return number of sg entries setup. 
Caller 1311 1309 * must make sure sg can hold rq->nr_phys_segments entries 1312 1310 */ 1313 - int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg) 1311 + int blk_rq_map_sg(struct request_queue *q, struct request *rq, 1312 + struct scatterlist *sg) 1314 1313 { 1315 1314 struct bio_vec *bvec, *bvprv; 1316 1315 struct bio *bio; ··· 1364 1361 * specific ones if so desired 1365 1362 */ 1366 1363 1367 - static inline int ll_new_mergeable(request_queue_t *q, 1364 + static inline int ll_new_mergeable(struct request_queue *q, 1368 1365 struct request *req, 1369 1366 struct bio *bio) 1370 1367 { ··· 1385 1382 return 1; 1386 1383 } 1387 1384 1388 - static inline int ll_new_hw_segment(request_queue_t *q, 1385 + static inline int ll_new_hw_segment(struct request_queue *q, 1389 1386 struct request *req, 1390 1387 struct bio *bio) 1391 1388 { ··· 1409 1406 return 1; 1410 1407 } 1411 1408 1412 - int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) 1409 + int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) 1413 1410 { 1414 1411 unsigned short max_sectors; 1415 1412 int len; ··· 1447 1444 } 1448 1445 EXPORT_SYMBOL(ll_back_merge_fn); 1449 1446 1450 - static int ll_front_merge_fn(request_queue_t *q, struct request *req, 1447 + static int ll_front_merge_fn(struct request_queue *q, struct request *req, 1451 1448 struct bio *bio) 1452 1449 { 1453 1450 unsigned short max_sectors; ··· 1486 1483 return ll_new_hw_segment(q, req, bio); 1487 1484 } 1488 1485 1489 - static int ll_merge_requests_fn(request_queue_t *q, struct request *req, 1486 + static int ll_merge_requests_fn(struct request_queue *q, struct request *req, 1490 1487 struct request *next) 1491 1488 { 1492 1489 int total_phys_segments; ··· 1542 1539 * This is called with interrupts off and no requests on the queue and 1543 1540 * with the queue lock held. 1544 1541 */ 1545 - void blk_plug_device(request_queue_t *q) 1542 + void blk_plug_device(struct request_queue *q) 1546 1543 { 1547 1544 WARN_ON(!irqs_disabled()); 1548 1545 ··· 1565 1562 * remove the queue from the plugged list, if present. called with 1566 1563 * queue lock held and interrupts disabled. 1567 1564 */ 1568 - int blk_remove_plug(request_queue_t *q) 1565 + int blk_remove_plug(struct request_queue *q) 1569 1566 { 1570 1567 WARN_ON(!irqs_disabled()); 1571 1568 ··· 1581 1578 /* 1582 1579 * remove the plug and let it rip.. 1583 1580 */ 1584 - void __generic_unplug_device(request_queue_t *q) 1581 + void __generic_unplug_device(struct request_queue *q) 1585 1582 { 1586 1583 if (unlikely(blk_queue_stopped(q))) 1587 1584 return; ··· 1595 1592 1596 1593 /** 1597 1594 * generic_unplug_device - fire a request queue 1598 - * @q: The &request_queue_t in question 1595 + * @q: The &struct request_queue in question 1599 1596 * 1600 1597 * Description: 1601 1598 * Linux uses plugging to build bigger requests queues before letting ··· 1604 1601 * gets unplugged, the request_fn defined for the queue is invoked and 1605 1602 * transfers started. 
1606 1603 **/ 1607 - void generic_unplug_device(request_queue_t *q) 1604 + void generic_unplug_device(struct request_queue *q) 1608 1605 { 1609 1606 spin_lock_irq(q->queue_lock); 1610 1607 __generic_unplug_device(q); ··· 1615 1612 static void blk_backing_dev_unplug(struct backing_dev_info *bdi, 1616 1613 struct page *page) 1617 1614 { 1618 - request_queue_t *q = bdi->unplug_io_data; 1615 + struct request_queue *q = bdi->unplug_io_data; 1619 1616 1620 1617 /* 1621 1618 * devices don't necessarily have an ->unplug_fn defined ··· 1630 1627 1631 1628 static void blk_unplug_work(struct work_struct *work) 1632 1629 { 1633 - request_queue_t *q = container_of(work, request_queue_t, unplug_work); 1630 + struct request_queue *q = 1631 + container_of(work, struct request_queue, unplug_work); 1634 1632 1635 1633 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, 1636 1634 q->rq.count[READ] + q->rq.count[WRITE]); ··· 1641 1637 1642 1638 static void blk_unplug_timeout(unsigned long data) 1643 1639 { 1644 - request_queue_t *q = (request_queue_t *)data; 1640 + struct request_queue *q = (struct request_queue *)data; 1645 1641 1646 1642 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, 1647 1643 q->rq.count[READ] + q->rq.count[WRITE]); ··· 1651 1647 1652 1648 /** 1653 1649 * blk_start_queue - restart a previously stopped queue 1654 - * @q: The &request_queue_t in question 1650 + * @q: The &struct request_queue in question 1655 1651 * 1656 1652 * Description: 1657 1653 * blk_start_queue() will clear the stop flag on the queue, and call 1658 1654 * the request_fn for the queue if it was in a stopped state when 1659 1655 * entered. Also see blk_stop_queue(). Queue lock must be held. 1660 1656 **/ 1661 - void blk_start_queue(request_queue_t *q) 1657 + void blk_start_queue(struct request_queue *q) 1662 1658 { 1663 1659 WARN_ON(!irqs_disabled()); 1664 1660 ··· 1681 1677 1682 1678 /** 1683 1679 * blk_stop_queue - stop a queue 1684 - * @q: The &request_queue_t in question 1680 + * @q: The &struct request_queue in question 1685 1681 * 1686 1682 * Description: 1687 1683 * The Linux block layer assumes that a block driver will consume all ··· 1693 1689 * the driver has signalled it's ready to go again. This happens by calling 1694 1690 * blk_start_queue() to restart queue operations. Queue lock must be held. 
1695 1691 **/ 1696 - void blk_stop_queue(request_queue_t *q) 1692 + void blk_stop_queue(struct request_queue *q) 1697 1693 { 1698 1694 blk_remove_plug(q); 1699 1695 set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); ··· 1750 1746 EXPORT_SYMBOL(blk_run_queue); 1751 1747 1752 1748 /** 1753 - * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed 1749 + * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed 1754 1750 * @kobj: the kobj belonging of the request queue to be released 1755 1751 * 1756 1752 * Description: ··· 1766 1762 **/ 1767 1763 static void blk_release_queue(struct kobject *kobj) 1768 1764 { 1769 - request_queue_t *q = container_of(kobj, struct request_queue, kobj); 1765 + struct request_queue *q = 1766 + container_of(kobj, struct request_queue, kobj); 1770 1767 struct request_list *rl = &q->rq; 1771 1768 1772 1769 blk_sync_queue(q); ··· 1783 1778 kmem_cache_free(requestq_cachep, q); 1784 1779 } 1785 1780 1786 - void blk_put_queue(request_queue_t *q) 1781 + void blk_put_queue(struct request_queue *q) 1787 1782 { 1788 1783 kobject_put(&q->kobj); 1789 1784 } 1790 1785 EXPORT_SYMBOL(blk_put_queue); 1791 1786 1792 - void blk_cleanup_queue(request_queue_t * q) 1787 + void blk_cleanup_queue(struct request_queue * q) 1793 1788 { 1794 1789 mutex_lock(&q->sysfs_lock); 1795 1790 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); ··· 1803 1798 1804 1799 EXPORT_SYMBOL(blk_cleanup_queue); 1805 1800 1806 - static int blk_init_free_list(request_queue_t *q) 1801 + static int blk_init_free_list(struct request_queue *q) 1807 1802 { 1808 1803 struct request_list *rl = &q->rq; 1809 1804 ··· 1822 1817 return 0; 1823 1818 } 1824 1819 1825 - request_queue_t *blk_alloc_queue(gfp_t gfp_mask) 1820 + struct request_queue *blk_alloc_queue(gfp_t gfp_mask) 1826 1821 { 1827 1822 return blk_alloc_queue_node(gfp_mask, -1); 1828 1823 } ··· 1830 1825 1831 1826 static struct kobj_type queue_ktype; 1832 1827 1833 - request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 1828 + struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) 1834 1829 { 1835 - request_queue_t *q; 1830 + struct request_queue *q; 1836 1831 1837 1832 q = kmem_cache_alloc_node(requestq_cachep, 1838 1833 gfp_mask | __GFP_ZERO, node_id); ··· 1887 1882 * when the block device is deactivated (such as at module unload). 
1888 1883 **/ 1889 1884 1890 - request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 1885 + struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) 1891 1886 { 1892 1887 return blk_init_queue_node(rfn, lock, -1); 1893 1888 } 1894 1889 EXPORT_SYMBOL(blk_init_queue); 1895 1890 1896 - request_queue_t * 1891 + struct request_queue * 1897 1892 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) 1898 1893 { 1899 - request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id); 1894 + struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id); 1900 1895 1901 1896 if (!q) 1902 1897 return NULL; ··· 1945 1940 } 1946 1941 EXPORT_SYMBOL(blk_init_queue_node); 1947 1942 1948 - int blk_get_queue(request_queue_t *q) 1943 + int blk_get_queue(struct request_queue *q) 1949 1944 { 1950 1945 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { 1951 1946 kobject_get(&q->kobj); ··· 1957 1952 1958 1953 EXPORT_SYMBOL(blk_get_queue); 1959 1954 1960 - static inline void blk_free_request(request_queue_t *q, struct request *rq) 1955 + static inline void blk_free_request(struct request_queue *q, struct request *rq) 1961 1956 { 1962 1957 if (rq->cmd_flags & REQ_ELVPRIV) 1963 1958 elv_put_request(q, rq); ··· 1965 1960 } 1966 1961 1967 1962 static struct request * 1968 - blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) 1963 + blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask) 1969 1964 { 1970 1965 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); 1971 1966 ··· 1993 1988 * ioc_batching returns true if the ioc is a valid batching request and 1994 1989 * should be given priority access to a request. 1995 1990 */ 1996 - static inline int ioc_batching(request_queue_t *q, struct io_context *ioc) 1991 + static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) 1997 1992 { 1998 1993 if (!ioc) 1999 1994 return 0; ··· 2014 2009 * is the behaviour we want though - once it gets a wakeup it should be given 2015 2010 * a nice run. 2016 2011 */ 2017 - static void ioc_set_batching(request_queue_t *q, struct io_context *ioc) 2012 + static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) 2018 2013 { 2019 2014 if (!ioc || ioc_batching(q, ioc)) 2020 2015 return; ··· 2023 2018 ioc->last_waited = jiffies; 2024 2019 } 2025 2020 2026 - static void __freed_request(request_queue_t *q, int rw) 2021 + static void __freed_request(struct request_queue *q, int rw) 2027 2022 { 2028 2023 struct request_list *rl = &q->rq; 2029 2024 ··· 2042 2037 * A request has just been released. Account for it, update the full and 2043 2038 * congestion status, wake up any waiters. Called under q->queue_lock. 2044 2039 */ 2045 - static void freed_request(request_queue_t *q, int rw, int priv) 2040 + static void freed_request(struct request_queue *q, int rw, int priv) 2046 2041 { 2047 2042 struct request_list *rl = &q->rq; 2048 2043 ··· 2062 2057 * Returns NULL on failure, with queue_lock held. 2063 2058 * Returns !NULL on success, with queue_lock *not held*. 2064 2059 */ 2065 - static struct request *get_request(request_queue_t *q, int rw_flags, 2060 + static struct request *get_request(struct request_queue *q, int rw_flags, 2066 2061 struct bio *bio, gfp_t gfp_mask) 2067 2062 { 2068 2063 struct request *rq = NULL; ··· 2167 2162 * 2168 2163 * Called with q->queue_lock held, and returns with it unlocked. 
2169 2164 */ 2170 - static struct request *get_request_wait(request_queue_t *q, int rw_flags, 2165 + static struct request *get_request_wait(struct request_queue *q, int rw_flags, 2171 2166 struct bio *bio) 2172 2167 { 2173 2168 const int rw = rw_flags & 0x01; ··· 2209 2204 return rq; 2210 2205 } 2211 2206 2212 - struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) 2207 + struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) 2213 2208 { 2214 2209 struct request *rq; 2215 2210 ··· 2239 2234 * 2240 2235 * The queue lock must be held with interrupts disabled. 2241 2236 */ 2242 - void blk_start_queueing(request_queue_t *q) 2237 + void blk_start_queueing(struct request_queue *q) 2243 2238 { 2244 2239 if (!blk_queue_plugged(q)) 2245 2240 q->request_fn(q); ··· 2258 2253 * more, when that condition happens we need to put the request back 2259 2254 * on the queue. Must be called with queue lock held. 2260 2255 */ 2261 - void blk_requeue_request(request_queue_t *q, struct request *rq) 2256 + void blk_requeue_request(struct request_queue *q, struct request *rq) 2262 2257 { 2263 2258 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); 2264 2259 ··· 2289 2284 * of the queue for things like a QUEUE_FULL message from a device, or a 2290 2285 * host that is unable to accept a particular command. 2291 2286 */ 2292 - void blk_insert_request(request_queue_t *q, struct request *rq, 2287 + void blk_insert_request(struct request_queue *q, struct request *rq, 2293 2288 int at_head, void *data) 2294 2289 { 2295 2290 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; ··· 2335 2330 return ret; 2336 2331 } 2337 2332 2338 - static int __blk_rq_map_user(request_queue_t *q, struct request *rq, 2333 + static int __blk_rq_map_user(struct request_queue *q, struct request *rq, 2339 2334 void __user *ubuf, unsigned int len) 2340 2335 { 2341 2336 unsigned long uaddr; ··· 2408 2403 * original bio must be passed back in to blk_rq_unmap_user() for proper 2409 2404 * unmapping. 2410 2405 */ 2411 - int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, 2412 - unsigned long len) 2406 + int blk_rq_map_user(struct request_queue *q, struct request *rq, 2407 + void __user *ubuf, unsigned long len) 2413 2408 { 2414 2409 unsigned long bytes_read = 0; 2415 2410 struct bio *bio = NULL; ··· 2475 2470 * original bio must be passed back in to blk_rq_unmap_user() for proper 2476 2471 * unmapping. 2477 2472 */ 2478 - int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, 2473 + int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, 2479 2474 struct sg_iovec *iov, int iov_count, unsigned int len) 2480 2475 { 2481 2476 struct bio *bio; ··· 2545 2540 * @len: length of user data 2546 2541 * @gfp_mask: memory allocation flags 2547 2542 */ 2548 - int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, 2543 + int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, 2549 2544 unsigned int len, gfp_t gfp_mask) 2550 2545 { 2551 2546 struct bio *bio; ··· 2582 2577 * Insert a fully prepared request at the back of the io scheduler queue 2583 2578 * for execution. Don't wait for completion. 
2584 2579 */ 2585 - void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, 2580 + void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, 2586 2581 struct request *rq, int at_head, 2587 2582 rq_end_io_fn *done) 2588 2583 { ··· 2610 2605 * Insert a fully prepared request at the back of the io scheduler queue 2611 2606 * for execution and wait for completion. 2612 2607 */ 2613 - int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, 2608 + int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, 2614 2609 struct request *rq, int at_head) 2615 2610 { 2616 2611 DECLARE_COMPLETION_ONSTACK(wait); ··· 2653 2648 */ 2654 2649 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) 2655 2650 { 2656 - request_queue_t *q; 2651 + struct request_queue *q; 2657 2652 2658 2653 if (bdev->bd_disk == NULL) 2659 2654 return -ENXIO; ··· 2689 2684 * queue lock is held and interrupts disabled, as we muck with the 2690 2685 * request queue list. 2691 2686 */ 2692 - static inline void add_request(request_queue_t * q, struct request * req) 2687 + static inline void add_request(struct request_queue * q, struct request * req) 2693 2688 { 2694 2689 drive_stat_acct(req, req->nr_sectors, 1); 2695 2690 ··· 2735 2730 /* 2736 2731 * queue lock must be held 2737 2732 */ 2738 - void __blk_put_request(request_queue_t *q, struct request *req) 2733 + void __blk_put_request(struct request_queue *q, struct request *req) 2739 2734 { 2740 2735 if (unlikely(!q)) 2741 2736 return; ··· 2765 2760 void blk_put_request(struct request *req) 2766 2761 { 2767 2762 unsigned long flags; 2768 - request_queue_t *q = req->q; 2763 + struct request_queue *q = req->q; 2769 2764 2770 2765 /* 2771 2766 * Gee, IDE calls in w/ NULL q. Fix IDE and remove the ··· 2803 2798 /* 2804 2799 * Has to be called with the request spinlock acquired 2805 2800 */ 2806 - static int attempt_merge(request_queue_t *q, struct request *req, 2801 + static int attempt_merge(struct request_queue *q, struct request *req, 2807 2802 struct request *next) 2808 2803 { 2809 2804 if (!rq_mergeable(req) || !rq_mergeable(next)) ··· 2856 2851 return 1; 2857 2852 } 2858 2853 2859 - static inline int attempt_back_merge(request_queue_t *q, struct request *rq) 2854 + static inline int attempt_back_merge(struct request_queue *q, 2855 + struct request *rq) 2860 2856 { 2861 2857 struct request *next = elv_latter_request(q, rq); 2862 2858 ··· 2867 2861 return 0; 2868 2862 } 2869 2863 2870 - static inline int attempt_front_merge(request_queue_t *q, struct request *rq) 2864 + static inline int attempt_front_merge(struct request_queue *q, 2865 + struct request *rq) 2871 2866 { 2872 2867 struct request *prev = elv_former_request(q, rq); 2873 2868 ··· 2912 2905 req->start_time = jiffies; 2913 2906 } 2914 2907 2915 - static int __make_request(request_queue_t *q, struct bio *bio) 2908 + static int __make_request(struct request_queue *q, struct bio *bio) 2916 2909 { 2917 2910 struct request *req; 2918 2911 int el_ret, nr_sectors, barrier, err; ··· 3126 3119 */ 3127 3120 static inline void __generic_make_request(struct bio *bio) 3128 3121 { 3129 - request_queue_t *q; 3122 + struct request_queue *q; 3130 3123 sector_t maxsector; 3131 3124 sector_t old_sector; 3132 3125 int ret, nr_sectors = bio_sectors(bio); ··· 3319 3312 struct bio *bio, *prevbio = NULL; 3320 3313 int nr_phys_segs, nr_hw_segs; 3321 3314 unsigned int phys_size, hw_size; 3322 - request_queue_t *q = rq->q; 3315 + struct request_queue *q = rq->q; 3323 3316 3324 
3317 if (!rq->bio) 3325 3318 return; ··· 3665 3658 3666 3659 EXPORT_SYMBOL(end_request); 3667 3660 3668 - void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) 3661 + void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 3662 + struct bio *bio) 3669 3663 { 3670 3664 /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ 3671 3665 rq->cmd_flags |= (bio->bi_rw & 3); ··· 3709 3701 sizeof(struct request), 0, SLAB_PANIC, NULL); 3710 3702 3711 3703 requestq_cachep = kmem_cache_create("blkdev_queue", 3712 - sizeof(request_queue_t), 0, SLAB_PANIC, NULL); 3704 + sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 3713 3705 3714 3706 iocontext_cachep = kmem_cache_create("blkdev_ioc", 3715 3707 sizeof(struct io_context), 0, SLAB_PANIC, NULL); ··· 4029 4021 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4030 4022 { 4031 4023 struct queue_sysfs_entry *entry = to_queue(attr); 4032 - request_queue_t *q = container_of(kobj, struct request_queue, kobj); 4024 + struct request_queue *q = 4025 + container_of(kobj, struct request_queue, kobj); 4033 4026 ssize_t res; 4034 4027 4035 4028 if (!entry->show) ··· 4050 4041 const char *page, size_t length) 4051 4042 { 4052 4043 struct queue_sysfs_entry *entry = to_queue(attr); 4053 - request_queue_t *q = container_of(kobj, struct request_queue, kobj); 4044 + struct request_queue *q = container_of(kobj, struct request_queue, kobj); 4054 4045 4055 4046 ssize_t res; 4056 4047 ··· 4081 4072 { 4082 4073 int ret; 4083 4074 4084 - request_queue_t *q = disk->queue; 4075 + struct request_queue *q = disk->queue; 4085 4076 4086 4077 if (!q || !q->request_fn) 4087 4078 return -ENXIO; ··· 4106 4097 4107 4098 void blk_unregister_queue(struct gendisk *disk) 4108 4099 { 4109 - request_queue_t *q = disk->queue; 4100 + struct request_queue *q = disk->queue; 4110 4101 4111 4102 if (q && q->request_fn) { 4112 4103 elv_unregister_queue(q);
+7 -7
block/noop-iosched.c
··· 11 11 struct list_head queue; 12 12 }; 13 13 14 - static void noop_merged_requests(request_queue_t *q, struct request *rq, 14 + static void noop_merged_requests(struct request_queue *q, struct request *rq, 15 15 struct request *next) 16 16 { 17 17 list_del_init(&next->queuelist); 18 18 } 19 19 20 - static int noop_dispatch(request_queue_t *q, int force) 20 + static int noop_dispatch(struct request_queue *q, int force) 21 21 { 22 22 struct noop_data *nd = q->elevator->elevator_data; 23 23 ··· 31 31 return 0; 32 32 } 33 33 34 - static void noop_add_request(request_queue_t *q, struct request *rq) 34 + static void noop_add_request(struct request_queue *q, struct request *rq) 35 35 { 36 36 struct noop_data *nd = q->elevator->elevator_data; 37 37 38 38 list_add_tail(&rq->queuelist, &nd->queue); 39 39 } 40 40 41 - static int noop_queue_empty(request_queue_t *q) 41 + static int noop_queue_empty(struct request_queue *q) 42 42 { 43 43 struct noop_data *nd = q->elevator->elevator_data; 44 44 ··· 46 46 } 47 47 48 48 static struct request * 49 - noop_former_request(request_queue_t *q, struct request *rq) 49 + noop_former_request(struct request_queue *q, struct request *rq) 50 50 { 51 51 struct noop_data *nd = q->elevator->elevator_data; 52 52 ··· 56 56 } 57 57 58 58 static struct request * 59 - noop_latter_request(request_queue_t *q, struct request *rq) 59 + noop_latter_request(struct request_queue *q, struct request *rq) 60 60 { 61 61 struct noop_data *nd = q->elevator->elevator_data; 62 62 ··· 65 65 return list_entry(rq->queuelist.next, struct request, queuelist); 66 66 } 67 67 68 - static void *noop_init_queue(request_queue_t *q) 68 + static void *noop_init_queue(struct request_queue *q) 69 69 { 70 70 struct noop_data *nd; 71 71
+13 -11
block/scsi_ioctl.c
··· 49 49 return put_user(sg_version_num, p); 50 50 } 51 51 52 - static int scsi_get_idlun(request_queue_t *q, int __user *p) 52 + static int scsi_get_idlun(struct request_queue *q, int __user *p) 53 53 { 54 54 return put_user(0, p); 55 55 } 56 56 57 - static int scsi_get_bus(request_queue_t *q, int __user *p) 57 + static int scsi_get_bus(struct request_queue *q, int __user *p) 58 58 { 59 59 return put_user(0, p); 60 60 } 61 61 62 - static int sg_get_timeout(request_queue_t *q) 62 + static int sg_get_timeout(struct request_queue *q) 63 63 { 64 64 return q->sg_timeout / (HZ / USER_HZ); 65 65 } 66 66 67 - static int sg_set_timeout(request_queue_t *q, int __user *p) 67 + static int sg_set_timeout(struct request_queue *q, int __user *p) 68 68 { 69 69 int timeout, err = get_user(timeout, p); 70 70 ··· 74 74 return err; 75 75 } 76 76 77 - static int sg_get_reserved_size(request_queue_t *q, int __user *p) 77 + static int sg_get_reserved_size(struct request_queue *q, int __user *p) 78 78 { 79 79 unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); 80 80 81 81 return put_user(val, p); 82 82 } 83 83 84 - static int sg_set_reserved_size(request_queue_t *q, int __user *p) 84 + static int sg_set_reserved_size(struct request_queue *q, int __user *p) 85 85 { 86 86 int size, err = get_user(size, p); 87 87 ··· 101 101 * will always return that we are ATAPI even for a real SCSI drive, I'm not 102 102 * so sure this is worth doing anything about (why would you care??) 103 103 */ 104 - static int sg_emulated_host(request_queue_t *q, int __user *p) 104 + static int sg_emulated_host(struct request_queue *q, int __user *p) 105 105 { 106 106 return put_user(1, p); 107 107 } ··· 214 214 } 215 215 EXPORT_SYMBOL_GPL(blk_verify_command); 216 216 217 - static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq, 217 + static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, 218 218 struct sg_io_hdr *hdr, int has_write_perm) 219 219 { 220 220 memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ ··· 286 286 return r; 287 287 } 288 288 289 - static int sg_io(struct file *file, request_queue_t *q, 289 + static int sg_io(struct file *file, struct request_queue *q, 290 290 struct gendisk *bd_disk, struct sg_io_hdr *hdr) 291 291 { 292 292 unsigned long start_time; ··· 519 519 EXPORT_SYMBOL_GPL(sg_scsi_ioctl); 520 520 521 521 /* Send basic block requests */ 522 - static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data) 522 + static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, 523 + int cmd, int data) 523 524 { 524 525 struct request *rq; 525 526 int err; ··· 540 539 return err; 541 540 } 542 541 543 - static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_disk, int data) 542 + static inline int blk_send_start_stop(struct request_queue *q, 543 + struct gendisk *bd_disk, int data) 544 544 { 545 545 return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); 546 546 }
+2 -2
drivers/acorn/block/fd1772.c
··· 372 372 static void config_types(void); 373 373 static int floppy_open(struct inode *inode, struct file *filp); 374 374 static int floppy_release(struct inode *inode, struct file *filp); 375 - static void do_fd_request(request_queue_t *); 375 + static void do_fd_request(struct request_queue *); 376 376 377 377 /************************* End of Prototypes **************************/ 378 378 ··· 1271 1271 } 1272 1272 } 1273 1273 1274 - static void do_fd_request(request_queue_t* q) 1274 + static void do_fd_request(struct request_queue* q) 1275 1275 { 1276 1276 unsigned long flags; 1277 1277
+1 -1
drivers/acorn/block/mfmhd.c
··· 924 924 DBG("mfm_request: Dropping out bottom\n"); 925 925 } 926 926 927 - static void do_mfm_request(request_queue_t *q) 927 + static void do_mfm_request(struct request_queue *q) 928 928 { 929 929 DBG("do_mfm_request: about to mfm_request\n"); 930 930 mfm_request();
+1 -1
drivers/ata/libata-scsi.c
··· 768 768 * Decrement max hw segments accordingly. 769 769 */ 770 770 if (dev->class == ATA_DEV_ATAPI) { 771 - request_queue_t *q = sdev->request_queue; 771 + struct request_queue *q = sdev->request_queue; 772 772 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 773 773 } 774 774
+1 -1
drivers/block/amiflop.c
··· 1422 1422 goto repeat; 1423 1423 } 1424 1424 1425 - static void do_fd_request(request_queue_t * q) 1425 + static void do_fd_request(struct request_queue * q) 1426 1426 { 1427 1427 redo_fd_request(); 1428 1428 }
+1 -1
drivers/block/aoe/aoe.h
··· 138 138 u16 maxbcnt; 139 139 struct work_struct work;/* disk create work struct */ 140 140 struct gendisk *gd; 141 - request_queue_t blkq; 141 + struct request_queue blkq; 142 142 struct hd_geometry geo; 143 143 sector_t ssize; 144 144 struct timer_list timer;
+1 -1
drivers/block/aoe/aoeblk.c
··· 125 125 } 126 126 127 127 static int 128 - aoeblk_make_request(request_queue_t *q, struct bio *bio) 128 + aoeblk_make_request(struct request_queue *q, struct bio *bio) 129 129 { 130 130 struct aoedev *d; 131 131 struct buf *buf;
+1 -1
drivers/block/ataflop.c
··· 1466 1466 } 1467 1467 1468 1468 1469 - void do_fd_request(request_queue_t * q) 1469 + void do_fd_request(struct request_queue * q) 1470 1470 { 1471 1471 unsigned long flags; 1472 1472
+5 -5
drivers/block/cciss.c
··· 139 139 140 140 static ctlr_info_t *hba[MAX_CTLR]; 141 141 142 - static void do_cciss_request(request_queue_t *q); 142 + static void do_cciss_request(struct request_queue *q); 143 143 static irqreturn_t do_cciss_intr(int irq, void *dev_id); 144 144 static int cciss_open(struct inode *inode, struct file *filep); 145 145 static int cciss_release(struct inode *inode, struct file *filep); ··· 1584 1584 */ 1585 1585 if (h->gendisk[0] != disk) { 1586 1586 if (disk) { 1587 - request_queue_t *q = disk->queue; 1587 + struct request_queue *q = disk->queue; 1588 1588 if (disk->flags & GENHD_FL_UP) 1589 1589 del_gendisk(disk); 1590 1590 if (q) { ··· 2511 2511 /* 2512 2512 * Get a request and submit it to the controller. 2513 2513 */ 2514 - static void do_cciss_request(request_queue_t *q) 2514 + static void do_cciss_request(struct request_queue *q) 2515 2515 { 2516 2516 ctlr_info_t *h = q->queuedata; 2517 2517 CommandList_struct *c; ··· 3380 3380 do { 3381 3381 drive_info_struct *drv = &(hba[i]->drv[j]); 3382 3382 struct gendisk *disk = hba[i]->gendisk[j]; 3383 - request_queue_t *q; 3383 + struct request_queue *q; 3384 3384 3385 3385 /* Check if the disk was allocated already */ 3386 3386 if (!disk){ ··· 3523 3523 for (j = 0; j < CISS_MAX_LUN; j++) { 3524 3524 struct gendisk *disk = hba[i]->gendisk[j]; 3525 3525 if (disk) { 3526 - request_queue_t *q = disk->queue; 3526 + struct request_queue *q = disk->queue; 3527 3527 3528 3528 if (disk->flags & GENHD_FL_UP) 3529 3529 del_gendisk(disk);
+3 -3
drivers/block/cpqarray.c
··· 161 161 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo); 162 162 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io); 163 163 164 - static void do_ida_request(request_queue_t *q); 164 + static void do_ida_request(struct request_queue *q); 165 165 static void start_io(ctlr_info_t *h); 166 166 167 167 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c); ··· 391 391 /* pdev is NULL for eisa */ 392 392 static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev) 393 393 { 394 - request_queue_t *q; 394 + struct request_queue *q; 395 395 int j; 396 396 397 397 /* ··· 886 886 * are in here (either via the dummy do_ida_request functions or by being 887 887 * called from the interrupt handler 888 888 */ 889 - static void do_ida_request(request_queue_t *q) 889 + static void do_ida_request(struct request_queue *q) 890 890 { 891 891 ctlr_info_t *h = q->queuedata; 892 892 cmdlist_t *c;
+2 -2
drivers/block/floppy.c
··· 251 251 252 252 static struct request *current_req; 253 253 static struct request_queue *floppy_queue; 254 - static void do_fd_request(request_queue_t * q); 254 + static void do_fd_request(struct request_queue * q); 255 255 256 256 #ifndef fd_get_dma_residue 257 257 #define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA) ··· 2981 2981 schedule_bh(redo_fd_request); 2982 2982 } 2983 2983 2984 - static void do_fd_request(request_queue_t * q) 2984 + static void do_fd_request(struct request_queue * q) 2985 2985 { 2986 2986 if (max_buffer_sectors == 0) { 2987 2987 printk("VFS: do_fd_request called on non-open device\n");
+1 -1
drivers/block/lguest_blk.c
··· 137 137 lguest_send_dma(bd->phys_addr, &ping); 138 138 } 139 139 140 - static void do_lgb_request(request_queue_t *q) 140 + static void do_lgb_request(struct request_queue *q) 141 141 { 142 142 struct blockdev *bd; 143 143 struct request *req;
+2 -2
drivers/block/loop.c
··· 529 529 return bio; 530 530 } 531 531 532 - static int loop_make_request(request_queue_t *q, struct bio *old_bio) 532 + static int loop_make_request(struct request_queue *q, struct bio *old_bio) 533 533 { 534 534 struct loop_device *lo = q->queuedata; 535 535 int rw = bio_rw(old_bio); ··· 558 558 /* 559 559 * kick off io on the underlying address space 560 560 */ 561 - static void loop_unplug(request_queue_t *q) 561 + static void loop_unplug(struct request_queue *q) 562 562 { 563 563 struct loop_device *lo = q->queuedata; 564 564
+2 -2
drivers/block/nbd.c
··· 100 100 static void nbd_end_request(struct request *req) 101 101 { 102 102 int uptodate = (req->errors == 0) ? 1 : 0; 103 - request_queue_t *q = req->q; 103 + struct request_queue *q = req->q; 104 104 unsigned long flags; 105 105 106 106 dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name, ··· 410 410 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } 411 411 */ 412 412 413 - static void do_nbd_request(request_queue_t * q) 413 + static void do_nbd_request(struct request_queue * q) 414 414 { 415 415 struct request *req; 416 416
+2 -2
drivers/block/paride/pcd.c
··· 183 183 static int pcd_detect(void); 184 184 static void pcd_probe_capabilities(void); 185 185 static void do_pcd_read_drq(void); 186 - static void do_pcd_request(request_queue_t * q); 186 + static void do_pcd_request(struct request_queue * q); 187 187 static void do_pcd_read(void); 188 188 189 189 struct pcd_unit { ··· 713 713 /* I/O request processing */ 714 714 static struct request_queue *pcd_queue; 715 715 716 - static void do_pcd_request(request_queue_t * q) 716 + static void do_pcd_request(struct request_queue * q) 717 717 { 718 718 if (pcd_busy) 719 719 return;
+1 -1
drivers/block/paride/pd.c
··· 698 698 699 699 /* end of io request engine */ 700 700 701 - static void do_pd_request(request_queue_t * q) 701 + static void do_pd_request(struct request_queue * q) 702 702 { 703 703 if (pd_req) 704 704 return;
+2 -2
drivers/block/paride/pf.c
··· 202 202 #define ATAPI_WRITE_10 0x2a 203 203 204 204 static int pf_open(struct inode *inode, struct file *file); 205 - static void do_pf_request(request_queue_t * q); 205 + static void do_pf_request(struct request_queue * q); 206 206 static int pf_ioctl(struct inode *inode, struct file *file, 207 207 unsigned int cmd, unsigned long arg); 208 208 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo); ··· 760 760 } 761 761 } 762 762 763 - static void do_pf_request(request_queue_t * q) 763 + static void do_pf_request(struct request_queue * q) 764 764 { 765 765 if (pf_busy) 766 766 return;
+6 -6
drivers/block/pktcdvd.c
··· 752 752 */ 753 753 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) 754 754 { 755 - request_queue_t *q = bdev_get_queue(pd->bdev); 755 + struct request_queue *q = bdev_get_queue(pd->bdev); 756 756 struct request *rq; 757 757 int ret = 0; 758 758 ··· 979 979 * Special care is needed if the underlying block device has a small 980 980 * max_phys_segments value. 981 981 */ 982 - static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q) 982 + static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) 983 983 { 984 984 if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) { 985 985 /* ··· 2314 2314 { 2315 2315 int ret; 2316 2316 long lba; 2317 - request_queue_t *q; 2317 + struct request_queue *q; 2318 2318 2319 2319 /* 2320 2320 * We need to re-open the cdrom device without O_NONBLOCK to be able ··· 2477 2477 return 0; 2478 2478 } 2479 2479 2480 - static int pkt_make_request(request_queue_t *q, struct bio *bio) 2480 + static int pkt_make_request(struct request_queue *q, struct bio *bio) 2481 2481 { 2482 2482 struct pktcdvd_device *pd; 2483 2483 char b[BDEVNAME_SIZE]; ··· 2626 2626 2627 2627 2628 2628 2629 - static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec) 2629 + static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec) 2630 2630 { 2631 2631 struct pktcdvd_device *pd = q->queuedata; 2632 2632 sector_t zone = ZONE(bio->bi_sector, pd); ··· 2647 2647 2648 2648 static void pkt_init_queue(struct pktcdvd_device *pd) 2649 2649 { 2650 - request_queue_t *q = pd->disk->queue; 2650 + struct request_queue *q = pd->disk->queue; 2651 2651 2652 2652 blk_queue_make_request(q, pkt_make_request); 2653 2653 blk_queue_hardsect_size(q, CD_FRAMESIZE);
+2 -2
drivers/block/ps2esdi.c
··· 64 64 65 65 static int ps2esdi_geninit(void); 66 66 67 - static void do_ps2esdi_request(request_queue_t * q); 67 + static void do_ps2esdi_request(struct request_queue * q); 68 68 69 69 static void ps2esdi_readwrite(int cmd, struct request *req); 70 70 ··· 473 473 } 474 474 475 475 /* strategy routine that handles most of the IO requests */ 476 - static void do_ps2esdi_request(request_queue_t * q) 476 + static void do_ps2esdi_request(struct request_queue * q) 477 477 { 478 478 struct request *req; 479 479 /* since, this routine is called with interrupts cleared - they
+4 -4
drivers/block/ps3disk.c
··· 190 190 } 191 191 192 192 static void ps3disk_do_request(struct ps3_storage_device *dev, 193 - request_queue_t *q) 193 + struct request_queue *q) 194 194 { 195 195 struct request *req; 196 196 ··· 211 211 } 212 212 } 213 213 214 - static void ps3disk_request(request_queue_t *q) 214 + static void ps3disk_request(struct request_queue *q) 215 215 { 216 216 struct ps3_storage_device *dev = q->queuedata; 217 217 struct ps3disk_private *priv = dev->sbd.core.driver_data; ··· 404 404 return 0; 405 405 } 406 406 407 - static void ps3disk_prepare_flush(request_queue_t *q, struct request *req) 407 + static void ps3disk_prepare_flush(struct request_queue *q, struct request *req) 408 408 { 409 409 struct ps3_storage_device *dev = q->queuedata; 410 410 ··· 414 414 req->cmd_type = REQ_TYPE_FLUSH; 415 415 } 416 416 417 - static int ps3disk_issue_flush(request_queue_t *q, struct gendisk *gendisk, 417 + static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk, 418 418 sector_t *sector) 419 419 { 420 420 struct ps3_storage_device *dev = q->queuedata;
+1 -1
drivers/block/rd.c
··· 264 264 * 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Added devfs support 265 265 * 266 266 */ 267 - static int rd_make_request(request_queue_t *q, struct bio *bio) 267 + static int rd_make_request(struct request_queue *q, struct bio *bio) 268 268 { 269 269 struct block_device *bdev = bio->bi_bdev; 270 270 struct address_space * mapping = bdev->bd_inode->i_mapping;
+1 -1
drivers/block/sunvdc.c
··· 444 444 return err; 445 445 } 446 446 447 - static void do_vdc_request(request_queue_t *q) 447 + static void do_vdc_request(struct request_queue *q) 448 448 { 449 449 while (1) { 450 450 struct request *req = elv_next_request(q);
+2 -2
drivers/block/swim3.c
··· 225 225 static void swim3_select(struct floppy_state *fs, int sel); 226 226 static void swim3_action(struct floppy_state *fs, int action); 227 227 static int swim3_readbit(struct floppy_state *fs, int bit); 228 - static void do_fd_request(request_queue_t * q); 228 + static void do_fd_request(struct request_queue * q); 229 229 static void start_request(struct floppy_state *fs); 230 230 static void set_timeout(struct floppy_state *fs, int nticks, 231 231 void (*proc)(unsigned long)); ··· 290 290 return (stat & DATA) == 0; 291 291 } 292 292 293 - static void do_fd_request(request_queue_t * q) 293 + static void do_fd_request(struct request_queue * q) 294 294 { 295 295 int i; 296 296 for(i=0;i<floppy_count;i++)
+10 -10
drivers/block/sx8.c
··· 278 278 unsigned int state; 279 279 u32 fw_ver; 280 280 281 - request_queue_t *oob_q; 281 + struct request_queue *oob_q; 282 282 unsigned int n_oob; 283 283 284 284 unsigned int hw_sg_used; ··· 287 287 288 288 unsigned int wait_q_prod; 289 289 unsigned int wait_q_cons; 290 - request_queue_t *wait_q[CARM_MAX_WAIT_Q]; 290 + struct request_queue *wait_q[CARM_MAX_WAIT_Q]; 291 291 292 292 unsigned int n_msgs; 293 293 u64 msg_alloc; ··· 756 756 assert(rc == 0); 757 757 } 758 758 759 - static inline void carm_push_q (struct carm_host *host, request_queue_t *q) 759 + static inline void carm_push_q (struct carm_host *host, struct request_queue *q) 760 760 { 761 761 unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q; 762 762 ··· 768 768 BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */ 769 769 } 770 770 771 - static inline request_queue_t *carm_pop_q(struct carm_host *host) 771 + static inline struct request_queue *carm_pop_q(struct carm_host *host) 772 772 { 773 773 unsigned int idx; 774 774 ··· 783 783 784 784 static inline void carm_round_robin(struct carm_host *host) 785 785 { 786 - request_queue_t *q = carm_pop_q(host); 786 + struct request_queue *q = carm_pop_q(host); 787 787 if (q) { 788 788 blk_start_queue(q); 789 789 VPRINTK("STARTED QUEUE %p\n", q); ··· 802 802 } 803 803 } 804 804 805 - static void carm_oob_rq_fn(request_queue_t *q) 805 + static void carm_oob_rq_fn(struct request_queue *q) 806 806 { 807 807 struct carm_host *host = q->queuedata; 808 808 struct carm_request *crq; ··· 833 833 } 834 834 } 835 835 836 - static void carm_rq_fn(request_queue_t *q) 836 + static void carm_rq_fn(struct request_queue *q) 837 837 { 838 838 struct carm_port *port = q->queuedata; 839 839 struct carm_host *host = port->host; ··· 1494 1494 1495 1495 for (i = 0; i < CARM_MAX_PORTS; i++) { 1496 1496 struct gendisk *disk; 1497 - request_queue_t *q; 1497 + struct request_queue *q; 1498 1498 struct carm_port *port; 1499 1499 1500 1500 port = &host->port[i]; ··· 1538 1538 for (i = 0; i < CARM_MAX_PORTS; i++) { 1539 1539 struct gendisk *disk = host->port[i].disk; 1540 1540 if (disk) { 1541 - request_queue_t *q = disk->queue; 1541 + struct request_queue *q = disk->queue; 1542 1542 1543 1543 if (disk->flags & GENHD_FL_UP) 1544 1544 del_gendisk(disk); ··· 1571 1571 struct carm_host *host; 1572 1572 unsigned int pci_dac; 1573 1573 int rc; 1574 - request_queue_t *q; 1574 + struct request_queue *q; 1575 1575 unsigned int i; 1576 1576 1577 1577 if (!printed_version++)
+3 -3
drivers/block/ub.c
··· 503 503 { 504 504 struct list_head *p; 505 505 struct ub_lun *lun; 506 - request_queue_t *q; 506 + struct request_queue *q; 507 507 508 508 while (!list_empty(&sc->luns)) { 509 509 p = sc->luns.next; ··· 619 619 * The request function is our main entry point 620 620 */ 621 621 622 - static void ub_request_fn(request_queue_t *q) 622 + static void ub_request_fn(struct request_queue *q) 623 623 { 624 624 struct ub_lun *lun = q->queuedata; 625 625 struct request *rq; ··· 2273 2273 static int ub_probe_lun(struct ub_dev *sc, int lnum) 2274 2274 { 2275 2275 struct ub_lun *lun; 2276 - request_queue_t *q; 2276 + struct request_queue *q; 2277 2277 struct gendisk *disk; 2278 2278 int rc; 2279 2279
+3 -3
drivers/block/umem.c
··· 114 114 */ 115 115 struct bio *bio, *currentbio, **biotail; 116 116 117 - request_queue_t *queue; 117 + struct request_queue *queue; 118 118 119 119 struct mm_page { 120 120 dma_addr_t page_dma; ··· 357 357 page->biotail = & page->bio; 358 358 } 359 359 360 - static void mm_unplug_device(request_queue_t *q) 360 + static void mm_unplug_device(struct request_queue *q) 361 361 { 362 362 struct cardinfo *card = q->queuedata; 363 363 unsigned long flags; ··· 541 541 -- mm_make_request 542 542 ----------------------------------------------------------------------------------- 543 543 */ 544 - static int mm_make_request(request_queue_t *q, struct bio *bio) 544 + static int mm_make_request(struct request_queue *q, struct bio *bio) 545 545 { 546 546 struct cardinfo *card = q->queuedata; 547 547 pr_debug("mm_make_request %llu %u\n",
+1 -1
drivers/block/viodasd.c
··· 400 400 /* 401 401 * This is the external request processing routine 402 402 */ 403 - static void do_viodasd_request(request_queue_t *q) 403 + static void do_viodasd_request(struct request_queue *q) 404 404 { 405 405 struct request *req; 406 406
+1 -1
drivers/block/xd.c
··· 298 298 } 299 299 300 300 /* do_xd_request: handle an incoming request */ 301 - static void do_xd_request (request_queue_t * q) 301 + static void do_xd_request (struct request_queue * q) 302 302 { 303 303 struct request *req; 304 304
+1 -1
drivers/block/xd.h
··· 104 104 static u_char xd_detect (u_char *controller, unsigned int *address); 105 105 static u_char xd_initdrives (void (*init_drive)(u_char drive)); 106 106 107 - static void do_xd_request (request_queue_t * q); 107 + static void do_xd_request (struct request_queue * q); 108 108 static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg); 109 109 static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count); 110 110 static void xd_recalibrate (u_char drive);
+2 -2
drivers/block/xen-blkfront.c
··· 241 241 * do_blkif_request 242 242 * read a block; request is in a request queue 243 243 */ 244 - static void do_blkif_request(request_queue_t *rq) 244 + static void do_blkif_request(struct request_queue *rq) 245 245 { 246 246 struct blkfront_info *info = NULL; 247 247 struct request *req; ··· 287 287 288 288 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) 289 289 { 290 - request_queue_t *rq; 290 + struct request_queue *rq; 291 291 292 292 rq = blk_init_queue(do_blkif_request, &blkif_io_lock); 293 293 if (rq == NULL)
+2 -2
drivers/block/xsysace.c
··· 458 458 } 459 459 460 460 /* Get the next read/write request; ending requests that we don't handle */ 461 - struct request *ace_get_next_request(request_queue_t * q) 461 + struct request *ace_get_next_request(struct request_queue * q) 462 462 { 463 463 struct request *req; 464 464 ··· 825 825 /* --------------------------------------------------------------------- 826 826 * Block ops 827 827 */ 828 - static void ace_request(request_queue_t * q) 828 + static void ace_request(struct request_queue * q) 829 829 { 830 830 struct request *req; 831 831 struct ace_device *ace;
+1 -1
drivers/block/z2ram.c
··· 67 67 static struct block_device_operations z2_fops; 68 68 static struct gendisk *z2ram_gendisk; 69 69 70 - static void do_z2_request(request_queue_t *q) 70 + static void do_z2_request(struct request_queue *q) 71 71 { 72 72 struct request *req; 73 73 while ((req = elv_next_request(q)) != NULL) {
+1 -1
drivers/cdrom/cdrom.c
··· 2094 2094 static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf, 2095 2095 int lba, int nframes) 2096 2096 { 2097 - request_queue_t *q = cdi->disk->queue; 2097 + struct request_queue *q = cdi->disk->queue; 2098 2098 struct request *rq; 2099 2099 struct bio *bio; 2100 2100 unsigned int len;
+1 -1
drivers/cdrom/viocd.c
··· 398 398 399 399 static int rwreq; 400 400 401 - static void do_viocd_request(request_queue_t *q) 401 + static void do_viocd_request(struct request_queue *q) 402 402 { 403 403 struct request *req; 404 404
+2 -2
drivers/ide/ide-cd.c
··· 3071 3071 /* 3072 3072 * standard prep_rq_fn that builds 10 byte cmds 3073 3073 */ 3074 - static int ide_cdrom_prep_fs(request_queue_t *q, struct request *rq) 3074 + static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) 3075 3075 { 3076 3076 int hard_sect = queue_hardsect_size(q); 3077 3077 long block = (long)rq->hard_sector / (hard_sect >> 9); ··· 3137 3137 return BLKPREP_OK; 3138 3138 } 3139 3139 3140 - static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq) 3140 + static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) 3141 3141 { 3142 3142 if (blk_fs_request(rq)) 3143 3143 return ide_cdrom_prep_fs(q, rq);
+2 -2
drivers/ide/ide-disk.c
··· 679 679 }; 680 680 #endif /* CONFIG_IDE_PROC_FS */ 681 681 682 - static void idedisk_prepare_flush(request_queue_t *q, struct request *rq) 682 + static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) 683 683 { 684 684 ide_drive_t *drive = q->queuedata; 685 685 ··· 697 697 rq->buffer = rq->cmd; 698 698 } 699 699 700 - static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk, 700 + static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk, 701 701 sector_t *error_sector) 702 702 { 703 703 ide_drive_t *drive = q->queuedata;
+1 -1
drivers/ide/ide-io.c
··· 1327 1327 /* 1328 1328 * Passes the stuff to ide_do_request 1329 1329 */ 1330 - void do_ide_request(request_queue_t *q) 1330 + void do_ide_request(struct request_queue *q) 1331 1331 { 1332 1332 ide_drive_t *drive = q->queuedata; 1333 1333
+1 -1
drivers/ide/ide-probe.c
··· 945 945 */ 946 946 static int ide_init_queue(ide_drive_t *drive) 947 947 { 948 - request_queue_t *q; 948 + struct request_queue *q; 949 949 ide_hwif_t *hwif = HWIF(drive); 950 950 int max_sectors = 256; 951 951 int max_sg_entries = PRD_ENTRIES;
+1 -1
drivers/ide/legacy/hd.c
··· 652 652 } 653 653 } 654 654 655 - static void do_hd_request (request_queue_t * q) 655 + static void do_hd_request (struct request_queue * q) 656 656 { 657 657 disable_irq(HD_IRQ); 658 658 hd_request();
+4 -4
drivers/md/dm-table.c
··· 526 526 527 527 void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) 528 528 { 529 - request_queue_t *q = bdev_get_queue(bdev); 529 + struct request_queue *q = bdev_get_queue(bdev); 530 530 struct io_restrictions *rs = &ti->limits; 531 531 532 532 /* ··· 979 979 devices = dm_table_get_devices(t); 980 980 for (d = devices->next; d != devices; d = d->next) { 981 981 struct dm_dev *dd = list_entry(d, struct dm_dev, list); 982 - request_queue_t *q = bdev_get_queue(dd->bdev); 982 + struct request_queue *q = bdev_get_queue(dd->bdev); 983 983 r |= bdi_congested(&q->backing_dev_info, bdi_bits); 984 984 } 985 985 ··· 992 992 993 993 for (d = devices->next; d != devices; d = d->next) { 994 994 struct dm_dev *dd = list_entry(d, struct dm_dev, list); 995 - request_queue_t *q = bdev_get_queue(dd->bdev); 995 + struct request_queue *q = bdev_get_queue(dd->bdev); 996 996 997 997 if (q->unplug_fn) 998 998 q->unplug_fn(q); ··· 1011 1011 1012 1012 for (d = devices->next; d != devices; d = d->next) { 1013 1013 struct dm_dev *dd = list_entry(d, struct dm_dev, list); 1014 - request_queue_t *q = bdev_get_queue(dd->bdev); 1014 + struct request_queue *q = bdev_get_queue(dd->bdev); 1015 1015 int err; 1016 1016 1017 1017 if (!q->issue_flush_fn)
+5 -5
drivers/md/dm.c
··· 80 80 81 81 unsigned long flags; 82 82 83 - request_queue_t *queue; 83 + struct request_queue *queue; 84 84 struct gendisk *disk; 85 85 char name[16]; 86 86 ··· 792 792 * The request function that just remaps the bio built up by 793 793 * dm_merge_bvec. 794 794 */ 795 - static int dm_request(request_queue_t *q, struct bio *bio) 795 + static int dm_request(struct request_queue *q, struct bio *bio) 796 796 { 797 797 int r; 798 798 int rw = bio_data_dir(bio); ··· 844 844 return 0; 845 845 } 846 846 847 - static int dm_flush_all(request_queue_t *q, struct gendisk *disk, 847 + static int dm_flush_all(struct request_queue *q, struct gendisk *disk, 848 848 sector_t *error_sector) 849 849 { 850 850 struct mapped_device *md = q->queuedata; ··· 859 859 return ret; 860 860 } 861 861 862 - static void dm_unplug_all(request_queue_t *q) 862 + static void dm_unplug_all(struct request_queue *q) 863 863 { 864 864 struct mapped_device *md = q->queuedata; 865 865 struct dm_table *map = dm_get_table(md); ··· 1110 1110 1111 1111 static int __bind(struct mapped_device *md, struct dm_table *t) 1112 1112 { 1113 - request_queue_t *q = md->queue; 1113 + struct request_queue *q = md->queue; 1114 1114 sector_t size; 1115 1115 1116 1116 size = dm_table_get_size(t);
+1 -1
drivers/md/faulty.c
··· 167 167 conf->nfaults = n+1; 168 168 } 169 169 170 - static int make_request(request_queue_t *q, struct bio *bio) 170 + static int make_request(struct request_queue *q, struct bio *bio) 171 171 { 172 172 mddev_t *mddev = q->queuedata; 173 173 conf_t *conf = (conf_t*)mddev->private;
+7 -7
drivers/md/linear.c
··· 55 55 * 56 56 * Return amount of bytes we can take at this offset 57 57 */ 58 - static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec) 58 + static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) 59 59 { 60 60 mddev_t *mddev = q->queuedata; 61 61 dev_info_t *dev0; ··· 79 79 return maxsectors << 9; 80 80 } 81 81 82 - static void linear_unplug(request_queue_t *q) 82 + static void linear_unplug(struct request_queue *q) 83 83 { 84 84 mddev_t *mddev = q->queuedata; 85 85 linear_conf_t *conf = mddev_to_conf(mddev); 86 86 int i; 87 87 88 88 for (i=0; i < mddev->raid_disks; i++) { 89 - request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev); 89 + struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev); 90 90 if (r_queue->unplug_fn) 91 91 r_queue->unplug_fn(r_queue); 92 92 } 93 93 } 94 94 95 - static int linear_issue_flush(request_queue_t *q, struct gendisk *disk, 95 + static int linear_issue_flush(struct request_queue *q, struct gendisk *disk, 96 96 sector_t *error_sector) 97 97 { 98 98 mddev_t *mddev = q->queuedata; ··· 101 101 102 102 for (i=0; i < mddev->raid_disks && ret == 0; i++) { 103 103 struct block_device *bdev = conf->disks[i].rdev->bdev; 104 - request_queue_t *r_queue = bdev_get_queue(bdev); 104 + struct request_queue *r_queue = bdev_get_queue(bdev); 105 105 106 106 if (!r_queue->issue_flush_fn) 107 107 ret = -EOPNOTSUPP; ··· 118 118 int i, ret = 0; 119 119 120 120 for (i = 0; i < mddev->raid_disks && !ret ; i++) { 121 - request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev); 121 + struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); 122 122 ret |= bdi_congested(&q->backing_dev_info, bits); 123 123 } 124 124 return ret; ··· 330 330 return 0; 331 331 } 332 332 333 - static int linear_make_request (request_queue_t *q, struct bio *bio) 333 + static int linear_make_request (struct request_queue *q, struct bio *bio) 334 334 { 335 335 const int rw = bio_data_dir(bio); 336 336 mddev_t *mddev = q->queuedata;
+1 -1
drivers/md/md.c
··· 211 211 ) 212 212 213 213 214 - static int md_fail_request (request_queue_t *q, struct bio *bio) 214 + static int md_fail_request (struct request_queue *q, struct bio *bio) 215 215 { 216 216 bio_io_error(bio, bio->bi_size); 217 217 return 0;
+6 -6
drivers/md/multipath.c
··· 125 125 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); 126 126 if (rdev && !test_bit(Faulty, &rdev->flags) 127 127 && atomic_read(&rdev->nr_pending)) { 128 - request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 128 + struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 129 129 130 130 atomic_inc(&rdev->nr_pending); 131 131 rcu_read_unlock(); ··· 140 140 rcu_read_unlock(); 141 141 } 142 142 143 - static void multipath_unplug(request_queue_t *q) 143 + static void multipath_unplug(struct request_queue *q) 144 144 { 145 145 unplug_slaves(q->queuedata); 146 146 } 147 147 148 148 149 - static int multipath_make_request (request_queue_t *q, struct bio * bio) 149 + static int multipath_make_request (struct request_queue *q, struct bio * bio) 150 150 { 151 151 mddev_t *mddev = q->queuedata; 152 152 multipath_conf_t *conf = mddev_to_conf(mddev); ··· 199 199 seq_printf (seq, "]"); 200 200 } 201 201 202 - static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk, 202 + static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk, 203 203 sector_t *error_sector) 204 204 { 205 205 mddev_t *mddev = q->queuedata; ··· 211 211 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); 212 212 if (rdev && !test_bit(Faulty, &rdev->flags)) { 213 213 struct block_device *bdev = rdev->bdev; 214 - request_queue_t *r_queue = bdev_get_queue(bdev); 214 + struct request_queue *r_queue = bdev_get_queue(bdev); 215 215 216 216 if (!r_queue->issue_flush_fn) 217 217 ret = -EOPNOTSUPP; ··· 238 238 for (i = 0; i < mddev->raid_disks ; i++) { 239 239 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); 240 240 if (rdev && !test_bit(Faulty, &rdev->flags)) { 241 - request_queue_t *q = bdev_get_queue(rdev->bdev); 241 + struct request_queue *q = bdev_get_queue(rdev->bdev); 242 242 243 243 ret |= bdi_congested(&q->backing_dev_info, bits); 244 244 /* Just like multipath_map, we just check the
+7 -7
drivers/md/raid0.c
··· 25 25 #define MD_DRIVER 26 26 #define MD_PERSONALITY 27 27 28 - static void raid0_unplug(request_queue_t *q) 28 + static void raid0_unplug(struct request_queue *q) 29 29 { 30 30 mddev_t *mddev = q->queuedata; 31 31 raid0_conf_t *conf = mddev_to_conf(mddev); ··· 33 33 int i; 34 34 35 35 for (i=0; i<mddev->raid_disks; i++) { 36 - request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev); 36 + struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev); 37 37 38 38 if (r_queue->unplug_fn) 39 39 r_queue->unplug_fn(r_queue); 40 40 } 41 41 } 42 42 43 - static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk, 43 + static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk, 44 44 sector_t *error_sector) 45 45 { 46 46 mddev_t *mddev = q->queuedata; ··· 50 50 51 51 for (i=0; i<mddev->raid_disks && ret == 0; i++) { 52 52 struct block_device *bdev = devlist[i]->bdev; 53 - request_queue_t *r_queue = bdev_get_queue(bdev); 53 + struct request_queue *r_queue = bdev_get_queue(bdev); 54 54 55 55 if (!r_queue->issue_flush_fn) 56 56 ret = -EOPNOTSUPP; ··· 68 68 int i, ret = 0; 69 69 70 70 for (i = 0; i < mddev->raid_disks && !ret ; i++) { 71 - request_queue_t *q = bdev_get_queue(devlist[i]->bdev); 71 + struct request_queue *q = bdev_get_queue(devlist[i]->bdev); 72 72 73 73 ret |= bdi_congested(&q->backing_dev_info, bits); 74 74 } ··· 268 268 * 269 269 * Return amount of bytes we can accept at this offset 270 270 */ 271 - static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec) 271 + static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) 272 272 { 273 273 mddev_t *mddev = q->queuedata; 274 274 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); ··· 408 408 return 0; 409 409 } 410 410 411 - static int raid0_make_request (request_queue_t *q, struct bio *bio) 411 + static int raid0_make_request (struct request_queue *q, struct bio *bio) 412 412 { 413 413 mddev_t *mddev = q->queuedata; 414 414 unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
+6 -6
drivers/md/raid1.c
··· 552 552 for (i=0; i<mddev->raid_disks; i++) { 553 553 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 554 554 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 555 - request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 555 + struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 556 556 557 557 atomic_inc(&rdev->nr_pending); 558 558 rcu_read_unlock(); ··· 567 567 rcu_read_unlock(); 568 568 } 569 569 570 - static void raid1_unplug(request_queue_t *q) 570 + static void raid1_unplug(struct request_queue *q) 571 571 { 572 572 mddev_t *mddev = q->queuedata; 573 573 ··· 575 575 md_wakeup_thread(mddev->thread); 576 576 } 577 577 578 - static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk, 578 + static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk, 579 579 sector_t *error_sector) 580 580 { 581 581 mddev_t *mddev = q->queuedata; ··· 587 587 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 588 588 if (rdev && !test_bit(Faulty, &rdev->flags)) { 589 589 struct block_device *bdev = rdev->bdev; 590 - request_queue_t *r_queue = bdev_get_queue(bdev); 590 + struct request_queue *r_queue = bdev_get_queue(bdev); 591 591 592 592 if (!r_queue->issue_flush_fn) 593 593 ret = -EOPNOTSUPP; ··· 615 615 for (i = 0; i < mddev->raid_disks; i++) { 616 616 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 617 617 if (rdev && !test_bit(Faulty, &rdev->flags)) { 618 - request_queue_t *q = bdev_get_queue(rdev->bdev); 618 + struct request_queue *q = bdev_get_queue(rdev->bdev); 619 619 620 620 /* Note the '|| 1' - when read_balance prefers 621 621 * non-congested targets, it can be removed ··· 765 765 return NULL; 766 766 } 767 767 768 - static int make_request(request_queue_t *q, struct bio * bio) 768 + static int make_request(struct request_queue *q, struct bio * bio) 769 769 { 770 770 mddev_t *mddev = q->queuedata; 771 771 conf_t *conf = mddev_to_conf(mddev);
+7 -7
drivers/md/raid10.c
··· 453 453 * If near_copies == raid_disk, there are no striping issues, 454 454 * but in that case, the function isn't called at all. 455 455 */ 456 - static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio, 456 + static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio, 457 457 struct bio_vec *bio_vec) 458 458 { 459 459 mddev_t *mddev = q->queuedata; ··· 595 595 for (i=0; i<mddev->raid_disks; i++) { 596 596 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 597 597 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 598 - request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 598 + struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 599 599 600 600 atomic_inc(&rdev->nr_pending); 601 601 rcu_read_unlock(); ··· 610 610 rcu_read_unlock(); 611 611 } 612 612 613 - static void raid10_unplug(request_queue_t *q) 613 + static void raid10_unplug(struct request_queue *q) 614 614 { 615 615 mddev_t *mddev = q->queuedata; 616 616 ··· 618 618 md_wakeup_thread(mddev->thread); 619 619 } 620 620 621 - static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, 621 + static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk, 622 622 sector_t *error_sector) 623 623 { 624 624 mddev_t *mddev = q->queuedata; ··· 630 630 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 631 631 if (rdev && !test_bit(Faulty, &rdev->flags)) { 632 632 struct block_device *bdev = rdev->bdev; 633 - request_queue_t *r_queue = bdev_get_queue(bdev); 633 + struct request_queue *r_queue = bdev_get_queue(bdev); 634 634 635 635 if (!r_queue->issue_flush_fn) 636 636 ret = -EOPNOTSUPP; ··· 658 658 for (i = 0; i < mddev->raid_disks && ret == 0; i++) { 659 659 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 660 660 if (rdev && !test_bit(Faulty, &rdev->flags)) { 661 - request_queue_t *q = bdev_get_queue(rdev->bdev); 661 + struct request_queue *q = bdev_get_queue(rdev->bdev); 662 662 663 663 ret |= bdi_congested(&q->backing_dev_info, bits); 664 664 } ··· 772 772 spin_unlock_irq(&conf->resync_lock); 773 773 } 774 774 775 - static int make_request(request_queue_t *q, struct bio * bio) 775 + static int make_request(struct request_queue *q, struct bio * bio) 776 776 { 777 777 mddev_t *mddev = q->queuedata; 778 778 conf_t *conf = mddev_to_conf(mddev);
+9 -9
drivers/md/raid5.c
··· 289 289 } 290 290 291 291 static void unplug_slaves(mddev_t *mddev); 292 - static void raid5_unplug_device(request_queue_t *q); 292 + static void raid5_unplug_device(struct request_queue *q); 293 293 294 294 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, 295 295 int pd_idx, int noblock) ··· 3182 3182 for (i=0; i<mddev->raid_disks; i++) { 3183 3183 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3184 3184 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3185 - request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 3185 + struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3186 3186 3187 3187 atomic_inc(&rdev->nr_pending); 3188 3188 rcu_read_unlock(); ··· 3197 3197 rcu_read_unlock(); 3198 3198 } 3199 3199 3200 - static void raid5_unplug_device(request_queue_t *q) 3200 + static void raid5_unplug_device(struct request_queue *q) 3201 3201 { 3202 3202 mddev_t *mddev = q->queuedata; 3203 3203 raid5_conf_t *conf = mddev_to_conf(mddev); ··· 3216 3216 unplug_slaves(mddev); 3217 3217 } 3218 3218 3219 - static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, 3219 + static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk, 3220 3220 sector_t *error_sector) 3221 3221 { 3222 3222 mddev_t *mddev = q->queuedata; ··· 3228 3228 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3229 3229 if (rdev && !test_bit(Faulty, &rdev->flags)) { 3230 3230 struct block_device *bdev = rdev->bdev; 3231 - request_queue_t *r_queue = bdev_get_queue(bdev); 3231 + struct request_queue *r_queue = bdev_get_queue(bdev); 3232 3232 3233 3233 if (!r_queue->issue_flush_fn) 3234 3234 ret = -EOPNOTSUPP; ··· 3267 3267 /* We want read requests to align with chunks where possible, 3268 3268 * but write requests don't need to. 3269 3269 */ 3270 - static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec) 3270 + static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) 3271 3271 { 3272 3272 mddev_t *mddev = q->queuedata; 3273 3273 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); ··· 3377 3377 3378 3378 static int bio_fits_rdev(struct bio *bi) 3379 3379 { 3380 - request_queue_t *q = bdev_get_queue(bi->bi_bdev); 3380 + struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3381 3381 3382 3382 if ((bi->bi_size>>9) > q->max_sectors) 3383 3383 return 0; ··· 3396 3396 } 3397 3397 3398 3398 3399 - static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio) 3399 + static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) 3400 3400 { 3401 3401 mddev_t *mddev = q->queuedata; 3402 3402 raid5_conf_t *conf = mddev_to_conf(mddev); ··· 3466 3466 } 3467 3467 3468 3468 3469 - static int make_request(request_queue_t *q, struct bio * bi) 3469 + static int make_request(struct request_queue *q, struct bio * bi) 3470 3470 { 3471 3471 mddev_t *mddev = q->queuedata; 3472 3472 raid5_conf_t *conf = mddev_to_conf(mddev);
+2 -2
drivers/message/i2o/i2o_block.c
··· 159 159 * Returns 0 on success or negative error code on failure. 160 160 */ 161 161 162 - static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk, 162 + static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk, 163 163 sector_t * error_sector) 164 164 { 165 165 struct i2o_block_device *i2o_blk_dev = queue->queuedata; ··· 445 445 { 446 446 struct i2o_block_request *ireq = req->special; 447 447 struct i2o_block_device *dev = ireq->i2o_blk_dev; 448 - request_queue_t *q = req->q; 448 + struct request_queue *q = req->q; 449 449 unsigned long flags; 450 450 451 451 if (end_that_request_chunk(req, uptodate, nr_bytes)) {
+4 -4
drivers/mmc/card/queue.c
··· 83 83 * on any queue on this host, and attempt to issue it. This may 84 84 * not be the queue we were asked to process. 85 85 */ 86 - static void mmc_request(request_queue_t *q) 86 + static void mmc_request(struct request_queue *q) 87 87 { 88 88 struct mmc_queue *mq = q->queuedata; 89 89 struct request *req; ··· 211 211 212 212 void mmc_cleanup_queue(struct mmc_queue *mq) 213 213 { 214 - request_queue_t *q = mq->queue; 214 + struct request_queue *q = mq->queue; 215 215 unsigned long flags; 216 216 217 217 /* Mark that we should start throwing out stragglers */ ··· 252 252 */ 253 253 void mmc_queue_suspend(struct mmc_queue *mq) 254 254 { 255 - request_queue_t *q = mq->queue; 255 + struct request_queue *q = mq->queue; 256 256 unsigned long flags; 257 257 258 258 if (!(mq->flags & MMC_QUEUE_SUSPENDED)) { ··· 272 272 */ 273 273 void mmc_queue_resume(struct mmc_queue *mq) 274 274 { 275 - request_queue_t *q = mq->queue; 275 + struct request_queue *q = mq->queue; 276 276 unsigned long flags; 277 277 278 278 if (mq->flags & MMC_QUEUE_SUSPENDED) {
+2 -2
drivers/s390/block/dasd.c
··· 1187 1187 static void 1188 1188 __dasd_process_blk_queue(struct dasd_device * device) 1189 1189 { 1190 - request_queue_t *queue; 1190 + struct request_queue *queue; 1191 1191 struct request *req; 1192 1192 struct dasd_ccw_req *cqr; 1193 1193 int nr_queued; ··· 1740 1740 * Dasd request queue function. Called from ll_rw_blk.c 1741 1741 */ 1742 1742 static void 1743 - do_dasd_request(request_queue_t * queue) 1743 + do_dasd_request(struct request_queue * queue) 1744 1744 { 1745 1745 struct dasd_device *device; 1746 1746
+1 -1
drivers/s390/block/dasd_int.h
··· 293 293 struct dasd_device { 294 294 /* Block device stuff. */ 295 295 struct gendisk *gdp; 296 - request_queue_t *request_queue; 296 + struct request_queue *request_queue; 297 297 spinlock_t request_queue_lock; 298 298 struct block_device *bdev; 299 299 unsigned int devindex;
+1 -1
drivers/s390/block/dcssblk.c
··· 621 621 } 622 622 623 623 static int 624 - dcssblk_make_request(request_queue_t *q, struct bio *bio) 624 + dcssblk_make_request(struct request_queue *q, struct bio *bio) 625 625 { 626 626 struct dcssblk_dev_info *dev_info; 627 627 struct bio_vec *bvec;
+1 -1
drivers/s390/block/xpram.c
··· 191 191 /* 192 192 * Block device make request function. 193 193 */ 194 - static int xpram_make_request(request_queue_t *q, struct bio *bio) 194 + static int xpram_make_request(struct request_queue *q, struct bio *bio) 195 195 { 196 196 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; 197 197 struct bio_vec *bvec;
+1 -1
drivers/s390/char/tape.h
··· 188 188 { 189 189 struct tape_device * device; 190 190 /* Block device request queue. */ 191 - request_queue_t * request_queue; 191 + struct request_queue * request_queue; 192 192 spinlock_t request_queue_lock; 193 193 194 194 /* Task to move entries from block request to CCS request queue. */
+2 -2
drivers/s390/char/tape_block.c
··· 147 147 tapeblock_requeue(struct work_struct *work) { 148 148 struct tape_blk_data * blkdat; 149 149 struct tape_device * device; 150 - request_queue_t * queue; 150 + struct request_queue * queue; 151 151 int nr_queued; 152 152 struct request * req; 153 153 struct list_head * l; ··· 194 194 * Tape request queue function. Called from ll_rw_blk.c 195 195 */ 196 196 static void 197 - tapeblock_request_fn(request_queue_t *queue) 197 + tapeblock_request_fn(struct request_queue *queue) 198 198 { 199 199 struct tape_device *device; 200 200
+1 -1
drivers/sbus/char/jsflash.c
··· 185 185 } 186 186 } 187 187 188 - static void jsfd_do_request(request_queue_t *q) 188 + static void jsfd_do_request(struct request_queue *q) 189 189 { 190 190 struct request *req; 191 191
+6 -6
drivers/scsi/scsi_lib.c
··· 654 654 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, 655 655 int bytes, int requeue) 656 656 { 657 - request_queue_t *q = cmd->device->request_queue; 657 + struct request_queue *q = cmd->device->request_queue; 658 658 struct request *req = cmd->request; 659 659 unsigned long flags; 660 660 ··· 818 818 { 819 819 int result = cmd->result; 820 820 int this_count = cmd->request_bufflen; 821 - request_queue_t *q = cmd->device->request_queue; 821 + struct request_queue *q = cmd->device->request_queue; 822 822 struct request *req = cmd->request; 823 823 int clear_errors = 1; 824 824 struct scsi_sense_hdr sshdr; ··· 1038 1038 return BLKPREP_KILL; 1039 1039 } 1040 1040 1041 - static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, 1041 + static int scsi_issue_flush_fn(struct request_queue *q, struct gendisk *disk, 1042 1042 sector_t *error_sector) 1043 1043 { 1044 1044 struct scsi_device *sdev = q->queuedata; ··· 1340 1340 /* 1341 1341 * Kill a request for a dead device 1342 1342 */ 1343 - static void scsi_kill_request(struct request *req, request_queue_t *q) 1343 + static void scsi_kill_request(struct request *req, struct request_queue *q) 1344 1344 { 1345 1345 struct scsi_cmnd *cmd = req->special; 1346 1346 struct scsi_device *sdev = cmd->device; ··· 2119 2119 int 2120 2120 scsi_internal_device_block(struct scsi_device *sdev) 2121 2121 { 2122 - request_queue_t *q = sdev->request_queue; 2122 + struct request_queue *q = sdev->request_queue; 2123 2123 unsigned long flags; 2124 2124 int err = 0; 2125 2125 ··· 2159 2159 int 2160 2160 scsi_internal_device_unblock(struct scsi_device *sdev) 2161 2161 { 2162 - request_queue_t *q = sdev->request_queue; 2162 + struct request_queue *q = sdev->request_queue; 2163 2163 int err; 2164 2164 unsigned long flags; 2165 2165
+2 -2
drivers/scsi/sd.c
··· 814 814 return ret; 815 815 } 816 816 817 - static void sd_prepare_flush(request_queue_t *q, struct request *rq) 817 + static void sd_prepare_flush(struct request_queue *q, struct request *rq) 818 818 { 819 819 memset(rq->cmd, 0, sizeof(rq->cmd)); 820 820 rq->cmd_type = REQ_TYPE_BLOCK_PC; ··· 1285 1285 */ 1286 1286 int hard_sector = sector_size; 1287 1287 sector_t sz = (sdkp->capacity/2) * (hard_sector/256); 1288 - request_queue_t *queue = sdp->request_queue; 1288 + struct request_queue *queue = sdp->request_queue; 1289 1289 sector_t mb = sz; 1290 1290 1291 1291 blk_queue_hardsect_size(queue, hard_sector);
+1 -1
drivers/scsi/sr.c
··· 624 624 unsigned char *buffer; 625 625 int the_result, retries = 3; 626 626 int sector_size; 627 - request_queue_t *queue; 627 + struct request_queue *queue; 628 628 629 629 buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); 630 630 if (!buffer)
+15 -15
fs/bio.c
··· 230 230 } 231 231 } 232 232 233 - inline int bio_phys_segments(request_queue_t *q, struct bio *bio) 233 + inline int bio_phys_segments(struct request_queue *q, struct bio *bio) 234 234 { 235 235 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 236 236 blk_recount_segments(q, bio); ··· 238 238 return bio->bi_phys_segments; 239 239 } 240 240 241 - inline int bio_hw_segments(request_queue_t *q, struct bio *bio) 241 + inline int bio_hw_segments(struct request_queue *q, struct bio *bio) 242 242 { 243 243 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 244 244 blk_recount_segments(q, bio); ··· 257 257 */ 258 258 void __bio_clone(struct bio *bio, struct bio *bio_src) 259 259 { 260 - request_queue_t *q = bdev_get_queue(bio_src->bi_bdev); 260 + struct request_queue *q = bdev_get_queue(bio_src->bi_bdev); 261 261 262 262 memcpy(bio->bi_io_vec, bio_src->bi_io_vec, 263 263 bio_src->bi_max_vecs * sizeof(struct bio_vec)); ··· 303 303 */ 304 304 int bio_get_nr_vecs(struct block_device *bdev) 305 305 { 306 - request_queue_t *q = bdev_get_queue(bdev); 306 + struct request_queue *q = bdev_get_queue(bdev); 307 307 int nr_pages; 308 308 309 309 nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; ··· 315 315 return nr_pages; 316 316 } 317 317 318 - static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page 318 + static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page 319 319 *page, unsigned int len, unsigned int offset, 320 320 unsigned short max_sectors) 321 321 { ··· 425 425 * smaller than PAGE_SIZE, so it is always possible to add a single 426 426 * page to an empty bio. This should only be used by REQ_PC bios. 427 427 */ 428 - int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page, 428 + int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, 429 429 unsigned int len, unsigned int offset) 430 430 { 431 431 return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors); ··· 523 523 * to/from kernel pages as necessary. Must be paired with 524 524 * call bio_uncopy_user() on io completion. 525 525 */ 526 - struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr, 526 + struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr, 527 527 unsigned int len, int write_to_vm) 528 528 { 529 529 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; ··· 600 600 return ERR_PTR(ret); 601 601 } 602 602 603 - static struct bio *__bio_map_user_iov(request_queue_t *q, 603 + static struct bio *__bio_map_user_iov(struct request_queue *q, 604 604 struct block_device *bdev, 605 605 struct sg_iovec *iov, int iov_count, 606 606 int write_to_vm) ··· 712 712 713 713 /** 714 714 * bio_map_user - map user address into bio 715 - * @q: the request_queue_t for the bio 715 + * @q: the struct request_queue for the bio 716 716 * @bdev: destination block device 717 717 * @uaddr: start of user address 718 718 * @len: length in bytes ··· 721 721 * Map the user space address into a bio suitable for io to a block 722 722 * device. Returns an error pointer in case of error. 
723 723 */ 724 - struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev, 724 + struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev, 725 725 unsigned long uaddr, unsigned int len, int write_to_vm) 726 726 { 727 727 struct sg_iovec iov; ··· 734 734 735 735 /** 736 736 * bio_map_user_iov - map user sg_iovec table into bio 737 - * @q: the request_queue_t for the bio 737 + * @q: the struct request_queue for the bio 738 738 * @bdev: destination block device 739 739 * @iov: the iovec. 740 740 * @iov_count: number of elements in the iovec ··· 743 743 * Map the user space address into a bio suitable for io to a block 744 744 * device. Returns an error pointer in case of error. 745 745 */ 746 - struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev, 746 + struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev, 747 747 struct sg_iovec *iov, int iov_count, 748 748 int write_to_vm) 749 749 { ··· 808 808 } 809 809 810 810 811 - static struct bio *__bio_map_kern(request_queue_t *q, void *data, 811 + static struct bio *__bio_map_kern(struct request_queue *q, void *data, 812 812 unsigned int len, gfp_t gfp_mask) 813 813 { 814 814 unsigned long kaddr = (unsigned long)data; ··· 847 847 848 848 /** 849 849 * bio_map_kern - map kernel address into bio 850 - * @q: the request_queue_t for the bio 850 + * @q: the struct request_queue for the bio 851 851 * @data: pointer to buffer to map 852 852 * @len: length in bytes 853 853 * @gfp_mask: allocation flags for bio allocation ··· 855 855 * Map the kernel address into a bio suitable for io to a block 856 856 * device. Returns an error pointer in case of error. 857 857 */ 858 - struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len, 858 + struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, 859 859 gfp_t gfp_mask) 860 860 { 861 861 struct bio *bio;
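The bio helpers above now take struct request_queue * directly in both the prototypes and the kerneldoc. A hedged sketch of how a caller builds a REQ_PC-style bio against a queue's limits with bio_add_pc_page(); my_build_bio() and the caller-pinned pages array are assumptions of the example, not part of the patch:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Add caller-pinned pages to a new bio until the queue's limits are
 * hit; bio_add_pc_page() returns the number of bytes it accepted. */
static struct bio *my_build_bio(struct request_queue *q,
				struct page **pages, int nr_pages)
{
	struct bio *bio;
	int i;

	bio = bio_alloc(GFP_KERNEL, nr_pages);
	if (!bio)
		return NULL;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_pc_page(q, bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
			break;	/* queue limit reached; use what we have */

	return bio;
}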
+1 -1
include/asm-arm/arch-omap/mailbox.h
··· 37 37 38 38 struct omap_mbox_queue { 39 39 spinlock_t lock; 40 - request_queue_t *queue; 40 + struct request_queue *queue; 41 41 struct work_struct work; 42 42 int (*callback)(void *); 43 43 struct omap_mbox *mbox;
+70 -70
include/linux/blkdev.h
··· 37 37 struct scsi_ioctl_command; 38 38 39 39 struct request_queue; 40 - typedef struct request_queue request_queue_t; 41 40 struct elevator_queue; 42 41 typedef struct elevator_queue elevator_t; 43 42 struct request_pm_state; ··· 232 233 struct list_head queuelist; 233 234 struct list_head donelist; 234 235 235 - request_queue_t *q; 236 + struct request_queue *q; 236 237 237 238 unsigned int cmd_flags; 238 239 enum rq_cmd_type_bits cmd_type; ··· 336 337 337 338 #include <linux/elevator.h> 338 339 339 - typedef void (request_fn_proc) (request_queue_t *q); 340 - typedef int (make_request_fn) (request_queue_t *q, struct bio *bio); 341 - typedef int (prep_rq_fn) (request_queue_t *, struct request *); 342 - typedef void (unplug_fn) (request_queue_t *); 340 + typedef void (request_fn_proc) (struct request_queue *q); 341 + typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); 342 + typedef int (prep_rq_fn) (struct request_queue *, struct request *); 343 + typedef void (unplug_fn) (struct request_queue *); 343 344 344 345 struct bio_vec; 345 - typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *); 346 - typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *); 347 - typedef void (prepare_flush_fn) (request_queue_t *, struct request *); 346 + typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *); 347 + typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *); 348 + typedef void (prepare_flush_fn) (struct request_queue *, struct request *); 348 349 typedef void (softirq_done_fn)(struct request *); 349 350 350 351 enum blk_queue_state { ··· 625 626 626 627 #ifdef CONFIG_BOUNCE 627 628 extern int init_emergency_isa_pool(void); 628 - extern void blk_queue_bounce(request_queue_t *q, struct bio **bio); 629 + extern void blk_queue_bounce(struct request_queue *q, struct bio **bio); 629 630 #else 630 631 static inline int init_emergency_isa_pool(void) 631 632 { 632 633 return 0; 633 634 } 634 - static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio) 635 + static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) 635 636 { 636 637 } 637 638 #endif /* CONFIG_MMU */ ··· 645 646 extern void register_disk(struct gendisk *dev); 646 647 extern void generic_make_request(struct bio *bio); 647 648 extern void blk_put_request(struct request *); 648 - extern void __blk_put_request(request_queue_t *, struct request *); 649 + extern void __blk_put_request(struct request_queue *, struct request *); 649 650 extern void blk_end_sync_rq(struct request *rq, int error); 650 - extern struct request *blk_get_request(request_queue_t *, int, gfp_t); 651 - extern void blk_insert_request(request_queue_t *, struct request *, int, void *); 652 - extern void blk_requeue_request(request_queue_t *, struct request *); 653 - extern void blk_plug_device(request_queue_t *); 654 - extern int blk_remove_plug(request_queue_t *); 655 - extern void blk_recount_segments(request_queue_t *, struct bio *); 651 + extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 652 + extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 653 + extern void blk_requeue_request(struct request_queue *, struct request *); 654 + extern void blk_plug_device(struct request_queue *); 655 + extern int blk_remove_plug(struct request_queue *); 656 + extern void blk_recount_segments(struct request_queue *, struct bio *); 656 657 extern int 
scsi_cmd_ioctl(struct file *, struct request_queue *, 657 658 struct gendisk *, unsigned int, void __user *); 658 659 extern int sg_scsi_ioctl(struct file *, struct request_queue *, ··· 661 662 /* 662 663 * Temporary export, until SCSI gets fixed up. 663 664 */ 664 - extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *); 665 + extern int ll_back_merge_fn(struct request_queue *, struct request *, 666 + struct bio *); 665 667 666 668 /* 667 669 * A queue has just exitted congestion. Note this in the global counter of 668 670 * congested queues, and wake up anyone who was waiting for requests to be 669 671 * put back. 670 672 */ 671 - static inline void blk_clear_queue_congested(request_queue_t *q, int rw) 673 + static inline void blk_clear_queue_congested(struct request_queue *q, int rw) 672 674 { 673 675 clear_bdi_congested(&q->backing_dev_info, rw); 674 676 } ··· 678 678 * A queue has just entered congestion. Flag that in the queue's VM-visible 679 679 * state flags and increment the global gounter of congested queues. 680 680 */ 681 - static inline void blk_set_queue_congested(request_queue_t *q, int rw) 681 + static inline void blk_set_queue_congested(struct request_queue *q, int rw) 682 682 { 683 683 set_bdi_congested(&q->backing_dev_info, rw); 684 684 } 685 685 686 - extern void blk_start_queue(request_queue_t *q); 687 - extern void blk_stop_queue(request_queue_t *q); 686 + extern void blk_start_queue(struct request_queue *q); 687 + extern void blk_stop_queue(struct request_queue *q); 688 688 extern void blk_sync_queue(struct request_queue *q); 689 - extern void __blk_stop_queue(request_queue_t *q); 690 - extern void blk_run_queue(request_queue_t *); 691 - extern void blk_start_queueing(request_queue_t *); 692 - extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long); 689 + extern void __blk_stop_queue(struct request_queue *q); 690 + extern void blk_run_queue(struct request_queue *); 691 + extern void blk_start_queueing(struct request_queue *); 692 + extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long); 693 693 extern int blk_rq_unmap_user(struct bio *); 694 - extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t); 695 - extern int blk_rq_map_user_iov(request_queue_t *, struct request *, 694 + extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); 695 + extern int blk_rq_map_user_iov(struct request_queue *, struct request *, 696 696 struct sg_iovec *, int, unsigned int); 697 - extern int blk_execute_rq(request_queue_t *, struct gendisk *, 697 + extern int blk_execute_rq(struct request_queue *, struct gendisk *, 698 698 struct request *, int); 699 - extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *, 699 + extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 700 700 struct request *, int, rq_end_io_fn *); 701 701 extern int blk_verify_command(unsigned char *, int); 702 702 703 - static inline request_queue_t *bdev_get_queue(struct block_device *bdev) 703 + static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 704 704 { 705 705 return bdev->bd_disk->queue; 706 706 } ··· 749 749 /* 750 750 * Access functions for manipulating queue properties 751 751 */ 752 - extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn, 752 + extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, 753 753 spinlock_t *lock, int 
node_id); 754 - extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *); 755 - extern void blk_cleanup_queue(request_queue_t *); 756 - extern void blk_queue_make_request(request_queue_t *, make_request_fn *); 757 - extern void blk_queue_bounce_limit(request_queue_t *, u64); 758 - extern void blk_queue_max_sectors(request_queue_t *, unsigned int); 759 - extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short); 760 - extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short); 761 - extern void blk_queue_max_segment_size(request_queue_t *, unsigned int); 762 - extern void blk_queue_hardsect_size(request_queue_t *, unsigned short); 763 - extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b); 764 - extern void blk_queue_segment_boundary(request_queue_t *, unsigned long); 765 - extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn); 766 - extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *); 767 - extern void blk_queue_dma_alignment(request_queue_t *, int); 768 - extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *); 754 + extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); 755 + extern void blk_cleanup_queue(struct request_queue *); 756 + extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 757 + extern void blk_queue_bounce_limit(struct request_queue *, u64); 758 + extern void blk_queue_max_sectors(struct request_queue *, unsigned int); 759 + extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); 760 + extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); 761 + extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 762 + extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); 763 + extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 764 + extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 765 + extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 766 + extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); 767 + extern void blk_queue_dma_alignment(struct request_queue *, int); 768 + extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 769 769 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 770 - extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *); 771 - extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *); 772 - extern int blk_do_ordered(request_queue_t *, struct request **); 773 - extern unsigned blk_ordered_cur_seq(request_queue_t *); 770 + extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); 771 + extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *); 772 + extern int blk_do_ordered(struct request_queue *, struct request **); 773 + extern unsigned blk_ordered_cur_seq(struct request_queue *); 774 774 extern unsigned blk_ordered_req_seq(struct request *); 775 - extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int); 775 + extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int); 776 776 777 - extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *); 777 + extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 778 778 extern void blk_dump_rq_flags(struct request *, char *); 779 - 
extern void generic_unplug_device(request_queue_t *); 780 - extern void __generic_unplug_device(request_queue_t *); 779 + extern void generic_unplug_device(struct request_queue *); 780 + extern void __generic_unplug_device(struct request_queue *); 781 781 extern long nr_blockdev_pages(void); 782 782 783 - int blk_get_queue(request_queue_t *); 784 - request_queue_t *blk_alloc_queue(gfp_t); 785 - request_queue_t *blk_alloc_queue_node(gfp_t, int); 786 - extern void blk_put_queue(request_queue_t *); 783 + int blk_get_queue(struct request_queue *); 784 + struct request_queue *blk_alloc_queue(gfp_t); 785 + struct request_queue *blk_alloc_queue_node(gfp_t, int); 786 + extern void blk_put_queue(struct request_queue *); 787 787 788 788 /* 789 789 * tag stuff ··· 791 791 #define blk_queue_tag_depth(q) ((q)->queue_tags->busy) 792 792 #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) 793 793 #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) 794 - extern int blk_queue_start_tag(request_queue_t *, struct request *); 795 - extern struct request *blk_queue_find_tag(request_queue_t *, int); 796 - extern void blk_queue_end_tag(request_queue_t *, struct request *); 797 - extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *); 798 - extern void blk_queue_free_tags(request_queue_t *); 799 - extern int blk_queue_resize_tags(request_queue_t *, int); 800 - extern void blk_queue_invalidate_tags(request_queue_t *); 794 + extern int blk_queue_start_tag(struct request_queue *, struct request *); 795 + extern struct request *blk_queue_find_tag(struct request_queue *, int); 796 + extern void blk_queue_end_tag(struct request_queue *, struct request *); 797 + extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); 798 + extern void blk_queue_free_tags(struct request_queue *); 799 + extern int blk_queue_resize_tags(struct request_queue *, int); 800 + extern void blk_queue_invalidate_tags(struct request_queue *); 801 801 extern struct blk_queue_tag *blk_init_tags(int); 802 802 extern void blk_free_tags(struct blk_queue_tag *); 803 803 ··· 809 809 return bqt->tag_index[tag]; 810 810 } 811 811 812 - extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *); 812 + extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *); 813 813 extern int blkdev_issue_flush(struct block_device *, sector_t *); 814 814 815 815 #define MAX_PHYS_SEGMENTS 128 ··· 821 821 822 822 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 823 823 824 - static inline int queue_hardsect_size(request_queue_t *q) 824 + static inline int queue_hardsect_size(struct request_queue *q) 825 825 { 826 826 int retval = 512; 827 827 ··· 836 836 return queue_hardsect_size(bdev_get_queue(bdev)); 837 837 } 838 838 839 - static inline int queue_dma_alignment(request_queue_t *q) 839 + static inline int queue_dma_alignment(struct request_queue *q) 840 840 { 841 841 int retval = 511; 842 842
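With the typedef gone from blkdev.h, queue creation and limit setup are written against struct request_queue everywhere. A hedged sketch of typical initialization follows; my_request_fn, my_lock and the numeric limits are placeholders, not values this patch dictates:

#include <linux/blkdev.h>

static DEFINE_SPINLOCK(my_lock);
static void my_request_fn(struct request_queue *q);	/* driver's strategy fn */

/* Allocate a request queue and apply (placeholder) hardware limits. */
static struct request_queue *my_setup_queue(void)
{
	struct request_queue *q;

	q = blk_init_queue(my_request_fn, &my_lock);
	if (!q)
		return NULL;

	blk_queue_max_sectors(q, 128);
	blk_queue_max_phys_segments(q, 32);
	blk_queue_max_hw_segments(q, 32);
	blk_queue_hardsect_size(q, 512);

	return q;
}

blk_cleanup_queue() tears the queue down again on driver exit, as before.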
+1 -1
include/linux/blktrace_api.h
··· 144 144 145 145 #if defined(CONFIG_BLK_DEV_IO_TRACE) 146 146 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); 147 - extern void blk_trace_shutdown(request_queue_t *); 147 + extern void blk_trace_shutdown(struct request_queue *); 148 148 extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); 149 149 150 150 /**
+38 -38
include/linux/elevator.h
··· 5 5 6 6 #ifdef CONFIG_BLOCK 7 7 8 - typedef int (elevator_merge_fn) (request_queue_t *, struct request **, 8 + typedef int (elevator_merge_fn) (struct request_queue *, struct request **, 9 9 struct bio *); 10 10 11 - typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *); 11 + typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *); 12 12 13 - typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int); 13 + typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int); 14 14 15 - typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *); 15 + typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *); 16 16 17 - typedef int (elevator_dispatch_fn) (request_queue_t *, int); 17 + typedef int (elevator_dispatch_fn) (struct request_queue *, int); 18 18 19 - typedef void (elevator_add_req_fn) (request_queue_t *, struct request *); 20 - typedef int (elevator_queue_empty_fn) (request_queue_t *); 21 - typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *); 22 - typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *); 23 - typedef int (elevator_may_queue_fn) (request_queue_t *, int); 19 + typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); 20 + typedef int (elevator_queue_empty_fn) (struct request_queue *); 21 + typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); 22 + typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); 23 + typedef int (elevator_may_queue_fn) (struct request_queue *, int); 24 24 25 - typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, gfp_t); 25 + typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t); 26 26 typedef void (elevator_put_req_fn) (struct request *); 27 - typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *); 28 - typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *); 27 + typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *); 28 + typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); 29 29 30 - typedef void *(elevator_init_fn) (request_queue_t *); 30 + typedef void *(elevator_init_fn) (struct request_queue *); 31 31 typedef void (elevator_exit_fn) (elevator_t *); 32 32 33 33 struct elevator_ops ··· 94 94 /* 95 95 * block elevator interface 96 96 */ 97 - extern void elv_dispatch_sort(request_queue_t *, struct request *); 98 - extern void elv_dispatch_add_tail(request_queue_t *, struct request *); 99 - extern void elv_add_request(request_queue_t *, struct request *, int, int); 100 - extern void __elv_add_request(request_queue_t *, struct request *, int, int); 101 - extern void elv_insert(request_queue_t *, struct request *, int); 102 - extern int elv_merge(request_queue_t *, struct request **, struct bio *); 103 - extern void elv_merge_requests(request_queue_t *, struct request *, 97 + extern void elv_dispatch_sort(struct request_queue *, struct request *); 98 + extern void elv_dispatch_add_tail(struct request_queue *, struct request *); 99 + extern void elv_add_request(struct request_queue *, struct request *, int, int); 100 + extern void __elv_add_request(struct request_queue *, struct request *, int, int); 101 + extern void elv_insert(struct request_queue *, struct 
request *, int); 102 + extern int elv_merge(struct request_queue *, struct request **, struct bio *); 103 + extern void elv_merge_requests(struct request_queue *, struct request *, 104 104 struct request *); 105 - extern void elv_merged_request(request_queue_t *, struct request *, int); 106 - extern void elv_dequeue_request(request_queue_t *, struct request *); 107 - extern void elv_requeue_request(request_queue_t *, struct request *); 108 - extern int elv_queue_empty(request_queue_t *); 105 + extern void elv_merged_request(struct request_queue *, struct request *, int); 106 + extern void elv_dequeue_request(struct request_queue *, struct request *); 107 + extern void elv_requeue_request(struct request_queue *, struct request *); 108 + extern int elv_queue_empty(struct request_queue *); 109 109 extern struct request *elv_next_request(struct request_queue *q); 110 - extern struct request *elv_former_request(request_queue_t *, struct request *); 111 - extern struct request *elv_latter_request(request_queue_t *, struct request *); 112 - extern int elv_register_queue(request_queue_t *q); 113 - extern void elv_unregister_queue(request_queue_t *q); 114 - extern int elv_may_queue(request_queue_t *, int); 115 - extern void elv_completed_request(request_queue_t *, struct request *); 116 - extern int elv_set_request(request_queue_t *, struct request *, gfp_t); 117 - extern void elv_put_request(request_queue_t *, struct request *); 110 + extern struct request *elv_former_request(struct request_queue *, struct request *); 111 + extern struct request *elv_latter_request(struct request_queue *, struct request *); 112 + extern int elv_register_queue(struct request_queue *q); 113 + extern void elv_unregister_queue(struct request_queue *q); 114 + extern int elv_may_queue(struct request_queue *, int); 115 + extern void elv_completed_request(struct request_queue *, struct request *); 116 + extern int elv_set_request(struct request_queue *, struct request *, gfp_t); 117 + extern void elv_put_request(struct request_queue *, struct request *); 118 118 119 119 /* 120 120 * io scheduler registration ··· 125 125 /* 126 126 * io scheduler sysfs switching 127 127 */ 128 - extern ssize_t elv_iosched_show(request_queue_t *, char *); 129 - extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t); 128 + extern ssize_t elv_iosched_show(struct request_queue *, char *); 129 + extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); 130 130 131 - extern int elevator_init(request_queue_t *, char *); 131 + extern int elevator_init(struct request_queue *, char *); 132 132 extern void elevator_exit(elevator_t *); 133 133 extern int elv_rq_merge_ok(struct request *, struct bio *); 134 134 135 135 /* 136 136 * Helper functions. 137 137 */ 138 - extern struct request *elv_rb_former_request(request_queue_t *, struct request *); 139 - extern struct request *elv_rb_latter_request(request_queue_t *, struct request *); 138 + extern struct request *elv_rb_former_request(struct request_queue *, struct request *); 139 + extern struct request *elv_rb_latter_request(struct request_queue *, struct request *); 140 140 141 141 /* 142 142 * rb support functions.
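The elevator hooks are converted the same way, so an I/O scheduler's callbacks are now declared against struct request_queue. Below is a hedged stub that only demonstrates the post-patch signatures; a real scheduler keeps its own state and wires these into a struct elevator_type, and the degenerate FIFO behaviour here is purely illustrative:

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* Stub callbacks matching the updated typedefs. */
static void my_add_request(struct request_queue *q, struct request *rq)
{
	elv_dispatch_add_tail(q, rq);	/* degenerate FIFO: dispatch at once */
}

static int my_dispatch(struct request_queue *q, int force)
{
	return 0;	/* nothing is ever held back, so nothing to move */
}

static int my_queue_empty(struct request_queue *q)
{
	return 1;	/* the stub never queues requests internally */
}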
+2 -2
include/linux/ide.h
··· 555 555 char name[4]; /* drive name, such as "hda" */ 556 556 char driver_req[10]; /* requests specific driver */ 557 557 558 - request_queue_t *queue; /* request queue */ 558 + struct request_queue *queue; /* request queue */ 559 559 560 560 struct request *rq; /* current request */ 561 561 struct ide_drive_s *next; /* circular list of hwgroup drives */ ··· 1206 1206 extern int ide_spin_wait_hwgroup(ide_drive_t *); 1207 1207 extern void ide_timer_expiry(unsigned long); 1208 1208 extern irqreturn_t ide_intr(int irq, void *dev_id); 1209 - extern void do_ide_request(request_queue_t *); 1209 + extern void do_ide_request(struct request_queue *); 1210 1210 1211 1211 void ide_init_disk(struct gendisk *, ide_drive_t *); 1212 1212
+1 -1
include/linux/loop.h
··· 63 63 struct task_struct *lo_thread; 64 64 wait_queue_head_t lo_event; 65 65 66 - request_queue_t *lo_queue; 66 + struct request_queue *lo_queue; 67 67 struct gendisk *lo_disk; 68 68 struct list_head lo_list; 69 69 };
+2 -2
include/linux/raid/md_k.h
··· 227 227 unsigned int safemode_delay; 228 228 struct timer_list safemode_timer; 229 229 atomic_t writes_pending; 230 - request_queue_t *queue; /* for plugging ... */ 230 + struct request_queue *queue; /* for plugging ... */ 231 231 232 232 atomic_t write_behind; /* outstanding async IO */ 233 233 unsigned int max_write_behind; /* 0 = sync */ ··· 265 265 int level; 266 266 struct list_head list; 267 267 struct module *owner; 268 - int (*make_request)(request_queue_t *q, struct bio *bio); 268 + int (*make_request)(struct request_queue *q, struct bio *bio); 269 269 int (*run)(mddev_t *mddev); 270 270 int (*stop)(mddev_t *mddev); 271 271 void (*status)(struct seq_file *seq, mddev_t *mddev);
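An md personality's make_request hook is one of many bio-based entry points that change type here. For a make_request-style driver in general, the post-patch skeleton looks roughly as follows; my_backing_bdev and my_make_request are hypothetical, and the real md code of course does far more:

#include <linux/blkdev.h>

static struct block_device *my_backing_bdev;	/* hypothetical target */

/* Remap every bio to the backing device and resubmit it. */
static int my_make_request(struct request_queue *q, struct bio *bio)
{
	bio->bi_bdev = my_backing_bdev;
	generic_make_request(bio);
	return 0;
}

static struct request_queue *my_alloc_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (q)
		blk_queue_make_request(q, my_make_request);
	return q;
}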
+1 -1
include/scsi/sd.h
··· 57 57 static void sd_rescan(struct device *); 58 58 static int sd_init_command(struct scsi_cmnd *); 59 59 static int sd_issue_flush(struct device *, sector_t *); 60 - static void sd_prepare_flush(request_queue_t *, struct request *); 60 + static void sd_prepare_flush(struct request_queue *, struct request *); 61 61 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); 62 62 static void scsi_disk_release(struct class_device *cdev); 63 63 static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
+2 -2
mm/bounce.c
··· 190 190 return 0; 191 191 } 192 192 193 - static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, 193 + static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, 194 194 mempool_t *pool) 195 195 { 196 196 struct page *page; ··· 275 275 *bio_orig = bio; 276 276 } 277 277 278 - void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig) 278 + void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) 279 279 { 280 280 mempool_t *pool; 281 281
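blk_queue_bounce() decides whether to bounce a bio from the limit the driver set with blk_queue_bounce_limit(), converted in the blkdev.h hunk above. A hedged example of a driver restricting its queue to 32-bit DMA addresses so that pages above that boundary get bounced; my_set_dma_limit() is illustrative only:

#include <linux/blkdev.h>

/* Pages above the 32-bit boundary will be bounced by blk_queue_bounce(). */
static void my_set_dma_limit(struct request_queue *q)
{
	blk_queue_bounce_limit(q, 0xffffffffULL);
}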