#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/bsg.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_FLUSH,			/* flush request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 */
enum {
	/*
	 * just examples for now
	 */
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH	= 0x41,		/* flush device */
};
/*
 * request type modifier bits. first three bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)
#define REQ_COPY_USER	(1 << __REQ_COPY_USER)
#define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct list_head donelist;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;

	/* Maintain bio traversal state for part-by-part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned short cmd_len;
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;

	unsigned int data_len;
	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};
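/*
 * Illustration (editorial note, not from the original source): how the bio
 * traversal state above evolves.  Suppose a file system request covers 16
 * sectors starting at sector 1000.  At dispatch, ->sector == ->hard_sector
 * == 1000 and ->nr_sectors == ->hard_nr_sectors == 16.  When the driver
 * reports the first 8 sectors completed, the block layer advances
 * ->hard_sector to 1008 and drops ->hard_nr_sectors to 8, while the plain
 * ->sector/->nr_sectors fields track how much has been *submitted* to the
 * hardware and are usually in step.  The numbers are only an example.
 */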
/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void	*data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
	struct kobject kobj;
};
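/*
 * Example (editorial sketch, not part of the kernel API): a minimal
 * make_request_fn matching the typedef above, for a bio-based device that
 * completes every bio immediately.  The function name is illustrative; a
 * real driver would register it with blk_queue_make_request() (declared
 * further down) and actually transfer the bio's data before calling
 * bio_endio().
 */
static inline int example_make_request(struct request_queue *q, struct bio *bio)
{
	/* a real driver would walk the bio's segments and move data here */
	bio_endio(bio, 0);	/* complete the bio with no error */
	return 0;		/* 0: handled, do not remap/resubmit */
}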
struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	elevator_t		*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;
	dma_drain_needed_fn	*dma_drain_needed;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long		bounce_pfn;
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		max_sectors;
	unsigned int		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
	void			*dma_drain_buffer;
	unsigned int		dma_drain_size;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
	struct blk_cmd_filter cmd_filter;
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
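/*
 * Example (editorial sketch, not kernel code): the locked queue_flag_*
 * helpers above expect ->queue_lock to be held.  A caller outside the
 * block core might do something like the following; the function name is
 * illustrative, and real drivers would normally use blk_stop_queue()/
 * blk_start_queue() rather than touching the flag directly.
 */
static inline void example_mark_queue_stopped(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}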
enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
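/*
 * Example (editorial sketch, loosely modelled on drivers of this era, not
 * part of the API): a driver that supports cache flushes but not FUA might
 * select QUEUE_ORDERED_DRAIN_FLUSH via blk_queue_ordered() (declared later
 * in this header) and supply a prepare_flush_fn like the one below, which
 * turns the block layer's flush request into a driver-recognisable
 * REQ_TYPE_LINUX_BLOCK message.  The function name is illustrative.
 */
static inline void example_prepare_flush(struct request_queue *q,
					 struct request *rq)
{
	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
	rq->cmd[0] = REQ_LB_OP_FLUSH;
}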
#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync, if it's a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		queue_flag_set(QUEUE_FLAG_READFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		queue_flag_clear(QUEUE_FLAG_READFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
}


/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
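/*
 * Example (editorial sketch, not part of the kernel API): walking every
 * bio_vec of a request with the iterator above.  The function name is
 * illustrative; for an unprocessed file system request the result should
 * match rq->hard_nr_sectors << 9.
 */
static inline unsigned int example_rq_payload_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec->bv_len;

	return bytes;
}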
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
			  struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
			 struct gendisk *, struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			     struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct sg_iovec *, int, unsigned int);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
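/*
 * Example (editorial sketch, not part of the kernel API): synchronously
 * issuing a simple packet command through the request interface declared
 * above.  The function name and the 6-byte CDB length are illustrative;
 * real callers usually also map a data buffer with blk_rq_map_kern() or
 * blk_rq_map_user() before executing the request.
 */
static inline int example_send_packet_command(struct request_queue *q,
					      struct gendisk *disk,
					      unsigned char opcode)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd[0] = opcode;
	rq->cmd_len = 6;
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	err = blk_execute_rq(q, disk, rq, 0);	/* waits for completion */
	blk_put_request(rq);
	return err;
}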
/*
 * blk_end_request() and friends.
 * __blk_end_request() and end_request() must be called with
 * the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern int blk_end_request(struct request *rq, int error,
				unsigned int nr_bytes);
extern int __blk_end_request(struct request *rq, int error,
				unsigned int nr_bytes);
extern int blk_end_bidi_request(struct request *rq, int error,
				unsigned int nr_bytes, unsigned int bidi_bytes);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error,
				unsigned int nr_bytes,
				int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);

/*
 * blk_end_request() takes bytes instead of sectors as a complete size.
 * blk_rq_bytes() returns bytes left to complete in the entire request.
 * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
 */
extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq);

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}
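/*
 * Example (editorial sketch, not taken from any driver): the classic shape
 * of a request_fn for a trivial device that can satisfy transfers
 * synchronously.  It uses elv_next_request() from <linux/elevator.h>
 * together with the completion helpers declared above; the function name
 * is illustrative.  end_request() completes the current chunk only, so a
 * partially finished request is picked up again on the next iteration.
 */
static inline void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			end_request(rq, 0);	/* fail non-fs requests */
			continue;
		}
		/*
		 * A real driver would transfer rq->current_nr_sectors
		 * sectors starting at rq->sector, in the direction given
		 * by rq_data_dir(rq), before completing them.
		 */
		end_request(rq, 1);
	}
}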
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern int blkdev_issue_flush(struct block_device *, sector_t *);

/*
 * command filter functions
 */
extern int blk_verify_command(struct blk_cmd_filter *filter,
			      unsigned char *cmd, int has_write_perm);
extern int blk_register_filter(struct gendisk *disk);
extern void blk_unregister_filter(struct gendisk *disk);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}
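/*
 * Example (editorial sketch, not part of the kernel API): typical queue
 * setup in a request-based driver, combining blk_init_queue() with the
 * property setters declared above.  The function name and the limits
 * passed in are illustrative; blk_init_queue() already installs sane
 * defaults for all of them.
 */
static inline struct request_queue *example_setup_queue(request_fn_proc *fn,
							spinlock_t *lock)
{
	struct request_queue *q;

	q = blk_init_queue(fn, lock);
	if (!q)
		return NULL;

	blk_queue_hardsect_size(q, 512);		/* hardware sector size */
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);	/* per-request limit */
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

	return q;
}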
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct block_device *, struct block_device *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
	if (bi)
		return bi->tuple_size;

	return 0;
}

static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);

	if (bi)
		return bi->tag_size;

	return 0;
}

static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);

	if (bi == NULL)
		return 0;

	if (rw == READ && bi->verify_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_READ))
		return 1;

	if (rw == WRITE && bi->generate_fn != NULL &&
	    (bi->flags & INTEGRITY_FLAG_WRITE))
		return 1;

	return 0;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}
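/*
 * Example (editorial sketch, not part of the kernel API): a driver that
 * supports protection information might size its integrity scatterlist
 * like this before calling blk_rq_map_integrity_sg().  The function name
 * is illustrative.
 */
static inline int example_rq_integrity_segments(struct request *rq)
{
	if (!blk_integrity_rq(rq))
		return 0;		/* no integrity payload attached */

	return blk_rq_count_integrity_sg(rq);
}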
#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define bdev_get_tag_size(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif