#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 */
enum {
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH	= 0x41,		/* flush request */
};
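/*
 * Example (editor's illustration, not part of this API): a driver's
 * request handler commonly dispatches on rq->cmd_type.  The mydrv_*
 * helpers are hypothetical; blk_end_request_all() is declared further
 * down in this header.
 *
 *	static void mydrv_handle(struct request *rq)
 *	{
 *		switch (rq->cmd_type) {
 *		case REQ_TYPE_FS:
 *			mydrv_do_rw(rq);
 *			break;
 *		case REQ_TYPE_BLOCK_PC:
 *			mydrv_do_scsi_cmd(rq);
 *			break;
 *		case REQ_TYPE_SPECIAL:
 *			mydrv_do_private(rq);
 *			break;
 *		default:
 *			blk_end_request_all(rq, -EIO);
 *			break;
 *		}
 *	}
 */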
/*
 * request type modifier bits. first four bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read; set, write */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	/* above flags must match BIO_RW_* */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW			(1 << __REQ_RW)
#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD		(1 << __REQ_DISCARD)
#define REQ_SORTED		(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
#define REQ_FUA			(1 << __REQ_FUA)
#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
#define REQ_STARTED		(1 << __REQ_STARTED)
#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
#define REQ_QUEUED		(1 << __REQ_QUEUED)
#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
#define REQ_FAILED		(1 << __REQ_FAILED)
#define REQ_QUIET		(1 << __REQ_QUIET)
#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC		(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
#define REQ_RW_META		(1 << __REQ_RW_META)
#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
#define REQ_IO_STAT		(1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)

#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
				 REQ_FAILFAST_DRIVER)

#define BLK_MAX_CDB	16
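/*
 * Example (editor's illustration): cmd_flags is a plain bitmask, so the
 * usual bit tests apply.  A transport might skip its retry loop for
 * failfast requests and force media access for REQ_FUA writes:
 *
 *	if (rq->cmd_flags & REQ_FAILFAST_MASK)
 *		max_retries = 0;
 *	if (rq->cmd_flags & REQ_FUA)
 *		cmd->flags |= MYDRV_CMD_FORCE_UNIT_ACCESS;
 *
 * (max_retries, cmd and MYDRV_CMD_FORCE_UNIT_ACCESS are hypothetical
 * driver state.)
 */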
/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;
	int cpu;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	sector_t __sector;		/* sector cursor */
	unsigned int __data_len;	/* total data len */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned short cmd_len;
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
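/*
 * Example (editor's illustration): ->end_io and ->end_io_data let the
 * submitter of an internally generated request receive a completion
 * callback.  A sketch pairing this with blk_execute_rq_nowait() (declared
 * below), modelled on the block layer's own sync-completion helper;
 * mydrv_rq_done is a hypothetical name:
 *
 *	static void mydrv_rq_done(struct request *rq, int error)
 *	{
 *		struct completion *waiting = rq->end_io_data;
 *
 *		rq->end_io_data = NULL;
 *		__blk_put_request(rq->q, rq);
 *		complete(waiting);
 *	}
 *
 *	rq->end_io_data = &wait;
 *	blk_execute_rq_nowait(q, disk, rq, 0, mydrv_rq_done);
 */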
/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32 pm_state;
	void *data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	unsigned short logical_block_size;
	unsigned short max_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char no_cluster;
	signed char discard_zeroes_data;
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list rq;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unplug_fn *unplug_fn;
	merge_bvec_fn *merge_bvec_fn;
	prepare_flush_fn *prepare_flush_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list unplug_timer;
	int unplug_thresh;		/* After this many requests */
	unsigned long unplug_delay;	/* After this many jiffies */
	struct work_struct unplug_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	void *dma_drain_buffer;
	unsigned int dma_drain_size;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight[2];

	unsigned int rq_timeout;
	struct timer_list timeout;
	struct list_head timeout_list;

	struct queue_limits limits;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int ordered, next_ordered, ordseq;
	int orderr, ordcolor;
	struct request pre_flush_rq, bar_rq, post_flush_rq;
	struct request *orig_bar_rq;

	struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
};
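/*
 * Example (editor's illustration): a request_fn based driver typically
 * allocates its queue with blk_init_queue() (declared below), hangs its
 * per-device structure off ->queuedata, and adjusts the limits to match
 * the hardware.  mydev and mydrv_request are hypothetical:
 *
 *	q = blk_init_queue(mydrv_request, &mydev->lock);
 *	if (!q)
 *		return -ENOMEM;
 *	q->queuedata = mydev;
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_hw_sectors(q, 255);
 *	mydev->gd->queue = q;
 */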
#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
#define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_CLUSTER) |	\
				 (1 << QUEUE_FLAG_STACKABLE) |	\
				 (1 << QUEUE_FLAG_SAME_COMP))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}
static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
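/*
 * Example (editor's illustration): the locked variants above must run
 * under ->queue_lock, which is what the WARN_ON_ONCE() checks enforce:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * The *_unlocked variants are for contexts where the queue is not yet
 * visible to anyone else, e.g. during queue initialization.
 */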
enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_BY_DRAIN		= 0x01,
	QUEUE_ORDERED_BY_TAG		= 0x02,
	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
	QUEUE_ORDERED_DO_BAR		= 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
	QUEUE_ORDERED_DO_FUA		= 0x80,

	QUEUE_ORDERED_NONE		= 0x00,

	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
				 blk_failfast_transport(rq) ||	\
				 blk_failfast_driver(rq))
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)

#define blk_account_rq(rq)	\
	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}
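/*
 * Example (editor's illustration): putting the helpers above together, a
 * driver might classify a freshly fetched request like this (the mydrv_*
 * calls are hypothetical):
 *
 *	if (rq_data_dir(rq) == READ)
 *		mydrv_queue_read(rq);
 *	else if (blk_fua_rq(rq))
 *		mydrv_queue_write_fua(rq);
 *	else
 *		mydrv_queue_write(rq);
 *
 *	if (rq_is_sync(rq))
 *		mydrv_kick_hardware();
 */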
/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bit set, nor
 * may it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (blk_discard_rq(rq) || blk_fs_request((rq))))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
	(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt - 1)
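/*
 * Example (editor's illustration): rq_for_each_segment() walks every
 * bio_vec in every bio of a request.  A PIO style driver might push a
 * write request's data to the device like this (mydrv_pio_write is
 * hypothetical; bvec_kmap_irq/bvec_kunmap_irq come from linux/bio.h):
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *	unsigned long flags;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		char *buf = bvec_kmap_irq(bvec, &flags);
 *		mydrv_pio_write(buf, bvec->bv_len);
 *		bvec_kunmap_irq(buf, &flags);
 *	}
 */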
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()		: the current sector
 * blk_rq_bytes()	: bytes left in the entire request
 * blk_rq_cur_bytes()	: bytes left in the current segment
 * blk_rq_err_bytes()	: bytes left till the next error boundary
 * blk_rq_sectors()	: sectors left in the entire request
 * blk_rq_cur_sectors()	: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
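/*
 * Example (editor's illustration): blk_rq_map_kern() plus blk_execute_rq()
 * is the usual way to issue an internally generated command and wait for
 * its completion.  A sketch with error handling elided; cdb, cdb_len,
 * buffer and bufflen are hypothetical driver data:
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	memcpy(rq->cmd, cdb, cdb_len);
 *	rq->cmd_len = cdb_len;
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *	if (bufflen)
 *		blk_rq_map_kern(q, rq, buffer, bufflen, GFP_KERNEL);
 *	blk_execute_rq(q, disk, rq, 0);
 *	err = rq->errors;
 *	blk_put_request(rq);
 */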
/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
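/*
 * Example (editor's illustration): a simple request_fn based driver can
 * pair blk_fetch_request() with __blk_end_request_all(); the queue lock
 * is already held when the request_fn runs, hence the __ variants.
 * mydrv_transfer is hypothetical:
 *
 *	static void mydrv_request(struct request_queue *q)
 *	{
 *		struct request *rq;
 *		int err;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (!blk_fs_request(rq)) {
 *				__blk_end_request_all(rq, -EIO);
 *				continue;
 *			}
 *			err = mydrv_transfer(rq);
 *			__blk_end_request_all(rq, err);
 *		}
 *	}
 */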
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);

/* Temporary compatibility wrapper */
static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
{
	blk_queue_max_hw_sectors(q, max);
}

extern void blk_queue_max_segments(struct request_queue *, unsigned short);

static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
{
	blk_queue_max_segments(q, max);
}

static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
{
	blk_queue_max_segments(q, max);
}

extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
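/*
 * Example (editor's illustration): with tagged queueing, the driver asks
 * the block layer to assign a tag before handing a request to hardware,
 * and releases the tag on completion.  mydrv_send_to_hw is hypothetical,
 * and the queue must first have been set up with blk_queue_init_tags():
 *
 *	if (blk_queue_start_tag(q, rq))
 *		return;				(out of tags, retry later)
 *	mydrv_send_to_hw(rq, rq->tag);
 *
 * and in the completion path, with the queue lock held:
 *
 *	blk_queue_end_tag(q, rq);
 */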
extern int blkdev_issue_flush(struct block_device *, sector_t *);
#define DISCARD_FL_WAIT		0x01	/* wait for completion */
#define DISCARD_FL_BARRIER	0x02	/* issue DISCARD_BARRIER request */
extern int blkdev_issue_discard(struct block_device *, sector_t sector,
		sector_t nr_sects, gfp_t, int flags);

static inline int sb_issue_discard(struct super_block *sb,
				   sector_t block, sector_t nr_blocks)
{
	block <<= (sb->s_blocksize_bits - 9);
	nr_blocks <<= (sb->s_blocksize_bits - 9);
	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
				    DISCARD_FL_BARRIER);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

#define MAX_PHYS_SEGMENTS	128
#define MAX_HW_SEGMENTS		128
#define SAFE_MAX_SECTORS	255
#define MAX_SEGMENT_SIZE	65536

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}
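/*
 * Worked example (editor's illustration) for the helper above: on a disk
 * with physical_block_size = 4096, io_min = 4096 and alignment_offset = 0,
 * a partition starting at LBA 7 gives
 *
 *	granularity = max(4096, 4096)		= 4096
 *	alignment   = (7 << 9) & 4095		= 3584
 *	return (4096 + 0 - 3584) & 4095		= 512
 *
 * i.e. the partition is 512 bytes short of the next physical block
 * boundary, which is what gets reported as its alignment_offset.
 */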
static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, void *addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !((unsigned long)addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
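/*
 * Example (editor's illustration): MODULE_ALIAS_BLOCKDEV_MAJOR(8) expands
 * to MODULE_ALIAS("block-major-8-*"), which lets udev/modprobe load the
 * module on demand for any minor of major 8.  Similarly, blksize_bits()
 * computes log2 of a power-of-two block size: blksize_bits(512) == 9,
 * blksize_bits(4096) == 12.
 */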
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void *prot_buf;
	void *data_buf;
	sector_t sector;
	unsigned int data_size;
	unsigned short sector_size;
	const char *disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn *generate_fn;
	integrity_vrfy_fn *verify_fn;
	integrity_set_tag_fn *set_tag_fn;
	integrity_get_tag_fn *get_tag_fn;

	unsigned short flags;
	unsigned short tuple_size;
	unsigned short sector_size;
	unsigned short tag_size;

	const char *name;

	struct kobject kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define blk_get_integrity(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
			      void **, unsigned long *);
	int (*media_changed) (struct gendisk *);
	unsigned long long (*set_capacity) (struct gendisk *,
					    unsigned long long);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif