/* linux/blkdev.h, as of v2.6.31 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 *
 */
enum {
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH	= 0x41,		/* flush request */
	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
};

/*
 * request type modified bits. first two bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW			(1 << __REQ_RW)
#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD		(1 << __REQ_DISCARD)
#define REQ_SORTED		(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
#define REQ_FUA			(1 << __REQ_FUA)
#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
#define REQ_STARTED		(1 << __REQ_STARTED)
#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
#define REQ_QUEUED		(1 << __REQ_QUEUED)
#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
#define REQ_FAILED		(1 << __REQ_FAILED)
#define REQ_QUIET		(1 << __REQ_QUIET)
#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC		(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
#define REQ_RW_META		(1 << __REQ_RW_META)
#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
#define REQ_IO_STAT		(1 << __REQ_IO_STAT)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;
	int cpu;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	sector_t __sector;		/* sector cursor */
	unsigned int __data_len;	/* total data len */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned short cmd_len;
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests.  Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
typedef int (prepare_discard_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;

	unsigned short		logical_block_size;
	unsigned short		max_hw_segments;
	unsigned short		max_phys_segments;

	unsigned char		misaligned;
	unsigned char		no_cluster;
};
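
/*
 * Illustrative sketch (not from the original source): how these limits relate
 * for a drive that exposes 512-byte logical sectors on top of 4096-byte
 * physical sectors; the concrete numbers are only an example.
 *
 *	lim->logical_block_size  = 512
 *	lim->physical_block_size = 4096
 *	lim->io_min              = 4096   (smallest preferred I/O size)
 *	lim->alignment_offset    = 0      (LBA 0 starts on a physical boundary)
 *
 * io_opt would typically carry something like a RAID stripe width, if the
 * device reports one.
 */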

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	prepare_discard_fn	*prepare_discard_fn;
	merge_bvec_fn		*merge_bvec_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	void			*dma_drain_buffer;
	unsigned int		dma_drain_size;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
#define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_CLUSTER) |		\
				 (1 << QUEUE_FLAG_STACKABLE))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
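
/*
 * Illustrative sketch (not from the original source): queue_flag_set() and
 * queue_flag_clear() expect ->queue_lock to be held, which is what the
 * WARN_ON_ONCE(!queue_is_locked(q)) checks above enforce.  A caller would
 * typically look something like
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * The *_unlocked variants skip the check for paths where nobody else can see
 * the queue yet, e.g. during queue setup.
 */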

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_BY_DRAIN		= 0x01,
	QUEUE_ORDERED_BY_TAG		= 0x02,
	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
	QUEUE_ORDERED_DO_BAR		= 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
	QUEUE_ORDERED_DO_FUA		= 0x80,

	QUEUE_ORDERED_NONE		= 0x00,

	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
				 blk_failfast_transport(rq) ||	\
				 blk_failfast_driver(rq))
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)

#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it is either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}
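
/*
 * Illustrative sketch (not from the original source): a request handler
 * commonly branches on the helpers above before touching any data.  A
 * hypothetical do_request()-style loop body might contain
 *
 *	if (!blk_fs_request(rq)) {
 *		__blk_end_request_all(rq, -EIO);   (declared later in this file)
 *		continue;
 *	}
 *	if (rq_data_dir(rq) == WRITE)
 *		...transfer data to the device...
 *	else
 *		...transfer data from the device...
 *
 * rq_is_sync()/rw_is_sync() are what the block layer uses to pick the sync or
 * async pool in struct request_list above.
 */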

/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (blk_discard_rq(rq) || blk_fs_request((rq))))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
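
/*
 * Illustrative sketch (not from the original source): a typical
 * rq_for_each_segment() walk over all bio_vecs of a request.  "rq" is a
 * hypothetical struct request being serviced by a driver.
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *buf = page_address(bvec->bv_page) + bvec->bv_offset;
 *
 *		...copy bvec->bv_len bytes to or from buf...
 *	}
 *
 * page_address() assumes the pages are not in highmem; a real driver would
 * use kmap_atomic() or similar when highmem pages are possible.
 */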

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * blk_rq_pos()		: the current sector
 * blk_rq_bytes()	: bytes left in the entire request
 * blk_rq_cur_bytes()	: bytes left in the current segment
 * blk_rq_sectors()	: sectors left in the entire request
 * blk_rq_cur_sectors()	: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
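
/*
 * Illustrative sketch (not from the original source): these accessors, not
 * the __sector/__data_len fields in struct request, are the supported way to
 * inspect a request while processing it, e.g.
 *
 *	sector_t pos   = blk_rq_pos(rq);          (next sector to transfer)
 *	unsigned nsect = blk_rq_cur_sectors(rq);  (sectors in the current segment)
 *
 * Both values shrink as the driver completes parts of the request through
 * blk_update_request()/blk_end_request() and friends, declared below.
 */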

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
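
/*
 * Illustrative sketch (not from the original source): the canonical shape of
 * a simple ->request_fn() built on the issue/completion helpers above.  The
 * names my_request_fn/my_transfer are hypothetical; the queue lock is held
 * when ->request_fn() is invoked.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			int err = my_transfer(rq);	(driver specific)
 *
 *			__blk_end_request_all(rq, err);
 *		}
 *	}
 *
 * blk_fetch_request() is blk_peek_request() plus blk_start_request(); drivers
 * that complete requests asynchronously use those two separately and later
 * call blk_end_request()/blk_end_request_all() from their completion path.
 */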

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
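
/*
 * Illustrative sketch (not from the original source): typical queue setup in
 * a request-based driver's probe/init path.  my_request_fn, my_lock and the
 * 512-sector limit are hypothetical.
 *
 *	struct request_queue *q;
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_max_sectors(q, 512);
 *	disk->queue = q;		(then add_disk(disk))
 *
 * bio-based drivers instead allocate with blk_alloc_queue() and install their
 * own ->make_request_fn via blk_queue_make_request().
 */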

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern int blkdev_issue_flush(struct block_device *, sector_t *);
extern int blkdev_issue_discard(struct block_device *,
				sector_t sector, sector_t nr_sects, gfp_t);

static inline int sb_issue_discard(struct super_block *sb,
				   sector_t block, sector_t nr_blocks)
{
	block <<= (sb->s_blocksize_bits - 9);
	nr_blocks <<= (sb->s_blocksize_bits - 9);
	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define BLK_SEG_BOUNDARY_MASK	0xFFFFFFFFUL

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_hw_segments(struct request_queue *q)
{
	return q->limits.max_hw_segments;
}

static inline unsigned short queue_max_phys_segments(struct request_queue *q)
{
	return q->limits.max_phys_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q && q->limits.misaligned)
		return -1;

	if (q && q->limits.alignment_offset)
		return q->limits.alignment_offset;

	return 0;
}

static inline int queue_sector_alignment_offset(struct request_queue *q,
						sector_t sector)
{
	return ((sector << 9) - q->limits.alignment_offset)
		& (q->limits.io_min - 1);
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, void *addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !((unsigned long)addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define blk_get_integrity(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
						void **, unsigned long *);
	int (*media_changed) (struct gendisk *);
	unsigned long long (*set_capacity) (struct gendisk *,
						unsigned long long);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif