/* linux/blkdev.h, at v3.14-rc2 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue *q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq *blkg;		/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
	unsigned int flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		struct work_struct mq_flush_data;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	u64 cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;		/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq *icq;
			void *priv[2];
		} elv;

		struct {
			unsigned int seq;
			struct list_head list;
			rq_end_io_fn *saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests.  Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};
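
/*
 * Example (illustrative sketch; "my_done" and the use of a completion in
 * end_io_data are hypothetical): an rq_end_io_fn completion callback, as it
 * might be paired with blk_execute_rq_nowait() declared further below.
 *
 *	static void my_done(struct request *rq, int error)
 *	{
 *		struct completion *waiting = rq->end_io_data;
 *
 *		rq->end_io_data = NULL;
 *		complete(waiting);	// the submitter frees rq afterwards
 *	}
 */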

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int max_write_same_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	unsigned short logical_block_size;
	unsigned short max_segments;
	unsigned short max_integrity_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char cluster;
	unsigned char discard_zeroes_data;
	unsigned char raid_partial_stripes_expensive;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;
	int nr_rqs[2];		/* # allocated [a]sync rqs */
	int nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list root_rl;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unprep_rq_fn *unprep_rq_fn;
	merge_bvec_fn *merge_bvec_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;

	struct blk_mq_ops *mq_ops;

	unsigned int *mq_map;

	/* sw queues */
	struct blk_mq_ctx *queue_ctx;
	unsigned int nr_queues;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx **queue_hw_ctx;
	unsigned int nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t end_sector;
	struct request *boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work delay_work;

	struct backing_dev_info backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void *queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef CONFIG_PM_RUNTIME
	struct device *dev;
	int rpm_status;
	unsigned int nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long nr_requests;	/* Max # of requests */
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int dma_drain_size;
	void *dma_drain_buffer;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int request_fn_active;

	unsigned int rq_timeout;
	struct timer_list timeout;
	struct list_head timeout_list;

	struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq *root_blkg;
	struct list_head blkg_list;
#endif

	struct queue_limits limits;

	/*
	 * sg stuff
	 */
	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int flush_flags;
	unsigned int flush_not_queueable:1;
	unsigned int flush_queue_delayed:1;
	unsigned int flush_pending_idx:1;
	unsigned int flush_running_idx:1;
	unsigned long flush_pending_since;
	struct list_head flush_queue[2];
	struct list_head flush_data_in_flight;
	union {
		struct request flush_rq;
		struct {
			spinlock_t mq_flush_lock;
			struct work_struct mq_flush_work;
		};
	};

	struct mutex sysfs_lock;

	int bypass_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn *bsg_job_fn;
	int bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head rcu_head;
	wait_queue_head_t mq_freeze_wq;
	struct percpu_counter mq_usage_counter;
	struct list_head all_q_node;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_STACKABLE) |	\
				 (1 << QUEUE_FLAG_SAME_COMP) |	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |	\
				 (1 << QUEUE_FLAG_SAME_COMP))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
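
/*
 * Example (illustrative sketch): the locked helpers above assert that the
 * queue lock is held, so a driver typically toggles queue flags like this
 * (the _unlocked variants exist for setup paths before the queue is live):
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *	spin_unlock_irq(q->queue_lock);
 */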

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			    REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(((rq)->cmd_flags & 1) != 0)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK	0	/* serve it */
#define BLKPREP_KILL	1	/* fatal error, kill */
#define BLKPREP_DEFER	2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)		\
	(_iter.bio->bi_next == NULL &&		\
	 bio_iter_last(bvec, _iter.iter))

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif
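
/*
 * Example (illustrative sketch; my_* names are hypothetical): walking the
 * data segments of a request with rq_for_each_segment().  Each iteration
 * yields one struct bio_vec describing a page/offset/length triple.
 *
 *	static void my_xfer_request(struct my_dev *dev, struct request *rq)
 *	{
 *		struct req_iterator iter;
 *		struct bio_vec bvec;
 *		sector_t sector = blk_rq_pos(rq);
 *
 *		rq_for_each_segment(bvec, rq, iter) {
 *			my_xfer_bvec(dev, &bvec, sector, rq_data_dir(rq));
 *			sector += bvec.bv_len >> 9;
 *		}
 *	}
 */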

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
				    unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()		: the current sector
 * blk_rq_bytes()	: bytes left in the entire request
 * blk_rq_cur_bytes()	: bytes left in the current segment
 * blk_rq_err_bytes()	: bytes left till the next error boundary
 * blk_rq_sectors()	: sectors left in the entire request
 * blk_rq_cur_sectors()	: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int cmd_flags)
{
	if (unlikely(cmd_flags & REQ_DISCARD))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(cmd_flags & REQ_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
		return q->limits.max_hw_sectors;

	return blk_queue_get_max_sectors(q, rq->cmd_flags);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
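
/*
 * Example (illustrative sketch; my_* names are hypothetical): a minimal,
 * fully synchronous request_fn built on the fetch/end pattern.  The
 * request_fn is invoked with q->queue_lock held, hence the locked
 * __blk_end_request_all() variant; real drivers usually complete requests
 * asynchronously with blk_end_request() instead.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (rq->cmd_type != REQ_TYPE_FS) {
 *				__blk_end_request_all(rq, -EIO);
 *				continue;
 *			}
 *			// blk_rq_pos()/blk_rq_bytes() describe the transfer
 *			my_handle_rq(rq);
 *			__blk_end_request_all(rq, 0);
 *		}
 *	}
 */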

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
					request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
					  unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
					     unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
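
/*
 * Example (illustrative sketch; the values are made up): a driver typically
 * applies its hardware limits right after creating the queue.
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 255);
 *	blk_queue_max_segments(q, 64);
 *	blk_queue_max_segment_size(q, BLK_MAX_SEGMENT_SIZE);
 */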

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
			  struct scatterlist *sglist);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM_RUNTIME
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period.  This allows merging of sequential requests
 * into a single larger request.  As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself.  For details, please
 * see schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	unsigned long magic;		/* detect uninitialized use-cases */
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
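
/*
 * Example (illustrative sketch): submitters batch I/O under a plug so that
 * requests can be merged and handed to the driver in one go:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	// submit a burst of bios, e.g. repeated generic_make_request() calls
 *	blk_finish_plug(&plug);
 *
 * The plugged requests stay on the per-task lists until blk_finish_plug()
 * or until the task sleeps, at which point they are flushed to the queue.
 */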

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}
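
/*
 * Example (illustrative sketch): callers are expected to check discard
 * support before issuing discards, e.g.:
 *
 *	struct request_queue *q = bdev_get_queue(bdev);
 *
 *	if (blk_queue_discard(q))
 *		err = blkdev_issue_discard(bdev, sector, nr_sects,
 *					   GFP_KERNEL, 0);
 */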

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}
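
/*
 * Worked example for queue_limit_discard_alignment() above (made-up numbers):
 * with discard_granularity = 4096 bytes (8 sectors), discard_alignment = 0
 * and a partition starting at sector 63, offset = 63 % 8 = 7, then
 * (8 + 0 - 7) % 8 = 1 sector, so the function returns 512 bytes, i.e. the
 * gap to the first aligned boundary at sector 64.
 */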

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void *prot_buf;
	void *data_buf;
	sector_t sector;
	unsigned int data_size;
	unsigned short sector_size;
	const char *disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn *generate_fn;
	integrity_vrfy_fn *verify_fn;
	integrity_set_tag_fn *set_tag_fn;
	integrity_get_tag_fn *get_tag_fn;

	unsigned short flags;
	unsigned short tuple_size;
	unsigned short sector_size;
	unsigned short tag_size;

	const char *name;

	struct kobject kobj;
};

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
				  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
				   struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return 0;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline int blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
	return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline int blk_integrity_merge_rq(struct request_queue *rq,
					 struct request *r1,
					 struct request *r2)
{
	return 0;
}
static inline int blk_integrity_merge_bio(struct request_queue *rq,
					  struct request *r,
					  struct bio *b)
{
	return 0;
}
static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
			      void **, unsigned long *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
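
/*
 * Example (illustrative sketch; my_* names are hypothetical): a driver
 * fills in a block_device_operations table and points its gendisk at it
 * (disk->fops = &my_fops) before add_disk():
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.getgeo		= my_getgeo,
 *	};
 */
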
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

#endif /* CONFIG_BLOCK */

#endif