Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.13-rc1 (363 lines, 11 kB)
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	if (q->mq_ops)
		return blk_mq_map_queue(q, ctx->cpu)->fq;
	return q->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}
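
/*
 * Illustrative sketch (not part of the original header): a minimal caller
 * pattern for blk_queue_enter_live(), assuming the caller is already in
 * generic_make_request() context. blk_queue_exit() is the matching drop
 * side, declared in <linux/blkdev.h>; the helper name is hypothetical.
 */
static inline void example_borrow_queue_ref(struct request_queue *q)
{
	blk_queue_enter_live(q);	/* extra ref, no freeze check needed */
	/* ... queue is guaranteed live across this window ... */
	blk_queue_exit(q);		/* drops q->q_usage_counter */
}
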
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}
#else
static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);


bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
				struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,
	REQ_ATOM_POLL_SLEPT,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
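
/*
 * Illustrative sketch (not part of the original header): the
 * grab-the-request pattern the comment above describes. Whichever of the
 * EH timer and the IO completion path sees 0 from blk_mark_rq_complete()
 * owns completing the request; the helper name is hypothetical.
 */
static inline bool example_try_own_completion(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return false;	/* the other path grabbed it first */
	/* ... safe to run the actual completion from here ... */
	return true;
}
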
/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	WARN_ON_ONCE(q->mq_ops);

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is running and the flush request isn't
		 * queueable in the drive, so we can hold the queue until the
		 * flush request is finished. Even if we don't do this, the
		 * driver can't dispatch next requests and will requeue them.
		 * And this can improve throughput too. For example, we have
		 * requests flush1, write1, flush2. flush1 is dispatched, then
		 * the queue is held, so write1 isn't inserted into the queue.
		 * After flush1 is finished, flush2 will be dispatched. Since
		 * the disk cache is already clean, flush2 will be finished
		 * very soon, so it looks like flush2 is folded into flush1.
		 * Since the queue is held, a flag is set to indicate that the
		 * queue should be restarted later. Please see flush_end_io()
		 * for details.
		 */
		if (fq->flush_pending_idx != fq->flush_running_idx &&
				!queue_flush_queueable(q)) {
			fq->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_activate_req_fn)
		e->type->ops.sq.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_deactivate_req_fn)
		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);
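
/*
 * Illustrative sketch (not part of the original header): the usual probe
 * sequence built from the merge helpers above -- check mergeability
 * first, then ask where the bio would attach. Assumes enum elv_merge
 * (ELEVATOR_NO_MERGE etc.) is visible via <linux/elevator.h>; the helper
 * name is hypothetical.
 */
static inline enum elv_merge example_probe_merge(struct request *rq,
						 struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return ELEVATOR_NO_MERGE;
	return blk_try_merge(rq, bio);	/* FRONT/BACK/DISCARD or NO_MERGE */
}
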
/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested. It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 * a) it's attached to a gendisk, and
 * b) the queue had IO stats enabled when this request was started, and
 * c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
		!blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio. May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}
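
/*
 * Illustrative sketch (not part of the original header): how an
 * allocation path might pin the io_context picked by rq_ioc(). With
 * CONFIG_BLK_CGROUP set, the bio's cgroup io_context wins over
 * %current's; the helper name is hypothetical.
 */
static inline void example_pin_alloc_ioc(struct bio *bio)
{
	struct io_context *ioc = rq_ioc(bio);	/* may be NULL */

	if (ioc)
		get_io_context(ioc);	/* hold a ref across the allocation */
}
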
/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it. Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#endif /* BLK_INTERNAL_H */
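
/*
 * Illustrative sketch (appended outside the guard, not part of the
 * original header): the CONFIG stubs above let callers invoke the
 * throttling hooks unconditionally -- with CONFIG_BLK_DEV_THROTTLING
 * unset they compile down to no-ops. The helper name is hypothetical.
 */
static inline void example_queue_teardown(struct request_queue *q)
{
	blk_throtl_drain(q);	/* no-op when throttling is compiled out */
	blk_throtl_exit(q);
}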