#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
			  unsigned int *next_set);
void __blk_add_timer(struct request *req, struct list_head *timeout_list);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);


bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * If a flush request is running and the flush request isn't
		 * queueable in the drive, we can hold the queue until the
		 * flush request is finished.  Even if we don't do this, the
		 * driver can't dispatch the next requests and will requeue
		 * them; holding the queue can also improve throughput.  For
		 * example, suppose we have requests flush1, write1, flush2.
		 * flush1 is dispatched, then the queue is held and write1
		 * isn't inserted into the queue.  After flush1 finishes,
		 * flush2 is dispatched.  Since the disk cache is already
		 * clean, flush2 finishes very quickly, so it looks like
		 * flush2 is folded into flush1.
		 * While the queue is held, a flag is set to indicate that
		 * the queue should be restarted later.  Please see
		 * flush_end_io() for details.
		 */
		if (q->flush_pending_idx != q->flush_running_idx &&
				!queue_flush_queueable(q)) {
			q->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_dying(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

void __blk_run_queue_uncond(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
		(rq->cmd_type == REQ_TYPE_FS);
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context, which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */