Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

File at commit cfd63736726a7fabb3dd89ea91cff143ac4dc8a7 (99 lines, 2.9 kB) — view raw
1#ifndef BLK_MQ_SCHED_H 2#define BLK_MQ_SCHED_H 3 4#include "blk-mq.h" 5#include "blk-mq-tag.h" 6 7void blk_mq_sched_free_hctx_data(struct request_queue *q, 8 void (*exit)(struct blk_mq_hw_ctx *)); 9 10void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio); 11 12void blk_mq_sched_request_inserted(struct request *rq); 13bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, 14 struct request **merged_request); 15bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); 16bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); 17void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); 18 19void blk_mq_sched_insert_request(struct request *rq, bool at_head, 20 bool run_queue, bool async, bool can_block); 21void blk_mq_sched_insert_requests(struct request_queue *q, 22 struct blk_mq_ctx *ctx, 23 struct list_head *list, bool run_queue_async); 24 25void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx); 26 27int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); 28void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); 29 30int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, 31 unsigned int hctx_idx); 32void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, 33 unsigned int hctx_idx); 34 35int blk_mq_sched_init(struct request_queue *q); 36 37static inline bool 38blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) 39{ 40 if (blk_queue_nomerges(q) || !bio_mergeable(bio)) 41 return false; 42 43 return __blk_mq_sched_bio_merge(q, bio); 44} 45 46static inline bool 47blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, 48 struct bio *bio) 49{ 50 struct elevator_queue *e = q->elevator; 51 52 if (e && e->type->ops.mq.allow_merge) 53 return e->type->ops.mq.allow_merge(q, rq, bio); 54 55 return true; 56} 57 58static inline void blk_mq_sched_completed_request(struct request *rq) 59{ 60 struct 
elevator_queue *e = rq->q->elevator; 61 62 if (e && e->type->ops.mq.completed_request) 63 e->type->ops.mq.completed_request(rq); 64} 65 66static inline void blk_mq_sched_started_request(struct request *rq) 67{ 68 struct request_queue *q = rq->q; 69 struct elevator_queue *e = q->elevator; 70 71 if (e && e->type->ops.mq.started_request) 72 e->type->ops.mq.started_request(rq); 73} 74 75static inline void blk_mq_sched_requeue_request(struct request *rq) 76{ 77 struct request_queue *q = rq->q; 78 struct elevator_queue *e = q->elevator; 79 80 if (e && e->type->ops.mq.requeue_request) 81 e->type->ops.mq.requeue_request(rq); 82} 83 84static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) 85{ 86 struct elevator_queue *e = hctx->queue->elevator; 87 88 if (e && e->type->ops.mq.has_work) 89 return e->type->ops.mq.has_work(hctx); 90 91 return false; 92} 93 94static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) 95{ 96 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 97} 98 99#endif