Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes and updates from Jens Axboe:
"Some fixes and followup features/changes that should go in, in this
merge window. This contains:

- Two fixes for lightnvm from Javier, fixing problems in the new code
merged previously in this merge window.

- A fix from Jan for the backing device changes, fixing an issue in
NFS that causes a failure to mount on certain setups.

- A change from Christoph, cleaning up the blk-mq init and exit
request paths.

- Remove elevator_change(), which is now unused. From Bart.

- A fix for queue operation invocation on a dead queue, from Bart.

- A series fixing up mtip32xx for blk-mq scheduling, removing a
bandaid we previously had in place for this. From me.

- A regression fix for this series, fixing a case where we wait on
workqueue flushing from an invalid (non-blocking) context. From me.

- A fix/optimization from Ming, ensuring that we don't both quiesce
and freeze a queue at the same time.

- A fix from Peter on lock ordering for CPU hotplug. Not a real
problem right now, but will be once the CPU hotplug rework goes in.

- A series from Omar, cleaning up our blk-mq debugfs support, and
adding support for exporting info from schedulers in debugfs as
well. This is really useful in debugging stalls or livelocks. From
Omar"

* 'for-linus' of git://git.kernel.dk/linux-block: (28 commits)
mq-deadline: add debugfs attributes
kyber: add debugfs attributes
blk-mq-debugfs: allow schedulers to register debugfs attributes
blk-mq: untangle debugfs and sysfs
blk-mq: move debugfs declarations to a separate header file
blk-mq: Do not invoke queue operations on a dead queue
blk-mq-debugfs: get rid of a bunch of boilerplate
blk-mq-debugfs: rename hw queue directories from <n> to hctx<n>
blk-mq-debugfs: don't open code strstrip()
blk-mq-debugfs: error on long write to queue "state" file
blk-mq-debugfs: clean up flag definitions
blk-mq-debugfs: separate flags with |
nfs: Fix bdi handling for cloned superblocks
block/mq: Cure cpu hotplug lock inversion
lightnvm: fix bad back free on error path
lightnvm: create cmd before allocating request
blk-mq: don't use sync workqueue flushing from drivers
mtip32xx: convert internal commands to regular block infrastructure
mtip32xx: cleanup internal tag assumptions
block: don't call blk_mq_quiesce_queue() after queue is frozen
...

+1038 -1014
+2 -6
block/blk-core.c
··· 561 561 * prevent that q->request_fn() gets invoked after draining finished. 562 562 */ 563 563 blk_freeze_queue(q); 564 - if (!q->mq_ops) { 565 - spin_lock_irq(lock); 564 + spin_lock_irq(lock); 565 + if (!q->mq_ops) 566 566 __blk_drain_queue(q, true); 567 - } else { 568 - blk_mq_debugfs_unregister_mq(q); 569 - spin_lock_irq(lock); 570 - } 571 567 queue_flag_set(QUEUE_FLAG_DEAD, q); 572 568 spin_unlock_irq(lock); 573 569
+395 -483
block/blk-mq-debugfs.c
··· 21 21 #include <linux/blk-mq.h> 22 22 #include "blk.h" 23 23 #include "blk-mq.h" 24 + #include "blk-mq-debugfs.h" 24 25 #include "blk-mq-tag.h" 25 - 26 - struct blk_mq_debugfs_attr { 27 - const char *name; 28 - umode_t mode; 29 - const struct file_operations *fops; 30 - }; 31 - 32 - static int blk_mq_debugfs_seq_open(struct inode *inode, struct file *file, 33 - const struct seq_operations *ops) 34 - { 35 - struct seq_file *m; 36 - int ret; 37 - 38 - ret = seq_open(file, ops); 39 - if (!ret) { 40 - m = file->private_data; 41 - m->private = inode->i_private; 42 - } 43 - return ret; 44 - } 45 26 46 27 static int blk_flags_show(struct seq_file *m, const unsigned long flags, 47 28 const char *const *flag_name, int flag_name_count) ··· 34 53 if (!(flags & BIT(i))) 35 54 continue; 36 55 if (sep) 37 - seq_puts(m, " "); 56 + seq_puts(m, "|"); 38 57 sep = true; 39 58 if (i < flag_name_count && flag_name[i]) 40 59 seq_puts(m, flag_name[i]); ··· 44 63 return 0; 45 64 } 46 65 66 + #define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name 47 67 static const char *const blk_queue_flag_name[] = { 48 - [QUEUE_FLAG_QUEUED] = "QUEUED", 49 - [QUEUE_FLAG_STOPPED] = "STOPPED", 50 - [QUEUE_FLAG_SYNCFULL] = "SYNCFULL", 51 - [QUEUE_FLAG_ASYNCFULL] = "ASYNCFULL", 52 - [QUEUE_FLAG_DYING] = "DYING", 53 - [QUEUE_FLAG_BYPASS] = "BYPASS", 54 - [QUEUE_FLAG_BIDI] = "BIDI", 55 - [QUEUE_FLAG_NOMERGES] = "NOMERGES", 56 - [QUEUE_FLAG_SAME_COMP] = "SAME_COMP", 57 - [QUEUE_FLAG_FAIL_IO] = "FAIL_IO", 58 - [QUEUE_FLAG_STACKABLE] = "STACKABLE", 59 - [QUEUE_FLAG_NONROT] = "NONROT", 60 - [QUEUE_FLAG_IO_STAT] = "IO_STAT", 61 - [QUEUE_FLAG_DISCARD] = "DISCARD", 62 - [QUEUE_FLAG_NOXMERGES] = "NOXMERGES", 63 - [QUEUE_FLAG_ADD_RANDOM] = "ADD_RANDOM", 64 - [QUEUE_FLAG_SECERASE] = "SECERASE", 65 - [QUEUE_FLAG_SAME_FORCE] = "SAME_FORCE", 66 - [QUEUE_FLAG_DEAD] = "DEAD", 67 - [QUEUE_FLAG_INIT_DONE] = "INIT_DONE", 68 - [QUEUE_FLAG_NO_SG_MERGE] = "NO_SG_MERGE", 69 - [QUEUE_FLAG_POLL] = "POLL", 70 - [QUEUE_FLAG_WC] 
= "WC", 71 - [QUEUE_FLAG_FUA] = "FUA", 72 - [QUEUE_FLAG_FLUSH_NQ] = "FLUSH_NQ", 73 - [QUEUE_FLAG_DAX] = "DAX", 74 - [QUEUE_FLAG_STATS] = "STATS", 75 - [QUEUE_FLAG_POLL_STATS] = "POLL_STATS", 76 - [QUEUE_FLAG_REGISTERED] = "REGISTERED", 68 + QUEUE_FLAG_NAME(QUEUED), 69 + QUEUE_FLAG_NAME(STOPPED), 70 + QUEUE_FLAG_NAME(SYNCFULL), 71 + QUEUE_FLAG_NAME(ASYNCFULL), 72 + QUEUE_FLAG_NAME(DYING), 73 + QUEUE_FLAG_NAME(BYPASS), 74 + QUEUE_FLAG_NAME(BIDI), 75 + QUEUE_FLAG_NAME(NOMERGES), 76 + QUEUE_FLAG_NAME(SAME_COMP), 77 + QUEUE_FLAG_NAME(FAIL_IO), 78 + QUEUE_FLAG_NAME(STACKABLE), 79 + QUEUE_FLAG_NAME(NONROT), 80 + QUEUE_FLAG_NAME(IO_STAT), 81 + QUEUE_FLAG_NAME(DISCARD), 82 + QUEUE_FLAG_NAME(NOXMERGES), 83 + QUEUE_FLAG_NAME(ADD_RANDOM), 84 + QUEUE_FLAG_NAME(SECERASE), 85 + QUEUE_FLAG_NAME(SAME_FORCE), 86 + QUEUE_FLAG_NAME(DEAD), 87 + QUEUE_FLAG_NAME(INIT_DONE), 88 + QUEUE_FLAG_NAME(NO_SG_MERGE), 89 + QUEUE_FLAG_NAME(POLL), 90 + QUEUE_FLAG_NAME(WC), 91 + QUEUE_FLAG_NAME(FUA), 92 + QUEUE_FLAG_NAME(FLUSH_NQ), 93 + QUEUE_FLAG_NAME(DAX), 94 + QUEUE_FLAG_NAME(STATS), 95 + QUEUE_FLAG_NAME(POLL_STATS), 96 + QUEUE_FLAG_NAME(REGISTERED), 77 97 }; 98 + #undef QUEUE_FLAG_NAME 78 99 79 - static int blk_queue_flags_show(struct seq_file *m, void *v) 100 + static int queue_state_show(void *data, struct seq_file *m) 80 101 { 81 - struct request_queue *q = m->private; 102 + struct request_queue *q = data; 82 103 83 104 blk_flags_show(m, q->queue_flags, blk_queue_flag_name, 84 105 ARRAY_SIZE(blk_queue_flag_name)); ··· 88 105 return 0; 89 106 } 90 107 91 - static ssize_t blk_queue_flags_store(struct file *file, const char __user *ubuf, 92 - size_t len, loff_t *offp) 108 + static ssize_t queue_state_write(void *data, const char __user *buf, 109 + size_t count, loff_t *ppos) 93 110 { 94 - struct request_queue *q = file_inode(file)->i_private; 95 - char op[16] = { }, *s; 111 + struct request_queue *q = data; 112 + char opbuf[16] = { }, *op; 96 113 97 - len = min(len, sizeof(op) - 1); 98 - if 
(copy_from_user(op, ubuf, len)) 114 + /* 115 + * The "state" attribute is removed after blk_cleanup_queue() has called 116 + * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid 117 + * triggering a use-after-free. 118 + */ 119 + if (blk_queue_dead(q)) 120 + return -ENOENT; 121 + 122 + if (count >= sizeof(opbuf)) { 123 + pr_err("%s: operation too long\n", __func__); 124 + goto inval; 125 + } 126 + 127 + if (copy_from_user(opbuf, buf, count)) 99 128 return -EFAULT; 100 - s = op; 101 - strsep(&s, " \t\n"); /* strip trailing whitespace */ 129 + op = strstrip(opbuf); 102 130 if (strcmp(op, "run") == 0) { 103 131 blk_mq_run_hw_queues(q, true); 104 132 } else if (strcmp(op, "start") == 0) { 105 133 blk_mq_start_stopped_hw_queues(q, true); 106 134 } else { 107 - pr_err("%s: unsupported operation %s. Use either 'run' or 'start'\n", 108 - __func__, op); 135 + pr_err("%s: unsupported operation '%s'\n", __func__, op); 136 + inval: 137 + pr_err("%s: use either 'run' or 'start'\n", __func__); 109 138 return -EINVAL; 110 139 } 111 - return len; 140 + return count; 112 141 } 113 - 114 - static int blk_queue_flags_open(struct inode *inode, struct file *file) 115 - { 116 - return single_open(file, blk_queue_flags_show, inode->i_private); 117 - } 118 - 119 - static const struct file_operations blk_queue_flags_fops = { 120 - .open = blk_queue_flags_open, 121 - .read = seq_read, 122 - .llseek = seq_lseek, 123 - .release = single_release, 124 - .write = blk_queue_flags_store, 125 - }; 126 142 127 143 static void print_stat(struct seq_file *m, struct blk_rq_stat *stat) 128 144 { ··· 133 151 } 134 152 } 135 153 136 - static int queue_poll_stat_show(struct seq_file *m, void *v) 154 + static int queue_poll_stat_show(void *data, struct seq_file *m) 137 155 { 138 - struct request_queue *q = m->private; 156 + struct request_queue *q = data; 139 157 int bucket; 140 158 141 159 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) { ··· 150 168 return 0; 151 169 } 152 
170 153 - static int queue_poll_stat_open(struct inode *inode, struct file *file) 154 - { 155 - return single_open(file, queue_poll_stat_show, inode->i_private); 156 - } 157 - 158 - static const struct file_operations queue_poll_stat_fops = { 159 - .open = queue_poll_stat_open, 160 - .read = seq_read, 161 - .llseek = seq_lseek, 162 - .release = single_release, 163 - }; 164 - 171 + #define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name 165 172 static const char *const hctx_state_name[] = { 166 - [BLK_MQ_S_STOPPED] = "STOPPED", 167 - [BLK_MQ_S_TAG_ACTIVE] = "TAG_ACTIVE", 168 - [BLK_MQ_S_SCHED_RESTART] = "SCHED_RESTART", 169 - [BLK_MQ_S_TAG_WAITING] = "TAG_WAITING", 170 - 173 + HCTX_STATE_NAME(STOPPED), 174 + HCTX_STATE_NAME(TAG_ACTIVE), 175 + HCTX_STATE_NAME(SCHED_RESTART), 176 + HCTX_STATE_NAME(TAG_WAITING), 177 + HCTX_STATE_NAME(START_ON_RUN), 171 178 }; 172 - static int hctx_state_show(struct seq_file *m, void *v) 179 + #undef HCTX_STATE_NAME 180 + 181 + static int hctx_state_show(void *data, struct seq_file *m) 173 182 { 174 - struct blk_mq_hw_ctx *hctx = m->private; 183 + struct blk_mq_hw_ctx *hctx = data; 175 184 176 185 blk_flags_show(m, hctx->state, hctx_state_name, 177 186 ARRAY_SIZE(hctx_state_name)); ··· 170 197 return 0; 171 198 } 172 199 173 - static int hctx_state_open(struct inode *inode, struct file *file) 174 - { 175 - return single_open(file, hctx_state_show, inode->i_private); 176 - } 177 - 178 - static const struct file_operations hctx_state_fops = { 179 - .open = hctx_state_open, 180 - .read = seq_read, 181 - .llseek = seq_lseek, 182 - .release = single_release, 183 - }; 184 - 200 + #define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name 185 201 static const char *const alloc_policy_name[] = { 186 - [BLK_TAG_ALLOC_FIFO] = "fifo", 187 - [BLK_TAG_ALLOC_RR] = "rr", 202 + BLK_TAG_ALLOC_NAME(FIFO), 203 + BLK_TAG_ALLOC_NAME(RR), 188 204 }; 205 + #undef BLK_TAG_ALLOC_NAME 189 206 207 + #define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = 
#name 190 208 static const char *const hctx_flag_name[] = { 191 - [ilog2(BLK_MQ_F_SHOULD_MERGE)] = "SHOULD_MERGE", 192 - [ilog2(BLK_MQ_F_TAG_SHARED)] = "TAG_SHARED", 193 - [ilog2(BLK_MQ_F_SG_MERGE)] = "SG_MERGE", 194 - [ilog2(BLK_MQ_F_BLOCKING)] = "BLOCKING", 195 - [ilog2(BLK_MQ_F_NO_SCHED)] = "NO_SCHED", 209 + HCTX_FLAG_NAME(SHOULD_MERGE), 210 + HCTX_FLAG_NAME(TAG_SHARED), 211 + HCTX_FLAG_NAME(SG_MERGE), 212 + HCTX_FLAG_NAME(BLOCKING), 213 + HCTX_FLAG_NAME(NO_SCHED), 196 214 }; 215 + #undef HCTX_FLAG_NAME 197 216 198 - static int hctx_flags_show(struct seq_file *m, void *v) 217 + static int hctx_flags_show(void *data, struct seq_file *m) 199 218 { 200 - struct blk_mq_hw_ctx *hctx = m->private; 219 + struct blk_mq_hw_ctx *hctx = data; 201 220 const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags); 202 221 203 222 seq_puts(m, "alloc_policy="); ··· 206 241 return 0; 207 242 } 208 243 209 - static int hctx_flags_open(struct inode *inode, struct file *file) 210 - { 211 - return single_open(file, hctx_flags_show, inode->i_private); 212 - } 213 - 214 - static const struct file_operations hctx_flags_fops = { 215 - .open = hctx_flags_open, 216 - .read = seq_read, 217 - .llseek = seq_lseek, 218 - .release = single_release, 219 - }; 220 - 244 + #define REQ_OP_NAME(name) [REQ_OP_##name] = #name 221 245 static const char *const op_name[] = { 222 - [REQ_OP_READ] = "READ", 223 - [REQ_OP_WRITE] = "WRITE", 224 - [REQ_OP_FLUSH] = "FLUSH", 225 - [REQ_OP_DISCARD] = "DISCARD", 226 - [REQ_OP_ZONE_REPORT] = "ZONE_REPORT", 227 - [REQ_OP_SECURE_ERASE] = "SECURE_ERASE", 228 - [REQ_OP_ZONE_RESET] = "ZONE_RESET", 229 - [REQ_OP_WRITE_SAME] = "WRITE_SAME", 230 - [REQ_OP_WRITE_ZEROES] = "WRITE_ZEROES", 231 - [REQ_OP_SCSI_IN] = "SCSI_IN", 232 - [REQ_OP_SCSI_OUT] = "SCSI_OUT", 233 - [REQ_OP_DRV_IN] = "DRV_IN", 234 - [REQ_OP_DRV_OUT] = "DRV_OUT", 246 + REQ_OP_NAME(READ), 247 + REQ_OP_NAME(WRITE), 248 + REQ_OP_NAME(FLUSH), 249 + REQ_OP_NAME(DISCARD), 250 + REQ_OP_NAME(ZONE_REPORT), 251 + 
REQ_OP_NAME(SECURE_ERASE), 252 + REQ_OP_NAME(ZONE_RESET), 253 + REQ_OP_NAME(WRITE_SAME), 254 + REQ_OP_NAME(WRITE_ZEROES), 255 + REQ_OP_NAME(SCSI_IN), 256 + REQ_OP_NAME(SCSI_OUT), 257 + REQ_OP_NAME(DRV_IN), 258 + REQ_OP_NAME(DRV_OUT), 235 259 }; 260 + #undef REQ_OP_NAME 236 261 262 + #define CMD_FLAG_NAME(name) [__REQ_##name] = #name 237 263 static const char *const cmd_flag_name[] = { 238 - [__REQ_FAILFAST_DEV] = "FAILFAST_DEV", 239 - [__REQ_FAILFAST_TRANSPORT] = "FAILFAST_TRANSPORT", 240 - [__REQ_FAILFAST_DRIVER] = "FAILFAST_DRIVER", 241 - [__REQ_SYNC] = "SYNC", 242 - [__REQ_META] = "META", 243 - [__REQ_PRIO] = "PRIO", 244 - [__REQ_NOMERGE] = "NOMERGE", 245 - [__REQ_IDLE] = "IDLE", 246 - [__REQ_INTEGRITY] = "INTEGRITY", 247 - [__REQ_FUA] = "FUA", 248 - [__REQ_PREFLUSH] = "PREFLUSH", 249 - [__REQ_RAHEAD] = "RAHEAD", 250 - [__REQ_BACKGROUND] = "BACKGROUND", 251 - [__REQ_NR_BITS] = "NR_BITS", 264 + CMD_FLAG_NAME(FAILFAST_DEV), 265 + CMD_FLAG_NAME(FAILFAST_TRANSPORT), 266 + CMD_FLAG_NAME(FAILFAST_DRIVER), 267 + CMD_FLAG_NAME(SYNC), 268 + CMD_FLAG_NAME(META), 269 + CMD_FLAG_NAME(PRIO), 270 + CMD_FLAG_NAME(NOMERGE), 271 + CMD_FLAG_NAME(IDLE), 272 + CMD_FLAG_NAME(INTEGRITY), 273 + CMD_FLAG_NAME(FUA), 274 + CMD_FLAG_NAME(PREFLUSH), 275 + CMD_FLAG_NAME(RAHEAD), 276 + CMD_FLAG_NAME(BACKGROUND), 277 + CMD_FLAG_NAME(NOUNMAP), 252 278 }; 279 + #undef CMD_FLAG_NAME 253 280 281 + #define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name 254 282 static const char *const rqf_name[] = { 255 - [ilog2((__force u32)RQF_SORTED)] = "SORTED", 256 - [ilog2((__force u32)RQF_STARTED)] = "STARTED", 257 - [ilog2((__force u32)RQF_QUEUED)] = "QUEUED", 258 - [ilog2((__force u32)RQF_SOFTBARRIER)] = "SOFTBARRIER", 259 - [ilog2((__force u32)RQF_FLUSH_SEQ)] = "FLUSH_SEQ", 260 - [ilog2((__force u32)RQF_MIXED_MERGE)] = "MIXED_MERGE", 261 - [ilog2((__force u32)RQF_MQ_INFLIGHT)] = "MQ_INFLIGHT", 262 - [ilog2((__force u32)RQF_DONTPREP)] = "DONTPREP", 263 - [ilog2((__force u32)RQF_PREEMPT)] = 
"PREEMPT", 264 - [ilog2((__force u32)RQF_COPY_USER)] = "COPY_USER", 265 - [ilog2((__force u32)RQF_FAILED)] = "FAILED", 266 - [ilog2((__force u32)RQF_QUIET)] = "QUIET", 267 - [ilog2((__force u32)RQF_ELVPRIV)] = "ELVPRIV", 268 - [ilog2((__force u32)RQF_IO_STAT)] = "IO_STAT", 269 - [ilog2((__force u32)RQF_ALLOCED)] = "ALLOCED", 270 - [ilog2((__force u32)RQF_PM)] = "PM", 271 - [ilog2((__force u32)RQF_HASHED)] = "HASHED", 272 - [ilog2((__force u32)RQF_STATS)] = "STATS", 273 - [ilog2((__force u32)RQF_SPECIAL_PAYLOAD)] = "SPECIAL_PAYLOAD", 283 + RQF_NAME(SORTED), 284 + RQF_NAME(STARTED), 285 + RQF_NAME(QUEUED), 286 + RQF_NAME(SOFTBARRIER), 287 + RQF_NAME(FLUSH_SEQ), 288 + RQF_NAME(MIXED_MERGE), 289 + RQF_NAME(MQ_INFLIGHT), 290 + RQF_NAME(DONTPREP), 291 + RQF_NAME(PREEMPT), 292 + RQF_NAME(COPY_USER), 293 + RQF_NAME(FAILED), 294 + RQF_NAME(QUIET), 295 + RQF_NAME(ELVPRIV), 296 + RQF_NAME(IO_STAT), 297 + RQF_NAME(ALLOCED), 298 + RQF_NAME(PM), 299 + RQF_NAME(HASHED), 300 + RQF_NAME(STATS), 301 + RQF_NAME(SPECIAL_PAYLOAD), 274 302 }; 303 + #undef RQF_NAME 275 304 276 - static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v) 305 + int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq) 277 306 { 278 - struct request *rq = list_entry_rq(v); 279 307 const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; 280 308 const unsigned int op = rq->cmd_flags & REQ_OP_MASK; 281 309 ··· 290 332 seq_puts(m, "}\n"); 291 333 return 0; 292 334 } 335 + EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show); 336 + 337 + int blk_mq_debugfs_rq_show(struct seq_file *m, void *v) 338 + { 339 + return __blk_mq_debugfs_rq_show(m, list_entry_rq(v)); 340 + } 341 + EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show); 293 342 294 343 static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos) 295 344 __acquires(&hctx->lock) ··· 329 364 .show = blk_mq_debugfs_rq_show, 330 365 }; 331 366 332 - static int hctx_dispatch_open(struct inode *inode, struct file *file) 367 + static int hctx_ctx_map_show(void 
*data, struct seq_file *m) 333 368 { 334 - return blk_mq_debugfs_seq_open(inode, file, &hctx_dispatch_seq_ops); 335 - } 336 - 337 - static const struct file_operations hctx_dispatch_fops = { 338 - .open = hctx_dispatch_open, 339 - .read = seq_read, 340 - .llseek = seq_lseek, 341 - .release = seq_release, 342 - }; 343 - 344 - static int hctx_ctx_map_show(struct seq_file *m, void *v) 345 - { 346 - struct blk_mq_hw_ctx *hctx = m->private; 369 + struct blk_mq_hw_ctx *hctx = data; 347 370 348 371 sbitmap_bitmap_show(&hctx->ctx_map, m); 349 372 return 0; 350 373 } 351 - 352 - static int hctx_ctx_map_open(struct inode *inode, struct file *file) 353 - { 354 - return single_open(file, hctx_ctx_map_show, inode->i_private); 355 - } 356 - 357 - static const struct file_operations hctx_ctx_map_fops = { 358 - .open = hctx_ctx_map_open, 359 - .read = seq_read, 360 - .llseek = seq_lseek, 361 - .release = single_release, 362 - }; 363 374 364 375 static void blk_mq_debugfs_tags_show(struct seq_file *m, 365 376 struct blk_mq_tags *tags) ··· 354 413 } 355 414 } 356 415 357 - static int hctx_tags_show(struct seq_file *m, void *v) 416 + static int hctx_tags_show(void *data, struct seq_file *m) 358 417 { 359 - struct blk_mq_hw_ctx *hctx = m->private; 418 + struct blk_mq_hw_ctx *hctx = data; 360 419 struct request_queue *q = hctx->queue; 361 420 int res; 362 421 ··· 371 430 return res; 372 431 } 373 432 374 - static int hctx_tags_open(struct inode *inode, struct file *file) 433 + static int hctx_tags_bitmap_show(void *data, struct seq_file *m) 375 434 { 376 - return single_open(file, hctx_tags_show, inode->i_private); 377 - } 378 - 379 - static const struct file_operations hctx_tags_fops = { 380 - .open = hctx_tags_open, 381 - .read = seq_read, 382 - .llseek = seq_lseek, 383 - .release = single_release, 384 - }; 385 - 386 - static int hctx_tags_bitmap_show(struct seq_file *m, void *v) 387 - { 388 - struct blk_mq_hw_ctx *hctx = m->private; 435 + struct blk_mq_hw_ctx *hctx = data; 389 436 
struct request_queue *q = hctx->queue; 390 437 int res; 391 438 ··· 388 459 return res; 389 460 } 390 461 391 - static int hctx_tags_bitmap_open(struct inode *inode, struct file *file) 462 + static int hctx_sched_tags_show(void *data, struct seq_file *m) 392 463 { 393 - return single_open(file, hctx_tags_bitmap_show, inode->i_private); 394 - } 395 - 396 - static const struct file_operations hctx_tags_bitmap_fops = { 397 - .open = hctx_tags_bitmap_open, 398 - .read = seq_read, 399 - .llseek = seq_lseek, 400 - .release = single_release, 401 - }; 402 - 403 - static int hctx_sched_tags_show(struct seq_file *m, void *v) 404 - { 405 - struct blk_mq_hw_ctx *hctx = m->private; 464 + struct blk_mq_hw_ctx *hctx = data; 406 465 struct request_queue *q = hctx->queue; 407 466 int res; 408 467 ··· 405 488 return res; 406 489 } 407 490 408 - static int hctx_sched_tags_open(struct inode *inode, struct file *file) 491 + static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m) 409 492 { 410 - return single_open(file, hctx_sched_tags_show, inode->i_private); 411 - } 412 - 413 - static const struct file_operations hctx_sched_tags_fops = { 414 - .open = hctx_sched_tags_open, 415 - .read = seq_read, 416 - .llseek = seq_lseek, 417 - .release = single_release, 418 - }; 419 - 420 - static int hctx_sched_tags_bitmap_show(struct seq_file *m, void *v) 421 - { 422 - struct blk_mq_hw_ctx *hctx = m->private; 493 + struct blk_mq_hw_ctx *hctx = data; 423 494 struct request_queue *q = hctx->queue; 424 495 int res; 425 496 ··· 422 517 return res; 423 518 } 424 519 425 - static int hctx_sched_tags_bitmap_open(struct inode *inode, struct file *file) 520 + static int hctx_io_poll_show(void *data, struct seq_file *m) 426 521 { 427 - return single_open(file, hctx_sched_tags_bitmap_show, inode->i_private); 428 - } 429 - 430 - static const struct file_operations hctx_sched_tags_bitmap_fops = { 431 - .open = hctx_sched_tags_bitmap_open, 432 - .read = seq_read, 433 - .llseek = seq_lseek, 434 - 
.release = single_release, 435 - }; 436 - 437 - static int hctx_io_poll_show(struct seq_file *m, void *v) 438 - { 439 - struct blk_mq_hw_ctx *hctx = m->private; 522 + struct blk_mq_hw_ctx *hctx = data; 440 523 441 524 seq_printf(m, "considered=%lu\n", hctx->poll_considered); 442 525 seq_printf(m, "invoked=%lu\n", hctx->poll_invoked); ··· 432 539 return 0; 433 540 } 434 541 435 - static int hctx_io_poll_open(struct inode *inode, struct file *file) 436 - { 437 - return single_open(file, hctx_io_poll_show, inode->i_private); 438 - } 439 - 440 - static ssize_t hctx_io_poll_write(struct file *file, const char __user *buf, 542 + static ssize_t hctx_io_poll_write(void *data, const char __user *buf, 441 543 size_t count, loff_t *ppos) 442 544 { 443 - struct seq_file *m = file->private_data; 444 - struct blk_mq_hw_ctx *hctx = m->private; 545 + struct blk_mq_hw_ctx *hctx = data; 445 546 446 547 hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0; 447 548 return count; 448 549 } 449 550 450 - static const struct file_operations hctx_io_poll_fops = { 451 - .open = hctx_io_poll_open, 452 - .read = seq_read, 453 - .write = hctx_io_poll_write, 454 - .llseek = seq_lseek, 455 - .release = single_release, 456 - }; 457 - 458 - static int hctx_dispatched_show(struct seq_file *m, void *v) 551 + static int hctx_dispatched_show(void *data, struct seq_file *m) 459 552 { 460 - struct blk_mq_hw_ctx *hctx = m->private; 553 + struct blk_mq_hw_ctx *hctx = data; 461 554 int i; 462 555 463 556 seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]); ··· 458 579 return 0; 459 580 } 460 581 461 - static int hctx_dispatched_open(struct inode *inode, struct file *file) 462 - { 463 - return single_open(file, hctx_dispatched_show, inode->i_private); 464 - } 465 - 466 - static ssize_t hctx_dispatched_write(struct file *file, const char __user *buf, 582 + static ssize_t hctx_dispatched_write(void *data, const char __user *buf, 467 583 size_t count, loff_t *ppos) 468 584 { 469 - struct 
seq_file *m = file->private_data; 470 - struct blk_mq_hw_ctx *hctx = m->private; 585 + struct blk_mq_hw_ctx *hctx = data; 471 586 int i; 472 587 473 588 for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) ··· 469 596 return count; 470 597 } 471 598 472 - static const struct file_operations hctx_dispatched_fops = { 473 - .open = hctx_dispatched_open, 474 - .read = seq_read, 475 - .write = hctx_dispatched_write, 476 - .llseek = seq_lseek, 477 - .release = single_release, 478 - }; 479 - 480 - static int hctx_queued_show(struct seq_file *m, void *v) 599 + static int hctx_queued_show(void *data, struct seq_file *m) 481 600 { 482 - struct blk_mq_hw_ctx *hctx = m->private; 601 + struct blk_mq_hw_ctx *hctx = data; 483 602 484 603 seq_printf(m, "%lu\n", hctx->queued); 485 604 return 0; 486 605 } 487 606 488 - static int hctx_queued_open(struct inode *inode, struct file *file) 489 - { 490 - return single_open(file, hctx_queued_show, inode->i_private); 491 - } 492 - 493 - static ssize_t hctx_queued_write(struct file *file, const char __user *buf, 607 + static ssize_t hctx_queued_write(void *data, const char __user *buf, 494 608 size_t count, loff_t *ppos) 495 609 { 496 - struct seq_file *m = file->private_data; 497 - struct blk_mq_hw_ctx *hctx = m->private; 610 + struct blk_mq_hw_ctx *hctx = data; 498 611 499 612 hctx->queued = 0; 500 613 return count; 501 614 } 502 615 503 - static const struct file_operations hctx_queued_fops = { 504 - .open = hctx_queued_open, 505 - .read = seq_read, 506 - .write = hctx_queued_write, 507 - .llseek = seq_lseek, 508 - .release = single_release, 509 - }; 510 - 511 - static int hctx_run_show(struct seq_file *m, void *v) 616 + static int hctx_run_show(void *data, struct seq_file *m) 512 617 { 513 - struct blk_mq_hw_ctx *hctx = m->private; 618 + struct blk_mq_hw_ctx *hctx = data; 514 619 515 620 seq_printf(m, "%lu\n", hctx->run); 516 621 return 0; 517 622 } 518 623 519 - static int hctx_run_open(struct inode *inode, struct file *file) 624 + static 
ssize_t hctx_run_write(void *data, const char __user *buf, size_t count, 625 + loff_t *ppos) 520 626 { 521 - return single_open(file, hctx_run_show, inode->i_private); 522 - } 523 - 524 - static ssize_t hctx_run_write(struct file *file, const char __user *buf, 525 - size_t count, loff_t *ppos) 526 - { 527 - struct seq_file *m = file->private_data; 528 - struct blk_mq_hw_ctx *hctx = m->private; 627 + struct blk_mq_hw_ctx *hctx = data; 529 628 530 629 hctx->run = 0; 531 630 return count; 532 631 } 533 632 534 - static const struct file_operations hctx_run_fops = { 535 - .open = hctx_run_open, 536 - .read = seq_read, 537 - .write = hctx_run_write, 538 - .llseek = seq_lseek, 539 - .release = single_release, 540 - }; 541 - 542 - static int hctx_active_show(struct seq_file *m, void *v) 633 + static int hctx_active_show(void *data, struct seq_file *m) 543 634 { 544 - struct blk_mq_hw_ctx *hctx = m->private; 635 + struct blk_mq_hw_ctx *hctx = data; 545 636 546 637 seq_printf(m, "%d\n", atomic_read(&hctx->nr_active)); 547 638 return 0; 548 639 } 549 - 550 - static int hctx_active_open(struct inode *inode, struct file *file) 551 - { 552 - return single_open(file, hctx_active_show, inode->i_private); 553 - } 554 - 555 - static const struct file_operations hctx_active_fops = { 556 - .open = hctx_active_open, 557 - .read = seq_read, 558 - .llseek = seq_lseek, 559 - .release = single_release, 560 - }; 561 640 562 641 static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos) 563 642 __acquires(&ctx->lock) ··· 541 716 .stop = ctx_rq_list_stop, 542 717 .show = blk_mq_debugfs_rq_show, 543 718 }; 544 - 545 - static int ctx_rq_list_open(struct inode *inode, struct file *file) 719 + static int ctx_dispatched_show(void *data, struct seq_file *m) 546 720 { 547 - return blk_mq_debugfs_seq_open(inode, file, &ctx_rq_list_seq_ops); 548 - } 549 - 550 - static const struct file_operations ctx_rq_list_fops = { 551 - .open = ctx_rq_list_open, 552 - .read = seq_read, 553 - .llseek = 
seq_lseek, 554 - .release = seq_release, 555 - }; 556 - 557 - static int ctx_dispatched_show(struct seq_file *m, void *v) 558 - { 559 - struct blk_mq_ctx *ctx = m->private; 721 + struct blk_mq_ctx *ctx = data; 560 722 561 723 seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]); 562 724 return 0; 563 725 } 564 726 565 - static int ctx_dispatched_open(struct inode *inode, struct file *file) 566 - { 567 - return single_open(file, ctx_dispatched_show, inode->i_private); 568 - } 569 - 570 - static ssize_t ctx_dispatched_write(struct file *file, const char __user *buf, 727 + static ssize_t ctx_dispatched_write(void *data, const char __user *buf, 571 728 size_t count, loff_t *ppos) 572 729 { 573 - struct seq_file *m = file->private_data; 574 - struct blk_mq_ctx *ctx = m->private; 730 + struct blk_mq_ctx *ctx = data; 575 731 576 732 ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0; 577 733 return count; 578 734 } 579 735 580 - static const struct file_operations ctx_dispatched_fops = { 581 - .open = ctx_dispatched_open, 582 - .read = seq_read, 583 - .write = ctx_dispatched_write, 584 - .llseek = seq_lseek, 585 - .release = single_release, 586 - }; 587 - 588 - static int ctx_merged_show(struct seq_file *m, void *v) 736 + static int ctx_merged_show(void *data, struct seq_file *m) 589 737 { 590 - struct blk_mq_ctx *ctx = m->private; 738 + struct blk_mq_ctx *ctx = data; 591 739 592 740 seq_printf(m, "%lu\n", ctx->rq_merged); 593 741 return 0; 594 742 } 595 743 596 - static int ctx_merged_open(struct inode *inode, struct file *file) 744 + static ssize_t ctx_merged_write(void *data, const char __user *buf, 745 + size_t count, loff_t *ppos) 597 746 { 598 - return single_open(file, ctx_merged_show, inode->i_private); 599 - } 600 - 601 - static ssize_t ctx_merged_write(struct file *file, const char __user *buf, 602 - size_t count, loff_t *ppos) 603 - { 604 - struct seq_file *m = file->private_data; 605 - struct blk_mq_ctx *ctx = m->private; 747 + struct 
blk_mq_ctx *ctx = data; 606 748 607 749 ctx->rq_merged = 0; 608 750 return count; 609 751 } 610 752 611 - static const struct file_operations ctx_merged_fops = { 612 - .open = ctx_merged_open, 613 - .read = seq_read, 614 - .write = ctx_merged_write, 615 - .llseek = seq_lseek, 616 - .release = single_release, 617 - }; 618 - 619 - static int ctx_completed_show(struct seq_file *m, void *v) 753 + static int ctx_completed_show(void *data, struct seq_file *m) 620 754 { 621 - struct blk_mq_ctx *ctx = m->private; 755 + struct blk_mq_ctx *ctx = data; 622 756 623 757 seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]); 624 758 return 0; 625 759 } 626 760 627 - static int ctx_completed_open(struct inode *inode, struct file *file) 628 - { 629 - return single_open(file, ctx_completed_show, inode->i_private); 630 - } 631 - 632 - static ssize_t ctx_completed_write(struct file *file, const char __user *buf, 761 + static ssize_t ctx_completed_write(void *data, const char __user *buf, 633 762 size_t count, loff_t *ppos) 634 763 { 635 - struct seq_file *m = file->private_data; 636 - struct blk_mq_ctx *ctx = m->private; 764 + struct blk_mq_ctx *ctx = data; 637 765 638 766 ctx->rq_completed[0] = ctx->rq_completed[1] = 0; 639 767 return count; 640 768 } 641 769 642 - static const struct file_operations ctx_completed_fops = { 643 - .open = ctx_completed_open, 770 + static int blk_mq_debugfs_show(struct seq_file *m, void *v) 771 + { 772 + const struct blk_mq_debugfs_attr *attr = m->private; 773 + void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private; 774 + 775 + return attr->show(data, m); 776 + } 777 + 778 + static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf, 779 + size_t count, loff_t *ppos) 780 + { 781 + struct seq_file *m = file->private_data; 782 + const struct blk_mq_debugfs_attr *attr = m->private; 783 + void *data = d_inode(file->f_path.dentry->d_parent)->i_private; 784 + 785 + if (!attr->write) 786 + return -EPERM; 787 + 
788 + return attr->write(data, buf, count, ppos); 789 + } 790 + 791 + static int blk_mq_debugfs_open(struct inode *inode, struct file *file) 792 + { 793 + const struct blk_mq_debugfs_attr *attr = inode->i_private; 794 + void *data = d_inode(file->f_path.dentry->d_parent)->i_private; 795 + struct seq_file *m; 796 + int ret; 797 + 798 + if (attr->seq_ops) { 799 + ret = seq_open(file, attr->seq_ops); 800 + if (!ret) { 801 + m = file->private_data; 802 + m->private = data; 803 + } 804 + return ret; 805 + } 806 + 807 + if (WARN_ON_ONCE(!attr->show)) 808 + return -EPERM; 809 + 810 + return single_open(file, blk_mq_debugfs_show, inode->i_private); 811 + } 812 + 813 + static int blk_mq_debugfs_release(struct inode *inode, struct file *file) 814 + { 815 + const struct blk_mq_debugfs_attr *attr = inode->i_private; 816 + 817 + if (attr->show) 818 + return single_release(inode, file); 819 + else 820 + return seq_release(inode, file); 821 + } 822 + 823 + const struct file_operations blk_mq_debugfs_fops = { 824 + .open = blk_mq_debugfs_open, 644 825 .read = seq_read, 645 - .write = ctx_completed_write, 826 + .write = blk_mq_debugfs_write, 646 827 .llseek = seq_lseek, 647 - .release = single_release, 828 + .release = blk_mq_debugfs_release, 648 829 }; 649 830 650 831 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { 651 - {"poll_stat", 0400, &queue_poll_stat_fops}, 652 - {"state", 0600, &blk_queue_flags_fops}, 832 + {"poll_stat", 0400, queue_poll_stat_show}, 833 + {"state", 0600, queue_state_show, queue_state_write}, 653 834 {}, 654 835 }; 655 836 656 837 static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = { 657 - {"state", 0400, &hctx_state_fops}, 658 - {"flags", 0400, &hctx_flags_fops}, 659 - {"dispatch", 0400, &hctx_dispatch_fops}, 660 - {"ctx_map", 0400, &hctx_ctx_map_fops}, 661 - {"tags", 0400, &hctx_tags_fops}, 662 - {"tags_bitmap", 0400, &hctx_tags_bitmap_fops}, 663 - {"sched_tags", 0400, &hctx_sched_tags_fops}, 664 - 
{"sched_tags_bitmap", 0400, &hctx_sched_tags_bitmap_fops}, 665 - {"io_poll", 0600, &hctx_io_poll_fops}, 666 - {"dispatched", 0600, &hctx_dispatched_fops}, 667 - {"queued", 0600, &hctx_queued_fops}, 668 - {"run", 0600, &hctx_run_fops}, 669 - {"active", 0400, &hctx_active_fops}, 838 + {"state", 0400, hctx_state_show}, 839 + {"flags", 0400, hctx_flags_show}, 840 + {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops}, 841 + {"ctx_map", 0400, hctx_ctx_map_show}, 842 + {"tags", 0400, hctx_tags_show}, 843 + {"tags_bitmap", 0400, hctx_tags_bitmap_show}, 844 + {"sched_tags", 0400, hctx_sched_tags_show}, 845 + {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show}, 846 + {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write}, 847 + {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write}, 848 + {"queued", 0600, hctx_queued_show, hctx_queued_write}, 849 + {"run", 0600, hctx_run_show, hctx_run_write}, 850 + {"active", 0400, hctx_active_show}, 670 851 {}, 671 852 }; 672 853 673 854 static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = { 674 - {"rq_list", 0400, &ctx_rq_list_fops}, 675 - {"dispatched", 0600, &ctx_dispatched_fops}, 676 - {"merged", 0600, &ctx_merged_fops}, 677 - {"completed", 0600, &ctx_completed_fops}, 855 + {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops}, 856 + {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write}, 857 + {"merged", 0600, ctx_merged_show, ctx_merged_write}, 858 + {"completed", 0600, ctx_completed_show, ctx_completed_write}, 678 859 {}, 679 860 }; 680 861 862 + static bool debugfs_create_files(struct dentry *parent, void *data, 863 + const struct blk_mq_debugfs_attr *attr) 864 + { 865 + d_inode(parent)->i_private = data; 866 + 867 + for (; attr->name; attr++) { 868 + if (!debugfs_create_file(attr->name, attr->mode, parent, 869 + (void *)attr, &blk_mq_debugfs_fops)) 870 + return false; 871 + } 872 + return true; 873 + } 874 + 681 875 int blk_mq_debugfs_register(struct request_queue *q) 682 876 { 877 + 
struct blk_mq_hw_ctx *hctx; 878 + int i; 879 + 683 880 if (!blk_debugfs_root) 684 881 return -ENOENT; 685 882 686 883 q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), 687 884 blk_debugfs_root); 688 885 if (!q->debugfs_dir) 886 + return -ENOMEM; 887 + 888 + if (!debugfs_create_files(q->debugfs_dir, q, 889 + blk_mq_debugfs_queue_attrs)) 689 890 goto err; 690 891 691 - if (blk_mq_debugfs_register_mq(q)) 692 - goto err; 892 + /* 893 + * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir 894 + * didn't exist yet (because we don't know what to name the directory 895 + * until the queue is registered to a gendisk). 896 + */ 897 + queue_for_each_hw_ctx(q, hctx, i) { 898 + if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx)) 899 + goto err; 900 + if (q->elevator && !hctx->sched_debugfs_dir && 901 + blk_mq_debugfs_register_sched_hctx(q, hctx)) 902 + goto err; 903 + } 693 904 694 905 return 0; 695 906 ··· 737 876 void blk_mq_debugfs_unregister(struct request_queue *q) 738 877 { 739 878 debugfs_remove_recursive(q->debugfs_dir); 740 - q->mq_debugfs_dir = NULL; 879 + q->sched_debugfs_dir = NULL; 741 880 q->debugfs_dir = NULL; 742 881 } 743 882 744 - static bool debugfs_create_files(struct dentry *parent, void *data, 745 - const struct blk_mq_debugfs_attr *attr) 746 - { 747 - for (; attr->name; attr++) { 748 - if (!debugfs_create_file(attr->name, attr->mode, parent, 749 - data, attr->fops)) 750 - return false; 751 - } 752 - return true; 753 - } 754 - 755 - static int blk_mq_debugfs_register_ctx(struct request_queue *q, 756 - struct blk_mq_ctx *ctx, 757 - struct dentry *hctx_dir) 883 + static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, 884 + struct blk_mq_ctx *ctx) 758 885 { 759 886 struct dentry *ctx_dir; 760 887 char name[20]; 761 888 762 889 snprintf(name, sizeof(name), "cpu%u", ctx->cpu); 763 - ctx_dir = debugfs_create_dir(name, hctx_dir); 890 + ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir); 764 891 if 
(!ctx_dir) 765 892 return -ENOMEM; 766 893 ··· 758 909 return 0; 759 910 } 760 911 761 - static int blk_mq_debugfs_register_hctx(struct request_queue *q, 762 - struct blk_mq_hw_ctx *hctx) 912 + int blk_mq_debugfs_register_hctx(struct request_queue *q, 913 + struct blk_mq_hw_ctx *hctx) 763 914 { 764 915 struct blk_mq_ctx *ctx; 765 - struct dentry *hctx_dir; 766 916 char name[20]; 767 - int i; 768 - 769 - snprintf(name, sizeof(name), "%u", hctx->queue_num); 770 - hctx_dir = debugfs_create_dir(name, q->mq_debugfs_dir); 771 - if (!hctx_dir) 772 - return -ENOMEM; 773 - 774 - if (!debugfs_create_files(hctx_dir, hctx, blk_mq_debugfs_hctx_attrs)) 775 - return -ENOMEM; 776 - 777 - hctx_for_each_ctx(hctx, ctx, i) { 778 - if (blk_mq_debugfs_register_ctx(q, ctx, hctx_dir)) 779 - return -ENOMEM; 780 - } 781 - 782 - return 0; 783 - } 784 - 785 - int blk_mq_debugfs_register_mq(struct request_queue *q) 786 - { 787 - struct blk_mq_hw_ctx *hctx; 788 917 int i; 789 918 790 919 if (!q->debugfs_dir) 791 920 return -ENOENT; 792 921 793 - q->mq_debugfs_dir = debugfs_create_dir("mq", q->debugfs_dir); 794 - if (!q->mq_debugfs_dir) 922 + snprintf(name, sizeof(name), "hctx%u", hctx->queue_num); 923 + hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir); 924 + if (!hctx->debugfs_dir) 925 + return -ENOMEM; 926 + 927 + if (!debugfs_create_files(hctx->debugfs_dir, hctx, 928 + blk_mq_debugfs_hctx_attrs)) 795 929 goto err; 796 930 797 - if (!debugfs_create_files(q->mq_debugfs_dir, q, blk_mq_debugfs_queue_attrs)) 798 - goto err; 799 - 800 - queue_for_each_hw_ctx(q, hctx, i) { 801 - if (blk_mq_debugfs_register_hctx(q, hctx)) 931 + hctx_for_each_ctx(hctx, ctx, i) { 932 + if (blk_mq_debugfs_register_ctx(hctx, ctx)) 802 933 goto err; 803 934 } 804 935 805 936 return 0; 806 937 807 938 err: 808 - blk_mq_debugfs_unregister_mq(q); 939 + blk_mq_debugfs_unregister_hctx(hctx); 809 940 return -ENOMEM; 810 941 } 811 942 812 - void blk_mq_debugfs_unregister_mq(struct request_queue *q) 943 + void 
blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) 813 944 { 814 - debugfs_remove_recursive(q->mq_debugfs_dir); 815 - q->mq_debugfs_dir = NULL; 945 + debugfs_remove_recursive(hctx->debugfs_dir); 946 + hctx->sched_debugfs_dir = NULL; 947 + hctx->debugfs_dir = NULL; 948 + } 949 + 950 + int blk_mq_debugfs_register_hctxs(struct request_queue *q) 951 + { 952 + struct blk_mq_hw_ctx *hctx; 953 + int i; 954 + 955 + queue_for_each_hw_ctx(q, hctx, i) { 956 + if (blk_mq_debugfs_register_hctx(q, hctx)) 957 + return -ENOMEM; 958 + } 959 + 960 + return 0; 961 + } 962 + 963 + void blk_mq_debugfs_unregister_hctxs(struct request_queue *q) 964 + { 965 + struct blk_mq_hw_ctx *hctx; 966 + int i; 967 + 968 + queue_for_each_hw_ctx(q, hctx, i) 969 + blk_mq_debugfs_unregister_hctx(hctx); 970 + } 971 + 972 + int blk_mq_debugfs_register_sched(struct request_queue *q) 973 + { 974 + struct elevator_type *e = q->elevator->type; 975 + 976 + if (!q->debugfs_dir) 977 + return -ENOENT; 978 + 979 + if (!e->queue_debugfs_attrs) 980 + return 0; 981 + 982 + q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir); 983 + if (!q->sched_debugfs_dir) 984 + return -ENOMEM; 985 + 986 + if (!debugfs_create_files(q->sched_debugfs_dir, q, 987 + e->queue_debugfs_attrs)) 988 + goto err; 989 + 990 + return 0; 991 + 992 + err: 993 + blk_mq_debugfs_unregister_sched(q); 994 + return -ENOMEM; 995 + } 996 + 997 + void blk_mq_debugfs_unregister_sched(struct request_queue *q) 998 + { 999 + debugfs_remove_recursive(q->sched_debugfs_dir); 1000 + q->sched_debugfs_dir = NULL; 1001 + } 1002 + 1003 + int blk_mq_debugfs_register_sched_hctx(struct request_queue *q, 1004 + struct blk_mq_hw_ctx *hctx) 1005 + { 1006 + struct elevator_type *e = q->elevator->type; 1007 + 1008 + if (!hctx->debugfs_dir) 1009 + return -ENOENT; 1010 + 1011 + if (!e->hctx_debugfs_attrs) 1012 + return 0; 1013 + 1014 + hctx->sched_debugfs_dir = debugfs_create_dir("sched", 1015 + hctx->debugfs_dir); 1016 + if (!hctx->sched_debugfs_dir) 
1017 + return -ENOMEM; 1018 + 1019 + if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx, 1020 + e->hctx_debugfs_attrs)) 1021 + return -ENOMEM; 1022 + 1023 + return 0; 1024 + } 1025 + 1026 + void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) 1027 + { 1028 + debugfs_remove_recursive(hctx->sched_debugfs_dir); 1029 + hctx->sched_debugfs_dir = NULL; 816 1030 }
+82
block/blk-mq-debugfs.h
··· 1 + #ifndef INT_BLK_MQ_DEBUGFS_H 2 + #define INT_BLK_MQ_DEBUGFS_H 3 + 4 + #ifdef CONFIG_BLK_DEBUG_FS 5 + 6 + #include <linux/seq_file.h> 7 + 8 + struct blk_mq_debugfs_attr { 9 + const char *name; 10 + umode_t mode; 11 + int (*show)(void *, struct seq_file *); 12 + ssize_t (*write)(void *, const char __user *, size_t, loff_t *); 13 + /* Set either .show or .seq_ops. */ 14 + const struct seq_operations *seq_ops; 15 + }; 16 + 17 + int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq); 18 + int blk_mq_debugfs_rq_show(struct seq_file *m, void *v); 19 + 20 + int blk_mq_debugfs_register(struct request_queue *q); 21 + void blk_mq_debugfs_unregister(struct request_queue *q); 22 + int blk_mq_debugfs_register_hctx(struct request_queue *q, 23 + struct blk_mq_hw_ctx *hctx); 24 + void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx); 25 + int blk_mq_debugfs_register_hctxs(struct request_queue *q); 26 + void blk_mq_debugfs_unregister_hctxs(struct request_queue *q); 27 + 28 + int blk_mq_debugfs_register_sched(struct request_queue *q); 29 + void blk_mq_debugfs_unregister_sched(struct request_queue *q); 30 + int blk_mq_debugfs_register_sched_hctx(struct request_queue *q, 31 + struct blk_mq_hw_ctx *hctx); 32 + void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx); 33 + #else 34 + static inline int blk_mq_debugfs_register(struct request_queue *q) 35 + { 36 + return 0; 37 + } 38 + 39 + static inline void blk_mq_debugfs_unregister(struct request_queue *q) 40 + { 41 + } 42 + 43 + static inline int blk_mq_debugfs_register_hctx(struct request_queue *q, 44 + struct blk_mq_hw_ctx *hctx) 45 + { 46 + return 0; 47 + } 48 + 49 + static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) 50 + { 51 + } 52 + 53 + static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q) 54 + { 55 + return 0; 56 + } 57 + 58 + static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q) 59 + { 60 + } 61 + 62 + static inline 
int blk_mq_debugfs_register_sched(struct request_queue *q) 63 + { 64 + return 0; 65 + } 66 + 67 + static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q) 68 + { 69 + } 70 + 71 + static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q, 72 + struct blk_mq_hw_ctx *hctx) 73 + { 74 + return 0; 75 + } 76 + 77 + static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) 78 + { 79 + } 80 + #endif 81 + 82 + #endif
+17 -13
block/blk-mq-sched.c
··· 11 11 12 12 #include "blk.h" 13 13 #include "blk-mq.h" 14 + #include "blk-mq-debugfs.h" 14 15 #include "blk-mq-sched.h" 15 16 #include "blk-mq-tag.h" 16 17 #include "blk-wbt.h" ··· 83 82 if (likely(!data->hctx)) 84 83 data->hctx = blk_mq_map_queue(q, data->ctx->cpu); 85 84 86 - /* 87 - * For a reserved tag, allocate a normal request since we might 88 - * have driver dependencies on the value of the internal tag. 89 - */ 90 - if (e && !(data->flags & BLK_MQ_REQ_RESERVED)) { 85 + if (e) { 91 86 data->flags |= BLK_MQ_REQ_INTERNAL; 92 87 93 88 /* ··· 473 476 } 474 477 } 475 478 479 + blk_mq_debugfs_register_sched_hctx(q, hctx); 480 + 476 481 return 0; 477 482 } 478 483 ··· 485 486 486 487 if (!e) 487 488 return; 489 + 490 + blk_mq_debugfs_unregister_sched_hctx(hctx); 488 491 489 492 if (e->type->ops.mq.exit_hctx && hctx->sched_data) { 490 493 e->type->ops.mq.exit_hctx(hctx, hctx_idx); ··· 524 523 if (ret) 525 524 goto err; 526 525 527 - if (e->ops.mq.init_hctx) { 528 - queue_for_each_hw_ctx(q, hctx, i) { 526 + blk_mq_debugfs_register_sched(q); 527 + 528 + queue_for_each_hw_ctx(q, hctx, i) { 529 + if (e->ops.mq.init_hctx) { 529 530 ret = e->ops.mq.init_hctx(hctx, i); 530 531 if (ret) { 531 532 eq = q->elevator; ··· 536 533 return ret; 537 534 } 538 535 } 536 + blk_mq_debugfs_register_sched_hctx(q, hctx); 539 537 } 540 538 541 539 return 0; ··· 552 548 struct blk_mq_hw_ctx *hctx; 553 549 unsigned int i; 554 550 555 - if (e->type->ops.mq.exit_hctx) { 556 - queue_for_each_hw_ctx(q, hctx, i) { 557 - if (hctx->sched_data) { 558 - e->type->ops.mq.exit_hctx(hctx, i); 559 - hctx->sched_data = NULL; 560 - } 551 + queue_for_each_hw_ctx(q, hctx, i) { 552 + blk_mq_debugfs_unregister_sched_hctx(hctx); 553 + if (e->type->ops.mq.exit_hctx && hctx->sched_data) { 554 + e->type->ops.mq.exit_hctx(hctx, i); 555 + hctx->sched_data = NULL; 561 556 } 562 557 } 558 + blk_mq_debugfs_unregister_sched(q); 563 559 if (e->type->ops.mq.exit_sched) 564 560 e->type->ops.mq.exit_sched(e); 565 561 
blk_mq_sched_tags_teardown(q);
-10
block/blk-mq-sysfs.c
··· 258 258 queue_for_each_hw_ctx(q, hctx, i) 259 259 blk_mq_unregister_hctx(hctx); 260 260 261 - blk_mq_debugfs_unregister_mq(q); 262 - 263 261 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); 264 262 kobject_del(&q->mq_kobj); 265 263 kobject_put(&dev->kobj); ··· 316 318 317 319 kobject_uevent(&q->mq_kobj, KOBJ_ADD); 318 320 319 - blk_mq_debugfs_register(q); 320 - 321 321 queue_for_each_hw_ctx(q, hctx, i) { 322 322 ret = blk_mq_register_hctx(hctx); 323 323 if (ret) ··· 330 334 unreg: 331 335 while (--i >= 0) 332 336 blk_mq_unregister_hctx(q->queue_hw_ctx[i]); 333 - 334 - blk_mq_debugfs_unregister_mq(q); 335 337 336 338 kobject_uevent(&q->mq_kobj, KOBJ_REMOVE); 337 339 kobject_del(&q->mq_kobj); ··· 358 364 if (!q->mq_sysfs_init_done) 359 365 goto unlock; 360 366 361 - blk_mq_debugfs_unregister_mq(q); 362 - 363 367 queue_for_each_hw_ctx(q, hctx, i) 364 368 blk_mq_unregister_hctx(hctx); 365 369 ··· 373 381 mutex_lock(&q->sysfs_lock); 374 382 if (!q->mq_sysfs_init_done) 375 383 goto unlock; 376 - 377 - blk_mq_debugfs_register_mq(q); 378 384 379 385 queue_for_each_hw_ctx(q, hctx, i) { 380 386 ret = blk_mq_register_hctx(hctx);
+33 -21
block/blk-mq.c
··· 31 31 #include <linux/blk-mq.h> 32 32 #include "blk.h" 33 33 #include "blk-mq.h" 34 + #include "blk-mq-debugfs.h" 34 35 #include "blk-mq-tag.h" 35 36 #include "blk-stat.h" 36 37 #include "blk-wbt.h" ··· 42 41 43 42 static void blk_mq_poll_stats_start(struct request_queue *q); 44 43 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); 44 + static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync); 45 45 46 46 static int blk_mq_poll_stats_bkt(const struct request *rq) 47 47 { ··· 168 166 unsigned int i; 169 167 bool rcu = false; 170 168 171 - blk_mq_stop_hw_queues(q); 169 + __blk_mq_stop_hw_queues(q, true); 172 170 173 171 queue_for_each_hw_ctx(q, hctx, i) { 174 172 if (hctx->flags & BLK_MQ_F_BLOCKING) ··· 1220 1218 } 1221 1219 EXPORT_SYMBOL(blk_mq_queue_stopped); 1222 1220 1221 + static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync) 1222 + { 1223 + if (sync) 1224 + cancel_delayed_work_sync(&hctx->run_work); 1225 + else 1226 + cancel_delayed_work(&hctx->run_work); 1227 + 1228 + set_bit(BLK_MQ_S_STOPPED, &hctx->state); 1229 + } 1230 + 1223 1231 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) 1224 1232 { 1225 - cancel_delayed_work_sync(&hctx->run_work); 1226 - set_bit(BLK_MQ_S_STOPPED, &hctx->state); 1233 + __blk_mq_stop_hw_queue(hctx, false); 1227 1234 } 1228 1235 EXPORT_SYMBOL(blk_mq_stop_hw_queue); 1229 1236 1230 - void blk_mq_stop_hw_queues(struct request_queue *q) 1237 + void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync) 1231 1238 { 1232 1239 struct blk_mq_hw_ctx *hctx; 1233 1240 int i; 1234 1241 1235 1242 queue_for_each_hw_ctx(q, hctx, i) 1236 - blk_mq_stop_hw_queue(hctx); 1243 + __blk_mq_stop_hw_queue(hctx, sync); 1244 + } 1245 + 1246 + void blk_mq_stop_hw_queues(struct request_queue *q) 1247 + { 1248 + __blk_mq_stop_hw_queues(q, false); 1237 1249 } 1238 1250 EXPORT_SYMBOL(blk_mq_stop_hw_queues); 1239 1251 ··· 1671 1655 1672 1656 if (!rq) 1673 1657 continue; 1674 - 
set->ops->exit_request(set->driver_data, rq, 1675 - hctx_idx, i); 1658 + set->ops->exit_request(set, rq, hctx_idx); 1676 1659 tags->static_rqs[i] = NULL; 1677 1660 } 1678 1661 } ··· 1802 1787 1803 1788 tags->static_rqs[i] = rq; 1804 1789 if (set->ops->init_request) { 1805 - if (set->ops->init_request(set->driver_data, 1806 - rq, hctx_idx, i, 1790 + if (set->ops->init_request(set, rq, hctx_idx, 1807 1791 node)) { 1808 1792 tags->static_rqs[i] = NULL; 1809 1793 goto fail; ··· 1863 1849 struct blk_mq_tag_set *set, 1864 1850 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) 1865 1851 { 1866 - unsigned flush_start_tag = set->queue_depth; 1852 + blk_mq_debugfs_unregister_hctx(hctx); 1867 1853 1868 1854 blk_mq_tag_idle(hctx); 1869 1855 1870 1856 if (set->ops->exit_request) 1871 - set->ops->exit_request(set->driver_data, 1872 - hctx->fq->flush_rq, hctx_idx, 1873 - flush_start_tag + hctx_idx); 1857 + set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); 1874 1858 1875 1859 blk_mq_sched_exit_hctx(q, hctx, hctx_idx); 1876 1860 ··· 1901 1889 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) 1902 1890 { 1903 1891 int node; 1904 - unsigned flush_start_tag = set->queue_depth; 1905 1892 1906 1893 node = hctx->numa_node; 1907 1894 if (node == NUMA_NO_NODE) ··· 1944 1933 goto sched_exit_hctx; 1945 1934 1946 1935 if (set->ops->init_request && 1947 - set->ops->init_request(set->driver_data, 1948 - hctx->fq->flush_rq, hctx_idx, 1949 - flush_start_tag + hctx_idx, node)) 1936 + set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx, 1937 + node)) 1950 1938 goto free_fq; 1951 1939 1952 1940 if (hctx->flags & BLK_MQ_F_BLOCKING) 1953 1941 init_srcu_struct(&hctx->queue_rq_srcu); 1942 + 1943 + blk_mq_debugfs_register_hctx(q, hctx); 1954 1944 1955 1945 return 0; 1956 1946 ··· 2341 2329 2342 2330 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 2343 2331 2344 - get_online_cpus(); 2345 2332 mutex_lock(&all_q_mutex); 2333 + get_online_cpus(); 2346 2334 2347 2335 list_add_tail(&q->all_q_node, 
&all_q_list); 2348 2336 blk_mq_add_queue_tag_set(set, q); 2349 2337 blk_mq_map_swqueue(q, cpu_online_mask); 2350 2338 2351 - mutex_unlock(&all_q_mutex); 2352 2339 put_online_cpus(); 2340 + mutex_unlock(&all_q_mutex); 2353 2341 2354 2342 if (!(set->flags & BLK_MQ_F_NO_SCHED)) { 2355 2343 int ret; ··· 2390 2378 { 2391 2379 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); 2392 2380 2381 + blk_mq_debugfs_unregister_hctxs(q); 2393 2382 blk_mq_sysfs_unregister(q); 2394 2383 2395 2384 /* ··· 2402 2389 blk_mq_map_swqueue(q, online_mask); 2403 2390 2404 2391 blk_mq_sysfs_register(q); 2392 + blk_mq_debugfs_register_hctxs(q); 2405 2393 } 2406 2394 2407 2395 /* ··· 2631 2617 return -EINVAL; 2632 2618 2633 2619 blk_mq_freeze_queue(q); 2634 - blk_mq_quiesce_queue(q); 2635 2620 2636 2621 ret = 0; 2637 2622 queue_for_each_hw_ctx(q, hctx, i) { ··· 2656 2643 q->nr_requests = nr; 2657 2644 2658 2645 blk_mq_unfreeze_queue(q); 2659 - blk_mq_start_stopped_hw_queues(q, true); 2660 2646 2661 2647 return ret; 2662 2648 }
-28
block/blk-mq.h
··· 83 83 extern void blk_mq_sysfs_unregister(struct request_queue *q); 84 84 extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); 85 85 86 - /* 87 - * debugfs helpers 88 - */ 89 - #ifdef CONFIG_BLK_DEBUG_FS 90 - int blk_mq_debugfs_register(struct request_queue *q); 91 - void blk_mq_debugfs_unregister(struct request_queue *q); 92 - int blk_mq_debugfs_register_mq(struct request_queue *q); 93 - void blk_mq_debugfs_unregister_mq(struct request_queue *q); 94 - #else 95 - static inline int blk_mq_debugfs_register(struct request_queue *q) 96 - { 97 - return 0; 98 - } 99 - 100 - static inline void blk_mq_debugfs_unregister(struct request_queue *q) 101 - { 102 - } 103 - 104 - static inline int blk_mq_debugfs_register_mq(struct request_queue *q) 105 - { 106 - return 0; 107 - } 108 - 109 - static inline void blk_mq_debugfs_unregister_mq(struct request_queue *q) 110 - { 111 - } 112 - #endif 113 - 114 86 extern void blk_mq_rq_timed_out(struct request *req, bool reserved); 115 87 116 88 void blk_mq_release(struct request_queue *q);
+3
block/blk-sysfs.c
··· 13 13 14 14 #include "blk.h" 15 15 #include "blk-mq.h" 16 + #include "blk-mq-debugfs.h" 16 17 #include "blk-wbt.h" 17 18 18 19 struct queue_sysfs_entry { ··· 889 888 890 889 if (q->mq_ops) 891 890 __blk_mq_register_dev(dev, q); 891 + 892 + blk_mq_debugfs_register(q); 892 893 893 894 kobject_uevent(&q->kobj, KOBJ_ADD); 894 895
-16
block/elevator.c
··· 950 950 int ret; 951 951 952 952 blk_mq_freeze_queue(q); 953 - blk_mq_quiesce_queue(q); 954 953 955 954 if (q->elevator) { 956 955 if (q->elevator->registered) ··· 977 978 978 979 out: 979 980 blk_mq_unfreeze_queue(q); 980 - blk_mq_start_stopped_hw_queues(q, true); 981 981 return ret; 982 - 983 982 } 984 983 985 984 /* ··· 1084 1087 1085 1088 return elevator_switch(q, e); 1086 1089 } 1087 - 1088 - int elevator_change(struct request_queue *q, const char *name) 1089 - { 1090 - int ret; 1091 - 1092 - /* Protect q->elevator from elevator_init() */ 1093 - mutex_lock(&q->sysfs_lock); 1094 - ret = __elevator_change(q, name); 1095 - mutex_unlock(&q->sysfs_lock); 1096 - 1097 - return ret; 1098 - } 1099 - EXPORT_SYMBOL(elevator_change); 1100 1090 1101 1091 static inline bool elv_support_iosched(struct request_queue *q) 1102 1092 {
+130
block/kyber-iosched.c
··· 26 26 27 27 #include "blk.h" 28 28 #include "blk-mq.h" 29 + #include "blk-mq-debugfs.h" 29 30 #include "blk-mq-sched.h" 30 31 #include "blk-mq-tag.h" 31 32 #include "blk-stat.h" ··· 684 683 }; 685 684 #undef KYBER_LAT_ATTR 686 685 686 + #ifdef CONFIG_BLK_DEBUG_FS 687 + #define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name) \ 688 + static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \ 689 + { \ 690 + struct request_queue *q = data; \ 691 + struct kyber_queue_data *kqd = q->elevator->elevator_data; \ 692 + \ 693 + sbitmap_queue_show(&kqd->domain_tokens[domain], m); \ 694 + return 0; \ 695 + } \ 696 + \ 697 + static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos) \ 698 + __acquires(&khd->lock) \ 699 + { \ 700 + struct blk_mq_hw_ctx *hctx = m->private; \ 701 + struct kyber_hctx_data *khd = hctx->sched_data; \ 702 + \ 703 + spin_lock(&khd->lock); \ 704 + return seq_list_start(&khd->rqs[domain], *pos); \ 705 + } \ 706 + \ 707 + static void *kyber_##name##_rqs_next(struct seq_file *m, void *v, \ 708 + loff_t *pos) \ 709 + { \ 710 + struct blk_mq_hw_ctx *hctx = m->private; \ 711 + struct kyber_hctx_data *khd = hctx->sched_data; \ 712 + \ 713 + return seq_list_next(v, &khd->rqs[domain], pos); \ 714 + } \ 715 + \ 716 + static void kyber_##name##_rqs_stop(struct seq_file *m, void *v) \ 717 + __releases(&khd->lock) \ 718 + { \ 719 + struct blk_mq_hw_ctx *hctx = m->private; \ 720 + struct kyber_hctx_data *khd = hctx->sched_data; \ 721 + \ 722 + spin_unlock(&khd->lock); \ 723 + } \ 724 + \ 725 + static const struct seq_operations kyber_##name##_rqs_seq_ops = { \ 726 + .start = kyber_##name##_rqs_start, \ 727 + .next = kyber_##name##_rqs_next, \ 728 + .stop = kyber_##name##_rqs_stop, \ 729 + .show = blk_mq_debugfs_rq_show, \ 730 + }; \ 731 + \ 732 + static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \ 733 + { \ 734 + struct blk_mq_hw_ctx *hctx = data; \ 735 + struct kyber_hctx_data *khd = hctx->sched_data; \ 736 + wait_queue_t 
*wait = &khd->domain_wait[domain]; \ 737 + \ 738 + seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \ 739 + return 0; \ 740 + } 741 + KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read) 742 + KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write) 743 + KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other) 744 + #undef KYBER_DEBUGFS_DOMAIN_ATTRS 745 + 746 + static int kyber_async_depth_show(void *data, struct seq_file *m) 747 + { 748 + struct request_queue *q = data; 749 + struct kyber_queue_data *kqd = q->elevator->elevator_data; 750 + 751 + seq_printf(m, "%u\n", kqd->async_depth); 752 + return 0; 753 + } 754 + 755 + static int kyber_cur_domain_show(void *data, struct seq_file *m) 756 + { 757 + struct blk_mq_hw_ctx *hctx = data; 758 + struct kyber_hctx_data *khd = hctx->sched_data; 759 + 760 + switch (khd->cur_domain) { 761 + case KYBER_READ: 762 + seq_puts(m, "READ\n"); 763 + break; 764 + case KYBER_SYNC_WRITE: 765 + seq_puts(m, "SYNC_WRITE\n"); 766 + break; 767 + case KYBER_OTHER: 768 + seq_puts(m, "OTHER\n"); 769 + break; 770 + default: 771 + seq_printf(m, "%u\n", khd->cur_domain); 772 + break; 773 + } 774 + return 0; 775 + } 776 + 777 + static int kyber_batching_show(void *data, struct seq_file *m) 778 + { 779 + struct blk_mq_hw_ctx *hctx = data; 780 + struct kyber_hctx_data *khd = hctx->sched_data; 781 + 782 + seq_printf(m, "%u\n", khd->batching); 783 + return 0; 784 + } 785 + 786 + #define KYBER_QUEUE_DOMAIN_ATTRS(name) \ 787 + {#name "_tokens", 0400, kyber_##name##_tokens_show} 788 + static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = { 789 + KYBER_QUEUE_DOMAIN_ATTRS(read), 790 + KYBER_QUEUE_DOMAIN_ATTRS(sync_write), 791 + KYBER_QUEUE_DOMAIN_ATTRS(other), 792 + {"async_depth", 0400, kyber_async_depth_show}, 793 + {}, 794 + }; 795 + #undef KYBER_QUEUE_DOMAIN_ATTRS 796 + 797 + #define KYBER_HCTX_DOMAIN_ATTRS(name) \ 798 + {#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops}, \ 799 + {#name "_waiting", 0400, 
kyber_##name##_waiting_show} 800 + static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = { 801 + KYBER_HCTX_DOMAIN_ATTRS(read), 802 + KYBER_HCTX_DOMAIN_ATTRS(sync_write), 803 + KYBER_HCTX_DOMAIN_ATTRS(other), 804 + {"cur_domain", 0400, kyber_cur_domain_show}, 805 + {"batching", 0400, kyber_batching_show}, 806 + {}, 807 + }; 808 + #undef KYBER_HCTX_DOMAIN_ATTRS 809 + #endif 810 + 687 811 static struct elevator_type kyber_sched = { 688 812 .ops.mq = { 689 813 .init_sched = kyber_init_sched, ··· 822 696 .has_work = kyber_has_work, 823 697 }, 824 698 .uses_mq = true, 699 + #ifdef CONFIG_BLK_DEBUG_FS 700 + .queue_debugfs_attrs = kyber_queue_debugfs_attrs, 701 + .hctx_debugfs_attrs = kyber_hctx_debugfs_attrs, 702 + #endif 825 703 .elevator_attrs = kyber_sched_attrs, 826 704 .elevator_name = "kyber", 827 705 .elevator_owner = THIS_MODULE,
+123
block/mq-deadline.c
··· 19 19 20 20 #include "blk.h" 21 21 #include "blk-mq.h" 22 + #include "blk-mq-debugfs.h" 22 23 #include "blk-mq-tag.h" 23 24 #include "blk-mq-sched.h" 24 25 ··· 518 517 __ATTR_NULL 519 518 }; 520 519 520 + #ifdef CONFIG_BLK_DEBUG_FS 521 + #define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name) \ 522 + static void *deadline_##name##_fifo_start(struct seq_file *m, \ 523 + loff_t *pos) \ 524 + __acquires(&dd->lock) \ 525 + { \ 526 + struct request_queue *q = m->private; \ 527 + struct deadline_data *dd = q->elevator->elevator_data; \ 528 + \ 529 + spin_lock(&dd->lock); \ 530 + return seq_list_start(&dd->fifo_list[ddir], *pos); \ 531 + } \ 532 + \ 533 + static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \ 534 + loff_t *pos) \ 535 + { \ 536 + struct request_queue *q = m->private; \ 537 + struct deadline_data *dd = q->elevator->elevator_data; \ 538 + \ 539 + return seq_list_next(v, &dd->fifo_list[ddir], pos); \ 540 + } \ 541 + \ 542 + static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \ 543 + __releases(&dd->lock) \ 544 + { \ 545 + struct request_queue *q = m->private; \ 546 + struct deadline_data *dd = q->elevator->elevator_data; \ 547 + \ 548 + spin_unlock(&dd->lock); \ 549 + } \ 550 + \ 551 + static const struct seq_operations deadline_##name##_fifo_seq_ops = { \ 552 + .start = deadline_##name##_fifo_start, \ 553 + .next = deadline_##name##_fifo_next, \ 554 + .stop = deadline_##name##_fifo_stop, \ 555 + .show = blk_mq_debugfs_rq_show, \ 556 + }; \ 557 + \ 558 + static int deadline_##name##_next_rq_show(void *data, \ 559 + struct seq_file *m) \ 560 + { \ 561 + struct request_queue *q = data; \ 562 + struct deadline_data *dd = q->elevator->elevator_data; \ 563 + struct request *rq = dd->next_rq[ddir]; \ 564 + \ 565 + if (rq) \ 566 + __blk_mq_debugfs_rq_show(m, rq); \ 567 + return 0; \ 568 + } 569 + DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read) 570 + DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write) 571 + #undef DEADLINE_DEBUGFS_DDIR_ATTRS 572 + 573 + 
static int deadline_batching_show(void *data, struct seq_file *m) 574 + { 575 + struct request_queue *q = data; 576 + struct deadline_data *dd = q->elevator->elevator_data; 577 + 578 + seq_printf(m, "%u\n", dd->batching); 579 + return 0; 580 + } 581 + 582 + static int deadline_starved_show(void *data, struct seq_file *m) 583 + { 584 + struct request_queue *q = data; 585 + struct deadline_data *dd = q->elevator->elevator_data; 586 + 587 + seq_printf(m, "%u\n", dd->starved); 588 + return 0; 589 + } 590 + 591 + static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos) 592 + __acquires(&dd->lock) 593 + { 594 + struct request_queue *q = m->private; 595 + struct deadline_data *dd = q->elevator->elevator_data; 596 + 597 + spin_lock(&dd->lock); 598 + return seq_list_start(&dd->dispatch, *pos); 599 + } 600 + 601 + static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos) 602 + { 603 + struct request_queue *q = m->private; 604 + struct deadline_data *dd = q->elevator->elevator_data; 605 + 606 + return seq_list_next(v, &dd->dispatch, pos); 607 + } 608 + 609 + static void deadline_dispatch_stop(struct seq_file *m, void *v) 610 + __releases(&dd->lock) 611 + { 612 + struct request_queue *q = m->private; 613 + struct deadline_data *dd = q->elevator->elevator_data; 614 + 615 + spin_unlock(&dd->lock); 616 + } 617 + 618 + static const struct seq_operations deadline_dispatch_seq_ops = { 619 + .start = deadline_dispatch_start, 620 + .next = deadline_dispatch_next, 621 + .stop = deadline_dispatch_stop, 622 + .show = blk_mq_debugfs_rq_show, 623 + }; 624 + 625 + #define DEADLINE_QUEUE_DDIR_ATTRS(name) \ 626 + {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \ 627 + {#name "_next_rq", 0400, deadline_##name##_next_rq_show} 628 + static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = { 629 + DEADLINE_QUEUE_DDIR_ATTRS(read), 630 + DEADLINE_QUEUE_DDIR_ATTRS(write), 631 + {"batching", 0400, deadline_batching_show}, 632 + 
{"starved", 0400, deadline_starved_show}, 633 + {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops}, 634 + {}, 635 + }; 636 + #undef DEADLINE_QUEUE_DDIR_ATTRS 637 + #endif 638 + 521 639 static struct elevator_type mq_deadline = { 522 640 .ops.mq = { 523 641 .insert_requests = dd_insert_requests, ··· 653 533 }, 654 534 655 535 .uses_mq = true, 536 + #ifdef CONFIG_BLK_DEBUG_FS 537 + .queue_debugfs_attrs = deadline_queue_debugfs_attrs, 538 + #endif 656 539 .elevator_attrs = deadline_attrs, 657 540 .elevator_name = "mq-deadline", 658 541 .elevator_owner = THIS_MODULE,
+2 -3
drivers/block/loop.c
··· 1697 1697 loop_handle_cmd(cmd); 1698 1698 } 1699 1699 1700 - static int loop_init_request(void *data, struct request *rq, 1701 - unsigned int hctx_idx, unsigned int request_idx, 1702 - unsigned int numa_node) 1700 + static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq, 1701 + unsigned int hctx_idx, unsigned int numa_node) 1703 1702 { 1704 1703 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); 1705 1704
+160 -328
drivers/block/mtip32xx/mtip32xx.c
··· 195 195 if (mtip_check_surprise_removal(dd->pdev)) 196 196 return NULL; 197 197 198 - rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED); 198 + rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED); 199 199 if (IS_ERR(rq)) 200 200 return NULL; 201 201 ··· 205 205 return blk_mq_rq_to_pdu(rq); 206 206 } 207 207 208 - static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd) 209 - { 210 - blk_put_request(blk_mq_rq_from_pdu(cmd)); 211 - } 212 - 213 - /* 214 - * Once we add support for one hctx per mtip group, this will change a bit 215 - */ 216 - static struct request *mtip_rq_from_tag(struct driver_data *dd, 217 - unsigned int tag) 218 - { 219 - struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; 220 - 221 - return blk_mq_tag_to_rq(hctx->tags, tag); 222 - } 223 - 224 208 static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd, 225 209 unsigned int tag) 226 210 { 227 - struct request *rq = mtip_rq_from_tag(dd, tag); 211 + struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; 228 212 229 - return blk_mq_rq_to_pdu(rq); 230 - } 231 - 232 - /* 233 - * IO completion function. 234 - * 235 - * This completion function is called by the driver ISR when a 236 - * command that was issued by the kernel completes. It first calls the 237 - * asynchronous completion function which normally calls back into the block 238 - * layer passing the asynchronous callback data, then unmaps the 239 - * scatter list associated with the completed command, and finally 240 - * clears the allocated bit associated with the completed command. 241 - * 242 - * @port Pointer to the port data structure. 243 - * @tag Tag of the command. 244 - * @data Pointer to driver_data. 245 - * @status Completion status. 
246 - * 247 - * return value 248 - * None 249 - */ 250 - static void mtip_async_complete(struct mtip_port *port, 251 - int tag, struct mtip_cmd *cmd, int status) 252 - { 253 - struct driver_data *dd = port->dd; 254 - struct request *rq; 255 - 256 - if (unlikely(!dd) || unlikely(!port)) 257 - return; 258 - 259 - if (unlikely(status == PORT_IRQ_TF_ERR)) { 260 - dev_warn(&port->dd->pdev->dev, 261 - "Command tag %d failed due to TFE\n", tag); 262 - } 263 - 264 - rq = mtip_rq_from_tag(dd, tag); 265 - 266 - cmd->status = status; 267 - blk_mq_complete_request(rq); 213 + return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag)); 268 214 } 269 215 270 216 /* ··· 527 581 "%d command(s) %s: tagmap [%s]", cnt, msg, tagmap); 528 582 } 529 583 530 - /* 531 - * Internal command completion callback function. 532 - * 533 - * This function is normally called by the driver ISR when an internal 534 - * command completed. This function signals the command completion by 535 - * calling complete(). 536 - * 537 - * @port Pointer to the port data structure. 538 - * @tag Tag of the command that has completed. 539 - * @data Pointer to a completion structure. 540 - * @status Completion status. 
541 - * 542 - * return value 543 - * None 544 - */ 545 - static void mtip_completion(struct mtip_port *port, 546 - int tag, struct mtip_cmd *command, int status) 547 - { 548 - struct completion *waiting = command->comp_data; 549 - if (unlikely(status == PORT_IRQ_TF_ERR)) 550 - dev_warn(&port->dd->pdev->dev, 551 - "Internal command %d completed with TFE\n", tag); 552 - 553 - command->comp_func = NULL; 554 - command->comp_data = NULL; 555 - complete(waiting); 556 - } 557 - 558 - static void mtip_null_completion(struct mtip_port *port, 559 - int tag, struct mtip_cmd *command, int status) 560 - { 561 - } 562 - 563 584 static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, 564 585 dma_addr_t buffer_dma, unsigned int sectors); 565 586 static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, 566 587 struct smart_attr *attrib); 588 + 589 + static void mtip_complete_command(struct mtip_cmd *cmd, int status) 590 + { 591 + struct request *req = blk_mq_rq_from_pdu(cmd); 592 + 593 + cmd->status = status; 594 + blk_mq_complete_request(req); 595 + } 596 + 567 597 /* 568 598 * Handle an error. 
569 599 * ··· 568 646 if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { 569 647 cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); 570 648 dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n"); 571 - 572 - if (cmd->comp_data && cmd->comp_func) { 573 - cmd->comp_func(port, MTIP_TAG_INTERNAL, 574 - cmd, PORT_IRQ_TF_ERR); 575 - } 649 + mtip_complete_command(cmd, -EIO); 576 650 return; 577 651 } 578 652 ··· 595 677 continue; 596 678 597 679 cmd = mtip_cmd_from_tag(dd, tag); 598 - if (likely(cmd->comp_func)) { 599 - set_bit(tag, tagaccum); 600 - cmd_cnt++; 601 - cmd->comp_func(port, tag, cmd, 0); 602 - } else { 603 - dev_err(&port->dd->pdev->dev, 604 - "Missing completion func for tag %d", 605 - tag); 606 - if (mtip_check_surprise_removal(dd->pdev)) { 607 - /* don't proceed further */ 608 - return; 609 - } 610 - } 680 + mtip_complete_command(cmd, 0); 681 + set_bit(tag, tagaccum); 682 + cmd_cnt++; 611 683 } 612 684 } 613 685 ··· 667 759 tag, 668 760 fail_reason != NULL ? 669 761 fail_reason : "unknown"); 670 - if (cmd->comp_func) { 671 - cmd->comp_func(port, tag, 672 - cmd, -ENODATA); 673 - } 762 + mtip_complete_command(cmd, -ENODATA); 674 763 continue; 675 764 } 676 765 } ··· 690 785 dev_warn(&port->dd->pdev->dev, 691 786 "retiring tag %d\n", tag); 692 787 693 - if (cmd->comp_func) 694 - cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR); 695 - else 696 - dev_warn(&port->dd->pdev->dev, 697 - "Bad completion for tag %d\n", 698 - tag); 788 + mtip_complete_command(cmd, -EIO); 699 789 } 700 790 } 701 791 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt); ··· 723 823 continue; 724 824 725 825 command = mtip_cmd_from_tag(dd, tag); 726 - if (likely(command->comp_func)) 727 - command->comp_func(port, tag, command, 0); 728 - else { 729 - dev_dbg(&dd->pdev->dev, 730 - "Null completion for tag %d", 731 - tag); 732 - 733 - if (mtip_check_surprise_removal( 734 - dd->pdev)) { 735 - return; 736 - } 737 - } 826 + mtip_complete_command(command, 0); 738 827 } 739 828 completed >>= 
1; 740 829 } ··· 741 852 struct mtip_port *port = dd->port; 742 853 struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); 743 854 744 - if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && 745 - (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 746 - & (1 << MTIP_TAG_INTERNAL))) { 747 - if (cmd->comp_func) { 748 - cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0); 749 - return; 750 - } 751 - } 855 + if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && cmd) { 856 + int group = MTIP_TAG_INDEX(MTIP_TAG_INTERNAL); 857 + int status = readl(port->cmd_issue[group]); 752 858 753 - return; 859 + if (!(status & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL)))) 860 + mtip_complete_command(cmd, 0); 861 + } 754 862 } 755 863 756 864 /* ··· 755 869 */ 756 870 static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat) 757 871 { 758 - 759 872 if (unlikely(port_stat & PORT_IRQ_CONNECT)) { 760 873 dev_warn(&dd->pdev->dev, 761 874 "Clearing PxSERR.DIAG.x\n"); ··· 881 996 882 997 static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag) 883 998 { 884 - writel(1 << MTIP_TAG_BIT(tag), 885 - port->cmd_issue[MTIP_TAG_INDEX(tag)]); 999 + writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]); 886 1000 } 887 1001 888 1002 static bool mtip_pause_ncq(struct mtip_port *port, ··· 919 1035 return false; 920 1036 } 921 1037 1038 + static bool mtip_commands_active(struct mtip_port *port) 1039 + { 1040 + unsigned int active; 1041 + unsigned int n; 1042 + 1043 + /* 1044 + * Ignore s_active bit 0 of array element 0. 
1045 + * This bit will always be set 1046 + */ 1047 + active = readl(port->s_active[0]) & 0xFFFFFFFE; 1048 + for (n = 1; n < port->dd->slot_groups; n++) 1049 + active |= readl(port->s_active[n]); 1050 + 1051 + return active != 0; 1052 + } 1053 + 922 1054 /* 923 1055 * Wait for port to quiesce 924 1056 * 925 1057 * @port Pointer to port data structure 926 1058 * @timeout Max duration to wait (ms) 927 - * @atomic gfp_t flag to indicate blockable context or not 928 1059 * 929 1060 * return value 930 1061 * 0 Success 931 1062 * -EBUSY Commands still active 932 1063 */ 933 - static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout, 934 - gfp_t atomic) 1064 + static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) 935 1065 { 936 1066 unsigned long to; 937 - unsigned int n; 938 - unsigned int active = 1; 1067 + bool active = true; 939 1068 940 1069 blk_mq_stop_hw_queues(port->dd->queue); 941 1070 942 1071 to = jiffies + msecs_to_jiffies(timeout); 943 1072 do { 944 1073 if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) && 945 - test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) && 946 - atomic == GFP_KERNEL) { 1074 + test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) { 947 1075 msleep(20); 948 1076 continue; /* svc thd is actively issuing commands */ 949 1077 } 950 1078 951 - if (atomic == GFP_KERNEL) 952 - msleep(100); 953 - else { 954 - cpu_relax(); 955 - udelay(100); 956 - } 1079 + msleep(100); 957 1080 958 1081 if (mtip_check_surprise_removal(port->dd->pdev)) 959 1082 goto err_fault; 960 1083 961 - /* 962 - * Ignore s_active bit 0 of array element 0. 
963 - * This bit will always be set 964 - */ 965 - active = readl(port->s_active[0]) & 0xFFFFFFFE; 966 - for (n = 1; n < port->dd->slot_groups; n++) 967 - active |= readl(port->s_active[n]); 968 - 1084 + active = mtip_commands_active(port); 969 1085 if (!active) 970 1086 break; 971 1087 } while (time_before(jiffies, to)); ··· 976 1092 blk_mq_start_stopped_hw_queues(port->dd->queue, true); 977 1093 return -EFAULT; 978 1094 } 1095 + 1096 + struct mtip_int_cmd { 1097 + int fis_len; 1098 + dma_addr_t buffer; 1099 + int buf_len; 1100 + u32 opts; 1101 + }; 979 1102 980 1103 /* 981 1104 * Execute an internal command and wait for the completion. ··· 1008 1117 dma_addr_t buffer, 1009 1118 int buf_len, 1010 1119 u32 opts, 1011 - gfp_t atomic, 1012 1120 unsigned long timeout) 1013 1121 { 1014 - struct mtip_cmd_sg *command_sg; 1015 - DECLARE_COMPLETION_ONSTACK(wait); 1016 1122 struct mtip_cmd *int_cmd; 1017 1123 struct driver_data *dd = port->dd; 1124 + struct request *rq; 1125 + struct mtip_int_cmd icmd = { 1126 + .fis_len = fis_len, 1127 + .buffer = buffer, 1128 + .buf_len = buf_len, 1129 + .opts = opts 1130 + }; 1018 1131 int rv = 0; 1019 1132 unsigned long start; 1020 1133 ··· 1033 1138 dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n"); 1034 1139 return -EFAULT; 1035 1140 } 1141 + rq = blk_mq_rq_from_pdu(int_cmd); 1142 + rq->special = &icmd; 1036 1143 1037 1144 set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); 1038 1145 ··· 1043 1146 1044 1147 clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); 1045 1148 1046 - if (atomic == GFP_KERNEL) { 1047 - if (fis->command != ATA_CMD_STANDBYNOW1) { 1048 - /* wait for io to complete if non atomic */ 1049 - if (mtip_quiesce_io(port, 1050 - MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) { 1051 - dev_warn(&dd->pdev->dev, 1052 - "Failed to quiesce IO\n"); 1053 - mtip_put_int_command(dd, int_cmd); 1054 - clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); 1055 - wake_up_interruptible(&port->svc_wait); 1056 - return -EBUSY; 1057 - } 1149 + if 
(fis->command != ATA_CMD_STANDBYNOW1) { 1150 + /* wait for io to complete if non atomic */ 1151 + if (mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) { 1152 + dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n"); 1153 + blk_mq_free_request(rq); 1154 + clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); 1155 + wake_up_interruptible(&port->svc_wait); 1156 + return -EBUSY; 1058 1157 } 1059 - 1060 - /* Set the completion function and data for the command. */ 1061 - int_cmd->comp_data = &wait; 1062 - int_cmd->comp_func = mtip_completion; 1063 - 1064 - } else { 1065 - /* Clear completion - we're going to poll */ 1066 - int_cmd->comp_data = NULL; 1067 - int_cmd->comp_func = mtip_null_completion; 1068 1158 } 1069 1159 1070 1160 /* Copy the command to the command table */ 1071 1161 memcpy(int_cmd->command, fis, fis_len*4); 1072 1162 1073 - /* Populate the SG list */ 1074 - int_cmd->command_header->opts = 1075 - __force_bit2int cpu_to_le32(opts | fis_len); 1076 - if (buf_len) { 1077 - command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ; 1078 - 1079 - command_sg->info = 1080 - __force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF); 1081 - command_sg->dba = 1082 - __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF); 1083 - command_sg->dba_upper = 1084 - __force_bit2int cpu_to_le32((buffer >> 16) >> 16); 1085 - 1086 - int_cmd->command_header->opts |= 1087 - __force_bit2int cpu_to_le32((1 << 16)); 1088 - } 1089 - 1090 - /* Populate the command header */ 1091 - int_cmd->command_header->byte_count = 0; 1092 - 1093 1163 start = jiffies; 1164 + rq->timeout = timeout; 1094 1165 1095 - /* Issue the command to the hardware */ 1096 - mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); 1166 + /* insert request and run queue */ 1167 + blk_execute_rq(rq->q, NULL, rq, true); 1097 1168 1098 - if (atomic == GFP_KERNEL) { 1099 - /* Wait for the command to complete or timeout. 
*/ 1100 - if ((rv = wait_for_completion_interruptible_timeout( 1101 - &wait, 1102 - msecs_to_jiffies(timeout))) <= 0) { 1169 + rv = int_cmd->status; 1170 + if (rv < 0) { 1171 + if (rv == -ERESTARTSYS) { /* interrupted */ 1172 + dev_err(&dd->pdev->dev, 1173 + "Internal command [%02X] was interrupted after %u ms\n", 1174 + fis->command, 1175 + jiffies_to_msecs(jiffies - start)); 1176 + rv = -EINTR; 1177 + goto exec_ic_exit; 1178 + } else if (rv == 0) /* timeout */ 1179 + dev_err(&dd->pdev->dev, 1180 + "Internal command did not complete [%02X] within timeout of %lu ms\n", 1181 + fis->command, timeout); 1182 + else 1183 + dev_err(&dd->pdev->dev, 1184 + "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n", 1185 + fis->command, rv, timeout); 1103 1186 1104 - if (rv == -ERESTARTSYS) { /* interrupted */ 1105 - dev_err(&dd->pdev->dev, 1106 - "Internal command [%02X] was interrupted after %u ms\n", 1107 - fis->command, 1108 - jiffies_to_msecs(jiffies - start)); 1109 - rv = -EINTR; 1110 - goto exec_ic_exit; 1111 - } else if (rv == 0) /* timeout */ 1112 - dev_err(&dd->pdev->dev, 1113 - "Internal command did not complete [%02X] within timeout of %lu ms\n", 1114 - fis->command, timeout); 1115 - else 1116 - dev_err(&dd->pdev->dev, 1117 - "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n", 1118 - fis->command, rv, timeout); 1119 - 1120 - if (mtip_check_surprise_removal(dd->pdev) || 1121 - test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 1122 - &dd->dd_flag)) { 1123 - dev_err(&dd->pdev->dev, 1124 - "Internal command [%02X] wait returned due to SR\n", 1125 - fis->command); 1126 - rv = -ENXIO; 1127 - goto exec_ic_exit; 1128 - } 1129 - mtip_device_reset(dd); /* recover from timeout issue */ 1130 - rv = -EAGAIN; 1187 + if (mtip_check_surprise_removal(dd->pdev) || 1188 + test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 1189 + &dd->dd_flag)) { 1190 + dev_err(&dd->pdev->dev, 1191 + "Internal command [%02X] wait returned due to SR\n", 1192 + fis->command); 1193 
+ rv = -ENXIO; 1131 1194 goto exec_ic_exit; 1132 1195 } 1133 - } else { 1134 - u32 hba_stat, port_stat; 1135 - 1136 - /* Spin for <timeout> checking if command still outstanding */ 1137 - timeout = jiffies + msecs_to_jiffies(timeout); 1138 - while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1139 - & (1 << MTIP_TAG_INTERNAL)) 1140 - && time_before(jiffies, timeout)) { 1141 - if (mtip_check_surprise_removal(dd->pdev)) { 1142 - rv = -ENXIO; 1143 - goto exec_ic_exit; 1144 - } 1145 - if ((fis->command != ATA_CMD_STANDBYNOW1) && 1146 - test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 1147 - &dd->dd_flag)) { 1148 - rv = -ENXIO; 1149 - goto exec_ic_exit; 1150 - } 1151 - port_stat = readl(port->mmio + PORT_IRQ_STAT); 1152 - if (!port_stat) 1153 - continue; 1154 - 1155 - if (port_stat & PORT_IRQ_ERR) { 1156 - dev_err(&dd->pdev->dev, 1157 - "Internal command [%02X] failed\n", 1158 - fis->command); 1159 - mtip_device_reset(dd); 1160 - rv = -EIO; 1161 - goto exec_ic_exit; 1162 - } else { 1163 - writel(port_stat, port->mmio + PORT_IRQ_STAT); 1164 - hba_stat = readl(dd->mmio + HOST_IRQ_STAT); 1165 - if (hba_stat) 1166 - writel(hba_stat, 1167 - dd->mmio + HOST_IRQ_STAT); 1168 - } 1169 - break; 1170 - } 1196 + mtip_device_reset(dd); /* recover from timeout issue */ 1197 + rv = -EAGAIN; 1198 + goto exec_ic_exit; 1171 1199 } 1172 1200 1173 - if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1174 - & (1 << MTIP_TAG_INTERNAL)) { 1201 + if (readl(port->cmd_issue[MTIP_TAG_INDEX(MTIP_TAG_INTERNAL)]) 1202 + & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))) { 1175 1203 rv = -ENXIO; 1176 1204 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { 1177 1205 mtip_device_reset(dd); ··· 1105 1283 } 1106 1284 exec_ic_exit: 1107 1285 /* Clear the allocated and active bits for the internal command. 
*/ 1108 - mtip_put_int_command(dd, int_cmd); 1286 + blk_mq_free_request(rq); 1109 1287 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); 1110 1288 if (rv >= 0 && mtip_pause_ncq(port, fis)) { 1111 1289 /* NCQ paused */ ··· 1213 1391 port->identify_dma, 1214 1392 sizeof(u16) * ATA_ID_WORDS, 1215 1393 0, 1216 - GFP_KERNEL, 1217 1394 MTIP_INT_CMD_TIMEOUT_MS) 1218 1395 < 0) { 1219 1396 rv = -1; ··· 1298 1477 0, 1299 1478 0, 1300 1479 0, 1301 - GFP_ATOMIC, 1302 1480 timeout); 1303 1481 dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n", 1304 1482 jiffies_to_msecs(jiffies - start)); ··· 1343 1523 buffer_dma, 1344 1524 sectors * ATA_SECT_SIZE, 1345 1525 0, 1346 - GFP_ATOMIC, 1347 1526 MTIP_INT_CMD_TIMEOUT_MS); 1348 1527 } 1349 1528 ··· 1377 1558 buffer_dma, 1378 1559 ATA_SECT_SIZE, 1379 1560 0, 1380 - GFP_ATOMIC, 1381 1561 15000); 1382 1562 } 1383 1563 ··· 1504 1686 dma_addr, 1505 1687 ATA_SECT_SIZE, 1506 1688 0, 1507 - GFP_KERNEL, 1508 1689 MTIP_TRIM_TIMEOUT_MS) < 0) 1509 1690 rv = -EIO; 1510 1691 ··· 1667 1850 0, 1668 1851 0, 1669 1852 0, 1670 - GFP_KERNEL, 1671 1853 to) < 0) { 1672 1854 return -1; 1673 1855 } ··· 1762 1946 (xfer_sz ? dma_addr : 0), 1763 1947 (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0), 1764 1948 0, 1765 - GFP_KERNEL, 1766 1949 to) 1767 1950 < 0) { 1768 1951 rv = -EFAULT; ··· 2004 2189 dma_buffer, 2005 2190 transfer_size, 2006 2191 0, 2007 - GFP_KERNEL, 2008 2192 timeout) < 0) { 2009 2193 err = -EIO; 2010 2194 goto abort; ··· 2260 2446 (nents << 16) | 5 | AHCI_CMD_PREFETCH); 2261 2447 command->command_header->byte_count = 0; 2262 2448 2263 - /* 2264 - * Set the completion function and data for the command 2265 - * within this layer. 
2266 - */ 2267 - command->comp_data = dd; 2268 - command->comp_func = mtip_async_complete; 2269 2449 command->direction = dma_dir; 2270 2450 2271 2451 /* ··· 3633 3825 return false; 3634 3826 } 3635 3827 3828 + static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx, 3829 + struct request *rq) 3830 + { 3831 + struct driver_data *dd = hctx->queue->queuedata; 3832 + struct mtip_int_cmd *icmd = rq->special; 3833 + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3834 + struct mtip_cmd_sg *command_sg; 3835 + 3836 + if (mtip_commands_active(dd->port)) 3837 + return BLK_MQ_RQ_QUEUE_BUSY; 3838 + 3839 + /* Populate the SG list */ 3840 + cmd->command_header->opts = 3841 + __force_bit2int cpu_to_le32(icmd->opts | icmd->fis_len); 3842 + if (icmd->buf_len) { 3843 + command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ; 3844 + 3845 + command_sg->info = 3846 + __force_bit2int cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF); 3847 + command_sg->dba = 3848 + __force_bit2int cpu_to_le32(icmd->buffer & 0xFFFFFFFF); 3849 + command_sg->dba_upper = 3850 + __force_bit2int cpu_to_le32((icmd->buffer >> 16) >> 16); 3851 + 3852 + cmd->command_header->opts |= 3853 + __force_bit2int cpu_to_le32((1 << 16)); 3854 + } 3855 + 3856 + /* Populate the command header */ 3857 + cmd->command_header->byte_count = 0; 3858 + 3859 + blk_mq_start_request(rq); 3860 + mtip_issue_non_ncq_command(dd->port, rq->tag); 3861 + return BLK_MQ_RQ_QUEUE_OK; 3862 + } 3863 + 3636 3864 static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, 3637 3865 const struct blk_mq_queue_data *bd) 3638 3866 { ··· 3676 3832 int ret; 3677 3833 3678 3834 mtip_init_cmd_header(rq); 3835 + 3836 + if (blk_rq_is_passthrough(rq)) 3837 + return mtip_issue_reserved_cmd(hctx, rq); 3679 3838 3680 3839 if (unlikely(mtip_check_unal_depth(hctx, rq))) 3681 3840 return BLK_MQ_RQ_QUEUE_BUSY; ··· 3692 3845 return BLK_MQ_RQ_QUEUE_ERROR; 3693 3846 } 3694 3847 3695 - static void mtip_free_cmd(void *data, struct request *rq, 3696 - unsigned int hctx_idx, unsigned int 
request_idx) 3848 + static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq, 3849 + unsigned int hctx_idx) 3697 3850 { 3698 - struct driver_data *dd = data; 3851 + struct driver_data *dd = set->driver_data; 3699 3852 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3700 3853 3701 3854 if (!cmd->command) ··· 3705 3858 cmd->command, cmd->command_dma); 3706 3859 } 3707 3860 3708 - static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx, 3709 - unsigned int request_idx, unsigned int numa_node) 3861 + static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq, 3862 + unsigned int hctx_idx, unsigned int numa_node) 3710 3863 { 3711 - struct driver_data *dd = data; 3864 + struct driver_data *dd = set->driver_data; 3712 3865 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3713 - 3714 - /* 3715 - * For flush requests, request_idx starts at the end of the 3716 - * tag space. Since we don't support FLUSH/FUA, simply return 3717 - * 0 as there's nothing to be done. 
3718 - */ 3719 - if (request_idx >= MTIP_MAX_COMMAND_SLOTS) 3720 - return 0; 3721 3866 3722 3867 cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, 3723 3868 &cmd->command_dma, GFP_KERNEL); ··· 3727 3888 { 3728 3889 struct driver_data *dd = req->q->queuedata; 3729 3890 3730 - if (reserved) 3731 - goto exit_handler; 3891 + if (reserved) { 3892 + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); 3893 + 3894 + cmd->status = -ETIME; 3895 + return BLK_EH_HANDLED; 3896 + } 3732 3897 3733 3898 if (test_bit(req->tag, dd->port->cmds_to_issue)) 3734 3899 goto exit_handler; ··· 3825 3982 dd->tags.reserved_tags = 1; 3826 3983 dd->tags.cmd_size = sizeof(struct mtip_cmd); 3827 3984 dd->tags.numa_node = dd->numa_node; 3828 - dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED; 3985 + dd->tags.flags = BLK_MQ_F_SHOULD_MERGE; 3829 3986 dd->tags.driver_data = dd; 3830 3987 dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS; 3831 3988 ··· 3959 4116 3960 4117 static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv) 3961 4118 { 3962 - struct driver_data *dd = (struct driver_data *)data; 3963 - struct mtip_cmd *cmd; 4119 + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3964 4120 3965 - if (likely(!reserv)) { 3966 - cmd = blk_mq_rq_to_pdu(rq); 3967 - cmd->status = -ENODEV; 3968 - blk_mq_complete_request(rq); 3969 - } else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) { 3970 - 3971 - cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); 3972 - if (cmd->comp_func) 3973 - cmd->comp_func(dd->port, MTIP_TAG_INTERNAL, 3974 - cmd, -ENODEV); 3975 - } 4121 + cmd->status = -ENODEV; 4122 + blk_mq_complete_request(rq); 3976 4123 } 3977 4124 3978 4125 /* ··· 4001 4168 * Explicitly wait here for IOs to quiesce, 4002 4169 * as mtip_standby_drive usually won't wait for IOs. 
4003 4170 */ 4004 - if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS, 4005 - GFP_KERNEL)) 4171 + if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS)) 4006 4172 mtip_standby_drive(dd); 4007 4173 } 4008 4174 else
-10
drivers/block/mtip32xx/mtip32xx.h
··· 333 333 334 334 dma_addr_t command_dma; /* corresponding physical address */ 335 335 336 - void *comp_data; /* data passed to completion function comp_func() */ 337 - /* 338 - * Completion function called by the ISR upon completion of 339 - * a command. 340 - */ 341 - void (*comp_func)(struct mtip_port *port, 342 - int tag, 343 - struct mtip_cmd *cmd, 344 - int status); 345 - 346 336 int scatter_ents; /* Number of scatter list entries used */ 347 337 348 338 int unaligned; /* command is unaligned on 4k boundary */
+3 -4
drivers/block/nbd.c
··· 1396 1396 1397 1397 #endif 1398 1398 1399 - static int nbd_init_request(void *data, struct request *rq, 1400 - unsigned int hctx_idx, unsigned int request_idx, 1401 - unsigned int numa_node) 1399 + static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq, 1400 + unsigned int hctx_idx, unsigned int numa_node) 1402 1401 { 1403 1402 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); 1404 - cmd->nbd = data; 1403 + cmd->nbd = set->driver_data; 1405 1404 return 0; 1406 1405 } 1407 1406
+2 -3
drivers/block/rbd.c
··· 4307 4307 return ret; 4308 4308 } 4309 4309 4310 - static int rbd_init_request(void *data, struct request *rq, 4311 - unsigned int hctx_idx, unsigned int request_idx, 4312 - unsigned int numa_node) 4310 + static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq, 4311 + unsigned int hctx_idx, unsigned int numa_node) 4313 4312 { 4314 4313 struct work_struct *work = blk_mq_rq_to_pdu(rq); 4315 4314
+3 -4
drivers/block/virtio_blk.c
··· 573 573 __ATTR(cache_type, S_IRUGO|S_IWUSR, 574 574 virtblk_cache_type_show, virtblk_cache_type_store); 575 575 576 - static int virtblk_init_request(void *data, struct request *rq, 577 - unsigned int hctx_idx, unsigned int request_idx, 578 - unsigned int numa_node) 576 + static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq, 577 + unsigned int hctx_idx, unsigned int numa_node) 579 578 { 580 - struct virtio_blk *vblk = data; 579 + struct virtio_blk *vblk = set->driver_data; 581 580 struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); 582 581 583 582 #ifdef CONFIG_VIRTIO_BLK_SCSI
+2 -2
drivers/lightnvm/core.c
··· 74 74 75 75 return 0; 76 76 err: 77 - while (--i > lun_begin) 77 + while (--i >= lun_begin) 78 78 clear_bit(i, dev->lun_map); 79 79 80 80 return -EBUSY; ··· 211 211 212 212 return tgt_dev; 213 213 err_ch: 214 - while (--i > 0) 214 + while (--i >= 0) 215 215 kfree(dev_map->chnls[i].lun_offs); 216 216 kfree(luns); 217 217 err_luns:
+3 -4
drivers/md/dm-rq.c
··· 720 720 return 0; 721 721 } 722 722 723 - static int dm_mq_init_request(void *data, struct request *rq, 724 - unsigned int hctx_idx, unsigned int request_idx, 725 - unsigned int numa_node) 723 + static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, 724 + unsigned int hctx_idx, unsigned int numa_node) 726 725 { 727 - return __dm_rq_init_rq(data, rq); 726 + return __dm_rq_init_rq(set->driver_data, rq); 728 727 } 729 728 730 729 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+3 -4
drivers/mtd/ubi/block.c
··· 334 334 335 335 } 336 336 337 - static int ubiblock_init_request(void *data, struct request *req, 338 - unsigned int hctx_idx, 339 - unsigned int request_idx, 340 - unsigned int numa_node) 337 + static int ubiblock_init_request(struct blk_mq_tag_set *set, 338 + struct request *req, unsigned int hctx_idx, 339 + unsigned int numa_node) 341 340 { 342 341 struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); 343 342
+9 -11
drivers/nvme/host/fc.c
··· 1172 1172 } 1173 1173 1174 1174 static void 1175 - nvme_fc_exit_request(void *data, struct request *rq, 1176 - unsigned int hctx_idx, unsigned int rq_idx) 1175 + nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, 1176 + unsigned int hctx_idx) 1177 1177 { 1178 1178 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 1179 1179 1180 - return __nvme_fc_exit_request(data, op); 1180 + return __nvme_fc_exit_request(set->driver_data, op); 1181 1181 } 1182 1182 1183 1183 static int ··· 1434 1434 } 1435 1435 1436 1436 static int 1437 - nvme_fc_init_request(void *data, struct request *rq, 1438 - unsigned int hctx_idx, unsigned int rq_idx, 1439 - unsigned int numa_node) 1437 + nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, 1438 + unsigned int hctx_idx, unsigned int numa_node) 1440 1439 { 1441 - struct nvme_fc_ctrl *ctrl = data; 1440 + struct nvme_fc_ctrl *ctrl = set->driver_data; 1442 1441 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 1443 1442 struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1]; 1444 1443 ··· 1445 1446 } 1446 1447 1447 1448 static int 1448 - nvme_fc_init_admin_request(void *data, struct request *rq, 1449 - unsigned int hctx_idx, unsigned int rq_idx, 1450 - unsigned int numa_node) 1449 + nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq, 1450 + unsigned int hctx_idx, unsigned int numa_node) 1451 1451 { 1452 - struct nvme_fc_ctrl *ctrl = data; 1452 + struct nvme_fc_ctrl *ctrl = set->driver_data; 1453 1453 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 1454 1454 struct nvme_fc_queue *queue = &ctrl->queues[0]; 1455 1455
+2 -2
drivers/nvme/host/lightnvm.c
··· 503 503 if (!cmd) 504 504 return -ENOMEM; 505 505 506 + nvme_nvm_rqtocmd(rq, rqd, ns, cmd); 507 + 506 508 rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY); 507 509 if (IS_ERR(rq)) { 508 510 kfree(cmd); ··· 518 516 rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); 519 517 rq->__data_len = 0; 520 518 } 521 - 522 - nvme_nvm_rqtocmd(rq, rqd, ns, cmd); 523 519 524 520 rq->end_io_data = rqd; 525 521
+7 -8
drivers/nvme/host/pci.c
··· 356 356 nvmeq->tags = NULL; 357 357 } 358 358 359 - static int nvme_admin_init_request(void *data, struct request *req, 360 - unsigned int hctx_idx, unsigned int rq_idx, 361 - unsigned int numa_node) 359 + static int nvme_admin_init_request(struct blk_mq_tag_set *set, 360 + struct request *req, unsigned int hctx_idx, 361 + unsigned int numa_node) 362 362 { 363 - struct nvme_dev *dev = data; 363 + struct nvme_dev *dev = set->driver_data; 364 364 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 365 365 struct nvme_queue *nvmeq = dev->queues[0]; 366 366 ··· 383 383 return 0; 384 384 } 385 385 386 - static int nvme_init_request(void *data, struct request *req, 387 - unsigned int hctx_idx, unsigned int rq_idx, 388 - unsigned int numa_node) 386 + static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req, 387 + unsigned int hctx_idx, unsigned int numa_node) 389 388 { 390 - struct nvme_dev *dev = data; 389 + struct nvme_dev *dev = set->driver_data; 391 390 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 392 391 struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; 393 392
+14 -14
drivers/nvme/host/rdma.c
··· 315 315 DMA_TO_DEVICE); 316 316 } 317 317 318 - static void nvme_rdma_exit_request(void *data, struct request *rq, 319 - unsigned int hctx_idx, unsigned int rq_idx) 318 + static void nvme_rdma_exit_request(struct blk_mq_tag_set *set, 319 + struct request *rq, unsigned int hctx_idx) 320 320 { 321 - return __nvme_rdma_exit_request(data, rq, hctx_idx + 1); 321 + return __nvme_rdma_exit_request(set->driver_data, rq, hctx_idx + 1); 322 322 } 323 323 324 - static void nvme_rdma_exit_admin_request(void *data, struct request *rq, 325 - unsigned int hctx_idx, unsigned int rq_idx) 324 + static void nvme_rdma_exit_admin_request(struct blk_mq_tag_set *set, 325 + struct request *rq, unsigned int hctx_idx) 326 326 { 327 - return __nvme_rdma_exit_request(data, rq, 0); 327 + return __nvme_rdma_exit_request(set->driver_data, rq, 0); 328 328 } 329 329 330 330 static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl, ··· 358 358 return -ENOMEM; 359 359 } 360 360 361 - static int nvme_rdma_init_request(void *data, struct request *rq, 362 - unsigned int hctx_idx, unsigned int rq_idx, 363 - unsigned int numa_node) 361 + static int nvme_rdma_init_request(struct blk_mq_tag_set *set, 362 + struct request *rq, unsigned int hctx_idx, 363 + unsigned int numa_node) 364 364 { 365 - return __nvme_rdma_init_request(data, rq, hctx_idx + 1); 365 + return __nvme_rdma_init_request(set->driver_data, rq, hctx_idx + 1); 366 366 } 367 367 368 - static int nvme_rdma_init_admin_request(void *data, struct request *rq, 369 - unsigned int hctx_idx, unsigned int rq_idx, 370 - unsigned int numa_node) 368 + static int nvme_rdma_init_admin_request(struct blk_mq_tag_set *set, 369 + struct request *rq, unsigned int hctx_idx, 370 + unsigned int numa_node) 371 371 { 372 - return __nvme_rdma_init_request(data, rq, 0); 372 + return __nvme_rdma_init_request(set->driver_data, rq, 0); 373 373 } 374 374 375 375 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+9 -8
drivers/nvme/target/loop.c
··· 230 230 return 0; 231 231 } 232 232 233 - static int nvme_loop_init_request(void *data, struct request *req, 234 - unsigned int hctx_idx, unsigned int rq_idx, 235 - unsigned int numa_node) 233 + static int nvme_loop_init_request(struct blk_mq_tag_set *set, 234 + struct request *req, unsigned int hctx_idx, 235 + unsigned int numa_node) 236 236 { 237 - return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1); 237 + return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 238 + hctx_idx + 1); 238 239 } 239 240 240 - static int nvme_loop_init_admin_request(void *data, struct request *req, 241 - unsigned int hctx_idx, unsigned int rq_idx, 242 - unsigned int numa_node) 241 + static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set, 242 + struct request *req, unsigned int hctx_idx, 243 + unsigned int numa_node) 243 244 { 244 - return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0); 245 + return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0); 245 246 } 246 247 247 248 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+6 -7
drivers/scsi/scsi_lib.c
··· 1999 1999 return scsi_times_out(req); 2000 2000 } 2001 2001 2002 - static int scsi_init_request(void *data, struct request *rq, 2003 - unsigned int hctx_idx, unsigned int request_idx, 2004 - unsigned int numa_node) 2002 + static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq, 2003 + unsigned int hctx_idx, unsigned int numa_node) 2005 2004 { 2006 - struct Scsi_Host *shost = data; 2005 + struct Scsi_Host *shost = set->driver_data; 2007 2006 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2008 2007 2009 2008 cmd->sense_buffer = ··· 2013 2014 return 0; 2014 2015 } 2015 2016 2016 - static void scsi_exit_request(void *data, struct request *rq, 2017 - unsigned int hctx_idx, unsigned int request_idx) 2017 + static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq, 2018 + unsigned int hctx_idx) 2018 2019 { 2019 - struct Scsi_Host *shost = data; 2020 + struct Scsi_Host *shost = set->driver_data; 2020 2021 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 2021 2022 2022 2023 scsi_free_sense_buffer(shost, cmd->sense_buffer);
+3 -3
fs/nfs/internal.h
··· 139 139 }; 140 140 141 141 struct nfs_mount_info { 142 - int (*fill_super)(struct super_block *, struct nfs_mount_info *); 142 + void (*fill_super)(struct super_block *, struct nfs_mount_info *); 143 143 int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *); 144 144 struct nfs_parsed_mount_data *parsed; 145 145 struct nfs_clone_mount *cloned; ··· 407 407 struct dentry * nfs_xdev_mount_common(struct file_system_type *, int, 408 408 const char *, struct nfs_mount_info *); 409 409 void nfs_kill_super(struct super_block *); 410 - int nfs_fill_super(struct super_block *, struct nfs_mount_info *); 410 + void nfs_fill_super(struct super_block *, struct nfs_mount_info *); 411 411 412 412 extern struct rpc_stat nfs_rpcstat; 413 413 ··· 458 458 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); 459 459 460 460 /* super.c */ 461 - int nfs_clone_super(struct super_block *, struct nfs_mount_info *); 461 + void nfs_clone_super(struct super_block *, struct nfs_mount_info *); 462 462 void nfs_umount_begin(struct super_block *); 463 463 int nfs_statfs(struct dentry *, struct kstatfs *); 464 464 int nfs_show_options(struct seq_file *, struct dentry *);
+10 -18
fs/nfs/super.c
··· 2321 2321 /* 2322 2322 * Finish setting up an NFS2/3 superblock 2323 2323 */ 2324 - int nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info) 2324 + void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info) 2325 2325 { 2326 2326 struct nfs_parsed_mount_data *data = mount_info->parsed; 2327 2327 struct nfs_server *server = NFS_SB(sb); 2328 - int ret; 2329 2328 2330 2329 sb->s_blocksize_bits = 0; 2331 2330 sb->s_blocksize = 0; ··· 2342 2343 } 2343 2344 2344 2345 nfs_initialise_sb(sb); 2345 - 2346 - ret = super_setup_bdi_name(sb, "%u:%u", MAJOR(server->s_dev), 2347 - MINOR(server->s_dev)); 2348 - if (ret) 2349 - return ret; 2350 - sb->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD; 2351 - return 0; 2352 - 2353 2346 } 2354 2347 EXPORT_SYMBOL_GPL(nfs_fill_super); 2355 2348 2356 2349 /* 2357 2350 * Finish setting up a cloned NFS2/3/4 superblock 2358 2351 */ 2359 - int nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info) 2352 + void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info) 2360 2353 { 2361 2354 const struct super_block *old_sb = mount_info->cloned->sb; 2362 2355 struct nfs_server *server = NFS_SB(sb); ··· 2368 2377 } 2369 2378 2370 2379 nfs_initialise_sb(sb); 2371 - 2372 - sb->s_bdi = bdi_get(old_sb->s_bdi); 2373 - 2374 - return 0; 2375 2380 } 2376 2381 2377 2382 static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags) ··· 2587 2600 nfs_free_server(server); 2588 2601 server = NULL; 2589 2602 } else { 2603 + error = super_setup_bdi_name(s, "%u:%u", MAJOR(server->s_dev), 2604 + MINOR(server->s_dev)); 2605 + if (error) { 2606 + mntroot = ERR_PTR(error); 2607 + goto error_splat_super; 2608 + } 2609 + s->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD; 2590 2610 server->super = s; 2591 2611 } 2592 2612 2593 2613 if (!s->s_root) { 2594 2614 /* initial superblock/root creation */ 2595 - error = mount_info->fill_super(s, 
mount_info); 2596 - if (error) 2597 - goto error_splat_super; 2615 + mount_info->fill_super(s, mount_info); 2598 2616 nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned); 2599 2617 } 2600 2618
+7 -2
include/linux/blk-mq.h
··· 57 57 unsigned long poll_considered; 58 58 unsigned long poll_invoked; 59 59 unsigned long poll_success; 60 + 61 + #ifdef CONFIG_BLK_DEBUG_FS 62 + struct dentry *debugfs_dir; 63 + struct dentry *sched_debugfs_dir; 64 + #endif 60 65 }; 61 66 62 67 struct blk_mq_tag_set { ··· 91 86 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); 92 87 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); 93 88 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); 94 - typedef int (init_request_fn)(void *, struct request *, unsigned int, 89 + typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *, 95 90 unsigned int, unsigned int); 96 - typedef void (exit_request_fn)(void *, struct request *, unsigned int, 91 + typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *, 97 92 unsigned int); 98 93 typedef int (reinit_request_fn)(void *, struct request *); 99 94
+1 -1
include/linux/blkdev.h
··· 579 579 580 580 #ifdef CONFIG_BLK_DEBUG_FS 581 581 struct dentry *debugfs_dir; 582 - struct dentry *mq_debugfs_dir; 582 + struct dentry *sched_debugfs_dir; 583 583 #endif 584 584 585 585 bool mq_sysfs_init_done;
+7 -1
include/linux/elevator.h
··· 8 8 9 9 struct io_cq; 10 10 struct elevator_type; 11 + #ifdef CONFIG_BLK_DEBUG_FS 12 + struct blk_mq_debugfs_attr; 13 + #endif 11 14 12 15 /* 13 16 * Return values from elevator merger ··· 147 144 char elevator_name[ELV_NAME_MAX]; 148 145 struct module *elevator_owner; 149 146 bool uses_mq; 147 + #ifdef CONFIG_BLK_DEBUG_FS 148 + const struct blk_mq_debugfs_attr *queue_debugfs_attrs; 149 + const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; 150 + #endif 150 151 151 152 /* managed by elevator core */ 152 153 char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ ··· 221 214 222 215 extern int elevator_init(struct request_queue *, char *); 223 216 extern void elevator_exit(struct request_queue *, struct elevator_queue *); 224 - extern int elevator_change(struct request_queue *, const char *); 225 217 extern bool elv_bio_merge_ok(struct request *, struct bio *); 226 218 extern struct elevator_queue *elevator_alloc(struct request_queue *, 227 219 struct elevator_type *);