Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

bsg: convert to use blk-mq

Requires a few changes to the FC transport class as well.

Cc: linux-scsi@vger.kernel.org
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Tested-by: Benjamin Block <bblock@linux.vnet.ibm.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
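The heart of the conversion: a legacy driver supplies a ->request_fn and pulls requests off the queue itself under queue_lock, while a blk-mq driver fills in a struct blk_mq_tag_set and is handed one request at a time through its ->queue_rq hook. A minimal sketch of that shape (hypothetical demo_* names, not code from this patch):

#include <linux/blk-mq.h>

static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                  const struct blk_mq_queue_data *bd)
{
        struct request *req = bd->rq;

        /* mark the request in-flight before handing it off */
        blk_mq_start_request(req);

        /* dispatch here; completion comes later via blk_mq_complete_request() */
        return BLK_STS_OK;
}

static const struct blk_mq_ops demo_mq_ops = {
        .queue_rq = demo_queue_rq,
};

A tag set carrying these ops is allocated with blk_mq_alloc_tag_set() and turned into a queue with blk_mq_init_queue(), which is exactly what bsg_setup_queue() does in the diff below.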

2 files changed: +108 -70

block/bsg-lib.c (+73 -46)
···
  *
  */
 #include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
 #include <linux/bsg-lib.h>
···
         kfree(job->request_payload.sg_list);
         kfree(job->reply_payload.sg_list);
 
-        blk_end_request_all(rq, BLK_STS_OK);
+        blk_mq_end_request(rq, BLK_STS_OK);
 }
 
 void bsg_job_put(struct bsg_job *job)
···
 {
         job->result = result;
         job->reply_payload_rcv_len = reply_payload_rcv_len;
-        blk_complete_request(blk_mq_rq_from_pdu(job));
+        blk_mq_complete_request(blk_mq_rq_from_pdu(job));
 }
 EXPORT_SYMBOL_GPL(bsg_job_done);
 
 /**
- * bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * bsg_complete - softirq done routine for destroying the bsg requests
  * @rq: BSG request that holds the job to be destroyed
  */
-static void bsg_softirq_done(struct request *rq)
+static void bsg_complete(struct request *rq)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(rq);
···
 }
 
 /**
- * bsg_request_fn - generic handler for bsg requests
- * @q: request queue to manage
+ * bsg_queue_rq - generic handler for bsg requests
+ * @hctx: hardware queue
+ * @bd: queue data
  *
  * On error the create_bsg_job function should return a -Exyz error value
  * that will be set to ->result.
  *
  * Drivers/subsys should pass this to the queue init function.
  */
-static void bsg_request_fn(struct request_queue *q)
-        __releases(q->queue_lock)
-        __acquires(q->queue_lock)
+static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
+                                 const struct blk_mq_queue_data *bd)
 {
+        struct request_queue *q = hctx->queue;
         struct device *dev = q->queuedata;
-        struct request *req;
+        struct request *req = bd->rq;
         int ret;
 
+        blk_mq_start_request(req);
+
         if (!get_device(dev))
-                return;
+                return BLK_STS_IOERR;
 
-        while (1) {
-                req = blk_fetch_request(q);
-                if (!req)
-                        break;
-                spin_unlock_irq(q->queue_lock);
+        if (!bsg_prepare_job(dev, req))
+                return BLK_STS_IOERR;
 
-                if (!bsg_prepare_job(dev, req)) {
-                        blk_end_request_all(req, BLK_STS_OK);
-                        spin_lock_irq(q->queue_lock);
-                        continue;
-                }
+        ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
+        if (ret)
+                return BLK_STS_IOERR;
 
-                ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
-                spin_lock_irq(q->queue_lock);
-                if (ret)
-                        break;
-        }
-
-        spin_unlock_irq(q->queue_lock);
         put_device(dev);
-        spin_lock_irq(q->queue_lock);
+        return BLK_STS_OK;
 }
 
 /* called right after the request is allocated for the request_queue */
-static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
+                       unsigned int hctx_idx, unsigned int numa_node)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
 
-        job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+        job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
         if (!job->reply)
                 return -ENOMEM;
         return 0;
···
         job->dd_data = job + 1;
 }
 
-static void bsg_exit_rq(struct request_queue *q, struct request *req)
+static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
+                unsigned int hctx_idx)
 {
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
 
···
 void bsg_remove_queue(struct request_queue *q)
 {
         if (q) {
+                struct blk_mq_tag_set *set = q->tag_set;
+
                 bsg_unregister_queue(q);
                 blk_cleanup_queue(q);
+                blk_mq_free_tag_set(set);
+                kfree(set);
         }
 }
 EXPORT_SYMBOL_GPL(bsg_remove_queue);
+
+static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
+{
+        enum blk_eh_timer_return ret = BLK_EH_DONE;
+        struct request_queue *q = rq->q;
+
+        if (q->rq_timed_out_fn)
+                ret = q->rq_timed_out_fn(rq);
+
+        return ret;
+}
+
+static const struct blk_mq_ops bsg_mq_ops = {
+        .queue_rq = bsg_queue_rq,
+        .init_request = bsg_init_rq,
+        .exit_request = bsg_exit_rq,
+        .initialize_rq_fn = bsg_initialize_rq,
+        .complete = bsg_complete,
+        .timeout = bsg_timeout,
+};
 
 /**
  * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
···
 struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
                 bsg_job_fn *job_fn, rq_timed_out_fn *timeout, int dd_job_size)
 {
+        struct blk_mq_tag_set *set;
         struct request_queue *q;
-        int ret;
+        int ret = -ENOMEM;
 
-        q = blk_alloc_queue(GFP_KERNEL);
-        if (!q)
+        set = kzalloc(sizeof(*set), GFP_KERNEL);
+        if (!set)
                 return ERR_PTR(-ENOMEM);
-        q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
-        q->init_rq_fn = bsg_init_rq;
-        q->exit_rq_fn = bsg_exit_rq;
-        q->initialize_rq_fn = bsg_initialize_rq;
-        q->request_fn = bsg_request_fn;
 
-        ret = blk_init_allocated_queue(q);
-        if (ret)
-                goto out_cleanup_queue;
+        set->ops = &bsg_mq_ops,
+        set->nr_hw_queues = 1;
+        set->queue_depth = 128;
+        set->numa_node = NUMA_NO_NODE;
+        set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+        set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
+        if (blk_mq_alloc_tag_set(set))
+                goto out_tag_set;
+
+        q = blk_mq_init_queue(set);
+        if (IS_ERR(q)) {
+                ret = PTR_ERR(q);
+                goto out_queue;
+        }
 
         q->queuedata = dev;
         q->bsg_job_fn = job_fn;
         blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
-        blk_queue_softirq_done(q, bsg_softirq_done);
         blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
-        blk_queue_rq_timed_out(q, timeout);
+        q->rq_timed_out_fn = timeout;
 
         ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
         if (ret) {
···
         return q;
 out_cleanup_queue:
         blk_cleanup_queue(q);
+out_queue:
+        blk_mq_free_tag_set(set);
+out_tag_set:
+        kfree(set);
         return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_setup_queue);
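For callers the bsg_setup_queue() signature is unchanged, so existing transport drivers keep working; the tag set is now allocated inside bsg_setup_queue() and released by bsg_remove_queue(). A caller-side sketch, assuming a hypothetical my_transport driver (only bsg_setup_queue(), bsg_job_done() and bsg_remove_queue() are real APIs from bsg-lib):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/bsg-lib.h>

static int my_transport_bsg_job(struct bsg_job *job)
{
        /* issue the pass-through command, then signal completion */
        bsg_job_done(job, 0, 0);
        return 0;
}

static int my_transport_add(struct device *dev)
{
        struct request_queue *q;

        q = bsg_setup_queue(dev, dev_name(dev), my_transport_bsg_job,
                            NULL /* no extra timeout handler */, 0);
        if (IS_ERR(q))
                return PTR_ERR(q);
        /* stash q; tear everything down later with bsg_remove_queue(q) */
        return 0;
}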
drivers/scsi/scsi_transport_fc.c (+35 -24)
···
 
         /* the blk_end_sync_io() doesn't check the error */
         if (inflight)
-                __blk_complete_request(req);
+                blk_mq_end_request(req, BLK_STS_IOERR);
         return BLK_EH_DONE;
 }
···
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
         struct request_queue *q = rport->rqst_q;
-        unsigned long flags;
 
-        if (!q)
-                return;
-
-        spin_lock_irqsave(q->queue_lock, flags);
-        blk_run_queue_async(q);
-        spin_unlock_irqrestore(q->queue_lock, flags);
+        if (q)
+                blk_mq_run_hw_queues(q, true);
 }
 
 /**
···
         return fc_bsg_host_dispatch(shost, job);
 }
 
+static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport)
+{
+        if (rport->port_state == FC_PORTSTATE_BLOCKED &&
+            !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+                return BLK_STS_RESOURCE;
+
+        if (rport->port_state != FC_PORTSTATE_ONLINE)
+                return BLK_STS_IOERR;
+
+        return BLK_STS_OK;
+}
+
+
+static int fc_bsg_dispatch_prep(struct bsg_job *job)
+{
+        struct fc_rport *rport = fc_bsg_to_rport(job);
+        blk_status_t ret;
+
+        ret = fc_bsg_rport_prep(rport);
+        switch (ret) {
+        case BLK_STS_OK:
+                break;
+        case BLK_STS_RESOURCE:
+                return -EAGAIN;
+        default:
+                return -EIO;
+        }
+
+        return fc_bsg_dispatch(job);
+}
+
 /**
  * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
  * @shost: shost for fc_host
···
         return 0;
 }
 
-static int fc_bsg_rport_prep(struct request_queue *q, struct request *req)
-{
-        struct fc_rport *rport = dev_to_rport(q->queuedata);
-
-        if (rport->port_state == FC_PORTSTATE_BLOCKED &&
-            !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
-                return BLKPREP_DEFER;
-
-        if (rport->port_state != FC_PORTSTATE_ONLINE)
-                return BLKPREP_KILL;
-
-        return BLKPREP_OK;
-}
-
 /**
  * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
  * @shost: shost that rport is attached to
···
         if (!i->f->bsg_request)
                 return -ENOTSUPP;
 
-        q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch,
+        q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep,
                             fc_bsg_job_timeout, i->f->dd_bsg_size);
         if (IS_ERR(q)) {
                 dev_err(dev, "failed to setup bsg queue\n");
                 return PTR_ERR(q);
         }
         __scsi_init_queue(shost, q);
-        blk_queue_prep_rq(q, fc_bsg_rport_prep);
         blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
         rport->rqst_q = q;
         return 0;
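Since blk-mq has no equivalent of the legacy ->prep_rq hook, the rport state check moves from request preparation into the dispatch path: fc_bsg_dispatch_prep() now runs before every job. In short, the old prep return values map as follows:

  BLKPREP_DEFER -> BLK_STS_RESOURCE -> -EAGAIN from fc_bsg_dispatch_prep()
  BLKPREP_KILL  -> BLK_STS_IOERR    -> -EIO
  BLKPREP_OK    -> BLK_STS_OK       -> the job proceeds via fc_bsg_dispatch()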