Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

gdrom: convert to blk-mq

Ditch the deferred list, lock, and workqueue handling. Just mark the
tag set as blocking, so we are invoked from a workqueue already.

Signed-off-by: Jens Axboe <axboe@kernel.dk>

+77 -93
+77 -93
drivers/cdrom/gdrom.c
··· 31 31 #include <linux/cdrom.h> 32 32 #include <linux/genhd.h> 33 33 #include <linux/bio.h> 34 - #include <linux/blkdev.h> 34 + #include <linux/blk-mq.h> 35 35 #include <linux/interrupt.h> 36 36 #include <linux/device.h> 37 37 #include <linux/mutex.h> 38 38 #include <linux/wait.h> 39 - #include <linux/workqueue.h> 40 39 #include <linux/platform_device.h> 41 40 #include <scsi/scsi.h> 42 41 #include <asm/io.h> ··· 101 102 static DECLARE_WAIT_QUEUE_HEAD(command_queue); 102 103 static DECLARE_WAIT_QUEUE_HEAD(request_queue); 103 104 104 - static DEFINE_SPINLOCK(gdrom_lock); 105 - static void gdrom_readdisk_dma(struct work_struct *work); 106 - static DECLARE_WORK(work, gdrom_readdisk_dma); 107 - static LIST_HEAD(gdrom_deferred); 108 - 109 105 struct gdromtoc { 110 106 unsigned int entry[99]; 111 107 unsigned int first, last; ··· 116 122 char disk_type; 117 123 struct gdromtoc *toc; 118 124 struct request_queue *gdrom_rq; 125 + struct blk_mq_tag_set tag_set; 119 126 } gd; 120 127 121 128 struct gdrom_id { ··· 579 584 * 9 -> sectors >> 8 580 585 * 10 -> sectors 581 586 */ 582 - static void gdrom_readdisk_dma(struct work_struct *work) 587 + static blk_status_t gdrom_readdisk_dma(struct request *req) 583 588 { 584 589 int block, block_cnt; 585 590 blk_status_t err; 586 591 struct packet_command *read_command; 587 - struct list_head *elem, *next; 588 - struct request *req; 589 592 unsigned long timeout; 590 593 591 - if (list_empty(&gdrom_deferred)) 592 - return; 593 594 read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL); 594 595 if (!read_command) 595 - return; /* get more memory later? 
*/ 596 + return BLK_STS_RESOURCE; 597 + 596 598 read_command->cmd[0] = 0x30; 597 599 read_command->cmd[1] = 0x20; 598 - spin_lock(&gdrom_lock); 599 - list_for_each_safe(elem, next, &gdrom_deferred) { 600 - req = list_entry(elem, struct request, queuelist); 601 - spin_unlock(&gdrom_lock); 602 - block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; 603 - block_cnt = blk_rq_sectors(req)/GD_TO_BLK; 604 - __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG); 605 - __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); 606 - __raw_writel(1, GDROM_DMA_DIRECTION_REG); 607 - __raw_writel(1, GDROM_DMA_ENABLE_REG); 608 - read_command->cmd[2] = (block >> 16) & 0xFF; 609 - read_command->cmd[3] = (block >> 8) & 0xFF; 610 - read_command->cmd[4] = block & 0xFF; 611 - read_command->cmd[8] = (block_cnt >> 16) & 0xFF; 612 - read_command->cmd[9] = (block_cnt >> 8) & 0xFF; 613 - read_command->cmd[10] = block_cnt & 0xFF; 614 - /* set for DMA */ 615 - __raw_writeb(1, GDROM_ERROR_REG); 616 - /* other registers */ 617 - __raw_writeb(0, GDROM_SECNUM_REG); 618 - __raw_writeb(0, GDROM_BCL_REG); 619 - __raw_writeb(0, GDROM_BCH_REG); 620 - __raw_writeb(0, GDROM_DSEL_REG); 621 - __raw_writeb(0, GDROM_INTSEC_REG); 622 - /* Wait for registers to reset after any previous activity */ 623 - timeout = jiffies + HZ / 2; 624 - while (gdrom_is_busy() && time_before(jiffies, timeout)) 625 - cpu_relax(); 626 - __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG); 627 - timeout = jiffies + HZ / 2; 628 - /* Wait for packet command to finish */ 629 - while (gdrom_is_busy() && time_before(jiffies, timeout)) 630 - cpu_relax(); 631 - gd.pending = 1; 632 - gd.transfer = 1; 633 - outsw(GDROM_DATA_REG, &read_command->cmd, 6); 634 - timeout = jiffies + HZ / 2; 635 - /* Wait for any pending DMA to finish */ 636 - while (__raw_readb(GDROM_DMA_STATUS_REG) && 637 - time_before(jiffies, timeout)) 638 - cpu_relax(); 639 - /* start transfer */ 640 - __raw_writeb(1, GDROM_DMA_STATUS_REG); 
641 - wait_event_interruptible_timeout(request_queue, 642 - gd.transfer == 0, GDROM_DEFAULT_TIMEOUT); 643 - err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK; 644 - gd.transfer = 0; 645 - gd.pending = 0; 646 - /* now seek to take the request spinlock 647 - * before handling ending the request */ 648 - spin_lock(&gdrom_lock); 649 - list_del_init(&req->queuelist); 650 - __blk_end_request_all(req, err); 651 - } 652 - spin_unlock(&gdrom_lock); 600 + block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; 601 + block_cnt = blk_rq_sectors(req)/GD_TO_BLK; 602 + __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG); 603 + __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); 604 + __raw_writel(1, GDROM_DMA_DIRECTION_REG); 605 + __raw_writel(1, GDROM_DMA_ENABLE_REG); 606 + read_command->cmd[2] = (block >> 16) & 0xFF; 607 + read_command->cmd[3] = (block >> 8) & 0xFF; 608 + read_command->cmd[4] = block & 0xFF; 609 + read_command->cmd[8] = (block_cnt >> 16) & 0xFF; 610 + read_command->cmd[9] = (block_cnt >> 8) & 0xFF; 611 + read_command->cmd[10] = block_cnt & 0xFF; 612 + /* set for DMA */ 613 + __raw_writeb(1, GDROM_ERROR_REG); 614 + /* other registers */ 615 + __raw_writeb(0, GDROM_SECNUM_REG); 616 + __raw_writeb(0, GDROM_BCL_REG); 617 + __raw_writeb(0, GDROM_BCH_REG); 618 + __raw_writeb(0, GDROM_DSEL_REG); 619 + __raw_writeb(0, GDROM_INTSEC_REG); 620 + /* Wait for registers to reset after any previous activity */ 621 + timeout = jiffies + HZ / 2; 622 + while (gdrom_is_busy() && time_before(jiffies, timeout)) 623 + cpu_relax(); 624 + __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG); 625 + timeout = jiffies + HZ / 2; 626 + /* Wait for packet command to finish */ 627 + while (gdrom_is_busy() && time_before(jiffies, timeout)) 628 + cpu_relax(); 629 + gd.pending = 1; 630 + gd.transfer = 1; 631 + outsw(GDROM_DATA_REG, &read_command->cmd, 6); 632 + timeout = jiffies + HZ / 2; 633 + /* Wait for any pending DMA to finish */ 634 + while 
(__raw_readb(GDROM_DMA_STATUS_REG) && 635 + time_before(jiffies, timeout)) 636 + cpu_relax(); 637 + /* start transfer */ 638 + __raw_writeb(1, GDROM_DMA_STATUS_REG); 639 + wait_event_interruptible_timeout(request_queue, 640 + gd.transfer == 0, GDROM_DEFAULT_TIMEOUT); 641 + err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK; 642 + gd.transfer = 0; 643 + gd.pending = 0; 644 + 645 + blk_mq_end_request(req, err); 653 646 kfree(read_command); 647 + return BLK_STS_OK; 654 648 } 655 649 656 - static void gdrom_request(struct request_queue *rq) 650 + static blk_status_t gdrom_queue_rq(struct blk_mq_hw_ctx *hctx, 651 + const struct blk_mq_queue_data *bd) 657 652 { 658 - struct request *req; 653 + blk_mq_start_request(bd->rq); 659 654 660 - while ((req = blk_fetch_request(rq)) != NULL) { 661 - switch (req_op(req)) { 662 - case REQ_OP_READ: 663 - /* 664 - * Add to list of deferred work and then schedule 665 - * workqueue. 666 - */ 667 - list_add_tail(&req->queuelist, &gdrom_deferred); 668 - schedule_work(&work); 669 - break; 670 - case REQ_OP_WRITE: 671 - pr_notice("Read only device - write request ignored\n"); 672 - __blk_end_request_all(req, BLK_STS_IOERR); 673 - break; 674 - default: 675 - printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); 676 - __blk_end_request_all(req, BLK_STS_IOERR); 677 - break; 678 - } 655 + switch (req_op(bd->rq)) { 656 + case REQ_OP_READ: 657 + return gdrom_readdisk_dma(bd->rq); 658 + case REQ_OP_WRITE: 659 + pr_notice("Read only device - write request ignored\n"); 660 + return BLK_STS_IOERR; 661 + default: 662 + printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); 663 + return BLK_STS_IOERR; 679 664 } 680 665 } 681 666 ··· 743 768 return gdrom_init_dma_mode(); 744 769 } 745 770 771 + static const struct blk_mq_ops gdrom_mq_ops = { 772 + .queue_rq = gdrom_queue_rq, 773 + }; 774 + 746 775 /* 747 776 * register this as a block device and as compliant with the 748 777 * universal CD Rom driver interface ··· 790 811 err = 
gdrom_set_interrupt_handlers(); 791 812 if (err) 792 813 goto probe_fail_cmdirq_register; 793 - gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock); 794 - if (!gd.gdrom_rq) { 795 - err = -ENOMEM; 814 + 815 + gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1, 816 + BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING); 817 + if (IS_ERR(gd.gdrom_rq)) { 818 + err = PTR_ERR(gd.gdrom_rq); 819 + gd.gdrom_rq = NULL; 796 820 goto probe_fail_requestq; 797 821 } 822 + 798 823 blk_queue_bounce_limit(gd.gdrom_rq, BLK_BOUNCE_HIGH); 799 824 800 825 err = probe_gdrom_setupqueue(); ··· 815 832 816 833 probe_fail_toc: 817 834 blk_cleanup_queue(gd.gdrom_rq); 835 + blk_mq_free_tag_set(&gd.tag_set); 818 836 probe_fail_requestq: 819 837 free_irq(HW_EVENT_GDROM_DMA, &gd); 820 838 free_irq(HW_EVENT_GDROM_CMD, &gd); ··· 833 849 834 850 static int remove_gdrom(struct platform_device *devptr) 835 851 { 836 - flush_work(&work); 837 852 blk_cleanup_queue(gd.gdrom_rq); 853 + blk_mq_free_tag_set(&gd.tag_set); 838 854 free_irq(HW_EVENT_GDROM_CMD, &gd); 839 855 free_irq(HW_EVENT_GDROM_DMA, &gd); 840 856 del_gendisk(gd.disk);