Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-20190215' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

- Ensure we insert into the hctx dispatch list if a request is marked
as DONTPREP (Jianchao)

- NVMe pull request, single missing unlock on error fix (Keith)

- MD pull request, single fix for a potentially data-corrupting issue
(Nate)

- Floppy check_events regression fix (Yufen)

* tag 'for-linus-20190215' of git://git.kernel.dk/linux-block:
md/raid1: don't clear bitmap bits on interrupted recovery.
floppy: check_events callback should not return a negative number
nvme-pci: add missing unlock for reset error
blk-mq: insert rq with DONTPREP to hctx dispatch list when requeue

+34 -16
+10 -2
block/blk-mq.c
··· 737 737 spin_unlock_irq(&q->requeue_lock); 738 738 739 739 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 740 - if (!(rq->rq_flags & RQF_SOFTBARRIER)) 740 + if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) 741 741 continue; 742 742 743 743 rq->rq_flags &= ~RQF_SOFTBARRIER; 744 744 list_del_init(&rq->queuelist); 745 - blk_mq_sched_insert_request(rq, true, false, false); 745 + /* 746 + * If RQF_DONTPREP, rq has contained some driver specific 747 + * data, so insert it to hctx dispatch list to avoid any 748 + * merge. 749 + */ 750 + if (rq->rq_flags & RQF_DONTPREP) 751 + blk_mq_request_bypass_insert(rq, false); 752 + else 753 + blk_mq_sched_insert_request(rq, true, false, false); 746 754 } 747 755 748 756 while (!list_empty(&rq_list)) {
+1 -1
drivers/block/floppy.c
··· 4075 4075 4076 4076 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { 4077 4077 if (lock_fdc(drive)) 4078 - return -EINTR; 4078 + return 0; 4079 4079 poll_drive(false, 0); 4080 4080 process_fd_request(); 4081 4081 }
+18 -10
drivers/md/raid1.c
··· 1863 1863 reschedule_retry(r1_bio); 1864 1864 } 1865 1865 1866 + static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) 1867 + { 1868 + sector_t sync_blocks = 0; 1869 + sector_t s = r1_bio->sector; 1870 + long sectors_to_go = r1_bio->sectors; 1871 + 1872 + /* make sure these bits don't get cleared. */ 1873 + do { 1874 + md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); 1875 + s += sync_blocks; 1876 + sectors_to_go -= sync_blocks; 1877 + } while (sectors_to_go > 0); 1878 + } 1879 + 1866 1880 static void end_sync_write(struct bio *bio) 1867 1881 { 1868 1882 int uptodate = !bio->bi_status; ··· 1888 1874 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; 1889 1875 1890 1876 if (!uptodate) { 1891 - sector_t sync_blocks = 0; 1892 - sector_t s = r1_bio->sector; 1893 - long sectors_to_go = r1_bio->sectors; 1894 - /* make sure these bits doesn't get cleared. */ 1895 - do { 1896 - md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); 1897 - s += sync_blocks; 1898 - sectors_to_go -= sync_blocks; 1899 - } while (sectors_to_go > 0); 1877 + abort_sync_write(mddev, r1_bio); 1900 1878 set_bit(WriteErrorSeen, &rdev->flags); 1901 1879 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 1902 1880 set_bit(MD_RECOVERY_NEEDED, & ··· 2178 2172 (i == r1_bio->read_disk || 2179 2173 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) 2180 2174 continue; 2181 - if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) 2175 + if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { 2176 + abort_sync_write(mddev, r1_bio); 2182 2177 continue; 2178 + } 2183 2179 2184 2180 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2185 2181 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+5 -3
drivers/nvme/host/pci.c
··· 2560 2560 mutex_lock(&dev->shutdown_lock); 2561 2561 result = nvme_pci_enable(dev); 2562 2562 if (result) 2563 - goto out; 2563 + goto out_unlock; 2564 2564 2565 2565 result = nvme_pci_configure_admin_queue(dev); 2566 2566 if (result) 2567 - goto out; 2567 + goto out_unlock; 2568 2568 2569 2569 result = nvme_alloc_admin_tags(dev); 2570 2570 if (result) 2571 - goto out; 2571 + goto out_unlock; 2572 2572 2573 2573 /* 2574 2574 * Limit the max command size to prevent iod->sg allocations going ··· 2651 2651 nvme_start_ctrl(&dev->ctrl); 2652 2652 return; 2653 2653 2654 + out_unlock: 2655 + mutex_unlock(&dev->shutdown_lock); 2654 2656 out: 2655 2657 nvme_remove_dead_ctrl(dev, result); 2656 2658 }