Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
"A small collection of fixes that has been gathered over the last few
weeks. This contains:

- A one-liner fix for NVMe, fixing a missing list_head init that
could make us oops on hitting recovery at load time.

- Two small blk-mq fixes:
- Fixup a bad goto jump on error handling.
- Fix for oopsing if running out of reserved tags.

- A memory leak fix for NBD.

- Two small writeback fixes from Tejun, fixing a missing init to
INITIAL_JIFFIES, and a possible underflow introduced recently.

- A core merge fixup in sg gap detection, where rq->biotail was
indexed with the count of rq->bio"

* 'for-linus' of git://git.kernel.dk/linux-block:
writeback: fix possible underflow in write bandwidth calculation
NVMe: Initialize device list head before starting
Fix bug in blk_rq_merge_ok
blkmq: Fix NULL pointer deref when all reserved tags in
blk-mq: fix use of incorrect goto label in blk_mq_init_queue error path
nbd: fix possible memory leak
writeback: add missing INITIAL_JIFFIES init in global_update_bandwidth()

+18 -12
+1 -1
block/blk-merge.c
··· 592 592 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { 593 593 struct bio_vec *bprev; 594 594 595 - bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1]; 595 + bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1]; 596 596 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) 597 597 return false; 598 598 }
+4 -2
block/blk-mq-tag.c
··· 278 278 /* 279 279 * We're out of tags on this hardware queue, kick any 280 280 * pending IO submits before going to sleep waiting for 281 - * some to complete. 281 + * some to complete. Note that hctx can be NULL here for 282 + * reserved tag allocation. 282 283 */ 283 - blk_mq_run_hw_queue(hctx, false); 284 + if (hctx) 285 + blk_mq_run_hw_queue(hctx, false); 284 286 285 287 /* 286 288 * Retry tag allocation after running the hardware queue,
+3 -3
block/blk-mq.c
··· 1938 1938 */ 1939 1939 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, 1940 1940 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) 1941 - goto err_map; 1941 + goto err_mq_usage; 1942 1942 1943 1943 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); 1944 1944 blk_queue_rq_timeout(q, 30000); ··· 1981 1981 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 1982 1982 1983 1983 if (blk_mq_init_hw_queues(q, set)) 1984 - goto err_hw; 1984 + goto err_mq_usage; 1985 1985 1986 1986 mutex_lock(&all_q_mutex); 1987 1987 list_add_tail(&q->all_q_node, &all_q_list); ··· 1993 1993 1994 1994 return q; 1995 1995 1996 - err_hw: 1996 + err_mq_usage: 1997 1997 blk_cleanup_queue(q); 1998 1998 err_hctxs: 1999 1999 kfree(map);
+4 -4
drivers/block/nbd.c
··· 803 803 return -EINVAL; 804 804 } 805 805 806 - nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); 807 - if (!nbd_dev) 808 - return -ENOMEM; 809 - 810 806 part_shift = 0; 811 807 if (max_part > 0) { 812 808 part_shift = fls(max_part); ··· 823 827 824 828 if (nbds_max > 1UL << (MINORBITS - part_shift)) 825 829 return -EINVAL; 830 + 831 + nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); 832 + if (!nbd_dev) 833 + return -ENOMEM; 826 834 827 835 for (i = 0; i < nbds_max; i++) { 828 836 struct gendisk *disk = alloc_disk(1 << part_shift);
+1
drivers/block/nvme-core.c
··· 3003 3003 } 3004 3004 get_device(dev->device); 3005 3005 3006 + INIT_LIST_HEAD(&dev->node); 3006 3007 INIT_WORK(&dev->probe_work, nvme_async_probe); 3007 3008 schedule_work(&dev->probe_work); 3008 3009 return 0;
+5 -2
mm/page-writeback.c
··· 857 857 * bw * elapsed + write_bandwidth * (period - elapsed) 858 858 * write_bandwidth = --------------------------------------------------- 859 859 * period 860 + * 861 + * @written may have decreased due to account_page_redirty(). 862 + * Avoid underflowing @bw calculation. 860 863 */ 861 - bw = written - bdi->written_stamp; 864 + bw = written - min(written, bdi->written_stamp); 862 865 bw *= HZ; 863 866 if (unlikely(elapsed > period)) { 864 867 do_div(bw, elapsed); ··· 925 922 unsigned long now) 926 923 { 927 924 static DEFINE_SPINLOCK(dirty_lock); 928 - static unsigned long update_time; 925 + static unsigned long update_time = INITIAL_JIFFIES; 929 926 930 927 /* 931 928 * check locklessly first to optimize away locking for the most time