Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
"Just a set of small fixes that have either been queued up after the
original pull for this merge window, or just missed the original pull
request.

- a few bcache fixes/changes from Eric and Kent

- add WRITE_SAME to the command filter whitelist from Mauricio

- kill an unused struct member from Ritesh

- partition IO alignment fix from Stefan

- nvme sysfs printf fix from Stephen"

* 'for-linus' of git://git.kernel.dk/linux-block:
block: check partition alignment
nvme : Use correct scnprintf in cmb show
block: allow WRITE_SAME commands with the SG_IO ioctl
block: Remove unused member (busy) from struct blk_queue_tag
bcache: partition support: add 16 minors per bcacheN device
bcache: Make gc wakeup sane, remove set_task_state()

+39 -31
+3
block/ioctl.c
··· 45 45 || pstart < 0 || plength < 0 || partno > 65535) 46 46 return -EINVAL; 47 47 } 48 + /* check if partition is aligned to blocksize */ 49 + if (p.start & (bdev_logical_block_size(bdev) - 1)) 50 + return -EINVAL; 48 51 49 52 mutex_lock(&bdev->bd_mutex); 50 53
+3
block/scsi_ioctl.c
··· 182 182 __set_bit(WRITE_16, filter->write_ok); 183 183 __set_bit(WRITE_LONG, filter->write_ok); 184 184 __set_bit(WRITE_LONG_2, filter->write_ok); 185 + __set_bit(WRITE_SAME, filter->write_ok); 186 + __set_bit(WRITE_SAME_16, filter->write_ok); 187 + __set_bit(WRITE_SAME_32, filter->write_ok); 185 188 __set_bit(ERASE, filter->write_ok); 186 189 __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok); 187 190 __set_bit(MODE_SELECT, filter->write_ok);
+2 -2
drivers/md/bcache/bcache.h
··· 425 425 * until a gc finishes - otherwise we could pointlessly burn a ton of 426 426 * cpu 427 427 */ 428 - unsigned invalidate_needs_gc:1; 428 + unsigned invalidate_needs_gc; 429 429 430 430 bool discard; /* Get rid of? */ 431 431 ··· 593 593 594 594 /* Counts how many sectors bio_insert has added to the cache */ 595 595 atomic_t sectors_to_gc; 596 + wait_queue_head_t gc_wait; 596 597 597 - wait_queue_head_t moving_gc_wait; 598 598 struct keybuf moving_gc_keys; 599 599 /* Number of moving GC bios in flight */ 600 600 struct semaphore moving_in_flight;
+21 -20
drivers/md/bcache/btree.c
··· 1757 1757 bch_moving_gc(c); 1758 1758 } 1759 1759 1760 - static int bch_gc_thread(void *arg) 1760 + static bool gc_should_run(struct cache_set *c) 1761 1761 { 1762 - struct cache_set *c = arg; 1763 1762 struct cache *ca; 1764 1763 unsigned i; 1765 1764 1766 - while (1) { 1767 - again: 1768 - bch_btree_gc(c); 1765 + for_each_cache(ca, c, i) 1766 + if (ca->invalidate_needs_gc) 1767 + return true; 1769 1768 1770 - set_current_state(TASK_INTERRUPTIBLE); 1769 + if (atomic_read(&c->sectors_to_gc) < 0) 1770 + return true; 1771 + 1772 + return false; 1773 + } 1774 + 1775 + static int bch_gc_thread(void *arg) 1776 + { 1777 + struct cache_set *c = arg; 1778 + 1779 + while (1) { 1780 + wait_event_interruptible(c->gc_wait, 1781 + kthread_should_stop() || gc_should_run(c)); 1782 + 1771 1783 if (kthread_should_stop()) 1772 1784 break; 1773 1785 1774 - mutex_lock(&c->bucket_lock); 1775 - 1776 - for_each_cache(ca, c, i) 1777 - if (ca->invalidate_needs_gc) { 1778 - mutex_unlock(&c->bucket_lock); 1779 - set_current_state(TASK_RUNNING); 1780 - goto again; 1781 - } 1782 - 1783 - mutex_unlock(&c->bucket_lock); 1784 - 1785 - schedule(); 1786 + set_gc_sectors(c); 1787 + bch_btree_gc(c); 1786 1788 } 1787 1789 1788 1790 return 0; ··· 1792 1790 1793 1791 int bch_gc_thread_start(struct cache_set *c) 1794 1792 { 1795 - c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc"); 1793 + c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); 1796 1794 if (IS_ERR(c->gc_thread)) 1797 1795 return PTR_ERR(c->gc_thread); 1798 1796 1799 - set_task_state(c->gc_thread, TASK_INTERRUPTIBLE); 1800 1797 return 0; 1801 1798 } 1802 1799
+1 -2
drivers/md/bcache/btree.h
··· 260 260 261 261 static inline void wake_up_gc(struct cache_set *c) 262 262 { 263 - if (c->gc_thread) 264 - wake_up_process(c->gc_thread); 263 + wake_up(&c->gc_wait); 265 264 } 266 265 267 266 #define MAP_DONE 0
+1 -3
drivers/md/bcache/request.c
··· 196 196 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); 197 197 struct bio *bio = op->bio, *n; 198 198 199 - if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { 200 - set_gc_sectors(op->c); 199 + if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) 201 200 wake_up_gc(op->c); 202 - } 203 201 204 202 if (op->bypass) 205 203 return bch_data_invalidate(cl);
+6 -1
drivers/md/bcache/super.c
··· 58 58 struct workqueue_struct *bcache_wq; 59 59 60 60 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 61 + #define BCACHE_MINORS 16 /* partition support */ 61 62 62 63 /* Superblock */ 63 64 ··· 784 783 if (minor < 0) 785 784 return minor; 786 785 786 + minor *= BCACHE_MINORS; 787 + 787 788 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 788 - !(d->disk = alloc_disk(1))) { 789 + !(d->disk = alloc_disk(BCACHE_MINORS))) { 789 790 ida_simple_remove(&bcache_minor, minor); 790 791 return -ENOMEM; 791 792 } ··· 1492 1489 mutex_init(&c->bucket_lock); 1493 1490 init_waitqueue_head(&c->btree_cache_wait); 1494 1491 init_waitqueue_head(&c->bucket_wait); 1492 + init_waitqueue_head(&c->gc_wait); 1495 1493 sema_init(&c->uuid_write_mutex, 1); 1496 1494 1497 1495 spin_lock_init(&c->btree_gc_time.lock); ··· 1552 1548 1553 1549 for_each_cache(ca, c, i) 1554 1550 c->nbuckets += ca->sb.nbuckets; 1551 + set_gc_sectors(c); 1555 1552 1556 1553 if (CACHE_SYNC(&c->sb)) { 1557 1554 LIST_HEAD(journal);
+2 -2
drivers/nvme/host/pci.c
··· 50 50 #define NVME_AQ_DEPTH 256 51 51 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) 52 52 #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) 53 - 53 + 54 54 /* 55 55 * We handle AEN commands ourselves and don't even let the 56 56 * block layer know about them. ··· 1349 1349 { 1350 1350 struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 1351 1351 1352 - return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", 1352 + return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", 1353 1353 ndev->cmbloc, ndev->cmbsz); 1354 1354 } 1355 1355 static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
-1
include/linux/blkdev.h
··· 288 288 struct blk_queue_tag { 289 289 struct request **tag_index; /* map of busy tags */ 290 290 unsigned long *tag_map; /* bit map of free/busy tags */ 291 - int busy; /* current depth */ 292 291 int max_depth; /* what we will send to device */ 293 292 int real_max_depth; /* what the array can hold */ 294 293 atomic_t refcnt; /* map can be shared */