Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
"Outside of bcache (which really isn't super big), these are all
few-liners. There are a few important fixes in here:

- Fix blk pm sleeping when holding the queue lock

- A small collection of bcache fixes that have been done and tested
since bcache was included in this merge window.

- A fix for a raid5 regression introduced with the bio changes.

- Two important fixes for mtip32xx, fixing an oops and potential data
corruption (or hang) due to wrong bio iteration on stacked devices."

* 'for-linus' of git://git.kernel.dk/linux-block:
scatterlist: sg_set_buf() argument must be in linear mapping
raid5: Initialize bi_vcnt
pktcdvd: silence static checker warning
block: remove refs to XD disks from documentation
blkpm: avoid sleep when holding queue lock
mtip32xx: Correctly handle bio->bi_idx != 0 conditions
mtip32xx: Fix NULL pointer dereference during module unload
bcache: Fix error handling in init code
bcache: clarify free/available/unused space
bcache: drop "select CLOSURES"
bcache: Fix incompatible pointer type warning

+124 -143
+9 -3
Documentation/bcache.txt
··· 319 319 Symlink to each of the cache devices comprising this cache set. 320 320 321 321 cache_available_percent 322 - Percentage of cache device free. 322 + Percentage of cache device which doesn't contain dirty data, and could 323 + potentially be used for writeback. This doesn't mean this space isn't used 324 + for clean cached data; the unused statistic (in priority_stats) is typically 325 + much lower. 323 326 324 327 clear_stats 325 328 Clears the statistics associated with this cache ··· 426 423 Total buckets in this cache 427 424 428 425 priority_stats 429 - Statistics about how recently data in the cache has been accessed. This can 430 - reveal your working set size. 426 + Statistics about how recently data in the cache has been accessed. 427 + This can reveal your working set size. Unused is the percentage of 428 + the cache that doesn't contain any data. Metadata is bcache's 429 + metadata overhead. Average is the average priority of cache buckets. 430 + Next is a list of quantiles with the priority threshold of each. 431 431 432 432 written 433 433 Sum of all data that has been written to the cache; comparison with
+2 -6
Documentation/devices.txt
··· 498 498 499 499 Each device type has 5 bits (32 minors). 500 500 501 - 13 block 8-bit MFM/RLL/IDE controller 502 - 0 = /dev/xda First XT disk whole disk 503 - 64 = /dev/xdb Second XT disk whole disk 504 - 505 - Partitions are handled in the same way as IDE disks 506 - (see major number 3). 501 + 13 block Previously used for the XT disk (/dev/xdN) 502 + Deleted in kernel v3.9. 507 503 508 504 14 char Open Sound System (OSS) 509 505 0 = /dev/mixer Mixer control
-3
Documentation/kernel-parameters.txt
··· 3351 3351 plus one apbt timer for broadcast timer. 3352 3352 x86_mrst_timer=apbt_only | lapic_and_apbt 3353 3353 3354 - xd= [HW,XT] Original XT pre-IDE (RLL encoded) disks. 3355 - xd_geo= See header of drivers/block/xd.c. 3356 - 3357 3354 xen_emul_unplug= [HW,X86,XEN] 3358 3355 Unplug Xen emulated devices 3359 3356 Format: [unplug0,][unplug1]
-2
Documentation/m68k/kernel-options.txt
··· 80 80 /dev/sdd: -> 0x0830 (fourth SCSI disk) 81 81 /dev/sde: -> 0x0840 (fifth SCSI disk) 82 82 /dev/fd : -> 0x0200 (floppy disk) 83 - /dev/xda: -> 0x0c00 (first XT disk, unused in Linux/m68k) 84 - /dev/xdb: -> 0x0c40 (second XT disk, unused in Linux/m68k) 85 83 86 84 The name must be followed by a decimal number, that stands for the 87 85 partition number. Internally, the value of the number is just
+1 -1
block/blk-core.c
··· 3164 3164 q->rpm_status = RPM_ACTIVE; 3165 3165 __blk_run_queue(q); 3166 3166 pm_runtime_mark_last_busy(q->dev); 3167 - pm_runtime_autosuspend(q->dev); 3167 + pm_request_autosuspend(q->dev); 3168 3168 } else { 3169 3169 q->rpm_status = RPM_SUSPENDED; 3170 3170 }
+5 -3
drivers/block/mtip32xx/mtip32xx.c
··· 3002 3002 3003 3003 static void mtip_hw_debugfs_exit(struct driver_data *dd) 3004 3004 { 3005 - debugfs_remove_recursive(dd->dfs_node); 3005 + if (dd->dfs_node) 3006 + debugfs_remove_recursive(dd->dfs_node); 3006 3007 } 3007 3008 3008 3009 ··· 3864 3863 struct driver_data *dd = queue->queuedata; 3865 3864 struct scatterlist *sg; 3866 3865 struct bio_vec *bvec; 3867 - int nents = 0; 3866 + int i, nents = 0; 3868 3867 int tag = 0, unaligned = 0; 3869 3868 3870 3869 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { ··· 3922 3921 } 3923 3922 3924 3923 /* Create the scatter list for this bio. */ 3925 - bio_for_each_segment(bvec, bio, nents) { 3924 + bio_for_each_segment(bvec, bio, i) { 3926 3925 sg_set_page(&sg[nents], 3927 3926 bvec->bv_page, 3928 3927 bvec->bv_len, 3929 3928 bvec->bv_offset); 3929 + nents++; 3930 3930 } 3931 3931 3932 3932 /* Issue the read/write. */
+2 -1
drivers/block/pktcdvd.c
··· 83 83 84 84 #define MAX_SPEED 0xffff 85 85 86 - #define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1)) 86 + #define ZONE(sector, pd) (((sector) + (pd)->offset) & \ 87 + ~(sector_t)((pd)->settings.size - 1)) 87 88 88 89 static DEFINE_MUTEX(pktcdvd_mutex); 89 90 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
-1
drivers/md/bcache/Kconfig
··· 1 1 2 2 config BCACHE 3 3 tristate "Block device as cache" 4 - select CLOSURES 5 4 ---help--- 6 5 Allows a block device to be used as cache for other devices; uses 7 6 a btree for indexing and the layout is optimized for SSDs.
+1 -1
drivers/md/bcache/bcache.h
··· 1241 1241 struct cache_set *bch_cache_set_alloc(struct cache_sb *); 1242 1242 void bch_btree_cache_free(struct cache_set *); 1243 1243 int bch_btree_cache_alloc(struct cache_set *); 1244 - void bch_writeback_init_cached_dev(struct cached_dev *); 1244 + void bch_cached_dev_writeback_init(struct cached_dev *); 1245 1245 void bch_moving_init_cache_set(struct cache_set *); 1246 1246 1247 1247 void bch_cache_allocator_exit(struct cache *ca);
+16 -18
drivers/md/bcache/stats.c
··· 93 93 }; 94 94 static KTYPE(bch_stats); 95 95 96 - static void scale_accounting(unsigned long data); 97 - 98 - void bch_cache_accounting_init(struct cache_accounting *acc, 99 - struct closure *parent) 100 - { 101 - kobject_init(&acc->total.kobj, &bch_stats_ktype); 102 - kobject_init(&acc->five_minute.kobj, &bch_stats_ktype); 103 - kobject_init(&acc->hour.kobj, &bch_stats_ktype); 104 - kobject_init(&acc->day.kobj, &bch_stats_ktype); 105 - 106 - closure_init(&acc->cl, parent); 107 - init_timer(&acc->timer); 108 - acc->timer.expires = jiffies + accounting_delay; 109 - acc->timer.data = (unsigned long) acc; 110 - acc->timer.function = scale_accounting; 111 - add_timer(&acc->timer); 112 - } 113 - 114 96 int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, 115 97 struct kobject *parent) 116 98 { ··· 225 243 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); 226 244 atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); 227 245 atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed); 246 + } 247 + 248 + void bch_cache_accounting_init(struct cache_accounting *acc, 249 + struct closure *parent) 250 + { 251 + kobject_init(&acc->total.kobj, &bch_stats_ktype); 252 + kobject_init(&acc->five_minute.kobj, &bch_stats_ktype); 253 + kobject_init(&acc->hour.kobj, &bch_stats_ktype); 254 + kobject_init(&acc->day.kobj, &bch_stats_ktype); 255 + 256 + closure_init(&acc->cl, parent); 257 + init_timer(&acc->timer); 258 + acc->timer.expires = jiffies + accounting_delay; 259 + acc->timer.data = (unsigned long) acc; 260 + acc->timer.function = scale_accounting; 261 + add_timer(&acc->timer); 228 262 }
+82 -103
drivers/md/bcache/super.c
··· 634 634 return 0;
635 635 }
636 636
637 - static int release_dev(struct gendisk *b, fmode_t mode)
637 + static void release_dev(struct gendisk *b, fmode_t mode)
638 638 {
639 639 struct bcache_device *d = b->private_data;
640 640 closure_put(&d->cl);
641 - return 0;
642 641 }
643 642
644 643 static int ioctl_dev(struct block_device *b, fmode_t mode,
··· 731 732
732 733 if (d->c)
733 734 bcache_device_detach(d);
734 -
735 - if (d->disk)
735 + if (d->disk && d->disk->flags & GENHD_FL_UP)
736 736 del_gendisk(d->disk);
737 737 if (d->disk && d->disk->queue)
738 738 blk_cleanup_queue(d->disk->queue);
··· 754 756 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
755 757 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
756 758 sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
757 - bio_split_pool_init(&d->bio_split_hook))
758 -
759 - return -ENOMEM;
760 -
761 - d->disk = alloc_disk(1);
762 - if (!d->disk)
759 + bio_split_pool_init(&d->bio_split_hook) ||
760 + !(d->disk = alloc_disk(1)) ||
761 + !(q = blk_alloc_queue(GFP_KERNEL)))
763 762 return -ENOMEM;
764 763
765 764 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
··· 765 770 d->disk->first_minor = bcache_minor++;
766 771 d->disk->fops = &bcache_ops;
767 772 d->disk->private_data = d;
768 -
769 - q = blk_alloc_queue(GFP_KERNEL);
770 - if (!q)
771 - return -ENOMEM;
772 773
773 774 blk_queue_make_request(q, NULL);
774 775 d->disk->queue = q;
··· 990 999
991 1000 mutex_lock(&bch_register_lock);
992 1001
993 - bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
1002 + if (atomic_read(&dc->running))
1003 + bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
994 1004 bcache_device_free(&dc->disk);
995 1005 list_del(&dc->list);
996 1006
997 1007 mutex_unlock(&bch_register_lock);
998 1008
999 1009 if (!IS_ERR_OR_NULL(dc->bdev)) {
1000 - blk_sync_queue(bdev_get_queue(dc->bdev));
1010 + if (dc->bdev->bd_disk)
1011 + blk_sync_queue(bdev_get_queue(dc->bdev));
1012 +
1001 1013 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1002 1014 }
1003 1015
··· 1022 1028
1023 1029 static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
1024 1030 {
1025 - int err;
1031 + int ret;
1026 1032 struct io *io;
1027 -
1028 - closure_init(&dc->disk.cl, NULL);
1029 - set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1033 + struct request_queue *q = bdev_get_queue(dc->bdev);
1030 1034
1031 1035 __module_get(THIS_MODULE);
1032 1036 INIT_LIST_HEAD(&dc->list);
1037 + closure_init(&dc->disk.cl, NULL);
1038 + set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1033 1039 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
1034 -
1035 - bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1036 -
1037 - err = bcache_device_init(&dc->disk, block_size);
1038 - if (err)
1039 - goto err;
1040 -
1041 - spin_lock_init(&dc->io_lock);
1042 - closure_init_unlocked(&dc->sb_write);
1043 1040 INIT_WORK(&dc->detach, cached_dev_detach_finish);
1041 + closure_init_unlocked(&dc->sb_write);
1042 + INIT_LIST_HEAD(&dc->io_lru);
1043 + spin_lock_init(&dc->io_lock);
1044 + bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1044 1045
1045 1046 dc->sequential_merge = true;
1046 1047 dc->sequential_cutoff = 4 << 20;
1047 -
1048 - INIT_LIST_HEAD(&dc->io_lru);
1049 - dc->sb_bio.bi_max_vecs = 1;
1050 - dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
1051 1048
1052 1049 for (io = dc->io; io < dc->io + RECENT_IO; io++) {
1053 1050 list_add(&io->lru, &dc->io_lru);
1054 1051 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1055 1052 }
1056 1053
1057 - bch_writeback_init_cached_dev(dc);
1054 + ret = bcache_device_init(&dc->disk, block_size);
1055 + if (ret)
1056 + return ret;
1057 +
1058 + set_capacity(dc->disk.disk,
1059 + dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
1060 +
1061 + dc->disk.disk->queue->backing_dev_info.ra_pages =
1062 + max(dc->disk.disk->queue->backing_dev_info.ra_pages,
1063 + q->backing_dev_info.ra_pages);
1064 +
1065 + bch_cached_dev_request_init(dc);
1066 + bch_cached_dev_writeback_init(dc);
1058 1067 return 0;
1059 - err:
1060 - bcache_device_stop(&dc->disk);
1061 - return err;
1062 1068 }
1063 1069
1064 1070 /* Cached device - bcache superblock */
1065 1071
1066 - static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
1072 + static void register_bdev(struct cache_sb *sb, struct page *sb_page,
1067 1073 struct block_device *bdev,
1068 1074 struct cached_dev *dc)
1069 1075 {
1070 1076 char name[BDEVNAME_SIZE];
1071 1077 const char *err = "cannot allocate memory";
1072 - struct gendisk *g;
1073 1078 struct cache_set *c;
1074 1079
1075 - if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
1076 - return err;
1077 -
1078 1080 memcpy(&dc->sb, sb, sizeof(struct cache_sb));
1079 - dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
1080 1081 dc->bdev = bdev;
1081 1082 dc->bdev->bd_holder = dc;
1082 1083
1083 - g = dc->disk.disk;
1084 + bio_init(&dc->sb_bio);
1085 + dc->sb_bio.bi_max_vecs = 1;
1086 + dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
1087 + dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
1088 + get_page(sb_page);
1084 1089
1085 - set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
1086 -
1087 - g->queue->backing_dev_info.ra_pages =
1088 - max(g->queue->backing_dev_info.ra_pages,
1089 - bdev->bd_queue->backing_dev_info.ra_pages);
1090 -
1091 - bch_cached_dev_request_init(dc);
1090 + if (cached_dev_init(dc, sb->block_size << 9))
1091 + goto err;
1092 1092
1093 1093 err = "error creating kobject";
1094 1094 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
··· 1090 1102 goto err;
1091 1103 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
1092 1104 goto err;
1105 +
1106 + pr_info("registered backing device %s", bdevname(bdev, name));
1093 1107
1094 1108 list_add(&dc->list, &uncached_devices);
1095 1109 list_for_each_entry(c, &bch_cache_sets, list)
··· 1101 1111 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
1102 1112 bch_cached_dev_run(dc);
1103 1113
1104 - return NULL;
1114 + return;
1105 1115 err:
1106 - kobject_put(&dc->disk.kobj);
1107 1116 pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1108 - /*
1109 - * Return NULL instead of an error because kobject_put() cleans
1110 - * everything up
1111 - */
1112 - return NULL;
1117 + bcache_device_stop(&dc->disk);
1113 1118 }
1114 1119
1115 1120 /* Flash only volumes */
··· 1702 1717 size_t free;
1703 1718 struct bucket *b;
1704 1719
1705 - if (!ca)
1706 - return -ENOMEM;
1707 -
1708 1720 __module_get(THIS_MODULE);
1709 1721 kobject_init(&ca->kobj, &bch_cache_ktype);
1710 1722
1711 - memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1712 -
1713 1723 INIT_LIST_HEAD(&ca->discards);
1714 -
1715 - bio_init(&ca->sb_bio);
1716 - ca->sb_bio.bi_max_vecs = 1;
1717 - ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
1718 1724
1719 1725 bio_init(&ca->journal.bio);
1720 1726 ca->journal.bio.bi_max_vecs = 8;
··· 1718 1742 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
1719 1743 !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
1720 1744 !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
1721 - !(ca->buckets = vmalloc(sizeof(struct bucket) *
1745 + !(ca->buckets = vzalloc(sizeof(struct bucket) *
1722 1746 ca->sb.nbuckets)) ||
1723 1747 !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
1724 1748 2, GFP_KERNEL)) ||
1725 1749 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
1726 1750 !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
1727 1751 bio_split_pool_init(&ca->bio_split_hook))
1728 - goto err;
1752 + return -ENOMEM;
1729 1753
1730 1754 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
1731 1755
1732 - memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
1733 1756 for_each_bucket(b, ca)
1734 1757 atomic_set(&b->pin, 0);
··· 1741 1766 return -ENOMEM;
1742 1767 }
1743 1768
1744 - static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
1769 + static void register_cache(struct cache_sb *sb, struct page *sb_page,
1745 1770 struct block_device *bdev, struct cache *ca)
1746 1771 {
1747 1772 char name[BDEVNAME_SIZE];
1748 1773 const char *err = "cannot allocate memory";
1749 1774
1750 - if (cache_alloc(sb, ca) != 0)
1751 - return err;
1752 -
1753 - ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1775 + memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1754 1776 ca->bdev = bdev;
1755 1777 ca->bdev->bd_holder = ca;
1756 1778
1779 + bio_init(&ca->sb_bio);
1780 + ca->sb_bio.bi_max_vecs = 1;
1781 + ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
1782 + ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
1783 + get_page(sb_page);
1784 +
1757 1785 if (blk_queue_discard(bdev_get_queue(ca->bdev)))
1758 1786 ca->discard = CACHE_DISCARD(&ca->sb);
1787 +
1788 + if (cache_alloc(sb, ca) != 0)
1789 + goto err;
1759 1790
1760 1791 err = "error creating kobject";
1761 1792 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
··· 1772 1791 goto err;
1773 1792
1774 1793 pr_info("registered cache device %s", bdevname(bdev, name));
1775 -
1776 - return NULL;
1794 + return;
1777 1795 err:
1796 + pr_notice("error opening %s: %s", bdevname(bdev, name), err);
1778 1797 kobject_put(&ca->kobj);
1779 - pr_info("error opening %s: %s", bdevname(bdev, name), err);
1780 - /* Return NULL instead of an error because kobject_put() cleans
1781 - * everything up
1782 - */
1783 - return NULL;
1784 1798 }
1785 1799
1786 1800 /* Global interfaces/init */
··· 1809 1833 bdev = blkdev_get_by_path(strim(path),
1810 1834 FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1811 1835 sb);
1812 - if (bdev == ERR_PTR(-EBUSY))
1813 - err = "device busy";
1814 -
1815 - if (IS_ERR(bdev) ||
1816 - set_blocksize(bdev, 4096))
1836 + if (IS_ERR(bdev)) {
1837 + if (bdev == ERR_PTR(-EBUSY))
1838 + err = "device busy";
1817 1839 goto err;
1840 + }
1841 +
1842 + err = "failed to set blocksize";
1843 + if (set_blocksize(bdev, 4096))
1844 + goto err_close;
1818 1845
1819 1846 err = read_super(sb, bdev, &sb_page);
1820 1847 if (err)
··· 1825 1846
1826 1847 if (SB_IS_BDEV(sb)) {
1827 1848 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1849 + if (!dc)
1850 + goto err_close;
1828 1851
1829 - err = register_bdev(sb, sb_page, bdev, dc);
1852 + register_bdev(sb, sb_page, bdev, dc);
1830 1853 } else {
1831 1854 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
1855 + if (!ca)
1856 + goto err_close;
1832 1857
1833 - err = register_cache(sb, sb_page, bdev, ca);
1858 + register_cache(sb, sb_page, bdev, ca);
1834 1859 }
1835 -
1836 - if (err) {
1837 - /* register_(bdev|cache) will only return an error if they
1838 - * didn't get far enough to create the kobject - if they did,
1839 - * the kobject destructor will do this cleanup.
1840 - */
1860 + out:
1861 + if (sb_page)
1841 1862 put_page(sb_page);
1842 - err_close:
1843 - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1844 - err:
1845 - if (attr != &ksysfs_register_quiet)
1846 - pr_info("error opening %s: %s", path, err);
1847 - ret = -EINVAL;
1848 - }
1849 -
1850 1863 kfree(sb);
1851 1864 kfree(path);
1852 1865 mutex_unlock(&bch_register_lock);
1853 1866 module_put(THIS_MODULE);
1854 1867 return ret;
1868 +
1869 + err_close:
1870 + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1871 + err:
1872 + if (attr != &ksysfs_register_quiet)
1873 + pr_info("error opening %s: %s", path, err);
1874 + ret = -EINVAL;
1875 + goto out;
1855 1876 }
1856 1877
1857 1878 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
+1 -1
drivers/md/bcache/writeback.c
··· 375 375 refill_dirty(cl); 376 376 } 377 377 378 - void bch_writeback_init_cached_dev(struct cached_dev *dc) 378 + void bch_cached_dev_writeback_init(struct cached_dev *dc) 379 379 { 380 380 closure_init_unlocked(&dc->writeback); 381 381 init_rwsem(&dc->writeback_lock);
+2
drivers/md/raid5.c
··· 664 664 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 665 665 bi->bi_rw |= REQ_FLUSH; 666 666 667 + bi->bi_vcnt = 1; 667 668 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 668 669 bi->bi_io_vec[0].bv_offset = 0; 669 670 bi->bi_size = STRIPE_SIZE; ··· 702 701 else 703 702 rbi->bi_sector = (sh->sector 704 703 + rrdev->data_offset); 704 + rbi->bi_vcnt = 1; 705 705 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 706 706 rbi->bi_io_vec[0].bv_offset = 0; 707 707 rbi->bi_size = STRIPE_SIZE;
+3
include/linux/scatterlist.h
··· 111 111 static inline void sg_set_buf(struct scatterlist *sg, const void *buf, 112 112 unsigned int buflen) 113 113 { 114 + #ifdef CONFIG_DEBUG_SG 115 + BUG_ON(!virt_addr_valid(buf)); 116 + #endif 114 117 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); 115 118 } 116 119