Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'md/4.2-rc5-fixes' of git://neil.brown.name/md

Pull md fixes from Neil Brown:
"Three more fixes for md in 4.2

Mostly corner-case stuff.

One of these patches is for a CVE: CVE-2015-5697

I'm not convinced it is serious (data leak from CAP_SYS_ADMIN ioctl)
but as people seem to want to back-port it, I've included a minimal
version here. The remainder of that patch from Benjamin is
code-cleanup and will arrive in the 4.3 merge window"

* tag 'md/4.2-rc5-fixes' of git://neil.brown.name/md:
md/raid5: don't let shrink_slab shrink too far.
md: use kzalloc() when bitmap is disabled
md/raid1: extend spinlock to protect raid1_end_read_request against inconsistencies

+10 -7
+1 -1
drivers/md/md.c
@@ -5759,7 +5759,7 @@
 	char *ptr;
 	int err;
 
-	file = kmalloc(sizeof(*file), GFP_NOIO);
+	file = kzalloc(sizeof(*file), GFP_NOIO);
 	if (!file)
 		return -ENOMEM;
 
+6 -4
drivers/md/raid1.c
@@ -1476,6 +1476,7 @@
 {
 	char b[BDEVNAME_SIZE];
 	struct r1conf *conf = mddev->private;
+	unsigned long flags;
 
 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1496,13 +1495,12 @@
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
 	} else
 		set_bit(Faulty, &rdev->flags);
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 	/*
 	 * if recovery is running, make sure it aborts.
 	 */
@@ -1568,7 +1568,10 @@
 	 * Find all failed disks within the RAID1 configuration
 	 * and mark them readable.
 	 * Called under mddev lock, so rcu protection not needed.
+	 * device_lock used to avoid races with raid1_end_read_request
+	 * which expects 'In_sync' flags and ->degraded to be consistent.
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1602,7 +1599,6 @@
 			sysfs_notify_dirent_safe(rdev->sysfs_state);
 		}
 	}
-	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded -= count;
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 
+3 -2
drivers/md/raid5.c
@@ -2256,7 +2256,7 @@
 static int drop_one_stripe(struct r5conf *conf)
 {
 	struct stripe_head *sh;
-	int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
+	int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
 
 	spin_lock_irq(conf->hash_locks + hash);
 	sh = get_free_stripe(conf, hash);
@@ -6388,7 +6388,8 @@
 
 	if (mutex_trylock(&conf->cache_size_mutex)) {
 		ret= 0;
-		while (ret < sc->nr_to_scan) {
+		while (ret < sc->nr_to_scan &&
+		       conf->max_nr_stripes > conf->min_nr_stripes) {
 			if (drop_one_stripe(conf) == 0) {
 				ret = SHRINK_STOP;
 				break;