Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'work.thaw' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs thaw updates from Al Viro:
"An ancient series that has fallen through the cracks in the previous
cycle"

* 'work.thaw' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
buffer.c: call thaw_super during emergency thaw
vfs: factor sb iteration out of do_emergency_remount

Total: +85 -51
+1 -24
fs/buffer.c
··· 494 494 return err; 495 495 } 496 496 497 - static void do_thaw_one(struct super_block *sb, void *unused) 497 + void emergency_thaw_bdev(struct super_block *sb) 498 498 { 499 499 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb)) 500 500 printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev); 501 - } 502 - 503 - static void do_thaw_all(struct work_struct *work) 504 - { 505 - iterate_supers(do_thaw_one, NULL); 506 - kfree(work); 507 - printk(KERN_WARNING "Emergency Thaw complete\n"); 508 - } 509 - 510 - /** 511 - * emergency_thaw_all -- forcibly thaw every frozen filesystem 512 - * 513 - * Used for emergency unfreeze of all filesystems via SysRq 514 - */ 515 - void emergency_thaw_all(void) 516 - { 517 - struct work_struct *work; 518 - 519 - work = kmalloc(sizeof(*work), GFP_ATOMIC); 520 - if (work) { 521 - INIT_WORK(work, do_thaw_all); 522 - schedule_work(work); 523 - } 524 501 } 525 502 526 503 /**
+78 -27
fs/super.c
··· 37 37 #include <linux/user_namespace.h> 38 38 #include "internal.h" 39 39 40 + static int thaw_super_locked(struct super_block *sb); 40 41 41 42 static LIST_HEAD(super_blocks); 42 43 static DEFINE_SPINLOCK(sb_lock); ··· 575 574 } 576 575 EXPORT_SYMBOL(drop_super_exclusive); 577 576 577 + static void __iterate_supers(void (*f)(struct super_block *)) 578 + { 579 + struct super_block *sb, *p = NULL; 580 + 581 + spin_lock(&sb_lock); 582 + list_for_each_entry(sb, &super_blocks, s_list) { 583 + if (hlist_unhashed(&sb->s_instances)) 584 + continue; 585 + sb->s_count++; 586 + spin_unlock(&sb_lock); 587 + 588 + f(sb); 589 + 590 + spin_lock(&sb_lock); 591 + if (p) 592 + __put_super(p); 593 + p = sb; 594 + } 595 + if (p) 596 + __put_super(p); 597 + spin_unlock(&sb_lock); 598 + } 578 599 /** 579 600 * iterate_supers - call function for all active superblocks 580 601 * @f: function to call ··· 904 881 return retval; 905 882 } 906 883 884 + static void do_emergency_remount_callback(struct super_block *sb) 885 + { 886 + down_write(&sb->s_umount); 887 + if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) && 888 + !sb_rdonly(sb)) { 889 + /* 890 + * What lock protects sb->s_flags?? 891 + */ 892 + do_remount_sb(sb, SB_RDONLY, NULL, 1); 893 + } 894 + up_write(&sb->s_umount); 895 + } 896 + 907 897 static void do_emergency_remount(struct work_struct *work) 908 898 { 909 - struct super_block *sb, *p = NULL; 910 - 911 - spin_lock(&sb_lock); 912 - list_for_each_entry(sb, &super_blocks, s_list) { 913 - if (hlist_unhashed(&sb->s_instances)) 914 - continue; 915 - sb->s_count++; 916 - spin_unlock(&sb_lock); 917 - down_write(&sb->s_umount); 918 - if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) && 919 - !sb_rdonly(sb)) { 920 - /* 921 - * What lock protects sb->s_flags?? 
922 - */ 923 - do_remount_sb(sb, SB_RDONLY, NULL, 1); 924 - } 925 - up_write(&sb->s_umount); 926 - spin_lock(&sb_lock); 927 - if (p) 928 - __put_super(p); 929 - p = sb; 930 - } 931 - if (p) 932 - __put_super(p); 933 - spin_unlock(&sb_lock); 899 + __iterate_supers(do_emergency_remount_callback); 934 900 kfree(work); 935 901 printk("Emergency Remount complete\n"); 936 902 } ··· 931 919 work = kmalloc(sizeof(*work), GFP_ATOMIC); 932 920 if (work) { 933 921 INIT_WORK(work, do_emergency_remount); 922 + schedule_work(work); 923 + } 924 + } 925 + 926 + static void do_thaw_all_callback(struct super_block *sb) 927 + { 928 + down_write(&sb->s_umount); 929 + if (sb->s_root && sb->s_flags & MS_BORN) { 930 + emergency_thaw_bdev(sb); 931 + thaw_super_locked(sb); 932 + } else { 933 + up_write(&sb->s_umount); 934 + } 935 + } 936 + 937 + static void do_thaw_all(struct work_struct *work) 938 + { 939 + __iterate_supers(do_thaw_all_callback); 940 + kfree(work); 941 + printk(KERN_WARNING "Emergency Thaw complete\n"); 942 + } 943 + 944 + /** 945 + * emergency_thaw_all -- forcibly thaw every frozen filesystem 946 + * 947 + * Used for emergency unfreeze of all filesystems via SysRq 948 + */ 949 + void emergency_thaw_all(void) 950 + { 951 + struct work_struct *work; 952 + 953 + work = kmalloc(sizeof(*work), GFP_ATOMIC); 954 + if (work) { 955 + INIT_WORK(work, do_thaw_all); 934 956 schedule_work(work); 935 957 } 936 958 } ··· 1538 1492 * 1539 1493 * Unlocks the filesystem and marks it writeable again after freeze_super(). 
1540 1494 */ 1541 - int thaw_super(struct super_block *sb) 1495 + static int thaw_super_locked(struct super_block *sb) 1542 1496 { 1543 1497 int error; 1544 1498 1545 - down_write(&sb->s_umount); 1546 1499 if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) { 1547 1500 up_write(&sb->s_umount); 1548 1501 return -EINVAL; ··· 1571 1526 wake_up(&sb->s_writers.wait_unfrozen); 1572 1527 deactivate_locked_super(sb); 1573 1528 return 0; 1529 + } 1530 + 1531 + int thaw_super(struct super_block *sb) 1532 + { 1533 + down_write(&sb->s_umount); 1534 + return thaw_super_locked(sb); 1574 1535 } 1575 1536 EXPORT_SYMBOL(thaw_super);
+6
include/linux/fs.h
··· 2445 2445 extern void kill_bdev(struct block_device *); 2446 2446 extern struct super_block *freeze_bdev(struct block_device *); 2447 2447 extern void emergency_thaw_all(void); 2448 + extern void emergency_thaw_bdev(struct super_block *sb); 2448 2449 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); 2449 2450 extern int fsync_bdev(struct block_device *); 2450 2451 ··· 2467 2466 } 2468 2467 2469 2468 static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) 2469 + { 2470 + return 0; 2471 + } 2472 + 2473 + static inline int emergency_thaw_bdev(struct super_block *sb) 2470 2474 { 2471 2475 return 0; 2472 2476 }