Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

switch the remnants of releasing the mountpoint away from fs_pin

We used to need rather convoluted ordering trickery to guarantee
that dput() of ex-mountpoints happens before the final mntput()
of the same mount. Since we don't need that anymore, there's no
point playing with fs_pin for that.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Al Viro 56cbb429 2763d119

+28 -31
+4 -10
fs/fs_pin.c
··· 19 19 spin_unlock_irq(&pin->wait.lock); 20 20 } 21 21 22 - void pin_insert_group(struct fs_pin *pin, struct vfsmount *m, struct hlist_head *p) 23 - { 24 - spin_lock(&pin_lock); 25 - if (p) 26 - hlist_add_head(&pin->s_list, p); 27 - hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins); 28 - spin_unlock(&pin_lock); 29 - } 30 - 31 22 void pin_insert(struct fs_pin *pin, struct vfsmount *m) 32 23 { 33 - pin_insert_group(pin, m, &m->mnt_sb->s_pins); 24 + spin_lock(&pin_lock); 25 + hlist_add_head(&pin->s_list, &m->mnt_sb->s_pins); 26 + hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins); 27 + spin_unlock(&pin_lock); 34 28 } 35 29 36 30 void pin_kill(struct fs_pin *p)
+5 -2
fs/mount.h
··· 58 58 struct mount *mnt_master; /* slave is on master->mnt_slave_list */ 59 59 struct mnt_namespace *mnt_ns; /* containing namespace */ 60 60 struct mountpoint *mnt_mp; /* where is it mounted */ 61 - struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */ 61 + union { 62 + struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */ 63 + struct hlist_node mnt_umount; 64 + }; 62 65 struct list_head mnt_umounting; /* list entry for umount propagation */ 63 66 #ifdef CONFIG_FSNOTIFY 64 67 struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks; ··· 71 68 int mnt_group_id; /* peer group identifier */ 72 69 int mnt_expiry_mark; /* true if marked for expiry */ 73 70 struct hlist_head mnt_pins; 74 - struct fs_pin mnt_umount; 71 + struct hlist_head mnt_stuck_children; 75 72 } __randomize_layout; 76 73 77 74 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
+19 -18
fs/namespace.c
··· 171 171 #endif 172 172 } 173 173 174 - static void drop_mountpoint(struct fs_pin *p) 175 - { 176 - struct mount *m = container_of(p, struct mount, mnt_umount); 177 - pin_remove(p); 178 - mntput(&m->mnt); 179 - } 180 - 181 174 static struct mount *alloc_vfsmnt(const char *name) 182 175 { 183 176 struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); ··· 208 215 INIT_LIST_HEAD(&mnt->mnt_slave); 209 216 INIT_HLIST_NODE(&mnt->mnt_mp_list); 210 217 INIT_LIST_HEAD(&mnt->mnt_umounting); 211 - init_fs_pin(&mnt->mnt_umount, drop_mountpoint); 218 + INIT_HLIST_HEAD(&mnt->mnt_stuck_children); 212 219 } 213 220 return mnt; 214 221 ··· 1080 1087 1081 1088 static void cleanup_mnt(struct mount *mnt) 1082 1089 { 1090 + struct hlist_node *p; 1091 + struct mount *m; 1083 1092 /* 1084 - * This probably indicates that somebody messed 1085 - * up a mnt_want/drop_write() pair. If this 1086 - * happens, the filesystem was probably unable 1087 - * to make r/w->r/o transitions. 1088 - */ 1089 - /* 1093 + * The warning here probably indicates that somebody messed 1094 + * up a mnt_want/drop_write() pair. If this happens, the 1095 + * filesystem was probably unable to make r/w->r/o transitions. 1090 1096 * The locking used to deal with mnt_count decrement provides barriers, 1091 1097 * so mnt_get_writers() below is safe. 
1092 1098 */ 1093 1099 WARN_ON(mnt_get_writers(mnt)); 1094 1100 if (unlikely(mnt->mnt_pins.first)) 1095 1101 mnt_pin_kill(mnt); 1102 + hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) { 1103 + hlist_del(&m->mnt_umount); 1104 + mntput(&m->mnt); 1105 + } 1096 1106 fsnotify_vfsmount_delete(&mnt->mnt); 1097 1107 dput(mnt->mnt.mnt_root); 1098 1108 deactivate_super(mnt->mnt.mnt_sb); ··· 1164 1168 struct mount *p, *tmp; 1165 1169 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { 1166 1170 __put_mountpoint(unhash_mnt(p), &list); 1171 + hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children); 1167 1172 } 1168 1173 } 1169 1174 unlock_mount_hash(); ··· 1357 1360 static void namespace_unlock(void) 1358 1361 { 1359 1362 struct hlist_head head; 1363 + struct hlist_node *p; 1364 + struct mount *m; 1360 1365 LIST_HEAD(list); 1361 1366 1362 1367 hlist_move_list(&unmounted, &head); ··· 1373 1374 1374 1375 synchronize_rcu_expedited(); 1375 1376 1376 - group_pin_kill(&head); 1377 + hlist_for_each_entry_safe(m, p, &head, mnt_umount) { 1378 + hlist_del(&m->mnt_umount); 1379 + mntput(&m->mnt); 1380 + } 1377 1381 } 1378 1382 1379 1383 static inline void namespace_lock(void) ··· 1463 1461 1464 1462 disconnect = disconnect_mount(p, how); 1465 1463 1466 - pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, 1467 - disconnect ? 
&unmounted : NULL); 1468 1464 if (mnt_has_parent(p)) { 1469 1465 mnt_add_count(p->mnt_parent, -1); 1470 1466 if (!disconnect) { ··· 1470 1470 list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); 1471 1471 } else { 1472 1472 umount_mnt(p); 1473 + hlist_add_head(&p->mnt_umount, &unmounted); 1473 1474 } 1474 1475 } 1475 1476 change_mnt_propagation(p, MS_PRIVATE); ··· 1623 1622 while (!hlist_empty(&mp->m_list)) { 1624 1623 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1625 1624 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 1626 - hlist_add_head(&mnt->mnt_umount.s_list, &unmounted); 1627 1625 umount_mnt(mnt); 1626 + hlist_add_head(&mnt->mnt_umount, &unmounted); 1628 1627 } 1629 1628 else umount_tree(mnt, UMOUNT_CONNECTED); 1630 1629 }
-1
include/linux/fs_pin.h
··· 20 20 } 21 21 22 22 void pin_remove(struct fs_pin *); 23 - void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *); 24 23 void pin_insert(struct fs_pin *, struct vfsmount *); 25 24 void pin_kill(struct fs_pin *);