Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs pile 2 (of many) from Al Viro:
"Mostly Miklos' series this time"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
constify dcache.c inlined helpers where possible
fuse: drop dentry on failed revalidate
fuse: clean up return in fuse_dentry_revalidate()
fuse: use d_materialise_unique()
sysfs: use check_submounts_and_drop()
nfs: use check_submounts_and_drop()
gfs2: use check_submounts_and_drop()
afs: use check_submounts_and_drop()
vfs: check unlinked ancestors before mount
vfs: check submounts and drop atomically
vfs: add d_walk()
vfs: restructure d_genocide()

+326 -261
+3 -7
fs/afs/dir.c
··· 685 685 spin_unlock(&dentry->d_lock); 686 686 687 687 out_bad: 688 - if (dentry->d_inode) { 689 - /* don't unhash if we have submounts */ 690 - if (have_submounts(dentry)) 691 - goto out_skip; 692 - } 688 + /* don't unhash if we have submounts */ 689 + if (check_submounts_and_drop(dentry) != 0) 690 + goto out_skip; 693 691 694 692 _debug("dropping dentry %s/%s", 695 693 parent->d_name.name, dentry->d_name.name); 696 - shrink_dcache_parent(dentry); 697 - d_drop(dentry); 698 694 dput(parent); 699 695 key_put(key); 700 696
+249 -168
fs/dcache.c
··· 1031 1031 return new; 1032 1032 } 1033 1033 1034 - 1035 - /* 1036 - * Search for at least 1 mount point in the dentry's subdirs. 1037 - * We descend to the next level whenever the d_subdirs 1038 - * list is non-empty and continue searching. 1039 - */ 1040 - 1041 1034 /** 1042 - * have_submounts - check for mounts over a dentry 1043 - * @parent: dentry to check. 1044 - * 1045 - * Return true if the parent or its subdirectories contain 1046 - * a mount point 1035 + * enum d_walk_ret - action to take during tree walk 1036 + * @D_WALK_CONTINUE: continue walk 1037 + * @D_WALK_QUIT: quit walk 1038 + * @D_WALK_NORETRY: quit when retry is needed 1039 + * @D_WALK_SKIP: skip this dentry and its children 1047 1040 */ 1048 - int have_submounts(struct dentry *parent) 1041 + enum d_walk_ret { 1042 + D_WALK_CONTINUE, 1043 + D_WALK_QUIT, 1044 + D_WALK_NORETRY, 1045 + D_WALK_SKIP, 1046 + }; 1047 + 1048 + /** 1049 + * d_walk - walk the dentry tree 1050 + * @parent: start of walk 1051 + * @data: data passed to @enter() and @finish() 1052 + * @enter: callback when first entering the dentry 1053 + * @finish: callback when successfully finished the walk 1054 + * 1055 + * The @enter() and @finish() callbacks are called with d_lock held. 
1056 + */ 1057 + static void d_walk(struct dentry *parent, void *data, 1058 + enum d_walk_ret (*enter)(void *, struct dentry *), 1059 + void (*finish)(void *)) 1049 1060 { 1050 1061 struct dentry *this_parent; 1051 1062 struct list_head *next; 1052 1063 unsigned seq; 1053 1064 int locked = 0; 1065 + enum d_walk_ret ret; 1066 + bool retry = true; 1054 1067 1055 1068 seq = read_seqbegin(&rename_lock); 1056 1069 again: 1057 1070 this_parent = parent; 1058 - 1059 - if (d_mountpoint(parent)) 1060 - goto positive; 1061 1071 spin_lock(&this_parent->d_lock); 1072 + 1073 + ret = enter(data, this_parent); 1074 + switch (ret) { 1075 + case D_WALK_CONTINUE: 1076 + break; 1077 + case D_WALK_QUIT: 1078 + case D_WALK_SKIP: 1079 + goto out_unlock; 1080 + case D_WALK_NORETRY: 1081 + retry = false; 1082 + break; 1083 + } 1062 1084 repeat: 1063 1085 next = this_parent->d_subdirs.next; 1064 1086 resume: ··· 1090 1068 next = tmp->next; 1091 1069 1092 1070 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1093 - /* Have we found a mount point ? 
*/ 1094 - if (d_mountpoint(dentry)) { 1071 + 1072 + ret = enter(data, dentry); 1073 + switch (ret) { 1074 + case D_WALK_CONTINUE: 1075 + break; 1076 + case D_WALK_QUIT: 1095 1077 spin_unlock(&dentry->d_lock); 1096 - spin_unlock(&this_parent->d_lock); 1097 - goto positive; 1078 + goto out_unlock; 1079 + case D_WALK_NORETRY: 1080 + retry = false; 1081 + break; 1082 + case D_WALK_SKIP: 1083 + spin_unlock(&dentry->d_lock); 1084 + continue; 1098 1085 } 1086 + 1099 1087 if (!list_empty(&dentry->d_subdirs)) { 1100 1088 spin_unlock(&this_parent->d_lock); 1101 1089 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); ··· 1126 1094 next = child->d_u.d_child.next; 1127 1095 goto resume; 1128 1096 } 1097 + if (!locked && read_seqretry(&rename_lock, seq)) { 1098 + spin_unlock(&this_parent->d_lock); 1099 + goto rename_retry; 1100 + } 1101 + if (finish) 1102 + finish(data); 1103 + 1104 + out_unlock: 1129 1105 spin_unlock(&this_parent->d_lock); 1130 - if (!locked && read_seqretry(&rename_lock, seq)) 1131 - goto rename_retry; 1132 1106 if (locked) 1133 1107 write_sequnlock(&rename_lock); 1134 - return 0; /* No mount points found in tree */ 1135 - positive: 1136 - if (!locked && read_seqretry(&rename_lock, seq)) 1137 - goto rename_retry; 1138 - if (locked) 1139 - write_sequnlock(&rename_lock); 1140 - return 1; 1108 + return; 1141 1109 1142 1110 rename_retry: 1111 + if (!retry) 1112 + return; 1143 1113 if (locked) 1144 1114 goto again; 1145 1115 locked = 1; 1146 1116 write_seqlock(&rename_lock); 1147 1117 goto again; 1148 1118 } 1119 + 1120 + /* 1121 + * Search for at least 1 mount point in the dentry's subdirs. 1122 + * We descend to the next level whenever the d_subdirs 1123 + * list is non-empty and continue searching. 1124 + */ 1125 + 1126 + /** 1127 + * have_submounts - check for mounts over a dentry 1128 + * @parent: dentry to check. 
1129 + * 1130 + * Return true if the parent or its subdirectories contain 1131 + * a mount point 1132 + */ 1133 + 1134 + static enum d_walk_ret check_mount(void *data, struct dentry *dentry) 1135 + { 1136 + int *ret = data; 1137 + if (d_mountpoint(dentry)) { 1138 + *ret = 1; 1139 + return D_WALK_QUIT; 1140 + } 1141 + return D_WALK_CONTINUE; 1142 + } 1143 + 1144 + int have_submounts(struct dentry *parent) 1145 + { 1146 + int ret = 0; 1147 + 1148 + d_walk(parent, &ret, check_mount, NULL); 1149 + 1150 + return ret; 1151 + } 1149 1152 EXPORT_SYMBOL(have_submounts); 1153 + 1154 + /* 1155 + * Called by mount code to set a mountpoint and check if the mountpoint is 1156 + * reachable (e.g. NFS can unhash a directory dentry and then the complete 1157 + * subtree can become unreachable). 1158 + * 1159 + * Only one of check_submounts_and_drop() and d_set_mounted() must succeed. For 1160 + * this reason take rename_lock and d_lock on dentry and ancestors. 1161 + */ 1162 + int d_set_mounted(struct dentry *dentry) 1163 + { 1164 + struct dentry *p; 1165 + int ret = -ENOENT; 1166 + write_seqlock(&rename_lock); 1167 + for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { 1168 + /* Need exclusion wrt. check_submounts_and_drop() */ 1169 + spin_lock(&p->d_lock); 1170 + if (unlikely(d_unhashed(p))) { 1171 + spin_unlock(&p->d_lock); 1172 + goto out; 1173 + } 1174 + spin_unlock(&p->d_lock); 1175 + } 1176 + spin_lock(&dentry->d_lock); 1177 + if (!d_unlinked(dentry)) { 1178 + dentry->d_flags |= DCACHE_MOUNTED; 1179 + ret = 0; 1180 + } 1181 + spin_unlock(&dentry->d_lock); 1182 + out: 1183 + write_sequnlock(&rename_lock); 1184 + return ret; 1185 + } 1150 1186 1151 1187 /* 1152 1188 * Search the dentry child list of the specified parent, ··· 1230 1130 * drop the lock and return early due to latency 1231 1131 * constraints. 
1232 1132 */ 1233 - static int select_parent(struct dentry *parent, struct list_head *dispose) 1133 + 1134 + struct select_data { 1135 + struct dentry *start; 1136 + struct list_head dispose; 1137 + int found; 1138 + }; 1139 + 1140 + static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) 1234 1141 { 1235 - struct dentry *this_parent; 1236 - struct list_head *next; 1237 - unsigned seq; 1238 - int found = 0; 1239 - int locked = 0; 1142 + struct select_data *data = _data; 1143 + enum d_walk_ret ret = D_WALK_CONTINUE; 1240 1144 1241 - seq = read_seqbegin(&rename_lock); 1242 - again: 1243 - this_parent = parent; 1244 - spin_lock(&this_parent->d_lock); 1245 - repeat: 1246 - next = this_parent->d_subdirs.next; 1247 - resume: 1248 - while (next != &this_parent->d_subdirs) { 1249 - struct list_head *tmp = next; 1250 - struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 1251 - next = tmp->next; 1145 + if (data->start == dentry) 1146 + goto out; 1252 1147 1253 - spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 1254 - 1255 - /* 1256 - * move only zero ref count dentries to the dispose list. 1257 - * 1258 - * Those which are presently on the shrink list, being processed 1259 - * by shrink_dentry_list(), shouldn't be moved. Otherwise the 1260 - * loop in shrink_dcache_parent() might not make any progress 1261 - * and loop forever. 1262 - */ 1263 - if (dentry->d_lockref.count) { 1264 - dentry_lru_del(dentry); 1265 - } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { 1266 - dentry_lru_move_list(dentry, dispose); 1267 - dentry->d_flags |= DCACHE_SHRINK_LIST; 1268 - found++; 1269 - } 1270 - /* 1271 - * We can return to the caller if we have found some (this 1272 - * ensures forward progress). We'll be coming back to find 1273 - * the rest. 1274 - */ 1275 - if (found && need_resched()) { 1276 - spin_unlock(&dentry->d_lock); 1277 - goto out; 1278 - } 1279 - 1280 - /* 1281 - * Descend a level if the d_subdirs list is non-empty. 
1282 - */ 1283 - if (!list_empty(&dentry->d_subdirs)) { 1284 - spin_unlock(&this_parent->d_lock); 1285 - spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 1286 - this_parent = dentry; 1287 - spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 1288 - goto repeat; 1289 - } 1290 - 1291 - spin_unlock(&dentry->d_lock); 1148 + /* 1149 + * move only zero ref count dentries to the dispose list. 1150 + * 1151 + * Those which are presently on the shrink list, being processed 1152 + * by shrink_dentry_list(), shouldn't be moved. Otherwise the 1153 + * loop in shrink_dcache_parent() might not make any progress 1154 + * and loop forever. 1155 + */ 1156 + if (dentry->d_lockref.count) { 1157 + dentry_lru_del(dentry); 1158 + } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { 1159 + dentry_lru_move_list(dentry, &data->dispose); 1160 + dentry->d_flags |= DCACHE_SHRINK_LIST; 1161 + data->found++; 1162 + ret = D_WALK_NORETRY; 1292 1163 } 1293 1164 /* 1294 - * All done at this level ... ascend and resume the search. 1165 + * We can return to the caller if we have found some (this 1166 + * ensures forward progress). We'll be coming back to find 1167 + * the rest. 
1295 1168 */ 1296 - if (this_parent != parent) { 1297 - struct dentry *child = this_parent; 1298 - this_parent = try_to_ascend(this_parent, locked, seq); 1299 - if (!this_parent) 1300 - goto rename_retry; 1301 - next = child->d_u.d_child.next; 1302 - goto resume; 1303 - } 1169 + if (data->found && need_resched()) 1170 + ret = D_WALK_QUIT; 1304 1171 out: 1305 - spin_unlock(&this_parent->d_lock); 1306 - if (!locked && read_seqretry(&rename_lock, seq)) 1307 - goto rename_retry; 1308 - if (locked) 1309 - write_sequnlock(&rename_lock); 1310 - return found; 1311 - 1312 - rename_retry: 1313 - if (found) 1314 - return found; 1315 - if (locked) 1316 - goto again; 1317 - locked = 1; 1318 - write_seqlock(&rename_lock); 1319 - goto again; 1172 + return ret; 1320 1173 } 1321 1174 1322 1175 /** ··· 1278 1225 * 1279 1226 * Prune the dcache to remove unused children of the parent dentry. 1280 1227 */ 1281 - void shrink_dcache_parent(struct dentry * parent) 1228 + void shrink_dcache_parent(struct dentry *parent) 1282 1229 { 1283 - LIST_HEAD(dispose); 1284 - int found; 1230 + for (;;) { 1231 + struct select_data data; 1285 1232 1286 - while ((found = select_parent(parent, &dispose)) != 0) { 1287 - shrink_dentry_list(&dispose); 1233 + INIT_LIST_HEAD(&data.dispose); 1234 + data.start = parent; 1235 + data.found = 0; 1236 + 1237 + d_walk(parent, &data, select_collect, NULL); 1238 + if (!data.found) 1239 + break; 1240 + 1241 + shrink_dentry_list(&data.dispose); 1288 1242 cond_resched(); 1289 1243 } 1290 1244 } 1291 1245 EXPORT_SYMBOL(shrink_dcache_parent); 1246 + 1247 + static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry) 1248 + { 1249 + struct select_data *data = _data; 1250 + 1251 + if (d_mountpoint(dentry)) { 1252 + data->found = -EBUSY; 1253 + return D_WALK_QUIT; 1254 + } 1255 + 1256 + return select_collect(_data, dentry); 1257 + } 1258 + 1259 + static void check_and_drop(void *_data) 1260 + { 1261 + struct select_data *data = _data; 1262 + 1263 + if 
(d_mountpoint(data->start)) 1264 + data->found = -EBUSY; 1265 + if (!data->found) 1266 + __d_drop(data->start); 1267 + } 1268 + 1269 + /** 1270 + * check_submounts_and_drop - prune dcache, check for submounts and drop 1271 + * 1272 + * All done as a single atomic operation relative to has_unlinked_ancestor(). 1273 + * Returns 0 if successfully unhashed @parent. If there were submounts then 1274 + * return -EBUSY. 1275 + * 1276 + * @dentry: dentry to prune and drop 1277 + */ 1278 + int check_submounts_and_drop(struct dentry *dentry) 1279 + { 1280 + int ret = 0; 1281 + 1282 + /* Negative dentries can be dropped without further checks */ 1283 + if (!dentry->d_inode) { 1284 + d_drop(dentry); 1285 + goto out; 1286 + } 1287 + 1288 + for (;;) { 1289 + struct select_data data; 1290 + 1291 + INIT_LIST_HEAD(&data.dispose); 1292 + data.start = dentry; 1293 + data.found = 0; 1294 + 1295 + d_walk(dentry, &data, check_and_collect, check_and_drop); 1296 + ret = data.found; 1297 + 1298 + if (!list_empty(&data.dispose)) 1299 + shrink_dentry_list(&data.dispose); 1300 + 1301 + if (ret <= 0) 1302 + break; 1303 + 1304 + cond_resched(); 1305 + } 1306 + 1307 + out: 1308 + return ret; 1309 + } 1310 + EXPORT_SYMBOL(check_submounts_and_drop); 1292 1311 1293 1312 /** 1294 1313 * __d_alloc - allocate a dcache entry ··· 3053 2928 return result; 3054 2929 } 3055 2930 3056 - void d_genocide(struct dentry *root) 2931 + static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry) 3057 2932 { 3058 - struct dentry *this_parent; 3059 - struct list_head *next; 3060 - unsigned seq; 3061 - int locked = 0; 2933 + struct dentry *root = data; 2934 + if (dentry != root) { 2935 + if (d_unhashed(dentry) || !dentry->d_inode) 2936 + return D_WALK_SKIP; 3062 2937 3063 - seq = read_seqbegin(&rename_lock); 3064 - again: 3065 - this_parent = root; 3066 - spin_lock(&this_parent->d_lock); 3067 - repeat: 3068 - next = this_parent->d_subdirs.next; 3069 - resume: 3070 - while (next != 
&this_parent->d_subdirs) { 3071 - struct list_head *tmp = next; 3072 - struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); 3073 - next = tmp->next; 3074 - 3075 - spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 3076 - if (d_unhashed(dentry) || !dentry->d_inode) { 3077 - spin_unlock(&dentry->d_lock); 3078 - continue; 3079 - } 3080 - if (!list_empty(&dentry->d_subdirs)) { 3081 - spin_unlock(&this_parent->d_lock); 3082 - spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 3083 - this_parent = dentry; 3084 - spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 3085 - goto repeat; 3086 - } 3087 2938 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 3088 2939 dentry->d_flags |= DCACHE_GENOCIDE; 3089 2940 dentry->d_lockref.count--; 3090 2941 } 3091 - spin_unlock(&dentry->d_lock); 3092 2942 } 3093 - if (this_parent != root) { 3094 - struct dentry *child = this_parent; 3095 - if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { 3096 - this_parent->d_flags |= DCACHE_GENOCIDE; 3097 - this_parent->d_lockref.count--; 3098 - } 3099 - this_parent = try_to_ascend(this_parent, locked, seq); 3100 - if (!this_parent) 3101 - goto rename_retry; 3102 - next = child->d_u.d_child.next; 3103 - goto resume; 3104 - } 3105 - spin_unlock(&this_parent->d_lock); 3106 - if (!locked && read_seqretry(&rename_lock, seq)) 3107 - goto rename_retry; 3108 - if (locked) 3109 - write_sequnlock(&rename_lock); 3110 - return; 2943 + return D_WALK_CONTINUE; 2944 + } 3111 2945 3112 - rename_retry: 3113 - if (locked) 3114 - goto again; 3115 - locked = 1; 3116 - write_seqlock(&rename_lock); 3117 - goto again; 2946 + void d_genocide(struct dentry *parent) 2947 + { 2948 + d_walk(parent, parent, d_genocide_kill, NULL); 3118 2949 } 3119 2950 3120 2951 void d_tmpfile(struct dentry *dentry, struct inode *inode)
+46 -51
fs/fuse/dir.c
··· 182 182 struct inode *inode; 183 183 struct dentry *parent; 184 184 struct fuse_conn *fc; 185 + int ret; 185 186 186 187 inode = ACCESS_ONCE(entry->d_inode); 187 188 if (inode && is_bad_inode(inode)) 188 - return 0; 189 + goto invalid; 189 190 else if (fuse_dentry_time(entry) < get_jiffies_64()) { 190 191 int err; 191 192 struct fuse_entry_out outarg; ··· 196 195 197 196 /* For negative dentries, always do a fresh lookup */ 198 197 if (!inode) 199 - return 0; 198 + goto invalid; 200 199 200 + ret = -ECHILD; 201 201 if (flags & LOOKUP_RCU) 202 - return -ECHILD; 202 + goto out; 203 203 204 204 fc = get_fuse_conn(inode); 205 205 req = fuse_get_req_nopages(fc); 206 + ret = PTR_ERR(req); 206 207 if (IS_ERR(req)) 207 - return 0; 208 + goto out; 208 209 209 210 forget = fuse_alloc_forget(); 210 211 if (!forget) { 211 212 fuse_put_request(fc, req); 212 - return 0; 213 + ret = -ENOMEM; 214 + goto out; 213 215 } 214 216 215 217 attr_version = fuse_get_attr_version(fc); ··· 231 227 struct fuse_inode *fi = get_fuse_inode(inode); 232 228 if (outarg.nodeid != get_node_id(inode)) { 233 229 fuse_queue_forget(fc, forget, outarg.nodeid, 1); 234 - return 0; 230 + goto invalid; 235 231 } 236 232 spin_lock(&fc->lock); 237 233 fi->nlookup++; ··· 239 235 } 240 236 kfree(forget); 241 237 if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT) 242 - return 0; 238 + goto invalid; 243 239 244 240 fuse_change_attributes(inode, &outarg.attr, 245 241 entry_attr_timeout(&outarg), ··· 253 249 dput(parent); 254 250 } 255 251 } 256 - return 1; 252 + ret = 1; 253 + out: 254 + return ret; 255 + 256 + invalid: 257 + ret = 0; 258 + if (check_submounts_and_drop(entry) != 0) 259 + ret = 1; 260 + goto out; 257 261 } 258 262 259 263 static int invalid_nodeid(u64 nodeid) ··· 277 265 { 278 266 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || 279 267 S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m); 280 - } 281 - 282 - /* 283 - * Add a directory inode to a dentry, ensuring that no other dentry 284 - 
* refers to this inode. Called with fc->inst_mutex. 285 - */ 286 - static struct dentry *fuse_d_add_directory(struct dentry *entry, 287 - struct inode *inode) 288 - { 289 - struct dentry *alias = d_find_alias(inode); 290 - if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) { 291 - /* This tries to shrink the subtree below alias */ 292 - fuse_invalidate_entry(alias); 293 - dput(alias); 294 - if (!hlist_empty(&inode->i_dentry)) 295 - return ERR_PTR(-EBUSY); 296 - } else { 297 - dput(alias); 298 - } 299 - return d_splice_alias(inode, entry); 300 268 } 301 269 302 270 int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name, ··· 337 345 return err; 338 346 } 339 347 348 + static struct dentry *fuse_materialise_dentry(struct dentry *dentry, 349 + struct inode *inode) 350 + { 351 + struct dentry *newent; 352 + 353 + if (inode && S_ISDIR(inode->i_mode)) { 354 + struct fuse_conn *fc = get_fuse_conn(inode); 355 + 356 + mutex_lock(&fc->inst_mutex); 357 + newent = d_materialise_unique(dentry, inode); 358 + mutex_unlock(&fc->inst_mutex); 359 + } else { 360 + newent = d_materialise_unique(dentry, inode); 361 + } 362 + 363 + return newent; 364 + } 365 + 340 366 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, 341 367 unsigned int flags) 342 368 { ··· 362 352 struct fuse_entry_out outarg; 363 353 struct inode *inode; 364 354 struct dentry *newent; 365 - struct fuse_conn *fc = get_fuse_conn(dir); 366 355 bool outarg_valid = true; 367 356 368 357 err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, ··· 377 368 if (inode && get_node_id(inode) == FUSE_ROOT_ID) 378 369 goto out_iput; 379 370 380 - if (inode && S_ISDIR(inode->i_mode)) { 381 - mutex_lock(&fc->inst_mutex); 382 - newent = fuse_d_add_directory(entry, inode); 383 - mutex_unlock(&fc->inst_mutex); 384 - err = PTR_ERR(newent); 385 - if (IS_ERR(newent)) 386 - goto out_iput; 387 - } else { 388 - newent = d_splice_alias(inode, entry); 389 - } 371 + newent = 
fuse_materialise_dentry(entry, inode); 372 + err = PTR_ERR(newent); 373 + if (IS_ERR(newent)) 374 + goto out_err; 390 375 391 376 entry = newent ? newent : entry; 392 377 if (outarg_valid) ··· 1278 1275 if (!inode) 1279 1276 goto out; 1280 1277 1281 - if (S_ISDIR(inode->i_mode)) { 1282 - mutex_lock(&fc->inst_mutex); 1283 - alias = fuse_d_add_directory(dentry, inode); 1284 - mutex_unlock(&fc->inst_mutex); 1285 - err = PTR_ERR(alias); 1286 - if (IS_ERR(alias)) { 1287 - iput(inode); 1288 - goto out; 1289 - } 1290 - } else { 1291 - alias = d_splice_alias(inode, dentry); 1292 - } 1278 + alias = fuse_materialise_dentry(dentry, inode); 1279 + err = PTR_ERR(alias); 1280 + if (IS_ERR(alias)) 1281 + goto out; 1293 1282 1294 1283 if (alias) { 1295 1284 dput(dentry);
+3 -6
fs/gfs2/dentry.c
··· 93 93 if (!had_lock) 94 94 gfs2_glock_dq_uninit(&d_gh); 95 95 invalid: 96 - if (inode && S_ISDIR(inode->i_mode)) { 97 - if (have_submounts(dentry)) 98 - goto valid; 99 - shrink_dcache_parent(dentry); 100 - } 101 - d_drop(dentry); 96 + if (check_submounts_and_drop(dentry) != 0) 97 + goto valid; 98 + 102 99 dput(parent); 103 100 return 0; 104 101
+1
fs/internal.h
··· 126 126 * dcache.c 127 127 */ 128 128 extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); 129 + extern int d_set_mounted(struct dentry *dentry); 129 130 130 131 /* 131 132 * read_write.c
+5 -6
fs/namespace.c
··· 611 611 { 612 612 struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry); 613 613 struct mountpoint *mp; 614 + int ret; 614 615 615 616 list_for_each_entry(mp, chain, m_hash) { 616 617 if (mp->m_dentry == dentry) { ··· 627 626 if (!mp) 628 627 return ERR_PTR(-ENOMEM); 629 628 630 - spin_lock(&dentry->d_lock); 631 - if (d_unlinked(dentry)) { 632 - spin_unlock(&dentry->d_lock); 629 + ret = d_set_mounted(dentry); 630 + if (ret) { 633 631 kfree(mp); 634 - return ERR_PTR(-ENOENT); 632 + return ERR_PTR(ret); 635 633 } 636 - dentry->d_flags |= DCACHE_MOUNTED; 637 - spin_unlock(&dentry->d_lock); 634 + 638 635 mp->m_dentry = dentry; 639 636 mp->m_count = 1; 640 637 list_add(&mp->m_hash, chain);
+4 -5
fs/nfs/dir.c
··· 1135 1135 if (inode && S_ISDIR(inode->i_mode)) { 1136 1136 /* Purge readdir caches. */ 1137 1137 nfs_zap_caches(inode); 1138 - /* If we have submounts, don't unhash ! */ 1139 - if (have_submounts(dentry)) 1140 - goto out_valid; 1141 1138 if (dentry->d_flags & DCACHE_DISCONNECTED) 1142 1139 goto out_valid; 1143 - shrink_dcache_parent(dentry); 1144 1140 } 1145 - d_drop(dentry); 1141 + /* If we have submounts, don't unhash ! */ 1142 + if (check_submounts_and_drop(dentry) != 0) 1143 + goto out_valid; 1144 + 1146 1145 dput(parent); 1147 1146 dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n", 1148 1147 __func__, dentry->d_parent->d_name.name,
+8 -12
fs/sysfs/dir.c
··· 297 297 static int sysfs_dentry_revalidate(struct dentry *dentry, unsigned int flags) 298 298 { 299 299 struct sysfs_dirent *sd; 300 - int is_dir; 301 300 int type; 302 301 303 302 if (flags & LOOKUP_RCU) ··· 340 341 * is performed at its new name the dentry will be readded 341 342 * to the dcache hashes. 342 343 */ 343 - is_dir = (sysfs_type(sd) == SYSFS_DIR); 344 344 mutex_unlock(&sysfs_mutex); 345 - if (is_dir) { 346 - /* If we have submounts we must allow the vfs caches 347 - * to lie about the state of the filesystem to prevent 348 - * leaks and other nasty things. 349 - */ 350 - if (have_submounts(dentry)) 351 - goto out_valid; 352 - shrink_dcache_parent(dentry); 353 - } 354 - d_drop(dentry); 345 + 346 + /* If we have submounts we must allow the vfs caches 347 + * to lie about the state of the filesystem to prevent 348 + * leaks and other nasty things. 349 + */ 350 + if (check_submounts_and_drop(dentry) != 0) 351 + goto out_valid; 352 + 355 353 return 0; 356 354 } 357 355
+7 -6
include/linux/dcache.h
··· 212 212 213 213 extern seqlock_t rename_lock; 214 214 215 - static inline int dname_external(struct dentry *dentry) 215 + static inline int dname_external(const struct dentry *dentry) 216 216 { 217 217 return dentry->d_name.name != dentry->d_iname; 218 218 } ··· 253 253 254 254 /* test whether we have any submounts in a subdir tree */ 255 255 extern int have_submounts(struct dentry *); 256 + extern int check_submounts_and_drop(struct dentry *); 256 257 257 258 /* 258 259 * This adds the entry to the hash queues. ··· 358 357 * Returns true if the dentry passed is not currently hashed. 359 358 */ 360 359 361 - static inline int d_unhashed(struct dentry *dentry) 360 + static inline int d_unhashed(const struct dentry *dentry) 362 361 { 363 362 return hlist_bl_unhashed(&dentry->d_hash); 364 363 } 365 364 366 - static inline int d_unlinked(struct dentry *dentry) 365 + static inline int d_unlinked(const struct dentry *dentry) 367 366 { 368 367 return d_unhashed(dentry) && !IS_ROOT(dentry); 369 368 } 370 369 371 - static inline int cant_mount(struct dentry *dentry) 370 + static inline int cant_mount(const struct dentry *dentry) 372 371 { 373 372 return (dentry->d_flags & DCACHE_CANT_MOUNT); 374 373 } ··· 382 381 383 382 extern void dput(struct dentry *); 384 383 385 - static inline bool d_managed(struct dentry *dentry) 384 + static inline bool d_managed(const struct dentry *dentry) 386 385 { 387 386 return dentry->d_flags & DCACHE_MANAGED_DENTRY; 388 387 } 389 388 390 - static inline bool d_mountpoint(struct dentry *dentry) 389 + static inline bool d_mountpoint(const struct dentry *dentry) 391 390 { 392 391 return dentry->d_flags & DCACHE_MOUNTED; 393 392 }