Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mfasheh/ocfs2

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mfasheh/ocfs2:
[PATCH] ocfs2: fix oops in mmap_truncate testing
configfs: call drop_link() to cleanup after create_link() failure
configfs: Allow ->make_item() and ->make_group() to return detailed errors.
configfs: Fix failing mkdir() making racing rmdir() fail
configfs: Fix deadlock with racing rmdir() and rename()
configfs: Make configfs_new_dirent() return error code instead of NULL
configfs: Protect configfs_dirent s_links list mutations
configfs: Introduce configfs_dirent_lock
ocfs2: Don't snprintf() without a format.
ocfs2: Fix CONFIG_OCFS2_DEBUG_FS #ifdefs
ocfs2/net: Silence build warnings on sparc64
ocfs2: Handle error during journal load
ocfs2: Silence an error message in ocfs2_file_aio_read()
ocfs2: use simple_read_from_buffer()
ocfs2: fix printk format warnings with OCFS2_FS_STATS=n
[PATCH 2/2] ocfs2: Instrument fs cluster locks
[PATCH 1/2] ocfs2: Add CONFIG_OCFS2_FS_STATS config option

+383 -127
+6 -4
Documentation/filesystems/configfs/configfs.txt
··· 233 config_item_type. 234 235 struct configfs_group_operations { 236 - struct config_item *(*make_item)(struct config_group *group, 237 - const char *name); 238 - struct config_group *(*make_group)(struct config_group *group, 239 - const char *name); 240 int (*commit_item)(struct config_item *item); 241 void (*disconnect_notify)(struct config_group *group, 242 struct config_item *item);
··· 233 config_item_type. 234 235 struct configfs_group_operations { 236 + int (*make_item)(struct config_group *group, 237 + const char *name, 238 + struct config_item **new_item); 239 + int (*make_group)(struct config_group *group, 240 + const char *name, 241 + struct config_group **new_group); 242 int (*commit_item)(struct config_item *item); 243 void (*disconnect_notify)(struct config_group *group, 244 struct config_item *item);
+8 -6
Documentation/filesystems/configfs/configfs_example.c
··· 273 return item ? container_of(to_config_group(item), struct simple_children, group) : NULL; 274 } 275 276 - static struct config_item *simple_children_make_item(struct config_group *group, const char *name) 277 { 278 struct simple_child *simple_child; 279 280 simple_child = kzalloc(sizeof(struct simple_child), GFP_KERNEL); 281 if (!simple_child) 282 - return NULL; 283 284 285 config_item_init_type_name(&simple_child->item, name, ··· 287 288 simple_child->storeme = 0; 289 290 - return &simple_child->item; 291 } 292 293 static struct configfs_attribute simple_children_attr_description = { ··· 360 * children of its own. 361 */ 362 363 - static struct config_group *group_children_make_group(struct config_group *group, const char *name) 364 { 365 struct simple_children *simple_children; 366 367 simple_children = kzalloc(sizeof(struct simple_children), 368 GFP_KERNEL); 369 if (!simple_children) 370 - return NULL; 371 372 373 config_group_init_type_name(&simple_children->group, name, 374 &simple_children_type); 375 376 - return &simple_children->group; 377 } 378 379 static struct configfs_attribute group_children_attr_description = {
··· 273 return item ? container_of(to_config_group(item), struct simple_children, group) : NULL; 274 } 275 276 + static int simple_children_make_item(struct config_group *group, const char *name, struct config_item **new_item) 277 { 278 struct simple_child *simple_child; 279 280 simple_child = kzalloc(sizeof(struct simple_child), GFP_KERNEL); 281 if (!simple_child) 282 + return -ENOMEM; 283 284 285 config_item_init_type_name(&simple_child->item, name, ··· 287 288 simple_child->storeme = 0; 289 290 + *new_item = &simple_child->item; 291 + return 0; 292 } 293 294 static struct configfs_attribute simple_children_attr_description = { ··· 359 * children of its own. 360 */ 361 362 + static int group_children_make_group(struct config_group *group, const char *name, struct config_group **new_group) 363 { 364 struct simple_children *simple_children; 365 366 simple_children = kzalloc(sizeof(struct simple_children), 367 GFP_KERNEL); 368 if (!simple_children) 369 + return -ENOMEM; 370 371 372 config_group_init_type_name(&simple_children->group, name, 373 &simple_children_type); 374 375 + *new_group = &simple_children->group; 376 + return 0; 377 } 378 379 static struct configfs_attribute group_children_attr_description = {
+6 -4
drivers/net/netconsole.c
··· 585 * Group operations and type for netconsole_subsys. 586 */ 587 588 - static struct config_item *make_netconsole_target(struct config_group *group, 589 - const char *name) 590 { 591 unsigned long flags; 592 struct netconsole_target *nt; ··· 599 nt = kzalloc(sizeof(*nt), GFP_KERNEL); 600 if (!nt) { 601 printk(KERN_ERR "netconsole: failed to allocate memory\n"); 602 - return NULL; 603 } 604 605 nt->np.name = "netconsole"; ··· 616 list_add(&nt->list, &target_list); 617 spin_unlock_irqrestore(&target_list_lock, flags); 618 619 - return &nt->item; 620 } 621 622 static void drop_netconsole_target(struct config_group *group,
··· 585 * Group operations and type for netconsole_subsys. 586 */ 587 588 + static int make_netconsole_target(struct config_group *group, 589 + const char *name, 590 + struct config_item **new_item) 591 { 592 unsigned long flags; 593 struct netconsole_target *nt; ··· 598 nt = kzalloc(sizeof(*nt), GFP_KERNEL); 599 if (!nt) { 600 printk(KERN_ERR "netconsole: failed to allocate memory\n"); 601 + return -ENOMEM; 602 } 603 604 nt->np.name = "netconsole"; ··· 615 list_add(&nt->list, &target_list); 616 spin_unlock_irqrestore(&target_list_lock, flags); 617 618 + *new_item = &nt->item; 619 + return 0; 620 } 621 622 static void drop_netconsole_target(struct config_group *group,
+8
fs/Kconfig
··· 470 It is safe to say Y, as the clustering method is run-time 471 selectable. 472 473 config OCFS2_DEBUG_MASKLOG 474 bool "OCFS2 logging support" 475 depends on OCFS2_FS
··· 470 It is safe to say Y, as the clustering method is run-time 471 selectable. 472 473 + config OCFS2_FS_STATS 474 + bool "OCFS2 statistics" 475 + depends on OCFS2_FS 476 + default y 477 + help 478 + This option allows some fs statistics to be captured. Enabling 479 + this option may increase the memory consumption. 480 + 481 config OCFS2_DEBUG_MASKLOG 482 bool "OCFS2 logging support" 483 depends on OCFS2_FS
+4
fs/configfs/configfs_internal.h
··· 26 27 #include <linux/slab.h> 28 #include <linux/list.h> 29 30 struct configfs_dirent { 31 atomic_t s_count; ··· 48 #define CONFIGFS_USET_DIR 0x0040 49 #define CONFIGFS_USET_DEFAULT 0x0080 50 #define CONFIGFS_USET_DROPPING 0x0100 51 #define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR) 52 53 extern struct vfsmount * configfs_mount; 54 extern struct kmem_cache *configfs_dir_cachep;
··· 26 27 #include <linux/slab.h> 28 #include <linux/list.h> 29 + #include <linux/spinlock.h> 30 31 struct configfs_dirent { 32 atomic_t s_count; ··· 47 #define CONFIGFS_USET_DIR 0x0040 48 #define CONFIGFS_USET_DEFAULT 0x0080 49 #define CONFIGFS_USET_DROPPING 0x0100 50 + #define CONFIGFS_USET_IN_MKDIR 0x0200 51 #define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR) 52 + 53 + extern spinlock_t configfs_dirent_lock; 54 55 extern struct vfsmount * configfs_mount; 56 extern struct kmem_cache *configfs_dir_cachep;
+106 -41
fs/configfs/dir.c
··· 30 #include <linux/mount.h> 31 #include <linux/module.h> 32 #include <linux/slab.h> 33 34 #include <linux/configfs.h> 35 #include "configfs_internal.h" 36 37 DECLARE_RWSEM(configfs_rename_sem); 38 39 static void configfs_d_iput(struct dentry * dentry, 40 struct inode * inode) ··· 88 89 sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL); 90 if (!sd) 91 - return NULL; 92 93 atomic_set(&sd->s_count, 1); 94 INIT_LIST_HEAD(&sd->s_links); 95 INIT_LIST_HEAD(&sd->s_children); 96 - list_add(&sd->s_sibling, &parent_sd->s_children); 97 sd->s_element = element; 98 99 return sd; 100 } ··· 139 struct configfs_dirent * sd; 140 141 sd = configfs_new_dirent(parent_sd, element); 142 - if (!sd) 143 - return -ENOMEM; 144 145 sd->s_mode = mode; 146 sd->s_type = type; ··· 194 } else { 195 struct configfs_dirent *sd = d->d_fsdata; 196 if (sd) { 197 list_del_init(&sd->s_sibling); 198 configfs_put(sd); 199 } 200 } ··· 247 else { 248 struct configfs_dirent *sd = dentry->d_fsdata; 249 if (sd) { 250 list_del_init(&sd->s_sibling); 251 configfs_put(sd); 252 } 253 } ··· 263 struct configfs_dirent * sd; 264 265 sd = d->d_fsdata; 266 list_del_init(&sd->s_sibling); 267 configfs_put(sd); 268 if (d->d_inode) 269 simple_rmdir(parent->d_inode,d); ··· 358 359 /* 360 * Only subdirectories count here. Files (CONFIGFS_NOT_PINNED) are 361 - * attributes and are removed by rmdir(). We recurse, taking i_mutex 362 - * on all children that are candidates for default detach. If the 363 - * result is clean, then configfs_detach_group() will handle dropping 364 - * i_mutex. If there is an error, the caller will clean up the i_mutex 365 - * holders via configfs_detach_rollback(). 
366 */ 367 - static int configfs_detach_prep(struct dentry *dentry) 368 { 369 struct configfs_dirent *parent_sd = dentry->d_fsdata; 370 struct configfs_dirent *sd; ··· 379 if (sd->s_type & CONFIGFS_NOT_PINNED) 380 continue; 381 if (sd->s_type & CONFIGFS_USET_DEFAULT) { 382 - mutex_lock(&sd->s_dentry->d_inode->i_mutex); 383 - /* Mark that we've taken i_mutex */ 384 sd->s_type |= CONFIGFS_USET_DROPPING; 385 386 /* 387 * Yup, recursive. If there's a problem, blame 388 * deep nesting of default_groups 389 */ 390 - ret = configfs_detach_prep(sd->s_dentry); 391 if (!ret) 392 continue; 393 } else ··· 406 } 407 408 /* 409 - * Walk the tree, dropping i_mutex wherever CONFIGFS_USET_DROPPING is 410 * set. 411 */ 412 static void configfs_detach_rollback(struct dentry *dentry) ··· 417 list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { 418 if (sd->s_type & CONFIGFS_USET_DEFAULT) { 419 configfs_detach_rollback(sd->s_dentry); 420 - 421 - if (sd->s_type & CONFIGFS_USET_DROPPING) { 422 - sd->s_type &= ~CONFIGFS_USET_DROPPING; 423 - mutex_unlock(&sd->s_dentry->d_inode->i_mutex); 424 - } 425 } 426 } 427 } ··· 438 list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) { 439 if (!sd->s_element || !(sd->s_type & CONFIGFS_NOT_PINNED)) 440 continue; 441 list_del_init(&sd->s_sibling); 442 configfs_drop_dentry(sd, dentry); 443 configfs_put(sd); 444 } ··· 496 497 child = sd->s_dentry; 498 499 configfs_detach_group(sd->s_element); 500 child->d_inode->i_flags |= S_DEAD; 501 502 - /* 503 - * From rmdir/unregister, a configfs_detach_prep() pass 504 - * has taken our i_mutex for us. Drop it. 505 - * From mkdir/register cleanup, there is no sem held. 
506 - */ 507 - if (sd->s_type & CONFIGFS_USET_DROPPING) 508 - mutex_unlock(&child->d_inode->i_mutex); 509 510 d_delete(child); 511 dput(child); ··· 1073 group = NULL; 1074 item = NULL; 1075 if (type->ct_group_ops->make_group) { 1076 - group = type->ct_group_ops->make_group(to_config_group(parent_item), name); 1077 - if (group) { 1078 link_group(to_config_group(parent_item), group); 1079 item = &group->cg_item; 1080 } 1081 } else { 1082 - item = type->ct_group_ops->make_item(to_config_group(parent_item), name); 1083 - if (item) 1084 link_obj(parent_item, item); 1085 } 1086 mutex_unlock(&subsys->su_mutex); 1087 1088 kfree(name); 1089 - if (!item) { 1090 /* 1091 - * If item == NULL, then link_obj() was never called. 1092 * There are no extra references to clean up. 1093 */ 1094 - ret = -ENOMEM; 1095 goto out_put; 1096 } 1097 ··· 1118 */ 1119 module_got = 1; 1120 1121 if (group) 1122 ret = configfs_attach_group(parent_item, item, dentry); 1123 else 1124 ret = configfs_attach_item(parent_item, item, dentry); 1125 1126 out_unlink: 1127 if (ret) { ··· 1201 return -EINVAL; 1202 } 1203 1204 - ret = configfs_detach_prep(dentry); 1205 - if (ret) { 1206 - configfs_detach_rollback(dentry); 1207 - config_item_put(parent_item); 1208 - return ret; 1209 - } 1210 1211 /* Get a working ref for the duration of this function */ 1212 item = configfs_get_config_item(dentry); ··· 1313 file->private_data = configfs_new_dirent(parent_sd, NULL); 1314 mutex_unlock(&dentry->d_inode->i_mutex); 1315 1316 - return file->private_data ? 
0 : -ENOMEM; 1317 1318 } 1319 ··· 1323 struct configfs_dirent * cursor = file->private_data; 1324 1325 mutex_lock(&dentry->d_inode->i_mutex); 1326 list_del_init(&cursor->s_sibling); 1327 mutex_unlock(&dentry->d_inode->i_mutex); 1328 1329 release_configfs_dirent(cursor); ··· 1365 /* fallthrough */ 1366 default: 1367 if (filp->f_pos == 2) { 1368 list_move(q, &parent_sd->s_children); 1369 } 1370 for (p=q->next; p!= &parent_sd->s_children; p=p->next) { 1371 struct configfs_dirent *next; ··· 1390 dt_type(next)) < 0) 1391 return 0; 1392 1393 list_move(q, p); 1394 p = q; 1395 filp->f_pos++; 1396 } ··· 1423 struct list_head *p; 1424 loff_t n = file->f_pos - 2; 1425 1426 list_del(&cursor->s_sibling); 1427 p = sd->s_children.next; 1428 while (n && p != &sd->s_children) { ··· 1435 p = p->next; 1436 } 1437 list_add_tail(&cursor->s_sibling, p); 1438 } 1439 } 1440 mutex_unlock(&dentry->d_inode->i_mutex); ··· 1511 mutex_lock_nested(&configfs_sb->s_root->d_inode->i_mutex, 1512 I_MUTEX_PARENT); 1513 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); 1514 - if (configfs_detach_prep(dentry)) { 1515 printk(KERN_ERR "configfs: Tried to unregister non-empty subsystem!\n"); 1516 } 1517 configfs_detach_group(&group->cg_item); 1518 dentry->d_inode->i_flags |= S_DEAD; 1519 mutex_unlock(&dentry->d_inode->i_mutex);
··· 30 #include <linux/mount.h> 31 #include <linux/module.h> 32 #include <linux/slab.h> 33 + #include <linux/err.h> 34 35 #include <linux/configfs.h> 36 #include "configfs_internal.h" 37 38 DECLARE_RWSEM(configfs_rename_sem); 39 + /* 40 + * Protects mutations of configfs_dirent linkage together with proper i_mutex 41 + * Also protects mutations of symlinks linkage to target configfs_dirent 42 + * Mutators of configfs_dirent linkage must *both* have the proper inode locked 43 + * and configfs_dirent_lock locked, in that order. 44 + * This allows one to safely traverse configfs_dirent trees and symlinks without 45 + * having to lock inodes. 46 + * 47 + * Protects setting of CONFIGFS_USET_DROPPING: checking the flag 48 + * unlocked is not reliable unless in detach_groups() called from 49 + * rmdir()/unregister() and from configfs_attach_group() 50 + */ 51 + DEFINE_SPINLOCK(configfs_dirent_lock); 52 53 static void configfs_d_iput(struct dentry * dentry, 54 struct inode * inode) ··· 74 75 sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL); 76 if (!sd) 77 + return ERR_PTR(-ENOMEM); 78 79 atomic_set(&sd->s_count, 1); 80 INIT_LIST_HEAD(&sd->s_links); 81 INIT_LIST_HEAD(&sd->s_children); 82 sd->s_element = element; 83 + spin_lock(&configfs_dirent_lock); 84 + if (parent_sd->s_type & CONFIGFS_USET_DROPPING) { 85 + spin_unlock(&configfs_dirent_lock); 86 + kmem_cache_free(configfs_dir_cachep, sd); 87 + return ERR_PTR(-ENOENT); 88 + } 89 + list_add(&sd->s_sibling, &parent_sd->s_children); 90 + spin_unlock(&configfs_dirent_lock); 91 92 return sd; 93 } ··· 118 struct configfs_dirent * sd; 119 120 sd = configfs_new_dirent(parent_sd, element); 121 + if (IS_ERR(sd)) 122 + return PTR_ERR(sd); 123 124 sd->s_mode = mode; 125 sd->s_type = type; ··· 173 } else { 174 struct configfs_dirent *sd = d->d_fsdata; 175 if (sd) { 176 + spin_lock(&configfs_dirent_lock); 177 list_del_init(&sd->s_sibling); 178 + spin_unlock(&configfs_dirent_lock); 179 configfs_put(sd); 180 } 181 } ··· 224 else { 
225 struct configfs_dirent *sd = dentry->d_fsdata; 226 if (sd) { 227 + spin_lock(&configfs_dirent_lock); 228 list_del_init(&sd->s_sibling); 229 + spin_unlock(&configfs_dirent_lock); 230 configfs_put(sd); 231 } 232 } ··· 238 struct configfs_dirent * sd; 239 240 sd = d->d_fsdata; 241 + spin_lock(&configfs_dirent_lock); 242 list_del_init(&sd->s_sibling); 243 + spin_unlock(&configfs_dirent_lock); 244 configfs_put(sd); 245 if (d->d_inode) 246 simple_rmdir(parent->d_inode,d); ··· 331 332 /* 333 * Only subdirectories count here. Files (CONFIGFS_NOT_PINNED) are 334 + * attributes and are removed by rmdir(). We recurse, setting 335 + * CONFIGFS_USET_DROPPING on all children that are candidates for 336 + * default detach. 337 + * If there is an error, the caller will reset the flags via 338 + * configfs_detach_rollback(). 339 */ 340 + static int configfs_detach_prep(struct dentry *dentry, struct mutex **wait_mutex) 341 { 342 struct configfs_dirent *parent_sd = dentry->d_fsdata; 343 struct configfs_dirent *sd; ··· 352 if (sd->s_type & CONFIGFS_NOT_PINNED) 353 continue; 354 if (sd->s_type & CONFIGFS_USET_DEFAULT) { 355 + /* Abort if racing with mkdir() */ 356 + if (sd->s_type & CONFIGFS_USET_IN_MKDIR) { 357 + if (wait_mutex) 358 + *wait_mutex = &sd->s_dentry->d_inode->i_mutex; 359 + return -EAGAIN; 360 + } 361 + /* Mark that we're trying to drop the group */ 362 sd->s_type |= CONFIGFS_USET_DROPPING; 363 364 /* 365 * Yup, recursive. If there's a problem, blame 366 * deep nesting of default_groups 367 */ 368 + ret = configfs_detach_prep(sd->s_dentry, wait_mutex); 369 if (!ret) 370 continue; 371 } else ··· 374 } 375 376 /* 377 + * Walk the tree, resetting CONFIGFS_USET_DROPPING wherever it was 378 * set. 
379 */ 380 static void configfs_detach_rollback(struct dentry *dentry) ··· 385 list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { 386 if (sd->s_type & CONFIGFS_USET_DEFAULT) { 387 configfs_detach_rollback(sd->s_dentry); 388 + sd->s_type &= ~CONFIGFS_USET_DROPPING; 389 } 390 } 391 } ··· 410 list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) { 411 if (!sd->s_element || !(sd->s_type & CONFIGFS_NOT_PINNED)) 412 continue; 413 + spin_lock(&configfs_dirent_lock); 414 list_del_init(&sd->s_sibling); 415 + spin_unlock(&configfs_dirent_lock); 416 configfs_drop_dentry(sd, dentry); 417 configfs_put(sd); 418 } ··· 466 467 child = sd->s_dentry; 468 469 + mutex_lock(&child->d_inode->i_mutex); 470 + 471 configfs_detach_group(sd->s_element); 472 child->d_inode->i_flags |= S_DEAD; 473 474 + mutex_unlock(&child->d_inode->i_mutex); 475 476 d_delete(child); 477 dput(child); ··· 1047 group = NULL; 1048 item = NULL; 1049 if (type->ct_group_ops->make_group) { 1050 + ret = type->ct_group_ops->make_group(to_config_group(parent_item), name, &group); 1051 + if (!ret) { 1052 link_group(to_config_group(parent_item), group); 1053 item = &group->cg_item; 1054 } 1055 } else { 1056 + ret = type->ct_group_ops->make_item(to_config_group(parent_item), name, &item); 1057 + if (!ret) 1058 link_obj(parent_item, item); 1059 } 1060 mutex_unlock(&subsys->su_mutex); 1061 1062 kfree(name); 1063 + if (ret) { 1064 /* 1065 + * If ret != 0, then link_obj() was never called. 1066 * There are no extra references to clean up. 
1067 */ 1068 goto out_put; 1069 } 1070 ··· 1093 */ 1094 module_got = 1; 1095 1096 + /* 1097 + * Make racing rmdir() fail if it did not tag parent with 1098 + * CONFIGFS_USET_DROPPING 1099 + * Note: if CONFIGFS_USET_DROPPING is already set, attach_group() will 1100 + * fail and let rmdir() terminate correctly 1101 + */ 1102 + spin_lock(&configfs_dirent_lock); 1103 + /* This will make configfs_detach_prep() fail */ 1104 + sd->s_type |= CONFIGFS_USET_IN_MKDIR; 1105 + spin_unlock(&configfs_dirent_lock); 1106 + 1107 if (group) 1108 ret = configfs_attach_group(parent_item, item, dentry); 1109 else 1110 ret = configfs_attach_item(parent_item, item, dentry); 1111 + 1112 + spin_lock(&configfs_dirent_lock); 1113 + sd->s_type &= ~CONFIGFS_USET_IN_MKDIR; 1114 + spin_unlock(&configfs_dirent_lock); 1115 1116 out_unlink: 1117 if (ret) { ··· 1161 return -EINVAL; 1162 } 1163 1164 + spin_lock(&configfs_dirent_lock); 1165 + do { 1166 + struct mutex *wait_mutex; 1167 + 1168 + ret = configfs_detach_prep(dentry, &wait_mutex); 1169 + if (ret) { 1170 + configfs_detach_rollback(dentry); 1171 + spin_unlock(&configfs_dirent_lock); 1172 + if (ret != -EAGAIN) { 1173 + config_item_put(parent_item); 1174 + return ret; 1175 + } 1176 + 1177 + /* Wait until the racing operation terminates */ 1178 + mutex_lock(wait_mutex); 1179 + mutex_unlock(wait_mutex); 1180 + 1181 + spin_lock(&configfs_dirent_lock); 1182 + } 1183 + } while (ret == -EAGAIN); 1184 + spin_unlock(&configfs_dirent_lock); 1185 1186 /* Get a working ref for the duration of this function */ 1187 item = configfs_get_config_item(dentry); ··· 1258 file->private_data = configfs_new_dirent(parent_sd, NULL); 1259 mutex_unlock(&dentry->d_inode->i_mutex); 1260 1261 + return IS_ERR(file->private_data) ? 
PTR_ERR(file->private_data) : 0; 1262 1263 } 1264 ··· 1268 struct configfs_dirent * cursor = file->private_data; 1269 1270 mutex_lock(&dentry->d_inode->i_mutex); 1271 + spin_lock(&configfs_dirent_lock); 1272 list_del_init(&cursor->s_sibling); 1273 + spin_unlock(&configfs_dirent_lock); 1274 mutex_unlock(&dentry->d_inode->i_mutex); 1275 1276 release_configfs_dirent(cursor); ··· 1308 /* fallthrough */ 1309 default: 1310 if (filp->f_pos == 2) { 1311 + spin_lock(&configfs_dirent_lock); 1312 list_move(q, &parent_sd->s_children); 1313 + spin_unlock(&configfs_dirent_lock); 1314 } 1315 for (p=q->next; p!= &parent_sd->s_children; p=p->next) { 1316 struct configfs_dirent *next; ··· 1331 dt_type(next)) < 0) 1332 return 0; 1333 1334 + spin_lock(&configfs_dirent_lock); 1335 list_move(q, p); 1336 + spin_unlock(&configfs_dirent_lock); 1337 p = q; 1338 filp->f_pos++; 1339 } ··· 1362 struct list_head *p; 1363 loff_t n = file->f_pos - 2; 1364 1365 + spin_lock(&configfs_dirent_lock); 1366 list_del(&cursor->s_sibling); 1367 p = sd->s_children.next; 1368 while (n && p != &sd->s_children) { ··· 1373 p = p->next; 1374 } 1375 list_add_tail(&cursor->s_sibling, p); 1376 + spin_unlock(&configfs_dirent_lock); 1377 } 1378 } 1379 mutex_unlock(&dentry->d_inode->i_mutex); ··· 1448 mutex_lock_nested(&configfs_sb->s_root->d_inode->i_mutex, 1449 I_MUTEX_PARENT); 1450 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); 1451 + spin_lock(&configfs_dirent_lock); 1452 + if (configfs_detach_prep(dentry, NULL)) { 1453 printk(KERN_ERR "configfs: Tried to unregister non-empty subsystem!\n"); 1454 } 1455 + spin_unlock(&configfs_dirent_lock); 1456 configfs_detach_group(&group->cg_item); 1457 dentry->d_inode->i_flags |= S_DEAD; 1458 mutex_unlock(&dentry->d_inode->i_mutex);
+2
fs/configfs/inode.c
··· 247 if (!sd->s_element) 248 continue; 249 if (!strcmp(configfs_get_name(sd), name)) { 250 list_del_init(&sd->s_sibling); 251 configfs_drop_dentry(sd, dir); 252 configfs_put(sd); 253 break;
··· 247 if (!sd->s_element) 248 continue; 249 if (!strcmp(configfs_get_name(sd), name)) { 250 + spin_lock(&configfs_dirent_lock); 251 list_del_init(&sd->s_sibling); 252 + spin_unlock(&configfs_dirent_lock); 253 configfs_drop_dentry(sd, dir); 254 configfs_put(sd); 255 break;
+13 -3
fs/configfs/symlink.c
··· 77 sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); 78 if (sl) { 79 sl->sl_target = config_item_get(item); 80 - /* FIXME: needs a lock, I'd bet */ 81 list_add(&sl->sl_list, &target_sd->s_links); 82 ret = configfs_create_link(sl, parent_item->ci_dentry, 83 dentry); 84 if (ret) { 85 list_del_init(&sl->sl_list); 86 config_item_put(item); 87 kfree(sl); 88 } ··· 140 goto out_put; 141 142 ret = type->ct_item_ops->allow_link(parent_item, target_item); 143 - if (!ret) 144 ret = create_link(parent_item, target_item, dentry); 145 146 config_item_put(target_item); 147 path_put(&nd.path); ··· 176 parent_item = configfs_get_config_item(dentry->d_parent); 177 type = parent_item->ci_type; 178 179 list_del_init(&sd->s_sibling); 180 configfs_drop_dentry(sd, dentry->d_parent); 181 dput(dentry); 182 configfs_put(sd); ··· 193 type->ct_item_ops->drop_link(parent_item, 194 sl->sl_target); 195 196 - /* FIXME: Needs lock */ 197 list_del_init(&sl->sl_list); 198 199 /* Put reference from create_link() */ 200 config_item_put(sl->sl_target);
··· 77 sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); 78 if (sl) { 79 sl->sl_target = config_item_get(item); 80 + spin_lock(&configfs_dirent_lock); 81 list_add(&sl->sl_list, &target_sd->s_links); 82 + spin_unlock(&configfs_dirent_lock); 83 ret = configfs_create_link(sl, parent_item->ci_dentry, 84 dentry); 85 if (ret) { 86 + spin_lock(&configfs_dirent_lock); 87 list_del_init(&sl->sl_list); 88 + spin_unlock(&configfs_dirent_lock); 89 config_item_put(item); 90 kfree(sl); 91 } ··· 137 goto out_put; 138 139 ret = type->ct_item_ops->allow_link(parent_item, target_item); 140 + if (!ret) { 141 ret = create_link(parent_item, target_item, dentry); 142 + if (ret && type->ct_item_ops->drop_link) 143 + type->ct_item_ops->drop_link(parent_item, 144 + target_item); 145 + } 146 147 config_item_put(target_item); 148 path_put(&nd.path); ··· 169 parent_item = configfs_get_config_item(dentry->d_parent); 170 type = parent_item->ci_type; 171 172 + spin_lock(&configfs_dirent_lock); 173 list_del_init(&sd->s_sibling); 174 + spin_unlock(&configfs_dirent_lock); 175 configfs_drop_dentry(sd, dentry->d_parent); 176 dput(dentry); 177 configfs_put(sd); ··· 184 type->ct_item_ops->drop_link(parent_item, 185 sl->sl_target); 186 187 + spin_lock(&configfs_dirent_lock); 188 list_del_init(&sl->sl_list); 189 + spin_unlock(&configfs_dirent_lock); 190 191 /* Put reference from create_link() */ 192 config_item_put(sl->sl_target);
+28 -17
fs/dlm/config.c
··· 41 struct nodes; 42 struct node; 43 44 - static struct config_group *make_cluster(struct config_group *, const char *); 45 static void drop_cluster(struct config_group *, struct config_item *); 46 static void release_cluster(struct config_item *); 47 - static struct config_group *make_space(struct config_group *, const char *); 48 static void drop_space(struct config_group *, struct config_item *); 49 static void release_space(struct config_item *); 50 - static struct config_item *make_comm(struct config_group *, const char *); 51 static void drop_comm(struct config_group *, struct config_item *); 52 static void release_comm(struct config_item *); 53 - static struct config_item *make_node(struct config_group *, const char *); 54 static void drop_node(struct config_group *, struct config_item *); 55 static void release_node(struct config_item *); 56 ··· 396 return i ? container_of(i, struct node, item) : NULL; 397 } 398 399 - static struct config_group *make_cluster(struct config_group *g, 400 - const char *name) 401 { 402 struct cluster *cl = NULL; 403 struct spaces *sps = NULL; ··· 435 436 space_list = &sps->ss_group; 437 comm_list = &cms->cs_group; 438 - return &cl->group; 439 440 fail: 441 kfree(cl); 442 kfree(gps); 443 kfree(sps); 444 kfree(cms); 445 - return NULL; 446 } 447 448 static void drop_cluster(struct config_group *g, struct config_item *i) ··· 471 kfree(cl); 472 } 473 474 - static struct config_group *make_space(struct config_group *g, const char *name) 475 { 476 struct space *sp = NULL; 477 struct nodes *nds = NULL; ··· 495 INIT_LIST_HEAD(&sp->members); 496 mutex_init(&sp->members_lock); 497 sp->members_count = 0; 498 - return &sp->group; 499 500 fail: 501 kfree(sp); 502 kfree(gps); 503 kfree(nds); 504 - return NULL; 505 } 506 507 static void drop_space(struct config_group *g, struct config_item *i) ··· 529 kfree(sp); 530 } 531 532 - static struct config_item *make_comm(struct config_group *g, const char *name) 533 { 534 struct comm *cm; 535 536 
cm = kzalloc(sizeof(struct comm), GFP_KERNEL); 537 if (!cm) 538 - return NULL; 539 540 config_item_init_type_name(&cm->item, name, &comm_type); 541 cm->nodeid = -1; 542 cm->local = 0; 543 cm->addr_count = 0; 544 - return &cm->item; 545 } 546 547 static void drop_comm(struct config_group *g, struct config_item *i) ··· 563 kfree(cm); 564 } 565 566 - static struct config_item *make_node(struct config_group *g, const char *name) 567 { 568 struct space *sp = to_space(g->cg_item.ci_parent); 569 struct node *nd; 570 571 nd = kzalloc(sizeof(struct node), GFP_KERNEL); 572 if (!nd) 573 - return NULL; 574 575 config_item_init_type_name(&nd->item, name, &node_type); 576 nd->nodeid = -1; ··· 583 sp->members_count++; 584 mutex_unlock(&sp->members_lock); 585 586 - return &nd->item; 587 } 588 589 static void drop_node(struct config_group *g, struct config_item *i)
··· 41 struct nodes; 42 struct node; 43 44 + static int make_cluster(struct config_group *, const char *, 45 + struct config_group **); 46 static void drop_cluster(struct config_group *, struct config_item *); 47 static void release_cluster(struct config_item *); 48 + static int make_space(struct config_group *, const char *, 49 + struct config_group **); 50 static void drop_space(struct config_group *, struct config_item *); 51 static void release_space(struct config_item *); 52 + static int make_comm(struct config_group *, const char *, 53 + struct config_item **); 54 static void drop_comm(struct config_group *, struct config_item *); 55 static void release_comm(struct config_item *); 56 + static int make_node(struct config_group *, const char *, 57 + struct config_item **); 58 static void drop_node(struct config_group *, struct config_item *); 59 static void release_node(struct config_item *); 60 ··· 392 return i ? container_of(i, struct node, item) : NULL; 393 } 394 395 + static int make_cluster(struct config_group *g, const char *name, 396 + struct config_group **new_g) 397 { 398 struct cluster *cl = NULL; 399 struct spaces *sps = NULL; ··· 431 432 space_list = &sps->ss_group; 433 comm_list = &cms->cs_group; 434 + *new_g = &cl->group; 435 + return 0; 436 437 fail: 438 kfree(cl); 439 kfree(gps); 440 kfree(sps); 441 kfree(cms); 442 + return -ENOMEM; 443 } 444 445 static void drop_cluster(struct config_group *g, struct config_item *i) ··· 466 kfree(cl); 467 } 468 469 + static int make_space(struct config_group *g, const char *name, 470 + struct config_group **new_g) 471 { 472 struct space *sp = NULL; 473 struct nodes *nds = NULL; ··· 489 INIT_LIST_HEAD(&sp->members); 490 mutex_init(&sp->members_lock); 491 sp->members_count = 0; 492 + *new_g = &sp->group; 493 + return 0; 494 495 fail: 496 kfree(sp); 497 kfree(gps); 498 kfree(nds); 499 + return -ENOMEM; 500 } 501 502 static void drop_space(struct config_group *g, struct config_item *i) ··· 522 kfree(sp); 523 } 524 
525 + static int make_comm(struct config_group *g, const char *name, 526 + struct config_item **new_i) 527 { 528 struct comm *cm; 529 530 cm = kzalloc(sizeof(struct comm), GFP_KERNEL); 531 if (!cm) 532 + return -ENOMEM; 533 534 config_item_init_type_name(&cm->item, name, &comm_type); 535 cm->nodeid = -1; 536 cm->local = 0; 537 cm->addr_count = 0; 538 + *new_i = &cm->item; 539 + return 0; 540 } 541 542 static void drop_comm(struct config_group *g, struct config_item *i) ··· 554 kfree(cm); 555 } 556 557 + static int make_node(struct config_group *g, const char *name, 558 + struct config_item **new_i) 559 { 560 struct space *sp = to_space(g->cg_item.ci_parent); 561 struct node *nd; 562 563 nd = kzalloc(sizeof(struct node), GFP_KERNEL); 564 if (!nd) 565 + return -ENOMEM; 566 567 config_item_init_type_name(&nd->item, name, &node_type); 568 nd->nodeid = -1; ··· 573 sp->members_count++; 574 mutex_unlock(&sp->members_lock); 575 576 + *new_i = &nd->item; 577 + return 0; 578 } 579 580 static void drop_node(struct config_group *g, struct config_item *i)
+10 -3
fs/ocfs2/aops.c
··· 174 * need to use BH_New is when we're extending i_size on a file 175 * system which doesn't support holes, in which case BH_New 176 * allows block_prepare_write() to zero. 177 */ 178 - mlog_bug_on_msg(create && p_blkno == 0 && ocfs2_sparse_alloc(osb), 179 - "ino %lu, iblock %llu\n", inode->i_ino, 180 - (unsigned long long)iblock); 181 182 /* Treat the unwritten extent as a hole for zeroing purposes. */ 183 if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
··· 174 * need to use BH_New is when we're extending i_size on a file 175 * system which doesn't support holes, in which case BH_New 176 * allows block_prepare_write() to zero. 177 + * 178 + * If we see this on a sparse file system, then a truncate has 179 + * raced us and removed the cluster. In this case, we clear 180 + * the buffers dirty and uptodate bits and let the buffer code 181 + * ignore it as a hole. 182 */ 183 + if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) { 184 + clear_buffer_dirty(bh_result); 185 + clear_buffer_uptodate(bh_result); 186 + goto bail; 187 + } 188 189 /* Treat the unwritten extent as a hole for zeroing purposes. */ 190 if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
+10 -7
fs/ocfs2/cluster/heartbeat.c
··· 1489 : NULL; 1490 } 1491 1492 - static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group, 1493 - const char *name) 1494 { 1495 struct o2hb_region *reg = NULL; 1496 - struct config_item *ret = NULL; 1497 1498 reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL); 1499 - if (reg == NULL) 1500 - goto out; /* ENOMEM */ 1501 1502 config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type); 1503 1504 - ret = &reg->hr_item; 1505 1506 spin_lock(&o2hb_live_lock); 1507 list_add_tail(&reg->hr_all_item, &o2hb_all_regions); 1508 spin_unlock(&o2hb_live_lock); 1509 out: 1510 - if (ret == NULL) 1511 kfree(reg); 1512 1513 return ret;
··· 1489 : NULL; 1490 } 1491 1492 + static int o2hb_heartbeat_group_make_item(struct config_group *group, 1493 + const char *name, 1494 + struct config_item **new_item) 1495 { 1496 struct o2hb_region *reg = NULL; 1497 + int ret = 0; 1498 1499 reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL); 1500 + if (reg == NULL) { 1501 + ret = -ENOMEM; 1502 + goto out; 1503 + } 1504 1505 config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type); 1506 1507 + *new_item = &reg->hr_item; 1508 1509 spin_lock(&o2hb_live_lock); 1510 list_add_tail(&reg->hr_all_item, &o2hb_all_regions); 1511 spin_unlock(&o2hb_live_lock); 1512 out: 1513 + if (ret) 1514 kfree(reg); 1515 1516 return ret;
+5 -3
fs/ocfs2/cluster/netdebug.c
··· 146 nst->st_task->comm, nst->st_node, 147 nst->st_sc, nst->st_id, nst->st_msg_type, 148 nst->st_msg_key, 149 - nst->st_sock_time.tv_sec, nst->st_sock_time.tv_usec, 150 - nst->st_send_time.tv_sec, nst->st_send_time.tv_usec, 151 nst->st_status_time.tv_sec, 152 nst->st_status_time.tv_usec); 153 } ··· 276 return sc; /* unused, just needs to be null when done */ 277 } 278 279 - #define TV_SEC_USEC(TV) TV.tv_sec, TV.tv_usec 280 281 static int sc_seq_show(struct seq_file *seq, void *v) 282 {
··· 146 nst->st_task->comm, nst->st_node, 147 nst->st_sc, nst->st_id, nst->st_msg_type, 148 nst->st_msg_key, 149 + nst->st_sock_time.tv_sec, 150 + (unsigned long)nst->st_sock_time.tv_usec, 151 + nst->st_send_time.tv_sec, 152 + (unsigned long)nst->st_send_time.tv_usec, 153 nst->st_status_time.tv_sec, 154 nst->st_status_time.tv_usec); 155 } ··· 274 return sc; /* unused, just needs to be null when done */ 275 } 276 277 + #define TV_SEC_USEC(TV) TV.tv_sec, (unsigned long)TV.tv_usec 278 279 static int sc_seq_show(struct seq_file *seq, void *v) 280 {
+28 -17
fs/ocfs2/cluster/nodemanager.c
··· 644 return ret; 645 } 646 647 - static struct config_item *o2nm_node_group_make_item(struct config_group *group, 648 - const char *name) 649 { 650 struct o2nm_node *node = NULL; 651 - struct config_item *ret = NULL; 652 653 - if (strlen(name) > O2NM_MAX_NAME_LEN) 654 - goto out; /* ENAMETOOLONG */ 655 656 node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); 657 - if (node == NULL) 658 - goto out; /* ENOMEM */ 659 660 strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ 661 config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); 662 spin_lock_init(&node->nd_lock); 663 664 - ret = &node->nd_item; 665 666 out: 667 - if (ret == NULL) 668 kfree(node); 669 670 return ret; ··· 756 } 757 #endif 758 759 - static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, 760 - const char *name) 761 { 762 struct o2nm_cluster *cluster = NULL; 763 struct o2nm_node_group *ns = NULL; 764 - struct config_group *o2hb_group = NULL, *ret = NULL; 765 void *defs = NULL; 766 767 /* this runs under the parent dir's i_mutex; there can be only 768 * one caller in here at a time */ 769 - if (o2nm_single_cluster) 770 - goto out; /* ENOSPC */ 771 772 cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); 773 ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); 774 defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL); 775 o2hb_group = o2hb_alloc_hb_set(); 776 - if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL) 777 goto out; 778 779 config_group_init_type_name(&cluster->cl_group, name, 780 &o2nm_cluster_type); ··· 797 cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; 798 cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; 799 800 - ret = &cluster->cl_group; 801 o2nm_single_cluster = cluster; 802 803 out: 804 - if (ret == NULL) { 805 kfree(cluster); 806 kfree(ns); 807 o2hb_free_hb_set(o2hb_group);
··· 644 return ret; 645 } 646 647 + static int o2nm_node_group_make_item(struct config_group *group, 648 + const char *name, 649 + struct config_item **new_item) 650 { 651 struct o2nm_node *node = NULL; 652 + int ret = 0; 653 654 + if (strlen(name) > O2NM_MAX_NAME_LEN) { 655 + ret = -ENAMETOOLONG; 656 + goto out; 657 + } 658 659 node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); 660 + if (node == NULL) { 661 + ret = -ENOMEM; 662 + goto out; 663 + } 664 665 strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ 666 config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); 667 spin_lock_init(&node->nd_lock); 668 669 + *new_item = &node->nd_item; 670 671 out: 672 + if (ret) 673 kfree(node); 674 675 return ret; ··· 751 } 752 #endif 753 754 + static int o2nm_cluster_group_make_group(struct config_group *group, 755 + const char *name, 756 + struct config_group **new_group) 757 { 758 struct o2nm_cluster *cluster = NULL; 759 struct o2nm_node_group *ns = NULL; 760 + struct config_group *o2hb_group = NULL; 761 void *defs = NULL; 762 + int ret = 0; 763 764 /* this runs under the parent dir's i_mutex; there can be only 765 * one caller in here at a time */ 766 + if (o2nm_single_cluster) { 767 + ret = -ENOSPC; 768 + goto out; 769 + } 770 771 cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); 772 ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); 773 defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL); 774 o2hb_group = o2hb_alloc_hb_set(); 775 + if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL) { 776 + ret = -ENOMEM; 777 goto out; 778 + } 779 780 config_group_init_type_name(&cluster->cl_group, name, 781 &o2nm_cluster_type); ··· 786 cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; 787 cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; 788 789 + *new_group = &cluster->cl_group; 790 o2nm_single_cluster = cluster; 791 792 out: 793 + if (ret) { 794 kfree(cluster); 795 kfree(ns); 796 o2hb_free_hb_set(o2hb_group);
+121 -1
fs/ocfs2/dlmglue.c
··· 31 #include <linux/pagemap.h> 32 #include <linux/debugfs.h> 33 #include <linux/seq_file.h> 34 35 #define MLOG_MASK_PREFIX ML_DLM_GLUE 36 #include <cluster/masklog.h> ··· 60 struct completion mw_complete; 61 unsigned long mw_mask; 62 unsigned long mw_goal; 63 }; 64 65 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres); ··· 370 spin_unlock(&ocfs2_dlm_tracking_lock); 371 } 372 373 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb, 374 struct ocfs2_lock_res *res, 375 enum ocfs2_lock_type type, ··· 458 res->l_flags = OCFS2_LOCK_INITIALIZED; 459 460 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug); 461 } 462 463 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res) ··· 1123 { 1124 INIT_LIST_HEAD(&mw->mw_item); 1125 init_completion(&mw->mw_complete); 1126 } 1127 1128 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw) ··· 1330 goto again; 1331 mlog_errno(ret); 1332 } 1333 1334 mlog_exit(ret); 1335 return ret; ··· 2060 le32_to_cpu(fe->i_flags)); 2061 2062 ocfs2_refresh_inode(inode, fe); 2063 } 2064 2065 status = 0; ··· 2345 2346 if (status < 0) 2347 mlog_errno(status); 2348 } 2349 bail: 2350 mlog_exit(status); ··· 2540 } 2541 2542 /* So that debugfs.ocfs2 can determine which format is being used */ 2543 - #define OCFS2_DLM_DEBUG_STR_VERSION 1 2544 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) 2545 { 2546 int i; ··· 2580 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 2581 for(i = 0; i < DLM_LVB_LEN; i++) 2582 seq_printf(m, "0x%x\t", lvb[i]); 2583 2584 /* End the line */ 2585 seq_printf(m, "\n");
··· 31 #include <linux/pagemap.h> 32 #include <linux/debugfs.h> 33 #include <linux/seq_file.h> 34 + #include <linux/time.h> 35 36 #define MLOG_MASK_PREFIX ML_DLM_GLUE 37 #include <cluster/masklog.h> ··· 59 struct completion mw_complete; 60 unsigned long mw_mask; 61 unsigned long mw_goal; 62 + #ifdef CONFIG_OCFS2_FS_STATS 63 + unsigned long long mw_lock_start; 64 + #endif 65 }; 66 67 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres); ··· 366 spin_unlock(&ocfs2_dlm_tracking_lock); 367 } 368 369 + #ifdef CONFIG_OCFS2_FS_STATS 370 + static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) 371 + { 372 + res->l_lock_num_prmode = 0; 373 + res->l_lock_num_prmode_failed = 0; 374 + res->l_lock_total_prmode = 0; 375 + res->l_lock_max_prmode = 0; 376 + res->l_lock_num_exmode = 0; 377 + res->l_lock_num_exmode_failed = 0; 378 + res->l_lock_total_exmode = 0; 379 + res->l_lock_max_exmode = 0; 380 + res->l_lock_refresh = 0; 381 + } 382 + 383 + static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, 384 + struct ocfs2_mask_waiter *mw, int ret) 385 + { 386 + unsigned long long *num, *sum; 387 + unsigned int *max, *failed; 388 + struct timespec ts = current_kernel_time(); 389 + unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start; 390 + 391 + if (level == LKM_PRMODE) { 392 + num = &res->l_lock_num_prmode; 393 + sum = &res->l_lock_total_prmode; 394 + max = &res->l_lock_max_prmode; 395 + failed = &res->l_lock_num_prmode_failed; 396 + } else if (level == LKM_EXMODE) { 397 + num = &res->l_lock_num_exmode; 398 + sum = &res->l_lock_total_exmode; 399 + max = &res->l_lock_max_exmode; 400 + failed = &res->l_lock_num_exmode_failed; 401 + } else 402 + return; 403 + 404 + (*num)++; 405 + (*sum) += time; 406 + if (time > *max) 407 + *max = time; 408 + if (ret) 409 + (*failed)++; 410 + } 411 + 412 + static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) 413 + { 414 + lockres->l_lock_refresh++; 415 + } 416 + 417 
+ static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) 418 + { 419 + struct timespec ts = current_kernel_time(); 420 + mw->mw_lock_start = timespec_to_ns(&ts); 421 + } 422 + #else 423 + static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) 424 + { 425 + } 426 + static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, 427 + int level, struct ocfs2_mask_waiter *mw, int ret) 428 + { 429 + } 430 + static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) 431 + { 432 + } 433 + static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) 434 + { 435 + } 436 + #endif 437 + 438 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb, 439 struct ocfs2_lock_res *res, 440 enum ocfs2_lock_type type, ··· 385 res->l_flags = OCFS2_LOCK_INITIALIZED; 386 387 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug); 388 + 389 + ocfs2_init_lock_stats(res); 390 } 391 392 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res) ··· 1048 { 1049 INIT_LIST_HEAD(&mw->mw_item); 1050 init_completion(&mw->mw_complete); 1051 + ocfs2_init_start_time(mw); 1052 } 1053 1054 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw) ··· 1254 goto again; 1255 mlog_errno(ret); 1256 } 1257 + ocfs2_update_lock_stats(lockres, level, &mw, ret); 1258 1259 mlog_exit(ret); 1260 return ret; ··· 1983 le32_to_cpu(fe->i_flags)); 1984 1985 ocfs2_refresh_inode(inode, fe); 1986 + ocfs2_track_lock_refresh(lockres); 1987 } 1988 1989 status = 0; ··· 2267 2268 if (status < 0) 2269 mlog_errno(status); 2270 + ocfs2_track_lock_refresh(lockres); 2271 } 2272 bail: 2273 mlog_exit(status); ··· 2461 } 2462 2463 /* So that debugfs.ocfs2 can determine which format is being used */ 2464 + #define OCFS2_DLM_DEBUG_STR_VERSION 2 2465 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) 2466 { 2467 int i; ··· 2501 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 2502 for(i = 0; i < DLM_LVB_LEN; i++) 2503 seq_printf(m, "0x%x\t", lvb[i]); 2504 + 2505 + #ifdef CONFIG_OCFS2_FS_STATS 2506 + # define lock_num_prmode(_l) (_l)->l_lock_num_prmode 2507 + # define lock_num_exmode(_l) (_l)->l_lock_num_exmode 2508 + # define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed 2509 + # define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed 2510 + # define lock_total_prmode(_l) (_l)->l_lock_total_prmode 2511 + # define lock_total_exmode(_l) (_l)->l_lock_total_exmode 2512 + # define lock_max_prmode(_l) (_l)->l_lock_max_prmode 2513 + # define lock_max_exmode(_l) (_l)->l_lock_max_exmode 2514 + # define lock_refresh(_l) (_l)->l_lock_refresh 2515 + #else 2516 + # define lock_num_prmode(_l) (0ULL) 2517 + # define lock_num_exmode(_l) (0ULL) 2518 + # define lock_num_prmode_failed(_l) (0) 2519 + # define lock_num_exmode_failed(_l) (0) 2520 + # define lock_total_prmode(_l) (0ULL) 2521 + # define lock_total_exmode(_l) (0ULL) 2522 + # define lock_max_prmode(_l) (0) 2523 + # define lock_max_exmode(_l) (0) 2524 + # define lock_refresh(_l) (0) 2525 + #endif 2526 + /* The following seq_print was added in version 2 of this output */ 2527 + seq_printf(m, "%llu\t" 2528 + "%llu\t" 2529 + "%u\t" 2530 + "%u\t" 2531 + "%llu\t" 2532 + "%llu\t" 2533 + "%u\t" 2534 + "%u\t" 2535 + "%u\t", 2536 + lock_num_prmode(lockres), 2537 + lock_num_exmode(lockres), 2538 + lock_num_prmode_failed(lockres), 2539 + lock_num_exmode_failed(lockres), 2540 + lock_total_prmode(lockres), 2541 + lock_total_exmode(lockres), 2542 + lock_max_prmode(lockres), 2543 + lock_max_exmode(lockres), 2544 + lock_refresh(lockres)); 2545 2546 /* End the line */ 2547 seq_printf(m, "\n");
+1 -1
fs/ocfs2/file.c
··· 2202 2203 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos); 2204 if (ret == -EINVAL) 2205 - mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n"); 2206 2207 /* buffered aio wouldn't have proper lock coverage today */ 2208 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
··· 2202 2203 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos); 2204 if (ret == -EINVAL) 2205 + mlog(0, "generic_file_aio_read returned -EINVAL\n"); 2206 2207 /* buffered aio wouldn't have proper lock coverage today */ 2208 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
+1 -1
fs/ocfs2/journal.c
··· 329 330 mlog(0, "Trying to extend transaction by %d blocks\n", nblocks); 331 332 - #ifdef OCFS2_DEBUG_FS 333 status = 1; 334 #else 335 status = journal_extend(handle, nblocks);
··· 329 330 mlog(0, "Trying to extend transaction by %d blocks\n", nblocks); 331 332 + #ifdef CONFIG_OCFS2_DEBUG_FS 333 status = 1; 334 #else 335 status = journal_extend(handle, nblocks);
+1 -1
fs/ocfs2/localalloc.c
··· 498 499 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; 500 501 - #ifdef OCFS2_DEBUG_FS 502 if (le32_to_cpu(alloc->id1.bitmap1.i_used) != 503 ocfs2_local_alloc_count_bits(alloc)) { 504 ocfs2_error(osb->sb, "local alloc inode %llu says it has "
··· 498 499 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; 500 501 + #ifdef CONFIG_OCFS2_DEBUG_FS 502 if (le32_to_cpu(alloc->id1.bitmap1.i_used) != 503 ocfs2_local_alloc_count_bits(alloc)) { 504 ocfs2_error(osb->sb, "local alloc inode %llu says it has "
+12
fs/ocfs2/ocfs2.h
··· 132 wait_queue_head_t l_event; 133 134 struct list_head l_debug_list; 135 }; 136 137 struct ocfs2_dlm_debug {
··· 132 wait_queue_head_t l_event; 133 134 struct list_head l_debug_list; 135 + 136 + #ifdef CONFIG_OCFS2_FS_STATS 137 + unsigned long long l_lock_num_prmode; /* PR acquires */ 138 + unsigned long long l_lock_num_exmode; /* EX acquires */ 139 + unsigned int l_lock_num_prmode_failed; /* Failed PR gets */ 140 + unsigned int l_lock_num_exmode_failed; /* Failed EX gets */ 141 + unsigned long long l_lock_total_prmode; /* Tot wait for PR */ 142 + unsigned long long l_lock_total_exmode; /* Tot wait for EX */ 143 + unsigned int l_lock_max_prmode; /* Max wait for PR */ 144 + unsigned int l_lock_max_exmode; /* Max wait for EX */ 145 + unsigned int l_lock_refresh; /* Disk refreshes */ 146 + #endif 147 }; 148 149 struct ocfs2_dlm_debug {
+1 -1
fs/ocfs2/ocfs2_fs.h
··· 901 * list has a copy per slot. 902 */ 903 if (type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE) 904 - chars = snprintf(buf, len, 905 ocfs2_system_inodes[type].si_name); 906 else 907 chars = snprintf(buf, len,
··· 901 * list has a copy per slot. 902 */ 903 if (type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE) 904 + chars = snprintf(buf, len, "%s", 905 ocfs2_system_inodes[type].si_name); 906 else 907 chars = snprintf(buf, len,
+5 -14
fs/ocfs2/stack_user.c
··· 550 size_t count, 551 loff_t *ppos) 552 { 553 - char *proto_string = OCFS2_CONTROL_PROTO; 554 - size_t to_write = 0; 555 556 - if (*ppos >= OCFS2_CONTROL_PROTO_LEN) 557 - return 0; 558 - 559 - to_write = OCFS2_CONTROL_PROTO_LEN - *ppos; 560 - if (to_write > count) 561 - to_write = count; 562 - if (copy_to_user(buf, proto_string + *ppos, to_write)) 563 - return -EFAULT; 564 - 565 - *ppos += to_write; 566 567 /* Have we read the whole protocol list? */ 568 - if (*ppos >= OCFS2_CONTROL_PROTO_LEN) 569 ocfs2_control_set_handshake_state(file, 570 OCFS2_CONTROL_HANDSHAKE_READ); 571 572 - return to_write; 573 } 574 575 static int ocfs2_control_release(struct inode *inode, struct file *file)
··· 550 size_t count, 551 loff_t *ppos) 552 { 553 + ssize_t ret; 554 555 + ret = simple_read_from_buffer(buf, count, ppos, 556 + OCFS2_CONTROL_PROTO, OCFS2_CONTROL_PROTO_LEN); 557 558 /* Have we read the whole protocol list? */ 559 + if (ret > 0 && *ppos >= OCFS2_CONTROL_PROTO_LEN) 560 ocfs2_control_set_handshake_state(file, 561 OCFS2_CONTROL_HANDSHAKE_READ); 562 563 + return ret; 564 } 565 566 static int ocfs2_control_release(struct inode *inode, struct file *file)
+5 -1
fs/ocfs2/super.c
··· 1703 local = ocfs2_mount_local(osb); 1704 1705 /* will play back anything left in the journal. */ 1706 - ocfs2_journal_load(osb->journal, local); 1707 1708 if (dirty) { 1709 /* recover my local alloc if we didn't unmount cleanly. */
··· 1703 local = ocfs2_mount_local(osb); 1704 1705 /* will play back anything left in the journal. */ 1706 + status = ocfs2_journal_load(osb->journal, local); 1707 + if (status < 0) { 1708 + mlog(ML_ERROR, "ocfs2 journal load failed! %d\n", status); 1709 + goto finally; 1710 + } 1711 1712 if (dirty) { 1713 /* recover my local alloc if we didn't unmount cleanly. */
+2 -2
include/linux/configfs.h
··· 165 }; 166 167 struct configfs_group_operations { 168 - struct config_item *(*make_item)(struct config_group *group, const char *name); 169 - struct config_group *(*make_group)(struct config_group *group, const char *name); 170 int (*commit_item)(struct config_item *item); 171 void (*disconnect_notify)(struct config_group *group, struct config_item *item); 172 void (*drop_item)(struct config_group *group, struct config_item *item);
··· 165 }; 166 167 struct configfs_group_operations { 168 + int (*make_item)(struct config_group *group, const char *name, struct config_item **new_item); 169 + int (*make_group)(struct config_group *group, const char *name, struct config_group **new_group); 170 int (*commit_item)(struct config_item *item); 171 void (*disconnect_notify)(struct config_group *group, struct config_item *item); 172 void (*drop_item)(struct config_group *group, struct config_item *item);