Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mfasheh/ocfs2

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mfasheh/ocfs2:
ocfs2: remove redundant NULL checks in ocfs2_direct_IO_get_blocks()
ocfs2: clean up some osb fields
ocfs2: fix init of uuid_net_key
ocfs2: silence a debug print
ocfs2: silence ENOENT during lookup of broken links
ocfs2: Cleanup message prints
ocfs2: silence -EEXIST from ocfs2_extent_map_insert/lookup
[PATCH] fs/ocfs2/dlm/dlmrecovery.c: make dlm_lockres_master_requery() static
ocfs2: warn the user on a dead timeout mismatch
ocfs2: OCFS2_FS must depend on SYSFS
ocfs2: Compile-time disabling of ocfs2 debugging output.
configfs: Clear up a few extra spaces where there should be TABs.
configfs: Release memory in configfs_example.

+131 -97
+15 -4
Documentation/filesystems/configfs/configfs_example.c
···
 };
 
 
+struct simple_children {
+	struct config_group group;
+};
+
+static inline struct simple_children *to_simple_children(struct config_item *item)
+{
+	return item ? container_of(to_config_group(item), struct simple_children, group) : NULL;
+}
+
 static struct config_item *simple_children_make_item(struct config_group *group, const char *name)
 {
 	struct simple_child *simple_child;
···
 		"items have only one attribute that is readable and writeable.\n");
 }
 
+static void simple_children_release(struct config_item *item)
+{
+	kfree(to_simple_children(item));
+}
+
 static struct configfs_item_operations simple_children_item_ops = {
+	.release = simple_children_release,
 	.show_attribute = simple_children_attr_show,
 };
 
···
  * a new simple_children group. That group can then have simple_child
  * children of its own.
  */
-
-struct simple_children {
-	struct config_group group;
-};
 
 static struct config_group *group_children_make_group(struct config_group *group, const char *name)
 {
+11 -1
fs/Kconfig
···
 
 config OCFS2_FS
 	tristate "OCFS2 file system support (EXPERIMENTAL)"
-	depends on NET && EXPERIMENTAL
+	depends on NET && SYSFS && EXPERIMENTAL
 	select CONFIGFS_FS
 	select JBD
 	select CRC32
···
 	  - Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease)
 	  - POSIX ACLs
 	  - readpages / writepages (not user visible)
+
+config OCFS2_DEBUG_MASKLOG
+	bool "OCFS2 logging support"
+	depends on OCFS2_FS
+	default y
+	help
+	  The ocfs2 filesystem has an extensive logging system.  The system
+	  allows selection of events to log via files in /sys/o2cb/logmask/.
+	  This option will enlarge your kernel, but it allows debugging of
+	  ocfs2 filesystem issues.
 
 config MINIX_FS
 	tristate "Minix fs support"
+3 -3
fs/configfs/dir.c
···
 	struct configfs_dirent * sd;
 
 	sd = d->d_fsdata;
-	list_del_init(&sd->s_sibling);
+	list_del_init(&sd->s_sibling);
 	configfs_put(sd);
 	if (d->d_inode)
 		simple_rmdir(parent->d_inode,d);
···
 
 			ret = configfs_detach_prep(sd->s_dentry);
 			if (!ret)
-				continue;
+				continue;
 		} else
 			ret = -ENOTEMPTY;
 
···
 
 	new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
 	if (!IS_ERR(new_dentry)) {
-		if (!new_dentry->d_inode) {
+		if (!new_dentry->d_inode) {
 			error = config_item_set_name(item, "%s", new_name);
 			if (!error) {
 				d_add(new_dentry, NULL);
+1 -1
fs/configfs/symlink.c
···
 }
 
 static int create_link(struct config_item *parent_item,
-		       struct config_item *item,
+		       struct config_item *item,
 		       struct dentry *dentry)
 {
 	struct configfs_dirent *target_sd = item->ci_dentry->d_fsdata;
+1 -8
fs/ocfs2/aops.c
···
 	u64 vbo_max; /* file offset, max_blocks from iblock */
 	u64 p_blkno;
 	int contig_blocks;
-	unsigned char blocksize_bits;
+	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
 	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
-
-	if (!inode || !bh_result) {
-		mlog(ML_ERROR, "inode or bh_result is null\n");
-		return -EIO;
-	}
-
-	blocksize_bits = inode->i_sb->s_blocksize_bits;
 
 	/* This function won't even be called if the request isn't all
 	 * nicely aligned and of the right size, so there's no need
+20
fs/ocfs2/cluster/heartbeat.c
···
 	hb_block->hb_seq = cpu_to_le64(cputime);
 	hb_block->hb_node = node_num;
 	hb_block->hb_generation = cpu_to_le64(generation);
+	hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);
 
 	/* This step must always happen last! */
 	hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
···
 	struct o2nm_node *node;
 	struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
 	u64 cputime;
+	unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
+	unsigned int slot_dead_ms;
 
 	memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);
 
···
 			      &o2hb_live_slots[slot->ds_node_num]);
 
 		slot->ds_equal_samples = 0;
+
+		/* We want to be sure that all nodes agree on the
+		 * number of milliseconds before a node will be
+		 * considered dead. The self-fencing timeout is
+		 * computed from this value, and a discrepancy might
+		 * result in heartbeat calling a node dead when it
+		 * hasn't self-fenced yet. */
+		slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
+		if (slot_dead_ms && slot_dead_ms != dead_ms) {
+			/* TODO: Perhaps we can fail the region here. */
+			mlog(ML_ERROR, "Node %d on device %s has a dead count "
+			     "of %u ms, but our count is %u ms.\n"
+			     "Please double check your configuration values "
+			     "for 'O2CB_HEARTBEAT_THRESHOLD'\n",
+			     slot->ds_node_num, reg->hr_dev_name, slot_dead_ms,
+			     dead_ms);
+		}
 		goto out;
 	}
 
+21 -1
fs/ocfs2/cluster/masklog.h
···
 #define MLOG_MASK_PREFIX 0
 #endif
 
+/*
+ * When logging is disabled, force the bit test to 0 for anything other
+ * than errors and notices, allowing gcc to remove the code completely.
+ * When enabled, allow all masks.
+ */
+#if defined(CONFIG_OCFS2_DEBUG_MASKLOG)
+#define ML_ALLOWED_BITS ~0
+#else
+#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE)
+#endif
+
 #define MLOG_MAX_BITS 64
 
 struct mlog_bits {
···
 
 #define mlog(mask, fmt, args...) do {					\
 	u64 __m = MLOG_MASK_PREFIX | (mask);				\
-	if (__mlog_test_u64(__m, mlog_and_bits) &&			\
+	if ((__m & ML_ALLOWED_BITS) &&					\
+	    __mlog_test_u64(__m, mlog_and_bits) &&			\
 	    !__mlog_test_u64(__m, mlog_not_bits)) {			\
 		if (__m & ML_ERROR)					\
 			__mlog_printk(KERN_ERR, "ERROR: "fmt , ##args);	\
···
 		mlog(ML_ERROR, "status = %lld\n", (long long)_st);	\
 } while (0)
 
+#if defined(CONFIG_OCFS2_DEBUG_MASKLOG)
 #define mlog_entry(fmt, args...) do {					\
 	mlog(ML_ENTRY, "ENTRY:" fmt , ##args);				\
 } while (0)
···
 #define mlog_exit_void() do {						\
 	mlog(ML_EXIT, "EXIT\n");					\
 } while (0)
+#else
+#define mlog_entry(...)  do { } while (0)
+#define mlog_entry_void(...)  do { } while (0)
+#define mlog_exit(...)  do { } while (0)
+#define mlog_exit_ptr(...)  do { } while (0)
+#define mlog_exit_void(...)  do { } while (0)
+#endif  /* defined(CONFIG_OCFS2_DEBUG_MASKLOG) */
 
 #define mlog_bug_on_msg(cond, fmt, args...) do {			\
 	if (cond) {							\
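
The ML_ALLOWED_BITS change above works by constant folding: MLOG_MASK_PREFIX and the mask argument are compile-time constants, so with CONFIG_OCFS2_DEBUG_MASKLOG off the test `(__m & ML_ALLOWED_BITS)` evaluates to zero for anything other than ML_ERROR/ML_NOTICE and gcc drops the whole branch, format strings included. A minimal userspace sketch of the same pattern (LOG_ERROR, LOG_ENTRY, LOG_ALLOWED and log() are made-up names for illustration, not part of the patch):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's masklog machinery. */
#define LOG_ERROR 0x1ULL
#define LOG_ENTRY 0x2ULL

#ifdef DEBUG_LOGGING
#define LOG_ALLOWED (~0ULL)		/* everything may be compiled in */
#else
#define LOG_ALLOWED LOG_ERROR		/* only errors survive */
#endif

/* When (mask) is a constant, ((mask) & LOG_ALLOWED) is a constant too, so
 * with DEBUG_LOGGING unset an optimizing compiler can delete the body of a
 * log(LOG_ENTRY, ...) call, including its format string. */
#define log(mask, fmt, ...) do {				\
		if ((mask) & LOG_ALLOWED)			\
			fprintf(stderr, fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	log(LOG_ERROR, "errors are always compiled in\n");
	log(LOG_ENTRY, "entry tracing disappears without -DDEBUG_LOGGING\n");
	return 0;
}

Building once with and once without -DDEBUG_LOGGING (with optimization on) should show the second call, and its string literal, vanishing from the object file.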
+1
fs/ocfs2/cluster/ocfs2_heartbeat.h
···
 	__u8  hb_pad1[3];
 	__le32 hb_cksum;
 	__le64 hb_generation;
+	__le32 hb_dead_ms;
 };
 
 #endif /* _OCFS2_HEARTBEAT_H */
+7 -7
fs/ocfs2/cluster/tcp.c
···
 	}
 
 	if (was_valid && !valid) {
-		mlog(ML_NOTICE, "no longer connected to " SC_NODEF_FMT "\n",
-		     SC_NODEF_ARGS(old_sc));
+		printk(KERN_INFO "o2net: no longer connected to "
+		       SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
 		o2net_complete_nodes_nsw(nn);
 	}
 
···
 		 * the only way to start connecting again is to down
 		 * heartbeat and bring it back up. */
 		cancel_delayed_work(&nn->nn_connect_expired);
-		mlog(ML_NOTICE, "%s " SC_NODEF_FMT "\n",
-		     o2nm_this_node() > sc->sc_node->nd_num ?
-		     "connected to" : "accepted connection from",
-		     SC_NODEF_ARGS(sc));
+		printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
+		       o2nm_this_node() > sc->sc_node->nd_num ?
+		       "connected to" : "accepted connection from",
+		       SC_NODEF_ARGS(sc));
 	}
 
 	/* trigger the connecting worker func as long as we're not valid,
···
 
 	do_gettimeofday(&now);
 
-	mlog(ML_NOTICE, "connection to " SC_NODEF_FMT " has been idle for 10 "
+	printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for 10 "
 	     "seconds, shutting it down.\n", SC_NODEF_ARGS(sc));
 	mlog(ML_NOTICE, "here are some times that might help debug the "
 	     "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
+2 -4
fs/ocfs2/dir.c
···
 			    struct ocfs2_dir_entry **dirent)
 {
 	int status = -ENOENT;
-	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
-	mlog_entry("(osb=%p, parent=%llu, name='%.*s', blkno=%p, inode=%p)\n",
-		   osb, (unsigned long long)OCFS2_I(inode)->ip_blkno,
-		   namelen, name, blkno, inode);
+	mlog_entry("(name=%.*s, blkno=%p, inode=%p, dirent_bh=%p, dirent=%p)\n",
+		   namelen, name, blkno, inode, dirent_bh, dirent);
 
 	*dirent_bh = ocfs2_find_entry(name, namelen, inode, dirent);
 	if (!*dirent_bh || !*dirent) {
-2
fs/ocfs2/dlm/dlmcommon.h
···
 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data);
 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
 			  u8 nodenum, u8 *real_master);
-int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
-			       struct dlm_lock_resource *res, u8 *real_master);
 
 
 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
+6 -3
fs/ocfs2/dlm/dlmdomain.c
···
 
 	assert_spin_locked(&dlm->spinlock);
 
-	mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name);
+	printk(KERN_INFO "ocfs2_dlm: Nodes in domain (\"%s\"): ", dlm->name);
 
 	while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
 				     node + 1)) < O2NM_MAX_NODES) {
-		mlog(ML_NOTICE, " node %d\n", node);
+		printk("%d ", node);
 	}
+	printk("\n");
 }
 
 static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data)
···
 
 	node = exit_msg->node_idx;
 
-	mlog(0, "Node %u leaves domain %s\n", node, dlm->name);
+	printk(KERN_INFO "ocfs2_dlm: Node %u leaves domain %s\n", node, dlm->name);
 
 	spin_lock(&dlm->spinlock);
 	clear_bit(node, dlm->domain_map);
···
 	set_bit(assert->node_idx, dlm->domain_map);
 	__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
 
+	printk(KERN_INFO "ocfs2_dlm: Node %u joins domain %s\n",
+	       assert->node_idx, dlm->name);
 	__dlm_print_nodes(dlm);
 
 	/* notify anything attached to the heartbeat events */
+6 -2
fs/ocfs2/dlm/dlmrecovery.c
···
 static void dlm_request_all_locks_worker(struct dlm_work_item *item,
 					 void *data);
 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
+static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res,
+				      u8 *real_master);
 
 static u64 dlm_get_next_mig_cookie(void);
 
···
 
 
 
-int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
-			       struct dlm_lock_resource *res, u8 *real_master)
+static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
+				      struct dlm_lock_resource *res,
+				      u8 *real_master)
 {
 	struct dlm_node_iter iter;
 	int nodenum;
+1 -2
fs/ocfs2/dlmglue.c
···
 	}
 
 	/* launch vote thread */
-	osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote-%d",
-				     osb->osb_id);
+	osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote");
 	if (IS_ERR(osb->vote_task)) {
 		status = PTR_ERR(osb->vote_task);
 		osb->vote_task = NULL;
+22 -7
fs/ocfs2/extent_map.c
···
 
 		ret = ocfs2_extent_map_insert(inode, rec,
 					      le16_to_cpu(el->l_tree_depth));
-		if (ret) {
+		if (ret && (ret != -EEXIST)) {
 			mlog_errno(ret);
 			goto out_free;
 		}
···
 /*
  * Simple rule: on any return code other than -EAGAIN, anything left
  * in the insert_context will be freed.
+ *
+ * Simple rule #2: A return code of -EEXIST from this function or
+ * its calls to ocfs2_extent_map_insert_entry() signifies that another
+ * thread beat us to the insert.  It is not an actual error, but it
+ * tells the caller we have no more work to do.
  */
 static int ocfs2_extent_map_try_insert(struct inode *inode,
 				       struct ocfs2_extent_rec *rec,
···
 		goto out_unlock;
 	}
 
+	/* Since insert_entry failed, the map MUST have old_ent */
 	old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos),
-					  le32_to_cpu(rec->e_clusters), NULL,
-					  NULL);
+					  le32_to_cpu(rec->e_clusters),
+					  NULL, NULL);
 
 	BUG_ON(!old_ent);
 
-	ret = -EEXIST;
-	if (old_ent->e_tree_depth < tree_depth)
+	if (old_ent->e_tree_depth < tree_depth) {
+		/* Another thread beat us to the lower tree_depth */
+		ret = -EEXIST;
 		goto out_unlock;
+	}
 
 	if (old_ent->e_tree_depth == tree_depth) {
+		/*
+		 * Another thread beat us to this tree_depth.
+		 * Let's make sure we agree with that thread (the
+		 * extent_rec should be identical).
+		 */
 		if (!memcmp(rec, &old_ent->e_rec,
 			    sizeof(struct ocfs2_extent_rec)))
 			ret = 0;
+		else
+			/* FIXME: Should this be ESRCH/EBADR??? */
+			ret = -EEXIST;
 
-		/* FIXME: Should this be ESRCH/EBADR??? */
 		goto out_unlock;
 	}
 
···
 					 tree_depth, &ctxt);
 	} while (ret == -EAGAIN);
 
-	if (ret < 0)
+	if ((ret < 0) && (ret != -EEXIST))
 		mlog_errno(ret);
 
 	if (ctxt.left_ent)
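
The -EEXIST convention documented above pushes the "someone else already inserted this" case back to the callers, which then filter it out, exactly as the ocfs2_extent_map_insert() hunk and the final `(ret < 0) && (ret != -EEXIST)` hunk do. A small caller-side sketch of that retry/benign-race pattern, with hypothetical names (try_insert() and insert_or_agree() are illustrative stand-ins, not functions from this patch):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for ocfs2_extent_map_try_insert(): 0 on success,
 * -EAGAIN when the caller must retry, -EEXIST when another thread already
 * inserted an identical entry, other negatives on real failure.  This stub
 * pretends a racing thread always wins. */
static int try_insert(int key)
{
	(void)key;
	return -EEXIST;
}

/* Caller-side pattern: loop on -EAGAIN, then treat -EEXIST as success. */
static int insert_or_agree(int key)
{
	int ret;

	do {
		ret = try_insert(key);
	} while (ret == -EAGAIN);

	if (ret == -EEXIST)	/* a racing thread beat us; not an error */
		ret = 0;

	return ret;
}

int main(void)
{
	printf("insert_or_agree(42) -> %d\n", insert_or_agree(42));
	return 0;
}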
+2 -3
fs/ocfs2/journal.c
···
 	}
 
 	/* Launch the commit thread */
-	osb->commit_task = kthread_run(ocfs2_commit_thread, osb, "ocfs2cmt-%d",
-				       osb->osb_id);
+	osb->commit_task = kthread_run(ocfs2_commit_thread, osb, "ocfs2cmt");
 	if (IS_ERR(osb->commit_task)) {
 		status = PTR_ERR(osb->commit_task);
 		osb->commit_task = NULL;
···
 		goto out;
 
 	osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
-						"ocfs2rec-%d", osb->osb_id);
+						"ocfs2rec");
 	if (IS_ERR(osb->recovery_thread_task)) {
 		mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
 		osb->recovery_thread_task = NULL;
+2 -2
fs/ocfs2/mmap.c
···
 			       unsigned long address,
 			       int *type)
 {
-	struct inode *inode = area->vm_file->f_dentry->d_inode;
 	struct page *page = NOPAGE_SIGBUS;
 	sigset_t blocked, oldset;
 	int ret;
 
-	mlog_entry("(inode %lu, address %lu)\n", inode->i_ino, address);
+	mlog_entry("(area=%p, address=%lu, type=%p)\n", area, address,
+		   type);
 
 	/* The best way to deal with signals in this path is
 	 * to block them upfront, rather than allowing the
-4
fs/ocfs2/ocfs2.h
···
 struct ocfs2_journal_handle;
 struct ocfs2_super
 {
-	u32 osb_id;		/* id used by the proc interface */
 	struct task_struct *commit_task;
 	struct super_block *sb;
 	struct inode *root_inode;
···
 	unsigned long s_mount_opt;
 
 	u16 max_slots;
-	u16 num_nodes;
 	s16 node_num;
 	s16 slot_num;
 	int s_sectsize_bits;
 	int s_clustersize;
 	int s_clustersize_bits;
-	struct proc_dir_entry *proc_sub_dir; /* points to /proc/fs/ocfs2/<maj_min> */
 
 	atomic_t vol_state;
 	struct mutex recovery_lock;
···
 };
 
 #define OCFS2_SB(sb)	    ((struct ocfs2_super *)(sb)->s_fs_info)
-#define OCFS2_MAX_OSB_ID	65536
 
 static inline int ocfs2_should_order_data(struct inode *inode)
 {
+1 -1
fs/ocfs2/slot_map.c
···
 	osb->slot_num = slot;
 	spin_unlock(&si->si_lock);
 
-	mlog(ML_NOTICE, "taking node slot %d\n", osb->slot_num);
+	mlog(0, "taking node slot %d\n", osb->slot_num);
 
 	status = ocfs2_update_disk_slots(osb, si);
 	if (status < 0)
+8 -41
fs/ocfs2/super.c
···
 
 #include "buffer_head_io.h"
 
-/*
- * Globals
- */
-static spinlock_t ocfs2_globals_lock = SPIN_LOCK_UNLOCKED;
-
-static u32 osb_id;		/* Keeps track of next available OSB Id */
-
 static kmem_cache_t *ocfs2_inode_cachep = NULL;
 
 kmem_cache_t *ocfs2_lock_cache = NULL;
···
 
 	ocfs2_complete_mount_recovery(osb);
 
-	printk("ocfs2: Mounting device (%u,%u) on (node %d, slot %d) with %s "
-	       "data mode.\n",
-	       MAJOR(sb->s_dev), MINOR(sb->s_dev), osb->node_num,
-	       osb->slot_num,
+	printk(KERN_INFO "ocfs2: Mounting device (%s) on (node %d, slot %d) "
+	       "with %s data mode.\n",
+	       osb->dev_str, osb->node_num, osb->slot_num,
 	       osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
 	       "ordered");
···
 		status = -ENOMEM;
 		goto leave;
 	}
-
-	spin_lock(&ocfs2_globals_lock);
-	osb_id = 0;
-	spin_unlock(&ocfs2_globals_lock);
 
 	ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL);
 	if (!ocfs2_debugfs_root) {
···
 		goto bail;
 	}
 
-	mlog(ML_NOTICE, "I am node %d\n", osb->node_num);
+	mlog(0, "I am node %d\n", osb->node_num);
 
 	status = 0;
 bail:
···
 
 	atomic_set(&osb->vol_state, VOLUME_DISMOUNTED);
 
-	printk("ocfs2: Unmounting device (%u,%u) on (node %d)\n",
-	       MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev), osb->node_num);
+	printk(KERN_INFO "ocfs2: Unmounting device (%s) on (node %d)\n",
+	       osb->dev_str, osb->node_num);
 
 	ocfs2_delete_osb(osb);
 	kfree(osb);
···
 	osb->uuid_str = kcalloc(1, OCFS2_VOL_UUID_LEN * 2 + 1, GFP_KERNEL);
 	if (osb->uuid_str == NULL)
 		return -ENOMEM;
-
-	memcpy(osb->uuid, uuid, OCFS2_VOL_UUID_LEN);
 
 	for (i = 0, ptr = osb->uuid_str; i < OCFS2_VOL_UUID_LEN; i++) {
 		/* print with null */
···
 		goto bail;
 	}
 
-	osb->uuid = kmalloc(OCFS2_VOL_UUID_LEN, GFP_KERNEL);
-	if (!osb->uuid) {
-		mlog(ML_ERROR, "unable to alloc uuid\n");
-		status = -ENOMEM;
-		goto bail;
-	}
-
 	di = (struct ocfs2_dinode *)bh->b_data;
 
 	osb->max_slots = le16_to_cpu(di->id2.i_super.s_max_slots);
···
 		status = -EINVAL;
 		goto bail;
 	}
-	mlog(ML_NOTICE, "max_slots for this device: %u\n", osb->max_slots);
+	mlog(0, "max_slots for this device: %u\n", osb->max_slots);
 
 	init_waitqueue_head(&osb->osb_wipe_event);
 	osb->osb_orphan_wipes = kcalloc(osb->max_slots,
···
 		goto bail;
 	}
 
-	memcpy(&uuid_net_key, &osb->uuid[i], sizeof(osb->net_key));
+	memcpy(&uuid_net_key, di->id2.i_super.s_uuid, sizeof(uuid_net_key));
 	osb->net_key = le32_to_cpu(uuid_net_key);
 
 	strncpy(osb->vol_label, di->id2.i_super.s_label, 63);
···
 		mlog_errno(status);
 		goto bail;
 	}
-
-	/* Link this osb onto the global linked list of all osb structures. */
-	/* The Global Link List is mainted for the whole driver . */
-	spin_lock(&ocfs2_globals_lock);
-	osb->osb_id = osb_id;
-	if (osb_id < OCFS2_MAX_OSB_ID)
-		osb_id++;
-	else {
-		mlog(ML_ERROR, "Too many volumes mounted\n");
-		status = -ENOMEM;
-	}
-	spin_unlock(&ocfs2_globals_lock);
 
 bail:
 	mlog_exit(status);
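
The uuid_net_key hunk above fixes where the key comes from: it is now read straight from the on-disk superblock UUID (di->id2.i_super.s_uuid) rather than from the osb->uuid copy, which this same series stops allocating and filling. The net effect is simply "first four UUID bytes, interpreted little-endian"; a standalone sketch of that derivation (net_key_from_uuid() is a hypothetical helper, not code from the patch):

#include <stdint.h>
#include <stdio.h>

/* Derive a 32-bit network key from the first four bytes of a 16-byte
 * on-disk UUID, little-endian -- the same shape as the memcpy() plus
 * le32_to_cpu() pair in the fixed code above. */
static uint32_t net_key_from_uuid(const uint8_t uuid[16])
{
	return (uint32_t)uuid[0] |
	       ((uint32_t)uuid[1] << 8) |
	       ((uint32_t)uuid[2] << 16) |
	       ((uint32_t)uuid[3] << 24);
}

int main(void)
{
	const uint8_t uuid[16] = { 0x78, 0x56, 0x34, 0x12 };	/* rest zero */

	printf("net key = 0x%08x\n", net_key_from_uuid(uuid));	/* 0x12345678 */
	return 0;
}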
+1 -1
fs/ocfs2/symlink.c
···
 	}
 
 	status = vfs_follow_link(nd, link);
-	if (status)
+	if (status && status != -ENOENT)
 		mlog_errno(status);
 bail:
 	if (page) {