Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2: (39 commits)
Treat writes as new when holes span across page boundaries
fs,ocfs2: Move o2net_get_func_run_time under CONFIG_OCFS2_FS_STATS.
ocfs2/dlm: Move kmalloc() outside the spinlock
ocfs2: Make the left masklogs compat.
ocfs2: Remove masklog ML_AIO.
ocfs2: Remove masklog ML_UPTODATE.
ocfs2: Remove masklog ML_BH_IO.
ocfs2: Remove masklog ML_JOURNAL.
ocfs2: Remove masklog ML_EXPORT.
ocfs2: Remove masklog ML_DCACHE.
ocfs2: Remove masklog ML_NAMEI.
ocfs2: Remove mlog(0) from fs/ocfs2/dir.c
ocfs2: remove NAMEI from symlink.c
ocfs2: Remove masklog ML_QUOTA.
ocfs2: Remove mlog(0) from quota_local.c.
ocfs2: Remove masklog ML_RESERVATIONS.
ocfs2: Remove masklog ML_XATTR.
ocfs2: Remove masklog ML_SUPER.
ocfs2: Remove mlog(0) from fs/ocfs2/heartbeat.c
ocfs2: Remove mlog(0) from fs/ocfs2/slot_map.c
...

Fix up trivial conflict in fs/ocfs2/super.c

+3854 -1625
-1
fs/ocfs2/acl.c
··· 24 24 #include <linux/slab.h> 25 25 #include <linux/string.h> 26 26 27 - #define MLOG_MASK_PREFIX ML_INODE 28 27 #include <cluster/masklog.h> 29 28 30 29 #include "ocfs2.h"
+92 -122
fs/ocfs2/alloc.c
··· 30 30 #include <linux/swap.h> 31 31 #include <linux/quotaops.h> 32 32 33 - #define MLOG_MASK_PREFIX ML_DISK_ALLOC 34 33 #include <cluster/masklog.h> 35 34 36 35 #include "ocfs2.h" ··· 49 50 #include "uptodate.h" 50 51 #include "xattr.h" 51 52 #include "refcounttree.h" 53 + #include "ocfs2_trace.h" 52 54 53 55 #include "buffer_head_io.h" 54 56 ··· 886 886 struct ocfs2_extent_block *eb = 887 887 (struct ocfs2_extent_block *)bh->b_data; 888 888 889 - mlog(0, "Validating extent block %llu\n", 890 - (unsigned long long)bh->b_blocknr); 889 + trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr); 891 890 892 891 BUG_ON(!buffer_uptodate(bh)); 893 892 ··· 964 965 struct buffer_head *eb_bh = NULL; 965 966 u64 last_eb_blk = 0; 966 967 967 - mlog_entry_void(); 968 - 969 968 el = et->et_root_el; 970 969 last_eb_blk = ocfs2_et_get_last_eb_blk(et); 971 970 ··· 984 987 bail: 985 988 brelse(eb_bh); 986 989 987 - mlog_exit(retval); 990 + trace_ocfs2_num_free_extents(retval); 988 991 return retval; 989 992 } 990 993 ··· 1006 1009 struct ocfs2_super *osb = 1007 1010 OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci)); 1008 1011 struct ocfs2_extent_block *eb; 1009 - 1010 - mlog_entry_void(); 1011 1012 1012 1013 count = 0; 1013 1014 while (count < wanted) { ··· 1069 1074 brelse(bhs[i]); 1070 1075 bhs[i] = NULL; 1071 1076 } 1077 + mlog_errno(status); 1072 1078 } 1073 - mlog_exit(status); 1074 1079 return status; 1075 1080 } 1076 1081 ··· 1168 1173 struct ocfs2_extent_list *el; 1169 1174 u32 new_cpos, root_end; 1170 1175 1171 - mlog_entry_void(); 1172 - 1173 1176 BUG_ON(!last_eb_bh || !*last_eb_bh); 1174 1177 1175 1178 if (eb_bh) { ··· 1193 1200 * from new_cpos). 
1194 1201 */ 1195 1202 if (root_end > new_cpos) { 1196 - mlog(0, "adjust the cluster end from %u to %u\n", 1197 - root_end, new_cpos); 1203 + trace_ocfs2_adjust_rightmost_branch( 1204 + (unsigned long long) 1205 + ocfs2_metadata_cache_owner(et->et_ci), 1206 + root_end, new_cpos); 1207 + 1198 1208 status = ocfs2_adjust_rightmost_branch(handle, et); 1199 1209 if (status) { 1200 1210 mlog_errno(status); ··· 1328 1332 kfree(new_eb_bhs); 1329 1333 } 1330 1334 1331 - mlog_exit(status); 1332 1335 return status; 1333 1336 } 1334 1337 ··· 1347 1352 struct ocfs2_extent_block *eb; 1348 1353 struct ocfs2_extent_list *root_el; 1349 1354 struct ocfs2_extent_list *eb_el; 1350 - 1351 - mlog_entry_void(); 1352 1355 1353 1356 status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac, 1354 1357 &new_eb_bh); ··· 1408 1415 bail: 1409 1416 brelse(new_eb_bh); 1410 1417 1411 - mlog_exit(status); 1412 1418 return status; 1413 1419 } 1414 1420 ··· 1437 1445 struct ocfs2_extent_list *el; 1438 1446 struct buffer_head *bh = NULL; 1439 1447 struct buffer_head *lowest_bh = NULL; 1440 - 1441 - mlog_entry_void(); 1442 1448 1443 1449 *target_bh = NULL; 1444 1450 ··· 1493 1503 bail: 1494 1504 brelse(bh); 1495 1505 1496 - mlog_exit(status); 1497 1506 return status; 1498 1507 } 1499 1508 ··· 1529 1540 * another tree level */ 1530 1541 if (shift) { 1531 1542 BUG_ON(bh); 1532 - mlog(0, "need to shift tree depth (current = %d)\n", depth); 1543 + trace_ocfs2_grow_tree( 1544 + (unsigned long long) 1545 + ocfs2_metadata_cache_owner(et->et_ci), 1546 + depth); 1533 1547 1534 1548 /* ocfs2_shift_tree_depth will return us a buffer with 1535 1549 * the new extent block (so we can pass that to ··· 1562 1570 1563 1571 /* call ocfs2_add_branch to add the final part of the tree with 1564 1572 * the new data. */ 1565 - mlog(0, "add branch. 
bh = %p\n", bh); 1566 1573 ret = ocfs2_add_branch(handle, et, bh, last_eb_bh, 1567 1574 meta_ac); 1568 1575 if (ret < 0) { ··· 1636 1645 } 1637 1646 insert_index = i; 1638 1647 1639 - mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n", 1640 - insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count)); 1648 + trace_ocfs2_rotate_leaf(insert_cpos, insert_index, 1649 + has_empty, next_free, 1650 + le16_to_cpu(el->l_count)); 1641 1651 1642 1652 BUG_ON(insert_index < 0); 1643 1653 BUG_ON(insert_index >= le16_to_cpu(el->l_count)); ··· 2051 2059 left_el = path_leaf_el(left_path); 2052 2060 right_el = path_leaf_el(right_path); 2053 2061 for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) { 2054 - mlog(0, "Adjust records at index %u\n", i); 2062 + trace_ocfs2_complete_edge_insert(i); 2055 2063 2056 2064 /* 2057 2065 * One nice property of knowing that all of these ··· 2381 2389 goto out; 2382 2390 } 2383 2391 2384 - mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos); 2392 + trace_ocfs2_rotate_tree_right( 2393 + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), 2394 + insert_cpos, cpos); 2385 2395 2386 2396 /* 2387 2397 * What we want to do here is: ··· 2412 2418 * rotating subtrees. 2413 2419 */ 2414 2420 while (cpos && insert_cpos <= cpos) { 2415 - mlog(0, "Rotating a tree: ins. 
cpos: %u, left path cpos: %u\n", 2416 - insert_cpos, cpos); 2421 + trace_ocfs2_rotate_tree_right( 2422 + (unsigned long long) 2423 + ocfs2_metadata_cache_owner(et->et_ci), 2424 + insert_cpos, cpos); 2417 2425 2418 2426 ret = ocfs2_find_path(et->et_ci, left_path, cpos); 2419 2427 if (ret) { ··· 2457 2461 2458 2462 start = ocfs2_find_subtree_root(et, left_path, right_path); 2459 2463 2460 - mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", 2461 - start, 2462 - (unsigned long long) right_path->p_node[start].bh->b_blocknr, 2463 - right_path->p_tree_depth); 2464 + trace_ocfs2_rotate_subtree(start, 2465 + (unsigned long long) 2466 + right_path->p_node[start].bh->b_blocknr, 2467 + right_path->p_tree_depth); 2464 2468 2465 2469 ret = ocfs2_extend_rotate_transaction(handle, start, 2466 2470 orig_credits, right_path); ··· 2960 2964 subtree_root = ocfs2_find_subtree_root(et, left_path, 2961 2965 right_path); 2962 2966 2963 - mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", 2964 - subtree_root, 2967 + trace_ocfs2_rotate_subtree(subtree_root, 2965 2968 (unsigned long long) 2966 2969 right_path->p_node[subtree_root].bh->b_blocknr, 2967 2970 right_path->p_tree_depth); ··· 3984 3989 goto out; 3985 3990 } 3986 3991 3987 - mlog(0, "Append may need a left path update. 
cpos: %u, " 3988 - "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos), 3989 - left_cpos); 3992 + trace_ocfs2_append_rec_to_path( 3993 + (unsigned long long) 3994 + ocfs2_metadata_cache_owner(et->et_ci), 3995 + le32_to_cpu(insert_rec->e_cpos), 3996 + left_cpos); 3990 3997 3991 3998 /* 3992 3999 * No need to worry if the append is already in the ··· 4559 4562 ocfs2_et_get_last_eb_blk(et), 4560 4563 &bh); 4561 4564 if (ret) { 4562 - mlog_exit(ret); 4565 + mlog_errno(ret); 4563 4566 goto out; 4564 4567 } 4565 4568 eb = (struct ocfs2_extent_block *) bh->b_data; ··· 4675 4678 struct ocfs2_insert_type insert = {0, }; 4676 4679 struct ocfs2_extent_rec rec; 4677 4680 4678 - mlog(0, "add %u clusters at position %u to owner %llu\n", 4679 - new_clusters, cpos, 4680 - (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); 4681 + trace_ocfs2_insert_extent_start( 4682 + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), 4683 + cpos, new_clusters); 4681 4684 4682 4685 memset(&rec, 0, sizeof(rec)); 4683 4686 rec.e_cpos = cpu_to_le32(cpos); ··· 4697 4700 goto bail; 4698 4701 } 4699 4702 4700 - mlog(0, "Insert.appending: %u, Insert.Contig: %u, " 4701 - "Insert.contig_index: %d, Insert.free_records: %d, " 4702 - "Insert.tree_depth: %d\n", 4703 - insert.ins_appending, insert.ins_contig, insert.ins_contig_index, 4704 - free_records, insert.ins_tree_depth); 4703 + trace_ocfs2_insert_extent(insert.ins_appending, insert.ins_contig, 4704 + insert.ins_contig_index, free_records, 4705 + insert.ins_tree_depth); 4705 4706 4706 4707 if (insert.ins_contig == CONTIG_NONE && free_records == 0) { 4707 4708 status = ocfs2_grow_tree(handle, et, ··· 4721 4726 bail: 4722 4727 brelse(last_eb_bh); 4723 4728 4724 - mlog_exit(status); 4725 4729 return status; 4726 4730 } 4727 4731 ··· 4740 4746 struct ocfs2_alloc_context *meta_ac, 4741 4747 enum ocfs2_alloc_restarted *reason_ret) 4742 4748 { 4743 - int status = 0; 4749 + int status = 0, err = 0; 4744 4750 int free_extents; 4745 4751 enum 
ocfs2_alloc_restarted reason = RESTART_NONE; 4746 4752 u32 bit_off, num_bits; ··· 4767 4773 * 2) we are so fragmented, we've needed to add metadata too 4768 4774 * many times. */ 4769 4775 if (!free_extents && !meta_ac) { 4770 - mlog(0, "we haven't reserved any metadata!\n"); 4776 + err = -1; 4771 4777 status = -EAGAIN; 4772 4778 reason = RESTART_META; 4773 4779 goto leave; 4774 4780 } else if ((!free_extents) 4775 4781 && (ocfs2_alloc_context_bits_left(meta_ac) 4776 4782 < ocfs2_extend_meta_needed(et->et_root_el))) { 4777 - mlog(0, "filesystem is really fragmented...\n"); 4783 + err = -2; 4778 4784 status = -EAGAIN; 4779 4785 reason = RESTART_META; 4780 4786 goto leave; ··· 4799 4805 } 4800 4806 4801 4807 block = ocfs2_clusters_to_blocks(osb->sb, bit_off); 4802 - mlog(0, "Allocating %u clusters at block %u for owner %llu\n", 4803 - num_bits, bit_off, 4804 - (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); 4808 + trace_ocfs2_add_clusters_in_btree( 4809 + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), 4810 + bit_off, num_bits); 4805 4811 status = ocfs2_insert_extent(handle, et, *logical_offset, block, 4806 4812 num_bits, flags, meta_ac); 4807 4813 if (status < 0) { ··· 4815 4821 *logical_offset += num_bits; 4816 4822 4817 4823 if (clusters_to_add) { 4818 - mlog(0, "need to alloc once more, wanted = %u\n", 4819 - clusters_to_add); 4824 + err = clusters_to_add; 4820 4825 status = -EAGAIN; 4821 4826 reason = RESTART_TRANS; 4822 4827 } 4823 4828 4824 4829 leave: 4825 - mlog_exit(status); 4826 4830 if (reason_ret) 4827 4831 *reason_ret = reason; 4832 + trace_ocfs2_add_clusters_in_btree_ret(status, reason, err); 4828 4833 return status; 4829 4834 } 4830 4835 ··· 5032 5039 ocfs2_et_get_last_eb_blk(et), 5033 5040 &last_eb_bh); 5034 5041 if (ret) { 5035 - mlog_exit(ret); 5042 + mlog_errno(ret); 5036 5043 goto out; 5037 5044 } 5038 5045 ··· 5049 5056 5050 5057 ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]); 5051 5058 5052 - mlog(0, 
"index: %d, contig: %u, has_empty: %u, split_covers: %u\n", 5053 - split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent, 5054 - ctxt.c_split_covers_rec); 5059 + trace_ocfs2_split_extent(split_index, ctxt.c_contig_type, 5060 + ctxt.c_has_empty_extent, 5061 + ctxt.c_split_covers_rec); 5055 5062 5056 5063 if (ctxt.c_contig_type == CONTIG_NONE) { 5057 5064 if (ctxt.c_split_covers_rec) ··· 5185 5192 { 5186 5193 int ret; 5187 5194 5188 - mlog(0, "Inode %lu cpos %u, len %u, phys clusters %u\n", 5189 - inode->i_ino, cpos, len, phys); 5195 + trace_ocfs2_mark_extent_written( 5196 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 5197 + cpos, len, phys); 5190 5198 5191 5199 if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { 5192 5200 ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " ··· 5506 5512 5507 5513 BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range); 5508 5514 5509 - mlog(0, "Owner %llu, remove (cpos %u, len %u). Existing index %d " 5510 - "(cpos %u, len %u)\n", 5511 - (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), 5512 - cpos, len, index, 5513 - le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); 5515 + trace_ocfs2_remove_extent( 5516 + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), 5517 + cpos, len, index, le32_to_cpu(rec->e_cpos), 5518 + ocfs2_rec_clusters(el, rec)); 5514 5519 5515 5520 if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) { 5516 5521 ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, ··· 5788 5795 struct ocfs2_dinode *di; 5789 5796 struct ocfs2_truncate_log *tl; 5790 5797 5791 - mlog_entry("start_blk = %llu, num_clusters = %u\n", 5792 - (unsigned long long)start_blk, num_clusters); 5793 - 5794 5798 BUG_ON(mutex_trylock(&tl_inode->i_mutex)); 5795 5799 5796 5800 start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk); ··· 5824 5834 goto bail; 5825 5835 } 5826 5836 5827 - mlog(0, "Log truncate of %u clusters starting at cluster %u to " 5828 - "%llu (index = 
%d)\n", num_clusters, start_cluster, 5829 - (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index); 5830 - 5837 + trace_ocfs2_truncate_log_append( 5838 + (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index, 5839 + start_cluster, num_clusters); 5831 5840 if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) { 5832 5841 /* 5833 5842 * Move index back to the record we are coalescing with. ··· 5835 5846 index--; 5836 5847 5837 5848 num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters); 5838 - mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n", 5839 - index, le32_to_cpu(tl->tl_recs[index].t_start), 5840 - num_clusters); 5849 + trace_ocfs2_truncate_log_append( 5850 + (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, 5851 + index, le32_to_cpu(tl->tl_recs[index].t_start), 5852 + num_clusters); 5841 5853 } else { 5842 5854 tl->tl_recs[index].t_start = cpu_to_le32(start_cluster); 5843 5855 tl->tl_used = cpu_to_le16(index + 1); ··· 5849 5859 5850 5860 osb->truncated_clusters += num_clusters; 5851 5861 bail: 5852 - mlog_exit(status); 5853 5862 return status; 5854 5863 } 5855 5864 ··· 5866 5877 struct ocfs2_truncate_log *tl; 5867 5878 struct inode *tl_inode = osb->osb_tl_inode; 5868 5879 struct buffer_head *tl_bh = osb->osb_tl_bh; 5869 - 5870 - mlog_entry_void(); 5871 5880 5872 5881 di = (struct ocfs2_dinode *) tl_bh->b_data; 5873 5882 tl = &di->id2.i_dealloc; ··· 5902 5915 /* if start_blk is not set, we ignore the record as 5903 5916 * invalid. 
*/ 5904 5917 if (start_blk) { 5905 - mlog(0, "free record %d, start = %u, clusters = %u\n", 5906 - i, le32_to_cpu(rec.t_start), num_clusters); 5918 + trace_ocfs2_replay_truncate_records( 5919 + (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, 5920 + i, le32_to_cpu(rec.t_start), num_clusters); 5907 5921 5908 5922 status = ocfs2_free_clusters(handle, data_alloc_inode, 5909 5923 data_alloc_bh, start_blk, ··· 5920 5932 osb->truncated_clusters = 0; 5921 5933 5922 5934 bail: 5923 - mlog_exit(status); 5924 5935 return status; 5925 5936 } 5926 5937 ··· 5936 5949 struct ocfs2_dinode *di; 5937 5950 struct ocfs2_truncate_log *tl; 5938 5951 5939 - mlog_entry_void(); 5940 - 5941 5952 BUG_ON(mutex_trylock(&tl_inode->i_mutex)); 5942 5953 5943 5954 di = (struct ocfs2_dinode *) tl_bh->b_data; ··· 5947 5962 5948 5963 tl = &di->id2.i_dealloc; 5949 5964 num_to_flush = le16_to_cpu(tl->tl_used); 5950 - mlog(0, "Flush %u records from truncate log #%llu\n", 5951 - num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno); 5965 + trace_ocfs2_flush_truncate_log( 5966 + (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, 5967 + num_to_flush); 5952 5968 if (!num_to_flush) { 5953 5969 status = 0; 5954 5970 goto out; ··· 5995 6009 iput(data_alloc_inode); 5996 6010 5997 6011 out: 5998 - mlog_exit(status); 5999 6012 return status; 6000 6013 } 6001 6014 ··· 6017 6032 container_of(work, struct ocfs2_super, 6018 6033 osb_truncate_log_wq.work); 6019 6034 6020 - mlog_entry_void(); 6021 - 6022 6035 status = ocfs2_flush_truncate_log(osb); 6023 6036 if (status < 0) 6024 6037 mlog_errno(status); 6025 6038 else 6026 6039 ocfs2_init_steal_slots(osb); 6027 - 6028 - mlog_exit(status); 6029 6040 } 6030 6041 6031 6042 #define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ) ··· 6067 6086 *tl_inode = inode; 6068 6087 *tl_bh = bh; 6069 6088 bail: 6070 - mlog_exit(status); 6071 6089 return status; 6072 6090 } 6073 6091 ··· 6086 6106 6087 6107 *tl_copy = NULL; 6088 6108 6089 - mlog(0, "recover truncate log from slot 
%d\n", slot_num); 6109 + trace_ocfs2_begin_truncate_log_recovery(slot_num); 6090 6110 6091 6111 status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh); 6092 6112 if (status < 0) { ··· 6103 6123 6104 6124 tl = &di->id2.i_dealloc; 6105 6125 if (le16_to_cpu(tl->tl_used)) { 6106 - mlog(0, "We'll have %u logs to recover\n", 6107 - le16_to_cpu(tl->tl_used)); 6126 + trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used)); 6108 6127 6109 6128 *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL); 6110 6129 if (!(*tl_copy)) { ··· 6136 6157 if (status < 0 && (*tl_copy)) { 6137 6158 kfree(*tl_copy); 6138 6159 *tl_copy = NULL; 6160 + mlog_errno(status); 6139 6161 } 6140 6162 6141 - mlog_exit(status); 6142 6163 return status; 6143 6164 } 6144 6165 ··· 6153 6174 struct inode *tl_inode = osb->osb_tl_inode; 6154 6175 struct ocfs2_truncate_log *tl; 6155 6176 6156 - mlog_entry_void(); 6157 - 6158 6177 if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) { 6159 6178 mlog(ML_ERROR, "Asked to recover my own truncate log!\n"); 6160 6179 return -EINVAL; ··· 6160 6183 6161 6184 tl = &tl_copy->id2.i_dealloc; 6162 6185 num_recs = le16_to_cpu(tl->tl_used); 6163 - mlog(0, "cleanup %u records from %llu\n", num_recs, 6164 - (unsigned long long)le64_to_cpu(tl_copy->i_blkno)); 6186 + trace_ocfs2_complete_truncate_log_recovery( 6187 + (unsigned long long)le64_to_cpu(tl_copy->i_blkno), 6188 + num_recs); 6165 6189 6166 6190 mutex_lock(&tl_inode->i_mutex); 6167 6191 for(i = 0; i < num_recs; i++) { ··· 6197 6219 bail_up: 6198 6220 mutex_unlock(&tl_inode->i_mutex); 6199 6221 6200 - mlog_exit(status); 6201 6222 return status; 6202 6223 } 6203 6224 ··· 6204 6227 { 6205 6228 int status; 6206 6229 struct inode *tl_inode = osb->osb_tl_inode; 6207 - 6208 - mlog_entry_void(); 6209 6230 6210 6231 if (tl_inode) { 6211 6232 cancel_delayed_work(&osb->osb_truncate_log_wq); ··· 6216 6241 brelse(osb->osb_tl_bh); 6217 6242 iput(osb->osb_tl_inode); 6218 6243 } 6219 - 6220 - 
mlog_exit_void(); 6221 6244 } 6222 6245 6223 6246 int ocfs2_truncate_log_init(struct ocfs2_super *osb) ··· 6223 6250 int status; 6224 6251 struct inode *tl_inode = NULL; 6225 6252 struct buffer_head *tl_bh = NULL; 6226 - 6227 - mlog_entry_void(); 6228 6253 6229 6254 status = ocfs2_get_truncate_log_info(osb, 6230 6255 osb->slot_num, ··· 6239 6268 osb->osb_tl_bh = tl_bh; 6240 6269 osb->osb_tl_inode = tl_inode; 6241 6270 6242 - mlog_exit(status); 6243 6271 return status; 6244 6272 } 6245 6273 ··· 6320 6350 else 6321 6351 bg_blkno = ocfs2_which_suballoc_group(head->free_blk, 6322 6352 head->free_bit); 6323 - mlog(0, "Free bit: (bit %u, blkno %llu)\n", 6324 - head->free_bit, (unsigned long long)head->free_blk); 6353 + trace_ocfs2_free_cached_blocks( 6354 + (unsigned long long)head->free_blk, head->free_bit); 6325 6355 6326 6356 ret = ocfs2_free_suballoc_bits(handle, inode, di_bh, 6327 6357 head->free_bit, bg_blkno, 1); ··· 6374 6404 return ret; 6375 6405 } 6376 6406 6377 - mlog(0, "Insert clusters: (bit %u, blk %llu)\n", 6378 - bit, (unsigned long long)blkno); 6407 + trace_ocfs2_cache_cluster_dealloc((unsigned long long)blkno, bit); 6379 6408 6380 6409 item->free_blk = blkno; 6381 6410 item->free_bit = bit; ··· 6449 6480 fl = ctxt->c_first_suballocator; 6450 6481 6451 6482 if (fl->f_first) { 6452 - mlog(0, "Free items: (type %u, slot %d)\n", 6453 - fl->f_inode_type, fl->f_slot); 6483 + trace_ocfs2_run_deallocs(fl->f_inode_type, 6484 + fl->f_slot); 6454 6485 ret2 = ocfs2_free_cached_blocks(osb, 6455 6486 fl->f_inode_type, 6456 6487 fl->f_slot, ··· 6527 6558 goto out; 6528 6559 } 6529 6560 6530 - mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n", 6531 - type, slot, bit, (unsigned long long)blkno); 6561 + trace_ocfs2_cache_block_dealloc(type, slot, 6562 + (unsigned long long)suballoc, 6563 + (unsigned long long)blkno, bit); 6532 6564 6533 6565 item->free_bg = suballoc; 6534 6566 item->free_blk = blkno; ··· 6975 7005 struct ocfs2_extent_tree et; 6976 7006 struct 
ocfs2_cached_dealloc_ctxt dealloc; 6977 7007 6978 - mlog_entry_void(); 6979 - 6980 7008 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); 6981 7009 ocfs2_init_dealloc_ctxt(&dealloc); 6982 7010 ··· 7009 7041 goto bail; 7010 7042 } 7011 7043 7012 - mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n", 7013 - OCFS2_I(inode)->ip_clusters, path->p_tree_depth); 7044 + trace_ocfs2_commit_truncate( 7045 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 7046 + new_highest_cpos, 7047 + OCFS2_I(inode)->ip_clusters, 7048 + path->p_tree_depth); 7014 7049 7015 7050 /* 7016 7051 * By now, el will point to the extent list on the bottom most ··· 7107 7136 7108 7137 ocfs2_free_path(path); 7109 7138 7110 - mlog_exit(status); 7111 7139 return status; 7112 7140 } 7113 7141
+39 -43
fs/ocfs2/aops.c
··· 29 29 #include <linux/mpage.h> 30 30 #include <linux/quotaops.h> 31 31 32 - #define MLOG_MASK_PREFIX ML_FILE_IO 33 32 #include <cluster/masklog.h> 34 33 35 34 #include "ocfs2.h" ··· 44 45 #include "super.h" 45 46 #include "symlink.h" 46 47 #include "refcounttree.h" 48 + #include "ocfs2_trace.h" 47 49 48 50 #include "buffer_head_io.h" 49 51 ··· 59 59 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 60 60 void *kaddr; 61 61 62 - mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode, 63 - (unsigned long long)iblock, bh_result, create); 62 + trace_ocfs2_symlink_get_block( 63 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 64 + (unsigned long long)iblock, bh_result, create); 64 65 65 66 BUG_ON(ocfs2_inode_is_fast_symlink(inode)); 66 67 ··· 124 123 bail: 125 124 brelse(bh); 126 125 127 - mlog_exit(err); 128 126 return err; 129 127 } 130 128 ··· 136 136 u64 p_blkno, count, past_eof; 137 137 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 138 138 139 - mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode, 140 - (unsigned long long)iblock, bh_result, create); 139 + trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno, 140 + (unsigned long long)iblock, bh_result, create); 141 141 142 142 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) 143 143 mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n", ··· 199 199 } 200 200 201 201 past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); 202 - mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino, 203 - (unsigned long long)past_eof); 202 + 203 + trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno, 204 + (unsigned long long)past_eof); 204 205 if (create && (iblock >= past_eof)) 205 206 set_buffer_new(bh_result); 206 207 ··· 209 208 if (err < 0) 210 209 err = -EIO; 211 210 212 - mlog_exit(err); 213 211 return err; 214 212 } 215 213 ··· 278 278 loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT; 279 279 int ret, unlock = 1; 280 280 281 - mlog_entry("(0x%p, %lu)\n", file, (page ? 
page->index : 0)); 281 + trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, 282 + (page ? page->index : 0)); 282 283 283 284 ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page); 284 285 if (ret != 0) { ··· 324 323 out: 325 324 if (unlock) 326 325 unlock_page(page); 327 - mlog_exit(ret); 328 326 return ret; 329 327 } 330 328 ··· 396 396 */ 397 397 static int ocfs2_writepage(struct page *page, struct writeback_control *wbc) 398 398 { 399 - int ret; 399 + trace_ocfs2_writepage( 400 + (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno, 401 + page->index); 400 402 401 - mlog_entry("(0x%p)\n", page); 402 - 403 - ret = block_write_full_page(page, ocfs2_get_block, wbc); 404 - 405 - mlog_exit(ret); 406 - 407 - return ret; 403 + return block_write_full_page(page, ocfs2_get_block, wbc); 408 404 } 409 405 410 406 /* Taken from ext3. We don't necessarily need the full blown ··· 446 450 int err = 0; 447 451 struct inode *inode = mapping->host; 448 452 449 - mlog_entry("(block = %llu)\n", (unsigned long long)block); 453 + trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno, 454 + (unsigned long long)block); 450 455 451 456 /* We don't need to lock journal system files, since they aren't 452 457 * accessed concurrently from multiple nodes. ··· 480 483 481 484 bail: 482 485 status = err ? 
0 : p_blkno; 483 - 484 - mlog_exit((int)status); 485 486 486 487 return status; 487 488 } ··· 611 616 { 612 617 struct file *file = iocb->ki_filp; 613 618 struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host; 614 - int ret; 615 - 616 - mlog_entry_void(); 617 619 618 620 /* 619 621 * Fallback to buffered I/O if we see an inode without ··· 623 631 if (i_size_read(inode) <= offset) 624 632 return 0; 625 633 626 - ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, 627 - iov, offset, nr_segs, 628 - ocfs2_direct_IO_get_blocks, 629 - ocfs2_dio_end_io, NULL, 0); 630 - 631 - mlog_exit(ret); 632 - return ret; 634 + return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, 635 + iov, offset, nr_segs, 636 + ocfs2_direct_IO_get_blocks, 637 + ocfs2_dio_end_io, NULL, 0); 633 638 } 634 639 635 640 static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, ··· 1014 1025 1015 1026 ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, 1016 1027 &cluster_start, &cluster_end); 1028 + 1029 + /* treat the write as new if the a hole/lseek spanned across 1030 + * the page boundary. 1031 + */ 1032 + new = new | ((i_size_read(inode) <= page_offset(page)) && 1033 + (page_offset(page) <= user_pos)); 1017 1034 1018 1035 if (page == wc->w_target_page) { 1019 1036 map_from = user_pos & (PAGE_CACHE_SIZE - 1); ··· 1529 1534 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1530 1535 struct ocfs2_dinode *di = NULL; 1531 1536 1532 - mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n", 1533 - (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos, 1534 - oi->ip_dyn_features); 1537 + trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno, 1538 + len, (unsigned long long)pos, 1539 + oi->ip_dyn_features); 1535 1540 1536 1541 /* 1537 1542 * Handle inodes which already have inline data 1st. 
··· 1734 1739 1735 1740 di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; 1736 1741 1742 + trace_ocfs2_write_begin_nolock( 1743 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 1744 + (long long)i_size_read(inode), 1745 + le32_to_cpu(di->i_clusters), 1746 + pos, len, flags, mmap_page, 1747 + clusters_to_alloc, extents_to_split); 1748 + 1737 1749 /* 1738 1750 * We set w_target_from, w_target_to here so that 1739 1751 * ocfs2_write_end() knows which range in the target page to ··· 1753 1751 * ocfs2_lock_allocators(). It greatly over-estimates 1754 1752 * the work to be done. 1755 1753 */ 1756 - mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u," 1757 - " clusters_to_add = %u, extents_to_split = %u\n", 1758 - (unsigned long long)OCFS2_I(inode)->ip_blkno, 1759 - (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), 1760 - clusters_to_alloc, extents_to_split); 1761 - 1762 1754 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), 1763 1755 wc->w_di_bh); 1764 1756 ret = ocfs2_lock_allocators(inode, &et, ··· 1934 1938 memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); 1935 1939 kunmap_atomic(kaddr, KM_USER0); 1936 1940 1937 - mlog(0, "Data written to inode at offset %llu. " 1938 - "id_count = %u, copied = %u, i_dyn_features = 0x%x\n", 1941 + trace_ocfs2_write_end_inline( 1942 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 1939 1943 (unsigned long long)pos, *copied, 1940 1944 le16_to_cpu(di->id2.i_data.id_count), 1941 1945 le16_to_cpu(di->i_dyn_features));
+18 -31
fs/ocfs2/buffer_head_io.c
··· 35 35 #include "inode.h" 36 36 #include "journal.h" 37 37 #include "uptodate.h" 38 - 39 38 #include "buffer_head_io.h" 39 + #include "ocfs2_trace.h" 40 40 41 41 /* 42 42 * Bits on bh->b_state used by ocfs2. ··· 55 55 { 56 56 int ret = 0; 57 57 58 - mlog_entry("(bh->b_blocknr = %llu, ci=%p)\n", 59 - (unsigned long long)bh->b_blocknr, ci); 58 + trace_ocfs2_write_block((unsigned long long)bh->b_blocknr, ci); 60 59 61 60 BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO); 62 61 BUG_ON(buffer_jbd(bh)); ··· 65 66 * can get modified during recovery even if read-only. */ 66 67 if (ocfs2_is_hard_readonly(osb)) { 67 68 ret = -EROFS; 69 + mlog_errno(ret); 68 70 goto out; 69 71 } 70 72 ··· 91 91 * uptodate. */ 92 92 ret = -EIO; 93 93 put_bh(bh); 94 + mlog_errno(ret); 94 95 } 95 96 96 97 ocfs2_metadata_cache_io_unlock(ci); 97 98 out: 98 - mlog_exit(ret); 99 99 return ret; 100 100 } 101 101 ··· 106 106 unsigned int i; 107 107 struct buffer_head *bh; 108 108 109 - if (!nr) { 110 - mlog(ML_BH_IO, "No buffers will be read!\n"); 109 + trace_ocfs2_read_blocks_sync((unsigned long long)block, nr); 110 + 111 + if (!nr) 111 112 goto bail; 112 - } 113 113 114 114 for (i = 0 ; i < nr ; i++) { 115 115 if (bhs[i] == NULL) { ··· 123 123 bh = bhs[i]; 124 124 125 125 if (buffer_jbd(bh)) { 126 - mlog(ML_BH_IO, 127 - "trying to sync read a jbd " 128 - "managed bh (blocknr = %llu), skipping\n", 129 - (unsigned long long)bh->b_blocknr); 126 + trace_ocfs2_read_blocks_sync_jbd( 127 + (unsigned long long)bh->b_blocknr); 130 128 continue; 131 129 } 132 130 ··· 184 186 struct buffer_head *bh; 185 187 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 186 188 187 - mlog_entry("(ci=%p, block=(%llu), nr=(%d), flags=%d)\n", 188 - ci, (unsigned long long)block, nr, flags); 189 + trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags); 189 190 190 191 BUG_ON(!ci); 191 192 BUG_ON((flags & OCFS2_BH_READAHEAD) && ··· 204 207 } 205 208 206 209 if (nr == 0) { 207 - mlog(ML_BH_IO, "No 
buffers will be read!\n"); 208 210 status = 0; 209 211 goto bail; 210 212 } ··· 247 251 */ 248 252 249 253 if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) { 250 - mlog(ML_UPTODATE, 251 - "bh (%llu), owner %llu not uptodate\n", 254 + trace_ocfs2_read_blocks_from_disk( 252 255 (unsigned long long)bh->b_blocknr, 253 256 (unsigned long long)ocfs2_metadata_cache_owner(ci)); 254 257 /* We're using ignore_cache here to say ··· 255 260 ignore_cache = 1; 256 261 } 257 262 263 + trace_ocfs2_read_blocks_bh((unsigned long long)bh->b_blocknr, 264 + ignore_cache, buffer_jbd(bh), buffer_dirty(bh)); 265 + 258 266 if (buffer_jbd(bh)) { 259 - if (ignore_cache) 260 - mlog(ML_BH_IO, "trying to sync read a jbd " 261 - "managed bh (blocknr = %llu)\n", 262 - (unsigned long long)bh->b_blocknr); 263 267 continue; 264 268 } 265 269 ··· 266 272 if (buffer_dirty(bh)) { 267 273 /* This should probably be a BUG, or 268 274 * at least return an error. */ 269 - mlog(ML_BH_IO, "asking me to sync read a dirty " 270 - "buffer! (blocknr = %llu)\n", 271 - (unsigned long long)bh->b_blocknr); 272 275 continue; 273 276 } 274 277 ··· 358 367 } 359 368 ocfs2_metadata_cache_io_unlock(ci); 360 369 361 - mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", 362 - (unsigned long long)block, nr, 363 - ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? 
"no" : "yes", 364 - flags); 370 + trace_ocfs2_read_blocks_end((unsigned long long)block, nr, 371 + flags, ignore_cache); 365 372 366 373 bail: 367 374 368 - mlog_exit(status); 369 375 return status; 370 376 } 371 377 ··· 396 408 int ret = 0; 397 409 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; 398 410 399 - mlog_entry_void(); 400 - 401 411 BUG_ON(buffer_jbd(bh)); 402 412 ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr); 403 413 404 414 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) { 405 415 ret = -EROFS; 416 + mlog_errno(ret); 406 417 goto out; 407 418 } 408 419 ··· 421 434 if (!buffer_uptodate(bh)) { 422 435 ret = -EIO; 423 436 put_bh(bh); 437 + mlog_errno(ret); 424 438 } 425 439 426 440 out: 427 - mlog_exit(ret); 428 441 return ret; 429 442 }
-3
fs/ocfs2/cluster/heartbeat.c
··· 1654 1654 struct o2hb_disk_slot *slot; 1655 1655 struct o2hb_disk_heartbeat_block *hb_block; 1656 1656 1657 - mlog_entry_void(); 1658 - 1659 1657 ret = o2hb_read_slots(reg, reg->hr_blocks); 1660 1658 if (ret) { 1661 1659 mlog_errno(ret); ··· 1675 1677 } 1676 1678 1677 1679 out: 1678 - mlog_exit(ret); 1679 1680 return ret; 1680 1681 } 1681 1682
+1 -19
fs/ocfs2/cluster/masklog.c
··· 30 30 31 31 struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK); 32 32 EXPORT_SYMBOL_GPL(mlog_and_bits); 33 - struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(MLOG_INITIAL_NOT_MASK); 33 + struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(0); 34 34 EXPORT_SYMBOL_GPL(mlog_not_bits); 35 35 36 36 static ssize_t mlog_mask_show(u64 mask, char *buf) ··· 80 80 } 81 81 82 82 static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { 83 - define_mask(ENTRY), 84 - define_mask(EXIT), 85 83 define_mask(TCP), 86 84 define_mask(MSG), 87 85 define_mask(SOCKET), ··· 91 93 define_mask(DLM_THREAD), 92 94 define_mask(DLM_MASTER), 93 95 define_mask(DLM_RECOVERY), 94 - define_mask(AIO), 95 - define_mask(JOURNAL), 96 - define_mask(DISK_ALLOC), 97 - define_mask(SUPER), 98 - define_mask(FILE_IO), 99 - define_mask(EXTENT_MAP), 100 96 define_mask(DLM_GLUE), 101 - define_mask(BH_IO), 102 - define_mask(UPTODATE), 103 - define_mask(NAMEI), 104 - define_mask(INODE), 105 97 define_mask(VOTE), 106 - define_mask(DCACHE), 107 98 define_mask(CONN), 108 99 define_mask(QUORUM), 109 - define_mask(EXPORT), 110 - define_mask(XATTR), 111 - define_mask(QUOTA), 112 - define_mask(REFCOUNT), 113 100 define_mask(BASTS), 114 - define_mask(RESERVATIONS), 115 101 define_mask(CLUSTER), 116 102 define_mask(ERROR), 117 103 define_mask(NOTICE),
+17 -88
fs/ocfs2/cluster/masklog.h
··· 82 82 83 83 /* bits that are frequently given and infrequently matched in the low word */ 84 84 /* NOTE: If you add a flag, you need to also update masklog.c! */ 85 - #define ML_ENTRY 0x0000000000000001ULL /* func call entry */ 86 - #define ML_EXIT 0x0000000000000002ULL /* func call exit */ 87 - #define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */ 88 - #define ML_MSG 0x0000000000000008ULL /* net network messages */ 89 - #define ML_SOCKET 0x0000000000000010ULL /* net socket lifetime */ 90 - #define ML_HEARTBEAT 0x0000000000000020ULL /* hb all heartbeat tracking */ 91 - #define ML_HB_BIO 0x0000000000000040ULL /* hb io tracing */ 92 - #define ML_DLMFS 0x0000000000000080ULL /* dlm user dlmfs */ 93 - #define ML_DLM 0x0000000000000100ULL /* dlm general debugging */ 94 - #define ML_DLM_DOMAIN 0x0000000000000200ULL /* dlm domain debugging */ 95 - #define ML_DLM_THREAD 0x0000000000000400ULL /* dlm domain thread */ 96 - #define ML_DLM_MASTER 0x0000000000000800ULL /* dlm master functions */ 97 - #define ML_DLM_RECOVERY 0x0000000000001000ULL /* dlm master functions */ 98 - #define ML_AIO 0x0000000000002000ULL /* ocfs2 aio read and write */ 99 - #define ML_JOURNAL 0x0000000000004000ULL /* ocfs2 journalling functions */ 100 - #define ML_DISK_ALLOC 0x0000000000008000ULL /* ocfs2 disk allocation */ 101 - #define ML_SUPER 0x0000000000010000ULL /* ocfs2 mount / umount */ 102 - #define ML_FILE_IO 0x0000000000020000ULL /* ocfs2 file I/O */ 103 - #define ML_EXTENT_MAP 0x0000000000040000ULL /* ocfs2 extent map caching */ 104 - #define ML_DLM_GLUE 0x0000000000080000ULL /* ocfs2 dlm glue layer */ 105 - #define ML_BH_IO 0x0000000000100000ULL /* ocfs2 buffer I/O */ 106 - #define ML_UPTODATE 0x0000000000200000ULL /* ocfs2 caching sequence #'s */ 107 - #define ML_NAMEI 0x0000000000400000ULL /* ocfs2 directory / namespace */ 108 - #define ML_INODE 0x0000000000800000ULL /* ocfs2 inode manipulation */ 109 - #define ML_VOTE 0x0000000001000000ULL /* ocfs2 node messaging */ 110 - 
#define ML_DCACHE 0x0000000002000000ULL /* ocfs2 dcache operations */ 111 - #define ML_CONN 0x0000000004000000ULL /* net connection management */ 112 - #define ML_QUORUM 0x0000000008000000ULL /* net connection quorum */ 113 - #define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */ 114 - #define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */ 115 - #define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */ 116 - #define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */ 117 - #define ML_BASTS 0x0000000100000000ULL /* dlmglue asts and basts */ 118 - #define ML_RESERVATIONS 0x0000000200000000ULL /* ocfs2 alloc reservations */ 119 - #define ML_CLUSTER 0x0000000400000000ULL /* cluster stack */ 85 + #define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */ 86 + #define ML_MSG 0x0000000000000002ULL /* net network messages */ 87 + #define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */ 88 + #define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */ 89 + #define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */ 90 + #define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */ 91 + #define ML_DLM 0x0000000000000040ULL /* dlm general debugging */ 92 + #define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */ 93 + #define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */ 94 + #define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */ 95 + #define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm master functions */ 96 + #define ML_DLM_GLUE 0x0000000000000800ULL /* ocfs2 dlm glue layer */ 97 + #define ML_VOTE 0x0000000000001000ULL /* ocfs2 node messaging */ 98 + #define ML_CONN 0x0000000000002000ULL /* net connection management */ 99 + #define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */ 100 + #define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */ 101 + #define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */ 120 102 121 103 /* bits 
that are infrequently given and frequently matched in the high word */ 122 104 #define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */ ··· 106 124 #define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */ 107 125 108 126 #define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE) 109 - #define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT) 110 127 #ifndef MLOG_MASK_PREFIX 111 128 #define MLOG_MASK_PREFIX 0 112 129 #endif ··· 202 221 _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC) \ 203 222 mlog(ML_ERROR, "status = %lld\n", (long long)_st); \ 204 223 } while (0) 205 - 206 - #if defined(CONFIG_OCFS2_DEBUG_MASKLOG) 207 - #define mlog_entry(fmt, args...) do { \ 208 - mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \ 209 - } while (0) 210 - 211 - #define mlog_entry_void() do { \ 212 - mlog(ML_ENTRY, "ENTRY:\n"); \ 213 - } while (0) 214 - 215 - /* 216 - * We disable this for sparse. 217 - */ 218 - #if !defined(__CHECKER__) 219 - #define mlog_exit(st) do { \ 220 - if (__builtin_types_compatible_p(typeof(st), unsigned long)) \ 221 - mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \ 222 - else if (__builtin_types_compatible_p(typeof(st), signed long)) \ 223 - mlog(ML_EXIT, "EXIT: %ld\n", (signed long) (st)); \ 224 - else if (__builtin_types_compatible_p(typeof(st), unsigned int) \ 225 - || __builtin_types_compatible_p(typeof(st), unsigned short) \ 226 - || __builtin_types_compatible_p(typeof(st), unsigned char)) \ 227 - mlog(ML_EXIT, "EXIT: %u\n", (unsigned int) (st)); \ 228 - else if (__builtin_types_compatible_p(typeof(st), signed int) \ 229 - || __builtin_types_compatible_p(typeof(st), signed short) \ 230 - || __builtin_types_compatible_p(typeof(st), signed char)) \ 231 - mlog(ML_EXIT, "EXIT: %d\n", (signed int) (st)); \ 232 - else if (__builtin_types_compatible_p(typeof(st), long long)) \ 233 - mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \ 234 - else \ 235 - mlog(ML_EXIT, "EXIT: %llu\n", (unsigned long long) (st)); \ 236 - } while (0) 237 - #else 238 - #define 
mlog_exit(st) do { \ 239 - mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \ 240 - } while (0) 241 - #endif 242 - 243 - #define mlog_exit_ptr(ptr) do { \ 244 - mlog(ML_EXIT, "EXIT: %p\n", ptr); \ 245 - } while (0) 246 - 247 - #define mlog_exit_void() do { \ 248 - mlog(ML_EXIT, "EXIT\n"); \ 249 - } while (0) 250 - #else 251 - #define mlog_entry(...) do { } while (0) 252 - #define mlog_entry_void(...) do { } while (0) 253 - #define mlog_exit(...) do { } while (0) 254 - #define mlog_exit_ptr(...) do { } while (0) 255 - #define mlog_exit_void(...) do { } while (0) 256 - #endif /* defined(CONFIG_OCFS2_DEBUG_MASKLOG) */ 257 224 258 225 #define mlog_bug_on_msg(cond, fmt, args...) do { \ 259 226 if (cond) { \
+5 -5
fs/ocfs2/cluster/tcp.c
··· 210 210 sc->sc_tv_func_stop = ktime_get(); 211 211 } 212 212 213 - static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc) 214 - { 215 - return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start); 216 - } 217 213 #else /* CONFIG_DEBUG_FS */ 218 214 # define o2net_init_nst(a, b, c, d, e) 219 215 # define o2net_set_nst_sock_time(a) ··· 223 227 # define o2net_set_advance_stop_time(a) 224 228 # define o2net_set_func_start_time(a) 225 229 # define o2net_set_func_stop_time(a) 226 - # define o2net_get_func_run_time(a) (ktime_t)0 227 230 #endif /* CONFIG_DEBUG_FS */ 228 231 229 232 #ifdef CONFIG_OCFS2_FS_STATS 233 + static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc) 234 + { 235 + return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start); 236 + } 237 + 230 238 static void o2net_update_send_stats(struct o2net_send_tracking *nst, 231 239 struct o2net_sock_container *sc) 232 240 {
+22 -23
fs/ocfs2/dcache.c
··· 28 28 #include <linux/slab.h> 29 29 #include <linux/namei.h> 30 30 31 - #define MLOG_MASK_PREFIX ML_DCACHE 32 31 #include <cluster/masklog.h> 33 32 34 33 #include "ocfs2.h" ··· 38 39 #include "file.h" 39 40 #include "inode.h" 40 41 #include "super.h" 42 + #include "ocfs2_trace.h" 41 43 42 44 void ocfs2_dentry_attach_gen(struct dentry *dentry) 43 45 { ··· 62 62 inode = dentry->d_inode; 63 63 osb = OCFS2_SB(dentry->d_sb); 64 64 65 - mlog_entry("(0x%p, '%.*s')\n", dentry, 66 - dentry->d_name.len, dentry->d_name.name); 65 + trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len, 66 + dentry->d_name.name); 67 67 68 68 /* For a negative dentry - 69 69 * check the generation number of the parent and compare with the ··· 73 73 unsigned long gen = (unsigned long) dentry->d_fsdata; 74 74 unsigned long pgen = 75 75 OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen; 76 - mlog(0, "negative dentry: %.*s parent gen: %lu " 77 - "dentry gen: %lu\n", 78 - dentry->d_name.len, dentry->d_name.name, pgen, gen); 76 + 77 + trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len, 78 + dentry->d_name.name, 79 + pgen, gen); 79 80 if (gen != pgen) 80 81 goto bail; 81 82 goto valid; ··· 91 90 /* did we or someone else delete this inode? */ 92 91 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { 93 92 spin_unlock(&OCFS2_I(inode)->ip_lock); 94 - mlog(0, "inode (%llu) deleted, returning false\n", 95 - (unsigned long long)OCFS2_I(inode)->ip_blkno); 93 + trace_ocfs2_dentry_revalidate_delete( 94 + (unsigned long long)OCFS2_I(inode)->ip_blkno); 96 95 goto bail; 97 96 } 98 97 spin_unlock(&OCFS2_I(inode)->ip_lock); ··· 102 101 * inode nlink hits zero, it never goes back. 
103 102 */ 104 103 if (inode->i_nlink == 0) { 105 - mlog(0, "Inode %llu orphaned, returning false " 106 - "dir = %d\n", 107 - (unsigned long long)OCFS2_I(inode)->ip_blkno, 108 - S_ISDIR(inode->i_mode)); 104 + trace_ocfs2_dentry_revalidate_orphaned( 105 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 106 + S_ISDIR(inode->i_mode)); 109 107 goto bail; 110 108 } 111 109 ··· 113 113 * redo it. 114 114 */ 115 115 if (!dentry->d_fsdata) { 116 - mlog(0, "Inode %llu doesn't have dentry lock, " 117 - "returning false\n", 118 - (unsigned long long)OCFS2_I(inode)->ip_blkno); 116 + trace_ocfs2_dentry_revalidate_nofsdata( 117 + (unsigned long long)OCFS2_I(inode)->ip_blkno); 119 118 goto bail; 120 119 } 121 120 ··· 122 123 ret = 1; 123 124 124 125 bail: 125 - mlog_exit(ret); 126 - 126 + trace_ocfs2_dentry_revalidate_ret(ret); 127 127 return ret; 128 128 } 129 129 ··· 179 181 180 182 spin_lock(&dentry->d_lock); 181 183 if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { 182 - mlog(0, "dentry found: %.*s\n", 183 - dentry->d_name.len, dentry->d_name.name); 184 + trace_ocfs2_find_local_alias(dentry->d_name.len, 185 + dentry->d_name.name); 184 186 185 187 dget_dlock(dentry); 186 188 spin_unlock(&dentry->d_lock); ··· 238 240 struct dentry *alias; 239 241 struct ocfs2_dentry_lock *dl = dentry->d_fsdata; 240 242 241 - mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n", 242 - dentry->d_name.len, dentry->d_name.name, 243 - (unsigned long long)parent_blkno, dl); 243 + trace_ocfs2_dentry_attach_lock(dentry->d_name.len, dentry->d_name.name, 244 + (unsigned long long)parent_blkno, dl); 244 245 245 246 /* 246 247 * Negative dentry. We ignore these for now. 
··· 289 292 (unsigned long long)parent_blkno, 290 293 (unsigned long long)dl->dl_parent_blkno); 291 294 292 - mlog(0, "Found: %s\n", dl->dl_lockres.l_name); 295 + trace_ocfs2_dentry_attach_lock_found(dl->dl_lockres.l_name, 296 + (unsigned long long)parent_blkno, 297 + (unsigned long long)OCFS2_I(inode)->ip_blkno); 293 298 294 299 goto out_attach; 295 300 }
+52 -69
fs/ocfs2/dir.c
··· 43 43 #include <linux/quotaops.h> 44 44 #include <linux/sort.h> 45 45 46 - #define MLOG_MASK_PREFIX ML_NAMEI 47 46 #include <cluster/masklog.h> 48 47 49 48 #include "ocfs2.h" ··· 60 61 #include "super.h" 61 62 #include "sysfile.h" 62 63 #include "uptodate.h" 64 + #include "ocfs2_trace.h" 63 65 64 66 #include "buffer_head_io.h" 65 67 ··· 322 322 const char *error_msg = NULL; 323 323 const int rlen = le16_to_cpu(de->rec_len); 324 324 325 - if (rlen < OCFS2_DIR_REC_LEN(1)) 325 + if (unlikely(rlen < OCFS2_DIR_REC_LEN(1))) 326 326 error_msg = "rec_len is smaller than minimal"; 327 - else if (rlen % 4 != 0) 327 + else if (unlikely(rlen % 4 != 0)) 328 328 error_msg = "rec_len % 4 != 0"; 329 - else if (rlen < OCFS2_DIR_REC_LEN(de->name_len)) 329 + else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len))) 330 330 error_msg = "rec_len is too small for name_len"; 331 - else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) 331 + else if (unlikely( 332 + ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)) 332 333 error_msg = "directory entry across blocks"; 333 334 334 - if (error_msg != NULL) 335 + if (unlikely(error_msg != NULL)) 335 336 mlog(ML_ERROR, "bad entry in directory #%llu: %s - " 336 337 "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n", 337 338 (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg, 338 339 offset, (unsigned long long)le64_to_cpu(de->inode), rlen, 339 340 de->name_len); 341 + 340 342 return error_msg == NULL ? 1 : 0; 341 343 } 342 344 ··· 368 366 char *dlimit, *de_buf; 369 367 int de_len; 370 368 int ret = 0; 371 - 372 - mlog_entry_void(); 373 369 374 370 de_buf = first_de; 375 371 dlimit = de_buf + bytes; ··· 402 402 } 403 403 404 404 bail: 405 - mlog_exit(ret); 405 + trace_ocfs2_search_dirblock(ret); 406 406 return ret; 407 407 } 408 408 ··· 447 447 * We don't validate dirents here, that's handled 448 448 * in-place when the code walks them. 
449 449 */ 450 - mlog(0, "Validating dirblock %llu\n", 451 - (unsigned long long)bh->b_blocknr); 450 + trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr); 452 451 453 452 BUG_ON(!buffer_uptodate(bh)); 454 453 ··· 705 706 int num = 0; 706 707 int nblocks, i, err; 707 708 708 - mlog_entry_void(); 709 - 710 709 sb = dir->i_sb; 711 710 712 711 nblocks = i_size_read(dir) >> sb->s_blocksize_bits; ··· 785 788 for (; ra_ptr < ra_max; ra_ptr++) 786 789 brelse(bh_use[ra_ptr]); 787 790 788 - mlog_exit_ptr(ret); 791 + trace_ocfs2_find_entry_el(ret); 789 792 return ret; 790 793 } 791 794 ··· 947 950 goto out; 948 951 } 949 952 950 - mlog(0, "Dir %llu: name: \"%.*s\", lookup of hash: %u.0x%x " 951 - "returns: %llu\n", 952 - (unsigned long long)OCFS2_I(dir)->ip_blkno, 953 - namelen, name, hinfo->major_hash, hinfo->minor_hash, 954 - (unsigned long long)phys); 953 + trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno, 954 + namelen, name, hinfo->major_hash, 955 + hinfo->minor_hash, (unsigned long long)phys); 955 956 956 957 ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh); 957 958 if (ret) { ··· 959 964 960 965 dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data; 961 966 962 - mlog(0, "leaf info: num_used: %d, count: %d\n", 963 - le16_to_cpu(dx_leaf->dl_list.de_num_used), 964 - le16_to_cpu(dx_leaf->dl_list.de_count)); 967 + trace_ocfs2_dx_dir_search_leaf_info( 968 + le16_to_cpu(dx_leaf->dl_list.de_num_used), 969 + le16_to_cpu(dx_leaf->dl_list.de_count)); 965 970 966 971 entry_list = &dx_leaf->dl_list; 967 972 ··· 1161 1166 int i, status = -ENOENT; 1162 1167 ocfs2_journal_access_func access = ocfs2_journal_access_db; 1163 1168 1164 - mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh); 1165 - 1166 1169 if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) 1167 1170 access = ocfs2_journal_access_di; 1168 1171 ··· 1195 1202 de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len)); 1196 1203 } 1197 1204 bail: 1198 - 
mlog_exit(status); 1199 1205 return status; 1200 1206 } 1201 1207 ··· 1340 1348 } 1341 1349 } 1342 1350 1343 - mlog(0, "Dir %llu: delete entry at index: %d\n", 1344 - (unsigned long long)OCFS2_I(dir)->ip_blkno, index); 1351 + trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno, 1352 + index); 1345 1353 1346 1354 ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry, 1347 1355 leaf_bh, leaf_bh->b_data, leaf_bh->b_size); ··· 1624 1632 struct buffer_head *insert_bh = lookup->dl_leaf_bh; 1625 1633 char *data_start = insert_bh->b_data; 1626 1634 1627 - mlog_entry_void(); 1628 - 1629 1635 if (!namelen) 1630 1636 return -EINVAL; 1631 1637 ··· 1755 1765 * from ever getting here. */ 1756 1766 retval = -ENOSPC; 1757 1767 bail: 1768 + if (retval) 1769 + mlog_errno(retval); 1758 1770 1759 - mlog_exit(retval); 1760 1771 return retval; 1761 1772 } 1762 1773 ··· 2019 2028 struct inode *inode = filp->f_path.dentry->d_inode; 2020 2029 int lock_level = 0; 2021 2030 2022 - mlog_entry("dirino=%llu\n", 2023 - (unsigned long long)OCFS2_I(inode)->ip_blkno); 2031 + trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno); 2024 2032 2025 2033 error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); 2026 2034 if (lock_level && error >= 0) { ··· 2041 2051 dirent, filldir, NULL); 2042 2052 2043 2053 ocfs2_inode_unlock(inode, lock_level); 2054 + if (error) 2055 + mlog_errno(error); 2044 2056 2045 2057 bail_nolock: 2046 - mlog_exit(error); 2047 2058 2048 2059 return error; 2049 2060 } ··· 2060 2069 { 2061 2070 int status = -ENOENT; 2062 2071 2063 - mlog(0, "name=%.*s, blkno=%p, inode=%llu\n", namelen, name, blkno, 2064 - (unsigned long long)OCFS2_I(inode)->ip_blkno); 2072 + trace_ocfs2_find_files_on_disk(namelen, name, blkno, 2073 + (unsigned long long)OCFS2_I(inode)->ip_blkno); 2065 2074 2066 2075 status = ocfs2_find_entry(name, namelen, inode, lookup); 2067 2076 if (status) ··· 2105 2114 int ret; 2106 2115 struct ocfs2_dir_lookup_result lookup = { 
NULL, }; 2107 2116 2108 - mlog_entry("dir %llu, name '%.*s'\n", 2109 - (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); 2117 + trace_ocfs2_check_dir_for_entry( 2118 + (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); 2110 2119 2111 2120 ret = -EEXIST; 2112 2121 if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) ··· 2116 2125 bail: 2117 2126 ocfs2_free_dir_lookup_result(&lookup); 2118 2127 2119 - mlog_exit(ret); 2128 + if (ret) 2129 + mlog_errno(ret); 2120 2130 return ret; 2121 2131 } 2122 2132 ··· 2316 2324 struct buffer_head *new_bh = NULL; 2317 2325 struct ocfs2_dir_entry *de; 2318 2326 2319 - mlog_entry_void(); 2320 - 2321 2327 if (ocfs2_new_dir_wants_trailer(inode)) 2322 2328 size = ocfs2_dir_trailer_blk_off(parent->i_sb); 2323 2329 ··· 2370 2380 bail: 2371 2381 brelse(new_bh); 2372 2382 2373 - mlog_exit(status); 2374 2383 return status; 2375 2384 } 2376 2385 ··· 2398 2409 goto out; 2399 2410 } 2400 2411 2401 - mlog(0, "Dir %llu, attach new index block: %llu\n", 2402 - (unsigned long long)OCFS2_I(dir)->ip_blkno, 2403 - (unsigned long long)dr_blkno); 2412 + trace_ocfs2_dx_dir_attach_index( 2413 + (unsigned long long)OCFS2_I(dir)->ip_blkno, 2414 + (unsigned long long)dr_blkno); 2404 2415 2405 2416 dx_root_bh = sb_getblk(osb->sb, dr_blkno); 2406 2417 if (dx_root_bh == NULL) { ··· 2500 2511 dx_leaf->dl_list.de_count = 2501 2512 cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb)); 2502 2513 2503 - mlog(0, 2504 - "Dir %llu, format dx_leaf: %llu, entry count: %u\n", 2505 - (unsigned long long)OCFS2_I(dir)->ip_blkno, 2506 - (unsigned long long)bh->b_blocknr, 2507 - le16_to_cpu(dx_leaf->dl_list.de_count)); 2514 + trace_ocfs2_dx_dir_format_cluster( 2515 + (unsigned long long)OCFS2_I(dir)->ip_blkno, 2516 + (unsigned long long)bh->b_blocknr, 2517 + le16_to_cpu(dx_leaf->dl_list.de_count)); 2508 2518 2509 2519 ocfs2_journal_dirty(handle, bh); 2510 2520 } ··· 2747 2759 2748 2760 ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo); 2749 2761 2750 
- mlog(0, 2751 - "dir: %llu, major: 0x%x minor: 0x%x, index: %u, name: %.*s\n", 2752 - (unsigned long long)dir->i_ino, hinfo.major_hash, 2753 - hinfo.minor_hash, 2754 - le16_to_cpu(dx_root->dr_entries.de_num_used), 2755 - de->name_len, de->name); 2762 + trace_ocfs2_dx_dir_index_root_block( 2763 + (unsigned long long)dir->i_ino, 2764 + hinfo.major_hash, hinfo.minor_hash, 2765 + de->name_len, de->name, 2766 + le16_to_cpu(dx_root->dr_entries.de_num_used)); 2756 2767 2757 2768 ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo, 2758 2769 dirent_blk); ··· 3222 3235 bail: 3223 3236 if (did_quota && status < 0) 3224 3237 dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); 3225 - mlog_exit(status); 3226 3238 return status; 3227 3239 } 3228 3240 ··· 3255 3269 struct super_block *sb = osb->sb; 3256 3270 struct ocfs2_extent_tree et; 3257 3271 struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; 3258 - 3259 - mlog_entry_void(); 3260 3272 3261 3273 if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { 3262 3274 /* ··· 3304 3320 down_write(&OCFS2_I(dir)->ip_alloc_sem); 3305 3321 drop_alloc_sem = 1; 3306 3322 dir_i_size = i_size_read(dir); 3307 - mlog(0, "extending dir %llu (i_size = %lld)\n", 3308 - (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size); 3323 + trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno, 3324 + dir_i_size); 3309 3325 3310 3326 /* dir->i_size is always block aligned. 
*/ 3311 3327 spin_lock(&OCFS2_I(dir)->ip_lock); ··· 3420 3436 3421 3437 brelse(new_bh); 3422 3438 3423 - mlog_exit(status); 3424 3439 return status; 3425 3440 } 3426 3441 ··· 3566 3583 status = 0; 3567 3584 bail: 3568 3585 brelse(bh); 3586 + if (status) 3587 + mlog_errno(status); 3569 3588 3570 - mlog_exit(status); 3571 3589 return status; 3572 3590 } 3573 3591 ··· 3799 3815 struct ocfs2_dx_root_block *dx_root; 3800 3816 struct ocfs2_dx_leaf *tmp_dx_leaf = NULL; 3801 3817 3802 - mlog(0, "DX Dir: %llu, rebalance leaf leaf_blkno: %llu insert: %u\n", 3803 - (unsigned long long)OCFS2_I(dir)->ip_blkno, 3804 - (unsigned long long)leaf_blkno, insert_hash); 3818 + trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno, 3819 + (unsigned long long)leaf_blkno, 3820 + insert_hash); 3805 3821 3806 3822 ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); 3807 3823 ··· 3881 3897 goto out_commit; 3882 3898 } 3883 3899 3884 - mlog(0, "Split leaf (%u) at %u, insert major hash is %u\n", 3885 - leaf_cpos, split_hash, insert_hash); 3900 + trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash); 3886 3901 3887 3902 /* 3888 3903 * We have to carefully order operations here. There are items ··· 4338 4355 unsigned int blocks_wanted = 1; 4339 4356 struct buffer_head *bh = NULL; 4340 4357 4341 - mlog(0, "getting ready to insert namelen %d into dir %llu\n", 4342 - namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno); 4358 + trace_ocfs2_prepare_dir_for_insert( 4359 + (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen); 4343 4360 4344 4361 if (!namelen) { 4345 4362 ret = -EINVAL;
+3 -3
fs/ocfs2/dlm/dlmconvert.c
··· 128 128 129 129 assert_spin_locked(&res->spinlock); 130 130 131 - mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n", 132 - lock->ml.type, lock->ml.convert_type, type); 131 + mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n", 132 + lock->ml.type, lock->ml.convert_type, type); 133 133 134 134 spin_lock(&lock->spinlock); 135 135 ··· 353 353 struct kvec vec[2]; 354 354 size_t veclen = 1; 355 355 356 - mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); 356 + mlog(0, "%.*s\n", res->lockname.len, res->lockname.name); 357 357 358 358 memset(&convert, 0, sizeof(struct dlm_convert_lock)); 359 359 convert.node_idx = dlm->node_num;
+20 -16
fs/ocfs2/dlm/dlmdomain.c
··· 188 188 struct hlist_head *bucket; 189 189 struct hlist_node *list; 190 190 191 - mlog_entry("%.*s\n", len, name); 191 + mlog(0, "%.*s\n", len, name); 192 192 193 193 assert_spin_locked(&dlm->spinlock); 194 194 ··· 222 222 { 223 223 struct dlm_lock_resource *res = NULL; 224 224 225 - mlog_entry("%.*s\n", len, name); 225 + mlog(0, "%.*s\n", len, name); 226 226 227 227 assert_spin_locked(&dlm->spinlock); 228 228 ··· 531 531 unsigned int node; 532 532 struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; 533 533 534 - mlog_entry("%p %u %p", msg, len, data); 534 + mlog(0, "%p %u %p", msg, len, data); 535 535 536 536 if (!dlm_grab(dlm)) 537 537 return 0; ··· 926 926 } 927 927 928 928 static int dlm_match_regions(struct dlm_ctxt *dlm, 929 - struct dlm_query_region *qr) 929 + struct dlm_query_region *qr, 930 + char *local, int locallen) 930 931 { 931 - char *local = NULL, *remote = qr->qr_regions; 932 + char *remote = qr->qr_regions; 932 933 char *l, *r; 933 934 int localnr, i, j, foundit; 934 935 int status = 0; ··· 958 957 r += O2HB_MAX_REGION_NAME_LEN; 959 958 } 960 959 961 - local = kmalloc(sizeof(qr->qr_regions), GFP_ATOMIC); 962 - if (!local) { 963 - status = -ENOMEM; 964 - goto bail; 965 - } 966 - 967 - localnr = o2hb_get_all_regions(local, O2NM_MAX_REGIONS); 960 + localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN); 961 + localnr = o2hb_get_all_regions(local, (u8)localnr); 968 962 969 963 /* compare local regions with remote */ 970 964 l = local; ··· 1008 1012 } 1009 1013 1010 1014 bail: 1011 - kfree(local); 1012 - 1013 1015 return status; 1014 1016 } 1015 1017 ··· 1069 1075 { 1070 1076 struct dlm_query_region *qr; 1071 1077 struct dlm_ctxt *dlm = NULL; 1078 + char *local = NULL; 1072 1079 int status = 0; 1073 1080 int locked = 0; 1074 1081 ··· 1077 1082 1078 1083 mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node, 1079 1084 qr->qr_domain); 1085 + 1086 + /* buffer used in dlm_mast_regions() */ 1087 + local = 
kmalloc(sizeof(qr->qr_regions), GFP_KERNEL); 1088 + if (!local) { 1089 + status = -ENOMEM; 1090 + goto bail; 1091 + } 1080 1092 1081 1093 status = -EINVAL; 1082 1094 ··· 1114 1112 goto bail; 1115 1113 } 1116 1114 1117 - status = dlm_match_regions(dlm, qr); 1115 + status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions)); 1118 1116 1119 1117 bail: 1120 1118 if (locked) 1121 1119 spin_unlock(&dlm->spinlock); 1122 1120 spin_unlock(&dlm_domain_lock); 1121 + 1122 + kfree(local); 1123 1123 1124 1124 return status; 1125 1125 } ··· 1557 1553 struct domain_join_ctxt *ctxt; 1558 1554 enum dlm_query_join_response_code response = JOIN_DISALLOW; 1559 1555 1560 - mlog_entry("%p", dlm); 1556 + mlog(0, "%p", dlm); 1561 1557 1562 1558 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); 1563 1559 if (!ctxt) {
+3 -7
fs/ocfs2/dlm/dlmlock.c
··· 128 128 int call_ast = 0, kick_thread = 0; 129 129 enum dlm_status status = DLM_NORMAL; 130 130 131 - mlog_entry("type=%d\n", lock->ml.type); 131 + mlog(0, "type=%d\n", lock->ml.type); 132 132 133 133 spin_lock(&res->spinlock); 134 134 /* if called from dlm_create_lock_handler, need to ··· 227 227 enum dlm_status status = DLM_DENIED; 228 228 int lockres_changed = 1; 229 229 230 - mlog_entry("type=%d\n", lock->ml.type); 231 - mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len, 230 + mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n", 231 + lock->ml.type, res->lockname.len, 232 232 res->lockname.name, flags); 233 233 234 234 spin_lock(&res->spinlock); ··· 307 307 struct dlm_create_lock create; 308 308 int tmpret, status = 0; 309 309 enum dlm_status ret; 310 - 311 - mlog_entry_void(); 312 310 313 311 memset(&create, 0, sizeof(create)); 314 312 create.node_idx = dlm->node_num; ··· 474 476 unsigned int namelen; 475 477 476 478 BUG_ON(!dlm); 477 - 478 - mlog_entry_void(); 479 479 480 480 if (!dlm_grab(dlm)) 481 481 return DLM_REJECTED;
+1 -5
fs/ocfs2/dlm/dlmmaster.c
··· 426 426 struct dlm_master_list_entry *mle; 427 427 struct dlm_ctxt *dlm; 428 428 429 - mlog_entry_void(); 430 - 431 429 mle = container_of(kref, struct dlm_master_list_entry, mle_refs); 432 430 dlm = mle->dlm; 433 431 ··· 3118 3120 3119 3121 *oldmle = NULL; 3120 3122 3121 - mlog_entry_void(); 3122 - 3123 3123 assert_spin_locked(&dlm->spinlock); 3124 3124 assert_spin_locked(&dlm->master_lock); 3125 3125 ··· 3257 3261 struct hlist_node *list; 3258 3262 unsigned int i; 3259 3263 3260 - mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); 3264 + mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); 3261 3265 top: 3262 3266 assert_spin_locked(&dlm->spinlock); 3263 3267
+1 -8
fs/ocfs2/dlm/dlmrecovery.c
··· 727 727 if (destroy) 728 728 dlm_destroy_recovery_area(dlm, dead_node); 729 729 730 - mlog_exit(status); 731 730 return status; 732 731 } 733 732 ··· 1495 1496 kfree(buf); 1496 1497 if (item) 1497 1498 kfree(item); 1499 + mlog_errno(ret); 1498 1500 } 1499 1501 1500 - mlog_exit(ret); 1501 1502 return ret; 1502 1503 } 1503 1504 ··· 1566 1567 dlm_lockres_put(res); 1567 1568 } 1568 1569 kfree(data); 1569 - mlog_exit(ret); 1570 1570 } 1571 1571 1572 1572 ··· 1984 1986 dlm_lock_put(newlock); 1985 1987 } 1986 1988 1987 - mlog_exit(ret); 1988 1989 return ret; 1989 1990 } 1990 1991 ··· 2079 2082 struct hlist_node *hash_iter; 2080 2083 struct hlist_head *bucket; 2081 2084 struct dlm_lock_resource *res, *next; 2082 - 2083 - mlog_entry_void(); 2084 2085 2085 2086 assert_spin_locked(&dlm->spinlock); 2086 2087 ··· 2601 2606 struct dlm_node_iter iter; 2602 2607 int nodenum; 2603 2608 int status; 2604 - 2605 - mlog_entry("%u\n", dead_node); 2606 2609 2607 2610 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); 2608 2611
+1 -3
fs/ocfs2/dlm/dlmunlock.c
··· 317 317 struct kvec vec[2]; 318 318 size_t veclen = 1; 319 319 320 - mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); 320 + mlog(0, "%.*s\n", res->lockname.len, res->lockname.name); 321 321 322 322 if (owner == dlm->node_num) { 323 323 /* ended up trying to contact ourself. this means ··· 587 587 struct dlm_lock_resource *res; 588 588 struct dlm_lock *lock = NULL; 589 589 int call_ast, is_master; 590 - 591 - mlog_entry_void(); 592 590 593 591 if (!lksb) { 594 592 dlm_error(DLM_BADARGS);
+55 -189
fs/ocfs2/dlmglue.c
··· 64 64 unsigned long mw_mask; 65 65 unsigned long mw_goal; 66 66 #ifdef CONFIG_OCFS2_FS_STATS 67 - unsigned long long mw_lock_start; 67 + ktime_t mw_lock_start; 68 68 #endif 69 69 }; 70 70 ··· 397 397 { 398 398 int len; 399 399 400 - mlog_entry_void(); 401 - 402 400 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES); 403 401 404 402 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x", ··· 406 408 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1)); 407 409 408 410 mlog(0, "built lock resource with name: %s\n", name); 409 - 410 - mlog_exit_void(); 411 411 } 412 412 413 413 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock); ··· 431 435 #ifdef CONFIG_OCFS2_FS_STATS 432 436 static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) 433 437 { 434 - res->l_lock_num_prmode = 0; 435 - res->l_lock_num_prmode_failed = 0; 436 - res->l_lock_total_prmode = 0; 437 - res->l_lock_max_prmode = 0; 438 - res->l_lock_num_exmode = 0; 439 - res->l_lock_num_exmode_failed = 0; 440 - res->l_lock_total_exmode = 0; 441 - res->l_lock_max_exmode = 0; 442 438 res->l_lock_refresh = 0; 439 + memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats)); 440 + memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats)); 443 441 } 444 442 445 443 static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, 446 444 struct ocfs2_mask_waiter *mw, int ret) 447 445 { 448 - unsigned long long *num, *sum; 449 - unsigned int *max, *failed; 450 - struct timespec ts = current_kernel_time(); 451 - unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start; 446 + u32 usec; 447 + ktime_t kt; 448 + struct ocfs2_lock_stats *stats; 452 449 453 - if (level == LKM_PRMODE) { 454 - num = &res->l_lock_num_prmode; 455 - sum = &res->l_lock_total_prmode; 456 - max = &res->l_lock_max_prmode; 457 - failed = &res->l_lock_num_prmode_failed; 458 - } else if (level == LKM_EXMODE) { 459 - num = &res->l_lock_num_exmode; 460 - sum = &res->l_lock_total_exmode; 461 - max = &res->l_lock_max_exmode; 462 - failed = 
&res->l_lock_num_exmode_failed; 463 - } else 450 + if (level == LKM_PRMODE) 451 + stats = &res->l_lock_prmode; 452 + else if (level == LKM_EXMODE) 453 + stats = &res->l_lock_exmode; 454 + else 464 455 return; 465 456 466 - (*num)++; 467 - (*sum) += time; 468 - if (time > *max) 469 - *max = time; 457 + kt = ktime_sub(ktime_get(), mw->mw_lock_start); 458 + usec = ktime_to_us(kt); 459 + 460 + stats->ls_gets++; 461 + stats->ls_total += ktime_to_ns(kt); 462 + /* overflow */ 463 + if (unlikely(stats->ls_gets) == 0) { 464 + stats->ls_gets++; 465 + stats->ls_total = ktime_to_ns(kt); 466 + } 467 + 468 + if (stats->ls_max < usec) 469 + stats->ls_max = usec; 470 + 470 471 if (ret) 471 - (*failed)++; 472 + stats->ls_fail++; 472 473 } 473 474 474 475 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) ··· 475 482 476 483 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) 477 484 { 478 - struct timespec ts = current_kernel_time(); 479 - mw->mw_lock_start = timespec_to_ns(&ts); 485 + mw->mw_lock_start = ktime_get(); 480 486 } 481 487 #else 482 488 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) ··· 721 729 722 730 void ocfs2_lock_res_free(struct ocfs2_lock_res *res) 723 731 { 724 - mlog_entry_void(); 725 - 726 732 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED)) 727 733 return; 728 734 ··· 746 756 memset(&res->l_lksb, 0, sizeof(res->l_lksb)); 747 757 748 758 res->l_flags = 0UL; 749 - mlog_exit_void(); 750 759 } 751 760 752 761 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres, 753 762 int level) 754 763 { 755 - mlog_entry_void(); 756 - 757 764 BUG_ON(!lockres); 758 765 759 766 switch(level) { ··· 763 776 default: 764 777 BUG(); 765 778 } 766 - 767 - mlog_exit_void(); 768 779 } 769 780 770 781 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres, 771 782 int level) 772 783 { 773 - mlog_entry_void(); 774 - 775 784 BUG_ON(!lockres); 776 785 777 786 switch(level) { ··· 782 799 default: 783 
800 BUG(); 784 801 } 785 - mlog_exit_void(); 786 802 } 787 803 788 804 /* WARNING: This function lives in a world where the only three lock ··· 828 846 829 847 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres) 830 848 { 831 - mlog_entry_void(); 832 - 833 849 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); 834 850 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED)); 835 851 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); ··· 840 860 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); 841 861 } 842 862 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 843 - 844 - mlog_exit_void(); 845 863 } 846 864 847 865 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres) 848 866 { 849 - mlog_entry_void(); 850 - 851 867 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); 852 868 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED)); 853 869 ··· 865 889 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); 866 890 867 891 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 868 - 869 - mlog_exit_void(); 870 892 } 871 893 872 894 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres) 873 895 { 874 - mlog_entry_void(); 875 - 876 896 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY))); 877 897 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED); 878 898 ··· 880 908 lockres->l_level = lockres->l_requested; 881 909 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED); 882 910 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 883 - 884 - mlog_exit_void(); 885 911 } 886 912 887 913 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, 888 914 int level) 889 915 { 890 916 int needs_downconvert = 0; 891 - mlog_entry_void(); 892 917 893 918 assert_spin_locked(&lockres->l_lock); 894 919 ··· 907 938 908 939 if (needs_downconvert) 909 940 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); 910 - 911 - mlog_exit(needs_downconvert); 941 + mlog(0, "needs_downconvert = %d\n", needs_downconvert); 912 942 return 
needs_downconvert; 913 943 } 914 944 ··· 1119 1151 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); 1120 1152 unsigned long flags; 1121 1153 1122 - mlog_entry_void(); 1123 - 1124 1154 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n", 1125 1155 lockres->l_name, lockres->l_unlock_action); 1126 1156 ··· 1128 1162 "unlock_action %d\n", error, lockres->l_name, 1129 1163 lockres->l_unlock_action); 1130 1164 spin_unlock_irqrestore(&lockres->l_lock, flags); 1131 - mlog_exit_void(); 1132 1165 return; 1133 1166 } 1134 1167 ··· 1151 1186 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; 1152 1187 wake_up(&lockres->l_event); 1153 1188 spin_unlock_irqrestore(&lockres->l_lock, flags); 1154 - 1155 - mlog_exit_void(); 1156 1189 } 1157 1190 1158 1191 /* ··· 1196 1233 { 1197 1234 unsigned long flags; 1198 1235 1199 - mlog_entry_void(); 1200 1236 spin_lock_irqsave(&lockres->l_lock, flags); 1201 1237 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 1202 1238 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); ··· 1206 1244 spin_unlock_irqrestore(&lockres->l_lock, flags); 1207 1245 1208 1246 wake_up(&lockres->l_event); 1209 - mlog_exit_void(); 1210 1247 } 1211 1248 1212 1249 /* Note: If we detect another process working on the lock (i.e., ··· 1220 1259 int ret = 0; 1221 1260 unsigned long flags; 1222 1261 unsigned int gen; 1223 - 1224 - mlog_entry_void(); 1225 1262 1226 1263 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level, 1227 1264 dlm_flags); ··· 1252 1293 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name); 1253 1294 1254 1295 bail: 1255 - mlog_exit(ret); 1256 1296 return ret; 1257 1297 } 1258 1298 ··· 1373 1415 unsigned long flags; 1374 1416 unsigned int gen; 1375 1417 int noqueue_attempted = 0; 1376 - 1377 - mlog_entry_void(); 1378 1418 1379 1419 ocfs2_init_mask_waiter(&mw); 1380 1420 ··· 1539 1583 caller_ip); 1540 1584 } 1541 1585 #endif 1542 - mlog_exit(ret); 1543 1586 return ret; 1544 1587 } 1545 1588 ··· 
1560 1605 { 1561 1606 unsigned long flags; 1562 1607 1563 - mlog_entry_void(); 1564 1608 spin_lock_irqsave(&lockres->l_lock, flags); 1565 1609 ocfs2_dec_holders(lockres, level); 1566 1610 ocfs2_downconvert_on_unlock(osb, lockres); ··· 1568 1614 if (lockres->l_lockdep_map.key != NULL) 1569 1615 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip); 1570 1616 #endif 1571 - mlog_exit_void(); 1572 1617 } 1573 1618 1574 1619 static int ocfs2_create_new_lock(struct ocfs2_super *osb, ··· 1600 1647 1601 1648 BUG_ON(!inode); 1602 1649 BUG_ON(!ocfs2_inode_is_new(inode)); 1603 - 1604 - mlog_entry_void(); 1605 1650 1606 1651 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); 1607 1652 ··· 1634 1683 } 1635 1684 1636 1685 bail: 1637 - mlog_exit(ret); 1638 1686 return ret; 1639 1687 } 1640 1688 ··· 1645 1695 1646 1696 BUG_ON(!inode); 1647 1697 1648 - mlog_entry_void(); 1649 - 1650 1698 mlog(0, "inode %llu take %s RW lock\n", 1651 1699 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1652 1700 write ? "EXMODE" : "PRMODE"); 1653 1701 1654 - if (ocfs2_mount_local(osb)) { 1655 - mlog_exit(0); 1702 + if (ocfs2_mount_local(osb)) 1656 1703 return 0; 1657 - } 1658 1704 1659 1705 lockres = &OCFS2_I(inode)->ip_rw_lockres; 1660 1706 ··· 1661 1715 if (status < 0) 1662 1716 mlog_errno(status); 1663 1717 1664 - mlog_exit(status); 1665 1718 return status; 1666 1719 } 1667 1720 ··· 1670 1725 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres; 1671 1726 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1672 1727 1673 - mlog_entry_void(); 1674 - 1675 1728 mlog(0, "inode %llu drop %s RW lock\n", 1676 1729 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1677 1730 write ? 
"EXMODE" : "PRMODE"); 1678 1731 1679 1732 if (!ocfs2_mount_local(osb)) 1680 1733 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); 1681 - 1682 - mlog_exit_void(); 1683 1734 } 1684 1735 1685 1736 /* ··· 1688 1747 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1689 1748 1690 1749 BUG_ON(!inode); 1691 - 1692 - mlog_entry_void(); 1693 1750 1694 1751 mlog(0, "inode %llu take PRMODE open lock\n", 1695 1752 (unsigned long long)OCFS2_I(inode)->ip_blkno); ··· 1703 1764 mlog_errno(status); 1704 1765 1705 1766 out: 1706 - mlog_exit(status); 1707 1767 return status; 1708 1768 } 1709 1769 ··· 1713 1775 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1714 1776 1715 1777 BUG_ON(!inode); 1716 - 1717 - mlog_entry_void(); 1718 1778 1719 1779 mlog(0, "inode %llu try to take %s open lock\n", 1720 1780 (unsigned long long)OCFS2_I(inode)->ip_blkno, ··· 1735 1799 level, DLM_LKF_NOQUEUE, 0); 1736 1800 1737 1801 out: 1738 - mlog_exit(status); 1739 1802 return status; 1740 1803 } 1741 1804 ··· 1745 1810 { 1746 1811 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres; 1747 1812 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1748 - 1749 - mlog_entry_void(); 1750 1813 1751 1814 mlog(0, "inode %llu drop open lock\n", 1752 1815 (unsigned long long)OCFS2_I(inode)->ip_blkno); ··· 1760 1827 DLM_LOCK_EX); 1761 1828 1762 1829 out: 1763 - mlog_exit_void(); 1830 + return; 1764 1831 } 1765 1832 1766 1833 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres, ··· 1976 2043 { 1977 2044 int kick = 0; 1978 2045 1979 - mlog_entry_void(); 1980 - 1981 2046 /* If we know that another node is waiting on our lock, kick 1982 2047 * the downconvert thread * pre-emptively when we reach a release 1983 2048 * condition. 
*/ ··· 1996 2065 1997 2066 if (kick) 1998 2067 ocfs2_wake_downconvert_thread(osb); 1999 - 2000 - mlog_exit_void(); 2001 2068 } 2002 2069 2003 2070 #define OCFS2_SEC_BITS 34 ··· 2023 2094 struct ocfs2_inode_info *oi = OCFS2_I(inode); 2024 2095 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; 2025 2096 struct ocfs2_meta_lvb *lvb; 2026 - 2027 - mlog_entry_void(); 2028 2097 2029 2098 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 2030 2099 ··· 2055 2128 2056 2129 out: 2057 2130 mlog_meta_lvb(0, lockres); 2058 - 2059 - mlog_exit_void(); 2060 2131 } 2061 2132 2062 2133 static void ocfs2_unpack_timespec(struct timespec *spec, ··· 2069 2144 struct ocfs2_inode_info *oi = OCFS2_I(inode); 2070 2145 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; 2071 2146 struct ocfs2_meta_lvb *lvb; 2072 - 2073 - mlog_entry_void(); 2074 2147 2075 2148 mlog_meta_lvb(0, lockres); 2076 2149 ··· 2100 2177 ocfs2_unpack_timespec(&inode->i_ctime, 2101 2178 be64_to_cpu(lvb->lvb_ictime_packed)); 2102 2179 spin_unlock(&oi->ip_lock); 2103 - 2104 - mlog_exit_void(); 2105 2180 } 2106 2181 2107 2182 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode, ··· 2126 2205 unsigned long flags; 2127 2206 int status = 0; 2128 2207 2129 - mlog_entry_void(); 2130 - 2131 2208 refresh_check: 2132 2209 spin_lock_irqsave(&lockres->l_lock, flags); 2133 2210 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) { ··· 2146 2227 2147 2228 status = 1; 2148 2229 bail: 2149 - mlog_exit(status); 2230 + mlog(0, "status %d\n", status); 2150 2231 return status; 2151 2232 } 2152 2233 ··· 2156 2237 int status) 2157 2238 { 2158 2239 unsigned long flags; 2159 - mlog_entry_void(); 2160 2240 2161 2241 spin_lock_irqsave(&lockres->l_lock, flags); 2162 2242 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING); ··· 2164 2246 spin_unlock_irqrestore(&lockres->l_lock, flags); 2165 2247 2166 2248 wake_up(&lockres->l_event); 2167 - 2168 - mlog_exit_void(); 2169 2249 } 2170 2250 2171 2251 /* may or may not return a bh if it went 
to disk. */ ··· 2175 2259 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; 2176 2260 struct ocfs2_dinode *fe; 2177 2261 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2178 - 2179 - mlog_entry_void(); 2180 2262 2181 2263 if (ocfs2_mount_local(osb)) 2182 2264 goto bail; ··· 2244 2330 bail_refresh: 2245 2331 ocfs2_complete_lock_res_refresh(lockres, status); 2246 2332 bail: 2247 - mlog_exit(status); 2248 2333 return status; 2249 2334 } 2250 2335 ··· 2286 2373 struct buffer_head *local_bh = NULL; 2287 2374 2288 2375 BUG_ON(!inode); 2289 - 2290 - mlog_entry_void(); 2291 2376 2292 2377 mlog(0, "inode %llu, take %s META lock\n", 2293 2378 (unsigned long long)OCFS2_I(inode)->ip_blkno, ··· 2378 2467 if (local_bh) 2379 2468 brelse(local_bh); 2380 2469 2381 - mlog_exit(status); 2382 2470 return status; 2383 2471 } 2384 2472 ··· 2427 2517 { 2428 2518 int ret; 2429 2519 2430 - mlog_entry_void(); 2431 2520 ret = ocfs2_inode_lock(inode, NULL, 0); 2432 2521 if (ret < 0) { 2433 2522 mlog_errno(ret); ··· 2454 2545 } else 2455 2546 *level = 0; 2456 2547 2457 - mlog_exit(ret); 2458 2548 return ret; 2459 2549 } 2460 2550 ··· 2464 2556 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres; 2465 2557 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2466 2558 2467 - mlog_entry_void(); 2468 - 2469 2559 mlog(0, "inode %llu drop %s META lock\n", 2470 2560 (unsigned long long)OCFS2_I(inode)->ip_blkno, 2471 2561 ex ? "EXMODE" : "PRMODE"); ··· 2471 2565 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) && 2472 2566 !ocfs2_mount_local(osb)) 2473 2567 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); 2474 - 2475 - mlog_exit_void(); 2476 2568 } 2477 2569 2478 2570 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno) ··· 2521 2617 int level = ex ? 
DLM_LOCK_EX : DLM_LOCK_PR; 2522 2618 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; 2523 2619 2524 - mlog_entry_void(); 2525 - 2526 2620 if (ocfs2_is_hard_readonly(osb)) 2527 2621 return -EROFS; 2528 2622 ··· 2552 2650 ocfs2_track_lock_refresh(lockres); 2553 2651 } 2554 2652 bail: 2555 - mlog_exit(status); 2556 2653 return status; 2557 2654 } 2558 2655 ··· 2770 2869 return iter; 2771 2870 } 2772 2871 2773 - /* So that debugfs.ocfs2 can determine which format is being used */ 2774 - #define OCFS2_DLM_DEBUG_STR_VERSION 2 2872 + /* 2873 + * Version is used by debugfs.ocfs2 to determine the format being used 2874 + * 2875 + * New in version 2 2876 + * - Lock stats printed 2877 + * New in version 3 2878 + * - Max time in lock stats is in usecs (instead of nsecs) 2879 + */ 2880 + #define OCFS2_DLM_DEBUG_STR_VERSION 3 2775 2881 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) 2776 2882 { 2777 2883 int i; ··· 2820 2912 seq_printf(m, "0x%x\t", lvb[i]); 2821 2913 2822 2914 #ifdef CONFIG_OCFS2_FS_STATS 2823 - # define lock_num_prmode(_l) (_l)->l_lock_num_prmode 2824 - # define lock_num_exmode(_l) (_l)->l_lock_num_exmode 2825 - # define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed 2826 - # define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed 2827 - # define lock_total_prmode(_l) (_l)->l_lock_total_prmode 2828 - # define lock_total_exmode(_l) (_l)->l_lock_total_exmode 2829 - # define lock_max_prmode(_l) (_l)->l_lock_max_prmode 2830 - # define lock_max_exmode(_l) (_l)->l_lock_max_exmode 2831 - # define lock_refresh(_l) (_l)->l_lock_refresh 2915 + # define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets) 2916 + # define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets) 2917 + # define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail) 2918 + # define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail) 2919 + # define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total) 2920 + # define lock_total_exmode(_l) 
((_l)->l_lock_exmode.ls_total) 2921 + # define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max) 2922 + # define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max) 2923 + # define lock_refresh(_l) ((_l)->l_lock_refresh) 2832 2924 #else 2833 - # define lock_num_prmode(_l) (0ULL) 2834 - # define lock_num_exmode(_l) (0ULL) 2925 + # define lock_num_prmode(_l) (0) 2926 + # define lock_num_exmode(_l) (0) 2835 2927 # define lock_num_prmode_failed(_l) (0) 2836 2928 # define lock_num_exmode_failed(_l) (0) 2837 2929 # define lock_total_prmode(_l) (0ULL) ··· 2841 2933 # define lock_refresh(_l) (0) 2842 2934 #endif 2843 2935 /* The following seq_print was added in version 2 of this output */ 2844 - seq_printf(m, "%llu\t" 2845 - "%llu\t" 2936 + seq_printf(m, "%u\t" 2937 + "%u\t" 2846 2938 "%u\t" 2847 2939 "%u\t" 2848 2940 "%llu\t" ··· 2962 3054 int status = 0; 2963 3055 struct ocfs2_cluster_connection *conn = NULL; 2964 3056 2965 - mlog_entry_void(); 2966 - 2967 3057 if (ocfs2_mount_local(osb)) { 2968 3058 osb->node_num = 0; 2969 3059 goto local; ··· 3018 3112 kthread_stop(osb->dc_task); 3019 3113 } 3020 3114 3021 - mlog_exit(status); 3022 3115 return status; 3023 3116 } 3024 3117 3025 3118 void ocfs2_dlm_shutdown(struct ocfs2_super *osb, 3026 3119 int hangup_pending) 3027 3120 { 3028 - mlog_entry_void(); 3029 - 3030 3121 ocfs2_drop_osb_locks(osb); 3031 3122 3032 3123 /* ··· 3046 3143 osb->cconn = NULL; 3047 3144 3048 3145 ocfs2_dlm_shutdown_debug(osb); 3049 - 3050 - mlog_exit_void(); 3051 3146 } 3052 3147 3053 3148 static int ocfs2_drop_lock(struct ocfs2_super *osb, ··· 3127 3226 3128 3227 ocfs2_wait_on_busy_lock(lockres); 3129 3228 out: 3130 - mlog_exit(0); 3131 3229 return 0; 3132 3230 } 3133 3231 ··· 3184 3284 { 3185 3285 int status, err; 3186 3286 3187 - mlog_entry_void(); 3188 - 3189 3287 /* No need to call ocfs2_mark_lockres_freeing here - 3190 3288 * ocfs2_clear_inode has done it for us. 
*/ 3191 3289 ··· 3208 3310 if (err < 0 && !status) 3209 3311 status = err; 3210 3312 3211 - mlog_exit(status); 3212 3313 return status; 3213 3314 } 3214 3315 ··· 3249 3352 int ret; 3250 3353 u32 dlm_flags = DLM_LKF_CONVERT; 3251 3354 3252 - mlog_entry_void(); 3253 - 3254 3355 mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, 3255 3356 lockres->l_level, new_level); 3256 3357 ··· 3270 3375 3271 3376 ret = 0; 3272 3377 bail: 3273 - mlog_exit(ret); 3274 3378 return ret; 3275 3379 } 3276 3380 ··· 3278 3384 struct ocfs2_lock_res *lockres) 3279 3385 { 3280 3386 assert_spin_locked(&lockres->l_lock); 3281 - 3282 - mlog_entry_void(); 3283 3387 3284 3388 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) { 3285 3389 /* If we're already trying to cancel a lock conversion ··· 3308 3416 { 3309 3417 int ret; 3310 3418 3311 - mlog_entry_void(); 3312 - 3313 3419 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, 3314 3420 DLM_LKF_CANCEL); 3315 3421 if (ret) { ··· 3317 3427 3318 3428 mlog(ML_BASTS, "lockres %s\n", lockres->l_name); 3319 3429 3320 - mlog_exit(ret); 3321 3430 return ret; 3322 3431 } 3323 3432 ··· 3331 3442 int ret = 0; 3332 3443 int set_lvb = 0; 3333 3444 unsigned int gen; 3334 - 3335 - mlog_entry_void(); 3336 3445 3337 3446 spin_lock_irqsave(&lockres->l_lock, flags); 3338 3447 ··· 3506 3619 gen); 3507 3620 3508 3621 leave: 3509 - mlog_exit(ret); 3622 + if (ret) 3623 + mlog_errno(ret); 3510 3624 return ret; 3511 3625 3512 3626 leave_requeue: 3513 3627 spin_unlock_irqrestore(&lockres->l_lock, flags); 3514 3628 ctl->requeue = 1; 3515 3629 3516 - mlog_exit(0); 3517 3630 return 0; 3518 3631 } 3519 3632 ··· 3746 3859 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb, 3747 3860 oinfo->dqi_gi.dqi_type); 3748 3861 3749 - mlog_entry_void(); 3750 - 3751 3862 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 3752 3863 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION; 3753 3864 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace); ··· 3754 3869 lvb->lvb_blocks = 
cpu_to_be32(oinfo->dqi_gi.dqi_blocks); 3755 3870 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk); 3756 3871 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry); 3757 - 3758 - mlog_exit_void(); 3759 3872 } 3760 3873 3761 3874 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex) ··· 3762 3879 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb); 3763 3880 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 3764 3881 3765 - mlog_entry_void(); 3766 3882 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) 3767 3883 ocfs2_cluster_unlock(osb, lockres, level); 3768 - mlog_exit_void(); 3769 3884 } 3770 3885 3771 3886 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo) ··· 3818 3937 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 3819 3938 int status = 0; 3820 3939 3821 - mlog_entry_void(); 3822 - 3823 3940 /* On RO devices, locking really isn't needed... */ 3824 3941 if (ocfs2_is_hard_readonly(osb)) { 3825 3942 if (ex) ··· 3840 3961 ocfs2_qinfo_unlock(oinfo, ex); 3841 3962 ocfs2_complete_lock_res_refresh(lockres, status); 3842 3963 bail: 3843 - mlog_exit(status); 3844 3964 return status; 3845 3965 } 3846 3966 ··· 3885 4007 * considered valid until we remove the OCFS2_LOCK_QUEUED 3886 4008 * flag. 
*/ 3887 4009 3888 - mlog_entry_void(); 3889 - 3890 4010 BUG_ON(!lockres); 3891 4011 BUG_ON(!lockres->l_ops); 3892 4012 ··· 3918 4042 if (ctl.unblock_action != UNBLOCK_CONTINUE 3919 4043 && lockres->l_ops->post_unlock) 3920 4044 lockres->l_ops->post_unlock(osb, lockres); 3921 - 3922 - mlog_exit_void(); 3923 4045 } 3924 4046 3925 4047 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, 3926 4048 struct ocfs2_lock_res *lockres) 3927 4049 { 3928 - mlog_entry_void(); 3929 - 3930 4050 assert_spin_locked(&lockres->l_lock); 3931 4051 3932 4052 if (lockres->l_flags & OCFS2_LOCK_FREEING) { ··· 3943 4071 osb->blocked_lock_count++; 3944 4072 } 3945 4073 spin_unlock(&osb->dc_task_lock); 3946 - 3947 - mlog_exit_void(); 3948 4074 } 3949 4075 3950 4076 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) 3951 4077 { 3952 4078 unsigned long processed; 3953 4079 struct ocfs2_lock_res *lockres; 3954 - 3955 - mlog_entry_void(); 3956 4080 3957 4081 spin_lock(&osb->dc_task_lock); 3958 4082 /* grab this early so we know to try again if a state change and ··· 3973 4105 spin_lock(&osb->dc_task_lock); 3974 4106 } 3975 4107 spin_unlock(&osb->dc_task_lock); 3976 - 3977 - mlog_exit_void(); 3978 4108 } 3979 4109 3980 4110 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
+20 -27
fs/ocfs2/export.c
··· 26 26 #include <linux/fs.h> 27 27 #include <linux/types.h> 28 28 29 - #define MLOG_MASK_PREFIX ML_EXPORT 30 29 #include <cluster/masklog.h> 31 30 32 31 #include "ocfs2.h" ··· 39 40 40 41 #include "buffer_head_io.h" 41 42 #include "suballoc.h" 43 + #include "ocfs2_trace.h" 42 44 43 45 struct ocfs2_inode_handle 44 46 { ··· 56 56 int status, set; 57 57 struct dentry *result; 58 58 59 - mlog_entry("(0x%p, 0x%p)\n", sb, handle); 59 + trace_ocfs2_get_dentry_begin(sb, handle, (unsigned long long)blkno); 60 60 61 61 if (blkno == 0) { 62 - mlog(0, "nfs wants inode with blkno: 0\n"); 63 62 result = ERR_PTR(-ESTALE); 64 63 goto bail; 65 64 } ··· 82 83 } 83 84 84 85 status = ocfs2_test_inode_bit(osb, blkno, &set); 86 + trace_ocfs2_get_dentry_test_bit(status, set); 85 87 if (status < 0) { 86 88 if (status == -EINVAL) { 87 89 /* ··· 90 90 * as an inode, we return -ESTALE to be 91 91 * nice 92 92 */ 93 - mlog(0, "test inode bit failed %d\n", status); 94 93 status = -ESTALE; 95 - } else { 94 + } else 96 95 mlog(ML_ERROR, "test inode bit failed %d\n", status); 97 - } 98 96 goto unlock_nfs_sync; 99 97 } 100 98 101 99 /* If the inode allocator bit is clear, this inode must be stale */ 102 100 if (!set) { 103 - mlog(0, "inode %llu suballoc bit is clear\n", 104 - (unsigned long long)blkno); 105 101 status = -ESTALE; 106 102 goto unlock_nfs_sync; 107 103 } ··· 110 114 check_err: 111 115 if (status < 0) { 112 116 if (status == -ESTALE) { 113 - mlog(0, "stale inode ino: %llu generation: %u\n", 114 - (unsigned long long)blkno, handle->ih_generation); 117 + trace_ocfs2_get_dentry_stale((unsigned long long)blkno, 118 + handle->ih_generation); 115 119 } 116 120 result = ERR_PTR(status); 117 121 goto bail; ··· 126 130 check_gen: 127 131 if (handle->ih_generation != inode->i_generation) { 128 132 iput(inode); 129 - mlog(0, "stale inode ino: %llu generation: %u\n", 130 - (unsigned long long)blkno, handle->ih_generation); 133 + trace_ocfs2_get_dentry_generation((unsigned long long)blkno, 134 
+ handle->ih_generation, 135 + inode->i_generation); 131 136 result = ERR_PTR(-ESTALE); 132 137 goto bail; 133 138 } ··· 138 141 mlog_errno(PTR_ERR(result)); 139 142 140 143 bail: 141 - mlog_exit_ptr(result); 144 + trace_ocfs2_get_dentry_end(result); 142 145 return result; 143 146 } 144 147 ··· 149 152 struct dentry *parent; 150 153 struct inode *dir = child->d_inode; 151 154 152 - mlog_entry("(0x%p, '%.*s')\n", child, 153 - child->d_name.len, child->d_name.name); 154 - 155 - mlog(0, "find parent of directory %llu\n", 156 - (unsigned long long)OCFS2_I(dir)->ip_blkno); 155 + trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name, 156 + (unsigned long long)OCFS2_I(dir)->ip_blkno); 157 157 158 158 status = ocfs2_inode_lock(dir, NULL, 0); 159 159 if (status < 0) { ··· 172 178 ocfs2_inode_unlock(dir, 0); 173 179 174 180 bail: 175 - mlog_exit_ptr(parent); 181 + trace_ocfs2_get_parent_end(parent); 176 182 177 183 return parent; 178 184 } ··· 187 193 u32 generation; 188 194 __le32 *fh = (__force __le32 *) fh_in; 189 195 190 - mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry, 191 - dentry->d_name.len, dentry->d_name.name, 192 - fh, len, connectable); 196 + trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len, 197 + dentry->d_name.name, 198 + fh, len, connectable); 193 199 194 200 if (connectable && (len < 6)) { 195 201 *max_len = 6; ··· 204 210 blkno = OCFS2_I(inode)->ip_blkno; 205 211 generation = inode->i_generation; 206 212 207 - mlog(0, "Encoding fh: blkno: %llu, generation: %u\n", 208 - (unsigned long long)blkno, generation); 213 + trace_ocfs2_encode_fh_self((unsigned long long)blkno, generation); 209 214 210 215 len = 3; 211 216 fh[0] = cpu_to_le32((u32)(blkno >> 32)); ··· 229 236 len = 6; 230 237 type = 2; 231 238 232 - mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", 233 - (unsigned long long)blkno, generation); 239 + trace_ocfs2_encode_fh_parent((unsigned long long)blkno, 240 + generation); 234 241 } 235 242 236 243 *max_len = len; 237 
244 238 245 bail: 239 - mlog_exit(type); 246 + trace_ocfs2_encode_fh_type(type); 240 247 return type; 241 248 } 242 249
+4 -6
fs/ocfs2/extent_map.c
··· 28 28 #include <linux/types.h> 29 29 #include <linux/fiemap.h> 30 30 31 - #define MLOG_MASK_PREFIX ML_EXTENT_MAP 32 31 #include <cluster/masklog.h> 33 32 34 33 #include "ocfs2.h" ··· 38 39 #include "inode.h" 39 40 #include "super.h" 40 41 #include "symlink.h" 42 + #include "ocfs2_trace.h" 41 43 42 44 #include "buffer_head_io.h" 43 45 ··· 841 841 u64 p_block, p_count; 842 842 int i, count, done = 0; 843 843 844 - mlog_entry("(inode = %p, v_block = %llu, nr = %d, bhs = %p, " 845 - "flags = %x, validate = %p)\n", 846 - inode, (unsigned long long)v_block, nr, bhs, flags, 847 - validate); 844 + trace_ocfs2_read_virt_blocks( 845 + inode, (unsigned long long)v_block, nr, bhs, flags, 846 + validate); 848 847 849 848 if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >= 850 849 i_size_read(inode)) { ··· 896 897 } 897 898 898 899 out: 899 - mlog_exit(rc); 900 900 return rc; 901 901 } 902 902
+95 -125
fs/ocfs2/file.c
··· 38 38 #include <linux/quotaops.h> 39 39 #include <linux/blkdev.h> 40 40 41 - #define MLOG_MASK_PREFIX ML_INODE 42 41 #include <cluster/masklog.h> 43 42 44 43 #include "ocfs2.h" ··· 60 61 #include "acl.h" 61 62 #include "quota.h" 62 63 #include "refcounttree.h" 64 + #include "ocfs2_trace.h" 63 65 64 66 #include "buffer_head_io.h" 65 67 ··· 99 99 int mode = file->f_flags; 100 100 struct ocfs2_inode_info *oi = OCFS2_I(inode); 101 101 102 - mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, 103 - file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name); 102 + trace_ocfs2_file_open(inode, file, file->f_path.dentry, 103 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 104 + file->f_path.dentry->d_name.len, 105 + file->f_path.dentry->d_name.name, mode); 104 106 105 107 if (file->f_mode & FMODE_WRITE) 106 108 dquot_initialize(inode); ··· 137 135 } 138 136 139 137 leave: 140 - mlog_exit(status); 141 138 return status; 142 139 } 143 140 ··· 144 143 { 145 144 struct ocfs2_inode_info *oi = OCFS2_I(inode); 146 145 147 - mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, 148 - file->f_path.dentry->d_name.len, 149 - file->f_path.dentry->d_name.name); 150 - 151 146 spin_lock(&oi->ip_lock); 152 147 if (!--oi->ip_open_count) 153 148 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT; 149 + 150 + trace_ocfs2_file_release(inode, file, file->f_path.dentry, 151 + oi->ip_blkno, 152 + file->f_path.dentry->d_name.len, 153 + file->f_path.dentry->d_name.name, 154 + oi->ip_open_count); 154 155 spin_unlock(&oi->ip_lock); 155 156 156 157 ocfs2_free_file_private(inode, file); 157 - 158 - mlog_exit(0); 159 158 160 159 return 0; 161 160 } ··· 178 177 struct inode *inode = file->f_mapping->host; 179 178 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 180 179 181 - mlog_entry("(0x%p, %d, 0x%p, '%.*s')\n", file, datasync, 182 - file->f_path.dentry, file->f_path.dentry->d_name.len, 183 - file->f_path.dentry->d_name.name); 180 + trace_ocfs2_sync_file(inode, file, file->f_path.dentry, 181 + 
OCFS2_I(inode)->ip_blkno, 182 + file->f_path.dentry->d_name.len, 183 + file->f_path.dentry->d_name.name, 184 + (unsigned long long)datasync); 184 185 185 186 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { 186 187 /* ··· 198 195 err = jbd2_journal_force_commit(journal); 199 196 200 197 bail: 201 - mlog_exit(err); 198 + if (err) 199 + mlog_errno(err); 202 200 203 201 return (err < 0) ? -EIO : 0; 204 202 } ··· 255 251 handle_t *handle; 256 252 struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data; 257 253 258 - mlog_entry_void(); 259 - 260 254 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); 261 255 if (IS_ERR(handle)) { 262 256 ret = PTR_ERR(handle); ··· 282 280 out_commit: 283 281 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); 284 282 out: 285 - mlog_exit(ret); 286 283 return ret; 287 284 } 288 285 ··· 292 291 { 293 292 int status; 294 293 295 - mlog_entry_void(); 296 294 i_size_write(inode, new_i_size); 297 295 inode->i_blocks = ocfs2_inode_sector_count(inode); 298 296 inode->i_ctime = inode->i_mtime = CURRENT_TIME; ··· 303 303 } 304 304 305 305 bail: 306 - mlog_exit(status); 307 306 return status; 308 307 } 309 308 ··· 374 375 struct ocfs2_dinode *di; 375 376 u64 cluster_bytes; 376 377 377 - mlog_entry_void(); 378 - 379 378 /* 380 379 * We need to CoW the cluster contains the offset if it is reflinked 381 380 * since we will call ocfs2_zero_range_for_truncate later which will ··· 426 429 out_commit: 427 430 ocfs2_commit_trans(osb, handle); 428 431 out: 429 - 430 - mlog_exit(status); 431 432 return status; 432 433 } 433 434 ··· 437 442 struct ocfs2_dinode *fe = NULL; 438 443 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 439 444 440 - mlog_entry("(inode = %llu, new_i_size = %llu\n", 441 - (unsigned long long)OCFS2_I(inode)->ip_blkno, 442 - (unsigned long long)new_i_size); 443 - 444 445 /* We trust di_bh because it comes from ocfs2_inode_lock(), which 445 446 * already validated it */ 446 447 fe = (struct ocfs2_dinode *) 
di_bh->b_data; 448 + 449 + trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno, 450 + (unsigned long long)le64_to_cpu(fe->i_size), 451 + (unsigned long long)new_i_size); 447 452 448 453 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode), 449 454 "Inode %llu, inode i_size = %lld != di " ··· 454 459 le32_to_cpu(fe->i_flags)); 455 460 456 461 if (new_i_size > le64_to_cpu(fe->i_size)) { 457 - mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n", 458 - (unsigned long long)le64_to_cpu(fe->i_size), 459 - (unsigned long long)new_i_size); 462 + trace_ocfs2_truncate_file_error( 463 + (unsigned long long)le64_to_cpu(fe->i_size), 464 + (unsigned long long)new_i_size); 460 465 status = -EINVAL; 461 466 mlog_errno(status); 462 467 goto bail; 463 468 } 464 - 465 - mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n", 466 - (unsigned long long)le64_to_cpu(fe->i_blkno), 467 - (unsigned long long)le64_to_cpu(fe->i_size), 468 - (unsigned long long)new_i_size); 469 469 470 470 /* lets handle the simple truncate cases before doing any more 471 471 * cluster locking. */ ··· 515 525 if (!status && OCFS2_I(inode)->ip_clusters == 0) 516 526 status = ocfs2_try_remove_refcount_tree(inode, di_bh); 517 527 518 - mlog_exit(status); 519 528 return status; 520 529 } 521 530 ··· 567 578 struct ocfs2_extent_tree et; 568 579 int did_quota = 0; 569 580 570 - mlog_entry("(clusters_to_add = %u)\n", clusters_to_add); 571 - 572 581 /* 573 582 * This function only exists for file systems which don't 574 583 * support holes. 
··· 583 596 restart_all: 584 597 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters); 585 598 586 - mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, " 587 - "clusters_to_add = %u\n", 588 - (unsigned long long)OCFS2_I(inode)->ip_blkno, 589 - (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters), 590 - clusters_to_add); 591 599 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh); 592 600 status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, 593 601 &data_ac, &meta_ac); ··· 602 620 } 603 621 604 622 restarted_transaction: 623 + trace_ocfs2_extend_allocation( 624 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 625 + (unsigned long long)i_size_read(inode), 626 + le32_to_cpu(fe->i_clusters), clusters_to_add, 627 + why, restart_func); 628 + 605 629 status = dquot_alloc_space_nodirty(inode, 606 630 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); 607 631 if (status) ··· 654 666 655 667 if (why != RESTART_NONE && clusters_to_add) { 656 668 if (why == RESTART_META) { 657 - mlog(0, "restarting function.\n"); 658 669 restart_func = 1; 659 670 status = 0; 660 671 } else { 661 672 BUG_ON(why != RESTART_TRANS); 662 673 663 - mlog(0, "restarting transaction.\n"); 664 674 /* TODO: This can be more intelligent. 
*/ 665 675 credits = ocfs2_calc_extend_credits(osb->sb, 666 676 &fe->id2.i_list, ··· 675 689 } 676 690 } 677 691 678 - mlog(0, "fe: i_clusters = %u, i_size=%llu\n", 692 + trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno, 679 693 le32_to_cpu(fe->i_clusters), 680 - (unsigned long long)le64_to_cpu(fe->i_size)); 681 - mlog(0, "inode: ip_clusters=%u, i_size=%lld\n", 682 - OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode)); 694 + (unsigned long long)le64_to_cpu(fe->i_size), 695 + OCFS2_I(inode)->ip_clusters, 696 + (unsigned long long)i_size_read(inode)); 683 697 684 698 leave: 685 699 if (status < 0 && did_quota) ··· 704 718 brelse(bh); 705 719 bh = NULL; 706 720 707 - mlog_exit(status); 708 721 return status; 709 722 } 710 723 ··· 770 785 if (!zero_to) 771 786 zero_to = PAGE_CACHE_SIZE; 772 787 773 - mlog(0, 774 - "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n", 775 - (unsigned long long)abs_from, (unsigned long long)abs_to, 776 - index, zero_from, zero_to); 788 + trace_ocfs2_write_zero_page( 789 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 790 + (unsigned long long)abs_from, 791 + (unsigned long long)abs_to, 792 + index, zero_from, zero_to); 777 793 778 794 /* We know that zero_from is block aligned */ 779 795 for (block_start = zero_from; block_start < zero_to; ··· 914 928 u64 next_pos; 915 929 u64 zero_pos = range_start; 916 930 917 - mlog(0, "range_start = %llu, range_end = %llu\n", 918 - (unsigned long long)range_start, 919 - (unsigned long long)range_end); 931 + trace_ocfs2_zero_extend_range( 932 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 933 + (unsigned long long)range_start, 934 + (unsigned long long)range_end); 920 935 BUG_ON(range_start >= range_end); 921 936 922 937 while (zero_pos < range_end) { ··· 949 962 struct super_block *sb = inode->i_sb; 950 963 951 964 zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode)); 952 - mlog(0, "zero_start %llu for i_size %llu\n", 953 - (unsigned long 
long)zero_start, 954 - (unsigned long long)i_size_read(inode)); 965 + trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno, 966 + (unsigned long long)zero_start, 967 + (unsigned long long)i_size_read(inode)); 955 968 while (zero_start < zero_to_size) { 956 969 ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start, 957 970 zero_to_size, ··· 1100 1113 struct dquot *transfer_to[MAXQUOTAS] = { }; 1101 1114 int qtype; 1102 1115 1103 - mlog_entry("(0x%p, '%.*s')\n", dentry, 1104 - dentry->d_name.len, dentry->d_name.name); 1116 + trace_ocfs2_setattr(inode, dentry, 1117 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 1118 + dentry->d_name.len, dentry->d_name.name, 1119 + attr->ia_valid, attr->ia_mode, 1120 + attr->ia_uid, attr->ia_gid); 1105 1121 1106 1122 /* ensuring we don't even attempt to truncate a symlink */ 1107 1123 if (S_ISLNK(inode->i_mode)) 1108 1124 attr->ia_valid &= ~ATTR_SIZE; 1109 1125 1110 - if (attr->ia_valid & ATTR_MODE) 1111 - mlog(0, "mode change: %d\n", attr->ia_mode); 1112 - if (attr->ia_valid & ATTR_UID) 1113 - mlog(0, "uid change: %d\n", attr->ia_uid); 1114 - if (attr->ia_valid & ATTR_GID) 1115 - mlog(0, "gid change: %d\n", attr->ia_gid); 1116 - if (attr->ia_valid & ATTR_SIZE) 1117 - mlog(0, "size change...\n"); 1118 - if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME)) 1119 - mlog(0, "time change...\n"); 1120 - 1121 1126 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ 1122 1127 | ATTR_GID | ATTR_UID | ATTR_MODE) 1123 - if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) { 1124 - mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid); 1128 + if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) 1125 1129 return 0; 1126 - } 1127 1130 1128 1131 status = inode_change_ok(inode, attr); 1129 1132 if (status) ··· 1251 1274 mlog_errno(status); 1252 1275 } 1253 1276 1254 - mlog_exit(status); 1255 1277 return status; 1256 1278 } 1257 1279 ··· 1262 1286 struct super_block *sb = dentry->d_inode->i_sb; 1263 1287 struct 
ocfs2_super *osb = sb->s_fs_info; 1264 1288 int err; 1265 - 1266 - mlog_entry_void(); 1267 1289 1268 1290 err = ocfs2_inode_revalidate(dentry); 1269 1291 if (err) { ··· 1276 1302 stat->blksize = osb->s_clustersize; 1277 1303 1278 1304 bail: 1279 - mlog_exit(err); 1280 - 1281 1305 return err; 1282 1306 } 1283 1307 ··· 1285 1313 1286 1314 if (flags & IPERM_FLAG_RCU) 1287 1315 return -ECHILD; 1288 - 1289 - mlog_entry_void(); 1290 1316 1291 1317 ret = ocfs2_inode_lock(inode, NULL, 0); 1292 1318 if (ret) { ··· 1297 1327 1298 1328 ocfs2_inode_unlock(inode, 0); 1299 1329 out: 1300 - mlog_exit(ret); 1301 1330 return ret; 1302 1331 } 1303 1332 ··· 1308 1339 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1309 1340 struct ocfs2_dinode *di; 1310 1341 1311 - mlog_entry("(Inode %llu, mode 0%o)\n", 1312 - (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode); 1342 + trace_ocfs2_write_remove_suid( 1343 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 1344 + inode->i_mode); 1313 1345 1314 1346 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); 1315 1347 if (IS_ERR(handle)) { ··· 1338 1368 out_trans: 1339 1369 ocfs2_commit_trans(osb, handle); 1340 1370 out: 1341 - mlog_exit(ret); 1342 1371 return ret; 1343 1372 } 1344 1373 ··· 1516 1547 * partial clusters here. There's no need to worry about 1517 1548 * physical allocation - the zeroing code knows to skip holes. 
1518 1549 */ 1519 - mlog(0, "byte start: %llu, end: %llu\n", 1520 - (unsigned long long)start, (unsigned long long)end); 1550 + trace_ocfs2_zero_partial_clusters( 1551 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 1552 + (unsigned long long)start, (unsigned long long)end); 1521 1553 1522 1554 /* 1523 1555 * If both edges are on a cluster boundary then there's no ··· 1542 1572 if (tmpend > end) 1543 1573 tmpend = end; 1544 1574 1545 - mlog(0, "1st range: start: %llu, tmpend: %llu\n", 1546 - (unsigned long long)start, (unsigned long long)tmpend); 1575 + trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start, 1576 + (unsigned long long)tmpend); 1547 1577 1548 1578 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); 1549 1579 if (ret) ··· 1557 1587 */ 1558 1588 start = end & ~(osb->s_clustersize - 1); 1559 1589 1560 - mlog(0, "2nd range: start: %llu, end: %llu\n", 1561 - (unsigned long long)start, (unsigned long long)end); 1590 + trace_ocfs2_zero_partial_clusters_range2( 1591 + (unsigned long long)start, (unsigned long long)end); 1562 1592 1563 1593 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end); 1564 1594 if (ret) ··· 1658 1688 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); 1659 1689 ocfs2_init_dealloc_ctxt(&dealloc); 1660 1690 1691 + trace_ocfs2_remove_inode_range( 1692 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 1693 + (unsigned long long)byte_start, 1694 + (unsigned long long)byte_len); 1695 + 1661 1696 if (byte_len == 0) 1662 1697 return 0; 1663 1698 ··· 1708 1733 trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start); 1709 1734 trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits; 1710 1735 cluster_in_el = trunc_end; 1711 - 1712 - mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, cend: %u\n", 1713 - (unsigned long long)OCFS2_I(inode)->ip_blkno, 1714 - (unsigned long long)byte_start, 1715 - (unsigned long long)byte_len, trunc_start, trunc_end); 1716 1736 1717 1737 ret = 
ocfs2_zero_partial_clusters(inode, byte_start, byte_len); 1718 1738 if (ret) { ··· 2063 2093 int ret = 0, meta_level = 0; 2064 2094 struct dentry *dentry = file->f_path.dentry; 2065 2095 struct inode *inode = dentry->d_inode; 2066 - loff_t saved_pos, end; 2096 + loff_t saved_pos = 0, end; 2067 2097 2068 2098 /* 2069 2099 * We start with a read level meta lock and only jump to an ex ··· 2102 2132 2103 2133 /* work on a copy of ppos until we're sure that we won't have 2104 2134 * to recalculate it due to relocking. */ 2105 - if (appending) { 2135 + if (appending) 2106 2136 saved_pos = i_size_read(inode); 2107 - mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos); 2108 - } else { 2137 + else 2109 2138 saved_pos = *ppos; 2110 - } 2111 2139 2112 2140 end = saved_pos + count; 2113 2141 ··· 2176 2208 *ppos = saved_pos; 2177 2209 2178 2210 out_unlock: 2211 + trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, 2212 + saved_pos, appending, count, 2213 + direct_io, has_refcount); 2214 + 2179 2215 if (meta_level >= 0) 2180 2216 ocfs2_inode_unlock(inode, meta_level); 2181 2217 ··· 2205 2233 int full_coherency = !(osb->s_mount_opt & 2206 2234 OCFS2_MOUNT_COHERENCY_BUFFERED); 2207 2235 2208 - mlog_entry("(0x%p, %u, '%.*s')\n", file, 2209 - (unsigned int)nr_segs, 2210 - file->f_path.dentry->d_name.len, 2211 - file->f_path.dentry->d_name.name); 2236 + trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry, 2237 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 2238 + file->f_path.dentry->d_name.len, 2239 + file->f_path.dentry->d_name.name, 2240 + (unsigned int)nr_segs); 2212 2241 2213 2242 if (iocb->ki_left == 0) 2214 2243 return 0; ··· 2375 2402 2376 2403 if (written) 2377 2404 ret = written; 2378 - mlog_exit(ret); 2379 2405 return ret; 2380 2406 } 2381 2407 ··· 2410 2438 .u.file = out, 2411 2439 }; 2412 2440 2413 - mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe, 2414 - (unsigned int)len, 2415 - out->f_path.dentry->d_name.len, 2416 - 
out->f_path.dentry->d_name.name); 2441 + 2442 + trace_ocfs2_file_splice_write(inode, out, out->f_path.dentry, 2443 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 2444 + out->f_path.dentry->d_name.len, 2445 + out->f_path.dentry->d_name.name, len); 2417 2446 2418 2447 if (pipe->inode) 2419 2448 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT); ··· 2458 2485 balance_dirty_pages_ratelimited_nr(mapping, nr_pages); 2459 2486 } 2460 2487 2461 - mlog_exit(ret); 2462 2488 return ret; 2463 2489 } 2464 2490 ··· 2470 2498 int ret = 0, lock_level = 0; 2471 2499 struct inode *inode = in->f_path.dentry->d_inode; 2472 2500 2473 - mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe, 2474 - (unsigned int)len, 2475 - in->f_path.dentry->d_name.len, 2476 - in->f_path.dentry->d_name.name); 2501 + trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry, 2502 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 2503 + in->f_path.dentry->d_name.len, 2504 + in->f_path.dentry->d_name.name, len); 2477 2505 2478 2506 /* 2479 2507 * See the comment in ocfs2_file_aio_read() ··· 2488 2516 ret = generic_file_splice_read(in, ppos, pipe, len, flags); 2489 2517 2490 2518 bail: 2491 - mlog_exit(ret); 2492 2519 return ret; 2493 2520 } 2494 2521 ··· 2500 2529 struct file *filp = iocb->ki_filp; 2501 2530 struct inode *inode = filp->f_path.dentry->d_inode; 2502 2531 2503 - mlog_entry("(0x%p, %u, '%.*s')\n", filp, 2504 - (unsigned int)nr_segs, 2505 - filp->f_path.dentry->d_name.len, 2506 - filp->f_path.dentry->d_name.name); 2532 + trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry, 2533 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 2534 + filp->f_path.dentry->d_name.len, 2535 + filp->f_path.dentry->d_name.name, nr_segs); 2536 + 2507 2537 2508 2538 if (!inode) { 2509 2539 ret = -EINVAL; ··· 2550 2578 ocfs2_inode_unlock(inode, lock_level); 2551 2579 2552 2580 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos); 2553 - if (ret == -EINVAL) 2554 - mlog(0, "generic_file_aio_read returned 
-EINVAL\n"); 2581 + trace_generic_file_aio_read_ret(ret); 2555 2582 2556 2583 /* buffered aio wouldn't have proper lock coverage today */ 2557 2584 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT)); ··· 2568 2597 } 2569 2598 if (rw_level != -1) 2570 2599 ocfs2_rw_unlock(inode, rw_level); 2571 - mlog_exit(ret); 2572 2600 2573 2601 return ret; 2574 2602 }
+2 -2
fs/ocfs2/heartbeat.c
··· 28 28 #include <linux/types.h> 29 29 #include <linux/highmem.h> 30 30 31 - #define MLOG_MASK_PREFIX ML_SUPER 32 31 #include <cluster/masklog.h> 33 32 34 33 #include "ocfs2.h" ··· 36 37 #include "heartbeat.h" 37 38 #include "inode.h" 38 39 #include "journal.h" 40 + #include "ocfs2_trace.h" 39 41 40 42 #include "buffer_head_io.h" 41 43 ··· 66 66 67 67 BUG_ON(osb->node_num == node_num); 68 68 69 - mlog(0, "ocfs2: node down event for %d\n", node_num); 69 + trace_ocfs2_do_node_down(node_num); 70 70 71 71 if (!osb->cconn) { 72 72 /*
+51 -83
fs/ocfs2/inode.c
··· 31 31 32 32 #include <asm/byteorder.h> 33 33 34 - #define MLOG_MASK_PREFIX ML_INODE 35 34 #include <cluster/masklog.h> 36 35 37 36 #include "ocfs2.h" ··· 52 53 #include "uptodate.h" 53 54 #include "xattr.h" 54 55 #include "refcounttree.h" 56 + #include "ocfs2_trace.h" 55 57 56 58 #include "buffer_head_io.h" 57 59 ··· 131 131 struct super_block *sb = osb->sb; 132 132 struct ocfs2_find_inode_args args; 133 133 134 - mlog_entry("(blkno = %llu)\n", (unsigned long long)blkno); 134 + trace_ocfs2_iget_begin((unsigned long long)blkno, flags, 135 + sysfile_type); 135 136 136 137 /* Ok. By now we've either got the offsets passed to us by the 137 138 * caller, or we just pulled them off the bh. Lets do some ··· 153 152 /* inode was *not* in the inode cache. 2.6.x requires 154 153 * us to do our own read_inode call and unlock it 155 154 * afterwards. */ 156 - if (inode && inode->i_state & I_NEW) { 157 - mlog(0, "Inode was not in inode cache, reading it.\n"); 158 - ocfs2_read_locked_inode(inode, &args); 159 - unlock_new_inode(inode); 160 - } 161 155 if (inode == NULL) { 162 156 inode = ERR_PTR(-ENOMEM); 163 157 mlog_errno(PTR_ERR(inode)); 164 158 goto bail; 159 + } 160 + trace_ocfs2_iget5_locked(inode->i_state); 161 + if (inode->i_state & I_NEW) { 162 + ocfs2_read_locked_inode(inode, &args); 163 + unlock_new_inode(inode); 165 164 } 166 165 if (is_bad_inode(inode)) { 167 166 iput(inode); ··· 171 170 172 171 bail: 173 172 if (!IS_ERR(inode)) { 174 - mlog(0, "returning inode with number %llu\n", 175 - (unsigned long long)OCFS2_I(inode)->ip_blkno); 176 - mlog_exit_ptr(inode); 173 + trace_ocfs2_iget_end(inode, 174 + (unsigned long long)OCFS2_I(inode)->ip_blkno); 177 175 } 178 176 179 177 return inode; ··· 192 192 struct ocfs2_inode_info *oi = OCFS2_I(inode); 193 193 int ret = 0; 194 194 195 - mlog_entry("(0x%p, %lu, 0x%p)\n", inode, inode->i_ino, opaque); 196 - 197 195 args = opaque; 198 196 199 197 mlog_bug_on_msg(!inode, "No inode in find actor!\n"); 198 + 199 + 
trace_ocfs2_find_actor(inode, inode->i_ino, opaque, args->fi_blkno); 200 200 201 201 if (oi->ip_blkno != args->fi_blkno) 202 202 goto bail; 203 203 204 204 ret = 1; 205 205 bail: 206 - mlog_exit(ret); 207 206 return ret; 208 207 } 209 208 ··· 216 217 struct ocfs2_find_inode_args *args = opaque; 217 218 static struct lock_class_key ocfs2_quota_ip_alloc_sem_key, 218 219 ocfs2_file_ip_alloc_sem_key; 219 - 220 - mlog_entry("inode = %p, opaque = %p\n", inode, opaque); 221 220 222 221 inode->i_ino = args->fi_ino; 223 222 OCFS2_I(inode)->ip_blkno = args->fi_blkno; ··· 232 235 lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem, 233 236 &ocfs2_file_ip_alloc_sem_key); 234 237 235 - mlog_exit(0); 236 238 return 0; 237 239 } 238 240 ··· 241 245 struct super_block *sb; 242 246 struct ocfs2_super *osb; 243 247 int use_plocks = 1; 244 - 245 - mlog_entry("(0x%p, size:%llu)\n", inode, 246 - (unsigned long long)le64_to_cpu(fe->i_size)); 247 248 248 249 sb = inode->i_sb; 249 250 osb = OCFS2_SB(sb); ··· 293 300 294 301 inode->i_nlink = ocfs2_read_links_count(fe); 295 302 303 + trace_ocfs2_populate_inode(OCFS2_I(inode)->ip_blkno, 304 + le32_to_cpu(fe->i_flags)); 296 305 if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) { 297 306 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE; 298 307 inode->i_flags |= S_NOQUOTA; 299 308 } 300 - 309 + 301 310 if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) { 302 311 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; 303 - mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino); 304 312 } else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) { 305 313 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; 306 314 } else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) { 307 315 inode->i_flags |= S_NOQUOTA; 308 316 } else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) { 309 - mlog(0, "superblock inode: i_ino=%lu\n", inode->i_ino); 310 317 /* we can't actually hit this as read_inode can't 311 318 * handle superblocks today ;-) */ 312 319 BUG(); ··· 374 381 
if (S_ISDIR(inode->i_mode)) 375 382 ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv, 376 383 OCFS2_RESV_FLAG_DIR); 377 - mlog_exit_void(); 378 384 } 379 385 380 386 static int ocfs2_read_locked_inode(struct inode *inode, ··· 385 393 struct buffer_head *bh = NULL; 386 394 int status, can_lock; 387 395 u32 generation = 0; 388 - 389 - mlog_entry("(0x%p, 0x%p)\n", inode, args); 390 396 391 397 status = -EINVAL; 392 398 if (inode == NULL || inode->i_sb == NULL) { ··· 432 442 can_lock = !(args->fi_flags & OCFS2_FI_FLAG_SYSFILE) 433 443 && !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) 434 444 && !ocfs2_mount_local(osb); 445 + 446 + trace_ocfs2_read_locked_inode( 447 + (unsigned long long)OCFS2_I(inode)->ip_blkno, can_lock); 435 448 436 449 /* 437 450 * To maintain backwards compatibility with older versions of ··· 527 534 if (args && bh) 528 535 brelse(bh); 529 536 530 - mlog_exit(status); 531 537 return status; 532 538 } 533 539 ··· 542 550 int status = 0; 543 551 struct ocfs2_dinode *fe; 544 552 handle_t *handle = NULL; 545 - 546 - mlog_entry_void(); 547 553 548 554 fe = (struct ocfs2_dinode *) fe_bh->b_data; 549 555 ··· 590 600 out: 591 601 if (handle) 592 602 ocfs2_commit_trans(osb, handle); 593 - mlog_exit(status); 594 603 return status; 595 604 } 596 605 ··· 685 696 686 697 spin_lock(&osb->osb_lock); 687 698 if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, slot)) { 688 - mlog(0, "Recovery is happening on orphan dir %d, will skip " 689 - "this inode\n", slot); 690 699 ret = -EDEADLK; 691 700 goto out; 692 701 } ··· 693 706 osb->osb_orphan_wipes[slot]++; 694 707 out: 695 708 spin_unlock(&osb->osb_lock); 709 + trace_ocfs2_check_orphan_recovery_state(slot, ret); 696 710 return ret; 697 711 } 698 712 ··· 804 816 struct ocfs2_inode_info *oi = OCFS2_I(inode); 805 817 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 806 818 819 + trace_ocfs2_inode_is_valid_to_delete(current, osb->dc_task, 820 + (unsigned long long)oi->ip_blkno, 821 + 
oi->ip_flags); 822 + 807 823 /* We shouldn't be getting here for the root directory 808 824 * inode.. */ 809 825 if (inode == osb->root_inode) { ··· 820 828 * have to skip deleting this guy. That's OK though because 821 829 * the node who's doing the actual deleting should handle it 822 830 * anyway. */ 823 - if (current == osb->dc_task) { 824 - mlog(0, "Skipping delete of %lu because we're currently " 825 - "in downconvert\n", inode->i_ino); 831 + if (current == osb->dc_task) 826 832 goto bail; 827 - } 828 833 829 834 spin_lock(&oi->ip_lock); 830 835 /* OCFS2 *never* deletes system files. This should technically ··· 836 847 /* If we have allowd wipe of this inode for another node, it 837 848 * will be marked here so we can safely skip it. Recovery will 838 849 * cleanup any inodes we might inadvertantly skip here. */ 839 - if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) { 840 - mlog(0, "Skipping delete of %lu because another node " 841 - "has done this for us.\n", inode->i_ino); 850 + if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) 842 851 goto bail_unlock; 843 - } 844 852 845 853 ret = 1; 846 854 bail_unlock: ··· 854 868 struct buffer_head *di_bh, 855 869 int *wipe) 856 870 { 857 - int status = 0; 871 + int status = 0, reason = 0; 858 872 struct ocfs2_inode_info *oi = OCFS2_I(inode); 859 873 struct ocfs2_dinode *di; 860 874 861 875 *wipe = 0; 862 876 877 + trace_ocfs2_query_inode_wipe_begin((unsigned long long)oi->ip_blkno, 878 + inode->i_nlink); 879 + 863 880 /* While we were waiting for the cluster lock in 864 881 * ocfs2_delete_inode, another node might have asked to delete 865 882 * the inode. Recheck our flags to catch this. */ 866 883 if (!ocfs2_inode_is_valid_to_delete(inode)) { 867 - mlog(0, "Skipping delete of %llu because flags changed\n", 868 - (unsigned long long)oi->ip_blkno); 884 + reason = 1; 869 885 goto bail; 870 886 } 871 887 872 888 /* Now that we have an up to date inode, we can double check 873 889 * the link count. 
*/ 874 - if (inode->i_nlink) { 875 - mlog(0, "Skipping delete of %llu because nlink = %u\n", 876 - (unsigned long long)oi->ip_blkno, inode->i_nlink); 890 + if (inode->i_nlink) 877 891 goto bail; 878 - } 879 892 880 893 /* Do some basic inode verification... */ 881 894 di = (struct ocfs2_dinode *) di_bh->b_data; ··· 889 904 * ORPHANED_FL not. 890 905 */ 891 906 if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) { 892 - mlog(0, "Reflinked inode %llu is no longer orphaned. " 893 - "it shouldn't be deleted\n", 894 - (unsigned long long)oi->ip_blkno); 907 + reason = 2; 895 908 goto bail; 896 909 } 897 910 ··· 926 943 status = ocfs2_try_open_lock(inode, 1); 927 944 if (status == -EAGAIN) { 928 945 status = 0; 929 - mlog(0, "Skipping delete of %llu because it is in use on " 930 - "other nodes\n", (unsigned long long)oi->ip_blkno); 946 + reason = 3; 931 947 goto bail; 932 948 } 933 949 if (status < 0) { ··· 935 953 } 936 954 937 955 *wipe = 1; 938 - mlog(0, "Inode %llu is ok to wipe from orphan dir %u\n", 939 - (unsigned long long)oi->ip_blkno, 940 - le16_to_cpu(di->i_orphaned_slot)); 956 + trace_ocfs2_query_inode_wipe_succ(le16_to_cpu(di->i_orphaned_slot)); 941 957 942 958 bail: 959 + trace_ocfs2_query_inode_wipe_end(status, reason); 943 960 return status; 944 961 } 945 962 ··· 948 967 static void ocfs2_cleanup_delete_inode(struct inode *inode, 949 968 int sync_data) 950 969 { 951 - mlog(0, "Cleanup inode %llu, sync = %d\n", 952 - (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); 970 + trace_ocfs2_cleanup_delete_inode( 971 + (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); 953 972 if (sync_data) 954 973 write_inode_now(inode, 1); 955 974 truncate_inode_pages(&inode->i_data, 0); ··· 961 980 sigset_t oldset; 962 981 struct buffer_head *di_bh = NULL; 963 982 964 - mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); 983 + trace_ocfs2_delete_inode(inode->i_ino, 984 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 985 + is_bad_inode(inode)); 965 986 
966 987 /* When we fail in read_inode() we mark inode as bad. The second test 967 988 * catches the case when inode allocation fails before allocating 968 989 * a block for inode. */ 969 - if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) { 970 - mlog(0, "Skipping delete of bad inode\n"); 990 + if (is_bad_inode(inode) || !OCFS2_I(inode)->ip_blkno) 971 991 goto bail; 972 - } 973 992 974 993 dquot_initialize(inode); 975 994 ··· 1061 1080 bail_unblock: 1062 1081 ocfs2_unblock_signals(&oldset); 1063 1082 bail: 1064 - mlog_exit_void(); 1083 + return; 1065 1084 } 1066 1085 1067 1086 static void ocfs2_clear_inode(struct inode *inode) ··· 1069 1088 int status; 1070 1089 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1071 1090 1072 - mlog_entry_void(); 1073 - 1074 1091 end_writeback(inode); 1075 - mlog(0, "Clearing inode: %llu, nlink = %u\n", 1076 - (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_nlink); 1092 + trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno, 1093 + inode->i_nlink); 1077 1094 1078 1095 mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL, 1079 1096 "Inode=%lu\n", inode->i_ino); ··· 1160 1181 */ 1161 1182 jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal, 1162 1183 &oi->ip_jinode); 1163 - 1164 - mlog_exit_void(); 1165 1184 } 1166 1185 1167 1186 void ocfs2_evict_inode(struct inode *inode) ··· 1181 1204 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1182 1205 int res; 1183 1206 1184 - mlog_entry_void(); 1185 - 1186 - mlog(0, "Drop inode %llu, nlink = %u, ip_flags = 0x%x\n", 1187 - (unsigned long long)oi->ip_blkno, inode->i_nlink, oi->ip_flags); 1207 + trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno, 1208 + inode->i_nlink, oi->ip_flags); 1188 1209 1189 1210 if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED) 1190 1211 res = 1; 1191 1212 else 1192 1213 res = generic_drop_inode(inode); 1193 1214 1194 - mlog_exit_void(); 1195 1215 return res; 1196 1216 } 1197 1217 ··· 1200 1226 struct inode *inode = dentry->d_inode; 1201 1227 
int status = 0; 1202 1228 1203 - mlog_entry("(inode = 0x%p, ino = %llu)\n", inode, 1204 - inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL); 1229 + trace_ocfs2_inode_revalidate(inode, 1230 + inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL, 1231 + inode ? (unsigned long long)OCFS2_I(inode)->ip_flags : 0); 1205 1232 1206 1233 if (!inode) { 1207 - mlog(0, "eep, no inode!\n"); 1208 1234 status = -ENOENT; 1209 1235 goto bail; 1210 1236 } ··· 1212 1238 spin_lock(&OCFS2_I(inode)->ip_lock); 1213 1239 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { 1214 1240 spin_unlock(&OCFS2_I(inode)->ip_lock); 1215 - mlog(0, "inode deleted!\n"); 1216 1241 status = -ENOENT; 1217 1242 goto bail; 1218 1243 } ··· 1227 1254 } 1228 1255 ocfs2_inode_unlock(inode, 0); 1229 1256 bail: 1230 - mlog_exit(status); 1231 - 1232 1257 return status; 1233 1258 } 1234 1259 ··· 1242 1271 int status; 1243 1272 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data; 1244 1273 1245 - mlog_entry("(inode %llu)\n", 1246 - (unsigned long long)OCFS2_I(inode)->ip_blkno); 1274 + trace_ocfs2_mark_inode_dirty((unsigned long long)OCFS2_I(inode)->ip_blkno); 1247 1275 1248 1276 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, 1249 1277 OCFS2_JOURNAL_ACCESS_WRITE); ··· 1272 1302 1273 1303 ocfs2_journal_dirty(handle, bh); 1274 1304 leave: 1275 - mlog_exit(status); 1276 1305 return status; 1277 1306 } 1278 1307 ··· 1314 1345 int rc; 1315 1346 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; 1316 1347 1317 - mlog(0, "Validating dinode %llu\n", 1318 - (unsigned long long)bh->b_blocknr); 1348 + trace_ocfs2_validate_inode_block((unsigned long long)bh->b_blocknr); 1319 1349 1320 1350 BUG_ON(!buffer_uptodate(bh)); 1321 1351
+30 -11
fs/ocfs2/ioctl.c
··· 9 9 #include <linux/mount.h> 10 10 #include <linux/compat.h> 11 11 12 - #define MLOG_MASK_PREFIX ML_INODE 13 12 #include <cluster/masklog.h> 14 13 15 14 #include "ocfs2.h" ··· 45 46 #define o2info_set_request_error(a, b) \ 46 47 __o2info_set_request_error((struct ocfs2_info_request *)&(a), b) 47 48 49 + static inline void __o2info_set_request_filled(struct ocfs2_info_request *req) 50 + { 51 + req->ir_flags |= OCFS2_INFO_FL_FILLED; 52 + } 53 + 54 + #define o2info_set_request_filled(a) \ 55 + __o2info_set_request_filled((struct ocfs2_info_request *)&(a)) 56 + 57 + static inline void __o2info_clear_request_filled(struct ocfs2_info_request *req) 58 + { 59 + req->ir_flags &= ~OCFS2_INFO_FL_FILLED; 60 + } 61 + 62 + #define o2info_clear_request_filled(a) \ 63 + __o2info_clear_request_filled((struct ocfs2_info_request *)&(a)) 64 + 48 65 static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) 49 66 { 50 67 int status; ··· 74 59 *flags = OCFS2_I(inode)->ip_attr; 75 60 ocfs2_inode_unlock(inode, 0); 76 61 77 - mlog_exit(status); 78 62 return status; 79 63 } 80 64 ··· 139 125 140 126 brelse(bh); 141 127 142 - mlog_exit(status); 143 128 return status; 144 129 } 145 130 ··· 152 139 goto bail; 153 140 154 141 oib.ib_blocksize = inode->i_sb->s_blocksize; 155 - oib.ib_req.ir_flags |= OCFS2_INFO_FL_FILLED; 142 + 143 + o2info_set_request_filled(oib); 156 144 157 145 if (o2info_to_user(oib, req)) 158 146 goto bail; ··· 177 163 goto bail; 178 164 179 165 oic.ic_clustersize = osb->s_clustersize; 180 - oic.ic_req.ir_flags |= OCFS2_INFO_FL_FILLED; 166 + 167 + o2info_set_request_filled(oic); 181 168 182 169 if (o2info_to_user(oic, req)) 183 170 goto bail; ··· 202 187 goto bail; 203 188 204 189 oim.im_max_slots = osb->max_slots; 205 - oim.im_req.ir_flags |= OCFS2_INFO_FL_FILLED; 190 + 191 + o2info_set_request_filled(oim); 206 192 207 193 if (o2info_to_user(oim, req)) 208 194 goto bail; ··· 227 211 goto bail; 228 212 229 213 memcpy(oil.il_label, osb->vol_label, 
OCFS2_MAX_VOL_LABEL_LEN); 230 - oil.il_req.ir_flags |= OCFS2_INFO_FL_FILLED; 214 + 215 + o2info_set_request_filled(oil); 231 216 232 217 if (o2info_to_user(oil, req)) 233 218 goto bail; ··· 252 235 goto bail; 253 236 254 237 memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); 255 - oiu.iu_req.ir_flags |= OCFS2_INFO_FL_FILLED; 238 + 239 + o2info_set_request_filled(oiu); 256 240 257 241 if (o2info_to_user(oiu, req)) 258 242 goto bail; ··· 279 261 oif.if_compat_features = osb->s_feature_compat; 280 262 oif.if_incompat_features = osb->s_feature_incompat; 281 263 oif.if_ro_compat_features = osb->s_feature_ro_compat; 282 - oif.if_req.ir_flags |= OCFS2_INFO_FL_FILLED; 264 + 265 + o2info_set_request_filled(oif); 283 266 284 267 if (o2info_to_user(oif, req)) 285 268 goto bail; ··· 305 286 306 287 oij.ij_journal_size = osb->journal->j_inode->i_size; 307 288 308 - oij.ij_req.ir_flags |= OCFS2_INFO_FL_FILLED; 289 + o2info_set_request_filled(oij); 309 290 310 291 if (o2info_to_user(oij, req)) 311 292 goto bail; ··· 327 308 if (o2info_from_user(oir, req)) 328 309 goto bail; 329 310 330 - oir.ir_flags &= ~OCFS2_INFO_FL_FILLED; 311 + o2info_clear_request_filled(oir); 331 312 332 313 if (o2info_to_user(oir, req)) 333 314 goto bail;
+54 -114
fs/ocfs2/journal.c
··· 31 31 #include <linux/time.h> 32 32 #include <linux/random.h> 33 33 34 - #define MLOG_MASK_PREFIX ML_JOURNAL 35 34 #include <cluster/masklog.h> 36 35 37 36 #include "ocfs2.h" ··· 51 52 #include "quota.h" 52 53 53 54 #include "buffer_head_io.h" 55 + #include "ocfs2_trace.h" 54 56 55 57 DEFINE_SPINLOCK(trans_inc_lock); 56 58 ··· 303 303 unsigned int flushed; 304 304 struct ocfs2_journal *journal = NULL; 305 305 306 - mlog_entry_void(); 307 - 308 306 journal = osb->journal; 309 307 310 308 /* Flush all pending commits and checkpoint the journal. */ 311 309 down_write(&journal->j_trans_barrier); 312 310 313 - if (atomic_read(&journal->j_num_trans) == 0) { 311 + flushed = atomic_read(&journal->j_num_trans); 312 + trace_ocfs2_commit_cache_begin(flushed); 313 + if (flushed == 0) { 314 314 up_write(&journal->j_trans_barrier); 315 - mlog(0, "No transactions for me to flush!\n"); 316 315 goto finally; 317 316 } 318 317 ··· 330 331 atomic_set(&journal->j_num_trans, 0); 331 332 up_write(&journal->j_trans_barrier); 332 333 333 - mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n", 334 - journal->j_trans_id, flushed); 334 + trace_ocfs2_commit_cache_end(journal->j_trans_id, flushed); 335 335 336 336 ocfs2_wake_downconvert_thread(osb); 337 337 wake_up(&journal->j_checkpointed); 338 338 finally: 339 - mlog_exit(status); 340 339 return status; 341 340 } 342 341 ··· 422 425 return 0; 423 426 424 427 old_nblocks = handle->h_buffer_credits; 425 - mlog_entry_void(); 426 428 427 - mlog(0, "Trying to extend transaction by %d blocks\n", nblocks); 429 + trace_ocfs2_extend_trans(old_nblocks, nblocks); 428 430 429 431 #ifdef CONFIG_OCFS2_DEBUG_FS 430 432 status = 1; ··· 436 440 #endif 437 441 438 442 if (status > 0) { 439 - mlog(0, 440 - "jbd2_journal_extend failed, trying " 441 - "jbd2_journal_restart\n"); 443 + trace_ocfs2_extend_trans_restart(old_nblocks + nblocks); 442 444 status = jbd2_journal_restart(handle, 443 445 old_nblocks + nblocks); 444 446 if (status < 0) { ··· 
447 453 448 454 status = 0; 449 455 bail: 450 - 451 - mlog_exit(status); 452 456 return status; 453 457 } 454 458 ··· 614 622 BUG_ON(!handle); 615 623 BUG_ON(!bh); 616 624 617 - mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n", 618 - (unsigned long long)bh->b_blocknr, type, 619 - (type == OCFS2_JOURNAL_ACCESS_CREATE) ? 620 - "OCFS2_JOURNAL_ACCESS_CREATE" : 621 - "OCFS2_JOURNAL_ACCESS_WRITE", 622 - bh->b_size); 625 + trace_ocfs2_journal_access( 626 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 627 + (unsigned long long)bh->b_blocknr, type, bh->b_size); 623 628 624 629 /* we can safely remove this assertion after testing. */ 625 630 if (!buffer_uptodate(bh)) { ··· 657 668 mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", 658 669 status, type); 659 670 660 - mlog_exit(status); 661 671 return status; 662 672 } 663 673 ··· 725 737 { 726 738 int status; 727 739 728 - mlog_entry("(bh->b_blocknr=%llu)\n", 729 - (unsigned long long)bh->b_blocknr); 740 + trace_ocfs2_journal_dirty((unsigned long long)bh->b_blocknr); 730 741 731 742 status = jbd2_journal_dirty_metadata(handle, bh); 732 743 BUG_ON(status); 733 - 734 - mlog_exit_void(); 735 744 } 736 745 737 746 #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE) ··· 759 774 struct buffer_head *bh = NULL; 760 775 struct ocfs2_super *osb; 761 776 int inode_lock = 0; 762 - 763 - mlog_entry_void(); 764 777 765 778 BUG_ON(!journal); 766 779 ··· 803 820 goto done; 804 821 } 805 822 806 - mlog(0, "inode->i_size = %lld\n", inode->i_size); 807 - mlog(0, "inode->i_blocks = %llu\n", 808 - (unsigned long long)inode->i_blocks); 809 - mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); 823 + trace_ocfs2_journal_init(inode->i_size, 824 + (unsigned long long)inode->i_blocks, 825 + OCFS2_I(inode)->ip_clusters); 810 826 811 827 /* call the kernels journal init function now */ 812 828 j_journal = jbd2_journal_init_inode(inode); ··· 815 833 goto done; 816 834 } 817 835 818 - 
mlog(0, "Returned from jbd2_journal_init_inode\n"); 819 - mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen); 836 + trace_ocfs2_journal_init_maxlen(j_journal->j_maxlen); 820 837 821 838 *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) & 822 839 OCFS2_JOURNAL_DIRTY_FL); ··· 840 859 } 841 860 } 842 861 843 - mlog_exit(status); 844 862 return status; 845 863 } 846 864 ··· 861 881 struct ocfs2_journal *journal = osb->journal; 862 882 struct buffer_head *bh = journal->j_bh; 863 883 struct ocfs2_dinode *fe; 864 - 865 - mlog_entry_void(); 866 884 867 885 fe = (struct ocfs2_dinode *)bh->b_data; 868 886 ··· 884 906 if (status < 0) 885 907 mlog_errno(status); 886 908 887 - mlog_exit(status); 888 909 return status; 889 910 } 890 911 ··· 897 920 int status = 0; 898 921 struct inode *inode = NULL; 899 922 int num_running_trans = 0; 900 - 901 - mlog_entry_void(); 902 923 903 924 BUG_ON(!osb); 904 925 ··· 914 939 BUG(); 915 940 916 941 num_running_trans = atomic_read(&(osb->journal->j_num_trans)); 917 - if (num_running_trans > 0) 918 - mlog(0, "Shutting down journal: must wait on %d " 919 - "running transactions!\n", 920 - num_running_trans); 942 + trace_ocfs2_journal_shutdown(num_running_trans); 921 943 922 944 /* Do a commit_cache here. It will flush our journal, *and* 923 945 * release any locks that are still held. ··· 927 955 * completely destroy the journal. 
*/ 928 956 if (osb->commit_task) { 929 957 /* Wait for the commit thread */ 930 - mlog(0, "Waiting for ocfs2commit to exit....\n"); 958 + trace_ocfs2_journal_shutdown_wait(osb->commit_task); 931 959 kthread_stop(osb->commit_task); 932 960 osb->commit_task = NULL; 933 961 } ··· 970 998 done: 971 999 if (inode) 972 1000 iput(inode); 973 - mlog_exit_void(); 974 1001 } 975 1002 976 1003 static void ocfs2_clear_journal_error(struct super_block *sb, ··· 994 1023 { 995 1024 int status = 0; 996 1025 struct ocfs2_super *osb; 997 - 998 - mlog_entry_void(); 999 1026 1000 1027 BUG_ON(!journal); 1001 1028 ··· 1028 1059 osb->commit_task = NULL; 1029 1060 1030 1061 done: 1031 - mlog_exit(status); 1032 1062 return status; 1033 1063 } 1034 1064 ··· 1037 1069 int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full) 1038 1070 { 1039 1071 int status; 1040 - 1041 - mlog_entry_void(); 1042 1072 1043 1073 BUG_ON(!journal); 1044 1074 ··· 1051 1085 mlog_errno(status); 1052 1086 1053 1087 bail: 1054 - mlog_exit(status); 1055 1088 return status; 1056 1089 } 1057 1090 ··· 1089 1124 #define CONCURRENT_JOURNAL_FILL 32ULL 1090 1125 struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; 1091 1126 1092 - mlog_entry_void(); 1093 - 1094 1127 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); 1095 1128 1096 1129 num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size); ··· 1124 1161 bail: 1125 1162 for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++) 1126 1163 brelse(bhs[i]); 1127 - mlog_exit(status); 1128 1164 return status; 1129 1165 } 1130 1166 ··· 1147 1185 */ 1148 1186 void ocfs2_complete_recovery(struct work_struct *work) 1149 1187 { 1150 - int ret; 1188 + int ret = 0; 1151 1189 struct ocfs2_journal *journal = 1152 1190 container_of(work, struct ocfs2_journal, j_recovery_work); 1153 1191 struct ocfs2_super *osb = journal->j_osb; ··· 1156 1194 struct ocfs2_quota_recovery *qrec; 1157 1195 LIST_HEAD(tmp_la_list); 1158 1196 1159 - mlog_entry_void(); 1160 - 1161 - mlog(0, 
"completing recovery from keventd\n"); 1197 + trace_ocfs2_complete_recovery( 1198 + (unsigned long long)OCFS2_I(journal->j_inode)->ip_blkno); 1162 1199 1163 1200 spin_lock(&journal->j_lock); 1164 1201 list_splice_init(&journal->j_la_cleanups, &tmp_la_list); ··· 1166 1205 list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) { 1167 1206 list_del_init(&item->lri_list); 1168 1207 1169 - mlog(0, "Complete recovery for slot %d\n", item->lri_slot); 1170 - 1171 1208 ocfs2_wait_on_quotas(osb); 1172 1209 1173 1210 la_dinode = item->lri_la_dinode; 1174 - if (la_dinode) { 1175 - mlog(0, "Clean up local alloc %llu\n", 1176 - (unsigned long long)le64_to_cpu(la_dinode->i_blkno)); 1211 + tl_dinode = item->lri_tl_dinode; 1212 + qrec = item->lri_qrec; 1177 1213 1214 + trace_ocfs2_complete_recovery_slot(item->lri_slot, 1215 + la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0, 1216 + tl_dinode ? le64_to_cpu(tl_dinode->i_blkno) : 0, 1217 + qrec); 1218 + 1219 + if (la_dinode) { 1178 1220 ret = ocfs2_complete_local_alloc_recovery(osb, 1179 1221 la_dinode); 1180 1222 if (ret < 0) ··· 1186 1222 kfree(la_dinode); 1187 1223 } 1188 1224 1189 - tl_dinode = item->lri_tl_dinode; 1190 1225 if (tl_dinode) { 1191 - mlog(0, "Clean up truncate log %llu\n", 1192 - (unsigned long long)le64_to_cpu(tl_dinode->i_blkno)); 1193 - 1194 1226 ret = ocfs2_complete_truncate_log_recovery(osb, 1195 1227 tl_dinode); 1196 1228 if (ret < 0) ··· 1199 1239 if (ret < 0) 1200 1240 mlog_errno(ret); 1201 1241 1202 - qrec = item->lri_qrec; 1203 1242 if (qrec) { 1204 - mlog(0, "Recovering quota files"); 1205 1243 ret = ocfs2_finish_quota_recovery(osb, qrec, 1206 1244 item->lri_slot); 1207 1245 if (ret < 0) ··· 1210 1252 kfree(item); 1211 1253 } 1212 1254 1213 - mlog(0, "Recovery completion\n"); 1214 - mlog_exit_void(); 1255 + trace_ocfs2_complete_recovery_end(ret); 1215 1256 } 1216 1257 1217 1258 /* NOTE: This function always eats your references to la_dinode and ··· 1296 1339 int rm_quota_used = 0, i; 1297 1340 struct 
ocfs2_quota_recovery *qrec; 1298 1341 1299 - mlog_entry_void(); 1300 - 1301 1342 status = ocfs2_wait_on_mount(osb); 1302 1343 if (status < 0) { 1303 1344 goto bail; ··· 1327 1372 * clear it until ocfs2_recover_node() has succeeded. */ 1328 1373 node_num = rm->rm_entries[0]; 1329 1374 spin_unlock(&osb->osb_lock); 1330 - mlog(0, "checking node %d\n", node_num); 1331 1375 slot_num = ocfs2_node_num_to_slot(osb, node_num); 1376 + trace_ocfs2_recovery_thread_node(node_num, slot_num); 1332 1377 if (slot_num == -ENOENT) { 1333 1378 status = 0; 1334 - mlog(0, "no slot for this node, so no recovery" 1335 - "required.\n"); 1336 1379 goto skip_recovery; 1337 1380 } 1338 - mlog(0, "node %d was using slot %d\n", node_num, slot_num); 1339 1381 1340 1382 /* It is a bit subtle with quota recovery. We cannot do it 1341 1383 * immediately because we have to obtain cluster locks from ··· 1359 1407 spin_lock(&osb->osb_lock); 1360 1408 } 1361 1409 spin_unlock(&osb->osb_lock); 1362 - mlog(0, "All nodes recovered\n"); 1410 + trace_ocfs2_recovery_thread_end(status); 1363 1411 1364 1412 /* Refresh all journal recovery generations from disk */ 1365 1413 status = ocfs2_check_journals_nolocks(osb); ··· 1403 1451 if (rm_quota) 1404 1452 kfree(rm_quota); 1405 1453 1406 - mlog_exit(status); 1407 1454 /* no one is callint kthread_stop() for us so the kthread() api 1408 1455 * requires that we call do_exit(). And it isn't exported, but 1409 1456 * complete_and_exit() seems to be a minimal wrapper around it. */ ··· 1412 1461 1413 1462 void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) 1414 1463 { 1415 - mlog_entry("(node_num=%d, osb->node_num = %d)\n", 1416 - node_num, osb->node_num); 1417 - 1418 1464 mutex_lock(&osb->recovery_lock); 1465 + 1466 + trace_ocfs2_recovery_thread(node_num, osb->node_num, 1467 + osb->disable_recovery, osb->recovery_thread_task, 1468 + osb->disable_recovery ? 
1469 + -1 : ocfs2_recovery_map_set(osb, node_num)); 1470 + 1419 1471 if (osb->disable_recovery) 1420 1472 goto out; 1421 - 1422 - /* People waiting on recovery will wait on 1423 - * the recovery map to empty. */ 1424 - if (ocfs2_recovery_map_set(osb, node_num)) 1425 - mlog(0, "node %d already in recovery map.\n", node_num); 1426 - 1427 - mlog(0, "starting recovery thread...\n"); 1428 1473 1429 1474 if (osb->recovery_thread_task) 1430 1475 goto out; ··· 1435 1488 out: 1436 1489 mutex_unlock(&osb->recovery_lock); 1437 1490 wake_up(&osb->recovery_event); 1438 - 1439 - mlog_exit_void(); 1440 1491 } 1441 1492 1442 1493 static int ocfs2_read_journal_inode(struct ocfs2_super *osb, ··· 1508 1563 * If not, it needs recovery. 1509 1564 */ 1510 1565 if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) { 1511 - mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num, 1566 + trace_ocfs2_replay_journal_recovered(slot_num, 1512 1567 osb->slot_recovery_generations[slot_num], slot_reco_gen); 1513 1568 osb->slot_recovery_generations[slot_num] = slot_reco_gen; 1514 1569 status = -EBUSY; ··· 1519 1574 1520 1575 status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); 1521 1576 if (status < 0) { 1522 - mlog(0, "status returned from ocfs2_inode_lock=%d\n", status); 1577 + trace_ocfs2_replay_journal_lock_err(status); 1523 1578 if (status != -ERESTARTSYS) 1524 1579 mlog(ML_ERROR, "Could not lock journal!\n"); 1525 1580 goto done; ··· 1532 1587 slot_reco_gen = ocfs2_get_recovery_generation(fe); 1533 1588 1534 1589 if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) { 1535 - mlog(0, "No recovery required for node %d\n", node_num); 1590 + trace_ocfs2_replay_journal_skip(node_num); 1536 1591 /* Refresh recovery generation for the slot */ 1537 1592 osb->slot_recovery_generations[slot_num] = slot_reco_gen; 1538 1593 goto done; ··· 1553 1608 goto done; 1554 1609 } 1555 1610 1556 - mlog(0, "calling journal_init_inode\n"); 1557 1611 journal = jbd2_journal_init_inode(inode); 
1558 1612 if (journal == NULL) { 1559 1613 mlog(ML_ERROR, "Linux journal layer error\n"); ··· 1572 1628 ocfs2_clear_journal_error(osb->sb, journal, slot_num); 1573 1629 1574 1630 /* wipe the journal */ 1575 - mlog(0, "flushing the journal.\n"); 1576 1631 jbd2_journal_lock_updates(journal); 1577 1632 status = jbd2_journal_flush(journal); 1578 1633 jbd2_journal_unlock_updates(journal); ··· 1608 1665 1609 1666 brelse(bh); 1610 1667 1611 - mlog_exit(status); 1612 1668 return status; 1613 1669 } 1614 1670 ··· 1630 1688 struct ocfs2_dinode *la_copy = NULL; 1631 1689 struct ocfs2_dinode *tl_copy = NULL; 1632 1690 1633 - mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n", 1634 - node_num, slot_num, osb->node_num); 1691 + trace_ocfs2_recover_node(node_num, slot_num, osb->node_num); 1635 1692 1636 1693 /* Should not ever be called to recover ourselves -- in that 1637 1694 * case we should've called ocfs2_journal_load instead. */ ··· 1639 1698 status = ocfs2_replay_journal(osb, node_num, slot_num); 1640 1699 if (status < 0) { 1641 1700 if (status == -EBUSY) { 1642 - mlog(0, "Skipping recovery for slot %u (node %u) " 1643 - "as another node has recovered it\n", slot_num, 1644 - node_num); 1701 + trace_ocfs2_recover_node_skip(slot_num, node_num); 1645 1702 status = 0; 1646 1703 goto done; 1647 1704 } ··· 1674 1735 status = 0; 1675 1736 done: 1676 1737 1677 - mlog_exit(status); 1678 1738 return status; 1679 1739 } 1680 1740 ··· 1746 1808 spin_lock(&osb->osb_lock); 1747 1809 osb->slot_recovery_generations[i] = gen; 1748 1810 1749 - mlog(0, "Slot %u recovery generation is %u\n", i, 1750 - osb->slot_recovery_generations[i]); 1811 + trace_ocfs2_mark_dead_nodes(i, 1812 + osb->slot_recovery_generations[i]); 1751 1813 1752 1814 if (i == osb->slot_num) { 1753 1815 spin_unlock(&osb->osb_lock); ··· 1783 1845 1784 1846 status = 0; 1785 1847 bail: 1786 - mlog_exit(status); 1787 1848 return status; 1788 1849 } 1789 1850 ··· 1821 1884 1822 1885 os = &osb->osb_orphan_scan; 1823 1886 
1824 - mlog(0, "Begin orphan scan\n"); 1825 - 1826 1887 if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE) 1827 1888 goto out; 1889 + 1890 + trace_ocfs2_queue_orphan_scan_begin(os->os_count, os->os_seqno, 1891 + atomic_read(&os->os_state)); 1828 1892 1829 1893 status = ocfs2_orphan_scan_lock(osb, &seqno); 1830 1894 if (status < 0) { ··· 1856 1918 unlock: 1857 1919 ocfs2_orphan_scan_unlock(osb, seqno); 1858 1920 out: 1859 - mlog(0, "Orphan scan completed\n"); 1921 + trace_ocfs2_queue_orphan_scan_end(os->os_count, os->os_seqno, 1922 + atomic_read(&os->os_state)); 1860 1923 return; 1861 1924 } 1862 1925 ··· 1941 2002 if (IS_ERR(iter)) 1942 2003 return 0; 1943 2004 1944 - mlog(0, "queue orphan %llu\n", 1945 - (unsigned long long)OCFS2_I(iter)->ip_blkno); 2005 + trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno); 1946 2006 /* No locking is required for the next_orphan queue as there 1947 2007 * is only ever a single process doing orphan recovery. */ 1948 2008 OCFS2_I(iter)->ip_next_orphan = p->head; ··· 2057 2119 struct inode *iter; 2058 2120 struct ocfs2_inode_info *oi; 2059 2121 2060 - mlog(0, "Recover inodes from orphan dir in slot %d\n", slot); 2122 + trace_ocfs2_recover_orphans(slot); 2061 2123 2062 2124 ocfs2_mark_recovering_orphan_dir(osb, slot); 2063 2125 ret = ocfs2_queue_orphans(osb, slot, &inode); ··· 2070 2132 2071 2133 while (inode) { 2072 2134 oi = OCFS2_I(inode); 2073 - mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno); 2135 + trace_ocfs2_recover_orphans_iput( 2136 + (unsigned long long)oi->ip_blkno); 2074 2137 2075 2138 iter = oi->ip_next_orphan; 2076 2139 ··· 2109 2170 * MOUNTED flag, but this is set right before 2110 2171 * dismount_volume() so we can trust it. */ 2111 2172 if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) { 2173 + trace_ocfs2_wait_on_mount(VOLUME_DISABLED); 2112 2174 mlog(0, "mount error, exiting!\n"); 2113 2175 return -EBUSY; 2114 2176 }
+43 -66
fs/ocfs2/localalloc.c
··· 29 29 #include <linux/highmem.h> 30 30 #include <linux/bitops.h> 31 31 32 - #define MLOG_MASK_PREFIX ML_DISK_ALLOC 33 32 #include <cluster/masklog.h> 34 33 35 34 #include "ocfs2.h" ··· 42 43 #include "suballoc.h" 43 44 #include "super.h" 44 45 #include "sysfile.h" 46 + #include "ocfs2_trace.h" 45 47 46 48 #include "buffer_head_io.h" 47 49 ··· 201 201 la_max_mb = ocfs2_clusters_to_megabytes(sb, 202 202 ocfs2_local_alloc_size(sb) * 8); 203 203 204 - mlog(0, "requested: %dM, max: %uM, default: %uM\n", 205 - requested_mb, la_max_mb, la_default_mb); 204 + trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb); 206 205 207 206 if (requested_mb == -1) { 208 207 /* No user request - use defaults */ ··· 275 276 276 277 ret = 1; 277 278 bail: 278 - mlog(0, "state=%d, bits=%llu, la_bits=%d, ret=%d\n", 279 - osb->local_alloc_state, (unsigned long long)bits, la_bits, ret); 279 + trace_ocfs2_alloc_should_use_local( 280 + (unsigned long long)bits, osb->local_alloc_state, la_bits, ret); 280 281 spin_unlock(&osb->osb_lock); 281 282 return ret; 282 283 } ··· 289 290 u32 num_used; 290 291 struct inode *inode = NULL; 291 292 struct ocfs2_local_alloc *la; 292 - 293 - mlog_entry_void(); 294 293 295 294 if (osb->local_alloc_bits == 0) 296 295 goto bail; ··· 361 364 if (inode) 362 365 iput(inode); 363 366 364 - mlog(0, "Local alloc window bits = %d\n", osb->local_alloc_bits); 367 + trace_ocfs2_load_local_alloc(osb->local_alloc_bits); 365 368 366 - mlog_exit(status); 369 + if (status) 370 + mlog_errno(status); 367 371 return status; 368 372 } 369 373 ··· 385 387 struct inode *main_bm_inode = NULL; 386 388 struct ocfs2_dinode *alloc_copy = NULL; 387 389 struct ocfs2_dinode *alloc = NULL; 388 - 389 - mlog_entry_void(); 390 390 391 391 cancel_delayed_work(&osb->la_enable_wq); 392 392 flush_workqueue(ocfs2_wq); ··· 478 482 479 483 if (alloc_copy) 480 484 kfree(alloc_copy); 481 - 482 - mlog_exit_void(); 483 485 } 484 486 485 487 /* ··· 496 502 struct inode *inode = NULL; 497 503 
struct ocfs2_dinode *alloc; 498 504 499 - mlog_entry("(slot_num = %d)\n", slot_num); 505 + trace_ocfs2_begin_local_alloc_recovery(slot_num); 500 506 501 507 *alloc_copy = NULL; 502 508 ··· 546 552 iput(inode); 547 553 } 548 554 549 - mlog_exit(status); 555 + if (status) 556 + mlog_errno(status); 550 557 return status; 551 558 } 552 559 ··· 564 569 handle_t *handle; 565 570 struct buffer_head *main_bm_bh = NULL; 566 571 struct inode *main_bm_inode; 567 - 568 - mlog_entry_void(); 569 572 570 573 main_bm_inode = ocfs2_get_system_file_inode(osb, 571 574 GLOBAL_BITMAP_SYSTEM_INODE, ··· 613 620 out: 614 621 if (!status) 615 622 ocfs2_init_steal_slots(osb); 616 - mlog_exit(status); 623 + if (status) 624 + mlog_errno(status); 617 625 return status; 618 626 } 619 627 ··· 633 639 struct ocfs2_dinode *alloc; 634 640 struct inode *local_alloc_inode; 635 641 unsigned int free_bits; 636 - 637 - mlog_entry_void(); 638 642 639 643 BUG_ON(!ac); 640 644 ··· 704 712 goto bail; 705 713 } 706 714 707 - if (ac->ac_max_block) 708 - mlog(0, "Calling in_range for max block %llu\n", 709 - (unsigned long long)ac->ac_max_block); 710 - 711 715 ac->ac_inode = local_alloc_inode; 712 716 /* We should never use localalloc from another slot */ 713 717 ac->ac_alloc_slot = osb->slot_num; ··· 717 729 iput(local_alloc_inode); 718 730 } 719 731 720 - mlog(0, "bits=%d, slot=%d, ret=%d\n", bits_wanted, osb->slot_num, 721 - status); 732 + trace_ocfs2_reserve_local_alloc_bits( 733 + (unsigned long long)ac->ac_max_block, 734 + bits_wanted, osb->slot_num, status); 722 735 723 - mlog_exit(status); 736 + if (status) 737 + mlog_errno(status); 724 738 return status; 725 739 } 726 740 ··· 739 749 struct ocfs2_dinode *alloc; 740 750 struct ocfs2_local_alloc *la; 741 751 742 - mlog_entry_void(); 743 752 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL); 744 753 745 754 local_alloc_inode = ac->ac_inode; ··· 777 788 ocfs2_journal_dirty(handle, osb->local_alloc_bh); 778 789 779 790 bail: 780 - mlog_exit(status); 791 + if 
(status) 792 + mlog_errno(status); 781 793 return status; 782 794 } 783 795 ··· 789 799 u32 count = 0; 790 800 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); 791 801 792 - mlog_entry_void(); 793 - 794 802 buffer = la->la_bitmap; 795 803 for (i = 0; i < le16_to_cpu(la->la_size); i++) 796 804 count += hweight8(buffer[i]); 797 805 798 - mlog_exit(count); 806 + trace_ocfs2_local_alloc_count_bits(count); 799 807 return count; 800 808 } 801 809 ··· 808 820 void *bitmap = NULL; 809 821 struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap; 810 822 811 - mlog_entry("(numbits wanted = %u)\n", *numbits); 812 - 813 823 if (!alloc->id1.bitmap1.i_total) { 814 - mlog(0, "No bits in my window!\n"); 815 824 bitoff = -1; 816 825 goto bail; 817 826 } ··· 868 883 } 869 884 } 870 885 871 - mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff, 872 - numfound); 886 + trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound); 873 887 874 888 if (numfound == *numbits) 875 889 bitoff = startoff - numfound; ··· 879 895 if (local_resv) 880 896 ocfs2_resv_discard(resmap, resv); 881 897 882 - mlog_exit(bitoff); 898 + trace_ocfs2_local_alloc_find_clear_bits(*numbits, 899 + le32_to_cpu(alloc->id1.bitmap1.i_total), 900 + bitoff, numfound); 901 + 883 902 return bitoff; 884 903 } 885 904 ··· 890 903 { 891 904 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); 892 905 int i; 893 - mlog_entry_void(); 894 906 895 907 alloc->id1.bitmap1.i_total = 0; 896 908 alloc->id1.bitmap1.i_used = 0; 897 909 la->la_bm_off = 0; 898 910 for(i = 0; i < le16_to_cpu(la->la_size); i++) 899 911 la->la_bitmap[i] = 0; 900 - 901 - mlog_exit_void(); 902 912 } 903 913 904 914 #if 0 ··· 936 952 void *bitmap; 937 953 struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); 938 954 939 - mlog_entry("total = %u, used = %u\n", 940 - le32_to_cpu(alloc->id1.bitmap1.i_total), 941 - le32_to_cpu(alloc->id1.bitmap1.i_used)); 955 + trace_ocfs2_sync_local_to_main( 956 + 
le32_to_cpu(alloc->id1.bitmap1.i_total), 957 + le32_to_cpu(alloc->id1.bitmap1.i_used)); 942 958 943 959 if (!alloc->id1.bitmap1.i_total) { 944 - mlog(0, "nothing to sync!\n"); 945 960 goto bail; 946 961 } 947 962 948 963 if (le32_to_cpu(alloc->id1.bitmap1.i_used) == 949 964 le32_to_cpu(alloc->id1.bitmap1.i_total)) { 950 - mlog(0, "all bits were taken!\n"); 951 965 goto bail; 952 966 } 953 967 ··· 967 985 ocfs2_clusters_to_blocks(osb->sb, 968 986 start - count); 969 987 970 - mlog(0, "freeing %u bits starting at local alloc bit " 971 - "%u (la_start_blk = %llu, blkno = %llu)\n", 988 + trace_ocfs2_sync_local_to_main_free( 972 989 count, start - count, 973 990 (unsigned long long)la_start_blk, 974 991 (unsigned long long)blkno); ··· 988 1007 } 989 1008 990 1009 bail: 991 - mlog_exit(status); 1010 + if (status) 1011 + mlog_errno(status); 992 1012 return status; 993 1013 } 994 1014 ··· 1114 1132 *ac = NULL; 1115 1133 } 1116 1134 1117 - mlog_exit(status); 1135 + if (status) 1136 + mlog_errno(status); 1118 1137 return status; 1119 1138 } 1120 1139 ··· 1131 1148 struct ocfs2_dinode *alloc = NULL; 1132 1149 struct ocfs2_local_alloc *la; 1133 1150 1134 - mlog_entry_void(); 1135 - 1136 1151 alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; 1137 1152 la = OCFS2_LOCAL_ALLOC(alloc); 1138 1153 1139 - if (alloc->id1.bitmap1.i_total) 1140 - mlog(0, "asking me to alloc a new window over a non-empty " 1141 - "one\n"); 1142 - 1143 - mlog(0, "Allocating %u clusters for a new window.\n", 1144 - osb->local_alloc_bits); 1154 + trace_ocfs2_local_alloc_new_window( 1155 + le32_to_cpu(alloc->id1.bitmap1.i_total), 1156 + osb->local_alloc_bits); 1145 1157 1146 1158 /* Instruct the allocation code to try the most recently used 1147 1159 * cluster group. 
We'll re-record the group used this pass ··· 1198 1220 ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count, 1199 1221 OCFS2_LOCAL_ALLOC(alloc)->la_bitmap); 1200 1222 1201 - mlog(0, "New window allocated:\n"); 1202 - mlog(0, "window la_bm_off = %u\n", 1203 - OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); 1204 - mlog(0, "window bits = %u\n", le32_to_cpu(alloc->id1.bitmap1.i_total)); 1223 + trace_ocfs2_local_alloc_new_window_result( 1224 + OCFS2_LOCAL_ALLOC(alloc)->la_bm_off, 1225 + le32_to_cpu(alloc->id1.bitmap1.i_total)); 1205 1226 1206 1227 bail: 1207 - mlog_exit(status); 1228 + if (status) 1229 + mlog_errno(status); 1208 1230 return status; 1209 1231 } 1210 1232 ··· 1220 1242 struct ocfs2_dinode *alloc; 1221 1243 struct ocfs2_dinode *alloc_copy = NULL; 1222 1244 struct ocfs2_alloc_context *ac = NULL; 1223 - 1224 - mlog_entry_void(); 1225 1245 1226 1246 ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE); 1227 1247 ··· 1300 1324 if (ac) 1301 1325 ocfs2_free_alloc_context(ac); 1302 1326 1303 - mlog_exit(status); 1327 + if (status) 1328 + mlog_errno(status); 1304 1329 return status; 1305 1330 } 1306 1331
-1
fs/ocfs2/locks.c
··· 26 26 #include <linux/fs.h> 27 27 #include <linux/fcntl.h> 28 28 29 - #define MLOG_MASK_PREFIX ML_INODE 30 29 #include <cluster/masklog.h> 31 30 32 31 #include "ocfs2.h"
+3 -4
fs/ocfs2/mmap.c
··· 31 31 #include <linux/signal.h> 32 32 #include <linux/rbtree.h> 33 33 34 - #define MLOG_MASK_PREFIX ML_FILE_IO 35 34 #include <cluster/masklog.h> 36 35 37 36 #include "ocfs2.h" ··· 41 42 #include "inode.h" 42 43 #include "mmap.h" 43 44 #include "super.h" 45 + #include "ocfs2_trace.h" 44 46 45 47 46 48 static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) ··· 49 49 sigset_t oldset; 50 50 int ret; 51 51 52 - mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff); 53 - 54 52 ocfs2_block_signals(&oldset); 55 53 ret = filemap_fault(area, vmf); 56 54 ocfs2_unblock_signals(&oldset); 57 55 58 - mlog_exit_ptr(vmf->page); 56 + trace_ocfs2_fault(OCFS2_I(area->vm_file->f_mapping->host)->ip_blkno, 57 + area, vmf->page, vmf->pgoff); 59 58 return ret; 60 59 } 61 60
+87 -88
fs/ocfs2/namei.c
··· 42 42 #include <linux/highmem.h> 43 43 #include <linux/quotaops.h> 44 44 45 - #define MLOG_MASK_PREFIX ML_NAMEI 46 45 #include <cluster/masklog.h> 47 46 48 47 #include "ocfs2.h" ··· 62 63 #include "uptodate.h" 63 64 #include "xattr.h" 64 65 #include "acl.h" 66 + #include "ocfs2_trace.h" 65 67 66 68 #include "buffer_head_io.h" 67 69 ··· 106 106 struct dentry *ret; 107 107 struct ocfs2_inode_info *oi; 108 108 109 - mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, 110 - dentry->d_name.len, dentry->d_name.name); 109 + trace_ocfs2_lookup(dir, dentry, dentry->d_name.len, 110 + dentry->d_name.name, 111 + (unsigned long long)OCFS2_I(dir)->ip_blkno, 0); 111 112 112 113 if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) { 113 114 ret = ERR_PTR(-ENAMETOOLONG); 114 115 goto bail; 115 116 } 116 - 117 - mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len, 118 - dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno); 119 117 120 118 status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT); 121 119 if (status < 0) { ··· 180 182 181 183 bail: 182 184 183 - mlog_exit_ptr(ret); 185 + trace_ocfs2_lookup_ret(ret); 184 186 185 187 return ret; 186 188 } ··· 233 235 sigset_t oldset; 234 236 int did_block_signals = 0; 235 237 236 - mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode, 237 - (unsigned long)dev, dentry->d_name.len, 238 - dentry->d_name.name); 238 + trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name, 239 + (unsigned long long)OCFS2_I(dir)->ip_blkno, 240 + (unsigned long)dev, mode); 239 241 240 242 dquot_initialize(dir); 241 243 ··· 352 354 goto leave; 353 355 did_quota_inode = 1; 354 356 355 - mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, 356 - inode->i_mode, (unsigned long)dev, dentry->d_name.len, 357 - dentry->d_name.name); 358 - 359 357 /* do the real work now. 
*/ 360 358 status = ocfs2_mknod_locked(osb, dir, inode, dev, 361 359 &new_fe_bh, parent_fe_bh, handle, ··· 430 436 if (did_block_signals) 431 437 ocfs2_unblock_signals(&oldset); 432 438 433 - if (status == -ENOSPC) 434 - mlog(0, "Disk is full\n"); 435 - 436 439 brelse(new_fe_bh); 437 440 brelse(parent_fe_bh); 438 441 kfree(si.name); ··· 457 466 iput(inode); 458 467 } 459 468 460 - mlog_exit(status); 469 + if (status) 470 + mlog_errno(status); 461 471 462 472 return status; 463 473 } ··· 569 577 } 570 578 } 571 579 572 - mlog_exit(status); 580 + if (status) 581 + mlog_errno(status); 573 582 return status; 574 583 } 575 584 ··· 608 615 { 609 616 int ret; 610 617 611 - mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode, 612 - dentry->d_name.len, dentry->d_name.name); 618 + trace_ocfs2_mkdir(dir, dentry, dentry->d_name.len, dentry->d_name.name, 619 + OCFS2_I(dir)->ip_blkno, mode); 613 620 ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0); 614 - mlog_exit(ret); 621 + if (ret) 622 + mlog_errno(ret); 615 623 616 624 return ret; 617 625 } ··· 624 630 { 625 631 int ret; 626 632 627 - mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode, 628 - dentry->d_name.len, dentry->d_name.name); 633 + trace_ocfs2_create(dir, dentry, dentry->d_name.len, dentry->d_name.name, 634 + (unsigned long long)OCFS2_I(dir)->ip_blkno, mode); 629 635 ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0); 630 - mlog_exit(ret); 636 + if (ret) 637 + mlog_errno(ret); 631 638 632 639 return ret; 633 640 } ··· 647 652 struct ocfs2_dir_lookup_result lookup = { NULL, }; 648 653 sigset_t oldset; 649 654 650 - mlog_entry("(inode=%lu, old='%.*s' new='%.*s')\n", inode->i_ino, 651 - old_dentry->d_name.len, old_dentry->d_name.name, 652 - dentry->d_name.len, dentry->d_name.name); 655 + trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno, 656 + old_dentry->d_name.len, old_dentry->d_name.name, 657 + dentry->d_name.len, dentry->d_name.name); 653 658 654 659 if (S_ISDIR(inode->i_mode)) 655 660 
return -EPERM; ··· 752 757 753 758 ocfs2_free_dir_lookup_result(&lookup); 754 759 755 - mlog_exit(err); 760 + if (err) 761 + mlog_errno(err); 756 762 757 763 return err; 758 764 } ··· 805 809 struct ocfs2_dir_lookup_result lookup = { NULL, }; 806 810 struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; 807 811 808 - mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, 809 - dentry->d_name.len, dentry->d_name.name); 812 + trace_ocfs2_unlink(dir, dentry, dentry->d_name.len, 813 + dentry->d_name.name, 814 + (unsigned long long)OCFS2_I(dir)->ip_blkno, 815 + (unsigned long long)OCFS2_I(inode)->ip_blkno); 810 816 811 817 dquot_initialize(dir); 812 818 813 819 BUG_ON(dentry->d_parent->d_inode != dir); 814 820 815 - mlog(0, "ino = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); 816 - 817 - if (inode == osb->root_inode) { 818 - mlog(0, "Cannot delete the root directory\n"); 821 + if (inode == osb->root_inode) 819 822 return -EPERM; 820 - } 821 823 822 824 status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1, 823 825 OI_LS_PARENT); ··· 837 843 if (OCFS2_I(inode)->ip_blkno != blkno) { 838 844 status = -ENOENT; 839 845 840 - mlog(0, "ip_blkno %llu != dirent blkno %llu ip_flags = %x\n", 841 - (unsigned long long)OCFS2_I(inode)->ip_blkno, 842 - (unsigned long long)blkno, OCFS2_I(inode)->ip_flags); 846 + trace_ocfs2_unlink_noent( 847 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 848 + (unsigned long long)blkno, 849 + OCFS2_I(inode)->ip_flags); 843 850 goto leave; 844 851 } 845 852 ··· 949 954 ocfs2_free_dir_lookup_result(&orphan_insert); 950 955 ocfs2_free_dir_lookup_result(&lookup); 951 956 952 - mlog_exit(status); 957 + if (status) 958 + mlog_errno(status); 953 959 954 960 return status; 955 961 } ··· 971 975 struct buffer_head **tmpbh; 972 976 struct inode *tmpinode; 973 977 974 - mlog_entry("(inode1 = %llu, inode2 = %llu)\n", 975 - (unsigned long long)oi1->ip_blkno, 976 - (unsigned long long)oi2->ip_blkno); 978 + trace_ocfs2_double_lock((unsigned long 
long)oi1->ip_blkno, 979 + (unsigned long long)oi2->ip_blkno); 977 980 978 981 if (*bh1) 979 982 *bh1 = NULL; ··· 983 988 if (oi1->ip_blkno != oi2->ip_blkno) { 984 989 if (oi1->ip_blkno < oi2->ip_blkno) { 985 990 /* switch id1 and id2 around */ 986 - mlog(0, "switching them around...\n"); 987 991 tmpbh = bh2; 988 992 bh2 = bh1; 989 993 bh1 = tmpbh; ··· 1018 1024 mlog_errno(status); 1019 1025 } 1020 1026 1027 + trace_ocfs2_double_lock_end( 1028 + (unsigned long long)OCFS2_I(inode1)->ip_blkno, 1029 + (unsigned long long)OCFS2_I(inode2)->ip_blkno); 1030 + 1021 1031 bail: 1022 - mlog_exit(status); 1032 + if (status) 1033 + mlog_errno(status); 1023 1034 return status; 1024 1035 } 1025 1036 ··· 1066 1067 /* At some point it might be nice to break this function up a 1067 1068 * bit. */ 1068 1069 1069 - mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p, from='%.*s' to='%.*s')\n", 1070 - old_dir, old_dentry, new_dir, new_dentry, 1071 - old_dentry->d_name.len, old_dentry->d_name.name, 1072 - new_dentry->d_name.len, new_dentry->d_name.name); 1070 + trace_ocfs2_rename(old_dir, old_dentry, new_dir, new_dentry, 1071 + old_dentry->d_name.len, old_dentry->d_name.name, 1072 + new_dentry->d_name.len, new_dentry->d_name.name); 1073 1073 1074 1074 dquot_initialize(old_dir); 1075 1075 dquot_initialize(new_dir); ··· 1225 1227 if (!new_inode) { 1226 1228 status = -EACCES; 1227 1229 1228 - mlog(0, "We found an inode for name %.*s but VFS " 1229 - "didn't give us one.\n", new_dentry->d_name.len, 1230 - new_dentry->d_name.name); 1230 + trace_ocfs2_rename_target_exists(new_dentry->d_name.len, 1231 + new_dentry->d_name.name); 1231 1232 goto bail; 1232 1233 } 1233 1234 1234 1235 if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) { 1235 1236 status = -EACCES; 1236 1237 1237 - mlog(0, "Inode %llu and dir %llu disagree. 
flags = %x\n", 1238 + trace_ocfs2_rename_disagree( 1238 1239 (unsigned long long)OCFS2_I(new_inode)->ip_blkno, 1239 1240 (unsigned long long)newfe_blkno, 1240 1241 OCFS2_I(new_inode)->ip_flags); ··· 1256 1259 1257 1260 newfe = (struct ocfs2_dinode *) newfe_bh->b_data; 1258 1261 1259 - mlog(0, "aha rename over existing... new_blkno=%llu " 1260 - "newfebh=%p bhblocknr=%llu\n", 1262 + trace_ocfs2_rename_over_existing( 1261 1263 (unsigned long long)newfe_blkno, newfe_bh, newfe_bh ? 1262 1264 (unsigned long long)newfe_bh->b_blocknr : 0ULL); 1263 1265 ··· 1472 1476 brelse(old_dir_bh); 1473 1477 brelse(new_dir_bh); 1474 1478 1475 - mlog_exit(status); 1479 + if (status) 1480 + mlog_errno(status); 1476 1481 1477 1482 return status; 1478 1483 } ··· 1498 1501 * write i_size + 1 bytes. */ 1499 1502 blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; 1500 1503 1501 - mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n", 1502 - (unsigned long long)inode->i_blocks, 1503 - i_size_read(inode), blocks); 1504 + trace_ocfs2_create_symlink_data((unsigned long long)inode->i_blocks, 1505 + i_size_read(inode), blocks); 1504 1506 1505 1507 /* Sanity check -- make sure we're going to fit. 
*/ 1506 1508 if (bytes_left > ··· 1575 1579 kfree(bhs); 1576 1580 } 1577 1581 1578 - mlog_exit(status); 1582 + if (status) 1583 + mlog_errno(status); 1579 1584 return status; 1580 1585 } 1581 1586 ··· 1607 1610 sigset_t oldset; 1608 1611 int did_block_signals = 0; 1609 1612 1610 - mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir, 1611 - dentry, symname, dentry->d_name.len, dentry->d_name.name); 1613 + trace_ocfs2_symlink_begin(dir, dentry, symname, 1614 + dentry->d_name.len, dentry->d_name.name); 1612 1615 1613 1616 dquot_initialize(dir); 1614 1617 ··· 1710 1713 goto bail; 1711 1714 did_quota_inode = 1; 1712 1715 1713 - mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, 1714 - inode->i_mode, dentry->d_name.len, 1715 - dentry->d_name.name); 1716 + trace_ocfs2_symlink_create(dir, dentry, dentry->d_name.len, 1717 + dentry->d_name.name, 1718 + (unsigned long long)OCFS2_I(dir)->ip_blkno, 1719 + inode->i_mode); 1716 1720 1717 1721 status = ocfs2_mknod_locked(osb, dir, inode, 1718 1722 0, &new_fe_bh, parent_fe_bh, handle, ··· 1833 1835 iput(inode); 1834 1836 } 1835 1837 1836 - mlog_exit(status); 1838 + if (status) 1839 + mlog_errno(status); 1837 1840 1838 1841 return status; 1839 1842 } ··· 1842 1843 static int ocfs2_blkno_stringify(u64 blkno, char *name) 1843 1844 { 1844 1845 int status, namelen; 1845 - 1846 - mlog_entry_void(); 1847 1846 1848 1847 namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx", 1849 1848 (long long)blkno); ··· 1859 1862 goto bail; 1860 1863 } 1861 1864 1862 - mlog(0, "built filename '%s' for orphan dir (len=%d)\n", name, 1863 - namelen); 1865 + trace_ocfs2_blkno_stringify(blkno, name, namelen); 1864 1866 1865 1867 status = 0; 1866 1868 bail: 1867 - mlog_exit(status); 1869 + if (status < 0) 1870 + mlog_errno(status); 1868 1871 return status; 1869 1872 } 1870 1873 ··· 1977 1980 iput(orphan_dir_inode); 1978 1981 } 1979 1982 1980 - mlog_exit(ret); 1983 + if (ret) 1984 + mlog_errno(ret); 1981 1985 return ret; 1982 1986 } 1983 
1987 ··· 1995 1997 struct ocfs2_dinode *orphan_fe; 1996 1998 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data; 1997 1999 1998 - mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); 2000 + trace_ocfs2_orphan_add_begin( 2001 + (unsigned long long)OCFS2_I(inode)->ip_blkno); 1999 2002 2000 2003 status = ocfs2_read_inode_block(orphan_dir_inode, &orphan_dir_bh); 2001 2004 if (status < 0) { ··· 2055 2056 2056 2057 ocfs2_journal_dirty(handle, fe_bh); 2057 2058 2058 - mlog(0, "Inode %llu orphaned in slot %d\n", 2059 - (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num); 2059 + trace_ocfs2_orphan_add_end((unsigned long long)OCFS2_I(inode)->ip_blkno, 2060 + osb->slot_num); 2060 2061 2061 2062 leave: 2062 2063 brelse(orphan_dir_bh); 2063 2064 2064 - mlog_exit(status); 2065 + if (status) 2066 + mlog_errno(status); 2065 2067 return status; 2066 2068 } 2067 2069 ··· 2078 2078 int status = 0; 2079 2079 struct ocfs2_dir_lookup_result lookup = { NULL, }; 2080 2080 2081 - mlog_entry_void(); 2082 - 2083 2081 status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); 2084 2082 if (status < 0) { 2085 2083 mlog_errno(status); 2086 2084 goto leave; 2087 2085 } 2088 2086 2089 - mlog(0, "removing '%s' from orphan dir %llu (namelen=%d)\n", 2090 - name, (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno, 2091 - OCFS2_ORPHAN_NAMELEN); 2087 + trace_ocfs2_orphan_del( 2088 + (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno, 2089 + name, OCFS2_ORPHAN_NAMELEN); 2092 2090 2093 2091 /* find it's spot in the orphan directory */ 2094 2092 status = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, orphan_dir_inode, ··· 2122 2124 leave: 2123 2125 ocfs2_free_dir_lookup_result(&lookup); 2124 2126 2125 - mlog_exit(status); 2127 + if (status) 2128 + mlog_errno(status); 2126 2129 return status; 2127 2130 } 2128 2131 ··· 2320 2321 iput(orphan_dir); 2321 2322 } 2322 2323 2323 - if (status == -ENOSPC) 2324 - mlog(0, "Disk is full\n"); 2325 - 2326 2324 if ((status < 0) && inode) 
{ 2327 2325 clear_nlink(inode); 2328 2326 iput(inode); ··· 2354 2358 struct buffer_head *di_bh = NULL; 2355 2359 struct ocfs2_dir_lookup_result lookup = { NULL, }; 2356 2360 2357 - mlog_entry("(0x%p, 0x%p, %.*s')\n", dir, dentry, 2358 - dentry->d_name.len, dentry->d_name.name); 2361 + trace_ocfs2_mv_orphaned_inode_to_new(dir, dentry, 2362 + dentry->d_name.len, dentry->d_name.name, 2363 + (unsigned long long)OCFS2_I(dir)->ip_blkno, 2364 + (unsigned long long)OCFS2_I(inode)->ip_blkno); 2359 2365 2360 2366 status = ocfs2_inode_lock(dir, &parent_di_bh, 1); 2361 2367 if (status < 0) { ··· 2474 2476 2475 2477 ocfs2_free_dir_lookup_result(&lookup); 2476 2478 2477 - mlog_exit(status); 2479 + if (status) 2480 + mlog_errno(status); 2478 2481 2479 2482 return status; 2480 2483 }
+14 -9
fs/ocfs2/ocfs2.h
··· 147 147 148 148 typedef void (*ocfs2_lock_callback)(int status, unsigned long data); 149 149 150 + #ifdef CONFIG_OCFS2_FS_STATS 151 + struct ocfs2_lock_stats { 152 + u64 ls_total; /* Total wait in NSEC */ 153 + u32 ls_gets; /* Num acquires */ 154 + u32 ls_fail; /* Num failed acquires */ 155 + 156 + /* Storing max wait in usecs saves 24 bytes per inode */ 157 + u32 ls_max; /* Max wait in USEC */ 158 + }; 159 + #endif 160 + 150 161 struct ocfs2_lock_res { 151 162 void *l_priv; 152 163 struct ocfs2_lock_res_ops *l_ops; ··· 193 182 struct list_head l_debug_list; 194 183 195 184 #ifdef CONFIG_OCFS2_FS_STATS 196 - unsigned long long l_lock_num_prmode; /* PR acquires */ 197 - unsigned long long l_lock_num_exmode; /* EX acquires */ 198 - unsigned int l_lock_num_prmode_failed; /* Failed PR gets */ 199 - unsigned int l_lock_num_exmode_failed; /* Failed EX gets */ 200 - unsigned long long l_lock_total_prmode; /* Tot wait for PR */ 201 - unsigned long long l_lock_total_exmode; /* Tot wait for EX */ 202 - unsigned int l_lock_max_prmode; /* Max wait for PR */ 203 - unsigned int l_lock_max_exmode; /* Max wait for EX */ 204 - unsigned int l_lock_refresh; /* Disk refreshes */ 185 + struct ocfs2_lock_stats l_lock_prmode; /* PR mode stats */ 186 + u32 l_lock_refresh; /* Disk refreshes */ 187 + struct ocfs2_lock_stats l_lock_exmode; /* EX mode stats */ 205 188 #endif 206 189 #ifdef CONFIG_DEBUG_LOCK_ALLOC 207 190 struct lockdep_map l_lockdep_map;
+2739
fs/ocfs2/ocfs2_trace.h
··· 1 + #undef TRACE_SYSTEM 2 + #define TRACE_SYSTEM ocfs2 3 + 4 + #if !defined(_TRACE_OCFS2_H) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _TRACE_OCFS2_H 6 + 7 + #include <linux/tracepoint.h> 8 + 9 + DECLARE_EVENT_CLASS(ocfs2__int, 10 + TP_PROTO(int num), 11 + TP_ARGS(num), 12 + TP_STRUCT__entry( 13 + __field(int, num) 14 + ), 15 + TP_fast_assign( 16 + __entry->num = num; 17 + ), 18 + TP_printk("%d", __entry->num) 19 + ); 20 + 21 + #define DEFINE_OCFS2_INT_EVENT(name) \ 22 + DEFINE_EVENT(ocfs2__int, name, \ 23 + TP_PROTO(int num), \ 24 + TP_ARGS(num)) 25 + 26 + DECLARE_EVENT_CLASS(ocfs2__uint, 27 + TP_PROTO(unsigned int num), 28 + TP_ARGS(num), 29 + TP_STRUCT__entry( 30 + __field( unsigned int, num ) 31 + ), 32 + TP_fast_assign( 33 + __entry->num = num; 34 + ), 35 + TP_printk("%u", __entry->num) 36 + ); 37 + 38 + #define DEFINE_OCFS2_UINT_EVENT(name) \ 39 + DEFINE_EVENT(ocfs2__uint, name, \ 40 + TP_PROTO(unsigned int num), \ 41 + TP_ARGS(num)) 42 + 43 + DECLARE_EVENT_CLASS(ocfs2__ull, 44 + TP_PROTO(unsigned long long blkno), 45 + TP_ARGS(blkno), 46 + TP_STRUCT__entry( 47 + __field(unsigned long long, blkno) 48 + ), 49 + TP_fast_assign( 50 + __entry->blkno = blkno; 51 + ), 52 + TP_printk("%llu", __entry->blkno) 53 + ); 54 + 55 + #define DEFINE_OCFS2_ULL_EVENT(name) \ 56 + DEFINE_EVENT(ocfs2__ull, name, \ 57 + TP_PROTO(unsigned long long num), \ 58 + TP_ARGS(num)) 59 + 60 + DECLARE_EVENT_CLASS(ocfs2__pointer, 61 + TP_PROTO(void *pointer), 62 + TP_ARGS(pointer), 63 + TP_STRUCT__entry( 64 + __field(void *, pointer) 65 + ), 66 + TP_fast_assign( 67 + __entry->pointer = pointer; 68 + ), 69 + TP_printk("%p", __entry->pointer) 70 + ); 71 + 72 + #define DEFINE_OCFS2_POINTER_EVENT(name) \ 73 + DEFINE_EVENT(ocfs2__pointer, name, \ 74 + TP_PROTO(void *pointer), \ 75 + TP_ARGS(pointer)) 76 + 77 + DECLARE_EVENT_CLASS(ocfs2__string, 78 + TP_PROTO(const char *name), 79 + TP_ARGS(name), 80 + TP_STRUCT__entry( 81 + __string(name,name) 82 + ), 83 + TP_fast_assign( 84 + 
__assign_str(name, name); 85 + ), 86 + TP_printk("%s", __get_str(name)) 87 + ); 88 + 89 + #define DEFINE_OCFS2_STRING_EVENT(name) \ 90 + DEFINE_EVENT(ocfs2__string, name, \ 91 + TP_PROTO(const char *name), \ 92 + TP_ARGS(name)) 93 + 94 + DECLARE_EVENT_CLASS(ocfs2__int_int, 95 + TP_PROTO(int value1, int value2), 96 + TP_ARGS(value1, value2), 97 + TP_STRUCT__entry( 98 + __field(int, value1) 99 + __field(int, value2) 100 + ), 101 + TP_fast_assign( 102 + __entry->value1 = value1; 103 + __entry->value2 = value2; 104 + ), 105 + TP_printk("%d %d", __entry->value1, __entry->value2) 106 + ); 107 + 108 + #define DEFINE_OCFS2_INT_INT_EVENT(name) \ 109 + DEFINE_EVENT(ocfs2__int_int, name, \ 110 + TP_PROTO(int val1, int val2), \ 111 + TP_ARGS(val1, val2)) 112 + 113 + DECLARE_EVENT_CLASS(ocfs2__uint_int, 114 + TP_PROTO(unsigned int value1, int value2), 115 + TP_ARGS(value1, value2), 116 + TP_STRUCT__entry( 117 + __field(unsigned int, value1) 118 + __field(int, value2) 119 + ), 120 + TP_fast_assign( 121 + __entry->value1 = value1; 122 + __entry->value2 = value2; 123 + ), 124 + TP_printk("%u %d", __entry->value1, __entry->value2) 125 + ); 126 + 127 + #define DEFINE_OCFS2_UINT_INT_EVENT(name) \ 128 + DEFINE_EVENT(ocfs2__uint_int, name, \ 129 + TP_PROTO(unsigned int val1, int val2), \ 130 + TP_ARGS(val1, val2)) 131 + 132 + DECLARE_EVENT_CLASS(ocfs2__uint_uint, 133 + TP_PROTO(unsigned int value1, unsigned int value2), 134 + TP_ARGS(value1, value2), 135 + TP_STRUCT__entry( 136 + __field(unsigned int, value1) 137 + __field(unsigned int, value2) 138 + ), 139 + TP_fast_assign( 140 + __entry->value1 = value1; 141 + __entry->value2 = value2; 142 + ), 143 + TP_printk("%u %u", __entry->value1, __entry->value2) 144 + ); 145 + 146 + #define DEFINE_OCFS2_UINT_UINT_EVENT(name) \ 147 + DEFINE_EVENT(ocfs2__uint_uint, name, \ 148 + TP_PROTO(unsigned int val1, unsigned int val2), \ 149 + TP_ARGS(val1, val2)) 150 + 151 + DECLARE_EVENT_CLASS(ocfs2__ull_uint, 152 + TP_PROTO(unsigned long long value1, 
unsigned int value2), 153 + TP_ARGS(value1, value2), 154 + TP_STRUCT__entry( 155 + __field(unsigned long long, value1) 156 + __field(unsigned int, value2) 157 + ), 158 + TP_fast_assign( 159 + __entry->value1 = value1; 160 + __entry->value2 = value2; 161 + ), 162 + TP_printk("%llu %u", __entry->value1, __entry->value2) 163 + ); 164 + 165 + #define DEFINE_OCFS2_ULL_UINT_EVENT(name) \ 166 + DEFINE_EVENT(ocfs2__ull_uint, name, \ 167 + TP_PROTO(unsigned long long val1, unsigned int val2), \ 168 + TP_ARGS(val1, val2)) 169 + 170 + DECLARE_EVENT_CLASS(ocfs2__ull_int, 171 + TP_PROTO(unsigned long long value1, int value2), 172 + TP_ARGS(value1, value2), 173 + TP_STRUCT__entry( 174 + __field(unsigned long long, value1) 175 + __field(int, value2) 176 + ), 177 + TP_fast_assign( 178 + __entry->value1 = value1; 179 + __entry->value2 = value2; 180 + ), 181 + TP_printk("%llu %d", __entry->value1, __entry->value2) 182 + ); 183 + 184 + #define DEFINE_OCFS2_ULL_INT_EVENT(name) \ 185 + DEFINE_EVENT(ocfs2__ull_int, name, \ 186 + TP_PROTO(unsigned long long val1, int val2), \ 187 + TP_ARGS(val1, val2)) 188 + 189 + DECLARE_EVENT_CLASS(ocfs2__ull_ull, 190 + TP_PROTO(unsigned long long value1, unsigned long long value2), 191 + TP_ARGS(value1, value2), 192 + TP_STRUCT__entry( 193 + __field(unsigned long long, value1) 194 + __field(unsigned long long, value2) 195 + ), 196 + TP_fast_assign( 197 + __entry->value1 = value1; 198 + __entry->value2 = value2; 199 + ), 200 + TP_printk("%llu %llu", __entry->value1, __entry->value2) 201 + ); 202 + 203 + #define DEFINE_OCFS2_ULL_ULL_EVENT(name) \ 204 + DEFINE_EVENT(ocfs2__ull_ull, name, \ 205 + TP_PROTO(unsigned long long val1, unsigned long long val2), \ 206 + TP_ARGS(val1, val2)) 207 + 208 + DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint, 209 + TP_PROTO(unsigned long long value1, 210 + unsigned long long value2, unsigned int value3), 211 + TP_ARGS(value1, value2, value3), 212 + TP_STRUCT__entry( 213 + __field(unsigned long long, value1) 214 + 
__field(unsigned long long, value2) 215 + __field(unsigned int, value3) 216 + ), 217 + TP_fast_assign( 218 + __entry->value1 = value1; 219 + __entry->value2 = value2; 220 + __entry->value3 = value3; 221 + ), 222 + TP_printk("%llu %llu %u", 223 + __entry->value1, __entry->value2, __entry->value3) 224 + ); 225 + 226 + #define DEFINE_OCFS2_ULL_ULL_UINT_EVENT(name) \ 227 + DEFINE_EVENT(ocfs2__ull_ull_uint, name, \ 228 + TP_PROTO(unsigned long long val1, \ 229 + unsigned long long val2, unsigned int val3), \ 230 + TP_ARGS(val1, val2, val3)) 231 + 232 + DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint, 233 + TP_PROTO(unsigned long long value1, 234 + unsigned int value2, unsigned int value3), 235 + TP_ARGS(value1, value2, value3), 236 + TP_STRUCT__entry( 237 + __field(unsigned long long, value1) 238 + __field(unsigned int, value2) 239 + __field(unsigned int, value3) 240 + ), 241 + TP_fast_assign( 242 + __entry->value1 = value1; 243 + __entry->value2 = value2; 244 + __entry->value3 = value3; 245 + ), 246 + TP_printk("%llu %u %u", __entry->value1, 247 + __entry->value2, __entry->value3) 248 + ); 249 + 250 + #define DEFINE_OCFS2_ULL_UINT_UINT_EVENT(name) \ 251 + DEFINE_EVENT(ocfs2__ull_uint_uint, name, \ 252 + TP_PROTO(unsigned long long val1, \ 253 + unsigned int val2, unsigned int val3), \ 254 + TP_ARGS(val1, val2, val3)) 255 + 256 + DECLARE_EVENT_CLASS(ocfs2__uint_uint_uint, 257 + TP_PROTO(unsigned int value1, unsigned int value2, 258 + unsigned int value3), 259 + TP_ARGS(value1, value2, value3), 260 + TP_STRUCT__entry( 261 + __field( unsigned int, value1 ) 262 + __field( unsigned int, value2 ) 263 + __field( unsigned int, value3 ) 264 + ), 265 + TP_fast_assign( 266 + __entry->value1 = value1; 267 + __entry->value2 = value2; 268 + __entry->value3 = value3; 269 + ), 270 + TP_printk("%u %u %u", __entry->value1, __entry->value2, __entry->value3) 271 + ); 272 + 273 + #define DEFINE_OCFS2_UINT_UINT_UINT_EVENT(name) \ 274 + DEFINE_EVENT(ocfs2__uint_uint_uint, name, \ 275 + 
TP_PROTO(unsigned int value1, unsigned int value2, \ 276 + unsigned int value3), \ 277 + TP_ARGS(value1, value2, value3)) 278 + 279 + DECLARE_EVENT_CLASS(ocfs2__ull_ull_ull, 280 + TP_PROTO(unsigned long long value1, 281 + unsigned long long value2, unsigned long long value3), 282 + TP_ARGS(value1, value2, value3), 283 + TP_STRUCT__entry( 284 + __field(unsigned long long, value1) 285 + __field(unsigned long long, value2) 286 + __field(unsigned long long, value3) 287 + ), 288 + TP_fast_assign( 289 + __entry->value1 = value1; 290 + __entry->value2 = value2; 291 + __entry->value3 = value3; 292 + ), 293 + TP_printk("%llu %llu %llu", 294 + __entry->value1, __entry->value2, __entry->value3) 295 + ); 296 + 297 + #define DEFINE_OCFS2_ULL_ULL_ULL_EVENT(name) \ 298 + DEFINE_EVENT(ocfs2__ull_ull_ull, name, \ 299 + TP_PROTO(unsigned long long value1, unsigned long long value2, \ 300 + unsigned long long value3), \ 301 + TP_ARGS(value1, value2, value3)) 302 + 303 + DECLARE_EVENT_CLASS(ocfs2__ull_int_int_int, 304 + TP_PROTO(unsigned long long ull, int value1, int value2, int value3), 305 + TP_ARGS(ull, value1, value2, value3), 306 + TP_STRUCT__entry( 307 + __field( unsigned long long, ull ) 308 + __field( int, value1 ) 309 + __field( int, value2 ) 310 + __field( int, value3 ) 311 + ), 312 + TP_fast_assign( 313 + __entry->ull = ull; 314 + __entry->value1 = value1; 315 + __entry->value2 = value2; 316 + __entry->value3 = value3; 317 + ), 318 + TP_printk("%llu %d %d %d", 319 + __entry->ull, __entry->value1, 320 + __entry->value2, __entry->value3) 321 + ); 322 + 323 + #define DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(name) \ 324 + DEFINE_EVENT(ocfs2__ull_int_int_int, name, \ 325 + TP_PROTO(unsigned long long ull, int value1, \ 326 + int value2, int value3), \ 327 + TP_ARGS(ull, value1, value2, value3)) 328 + 329 + DECLARE_EVENT_CLASS(ocfs2__ull_uint_uint_uint, 330 + TP_PROTO(unsigned long long ull, unsigned int value1, 331 + unsigned int value2, unsigned int value3), 332 + TP_ARGS(ull, 
value1, value2, value3), 333 + TP_STRUCT__entry( 334 + __field(unsigned long long, ull) 335 + __field(unsigned int, value1) 336 + __field(unsigned int, value2) 337 + __field(unsigned int, value3) 338 + ), 339 + TP_fast_assign( 340 + __entry->ull = ull; 341 + __entry->value1 = value1; 342 + __entry->value2 = value2; 343 + __entry->value3 = value3; 344 + ), 345 + TP_printk("%llu %u %u %u", 346 + __entry->ull, __entry->value1, 347 + __entry->value2, __entry->value3) 348 + ); 349 + 350 + #define DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(name) \ 351 + DEFINE_EVENT(ocfs2__ull_uint_uint_uint, name, \ 352 + TP_PROTO(unsigned long long ull, unsigned int value1, \ 353 + unsigned int value2, unsigned int value3), \ 354 + TP_ARGS(ull, value1, value2, value3)) 355 + 356 + DECLARE_EVENT_CLASS(ocfs2__ull_ull_uint_uint, 357 + TP_PROTO(unsigned long long value1, unsigned long long value2, 358 + unsigned int value3, unsigned int value4), 359 + TP_ARGS(value1, value2, value3, value4), 360 + TP_STRUCT__entry( 361 + __field(unsigned long long, value1) 362 + __field(unsigned long long, value2) 363 + __field(unsigned int, value3) 364 + __field(unsigned int, value4) 365 + ), 366 + TP_fast_assign( 367 + __entry->value1 = value1; 368 + __entry->value2 = value2; 369 + __entry->value3 = value3; 370 + __entry->value4 = value4; 371 + ), 372 + TP_printk("%llu %llu %u %u", 373 + __entry->value1, __entry->value2, 374 + __entry->value3, __entry->value4) 375 + ); 376 + 377 + #define DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(name) \ 378 + DEFINE_EVENT(ocfs2__ull_ull_uint_uint, name, \ 379 + TP_PROTO(unsigned long long ull, unsigned long long ull1, \ 380 + unsigned int value2, unsigned int value3), \ 381 + TP_ARGS(ull, ull1, value2, value3)) 382 + 383 + /* Trace events for fs/ocfs2/alloc.c. 
*/ 384 + DECLARE_EVENT_CLASS(ocfs2__btree_ops, 385 + TP_PROTO(unsigned long long owner,\ 386 + unsigned int value1, unsigned int value2), 387 + TP_ARGS(owner, value1, value2), 388 + TP_STRUCT__entry( 389 + __field(unsigned long long, owner) 390 + __field(unsigned int, value1) 391 + __field(unsigned int, value2) 392 + ), 393 + TP_fast_assign( 394 + __entry->owner = owner; 395 + __entry->value1 = value1; 396 + __entry->value2 = value2; 397 + ), 398 + TP_printk("%llu %u %u", 399 + __entry->owner, __entry->value1, __entry->value2) 400 + ); 401 + 402 + #define DEFINE_OCFS2_BTREE_EVENT(name) \ 403 + DEFINE_EVENT(ocfs2__btree_ops, name, \ 404 + TP_PROTO(unsigned long long owner, \ 405 + unsigned int value1, unsigned int value2), \ 406 + TP_ARGS(owner, value1, value2)) 407 + 408 + DEFINE_OCFS2_BTREE_EVENT(ocfs2_adjust_rightmost_branch); 409 + 410 + DEFINE_OCFS2_BTREE_EVENT(ocfs2_rotate_tree_right); 411 + 412 + DEFINE_OCFS2_BTREE_EVENT(ocfs2_append_rec_to_path); 413 + 414 + DEFINE_OCFS2_BTREE_EVENT(ocfs2_insert_extent_start); 415 + 416 + DEFINE_OCFS2_BTREE_EVENT(ocfs2_add_clusters_in_btree); 417 + 418 + DEFINE_OCFS2_INT_EVENT(ocfs2_num_free_extents); 419 + 420 + DEFINE_OCFS2_INT_EVENT(ocfs2_complete_edge_insert); 421 + 422 + TRACE_EVENT(ocfs2_grow_tree, 423 + TP_PROTO(unsigned long long owner, int depth), 424 + TP_ARGS(owner, depth), 425 + TP_STRUCT__entry( 426 + __field(unsigned long long, owner) 427 + __field(int, depth) 428 + ), 429 + TP_fast_assign( 430 + __entry->owner = owner; 431 + __entry->depth = depth; 432 + ), 433 + TP_printk("%llu %d", __entry->owner, __entry->depth) 434 + ); 435 + 436 + TRACE_EVENT(ocfs2_rotate_subtree, 437 + TP_PROTO(int subtree_root, unsigned long long blkno, 438 + int depth), 439 + TP_ARGS(subtree_root, blkno, depth), 440 + TP_STRUCT__entry( 441 + __field(int, subtree_root) 442 + __field(unsigned long long, blkno) 443 + __field(int, depth) 444 + ), 445 + TP_fast_assign( 446 + __entry->subtree_root = subtree_root; 447 + __entry->blkno = 
blkno; 448 + __entry->depth = depth; 449 + ), 450 + TP_printk("%d %llu %d", __entry->subtree_root, 451 + __entry->blkno, __entry->depth) 452 + ); 453 + 454 + TRACE_EVENT(ocfs2_insert_extent, 455 + TP_PROTO(unsigned int ins_appending, unsigned int ins_contig, 456 + int ins_contig_index, int free_records, int ins_tree_depth), 457 + TP_ARGS(ins_appending, ins_contig, ins_contig_index, free_records, 458 + ins_tree_depth), 459 + TP_STRUCT__entry( 460 + __field(unsigned int, ins_appending) 461 + __field(unsigned int, ins_contig) 462 + __field(int, ins_contig_index) 463 + __field(int, free_records) 464 + __field(int, ins_tree_depth) 465 + ), 466 + TP_fast_assign( 467 + __entry->ins_appending = ins_appending; 468 + __entry->ins_contig = ins_contig; 469 + __entry->ins_contig_index = ins_contig_index; 470 + __entry->free_records = free_records; 471 + __entry->ins_tree_depth = ins_tree_depth; 472 + ), 473 + TP_printk("%u %u %d %d %d", 474 + __entry->ins_appending, __entry->ins_contig, 475 + __entry->ins_contig_index, __entry->free_records, 476 + __entry->ins_tree_depth) 477 + ); 478 + 479 + TRACE_EVENT(ocfs2_split_extent, 480 + TP_PROTO(int split_index, unsigned int c_contig_type, 481 + unsigned int c_has_empty_extent, 482 + unsigned int c_split_covers_rec), 483 + TP_ARGS(split_index, c_contig_type, 484 + c_has_empty_extent, c_split_covers_rec), 485 + TP_STRUCT__entry( 486 + __field(int, split_index) 487 + __field(unsigned int, c_contig_type) 488 + __field(unsigned int, c_has_empty_extent) 489 + __field(unsigned int, c_split_covers_rec) 490 + ), 491 + TP_fast_assign( 492 + __entry->split_index = split_index; 493 + __entry->c_contig_type = c_contig_type; 494 + __entry->c_has_empty_extent = c_has_empty_extent; 495 + __entry->c_split_covers_rec = c_split_covers_rec; 496 + ), 497 + TP_printk("%d %u %u %u", __entry->split_index, __entry->c_contig_type, 498 + __entry->c_has_empty_extent, __entry->c_split_covers_rec) 499 + ); 500 + 501 + TRACE_EVENT(ocfs2_remove_extent, 502 + 
TP_PROTO(unsigned long long owner, unsigned int cpos, 503 + unsigned int len, int index, 504 + unsigned int e_cpos, unsigned int clusters), 505 + TP_ARGS(owner, cpos, len, index, e_cpos, clusters), 506 + TP_STRUCT__entry( 507 + __field(unsigned long long, owner) 508 + __field(unsigned int, cpos) 509 + __field(unsigned int, len) 510 + __field(int, index) 511 + __field(unsigned int, e_cpos) 512 + __field(unsigned int, clusters) 513 + ), 514 + TP_fast_assign( 515 + __entry->owner = owner; 516 + __entry->cpos = cpos; 517 + __entry->len = len; 518 + __entry->index = index; 519 + __entry->e_cpos = e_cpos; 520 + __entry->clusters = clusters; 521 + ), 522 + TP_printk("%llu %u %u %d %u %u", 523 + __entry->owner, __entry->cpos, __entry->len, __entry->index, 524 + __entry->e_cpos, __entry->clusters) 525 + ); 526 + 527 + TRACE_EVENT(ocfs2_commit_truncate, 528 + TP_PROTO(unsigned long long ino, unsigned int new_cpos, 529 + unsigned int clusters, unsigned int depth), 530 + TP_ARGS(ino, new_cpos, clusters, depth), 531 + TP_STRUCT__entry( 532 + __field(unsigned long long, ino) 533 + __field(unsigned int, new_cpos) 534 + __field(unsigned int, clusters) 535 + __field(unsigned int, depth) 536 + ), 537 + TP_fast_assign( 538 + __entry->ino = ino; 539 + __entry->new_cpos = new_cpos; 540 + __entry->clusters = clusters; 541 + __entry->depth = depth; 542 + ), 543 + TP_printk("%llu %u %u %u", 544 + __entry->ino, __entry->new_cpos, 545 + __entry->clusters, __entry->depth) 546 + ); 547 + 548 + TRACE_EVENT(ocfs2_validate_extent_block, 549 + TP_PROTO(unsigned long long blkno), 550 + TP_ARGS(blkno), 551 + TP_STRUCT__entry( 552 + __field(unsigned long long, blkno) 553 + ), 554 + TP_fast_assign( 555 + __entry->blkno = blkno; 556 + ), 557 + TP_printk("%llu ", __entry->blkno) 558 + ); 559 + 560 + TRACE_EVENT(ocfs2_rotate_leaf, 561 + TP_PROTO(unsigned int insert_cpos, int insert_index, 562 + int has_empty, int next_free, 563 + unsigned int l_count), 564 + TP_ARGS(insert_cpos, insert_index, has_empty, 
565 + next_free, l_count), 566 + TP_STRUCT__entry( 567 + __field(unsigned int, insert_cpos) 568 + __field(int, insert_index) 569 + __field(int, has_empty) 570 + __field(int, next_free) 571 + __field(unsigned int, l_count) 572 + ), 573 + TP_fast_assign( 574 + __entry->insert_cpos = insert_cpos; 575 + __entry->insert_index = insert_index; 576 + __entry->has_empty = has_empty; 577 + __entry->next_free = next_free; 578 + __entry->l_count = l_count; 579 + ), 580 + TP_printk("%u %d %d %d %u", __entry->insert_cpos, 581 + __entry->insert_index, __entry->has_empty, 582 + __entry->next_free, __entry->l_count) 583 + ); 584 + 585 + TRACE_EVENT(ocfs2_add_clusters_in_btree_ret, 586 + TP_PROTO(int status, int reason, int err), 587 + TP_ARGS(status, reason, err), 588 + TP_STRUCT__entry( 589 + __field(int, status) 590 + __field(int, reason) 591 + __field(int, err) 592 + ), 593 + TP_fast_assign( 594 + __entry->status = status; 595 + __entry->reason = reason; 596 + __entry->err = err; 597 + ), 598 + TP_printk("%d %d %d", __entry->status, 599 + __entry->reason, __entry->err) 600 + ); 601 + 602 + TRACE_EVENT(ocfs2_mark_extent_written, 603 + TP_PROTO(unsigned long long owner, unsigned int cpos, 604 + unsigned int len, unsigned int phys), 605 + TP_ARGS(owner, cpos, len, phys), 606 + TP_STRUCT__entry( 607 + __field(unsigned long long, owner) 608 + __field(unsigned int, cpos) 609 + __field(unsigned int, len) 610 + __field(unsigned int, phys) 611 + ), 612 + TP_fast_assign( 613 + __entry->owner = owner; 614 + __entry->cpos = cpos; 615 + __entry->len = len; 616 + __entry->phys = phys; 617 + ), 618 + TP_printk("%llu %u %u %u", 619 + __entry->owner, __entry->cpos, 620 + __entry->len, __entry->phys) 621 + ); 622 + 623 + DECLARE_EVENT_CLASS(ocfs2__truncate_log_ops, 624 + TP_PROTO(unsigned long long blkno, int index, 625 + unsigned int start, unsigned int num), 626 + TP_ARGS(blkno, index, start, num), 627 + TP_STRUCT__entry( 628 + __field(unsigned long long, blkno) 629 + __field(int, index) 630 + 
__field(unsigned int, start) 631 + __field(unsigned int, num) 632 + ), 633 + TP_fast_assign( 634 + __entry->blkno = blkno; 635 + __entry->index = index; 636 + __entry->start = start; 637 + __entry->num = num; 638 + ), 639 + TP_printk("%llu %d %u %u", 640 + __entry->blkno, __entry->index, 641 + __entry->start, __entry->num) 642 + ); 643 + 644 + #define DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(name) \ 645 + DEFINE_EVENT(ocfs2__truncate_log_ops, name, \ 646 + TP_PROTO(unsigned long long blkno, int index, \ 647 + unsigned int start, unsigned int num), \ 648 + TP_ARGS(blkno, index, start, num)) 649 + 650 + DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_truncate_log_append); 651 + 652 + DEFINE_OCFS2_TRUNCATE_LOG_OPS_EVENT(ocfs2_replay_truncate_records); 653 + 654 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_flush_truncate_log); 655 + 656 + DEFINE_OCFS2_INT_EVENT(ocfs2_begin_truncate_log_recovery); 657 + 658 + DEFINE_OCFS2_INT_EVENT(ocfs2_truncate_log_recovery_num); 659 + 660 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_complete_truncate_log_recovery); 661 + 662 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_free_cached_blocks); 663 + 664 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_cache_cluster_dealloc); 665 + 666 + DEFINE_OCFS2_INT_INT_EVENT(ocfs2_run_deallocs); 667 + 668 + TRACE_EVENT(ocfs2_cache_block_dealloc, 669 + TP_PROTO(int type, int slot, unsigned long long suballoc, 670 + unsigned long long blkno, unsigned int bit), 671 + TP_ARGS(type, slot, suballoc, blkno, bit), 672 + TP_STRUCT__entry( 673 + __field(int, type) 674 + __field(int, slot) 675 + __field(unsigned long long, suballoc) 676 + __field(unsigned long long, blkno) 677 + __field(unsigned int, bit) 678 + ), 679 + TP_fast_assign( 680 + __entry->type = type; 681 + __entry->slot = slot; 682 + __entry->suballoc = suballoc; 683 + __entry->blkno = blkno; 684 + __entry->bit = bit; 685 + ), 686 + TP_printk("%d %d %llu %llu %u", 687 + __entry->type, __entry->slot, __entry->suballoc, 688 + __entry->blkno, __entry->bit) 689 + ); 690 + 691 + /* End of trace events 
for fs/ocfs2/alloc.c. */ 692 + 693 + /* Trace events for fs/ocfs2/localalloc.c. */ 694 + 695 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_la_set_sizes); 696 + 697 + DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_alloc_should_use_local); 698 + 699 + DEFINE_OCFS2_INT_EVENT(ocfs2_load_local_alloc); 700 + 701 + DEFINE_OCFS2_INT_EVENT(ocfs2_begin_local_alloc_recovery); 702 + 703 + DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_reserve_local_alloc_bits); 704 + 705 + DEFINE_OCFS2_UINT_EVENT(ocfs2_local_alloc_count_bits); 706 + 707 + DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits_search_bitmap); 708 + 709 + DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_local_alloc_find_clear_bits); 710 + 711 + DEFINE_OCFS2_INT_INT_EVENT(ocfs2_sync_local_to_main); 712 + 713 + TRACE_EVENT(ocfs2_sync_local_to_main_free, 714 + TP_PROTO(int count, int bit, unsigned long long start_blk, 715 + unsigned long long blkno), 716 + TP_ARGS(count, bit, start_blk, blkno), 717 + TP_STRUCT__entry( 718 + __field(int, count) 719 + __field(int, bit) 720 + __field(unsigned long long, start_blk) 721 + __field(unsigned long long, blkno) 722 + ), 723 + TP_fast_assign( 724 + __entry->count = count; 725 + __entry->bit = bit; 726 + __entry->start_blk = start_blk; 727 + __entry->blkno = blkno; 728 + ), 729 + TP_printk("%d %d %llu %llu", 730 + __entry->count, __entry->bit, __entry->start_blk, 731 + __entry->blkno) 732 + ); 733 + 734 + DEFINE_OCFS2_INT_INT_EVENT(ocfs2_local_alloc_new_window); 735 + 736 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_local_alloc_new_window_result); 737 + 738 + /* End of trace events for fs/ocfs2/localalloc.c. */ 739 + 740 + /* Trace events for fs/ocfs2/resize.c. */ 741 + 742 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_update_last_group_and_inode); 743 + 744 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_group_extend); 745 + 746 + DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_group_add); 747 + 748 + /* End of trace events for fs/ocfs2/resize.c. */ 749 + 750 + /* Trace events for fs/ocfs2/suballoc.c. 
*/ 751 + 752 + DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_group_descriptor); 753 + 754 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_contig); 755 + 756 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_block_group_alloc_discontig); 757 + 758 + DEFINE_OCFS2_ULL_EVENT(ocfs2_block_group_alloc); 759 + 760 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_nospc); 761 + 762 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_reserve_suballoc_bits_no_new_group); 763 + 764 + DEFINE_OCFS2_ULL_EVENT(ocfs2_reserve_new_inode_new_group); 765 + 766 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_set_bits); 767 + 768 + TRACE_EVENT(ocfs2_relink_block_group, 769 + TP_PROTO(unsigned long long i_blkno, unsigned int chain, 770 + unsigned long long bg_blkno, 771 + unsigned long long prev_blkno), 772 + TP_ARGS(i_blkno, chain, bg_blkno, prev_blkno), 773 + TP_STRUCT__entry( 774 + __field(unsigned long long, i_blkno) 775 + __field(unsigned int, chain) 776 + __field(unsigned long long, bg_blkno) 777 + __field(unsigned long long, prev_blkno) 778 + ), 779 + TP_fast_assign( 780 + __entry->i_blkno = i_blkno; 781 + __entry->chain = chain; 782 + __entry->bg_blkno = bg_blkno; 783 + __entry->prev_blkno = prev_blkno; 784 + ), 785 + TP_printk("%llu %u %llu %llu", 786 + __entry->i_blkno, __entry->chain, __entry->bg_blkno, 787 + __entry->prev_blkno) 788 + ); 789 + 790 + DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_cluster_group_search_wrong_max_bits); 791 + 792 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cluster_group_search_max_block); 793 + 794 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_block_group_search_max_block); 795 + 796 + DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_search_chain_begin); 797 + 798 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_succ); 799 + 800 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_search_chain_end); 801 + 802 + DEFINE_OCFS2_UINT_EVENT(ocfs2_claim_suballoc_bits); 803 + 804 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_claim_new_inode_at_loc); 805 + 806 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_block_group_clear_bits); 807 + 808 
/*
 * Freeing suballocator bits: allocator inode blkno, group blkno, first
 * bit and number of bits being cleared.
 */
TRACE_EVENT(ocfs2_free_suballoc_bits,
	TP_PROTO(unsigned long long inode, unsigned long long group,
		 unsigned int start_bit, unsigned int count),
	TP_ARGS(inode, group, start_bit, count),
	TP_STRUCT__entry(
		__field(unsigned long long, inode)
		__field(unsigned long long, group)
		__field(unsigned int, start_bit)
		__field(unsigned int, count)
	),
	TP_fast_assign(
		__entry->inode = inode;
		__entry->group = group;
		__entry->start_bit = start_bit;
		__entry->count = count;
	),
	TP_printk("%llu %llu %u %u", __entry->inode, __entry->group,
		  __entry->start_bit, __entry->count)
);

/*
 * Freeing clusters: block group blkno, starting block, first bit and
 * count of clusters released.
 */
TRACE_EVENT(ocfs2_free_clusters,
	TP_PROTO(unsigned long long bg_blkno, unsigned long long start_blk,
		 unsigned int start_bit, unsigned int count),
	TP_ARGS(bg_blkno, start_blk, start_bit, count),
	TP_STRUCT__entry(
		__field(unsigned long long, bg_blkno)
		__field(unsigned long long, start_blk)
		__field(unsigned int, start_bit)
		__field(unsigned int, count)
	),
	TP_fast_assign(
		__entry->bg_blkno = bg_blkno;
		__entry->start_blk = start_blk;
		__entry->start_bit = start_bit;
		__entry->count = count;
	),
	TP_printk("%llu %llu %u %u", __entry->bg_blkno, __entry->start_blk,
		  __entry->start_bit, __entry->count)
);

DEFINE_OCFS2_ULL_EVENT(ocfs2_get_suballoc_slot_bit);

DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_test_suballoc_bit);

DEFINE_OCFS2_ULL_EVENT(ocfs2_test_inode_bit);

/* End of trace events for fs/ocfs2/suballoc.c. */

/* Trace events for fs/ocfs2/refcounttree.c. */

DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_refcount_block);

DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_refcount_trees);

DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree);

DEFINE_OCFS2_ULL_EVENT(ocfs2_create_refcount_tree_blkno);

DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_change_refcount_rec);

DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_expand_inline_ref_root);

DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_divide_leaf_refcount_block);

DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_new_leaf_refcount_block);

/*
 * Common shape for refcount-tree record operations: the leaf block, the
 * record index within it, and the record's (cpos, clusters, refcount).
 */
DECLARE_EVENT_CLASS(ocfs2__refcount_tree_ops,
	TP_PROTO(unsigned long long blkno, int index,
		 unsigned long long cpos,
		 unsigned int clusters, unsigned int refcount),
	TP_ARGS(blkno, index, cpos, clusters, refcount),
	TP_STRUCT__entry(
		__field(unsigned long long, blkno)
		__field(int, index)
		__field(unsigned long long, cpos)
		__field(unsigned int, clusters)
		__field(unsigned int, refcount)
	),
	TP_fast_assign(
		__entry->blkno = blkno;
		__entry->index = index;
		__entry->cpos = cpos;
		__entry->clusters = clusters;
		__entry->refcount = refcount;
	),
	TP_printk("%llu %d %llu %u %u", __entry->blkno, __entry->index,
		  __entry->cpos, __entry->clusters, __entry->refcount)
);

#define DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(name)			\
DEFINE_EVENT(ocfs2__refcount_tree_ops, name,				\
	TP_PROTO(unsigned long long blkno, int index,			\
		 unsigned long long cpos,				\
		 unsigned int count, unsigned int refcount),		\
	TP_ARGS(blkno, index, cpos, count, refcount))

DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_insert_refcount_rec);

/*
 * Splitting a refcount record: the original record's (cpos, clusters,
 * refcount) followed by the split-off portion's values.
 */
TRACE_EVENT(ocfs2_split_refcount_rec,
	TP_PROTO(unsigned long long cpos,
		 unsigned int clusters, unsigned int refcount,
		 unsigned long long split_cpos,
		 unsigned int split_clusters, unsigned int split_refcount),
	TP_ARGS(cpos, clusters, refcount,
		split_cpos, split_clusters, split_refcount),
	TP_STRUCT__entry(
		__field(unsigned long long, cpos)
		__field(unsigned int, clusters)
		__field(unsigned int, refcount)
		__field(unsigned long long, split_cpos)
		__field(unsigned int, split_clusters)
		__field(unsigned int, split_refcount)
	),
	TP_fast_assign(
		__entry->cpos = cpos;
		__entry->clusters = clusters;
		__entry->refcount = refcount;
		__entry->split_cpos = split_cpos;
		__entry->split_clusters = split_clusters;
		__entry->split_refcount = split_refcount;
	),
	TP_printk("%llu %u %u %llu %u %u",
		  __entry->cpos, __entry->clusters, __entry->refcount,
		  __entry->split_cpos, __entry->split_clusters,
		  __entry->split_refcount)
);

DEFINE_OCFS2_REFCOUNT_TREE_OPS_EVENT(ocfs2_split_refcount_rec_insert);

DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_increase_refcount_begin);

DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_change);

DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_increase_refcount_insert);

DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_increase_refcount_split);

DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_remove_refcount_extent);

DEFINE_OCFS2_ULL_EVENT(ocfs2_restore_refcount_block);

DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_decrease_refcount_rec);

/*
 * Dropping a refcount: owner blkno, start cpos, length, and whether the
 * clusters are also being deleted.  Note "delete" is a valid identifier
 * here -- this header is C-only.
 */
TRACE_EVENT(ocfs2_decrease_refcount,
	TP_PROTO(unsigned long long owner,
		 unsigned long long cpos,
		 unsigned int len, int delete),
	TP_ARGS(owner, cpos, len, delete),
	TP_STRUCT__entry(
		__field(unsigned long long, owner)
		__field(unsigned long long, cpos)
		__field(unsigned int, len)
		__field(int, delete)
	),
	TP_fast_assign(
		__entry->owner = owner;
		__entry->cpos = cpos;
		__entry->len = len;
		__entry->delete = delete;
	),
	TP_printk("%llu %llu %u %d",
		  __entry->owner, __entry->cpos, __entry->len, __entry->delete)
);

DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_mark_extent_refcounted);

DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_calc_refcount_meta_credits);

/*
 * Per-iteration snapshot while estimating journal credits for a
 * refcount change: records to add so far, the request's (cpos,
 * clusters), and the current record's (r_cpos, r_clusters, refcount)
 * plus its index.
 */
TRACE_EVENT(ocfs2_calc_refcount_meta_credits_iterate,
	TP_PROTO(int recs_add, unsigned long long cpos,
		 unsigned int clusters, unsigned long long r_cpos,
		 unsigned int r_clusters, unsigned int refcount, int index),
	TP_ARGS(recs_add, cpos, clusters, r_cpos, r_clusters, refcount, index),
	TP_STRUCT__entry(
		__field(int, recs_add)
		__field(unsigned long long, cpos)
		__field(unsigned int, clusters)
		__field(unsigned long long, r_cpos)
		__field(unsigned int, r_clusters)
		__field(unsigned int, refcount)
		__field(int, index)
	),
	TP_fast_assign(
		__entry->recs_add = recs_add;
		__entry->cpos = cpos;
		__entry->clusters = clusters;
		__entry->r_cpos = r_cpos;
		__entry->r_clusters = r_clusters;
		__entry->refcount = refcount;
		__entry->index = index;
	),
	TP_printk("%d %llu %u %llu %u %u %d",
		  __entry->recs_add, __entry->cpos, __entry->clusters,
		  __entry->r_cpos, __entry->r_clusters,
		  __entry->refcount, __entry->index)
);

DEFINE_OCFS2_INT_INT_EVENT(ocfs2_add_refcount_flag);

DEFINE_OCFS2_INT_INT_EVENT(ocfs2_prepare_refcount_change_for_del);

DEFINE_OCFS2_INT_INT_EVENT(ocfs2_lock_refcount_allocators);

DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_page);

DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_duplicate_clusters_by_jbd);

/*
 * Clearing the refcounted flag on an extent: inode, logical start,
 * length, physical cluster and the extent flags at that point.
 */
TRACE_EVENT(ocfs2_clear_ext_refcount,
	TP_PROTO(unsigned long long ino, unsigned int cpos,
		 unsigned int len, unsigned int p_cluster,
		 unsigned int ext_flags),
	TP_ARGS(ino, cpos, len, p_cluster, ext_flags),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned int, cpos)
		__field(unsigned int, len)
		__field(unsigned int, p_cluster)
		__field(unsigned int, ext_flags)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->cpos = cpos;
		__entry->len = len;
		__entry->p_cluster = p_cluster;
		__entry->ext_flags = ext_flags;
	),
	TP_printk("%llu %u %u %u %u",
		  __entry->ino, __entry->cpos, __entry->len,
		  __entry->p_cluster, __entry->ext_flags)
);

/*
 * CoW cluster replacement: old and new physical clusters for a range of
 * the inode, plus the extent flags carried over.
 */
TRACE_EVENT(ocfs2_replace_clusters,
	TP_PROTO(unsigned long long ino, unsigned int cpos,
		 unsigned int old, unsigned int new, unsigned int len,
		 unsigned int ext_flags),
	TP_ARGS(ino, cpos, old, new, len, ext_flags),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned int, cpos)
		__field(unsigned int, old)
		__field(unsigned int, new)
		__field(unsigned int, len)
		__field(unsigned int, ext_flags)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->cpos = cpos;
		__entry->old = old;
		__entry->new = new;
		__entry->len = len;
		__entry->ext_flags = ext_flags;
	),
	TP_printk("%llu %u %u %u %u %u",
		  __entry->ino, __entry->cpos, __entry->old, __entry->new,
		  __entry->len, __entry->ext_flags)
);

DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_make_clusters_writable);

/*
 * One CoW "hunk": the requested write (cpos, write_len, max_cpos) and
 * the resulting CoW range (cow_start, cow_len) chosen for it.
 */
TRACE_EVENT(ocfs2_refcount_cow_hunk,
	TP_PROTO(unsigned long long ino, unsigned int cpos,
		 unsigned int write_len, unsigned int max_cpos,
		 unsigned int cow_start, unsigned int cow_len),
	TP_ARGS(ino, cpos, write_len, max_cpos, cow_start, cow_len),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned int, cpos)
		__field(unsigned int, write_len)
		__field(unsigned int, max_cpos)
		__field(unsigned int, cow_start)
		__field(unsigned int, cow_len)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->cpos = cpos;
		__entry->write_len = write_len;
		__entry->max_cpos = max_cpos;
		__entry->cow_start = cow_start;
		__entry->cow_len = cow_len;
	),
	TP_printk("%llu %u %u %u %u %u",
		  __entry->ino, __entry->cpos, __entry->write_len,
		  __entry->max_cpos, __entry->cow_start, __entry->cow_len)
);

/* End of trace events for fs/ocfs2/refcounttree.c. */

/* Trace events for fs/ocfs2/aops.c. */

/*
 * get_block-style callbacks: inode number, logical block, the
 * buffer_head result pointer and the create flag.
 */
DECLARE_EVENT_CLASS(ocfs2__get_block,
	TP_PROTO(unsigned long long ino, unsigned long long iblock,
		 void *bh_result, int create),
	TP_ARGS(ino, iblock, bh_result, create),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned long long, iblock)
		__field(void *, bh_result)
		__field(int, create)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->iblock = iblock;
		__entry->bh_result = bh_result;
		__entry->create = create;
	),
	TP_printk("%llu %llu %p %d",
		  __entry->ino, __entry->iblock,
		  __entry->bh_result, __entry->create)
);

#define DEFINE_OCFS2_GET_BLOCK_EVENT(name)				\
DEFINE_EVENT(ocfs2__get_block, name,					\
	TP_PROTO(unsigned long long ino, unsigned long long iblock,	\
		 void *bh_result, int create),				\
	TP_ARGS(ino, iblock, bh_result, create))

DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_symlink_get_block);

DEFINE_OCFS2_GET_BLOCK_EVENT(ocfs2_get_block);

DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_get_block_end);

DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_readpage);

DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_writepage);

DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_bmap);

/*
 * Attempted inline-data write: inode, length, file position and the
 * write flags (flags printed in hex).
 */
TRACE_EVENT(ocfs2_try_to_write_inline_data,
	TP_PROTO(unsigned long long ino, unsigned int len,
		 unsigned long long pos, unsigned int flags),
	TP_ARGS(ino, len, pos, flags),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned int, len)
		__field(unsigned long long, pos)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->len = len;
		__entry->pos = pos;
		__entry->flags = flags;
	),
	TP_printk("%llu %u %llu 0x%x",
		  __entry->ino, __entry->len, __entry->pos, __entry->flags)
);

/*
 * Full state snapshot entering the lockless write_begin path: current
 * inode size/clusters, the requested (pos, len, flags), the target page
 * and how many clusters / extent splits the write will need.
 */
TRACE_EVENT(ocfs2_write_begin_nolock,
	TP_PROTO(unsigned long long ino,
		 long long i_size, unsigned int i_clusters,
		 unsigned long long pos, unsigned int len,
		 unsigned int flags, void *page,
		 unsigned int clusters, unsigned int extents_to_split),
	TP_ARGS(ino, i_size, i_clusters, pos, len, flags,
		page, clusters, extents_to_split),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(long long, i_size)
		__field(unsigned int, i_clusters)
		__field(unsigned long long, pos)
		__field(unsigned int, len)
		__field(unsigned int, flags)
		__field(void *, page)
		__field(unsigned int, clusters)
		__field(unsigned int, extents_to_split)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->i_size = i_size;
		__entry->i_clusters = i_clusters;
		__entry->pos = pos;
		__entry->len = len;
		__entry->flags = flags;
		__entry->page = page;
		__entry->clusters = clusters;
		__entry->extents_to_split = extents_to_split;
	),
	TP_printk("%llu %lld %u %llu %u %u %p %u %u",
		  __entry->ino, __entry->i_size, __entry->i_clusters,
		  __entry->pos, __entry->len,
		  __entry->flags, __entry->page, __entry->clusters,
		  __entry->extents_to_split)
);

/*
 * Completion of an inline-data write: bytes copied plus the inline-data
 * count and dynamic-features fields of the dinode.
 */
TRACE_EVENT(ocfs2_write_end_inline,
	TP_PROTO(unsigned long long ino,
		 unsigned long long pos, unsigned int copied,
		 unsigned int id_count, unsigned int features),
	TP_ARGS(ino, pos, copied, id_count, features),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned long long, pos)
		__field(unsigned int, copied)
		__field(unsigned int, id_count)
		__field(unsigned int, features)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->pos = pos;
		__entry->copied = copied;
		__entry->id_count = id_count;
		__entry->features = features;
	),
	TP_printk("%llu %llu %u %u %u",
		  __entry->ino, __entry->pos, __entry->copied,
		  __entry->id_count, __entry->features)
);

/* End of trace events for fs/ocfs2/aops.c. */

/* Trace events for fs/ocfs2/mmap.c. */

/*
 * Page-fault entry: inode, vma (area), faulting page and page offset.
 */
TRACE_EVENT(ocfs2_fault,
	TP_PROTO(unsigned long long ino,
		 void *area, void *page, unsigned long pgoff),
	TP_ARGS(ino, area, page, pgoff),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(void *, area)
		__field(void *, page)
		__field(unsigned long, pgoff)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->area = area;
		__entry->page = page;
		__entry->pgoff = pgoff;
	),
	TP_printk("%llu %p %p %lu",
		  __entry->ino, __entry->area, __entry->page, __entry->pgoff)
);

/* End of trace events for fs/ocfs2/mmap.c. */

/* Trace events for fs/ocfs2/file.c.
 */

/*
 * Common shape for file-operation entry points: the inode/file/dentry
 * pointers, inode number, the dentry name, and one per-event extra
 * value ("para" -- renamed "mode" in the DEFINE wrapper below).
 * Note the print order: "para" is emitted before the dentry name even
 * though it is declared and assigned last.
 */
DECLARE_EVENT_CLASS(ocfs2__file_ops,
	TP_PROTO(void *inode, void *file, void *dentry,
		 unsigned long long ino,
		 unsigned int d_len, const unsigned char *d_name,
		 unsigned long long para),
	TP_ARGS(inode, file, dentry, ino, d_len, d_name, para),
	TP_STRUCT__entry(
		__field(void *, inode)
		__field(void *, file)
		__field(void *, dentry)
		__field(unsigned long long, ino)
		__field(unsigned int, d_len)
		__string(d_name, d_name)
		__field(unsigned long long, para)
	),
	TP_fast_assign(
		__entry->inode = inode;
		__entry->file = file;
		__entry->dentry = dentry;
		__entry->ino = ino;
		__entry->d_len = d_len;
		__assign_str(d_name, d_name);
		__entry->para = para;
	),
	TP_printk("%p %p %p %llu %llu %.*s", __entry->inode, __entry->file,
		  __entry->dentry, __entry->ino, __entry->para,
		  __entry->d_len, __get_str(d_name))
);

#define DEFINE_OCFS2_FILE_OPS(name)				\
DEFINE_EVENT(ocfs2__file_ops, name,				\
TP_PROTO(void *inode, void *file, void *dentry,			\
	 unsigned long long ino,				\
	 unsigned int d_len, const unsigned char *d_name,	\
	 unsigned long long mode),				\
	TP_ARGS(inode, file, dentry, ino, d_len, d_name, mode))

DEFINE_OCFS2_FILE_OPS(ocfs2_file_open);

DEFINE_OCFS2_FILE_OPS(ocfs2_file_release);

DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file);

DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);

DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);

DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read);

DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);

DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);

DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_truncate_file_error);

/*
 * Allocation-extension attempt: inode blkno, current size/clusters,
 * clusters requested, the "why" reason code and whether the function
 * restarted.
 */
TRACE_EVENT(ocfs2_extend_allocation,
	TP_PROTO(unsigned long long ip_blkno, unsigned long long size,
		 unsigned int clusters, unsigned int clusters_to_add,
		 int why, int restart_func),
	TP_ARGS(ip_blkno, size, clusters, clusters_to_add, why, restart_func),
	TP_STRUCT__entry(
		__field(unsigned long long, ip_blkno)
		__field(unsigned long long, size)
		__field(unsigned int, clusters)
		__field(unsigned int, clusters_to_add)
		__field(int, why)
		__field(int, restart_func)
	),
	TP_fast_assign(
		__entry->ip_blkno = ip_blkno;
		__entry->size = size;
		__entry->clusters = clusters;
		__entry->clusters_to_add = clusters_to_add;
		__entry->why = why;
		__entry->restart_func = restart_func;
	),
	TP_printk("%llu %llu %u %u %d %d",
		  __entry->ip_blkno, __entry->size, __entry->clusters,
		  __entry->clusters_to_add, __entry->why, __entry->restart_func)
);

/*
 * Post-extension state: on-disk (di_*) vs in-memory (ip_clusters,
 * i_size) view of the inode, useful for spotting divergence.
 */
TRACE_EVENT(ocfs2_extend_allocation_end,
	TP_PROTO(unsigned long long ino,
		 unsigned int di_clusters, unsigned long long di_size,
		 unsigned int ip_clusters, unsigned long long i_size),
	TP_ARGS(ino, di_clusters, di_size, ip_clusters, i_size),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned int, di_clusters)
		__field(unsigned long long, di_size)
		__field(unsigned int, ip_clusters)
		__field(unsigned long long, i_size)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->di_clusters = di_clusters;
		__entry->di_size = di_size;
		__entry->ip_clusters = ip_clusters;
		__entry->i_size = i_size;
	),
	TP_printk("%llu %u %llu %u %llu", __entry->ino, __entry->di_clusters,
		  __entry->di_size, __entry->ip_clusters, __entry->i_size)
);

/*
 * Zeroing one page of a range: absolute byte range, page index, and the
 * in-page zero_from/zero_to offsets.
 */
TRACE_EVENT(ocfs2_write_zero_page,
	TP_PROTO(unsigned long long ino,
		 unsigned long long abs_from, unsigned long long abs_to,
		 unsigned long index, unsigned int zero_from,
		 unsigned int zero_to),
	TP_ARGS(ino, abs_from, abs_to, index, zero_from, zero_to),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned long long, abs_from)
		__field(unsigned long long, abs_to)
		__field(unsigned long, index)
		__field(unsigned int, zero_from)
		__field(unsigned int, zero_to)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->abs_from = abs_from;
		__entry->abs_to = abs_to;
		__entry->index = index;
		__entry->zero_from = zero_from;
		__entry->zero_to = zero_to;
	),
	TP_printk("%llu %llu %llu %lu %u %u", __entry->ino,
		  __entry->abs_from, __entry->abs_to,
		  __entry->index, __entry->zero_from, __entry->zero_to)
);

DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend_range);

DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_extend);

/*
 * setattr entry: the dentry name plus the iattr fields being applied
 * (validity mask, mode, uid, gid).
 */
TRACE_EVENT(ocfs2_setattr,
	TP_PROTO(void *inode, void *dentry,
		 unsigned long long ino,
		 unsigned int d_len, const unsigned char *d_name,
		 unsigned int ia_valid, unsigned int ia_mode,
		 unsigned int ia_uid, unsigned int ia_gid),
	TP_ARGS(inode, dentry, ino, d_len, d_name,
		ia_valid, ia_mode, ia_uid, ia_gid),
	TP_STRUCT__entry(
		__field(void *, inode)
		__field(void *, dentry)
		__field(unsigned long long, ino)
		__field(unsigned int, d_len)
		__string(d_name, d_name)
		__field(unsigned int, ia_valid)
		__field(unsigned int, ia_mode)
		__field(unsigned int, ia_uid)
		__field(unsigned int, ia_gid)
	),
	TP_fast_assign(
		__entry->inode = inode;
		__entry->dentry = dentry;
		__entry->ino = ino;
		__entry->d_len = d_len;
		__assign_str(d_name, d_name);
		__entry->ia_valid = ia_valid;
		__entry->ia_mode = ia_mode;
		__entry->ia_uid = ia_uid;
		__entry->ia_gid = ia_gid;
	),
	TP_printk("%p %p %llu %.*s %u %u %u %u", __entry->inode,
		  __entry->dentry, __entry->ino, __entry->d_len,
		  __get_str(d_name), __entry->ia_valid, __entry->ia_mode,
		  __entry->ia_uid, __entry->ia_gid)
);

DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_write_remove_suid);

DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_zero_partial_clusters);

DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range1);

DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_zero_partial_clusters_range2);

DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_remove_inode_range);

/*
 * Write-preparation decision: direct_io/has_refcount are passed as
 * pointers and may be NULL, in which case -1 is recorded instead of
 * dereferencing them.
 */
TRACE_EVENT(ocfs2_prepare_inode_for_write,
	TP_PROTO(unsigned long long ino, unsigned long long saved_pos,
		 int appending, unsigned long count,
		 int *direct_io, int *has_refcount),
	TP_ARGS(ino, saved_pos, appending, count, direct_io, has_refcount),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned long long, saved_pos)
		__field(int, appending)
		__field(unsigned long, count)
		__field(int, direct_io)
		__field(int, has_refcount)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->saved_pos = saved_pos;
		__entry->appending = appending;
		__entry->count = count;
		__entry->direct_io = direct_io ? *direct_io : -1;
		__entry->has_refcount = has_refcount ? *has_refcount : -1;
	),
	TP_printk("%llu %llu %d %lu %d %d", __entry->ino,
		  __entry->saved_pos, __entry->appending, __entry->count,
		  __entry->direct_io, __entry->has_refcount)
);

DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);

/* End of trace events for fs/ocfs2/file.c. */

/* Trace events for fs/ocfs2/inode.c.
 */

/*
 * iget entry: inode number, lookup flags and the sysfile type requested.
 */
TRACE_EVENT(ocfs2_iget_begin,
	TP_PROTO(unsigned long long ino, unsigned int flags, int sysfile_type),
	TP_ARGS(ino, flags, sysfile_type),
	TP_STRUCT__entry(
		__field(unsigned long long, ino)
		__field(unsigned int, flags)
		__field(int, sysfile_type)
	),
	TP_fast_assign(
		__entry->ino = ino;
		__entry->flags = flags;
		__entry->sysfile_type = sysfile_type;
	),
	TP_printk("%llu %u %d", __entry->ino,
		  __entry->flags, __entry->sysfile_type)
);

DEFINE_OCFS2_ULL_EVENT(ocfs2_iget5_locked);

/* iget completion: the VFS inode pointer obtained for the inode number. */
TRACE_EVENT(ocfs2_iget_end,
	TP_PROTO(void *inode, unsigned long long ino),
	TP_ARGS(inode, ino),
	TP_STRUCT__entry(
		__field(void *, inode)
		__field(unsigned long long, ino)
	),
	TP_fast_assign(
		__entry->inode = inode;
		__entry->ino = ino;
	),
	TP_printk("%p %llu", __entry->inode, __entry->ino)
);

/*
 * iget5 find_actor comparison: candidate inode vs the blkno carried in
 * the lookup args (fi_blkno).
 */
TRACE_EVENT(ocfs2_find_actor,
	TP_PROTO(void *inode, unsigned long long ino,
		 void *args, unsigned long long fi_blkno),
	TP_ARGS(inode, ino, args, fi_blkno),
	TP_STRUCT__entry(
		__field(void *, inode)
		__field(unsigned long long, ino)
		__field(void *, args)
		__field(unsigned long long, fi_blkno)
	),
	TP_fast_assign(
		__entry->inode = inode;
		__entry->ino = ino;
		__entry->args = args;
		__entry->fi_blkno = fi_blkno;
	),
	TP_printk("%p %llu %p %llu", __entry->inode, __entry->ino,
		  __entry->args, __entry->fi_blkno)
);

DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_populate_inode);

DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_read_locked_inode);

DEFINE_OCFS2_INT_INT_EVENT(ocfs2_check_orphan_recovery_state);

DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_inode_block);

/*
 * Delete-validity check: current task vs the downconvert thread task,
 * plus the inode number and flags being examined.
 */
TRACE_EVENT(ocfs2_inode_is_valid_to_delete,
	TP_PROTO(void *task, void *dc_task, unsigned long long ino,
		 unsigned int flags),
	TP_ARGS(task, dc_task, ino, flags),
	TP_STRUCT__entry(
		__field(void *, task)
		__field(void *, dc_task)
		__field(unsigned long long, ino)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->task = task;
		__entry->dc_task = dc_task;
		__entry->ino = ino;
		__entry->flags = flags;
	),
	TP_printk("%p %p %llu %u", __entry->task, __entry->dc_task,
		  __entry->ino, __entry->flags)
);

DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_query_inode_wipe_begin);

DEFINE_OCFS2_UINT_EVENT(ocfs2_query_inode_wipe_succ);

DEFINE_OCFS2_INT_INT_EVENT(ocfs2_query_inode_wipe_end);

DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_cleanup_delete_inode);

DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_delete_inode);

DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_clear_inode);

DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_drop_inode);

/* Revalidation entry: inode pointer, inode number and open flags. */
TRACE_EVENT(ocfs2_inode_revalidate,
	TP_PROTO(void *inode, unsigned long long ino,
		 unsigned int flags),
	TP_ARGS(inode, ino, flags),
	TP_STRUCT__entry(
		__field(void *, inode)
		__field(unsigned long long, ino)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->inode = inode;
		__entry->ino = ino;
		__entry->flags = flags;
	),
	TP_printk("%p %llu %u", __entry->inode, __entry->ino, __entry->flags)
);

DEFINE_OCFS2_ULL_EVENT(ocfs2_mark_inode_dirty);

/* End of trace events for fs/ocfs2/inode.c. */

/* Trace events for fs/ocfs2/extent_map.c.
*/ 1575 + 1576 + TRACE_EVENT(ocfs2_read_virt_blocks, 1577 + TP_PROTO(void *inode, unsigned long long vblock, int nr, 1578 + void *bhs, unsigned int flags, void *validate), 1579 + TP_ARGS(inode, vblock, nr, bhs, flags, validate), 1580 + TP_STRUCT__entry( 1581 + __field(void *, inode) 1582 + __field(unsigned long long, vblock) 1583 + __field(int, nr) 1584 + __field(void *, bhs) 1585 + __field(unsigned int, flags) 1586 + __field(void *, validate) 1587 + ), 1588 + TP_fast_assign( 1589 + __entry->inode = inode; 1590 + __entry->vblock = vblock; 1591 + __entry->nr = nr; 1592 + __entry->bhs = bhs; 1593 + __entry->flags = flags; 1594 + __entry->validate = validate; 1595 + ), 1596 + TP_printk("%p %llu %d %p %x %p", __entry->inode, __entry->vblock, 1597 + __entry->nr, __entry->bhs, __entry->flags, __entry->validate) 1598 + ); 1599 + 1600 + /* End of trace events for fs/ocfs2/extent_map.c. */ 1601 + 1602 + /* Trace events for fs/ocfs2/slot_map.c. */ 1603 + 1604 + DEFINE_OCFS2_UINT_EVENT(ocfs2_refresh_slot_info); 1605 + 1606 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers); 1607 + 1608 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_map_slot_buffers_block); 1609 + 1610 + DEFINE_OCFS2_INT_EVENT(ocfs2_find_slot); 1611 + 1612 + /* End of trace events for fs/ocfs2/slot_map.c. */ 1613 + 1614 + /* Trace events for fs/ocfs2/heartbeat.c. */ 1615 + 1616 + DEFINE_OCFS2_INT_EVENT(ocfs2_do_node_down); 1617 + 1618 + /* End of trace events for fs/ocfs2/heartbeat.c. */ 1619 + 1620 + /* Trace events for fs/ocfs2/super.c. 
*/ 1621 + 1622 + TRACE_EVENT(ocfs2_remount, 1623 + TP_PROTO(unsigned long s_flags, unsigned long osb_flags, int flags), 1624 + TP_ARGS(s_flags, osb_flags, flags), 1625 + TP_STRUCT__entry( 1626 + __field(unsigned long, s_flags) 1627 + __field(unsigned long, osb_flags) 1628 + __field(int, flags) 1629 + ), 1630 + TP_fast_assign( 1631 + __entry->s_flags = s_flags; 1632 + __entry->osb_flags = osb_flags; 1633 + __entry->flags = flags; 1634 + ), 1635 + TP_printk("%lu %lu %d", __entry->s_flags, 1636 + __entry->osb_flags, __entry->flags) 1637 + ); 1638 + 1639 + TRACE_EVENT(ocfs2_fill_super, 1640 + TP_PROTO(void *sb, void *data, int silent), 1641 + TP_ARGS(sb, data, silent), 1642 + TP_STRUCT__entry( 1643 + __field(void *, sb) 1644 + __field(void *, data) 1645 + __field(int, silent) 1646 + ), 1647 + TP_fast_assign( 1648 + __entry->sb = sb; 1649 + __entry->data = data; 1650 + __entry->silent = silent; 1651 + ), 1652 + TP_printk("%p %p %d", __entry->sb, 1653 + __entry->data, __entry->silent) 1654 + ); 1655 + 1656 + TRACE_EVENT(ocfs2_parse_options, 1657 + TP_PROTO(int is_remount, char *options), 1658 + TP_ARGS(is_remount, options), 1659 + TP_STRUCT__entry( 1660 + __field(int, is_remount) 1661 + __string(options, options) 1662 + ), 1663 + TP_fast_assign( 1664 + __entry->is_remount = is_remount; 1665 + __assign_str(options, options); 1666 + ), 1667 + TP_printk("%d %s", __entry->is_remount, __get_str(options)) 1668 + ); 1669 + 1670 + DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super); 1671 + 1672 + TRACE_EVENT(ocfs2_statfs, 1673 + TP_PROTO(void *sb, void *buf), 1674 + TP_ARGS(sb, buf), 1675 + TP_STRUCT__entry( 1676 + __field(void *, sb) 1677 + __field(void *, buf) 1678 + ), 1679 + TP_fast_assign( 1680 + __entry->sb = sb; 1681 + __entry->buf = buf; 1682 + ), 1683 + TP_printk("%p %p", __entry->sb, __entry->buf) 1684 + ); 1685 + 1686 + DEFINE_OCFS2_POINTER_EVENT(ocfs2_dismount_volume); 1687 + 1688 + TRACE_EVENT(ocfs2_initialize_super, 1689 + TP_PROTO(char *label, char *uuid_str, unsigned 
long long root_dir, 1690 + unsigned long long system_dir, int cluster_bits), 1691 + TP_ARGS(label, uuid_str, root_dir, system_dir, cluster_bits), 1692 + TP_STRUCT__entry( 1693 + __string(label, label) 1694 + __string(uuid_str, uuid_str) 1695 + __field(unsigned long long, root_dir) 1696 + __field(unsigned long long, system_dir) 1697 + __field(int, cluster_bits) 1698 + ), 1699 + TP_fast_assign( 1700 + __assign_str(label, label); 1701 + __assign_str(uuid_str, uuid_str); 1702 + __entry->root_dir = root_dir; 1703 + __entry->system_dir = system_dir; 1704 + __entry->cluster_bits = cluster_bits; 1705 + ), 1706 + TP_printk("%s %s %llu %llu %d", __get_str(label), __get_str(uuid_str), 1707 + __entry->root_dir, __entry->system_dir, __entry->cluster_bits) 1708 + ); 1709 + 1710 + /* End of trace events for fs/ocfs2/super.c. */ 1711 + 1712 + /* Trace events for fs/ocfs2/xattr.c. */ 1713 + 1714 + DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_xattr_block); 1715 + 1716 + DEFINE_OCFS2_UINT_EVENT(ocfs2_xattr_extend_allocation); 1717 + 1718 + TRACE_EVENT(ocfs2_init_xattr_set_ctxt, 1719 + TP_PROTO(const char *name, int meta, int clusters, int credits), 1720 + TP_ARGS(name, meta, clusters, credits), 1721 + TP_STRUCT__entry( 1722 + __string(name, name) 1723 + __field(int, meta) 1724 + __field(int, clusters) 1725 + __field(int, credits) 1726 + ), 1727 + TP_fast_assign( 1728 + __assign_str(name, name); 1729 + __entry->meta = meta; 1730 + __entry->clusters = clusters; 1731 + __entry->credits = credits; 1732 + ), 1733 + TP_printk("%s %d %d %d", __get_str(name), __entry->meta, 1734 + __entry->clusters, __entry->credits) 1735 + ); 1736 + 1737 + DECLARE_EVENT_CLASS(ocfs2__xattr_find, 1738 + TP_PROTO(unsigned long long ino, const char *name, int name_index, 1739 + unsigned int hash, unsigned long long location, 1740 + int xe_index), 1741 + TP_ARGS(ino, name, name_index, hash, location, xe_index), 1742 + TP_STRUCT__entry( 1743 + __field(unsigned long long, ino) 1744 + __string(name, name) 1745 + 
__field(int, name_index) 1746 + __field(unsigned int, hash) 1747 + __field(unsigned long long, location) 1748 + __field(int, xe_index) 1749 + ), 1750 + TP_fast_assign( 1751 + __entry->ino = ino; 1752 + __assign_str(name, name); 1753 + __entry->name_index = name_index; 1754 + __entry->hash = hash; 1755 + __entry->location = location; 1756 + __entry->xe_index = xe_index; 1757 + ), 1758 + TP_printk("%llu %s %d %u %llu %d", __entry->ino, __get_str(name), 1759 + __entry->name_index, __entry->hash, __entry->location, 1760 + __entry->xe_index) 1761 + ); 1762 + 1763 + #define DEFINE_OCFS2_XATTR_FIND_EVENT(name) \ 1764 + DEFINE_EVENT(ocfs2__xattr_find, name, \ 1765 + TP_PROTO(unsigned long long ino, const char *name, int name_index, \ 1766 + unsigned int hash, unsigned long long bucket, \ 1767 + int xe_index), \ 1768 + TP_ARGS(ino, name, name_index, hash, bucket, xe_index)) 1769 + 1770 + DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_bucket_find); 1771 + 1772 + DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find); 1773 + 1774 + DEFINE_OCFS2_XATTR_FIND_EVENT(ocfs2_xattr_index_block_find_rec); 1775 + 1776 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_iterate_xattr_buckets); 1777 + 1778 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_iterate_xattr_bucket); 1779 + 1780 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_cp_xattr_block_to_bucket_begin); 1781 + 1782 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cp_xattr_block_to_bucket_end); 1783 + 1784 + DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block_begin); 1785 + 1786 + DEFINE_OCFS2_ULL_EVENT(ocfs2_xattr_create_index_block); 1787 + 1788 + DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_defrag_xattr_bucket); 1789 + 1790 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_bucket_cross_cluster); 1791 + 1792 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_divide_xattr_bucket_begin); 1793 + 1794 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_divide_xattr_bucket_move); 1795 + 1796 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_cp_xattr_bucket); 1797 + 1798 + 
DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_mv_xattr_buckets); 1799 + 1800 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_adjust_xattr_cross_cluster); 1801 + 1802 + DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_begin); 1803 + 1804 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_add_new_xattr_cluster); 1805 + 1806 + DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_add_new_xattr_cluster_insert); 1807 + 1808 + DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_extend_xattr_bucket); 1809 + 1810 + DEFINE_OCFS2_ULL_EVENT(ocfs2_add_new_xattr_bucket); 1811 + 1812 + DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_xattr_bucket_value_truncate); 1813 + 1814 + DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_rm_xattr_cluster); 1815 + 1816 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_header); 1817 + 1818 + DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_create_empty_xattr_block); 1819 + 1820 + DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_bucket); 1821 + 1822 + DEFINE_OCFS2_STRING_EVENT(ocfs2_xattr_set_entry_index_block); 1823 + 1824 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_xattr_bucket_value_refcount); 1825 + 1826 + DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_reflink_xattr_buckets); 1827 + 1828 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_reflink_xattr_rec); 1829 + 1830 + /* End of trace events for fs/ocfs2/xattr.c. */ 1831 + 1832 + /* Trace events for fs/ocfs2/reservations.c. 
*/ 1833 + 1834 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_insert); 1835 + 1836 + DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_begin); 1837 + 1838 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_find_free_bits_end); 1839 + 1840 + TRACE_EVENT(ocfs2_resv_find_window_begin, 1841 + TP_PROTO(unsigned int r_start, unsigned int r_end, unsigned int goal, 1842 + unsigned int wanted, int empty_root), 1843 + TP_ARGS(r_start, r_end, goal, wanted, empty_root), 1844 + TP_STRUCT__entry( 1845 + __field(unsigned int, r_start) 1846 + __field(unsigned int, r_end) 1847 + __field(unsigned int, goal) 1848 + __field(unsigned int, wanted) 1849 + __field(int, empty_root) 1850 + ), 1851 + TP_fast_assign( 1852 + __entry->r_start = r_start; 1853 + __entry->r_end = r_end; 1854 + __entry->goal = goal; 1855 + __entry->wanted = wanted; 1856 + __entry->empty_root = empty_root; 1857 + ), 1858 + TP_printk("%u %u %u %u %d", __entry->r_start, __entry->r_end, 1859 + __entry->goal, __entry->wanted, __entry->empty_root) 1860 + ); 1861 + 1862 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resv_find_window_prev); 1863 + 1864 + DEFINE_OCFS2_INT_INT_EVENT(ocfs2_resv_find_window_next); 1865 + 1866 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_cannibalize_resv_begin); 1867 + 1868 + TRACE_EVENT(ocfs2_cannibalize_resv_end, 1869 + TP_PROTO(unsigned int start, unsigned int end, unsigned int len, 1870 + unsigned int last_start, unsigned int last_len), 1871 + TP_ARGS(start, end, len, last_start, last_len), 1872 + TP_STRUCT__entry( 1873 + __field(unsigned int, start) 1874 + __field(unsigned int, end) 1875 + __field(unsigned int, len) 1876 + __field(unsigned int, last_start) 1877 + __field(unsigned int, last_len) 1878 + ), 1879 + TP_fast_assign( 1880 + __entry->start = start; 1881 + __entry->end = end; 1882 + __entry->len = len; 1883 + __entry->last_start = last_start; 1884 + __entry->last_len = last_len; 1885 + ), 1886 + TP_printk("%u %u %u %u %u", __entry->start, __entry->end, 1887 + __entry->len, 
__entry->last_start, __entry->last_len) 1888 + ); 1889 + 1890 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_resmap_resv_bits); 1891 + 1892 + TRACE_EVENT(ocfs2_resmap_claimed_bits_begin, 1893 + TP_PROTO(unsigned int cstart, unsigned int cend, unsigned int clen, 1894 + unsigned int r_start, unsigned int r_end, unsigned int r_len, 1895 + unsigned int last_start, unsigned int last_len), 1896 + TP_ARGS(cstart, cend, clen, r_start, r_end, 1897 + r_len, last_start, last_len), 1898 + TP_STRUCT__entry( 1899 + __field(unsigned int, cstart) 1900 + __field(unsigned int, cend) 1901 + __field(unsigned int, clen) 1902 + __field(unsigned int, r_start) 1903 + __field(unsigned int, r_end) 1904 + __field(unsigned int, r_len) 1905 + __field(unsigned int, last_start) 1906 + __field(unsigned int, last_len) 1907 + ), 1908 + TP_fast_assign( 1909 + __entry->cstart = cstart; 1910 + __entry->cend = cend; 1911 + __entry->clen = clen; 1912 + __entry->r_start = r_start; 1913 + __entry->r_end = r_end; 1914 + __entry->r_len = r_len; 1915 + __entry->last_start = last_start; 1916 + __entry->last_len = last_len; 1917 + ), 1918 + TP_printk("%u %u %u %u %u %u %u %u", 1919 + __entry->cstart, __entry->cend, __entry->clen, 1920 + __entry->r_start, __entry->r_end, __entry->r_len, 1921 + __entry->last_start, __entry->last_len) 1922 + ); 1923 + 1924 + TRACE_EVENT(ocfs2_resmap_claimed_bits_end, 1925 + TP_PROTO(unsigned int start, unsigned int end, unsigned int len, 1926 + unsigned int last_start, unsigned int last_len), 1927 + TP_ARGS(start, end, len, last_start, last_len), 1928 + TP_STRUCT__entry( 1929 + __field(unsigned int, start) 1930 + __field(unsigned int, end) 1931 + __field(unsigned int, len) 1932 + __field(unsigned int, last_start) 1933 + __field(unsigned int, last_len) 1934 + ), 1935 + TP_fast_assign( 1936 + __entry->start = start; 1937 + __entry->end = end; 1938 + __entry->len = len; 1939 + __entry->last_start = last_start; 1940 + __entry->last_len = last_len; 1941 + ), 1942 + TP_printk("%u %u %u %u %u", 
__entry->start, __entry->end, 1943 + __entry->len, __entry->last_start, __entry->last_len) 1944 + ); 1945 + 1946 + /* End of trace events for fs/ocfs2/reservations.c. */ 1947 + 1948 + /* Trace events for fs/ocfs2/quota_local.c. */ 1949 + 1950 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_recover_local_quota_file); 1951 + 1952 + DEFINE_OCFS2_INT_EVENT(ocfs2_finish_quota_recovery); 1953 + 1954 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(olq_set_dquot); 1955 + 1956 + /* End of trace events for fs/ocfs2/quota_local.c. */ 1957 + 1958 + /* Trace events for fs/ocfs2/quota_global.c. */ 1959 + 1960 + DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_quota_block); 1961 + 1962 + TRACE_EVENT(ocfs2_sync_dquot, 1963 + TP_PROTO(unsigned int dq_id, long long dqb_curspace, 1964 + long long spacechange, long long curinodes, 1965 + long long inodechange), 1966 + TP_ARGS(dq_id, dqb_curspace, spacechange, curinodes, inodechange), 1967 + TP_STRUCT__entry( 1968 + __field(unsigned int, dq_id) 1969 + __field(long long, dqb_curspace) 1970 + __field(long long, spacechange) 1971 + __field(long long, curinodes) 1972 + __field(long long, inodechange) 1973 + ), 1974 + TP_fast_assign( 1975 + __entry->dq_id = dq_id; 1976 + __entry->dqb_curspace = dqb_curspace; 1977 + __entry->spacechange = spacechange; 1978 + __entry->curinodes = curinodes; 1979 + __entry->inodechange = inodechange; 1980 + ), 1981 + TP_printk("%u %lld %lld %lld %lld", __entry->dq_id, 1982 + __entry->dqb_curspace, __entry->spacechange, 1983 + __entry->curinodes, __entry->inodechange) 1984 + ); 1985 + 1986 + TRACE_EVENT(ocfs2_sync_dquot_helper, 1987 + TP_PROTO(unsigned int dq_id, unsigned int dq_type, unsigned long type, 1988 + const char *s_id), 1989 + TP_ARGS(dq_id, dq_type, type, s_id), 1990 + 1991 + TP_STRUCT__entry( 1992 + __field(unsigned int, dq_id) 1993 + __field(unsigned int, dq_type) 1994 + __field(unsigned long, type) 1995 + __string(s_id, s_id) 1996 + ), 1997 + TP_fast_assign( 1998 + __entry->dq_id = dq_id; 1999 + __entry->dq_type = dq_type; 2000 + 
__entry->type = type; 2001 + __assign_str(s_id, s_id); 2002 + ), 2003 + TP_printk("%u %u %lu %s", __entry->dq_id, __entry->dq_type, 2004 + __entry->type, __get_str(s_id)) 2005 + ); 2006 + 2007 + DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_write_dquot); 2008 + 2009 + DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_release_dquot); 2010 + 2011 + DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_acquire_dquot); 2012 + 2013 + DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_mark_dquot_dirty); 2014 + 2015 + /* End of trace events for fs/ocfs2/quota_global.c. */ 2016 + 2017 + /* Trace events for fs/ocfs2/dir.c. */ 2018 + DEFINE_OCFS2_INT_EVENT(ocfs2_search_dirblock); 2019 + 2020 + DEFINE_OCFS2_ULL_EVENT(ocfs2_validate_dir_block); 2021 + 2022 + DEFINE_OCFS2_POINTER_EVENT(ocfs2_find_entry_el); 2023 + 2024 + TRACE_EVENT(ocfs2_dx_dir_search, 2025 + TP_PROTO(unsigned long long ino, int namelen, const char *name, 2026 + unsigned int major_hash, unsigned int minor_hash, 2027 + unsigned long long blkno), 2028 + TP_ARGS(ino, namelen, name, major_hash, minor_hash, blkno), 2029 + TP_STRUCT__entry( 2030 + __field(unsigned long long, ino) 2031 + __field(int, namelen) 2032 + __string(name, name) 2033 + __field(unsigned int, major_hash) 2034 + __field(unsigned int,minor_hash) 2035 + __field(unsigned long long, blkno) 2036 + ), 2037 + TP_fast_assign( 2038 + __entry->ino = ino; 2039 + __entry->namelen = namelen; 2040 + __assign_str(name, name); 2041 + __entry->major_hash = major_hash; 2042 + __entry->minor_hash = minor_hash; 2043 + __entry->blkno = blkno; 2044 + ), 2045 + TP_printk("%llu %.*s %u %u %llu", __entry->ino, 2046 + __entry->namelen, __get_str(name), 2047 + __entry->major_hash, __entry->minor_hash, __entry->blkno) 2048 + ); 2049 + 2050 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_dx_dir_search_leaf_info); 2051 + 2052 + DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_delete_entry_dx); 2053 + 2054 + DEFINE_OCFS2_ULL_EVENT(ocfs2_readdir); 2055 + 2056 + TRACE_EVENT(ocfs2_find_files_on_disk, 2057 + TP_PROTO(int namelen, const char *name, void *blkno, 2058 + 
unsigned long long dir), 2059 + TP_ARGS(namelen, name, blkno, dir), 2060 + TP_STRUCT__entry( 2061 + __field(int, namelen) 2062 + __string(name, name) 2063 + __field(void *, blkno) 2064 + __field(unsigned long long, dir) 2065 + ), 2066 + TP_fast_assign( 2067 + __entry->namelen = namelen; 2068 + __assign_str(name, name); 2069 + __entry->blkno = blkno; 2070 + __entry->dir = dir; 2071 + ), 2072 + TP_printk("%.*s %p %llu", __entry->namelen, __get_str(name), 2073 + __entry->blkno, __entry->dir) 2074 + ); 2075 + 2076 + TRACE_EVENT(ocfs2_check_dir_for_entry, 2077 + TP_PROTO(unsigned long long dir, int namelen, const char *name), 2078 + TP_ARGS(dir, namelen, name), 2079 + TP_STRUCT__entry( 2080 + __field(unsigned long long, dir) 2081 + __field(int, namelen) 2082 + __string(name, name) 2083 + ), 2084 + TP_fast_assign( 2085 + __entry->dir = dir; 2086 + __entry->namelen = namelen; 2087 + __assign_str(name, name); 2088 + ), 2089 + TP_printk("%llu %.*s", __entry->dir, 2090 + __entry->namelen, __get_str(name)) 2091 + ); 2092 + 2093 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_dx_dir_attach_index); 2094 + 2095 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_format_cluster); 2096 + 2097 + TRACE_EVENT(ocfs2_dx_dir_index_root_block, 2098 + TP_PROTO(unsigned long long dir, 2099 + unsigned int major_hash, unsigned int minor_hash, 2100 + int namelen, const char *name, unsigned int num_used), 2101 + TP_ARGS(dir, major_hash, minor_hash, namelen, name, num_used), 2102 + TP_STRUCT__entry( 2103 + __field(unsigned long long, dir) 2104 + __field(unsigned int, major_hash) 2105 + __field(unsigned int, minor_hash) 2106 + __field(int, namelen) 2107 + __string(name, name) 2108 + __field(unsigned int, num_used) 2109 + ), 2110 + TP_fast_assign( 2111 + __entry->dir = dir; 2112 + __entry->major_hash = major_hash; 2113 + __entry->minor_hash = minor_hash; 2114 + __entry->namelen = namelen; 2115 + __assign_str(name, name); 2116 + __entry->num_used = num_used; 2117 + ), 2118 + TP_printk("%llu %x %x %.*s %u", 
__entry->dir, 2119 + __entry->major_hash, __entry->minor_hash, 2120 + __entry->namelen, __get_str(name), __entry->num_used) 2121 + ); 2122 + 2123 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_extend_dir); 2124 + 2125 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_dx_dir_rebalance); 2126 + 2127 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_dx_dir_rebalance_split); 2128 + 2129 + DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_prepare_dir_for_insert); 2130 + 2131 + /* End of trace events for fs/ocfs2/dir.c. */ 2132 + 2133 + /* Trace events for fs/ocfs2/namei.c. */ 2134 + 2135 + DECLARE_EVENT_CLASS(ocfs2__dentry_ops, 2136 + TP_PROTO(void *dir, void *dentry, int name_len, const char *name, 2137 + unsigned long long dir_blkno, unsigned long long extra), 2138 + TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra), 2139 + TP_STRUCT__entry( 2140 + __field(void *, dir) 2141 + __field(void *, dentry) 2142 + __field(int, name_len) 2143 + __string(name, name) 2144 + __field(unsigned long long, dir_blkno) 2145 + __field(unsigned long long, extra) 2146 + ), 2147 + TP_fast_assign( 2148 + __entry->dir = dir; 2149 + __entry->dentry = dentry; 2150 + __entry->name_len = name_len; 2151 + __assign_str(name, name); 2152 + __entry->dir_blkno = dir_blkno; 2153 + __entry->extra = extra; 2154 + ), 2155 + TP_printk("%p %p %.*s %llu %llu", __entry->dir, __entry->dentry, 2156 + __entry->name_len, __get_str(name), 2157 + __entry->dir_blkno, __entry->extra) 2158 + ); 2159 + 2160 + #define DEFINE_OCFS2_DENTRY_OPS(name) \ 2161 + DEFINE_EVENT(ocfs2__dentry_ops, name, \ 2162 + TP_PROTO(void *dir, void *dentry, int name_len, const char *name, \ 2163 + unsigned long long dir_blkno, unsigned long long extra), \ 2164 + TP_ARGS(dir, dentry, name_len, name, dir_blkno, extra)) 2165 + 2166 + DEFINE_OCFS2_DENTRY_OPS(ocfs2_lookup); 2167 + 2168 + DEFINE_OCFS2_DENTRY_OPS(ocfs2_mkdir); 2169 + 2170 + DEFINE_OCFS2_DENTRY_OPS(ocfs2_create); 2171 + 2172 + DEFINE_OCFS2_DENTRY_OPS(ocfs2_unlink); 2173 + 2174 + 
DEFINE_OCFS2_DENTRY_OPS(ocfs2_symlink_create); 2175 + 2176 + DEFINE_OCFS2_DENTRY_OPS(ocfs2_mv_orphaned_inode_to_new); 2177 + 2178 + DEFINE_OCFS2_POINTER_EVENT(ocfs2_lookup_ret); 2179 + 2180 + TRACE_EVENT(ocfs2_mknod, 2181 + TP_PROTO(void *dir, void *dentry, int name_len, const char *name, 2182 + unsigned long long dir_blkno, unsigned long dev, int mode), 2183 + TP_ARGS(dir, dentry, name_len, name, dir_blkno, dev, mode), 2184 + TP_STRUCT__entry( 2185 + __field(void *, dir) 2186 + __field(void *, dentry) 2187 + __field(int, name_len) 2188 + __string(name, name) 2189 + __field(unsigned long long, dir_blkno) 2190 + __field(unsigned long, dev) 2191 + __field(int, mode) 2192 + ), 2193 + TP_fast_assign( 2194 + __entry->dir = dir; 2195 + __entry->dentry = dentry; 2196 + __entry->name_len = name_len; 2197 + __assign_str(name, name); 2198 + __entry->dir_blkno = dir_blkno; 2199 + __entry->dev = dev; 2200 + __entry->mode = mode; 2201 + ), 2202 + TP_printk("%p %p %.*s %llu %lu %d", __entry->dir, __entry->dentry, 2203 + __entry->name_len, __get_str(name), 2204 + __entry->dir_blkno, __entry->dev, __entry->mode) 2205 + ); 2206 + 2207 + TRACE_EVENT(ocfs2_link, 2208 + TP_PROTO(unsigned long long ino, int old_len, const char *old_name, 2209 + int name_len, const char *name), 2210 + TP_ARGS(ino, old_len, old_name, name_len, name), 2211 + TP_STRUCT__entry( 2212 + __field(unsigned long long, ino) 2213 + __field(int, old_len) 2214 + __string(old_name, old_name) 2215 + __field(int, name_len) 2216 + __string(name, name) 2217 + ), 2218 + TP_fast_assign( 2219 + __entry->ino = ino; 2220 + __entry->old_len = old_len; 2221 + __assign_str(old_name, old_name); 2222 + __entry->name_len = name_len; 2223 + __assign_str(name, name); 2224 + ), 2225 + TP_printk("%llu %.*s %.*s", __entry->ino, 2226 + __entry->old_len, __get_str(old_name), 2227 + __entry->name_len, __get_str(name)) 2228 + ); 2229 + 2230 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_unlink_noent); 2231 + 2232 + 
DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock); 2233 + 2234 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_double_lock_end); 2235 + 2236 + TRACE_EVENT(ocfs2_rename, 2237 + TP_PROTO(void *old_dir, void *old_dentry, 2238 + void *new_dir, void *new_dentry, 2239 + int old_len, const char *old_name, 2240 + int new_len, const char *new_name), 2241 + TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, 2242 + old_len, old_name, new_len, new_name), 2243 + TP_STRUCT__entry( 2244 + __field(void *, old_dir) 2245 + __field(void *, old_dentry) 2246 + __field(void *, new_dir) 2247 + __field(void *, new_dentry) 2248 + __field(int, old_len) 2249 + __string(old_name, old_name) 2250 + __field(int, new_len) 2251 + __string(new_name, new_name) 2252 + ), 2253 + TP_fast_assign( 2254 + __entry->old_dir = old_dir; 2255 + __entry->old_dentry = old_dentry; 2256 + __entry->new_dir = new_dir; 2257 + __entry->new_dentry = new_dentry; 2258 + __entry->old_len = old_len; 2259 + __assign_str(old_name, old_name); 2260 + __entry->new_len = new_len; 2261 + __assign_str(new_name, new_name); 2262 + ), 2263 + TP_printk("%p %p %p %p %.*s %.*s", 2264 + __entry->old_dir, __entry->old_dentry, 2265 + __entry->new_dir, __entry->new_dentry, 2266 + __entry->old_len, __get_str(old_name), 2267 + __entry->new_len, __get_str(new_name)) 2268 + ); 2269 + 2270 + TRACE_EVENT(ocfs2_rename_target_exists, 2271 + TP_PROTO(int new_len, const char *new_name), 2272 + TP_ARGS(new_len, new_name), 2273 + TP_STRUCT__entry( 2274 + __field(int, new_len) 2275 + __string(new_name, new_name) 2276 + ), 2277 + TP_fast_assign( 2278 + __entry->new_len = new_len; 2279 + __assign_str(new_name, new_name); 2280 + ), 2281 + TP_printk("%.*s", __entry->new_len, __get_str(new_name)) 2282 + ); 2283 + 2284 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_rename_disagree); 2285 + 2286 + TRACE_EVENT(ocfs2_rename_over_existing, 2287 + TP_PROTO(unsigned long long new_blkno, void *new_bh, 2288 + unsigned long long newdi_blkno), 2289 + TP_ARGS(new_blkno, new_bh, newdi_blkno), 
2290 + TP_STRUCT__entry( 2291 + __field(unsigned long long, new_blkno) 2292 + __field(void *, new_bh) 2293 + __field(unsigned long long, newdi_blkno) 2294 + ), 2295 + TP_fast_assign( 2296 + __entry->new_blkno = new_blkno; 2297 + __entry->new_bh = new_bh; 2298 + __entry->newdi_blkno = newdi_blkno; 2299 + ), 2300 + TP_printk("%llu %p %llu", __entry->new_blkno, __entry->new_bh, 2301 + __entry->newdi_blkno) 2302 + ); 2303 + 2304 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_create_symlink_data); 2305 + 2306 + TRACE_EVENT(ocfs2_symlink_begin, 2307 + TP_PROTO(void *dir, void *dentry, const char *symname, 2308 + int len, const char *name), 2309 + TP_ARGS(dir, dentry, symname, len, name), 2310 + TP_STRUCT__entry( 2311 + __field(void *, dir) 2312 + __field(void *, dentry) 2313 + __field(const char *, symname) 2314 + __field(int, len) 2315 + __string(name, name) 2316 + ), 2317 + TP_fast_assign( 2318 + __entry->dir = dir; 2319 + __entry->dentry = dentry; 2320 + __entry->symname = symname; 2321 + __entry->len = len; 2322 + __assign_str(name, name); 2323 + ), 2324 + TP_printk("%p %p %s %.*s", __entry->dir, __entry->dentry, 2325 + __entry->symname, __entry->len, __get_str(name)) 2326 + ); 2327 + 2328 + TRACE_EVENT(ocfs2_blkno_stringify, 2329 + TP_PROTO(unsigned long long blkno, const char *name, int namelen), 2330 + TP_ARGS(blkno, name, namelen), 2331 + TP_STRUCT__entry( 2332 + __field(unsigned long long, blkno) 2333 + __string(name, name) 2334 + __field(int, namelen) 2335 + ), 2336 + TP_fast_assign( 2337 + __entry->blkno = blkno; 2338 + __assign_str(name, name); 2339 + __entry->namelen = namelen; 2340 + ), 2341 + TP_printk("%llu %s %d", __entry->blkno, __get_str(name), 2342 + __entry->namelen) 2343 + ); 2344 + 2345 + DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_add_begin); 2346 + 2347 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_orphan_add_end); 2348 + 2349 + TRACE_EVENT(ocfs2_orphan_del, 2350 + TP_PROTO(unsigned long long dir, const char *name, int namelen), 2351 + TP_ARGS(dir, name, namelen), 2352 + 
TP_STRUCT__entry( 2353 + __field(unsigned long long, dir) 2354 + __string(name, name) 2355 + __field(int, namelen) 2356 + ), 2357 + TP_fast_assign( 2358 + __entry->dir = dir; 2359 + __assign_str(name, name); 2360 + __entry->namelen = namelen; 2361 + ), 2362 + TP_printk("%llu %s %d", __entry->dir, __get_str(name), 2363 + __entry->namelen) 2364 + ); 2365 + 2366 + /* End of trace events for fs/ocfs2/namei.c. */ 2367 + 2368 + /* Trace events for fs/ocfs2/dcache.c. */ 2369 + 2370 + TRACE_EVENT(ocfs2_dentry_revalidate, 2371 + TP_PROTO(void *dentry, int len, const char *name), 2372 + TP_ARGS(dentry, len, name), 2373 + TP_STRUCT__entry( 2374 + __field(void *, dentry) 2375 + __field(int, len) 2376 + __string(name, name) 2377 + ), 2378 + TP_fast_assign( 2379 + __entry->dentry = dentry; 2380 + __entry->len = len; 2381 + __assign_str(name, name); 2382 + ), 2383 + TP_printk("%p %.*s", __entry->dentry, __entry->len, __get_str(name)) 2384 + ); 2385 + 2386 + TRACE_EVENT(ocfs2_dentry_revalidate_negative, 2387 + TP_PROTO(int len, const char *name, unsigned long pgen, 2388 + unsigned long gen), 2389 + TP_ARGS(len, name, pgen, gen), 2390 + TP_STRUCT__entry( 2391 + __field(int, len) 2392 + __string(name, name) 2393 + __field(unsigned long, pgen) 2394 + __field(unsigned long, gen) 2395 + ), 2396 + TP_fast_assign( 2397 + __entry->len = len; 2398 + __assign_str(name, name); 2399 + __entry->pgen = pgen; 2400 + __entry->gen = gen; 2401 + ), 2402 + TP_printk("%.*s %lu %lu", __entry->len, __get_str(name), 2403 + __entry->pgen, __entry->gen) 2404 + ); 2405 + 2406 + DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_delete); 2407 + 2408 + DEFINE_OCFS2_ULL_INT_EVENT(ocfs2_dentry_revalidate_orphaned); 2409 + 2410 + DEFINE_OCFS2_ULL_EVENT(ocfs2_dentry_revalidate_nofsdata); 2411 + 2412 + DEFINE_OCFS2_INT_EVENT(ocfs2_dentry_revalidate_ret); 2413 + 2414 + TRACE_EVENT(ocfs2_find_local_alias, 2415 + TP_PROTO(int len, const char *name), 2416 + TP_ARGS(len, name), 2417 + TP_STRUCT__entry( 2418 + __field(int, 
len) 2419 + __string(name, name) 2420 + ), 2421 + TP_fast_assign( 2422 + __entry->len = len; 2423 + __assign_str(name, name); 2424 + ), 2425 + TP_printk("%.*s", __entry->len, __get_str(name)) 2426 + ); 2427 + 2428 + TRACE_EVENT(ocfs2_dentry_attach_lock, 2429 + TP_PROTO(int len, const char *name, 2430 + unsigned long long parent, void *fsdata), 2431 + TP_ARGS(len, name, parent, fsdata), 2432 + TP_STRUCT__entry( 2433 + __field(int, len) 2434 + __string(name, name) 2435 + __field(unsigned long long, parent) 2436 + __field(void *, fsdata) 2437 + ), 2438 + TP_fast_assign( 2439 + __entry->len = len; 2440 + __assign_str(name, name); 2441 + __entry->parent = parent; 2442 + __entry->fsdata = fsdata; 2443 + ), 2444 + TP_printk("%.*s %llu %p", __entry->len, __get_str(name), 2445 + __entry->parent, __entry->fsdata) 2446 + ); 2447 + 2448 + TRACE_EVENT(ocfs2_dentry_attach_lock_found, 2449 + TP_PROTO(const char *name, unsigned long long parent, 2450 + unsigned long long ino), 2451 + TP_ARGS(name, parent, ino), 2452 + TP_STRUCT__entry( 2453 + __string(name, name) 2454 + __field(unsigned long long, parent) 2455 + __field(unsigned long long, ino) 2456 + ), 2457 + TP_fast_assign( 2458 + __assign_str(name, name); 2459 + __entry->parent = parent; 2460 + __entry->ino = ino; 2461 + ), 2462 + TP_printk("%s %llu %llu", __get_str(name), __entry->parent, __entry->ino) 2463 + ); 2464 + /* End of trace events for fs/ocfs2/dcache.c. */ 2465 + 2466 + /* Trace events for fs/ocfs2/export.c. 
*/ 2467 + 2468 + TRACE_EVENT(ocfs2_get_dentry_begin, 2469 + TP_PROTO(void *sb, void *handle, unsigned long long blkno), 2470 + TP_ARGS(sb, handle, blkno), 2471 + TP_STRUCT__entry( 2472 + __field(void *, sb) 2473 + __field(void *, handle) 2474 + __field(unsigned long long, blkno) 2475 + ), 2476 + TP_fast_assign( 2477 + __entry->sb = sb; 2478 + __entry->handle = handle; 2479 + __entry->blkno = blkno; 2480 + ), 2481 + TP_printk("%p %p %llu", __entry->sb, __entry->handle, __entry->blkno) 2482 + ); 2483 + 2484 + DEFINE_OCFS2_INT_INT_EVENT(ocfs2_get_dentry_test_bit); 2485 + 2486 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_get_dentry_stale); 2487 + 2488 + DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_get_dentry_generation); 2489 + 2490 + DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_dentry_end); 2491 + 2492 + TRACE_EVENT(ocfs2_get_parent, 2493 + TP_PROTO(void *child, int len, const char *name, 2494 + unsigned long long ino), 2495 + TP_ARGS(child, len, name, ino), 2496 + TP_STRUCT__entry( 2497 + __field(void *, child) 2498 + __field(int, len) 2499 + __string(name, name) 2500 + __field(unsigned long long, ino) 2501 + ), 2502 + TP_fast_assign( 2503 + __entry->child = child; 2504 + __entry->len = len; 2505 + __assign_str(name, name); 2506 + __entry->ino = ino; 2507 + ), 2508 + TP_printk("%p %.*s %llu", __entry->child, __entry->len, 2509 + __get_str(name), __entry->ino) 2510 + ); 2511 + 2512 + DEFINE_OCFS2_POINTER_EVENT(ocfs2_get_parent_end); 2513 + 2514 + TRACE_EVENT(ocfs2_encode_fh_begin, 2515 + TP_PROTO(void *dentry, int name_len, const char *name, 2516 + void *fh, int len, int connectable), 2517 + TP_ARGS(dentry, name_len, name, fh, len, connectable), 2518 + TP_STRUCT__entry( 2519 + __field(void *, dentry) 2520 + __field(int, name_len) 2521 + __string(name, name) 2522 + __field(void *, fh) 2523 + __field(int, len) 2524 + __field(int, connectable) 2525 + ), 2526 + TP_fast_assign( 2527 + __entry->dentry = dentry; 2528 + __entry->name_len = name_len; 2529 + __assign_str(name, name); 2530 + 
__entry->fh = fh; 2531 + __entry->len = len; 2532 + __entry->connectable = connectable; 2533 + ), 2534 + TP_printk("%p %.*s %p %d %d", __entry->dentry, __entry->name_len, 2535 + __get_str(name), __entry->fh, __entry->len, 2536 + __entry->connectable) 2537 + ); 2538 + 2539 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_self); 2540 + 2541 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_encode_fh_parent); 2542 + 2543 + DEFINE_OCFS2_INT_EVENT(ocfs2_encode_fh_type); 2544 + 2545 + /* End of trace events for fs/ocfs2/export.c. */ 2546 + 2547 + /* Trace events for fs/ocfs2/journal.c. */ 2548 + 2549 + DEFINE_OCFS2_UINT_EVENT(ocfs2_commit_cache_begin); 2550 + 2551 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_commit_cache_end); 2552 + 2553 + DEFINE_OCFS2_INT_INT_EVENT(ocfs2_extend_trans); 2554 + 2555 + DEFINE_OCFS2_INT_EVENT(ocfs2_extend_trans_restart); 2556 + 2557 + DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_journal_access); 2558 + 2559 + DEFINE_OCFS2_ULL_EVENT(ocfs2_journal_dirty); 2560 + 2561 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_journal_init); 2562 + 2563 + DEFINE_OCFS2_UINT_EVENT(ocfs2_journal_init_maxlen); 2564 + 2565 + DEFINE_OCFS2_INT_EVENT(ocfs2_journal_shutdown); 2566 + 2567 + DEFINE_OCFS2_POINTER_EVENT(ocfs2_journal_shutdown_wait); 2568 + 2569 + DEFINE_OCFS2_ULL_EVENT(ocfs2_complete_recovery); 2570 + 2571 + DEFINE_OCFS2_INT_EVENT(ocfs2_complete_recovery_end); 2572 + 2573 + TRACE_EVENT(ocfs2_complete_recovery_slot, 2574 + TP_PROTO(int slot, unsigned long long la_ino, 2575 + unsigned long long tl_ino, void *qrec), 2576 + TP_ARGS(slot, la_ino, tl_ino, qrec), 2577 + TP_STRUCT__entry( 2578 + __field(int, slot) 2579 + __field(unsigned long long, la_ino) 2580 + __field(unsigned long long, tl_ino) 2581 + __field(void *, qrec) 2582 + ), 2583 + TP_fast_assign( 2584 + __entry->slot = slot; 2585 + __entry->la_ino = la_ino; 2586 + __entry->tl_ino = tl_ino; 2587 + __entry->qrec = qrec; 2588 + ), 2589 + TP_printk("%d %llu %llu %p", __entry->slot, __entry->la_ino, 2590 + __entry->tl_ino, 
__entry->qrec) 2591 + ); 2592 + 2593 + DEFINE_OCFS2_INT_INT_EVENT(ocfs2_recovery_thread_node); 2594 + 2595 + DEFINE_OCFS2_INT_EVENT(ocfs2_recovery_thread_end); 2596 + 2597 + TRACE_EVENT(ocfs2_recovery_thread, 2598 + TP_PROTO(int node_num, int osb_node_num, int disable, 2599 + void *recovery_thread, int map_set), 2600 + TP_ARGS(node_num, osb_node_num, disable, recovery_thread, map_set), 2601 + TP_STRUCT__entry( 2602 + __field(int, node_num) 2603 + __field(int, osb_node_num) 2604 + __field(int,disable) 2605 + __field(void *, recovery_thread) 2606 + __field(int,map_set) 2607 + ), 2608 + TP_fast_assign( 2609 + __entry->node_num = node_num; 2610 + __entry->osb_node_num = osb_node_num; 2611 + __entry->disable = disable; 2612 + __entry->recovery_thread = recovery_thread; 2613 + __entry->map_set = map_set; 2614 + ), 2615 + TP_printk("%d %d %d %p %d", __entry->node_num, 2616 + __entry->osb_node_num, __entry->disable, 2617 + __entry->recovery_thread, __entry->map_set) 2618 + ); 2619 + 2620 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_replay_journal_recovered); 2621 + 2622 + DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_lock_err); 2623 + 2624 + DEFINE_OCFS2_INT_EVENT(ocfs2_replay_journal_skip); 2625 + 2626 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_recover_node); 2627 + 2628 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_recover_node_skip); 2629 + 2630 + DEFINE_OCFS2_UINT_UINT_EVENT(ocfs2_mark_dead_nodes); 2631 + 2632 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_begin); 2633 + 2634 + DEFINE_OCFS2_UINT_UINT_UINT_EVENT(ocfs2_queue_orphan_scan_end); 2635 + 2636 + DEFINE_OCFS2_ULL_EVENT(ocfs2_orphan_filldir); 2637 + 2638 + DEFINE_OCFS2_INT_EVENT(ocfs2_recover_orphans); 2639 + 2640 + DEFINE_OCFS2_ULL_EVENT(ocfs2_recover_orphans_iput); 2641 + 2642 + DEFINE_OCFS2_INT_EVENT(ocfs2_wait_on_mount); 2643 + 2644 + /* End of trace events for fs/ocfs2/journal.c. */ 2645 + 2646 + /* Trace events for fs/ocfs2/buffer_head_io.c. 
*/ 2647 + 2648 + DEFINE_OCFS2_ULL_UINT_EVENT(ocfs2_read_blocks_sync); 2649 + 2650 + DEFINE_OCFS2_ULL_EVENT(ocfs2_read_blocks_sync_jbd); 2651 + 2652 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_read_blocks_from_disk); 2653 + 2654 + DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_bh); 2655 + 2656 + DEFINE_OCFS2_ULL_INT_INT_INT_EVENT(ocfs2_read_blocks_end); 2657 + 2658 + TRACE_EVENT(ocfs2_write_block, 2659 + TP_PROTO(unsigned long long block, void *ci), 2660 + TP_ARGS(block, ci), 2661 + TP_STRUCT__entry( 2662 + __field(unsigned long long, block) 2663 + __field(void *, ci) 2664 + ), 2665 + TP_fast_assign( 2666 + __entry->block = block; 2667 + __entry->ci = ci; 2668 + ), 2669 + TP_printk("%llu %p", __entry->block, __entry->ci) 2670 + ); 2671 + 2672 + TRACE_EVENT(ocfs2_read_blocks_begin, 2673 + TP_PROTO(void *ci, unsigned long long block, 2674 + unsigned int nr, int flags), 2675 + TP_ARGS(ci, block, nr, flags), 2676 + TP_STRUCT__entry( 2677 + __field(void *, ci) 2678 + __field(unsigned long long, block) 2679 + __field(unsigned int, nr) 2680 + __field(int, flags) 2681 + ), 2682 + TP_fast_assign( 2683 + __entry->ci = ci; 2684 + __entry->block = block; 2685 + __entry->nr = nr; 2686 + __entry->flags = flags; 2687 + ), 2688 + TP_printk("%p %llu %u %d", __entry->ci, __entry->block, 2689 + __entry->nr, __entry->flags) 2690 + ); 2691 + 2692 + /* End of trace events for fs/ocfs2/buffer_head_io.c. */ 2693 + 2694 + /* Trace events for fs/ocfs2/uptodate.c. 
*/ 2695 + 2696 + DEFINE_OCFS2_ULL_EVENT(ocfs2_purge_copied_metadata_tree); 2697 + 2698 + DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_metadata_cache_purge); 2699 + 2700 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_buffer_cached_begin); 2701 + 2702 + TRACE_EVENT(ocfs2_buffer_cached_end, 2703 + TP_PROTO(int index, void *item), 2704 + TP_ARGS(index, item), 2705 + TP_STRUCT__entry( 2706 + __field(int, index) 2707 + __field(void *, item) 2708 + ), 2709 + TP_fast_assign( 2710 + __entry->index = index; 2711 + __entry->item = item; 2712 + ), 2713 + TP_printk("%d %p", __entry->index, __entry->item) 2714 + ); 2715 + 2716 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_append_cache_array); 2717 + 2718 + DEFINE_OCFS2_ULL_ULL_UINT_EVENT(ocfs2_insert_cache_tree); 2719 + 2720 + DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_expand_cache); 2721 + 2722 + DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_set_buffer_uptodate); 2723 + 2724 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_set_buffer_uptodate_begin); 2725 + 2726 + DEFINE_OCFS2_ULL_UINT_UINT_EVENT(ocfs2_remove_metadata_array); 2727 + 2728 + DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_remove_metadata_tree); 2729 + 2730 + DEFINE_OCFS2_ULL_ULL_UINT_UINT_EVENT(ocfs2_remove_block_from_cache); 2731 + 2732 + /* End of trace events for fs/ocfs2/uptodate.c. */ 2733 + #endif /* _TRACE_OCFS2_H */ 2734 + 2735 + /* This part must be outside protection */ 2736 + #undef TRACE_INCLUDE_PATH 2737 + #define TRACE_INCLUDE_PATH . 2738 + #define TRACE_INCLUDE_FILE ocfs2_trace 2739 + #include <trace/define_trace.h>
+22 -23
fs/ocfs2/quota_global.c
··· 11 11 #include <linux/writeback.h> 12 12 #include <linux/workqueue.h> 13 13 14 - #define MLOG_MASK_PREFIX ML_QUOTA 15 14 #include <cluster/masklog.h> 16 15 17 16 #include "ocfs2_fs.h" ··· 26 27 #include "super.h" 27 28 #include "buffer_head_io.h" 28 29 #include "quota.h" 30 + #include "ocfs2_trace.h" 29 31 30 32 /* 31 33 * Locking of quotas with OCFS2 is rather complex. Here are rules that ··· 130 130 struct ocfs2_disk_dqtrailer *dqt = 131 131 ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data); 132 132 133 - mlog(0, "Validating quota block %llu\n", 134 - (unsigned long long)bh->b_blocknr); 133 + trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr); 135 134 136 135 BUG_ON(!buffer_uptodate(bh)); 137 136 ··· 340 341 u64 pcount; 341 342 int status; 342 343 343 - mlog_entry_void(); 344 - 345 344 /* Read global header */ 346 345 gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type], 347 346 OCFS2_INVALID_SLOT); ··· 399 402 msecs_to_jiffies(oinfo->dqi_syncms)); 400 403 401 404 out_err: 402 - mlog_exit(status); 405 + if (status) 406 + mlog_errno(status); 403 407 return status; 404 408 out_unlock: 405 409 ocfs2_unlock_global_qf(oinfo, 0); ··· 506 508 olditime = dquot->dq_dqb.dqb_itime; 507 509 oldbtime = dquot->dq_dqb.dqb_btime; 508 510 ocfs2_global_disk2memdqb(dquot, &dqblk); 509 - mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n", 510 - dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange, 511 - dquot->dq_dqb.dqb_curinodes, (long long)inodechange); 511 + trace_ocfs2_sync_dquot(dquot->dq_id, dquot->dq_dqb.dqb_curspace, 512 + (long long)spacechange, 513 + dquot->dq_dqb.dqb_curinodes, 514 + (long long)inodechange); 512 515 if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags)) 513 516 dquot->dq_dqb.dqb_curspace += spacechange; 514 517 if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags)) ··· 593 594 struct ocfs2_super *osb = OCFS2_SB(sb); 594 595 int status = 0; 595 596 596 - mlog_entry("id=%u qtype=%u 
type=%lu device=%s\n", dquot->dq_id, 597 - dquot->dq_type, type, sb->s_id); 597 + trace_ocfs2_sync_dquot_helper(dquot->dq_id, dquot->dq_type, 598 + type, sb->s_id); 598 599 if (type != dquot->dq_type) 599 600 goto out; 600 601 status = ocfs2_lock_global_qf(oinfo, 1); ··· 620 621 out_ilock: 621 622 ocfs2_unlock_global_qf(oinfo, 1); 622 623 out: 623 - mlog_exit(status); 624 624 return status; 625 625 } 626 626 ··· 645 647 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); 646 648 int status = 0; 647 649 648 - mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); 650 + trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type); 649 651 650 652 handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS); 651 653 if (IS_ERR(handle)) { ··· 658 660 mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex); 659 661 ocfs2_commit_trans(osb, handle); 660 662 out: 661 - mlog_exit(status); 662 663 return status; 663 664 } 664 665 ··· 683 686 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); 684 687 int status = 0; 685 688 686 - mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type); 689 + trace_ocfs2_release_dquot(dquot->dq_id, dquot->dq_type); 687 690 688 691 mutex_lock(&dquot->dq_lock); 689 692 /* Check whether we are not racing with some other dqget() */ ··· 719 722 ocfs2_unlock_global_qf(oinfo, 1); 720 723 out: 721 724 mutex_unlock(&dquot->dq_lock); 722 - mlog_exit(status); 725 + if (status) 726 + mlog_errno(status); 723 727 return status; 724 728 } 725 729 ··· 741 743 int need_alloc = ocfs2_global_qinit_alloc(sb, type); 742 744 handle_t *handle; 743 745 744 - mlog_entry("id=%u, type=%d", dquot->dq_id, type); 746 + trace_ocfs2_acquire_dquot(dquot->dq_id, type); 745 747 mutex_lock(&dquot->dq_lock); 746 748 /* 747 749 * We need an exclusive lock, because we're going to update use count ··· 807 809 set_bit(DQ_ACTIVE_B, &dquot->dq_flags); 808 810 out: 809 811 mutex_unlock(&dquot->dq_lock); 810 - mlog_exit(status); 812 + if (status) 813 + mlog_errno(status); 811 814 return status; 812 815 
} 813 816 ··· 828 829 handle_t *handle; 829 830 struct ocfs2_super *osb = OCFS2_SB(sb); 830 831 831 - mlog_entry("id=%u, type=%d", dquot->dq_id, type); 832 + trace_ocfs2_mark_dquot_dirty(dquot->dq_id, type); 832 833 833 834 /* In case user set some limits, sync dquot immediately to global 834 835 * quota file so that information propagates quicker */ ··· 865 866 out_ilock: 866 867 ocfs2_unlock_global_qf(oinfo, 1); 867 868 out: 868 - mlog_exit(status); 869 + if (status) 870 + mlog_errno(status); 869 871 return status; 870 872 } 871 873 ··· 876 876 handle_t *handle; 877 877 int status = 0; 878 878 struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; 879 - 880 - mlog_entry_void(); 881 879 882 880 status = ocfs2_lock_global_qf(oinfo, 1); 883 881 if (status < 0) ··· 891 893 out_ilock: 892 894 ocfs2_unlock_global_qf(oinfo, 1); 893 895 out: 894 - mlog_exit(status); 896 + if (status) 897 + mlog_errno(status); 895 898 return status; 896 899 } 897 900
+9 -7
fs/ocfs2/quota_local.c
··· 8 8 #include <linux/quotaops.h> 9 9 #include <linux/module.h> 10 10 11 - #define MLOG_MASK_PREFIX ML_QUOTA 12 11 #include <cluster/masklog.h> 13 12 14 13 #include "ocfs2_fs.h" ··· 22 23 #include "quota.h" 23 24 #include "uptodate.h" 24 25 #include "super.h" 26 + #include "ocfs2_trace.h" 25 27 26 28 /* Number of local quota structures per block */ 27 29 static inline unsigned int ol_quota_entries_per_block(struct super_block *sb) ··· 475 475 struct ocfs2_recovery_chunk *rchunk, *next; 476 476 qsize_t spacechange, inodechange; 477 477 478 - mlog_entry("ino=%lu type=%u", (unsigned long)lqinode->i_ino, type); 478 + trace_ocfs2_recover_local_quota_file((unsigned long)lqinode->i_ino, type); 479 479 480 480 list_for_each_entry_safe(rchunk, next, &(rec->r_list[type]), rc_list) { 481 481 chunk = rchunk->rc_chunk; ··· 575 575 } 576 576 if (status < 0) 577 577 free_recovery_list(&(rec->r_list[type])); 578 - mlog_exit(status); 578 + if (status) 579 + mlog_errno(status); 579 580 return status; 580 581 } 581 582 ··· 601 600 for (type = 0; type < MAXQUOTAS; type++) { 602 601 if (list_empty(&(rec->r_list[type]))) 603 602 continue; 604 - mlog(0, "Recovering quota in slot %d\n", slot_num); 603 + trace_ocfs2_finish_quota_recovery(slot_num); 605 604 lqinode = ocfs2_get_system_file_inode(osb, ino[type], slot_num); 606 605 if (!lqinode) { 607 606 status = -ENOENT; ··· 883 882 dqblk->dqb_inodemod = cpu_to_le64(od->dq_dquot.dq_dqb.dqb_curinodes - 884 883 od->dq_originodes); 885 884 spin_unlock(&dq_data_lock); 886 - mlog(0, "Writing local dquot %u space %lld inodes %lld\n", 887 - od->dq_dquot.dq_id, (long long)le64_to_cpu(dqblk->dqb_spacemod), 888 - (long long)le64_to_cpu(dqblk->dqb_inodemod)); 885 + trace_olq_set_dquot( 886 + (unsigned long long)le64_to_cpu(dqblk->dqb_spacemod), 887 + (unsigned long long)le64_to_cpu(dqblk->dqb_inodemod), 888 + od->dq_dquot.dq_id); 889 889 } 890 890 891 891 /* Write dquot to local quota file */
+78 -80
fs/ocfs2/refcounttree.c
··· 16 16 */ 17 17 18 18 #include <linux/sort.h> 19 - #define MLOG_MASK_PREFIX ML_REFCOUNT 20 19 #include <cluster/masklog.h> 21 20 #include "ocfs2.h" 22 21 #include "inode.h" ··· 33 34 #include "aops.h" 34 35 #include "xattr.h" 35 36 #include "namei.h" 37 + #include "ocfs2_trace.h" 36 38 37 39 #include <linux/bio.h> 38 40 #include <linux/blkdev.h> ··· 84 84 struct ocfs2_refcount_block *rb = 85 85 (struct ocfs2_refcount_block *)bh->b_data; 86 86 87 - mlog(0, "Validating refcount block %llu\n", 88 - (unsigned long long)bh->b_blocknr); 87 + trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr); 89 88 90 89 BUG_ON(!buffer_uptodate(bh)); 91 90 ··· 544 545 while ((node = rb_last(root)) != NULL) { 545 546 tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node); 546 547 547 - mlog(0, "Purge tree %llu\n", 548 - (unsigned long long) tree->rf_blkno); 548 + trace_ocfs2_purge_refcount_trees( 549 + (unsigned long long) tree->rf_blkno); 549 550 550 551 rb_erase(&tree->rf_node, root); 551 552 ocfs2_free_refcount_tree(tree); ··· 574 575 575 576 BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL); 576 577 577 - mlog(0, "create tree for inode %lu\n", inode->i_ino); 578 + trace_ocfs2_create_refcount_tree( 579 + (unsigned long long)OCFS2_I(inode)->ip_blkno); 578 580 579 581 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); 580 582 if (ret) { ··· 646 646 di->i_refcount_loc = cpu_to_le64(first_blkno); 647 647 spin_unlock(&oi->ip_lock); 648 648 649 - mlog(0, "created tree for inode %lu, refblock %llu\n", 650 - inode->i_ino, (unsigned long long)first_blkno); 649 + trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno); 651 650 652 651 ocfs2_journal_dirty(handle, di_bh); 653 652 ··· 1255 1256 goto out; 1256 1257 } 1257 1258 1258 - mlog(0, "change index %d, old count %u, change %d\n", index, 1259 - le32_to_cpu(rec->r_refcount), change); 1259 + trace_ocfs2_change_refcount_rec( 1260 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 1261 + 
index, le32_to_cpu(rec->r_refcount), change); 1260 1262 le32_add_cpu(&rec->r_refcount, change); 1261 1263 1262 1264 if (!rec->r_refcount) { ··· 1353 1353 1354 1354 ocfs2_journal_dirty(handle, ref_root_bh); 1355 1355 1356 - mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno, 1357 - le16_to_cpu(new_rb->rf_records.rl_used)); 1356 + trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno, 1357 + le16_to_cpu(new_rb->rf_records.rl_used)); 1358 1358 1359 1359 *ref_leaf_bh = new_bh; 1360 1360 new_bh = NULL; ··· 1466 1466 (struct ocfs2_refcount_block *)new_bh->b_data; 1467 1467 struct ocfs2_refcount_list *new_rl = &new_rb->rf_records; 1468 1468 1469 - mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n", 1470 - (unsigned long long)ref_leaf_bh->b_blocknr, 1471 - le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used)); 1469 + trace_ocfs2_divide_leaf_refcount_block( 1470 + (unsigned long long)ref_leaf_bh->b_blocknr, 1471 + le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used)); 1472 1472 1473 1473 /* 1474 1474 * XXX: Improvement later. ··· 1601 1601 1602 1602 ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh); 1603 1603 1604 - mlog(0, "insert new leaf block %llu at %u\n", 1605 - (unsigned long long)new_bh->b_blocknr, new_cpos); 1604 + trace_ocfs2_new_leaf_refcount_block( 1605 + (unsigned long long)new_bh->b_blocknr, new_cpos); 1606 1606 1607 1607 /* Insert the new leaf block with the specific offset cpos. 
*/ 1608 1608 ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr, ··· 1794 1794 (le16_to_cpu(rf_list->rl_used) - index) * 1795 1795 sizeof(struct ocfs2_refcount_rec)); 1796 1796 1797 - mlog(0, "insert refcount record start %llu, len %u, count %u " 1798 - "to leaf block %llu at index %d\n", 1799 - (unsigned long long)le64_to_cpu(rec->r_cpos), 1800 - le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount), 1801 - (unsigned long long)ref_leaf_bh->b_blocknr, index); 1797 + trace_ocfs2_insert_refcount_rec( 1798 + (unsigned long long)ref_leaf_bh->b_blocknr, index, 1799 + (unsigned long long)le64_to_cpu(rec->r_cpos), 1800 + le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount)); 1802 1801 1803 1802 rf_list->rl_recs[index] = *rec; 1804 1803 ··· 1849 1850 1850 1851 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); 1851 1852 1852 - mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n", 1853 - le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters), 1854 - le64_to_cpu(split_rec->r_cpos), 1855 - le32_to_cpu(split_rec->r_clusters)); 1853 + trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos), 1854 + le32_to_cpu(orig_rec->r_clusters), 1855 + le32_to_cpu(orig_rec->r_refcount), 1856 + le64_to_cpu(split_rec->r_cpos), 1857 + le32_to_cpu(split_rec->r_clusters), 1858 + le32_to_cpu(split_rec->r_refcount)); 1856 1859 1857 1860 /* 1858 1861 * If we just need to split the header or tail clusters, ··· 1968 1967 1969 1968 if (split_rec->r_refcount) { 1970 1969 rf_list->rl_recs[index] = *split_rec; 1971 - mlog(0, "insert refcount record start %llu, len %u, count %u " 1972 - "to leaf block %llu at index %d\n", 1973 - (unsigned long long)le64_to_cpu(split_rec->r_cpos), 1974 - le32_to_cpu(split_rec->r_clusters), 1975 - le32_to_cpu(split_rec->r_refcount), 1976 - (unsigned long long)ref_leaf_bh->b_blocknr, index); 1970 + trace_ocfs2_split_refcount_rec_insert( 1971 + (unsigned long long)ref_leaf_bh->b_blocknr, index, 1972 + 
(unsigned long long)le64_to_cpu(split_rec->r_cpos), 1973 + le32_to_cpu(split_rec->r_clusters), 1974 + le32_to_cpu(split_rec->r_refcount)); 1977 1975 1978 1976 if (merge) 1979 1977 ocfs2_refcount_rec_merge(rb, index); ··· 1997 1997 struct ocfs2_refcount_rec rec; 1998 1998 unsigned int set_len = 0; 1999 1999 2000 - mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n", 2000 + trace_ocfs2_increase_refcount_begin( 2001 2001 (unsigned long long)ocfs2_metadata_cache_owner(ci), 2002 2002 (unsigned long long)cpos, len); 2003 2003 ··· 2024 2024 */ 2025 2025 if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos && 2026 2026 set_len <= len) { 2027 - mlog(0, "increase refcount rec, start %llu, len %u, " 2028 - "count %u\n", (unsigned long long)cpos, set_len, 2029 - le32_to_cpu(rec.r_refcount)); 2027 + trace_ocfs2_increase_refcount_change( 2028 + (unsigned long long)cpos, set_len, 2029 + le32_to_cpu(rec.r_refcount)); 2030 2030 ret = ocfs2_change_refcount_rec(handle, ci, 2031 2031 ref_leaf_bh, index, 2032 2032 merge, 1); ··· 2037 2037 } else if (!rec.r_refcount) { 2038 2038 rec.r_refcount = cpu_to_le32(1); 2039 2039 2040 - mlog(0, "insert refcount rec, start %llu, len %u\n", 2040 + trace_ocfs2_increase_refcount_insert( 2041 2041 (unsigned long long)le64_to_cpu(rec.r_cpos), 2042 2042 set_len); 2043 2043 ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh, ··· 2055 2055 rec.r_clusters = cpu_to_le32(set_len); 2056 2056 le32_add_cpu(&rec.r_refcount, 1); 2057 2057 2058 - mlog(0, "split refcount rec, start %llu, " 2059 - "len %u, count %u\n", 2058 + trace_ocfs2_increase_refcount_split( 2060 2059 (unsigned long long)le64_to_cpu(rec.r_cpos), 2061 2060 set_len, le32_to_cpu(rec.r_refcount)); 2062 2061 ret = ocfs2_split_refcount_rec(handle, ci, ··· 2093 2094 struct ocfs2_extent_tree et; 2094 2095 2095 2096 BUG_ON(rb->rf_records.rl_used); 2097 + 2098 + trace_ocfs2_remove_refcount_extent( 2099 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 2100 + (unsigned long 
long)ref_leaf_bh->b_blocknr, 2101 + le32_to_cpu(rb->rf_cpos)); 2096 2102 2097 2103 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); 2098 2104 ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos), ··· 2141 2137 if (!rb->rf_list.l_next_free_rec) { 2142 2138 BUG_ON(rb->rf_clusters); 2143 2139 2144 - mlog(0, "reset refcount tree root %llu to be a record block.\n", 2140 + trace_ocfs2_restore_refcount_block( 2145 2141 (unsigned long long)ref_root_bh->b_blocknr); 2146 2142 2147 2143 rb->rf_flags = 0; ··· 2188 2184 BUG_ON(cpos + len > 2189 2185 le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters)); 2190 2186 2187 + trace_ocfs2_decrease_refcount_rec( 2188 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 2189 + (unsigned long long)cpos, len); 2190 + 2191 2191 if (cpos == le64_to_cpu(rec->r_cpos) && 2192 2192 len == le32_to_cpu(rec->r_clusters)) 2193 2193 ret = ocfs2_change_refcount_rec(handle, ci, ··· 2203 2195 2204 2196 le32_add_cpu(&split.r_refcount, -1); 2205 2197 2206 - mlog(0, "split refcount rec, start %llu, " 2207 - "len %u, count %u, original start %llu, len %u\n", 2208 - (unsigned long long)le64_to_cpu(split.r_cpos), 2209 - len, le32_to_cpu(split.r_refcount), 2210 - (unsigned long long)le64_to_cpu(rec->r_cpos), 2211 - le32_to_cpu(rec->r_clusters)); 2212 2198 ret = ocfs2_split_refcount_rec(handle, ci, 2213 2199 ref_root_bh, ref_leaf_bh, 2214 2200 &split, index, 1, ··· 2241 2239 struct super_block *sb = ocfs2_metadata_cache_get_super(ci); 2242 2240 struct buffer_head *ref_leaf_bh = NULL; 2243 2241 2244 - mlog(0, "Tree owner %llu, decrease refcount start %llu, " 2245 - "len %u, delete %u\n", 2246 - (unsigned long long)ocfs2_metadata_cache_owner(ci), 2247 - (unsigned long long)cpos, len, delete); 2242 + trace_ocfs2_decrease_refcount( 2243 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 2244 + (unsigned long long)cpos, len, delete); 2248 2245 2249 2246 while (len) { 2250 2247 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, ··· 2353 2352 { 
2354 2353 int ret; 2355 2354 2356 - mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n", 2357 - inode->i_ino, cpos, len, phys); 2355 + trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno, 2356 + cpos, len, phys); 2358 2357 2359 2358 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { 2360 2359 ocfs2_error(inode->i_sb, "Inode %lu want to use refcount " ··· 2393 2392 struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL; 2394 2393 u32 len; 2395 2394 2396 - mlog(0, "start_cpos %llu, clusters %u\n", 2397 - (unsigned long long)start_cpos, clusters); 2398 2395 while (clusters) { 2399 2396 ret = ocfs2_get_refcount_rec(ci, ref_root_bh, 2400 2397 cpos, clusters, &rec, ··· 2426 2427 2427 2428 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; 2428 2429 2429 - mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu," 2430 - "rec->r_clusters %u, rec->r_refcount %u, index %d\n", 2431 - recs_add, (unsigned long long)cpos, clusters, 2432 - (unsigned long long)le64_to_cpu(rec.r_cpos), 2433 - le32_to_cpu(rec.r_clusters), 2434 - le32_to_cpu(rec.r_refcount), index); 2430 + trace_ocfs2_calc_refcount_meta_credits_iterate( 2431 + recs_add, (unsigned long long)cpos, clusters, 2432 + (unsigned long long)le64_to_cpu(rec.r_cpos), 2433 + le32_to_cpu(rec.r_clusters), 2434 + le32_to_cpu(rec.r_refcount), index); 2435 2435 2436 2436 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + 2437 2437 le32_to_cpu(rec.r_clusters)) - cpos; ··· 2486 2488 if (!ref_blocks) 2487 2489 goto out; 2488 2490 2489 - mlog(0, "we need ref_blocks %d\n", ref_blocks); 2490 2491 *meta_add += ref_blocks; 2491 2492 *credits += ref_blocks; 2492 2493 ··· 2511 2514 } 2512 2515 2513 2516 out: 2517 + 2518 + trace_ocfs2_calc_refcount_meta_credits( 2519 + (unsigned long long)start_cpos, clusters, 2520 + *meta_add, *credits); 2514 2521 brelse(ref_leaf_bh); 2515 2522 brelse(prev_bh); 2516 2523 return ret; ··· 2579 2578 goto out; 2580 2579 } 2581 2580 2582 - mlog(0, "reserve new metadata 
%d blocks, credits = %d\n", 2583 - *ref_blocks, *credits); 2581 + trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits); 2584 2582 2585 2583 out: 2586 2584 brelse(ref_root_bh); ··· 2886 2886 goto out; 2887 2887 } 2888 2888 2889 - mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n", 2890 - meta_add, num_clusters, *credits); 2889 + trace_ocfs2_lock_refcount_allocators(meta_add, *credits); 2891 2890 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add, 2892 2891 meta_ac); 2893 2892 if (ret) { ··· 2936 2937 loff_t offset, end, map_end; 2937 2938 struct address_space *mapping = context->inode->i_mapping; 2938 2939 2939 - mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster, 2940 - new_cluster, new_len, cpos); 2940 + trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 2941 + new_cluster, new_len); 2941 2942 2942 2943 readahead_pages = 2943 2944 (ocfs2_cow_contig_clusters(sb) << ··· 3030 3031 struct buffer_head *old_bh = NULL; 3031 3032 struct buffer_head *new_bh = NULL; 3032 3033 3033 - mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster, 3034 - new_cluster, new_len); 3034 + trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, 3035 + new_cluster, new_len); 3035 3036 3036 3037 for (i = 0; i < blocks; i++, old_block++, new_block++) { 3037 3038 new_bh = sb_getblk(osb->sb, new_block); ··· 3084 3085 struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); 3085 3086 u64 ino = ocfs2_metadata_cache_owner(et->et_ci); 3086 3087 3087 - mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n", 3088 - (unsigned long long)ino, cpos, len, p_cluster, ext_flags); 3088 + trace_ocfs2_clear_ext_refcount((unsigned long long)ino, 3089 + cpos, len, p_cluster, ext_flags); 3089 3090 3090 3091 memset(&replace_rec, 0, sizeof(replace_rec)); 3091 3092 replace_rec.e_cpos = cpu_to_le32(cpos); ··· 3140 3141 struct ocfs2_caching_info *ci = context->data_et.et_ci; 3141 3142 u64 ino = ocfs2_metadata_cache_owner(ci); 3142 
3143 3143 - mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n", 3144 - (unsigned long long)ino, cpos, old, new, len, ext_flags); 3144 + trace_ocfs2_replace_clusters((unsigned long long)ino, 3145 + cpos, old, new, len, ext_flags); 3145 3146 3146 3147 /*If the old clusters is unwritten, no need to duplicate. */ 3147 3148 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { ··· 3235 3236 struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; 3236 3237 struct ocfs2_refcount_rec rec; 3237 3238 3238 - mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n", 3239 - cpos, p_cluster, num_clusters, e_flags); 3239 + trace_ocfs2_make_clusters_writable(cpos, p_cluster, 3240 + num_clusters, e_flags); 3240 3241 3241 3242 ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters, 3242 3243 &context->data_et, ··· 3474 3475 goto out; 3475 3476 } 3476 3477 3477 - mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, " 3478 - "cow_len %u\n", inode->i_ino, 3479 - cpos, write_len, cow_start, cow_len); 3478 + trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno, 3479 + cpos, write_len, max_cpos, 3480 + cow_start, cow_len); 3480 3481 3481 3482 BUG_ON(cow_len == 0); 3482 3483 ··· 3755 3756 goto out; 3756 3757 } 3757 3758 3758 - mlog(0, "reserve new metadata %d, credits = %d\n", 3759 - ref_blocks, credits); 3759 + trace_ocfs2_add_refcount_flag(ref_blocks, credits); 3760 3760 3761 3761 if (ref_blocks) { 3762 3762 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
+26 -31
fs/ocfs2/reservations.c
··· 30 30 #include <linux/bitops.h> 31 31 #include <linux/list.h> 32 32 33 - #define MLOG_MASK_PREFIX ML_RESERVATIONS 34 33 #include <cluster/masklog.h> 35 34 36 35 #include "ocfs2.h" 36 + #include "ocfs2_trace.h" 37 37 38 38 #ifdef CONFIG_OCFS2_DEBUG_FS 39 39 #define OCFS2_CHECK_RESERVATIONS ··· 321 321 322 322 assert_spin_locked(&resv_lock); 323 323 324 - mlog(0, "Insert reservation start: %u len: %u\n", new->r_start, 325 - new->r_len); 324 + trace_ocfs2_resv_insert(new->r_start, new->r_len); 326 325 327 326 while (*p) { 328 327 parent = *p; ··· 422 423 unsigned int best_start, best_len = 0; 423 424 int offset, start, found; 424 425 425 - mlog(0, "Find %u bits within range (%u, len %u) resmap len: %u\n", 426 - wanted, search_start, search_len, resmap->m_bitmap_len); 426 + trace_ocfs2_resmap_find_free_bits_begin(search_start, search_len, 427 + wanted, resmap->m_bitmap_len); 427 428 428 429 found = best_start = best_len = 0; 429 430 ··· 462 463 *rlen = best_len; 463 464 *rstart = best_start; 464 465 465 - mlog(0, "Found start: %u len: %u\n", best_start, best_len); 466 + trace_ocfs2_resmap_find_free_bits_end(best_start, best_len); 466 467 467 468 return *rlen; 468 469 } ··· 486 487 * - our window should be last in all reservations 487 488 * - need to make sure we don't go past end of bitmap 488 489 */ 489 - 490 - mlog(0, "resv start: %u resv end: %u goal: %u wanted: %u\n", 491 - resv->r_start, ocfs2_resv_end(resv), goal, wanted); 490 + trace_ocfs2_resv_find_window_begin(resv->r_start, ocfs2_resv_end(resv), 491 + goal, wanted, RB_EMPTY_ROOT(root)); 492 492 493 493 assert_spin_locked(&resv_lock); 494 494 ··· 496 498 * Easiest case - empty tree. We can just take 497 499 * whatever window of free bits we want. 
498 500 */ 499 - 500 - mlog(0, "Empty root\n"); 501 - 502 501 clen = ocfs2_resmap_find_free_bits(resmap, wanted, goal, 503 502 resmap->m_bitmap_len - goal, 504 503 &cstart, &clen); ··· 519 524 prev_resv = ocfs2_find_resv_lhs(resmap, goal); 520 525 521 526 if (prev_resv == NULL) { 522 - mlog(0, "Goal on LHS of leftmost window\n"); 523 - 524 527 /* 525 528 * A NULL here means that the search code couldn't 526 529 * find a window that starts before goal. ··· 563 570 next_resv = NULL; 564 571 } 565 572 573 + trace_ocfs2_resv_find_window_prev(prev_resv->r_start, 574 + ocfs2_resv_end(prev_resv)); 575 + 566 576 prev = &prev_resv->r_node; 567 577 568 578 /* Now we do a linear search for a window, starting at 'prev_rsv' */ 569 579 while (1) { 570 580 next = rb_next(prev); 571 581 if (next) { 572 - mlog(0, "One more resv found in linear search\n"); 573 582 next_resv = rb_entry(next, 574 583 struct ocfs2_alloc_reservation, 575 584 r_node); ··· 580 585 gap_end = next_resv->r_start - 1; 581 586 gap_len = gap_end - gap_start + 1; 582 587 } else { 583 - mlog(0, "No next node\n"); 584 588 /* 585 589 * We're at the rightmost edge of the 586 590 * tree. See if a reservation between this ··· 590 596 gap_end = resmap->m_bitmap_len - 1; 591 597 } 592 598 599 + trace_ocfs2_resv_find_window_next(next ? next_resv->r_start: -1, 600 + next ? ocfs2_resv_end(next_resv) : -1); 593 601 /* 594 602 * No need to check this gap if we have already found 595 603 * a larger region of free bits. 
··· 650 654 lru_resv = list_first_entry(&resmap->m_lru, 651 655 struct ocfs2_alloc_reservation, r_lru); 652 656 653 - mlog(0, "lru resv: start: %u len: %u end: %u\n", lru_resv->r_start, 654 - lru_resv->r_len, ocfs2_resv_end(lru_resv)); 657 + trace_ocfs2_cannibalize_resv_begin(lru_resv->r_start, 658 + lru_resv->r_len, 659 + ocfs2_resv_end(lru_resv)); 655 660 656 661 /* 657 662 * Cannibalize (some or all) of the target reservation and ··· 681 684 resv->r_len = shrink; 682 685 } 683 686 684 - mlog(0, "Reservation now looks like: r_start: %u r_end: %u " 685 - "r_len: %u r_last_start: %u r_last_len: %u\n", 686 - resv->r_start, ocfs2_resv_end(resv), resv->r_len, 687 - resv->r_last_start, resv->r_last_len); 687 + trace_ocfs2_cannibalize_resv_end(resv->r_start, ocfs2_resv_end(resv), 688 + resv->r_len, resv->r_last_start, 689 + resv->r_last_len); 688 690 689 691 ocfs2_resv_insert(resmap, resv); 690 692 } ··· 744 748 if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen) 745 749 wanted = *clen; 746 750 747 - mlog(0, "empty reservation, find new window\n"); 748 751 /* 749 752 * Try to get a window here. If it works, we must fall 750 753 * through and test the bitmap . This avoids some ··· 752 757 * that inode. 
753 758 */ 754 759 ocfs2_resv_find_window(resmap, resv, wanted); 760 + trace_ocfs2_resmap_resv_bits(resv->r_start, resv->r_len); 755 761 } 756 762 757 763 BUG_ON(ocfs2_resv_empty(resv)); ··· 809 813 810 814 spin_lock(&resv_lock); 811 815 812 - mlog(0, "claim bits: cstart: %u cend: %u clen: %u r_start: %u " 813 - "r_end: %u r_len: %u, r_last_start: %u r_last_len: %u\n", 814 - cstart, cend, clen, resv->r_start, ocfs2_resv_end(resv), 815 - resv->r_len, resv->r_last_start, resv->r_last_len); 816 + trace_ocfs2_resmap_claimed_bits_begin(cstart, cend, clen, resv->r_start, 817 + ocfs2_resv_end(resv), resv->r_len, 818 + resv->r_last_start, 819 + resv->r_last_len); 816 820 817 821 BUG_ON(cstart < resv->r_start); 818 822 BUG_ON(cstart > ocfs2_resv_end(resv)); ··· 829 833 if (!ocfs2_resv_empty(resv)) 830 834 ocfs2_resv_mark_lru(resmap, resv); 831 835 832 - mlog(0, "Reservation now looks like: r_start: %u r_end: %u " 833 - "r_len: %u r_last_start: %u r_last_len: %u\n", 834 - resv->r_start, ocfs2_resv_end(resv), resv->r_len, 835 - resv->r_last_start, resv->r_last_len); 836 + trace_ocfs2_resmap_claimed_bits_end(resv->r_start, ocfs2_resv_end(resv), 837 + resv->r_len, resv->r_last_start, 838 + resv->r_last_len); 836 839 837 840 ocfs2_check_resmap(resmap); 838 841
+9 -14
fs/ocfs2/resize.c
··· 27 27 #include <linux/fs.h> 28 28 #include <linux/types.h> 29 29 30 - #define MLOG_MASK_PREFIX ML_DISK_ALLOC 31 30 #include <cluster/masklog.h> 32 31 33 32 #include "ocfs2.h" ··· 38 39 #include "super.h" 39 40 #include "sysfile.h" 40 41 #include "uptodate.h" 42 + #include "ocfs2_trace.h" 41 43 42 44 #include "buffer_head_io.h" 43 45 #include "suballoc.h" ··· 82 82 backups++; 83 83 } 84 84 85 - mlog_exit_void(); 86 85 return backups; 87 86 } 88 87 ··· 102 103 u16 cl_bpc = le16_to_cpu(cl->cl_bpc); 103 104 u16 cl_cpg = le16_to_cpu(cl->cl_cpg); 104 105 105 - mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n", 106 - new_clusters, first_new_cluster); 106 + trace_ocfs2_update_last_group_and_inode(new_clusters, 107 + first_new_cluster); 107 108 108 109 ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode), 109 110 group_bh, OCFS2_JOURNAL_ACCESS_WRITE); ··· 175 176 le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits); 176 177 } 177 178 out: 178 - mlog_exit(ret); 179 + if (ret) 180 + mlog_errno(ret); 179 181 return ret; 180 182 } 181 183 ··· 281 281 u32 first_new_cluster; 282 282 u64 lgd_blkno; 283 283 284 - mlog_entry_void(); 285 - 286 284 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) 287 285 return -EROFS; 288 286 ··· 340 342 goto out_unlock; 341 343 } 342 344 343 - mlog(0, "extend the last group at %llu, new clusters = %d\n", 345 + 346 + trace_ocfs2_group_extend( 344 347 (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters); 345 348 346 349 handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS); ··· 376 377 iput(main_bm_inode); 377 378 378 379 out: 379 - mlog_exit_void(); 380 380 return ret; 381 381 } 382 382 ··· 470 472 struct ocfs2_chain_rec *cr; 471 473 u16 cl_bpc; 472 474 473 - mlog_entry_void(); 474 - 475 475 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) 476 476 return -EROFS; 477 477 ··· 516 520 goto out_unlock; 517 521 } 518 522 519 - mlog(0, "Add a new group %llu in chain = %u, length = %u\n", 
520 - (unsigned long long)input->group, input->chain, input->clusters); 523 + trace_ocfs2_group_add((unsigned long long)input->group, 524 + input->chain, input->clusters, input->frees); 521 525 522 526 handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS); 523 527 if (IS_ERR(handle)) { ··· 585 589 iput(main_bm_inode); 586 590 587 591 out: 588 - mlog_exit_void(); 589 592 return ret; 590 593 }
+5 -11
fs/ocfs2/slot_map.c
··· 27 27 #include <linux/slab.h> 28 28 #include <linux/highmem.h> 29 29 30 - #define MLOG_MASK_PREFIX ML_SUPER 31 30 #include <cluster/masklog.h> 32 31 33 32 #include "ocfs2.h" ··· 38 39 #include "slot_map.h" 39 40 #include "super.h" 40 41 #include "sysfile.h" 42 + #include "ocfs2_trace.h" 41 43 42 44 #include "buffer_head_io.h" 43 45 ··· 142 142 BUG_ON(si->si_blocks == 0); 143 143 BUG_ON(si->si_bh == NULL); 144 144 145 - mlog(0, "Refreshing slot map, reading %u block(s)\n", 146 - si->si_blocks); 145 + trace_ocfs2_refresh_slot_info(si->si_blocks); 147 146 148 147 /* 149 148 * We pass -1 as blocknr because we expect all of si->si_bh to ··· 380 381 /* The size checks above should ensure this */ 381 382 BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks); 382 383 383 - mlog(0, "Slot map needs %u buffers for %llu bytes\n", 384 - si->si_blocks, bytes); 384 + trace_ocfs2_map_slot_buffers(bytes, si->si_blocks); 385 385 386 386 si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks, 387 387 GFP_KERNEL); ··· 398 400 goto bail; 399 401 } 400 402 401 - mlog(0, "Reading slot map block %u at %llu\n", i, 402 - (unsigned long long)blkno); 403 + trace_ocfs2_map_slot_buffers_block((unsigned long long)blkno, i); 403 404 404 405 bh = NULL; /* Acquire a fresh bh */ 405 406 status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno, ··· 472 475 int slot; 473 476 struct ocfs2_slot_info *si; 474 477 475 - mlog_entry_void(); 476 - 477 478 si = osb->slot_info; 478 479 479 480 spin_lock(&osb->osb_lock); ··· 500 505 osb->slot_num = slot; 501 506 spin_unlock(&osb->osb_lock); 502 507 503 - mlog(0, "taking node slot %d\n", osb->slot_num); 508 + trace_ocfs2_find_slot(osb->slot_num); 504 509 505 510 status = ocfs2_update_disk_slot(osb, si, osb->slot_num); 506 511 if (status < 0) 507 512 mlog_errno(status); 508 513 509 514 bail: 510 - mlog_exit(status); 511 515 return status; 512 516 } 513 517
+91 -98
fs/ocfs2/suballoc.c
··· 29 29 #include <linux/slab.h> 30 30 #include <linux/highmem.h> 31 31 32 - #define MLOG_MASK_PREFIX ML_DISK_ALLOC 33 32 #include <cluster/masklog.h> 34 33 35 34 #include "ocfs2.h" ··· 43 44 #include "super.h" 44 45 #include "sysfile.h" 45 46 #include "uptodate.h" 47 + #include "ocfs2_trace.h" 46 48 47 49 #include "buffer_head_io.h" 48 50 ··· 308 308 int rc; 309 309 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; 310 310 311 - mlog(0, "Validating group descriptor %llu\n", 312 - (unsigned long long)bh->b_blocknr); 311 + trace_ocfs2_validate_group_descriptor( 312 + (unsigned long long)bh->b_blocknr); 313 313 314 314 BUG_ON(!buffer_uptodate(bh)); 315 315 ··· 389 389 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; 390 390 struct super_block * sb = alloc_inode->i_sb; 391 391 392 - mlog_entry_void(); 393 - 394 392 if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) { 395 393 ocfs2_error(alloc_inode->i_sb, "group block (%llu) != " 396 394 "b_blocknr (%llu)", ··· 434 436 * allocation time. 
*/ 435 437 436 438 bail: 437 - mlog_exit(status); 439 + if (status) 440 + mlog_errno(status); 438 441 return status; 439 442 } 440 443 ··· 476 477 477 478 /* setup the group */ 478 479 bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off); 479 - mlog(0, "new descriptor, record %u, at block %llu\n", 480 - alloc_rec, (unsigned long long)bg_blkno); 480 + trace_ocfs2_block_group_alloc_contig( 481 + (unsigned long long)bg_blkno, alloc_rec); 481 482 482 483 bg_bh = sb_getblk(osb->sb, bg_blkno); 483 484 if (!bg_bh) { ··· 656 657 657 658 /* setup the group */ 658 659 bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off); 659 - mlog(0, "new descriptor, record %u, at block %llu\n", 660 - alloc_rec, (unsigned long long)bg_blkno); 660 + trace_ocfs2_block_group_alloc_discontig( 661 + (unsigned long long)bg_blkno, alloc_rec); 661 662 662 663 bg_bh = sb_getblk(osb->sb, bg_blkno); 663 664 if (!bg_bh) { ··· 706 707 707 708 BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode)); 708 709 709 - mlog_entry_void(); 710 - 711 710 cl = &fe->id2.i_chain; 712 711 status = ocfs2_reserve_clusters_with_limit(osb, 713 712 le16_to_cpu(cl->cl_cpg), ··· 727 730 } 728 731 729 732 if (last_alloc_group && *last_alloc_group != 0) { 730 - mlog(0, "use old allocation group %llu for block group alloc\n", 731 - (unsigned long long)*last_alloc_group); 733 + trace_ocfs2_block_group_alloc( 734 + (unsigned long long)*last_alloc_group); 732 735 ac->ac_last_group = *last_alloc_group; 733 736 } 734 737 ··· 793 796 794 797 brelse(bg_bh); 795 798 796 - mlog_exit(status); 799 + if (status) 800 + mlog_errno(status); 797 801 return status; 798 802 } 799 803 ··· 811 813 struct buffer_head *bh = NULL; 812 814 struct ocfs2_dinode *fe; 813 815 u32 free_bits; 814 - 815 - mlog_entry_void(); 816 816 817 817 alloc_inode = ocfs2_get_system_file_inode(osb, type, slot); 818 818 if (!alloc_inode) { ··· 851 855 if (bits_wanted > free_bits) { 852 856 /* cluster bitmap never grows */ 853 857 if (ocfs2_is_cluster_bitmap(alloc_inode)) { 854 - 
mlog(0, "Disk Full: wanted=%u, free_bits=%u\n", 855 - bits_wanted, free_bits); 858 + trace_ocfs2_reserve_suballoc_bits_nospc(bits_wanted, 859 + free_bits); 856 860 status = -ENOSPC; 857 861 goto bail; 858 862 } 859 863 860 864 if (!(flags & ALLOC_NEW_GROUP)) { 861 - mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, " 862 - "and we don't alloc a new group for it.\n", 863 - slot, bits_wanted, free_bits); 865 + trace_ocfs2_reserve_suballoc_bits_no_new_group( 866 + slot, bits_wanted, free_bits); 864 867 status = -ENOSPC; 865 868 goto bail; 866 869 } ··· 885 890 bail: 886 891 brelse(bh); 887 892 888 - mlog_exit(status); 893 + if (status) 894 + mlog_errno(status); 889 895 return status; 890 896 } 891 897 ··· 1048 1052 *ac = NULL; 1049 1053 } 1050 1054 1051 - mlog_exit(status); 1055 + if (status) 1056 + mlog_errno(status); 1052 1057 return status; 1053 1058 } 1054 1059 ··· 1116 1119 spin_lock(&osb->osb_lock); 1117 1120 osb->osb_inode_alloc_group = alloc_group; 1118 1121 spin_unlock(&osb->osb_lock); 1119 - mlog(0, "after reservation, new allocation group is " 1120 - "%llu\n", (unsigned long long)alloc_group); 1122 + trace_ocfs2_reserve_new_inode_new_group( 1123 + (unsigned long long)alloc_group); 1121 1124 1122 1125 /* 1123 1126 * Some inodes must be freed by us, so try to allocate ··· 1149 1152 *ac = NULL; 1150 1153 } 1151 1154 1152 - mlog_exit(status); 1155 + if (status) 1156 + mlog_errno(status); 1153 1157 return status; 1154 1158 } 1155 1159 ··· 1186 1188 struct ocfs2_alloc_context **ac) 1187 1189 { 1188 1190 int status; 1189 - 1190 - mlog_entry_void(); 1191 1191 1192 1192 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); 1193 1193 if (!(*ac)) { ··· 1225 1229 *ac = NULL; 1226 1230 } 1227 1231 1228 - mlog_exit(status); 1232 + if (status) 1233 + mlog_errno(status); 1229 1234 return status; 1230 1235 } 1231 1236 ··· 1354 1357 void *bitmap = bg->bg_bitmap; 1355 1358 int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; 1356 1359 1357 - mlog_entry_void(); 1358 - 
1359 1360 /* All callers get the descriptor via 1360 1361 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ 1361 1362 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); 1362 1363 BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits); 1363 1364 1364 - mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off, 1365 - num_bits); 1365 + trace_ocfs2_block_group_set_bits(bit_off, num_bits); 1366 1366 1367 1367 if (ocfs2_is_cluster_bitmap(alloc_inode)) 1368 1368 journal_type = OCFS2_JOURNAL_ACCESS_UNDO; ··· 1388 1394 ocfs2_journal_dirty(handle, group_bh); 1389 1395 1390 1396 bail: 1391 - mlog_exit(status); 1397 + if (status) 1398 + mlog_errno(status); 1392 1399 return status; 1393 1400 } 1394 1401 ··· 1432 1437 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); 1433 1438 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(prev_bg)); 1434 1439 1435 - mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n", 1436 - (unsigned long long)le64_to_cpu(fe->i_blkno), chain, 1437 - (unsigned long long)le64_to_cpu(bg->bg_blkno), 1438 - (unsigned long long)le64_to_cpu(prev_bg->bg_blkno)); 1440 + trace_ocfs2_relink_block_group( 1441 + (unsigned long long)le64_to_cpu(fe->i_blkno), chain, 1442 + (unsigned long long)le64_to_cpu(bg->bg_blkno), 1443 + (unsigned long long)le64_to_cpu(prev_bg->bg_blkno)); 1439 1444 1440 1445 fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno); 1441 1446 bg_ptr = le64_to_cpu(bg->bg_next_group); ··· 1479 1484 prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr); 1480 1485 } 1481 1486 1482 - mlog_exit(status); 1487 + if (status) 1488 + mlog_errno(status); 1483 1489 return status; 1484 1490 } 1485 1491 ··· 1521 1525 if ((gd_cluster_off + max_bits) > 1522 1526 OCFS2_I(inode)->ip_clusters) { 1523 1527 max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off; 1524 - mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n", 1525 - (unsigned long long)le64_to_cpu(gd->bg_blkno), 1526 - le16_to_cpu(gd->bg_bits), 1527 - OCFS2_I(inode)->ip_clusters, max_bits); 1528 + 
trace_ocfs2_cluster_group_search_wrong_max_bits( 1529 + (unsigned long long)le64_to_cpu(gd->bg_blkno), 1530 + le16_to_cpu(gd->bg_bits), 1531 + OCFS2_I(inode)->ip_clusters, max_bits); 1528 1532 } 1529 1533 1530 1534 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), ··· 1538 1542 gd_cluster_off + 1539 1543 res->sr_bit_offset + 1540 1544 res->sr_bits); 1541 - mlog(0, "Checking %llu against %llu\n", 1542 - (unsigned long long)blkoff, 1543 - (unsigned long long)max_block); 1545 + trace_ocfs2_cluster_group_search_max_block( 1546 + (unsigned long long)blkoff, 1547 + (unsigned long long)max_block); 1544 1548 if (blkoff > max_block) 1545 1549 return -ENOSPC; 1546 1550 } ··· 1584 1588 if (!ret && max_block) { 1585 1589 blkoff = le64_to_cpu(bg->bg_blkno) + 1586 1590 res->sr_bit_offset + res->sr_bits; 1587 - mlog(0, "Checking %llu against %llu\n", 1588 - (unsigned long long)blkoff, 1589 - (unsigned long long)max_block); 1591 + trace_ocfs2_block_group_search_max_block( 1592 + (unsigned long long)blkoff, 1593 + (unsigned long long)max_block); 1590 1594 if (blkoff > max_block) 1591 1595 ret = -ENOSPC; 1592 1596 } ··· 1752 1756 struct ocfs2_group_desc *bg; 1753 1757 1754 1758 chain = ac->ac_chain; 1755 - mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n", 1756 - bits_wanted, chain, 1757 - (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno); 1759 + trace_ocfs2_search_chain_begin( 1760 + (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, 1761 + bits_wanted, chain); 1758 1762 1759 1763 status = ocfs2_read_group_descriptor(alloc_inode, fe, 1760 1764 le64_to_cpu(cl->cl_recs[chain].c_blkno), ··· 1795 1799 goto bail; 1796 1800 } 1797 1801 1798 - mlog(0, "alloc succeeds: we give %u bits from block group %llu\n", 1799 - res->sr_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno)); 1802 + trace_ocfs2_search_chain_succ( 1803 + (unsigned long long)le64_to_cpu(bg->bg_blkno), res->sr_bits); 1800 1804 1801 1805 res->sr_bg_blkno = le64_to_cpu(bg->bg_blkno); 1802 1806 ··· 
1857 1861 goto bail; 1858 1862 } 1859 1863 1860 - mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, 1861 - (unsigned long long)le64_to_cpu(fe->i_blkno)); 1864 + trace_ocfs2_search_chain_end( 1865 + (unsigned long long)le64_to_cpu(fe->i_blkno), 1866 + res->sr_bits); 1862 1867 1863 1868 out_loc_only: 1864 1869 *bits_left = le16_to_cpu(bg->bg_free_bits_count); ··· 1867 1870 brelse(group_bh); 1868 1871 brelse(prev_group_bh); 1869 1872 1870 - mlog_exit(status); 1873 + if (status) 1874 + mlog_errno(status); 1871 1875 return status; 1872 1876 } 1873 1877 ··· 1885 1887 u64 hint = ac->ac_last_group; 1886 1888 struct ocfs2_chain_list *cl; 1887 1889 struct ocfs2_dinode *fe; 1888 - 1889 - mlog_entry_void(); 1890 1890 1891 1891 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); 1892 1892 BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given)); ··· 1941 1945 goto bail; 1942 1946 } 1943 1947 1944 - mlog(0, "Search of victim chain %u came up with nothing, " 1945 - "trying all chains now.\n", victim); 1948 + trace_ocfs2_claim_suballoc_bits(victim); 1946 1949 1947 1950 /* If we didn't pick a good victim, then just default to 1948 1951 * searching each chain in order. 
Don't allow chain relinking ··· 1979 1984 } 1980 1985 1981 1986 bail: 1982 - mlog_exit(status); 1987 + if (status) 1988 + mlog_errno(status); 1983 1989 return status; 1984 1990 } 1985 1991 ··· 2017 2021 *num_bits = res.sr_bits; 2018 2022 status = 0; 2019 2023 bail: 2020 - mlog_exit(status); 2024 + if (status) 2025 + mlog_errno(status); 2021 2026 return status; 2022 2027 } 2023 2028 ··· 2169 2172 goto out; 2170 2173 } 2171 2174 2172 - mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits, 2173 - (unsigned long long)di_blkno); 2175 + trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno, 2176 + res->sr_bits); 2174 2177 2175 2178 atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); 2176 2179 ··· 2197 2200 { 2198 2201 int status; 2199 2202 struct ocfs2_suballoc_result res; 2200 - 2201 - mlog_entry_void(); 2202 2203 2203 2204 BUG_ON(!ac); 2204 2205 BUG_ON(ac->ac_bits_given != 0); ··· 2225 2230 ocfs2_save_inode_ac_group(dir, ac); 2226 2231 status = 0; 2227 2232 bail: 2228 - mlog_exit(status); 2233 + if (status) 2234 + mlog_errno(status); 2229 2235 return status; 2230 2236 } 2231 2237 ··· 2303 2307 struct ocfs2_suballoc_result res = { .sr_blkno = 0, }; 2304 2308 struct ocfs2_super *osb = OCFS2_SB(ac->ac_inode->i_sb); 2305 2309 2306 - mlog_entry_void(); 2307 - 2308 2310 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); 2309 2311 2310 2312 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL ··· 2357 2363 ac->ac_bits_given += *num_clusters; 2358 2364 2359 2365 bail: 2360 - mlog_exit(status); 2366 + if (status) 2367 + mlog_errno(status); 2361 2368 return status; 2362 2369 } 2363 2370 ··· 2387 2392 unsigned int tmp; 2388 2393 struct ocfs2_group_desc *undo_bg = NULL; 2389 2394 2390 - mlog_entry_void(); 2391 - 2392 2395 /* The caller got this descriptor from 2393 2396 * ocfs2_read_group_descriptor(). Any corruption is a code bug. 
*/ 2394 2397 BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); 2395 2398 2396 - mlog(0, "off = %u, num = %u\n", bit_off, num_bits); 2399 + trace_ocfs2_block_group_clear_bits(bit_off, num_bits); 2397 2400 2398 2401 BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode)); 2399 2402 status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), ··· 2456 2463 struct buffer_head *group_bh = NULL; 2457 2464 struct ocfs2_group_desc *group; 2458 2465 2459 - mlog_entry_void(); 2460 - 2461 2466 /* The alloc_bh comes from ocfs2_free_dinode() or 2462 2467 * ocfs2_free_clusters(). The callers have all locked the 2463 2468 * allocator and gotten alloc_bh from the lock call. This ··· 2464 2473 BUG_ON(!OCFS2_IS_VALID_DINODE(fe)); 2465 2474 BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl)); 2466 2475 2467 - mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n", 2468 - (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count, 2469 - (unsigned long long)bg_blkno, start_bit); 2476 + trace_ocfs2_free_suballoc_bits( 2477 + (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, 2478 + (unsigned long long)bg_blkno, 2479 + start_bit, count); 2470 2480 2471 2481 status = ocfs2_read_group_descriptor(alloc_inode, fe, bg_blkno, 2472 2482 &group_bh); ··· 2503 2511 bail: 2504 2512 brelse(group_bh); 2505 2513 2506 - mlog_exit(status); 2514 + if (status) 2515 + mlog_errno(status); 2507 2516 return status; 2508 2517 } 2509 2518 ··· 2549 2556 2550 2557 /* You can't ever have a contiguous set of clusters 2551 2558 * bigger than a block group bitmap so we never have to worry 2552 - * about looping on them. */ 2553 - 2554 - mlog_entry_void(); 2555 - 2556 - /* This is expensive. We can safely remove once this stuff has 2559 + * about looping on them. 2560 + * This is expensive. We can safely remove once this stuff has 2557 2561 * gotten tested really well. 
*/ 2558 2562 BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk))); 2559 2563 ··· 2559 2569 ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno, 2560 2570 &bg_start_bit); 2561 2571 2562 - mlog(0, "want to free %u clusters starting at block %llu\n", 2563 - num_clusters, (unsigned long long)start_blk); 2564 - mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n", 2565 - (unsigned long long)bg_blkno, bg_start_bit); 2572 + trace_ocfs2_free_clusters((unsigned long long)bg_blkno, 2573 + (unsigned long long)start_blk, 2574 + bg_start_bit, num_clusters); 2566 2575 2567 2576 status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh, 2568 2577 bg_start_bit, bg_blkno, ··· 2575 2586 num_clusters); 2576 2587 2577 2588 out: 2578 - mlog_exit(status); 2589 + if (status) 2590 + mlog_errno(status); 2579 2591 return status; 2580 2592 } 2581 2593 ··· 2746 2756 struct buffer_head *inode_bh = NULL; 2747 2757 struct ocfs2_dinode *inode_fe; 2748 2758 2749 - mlog_entry("blkno: %llu\n", (unsigned long long)blkno); 2759 + trace_ocfs2_get_suballoc_slot_bit((unsigned long long)blkno); 2750 2760 2751 2761 /* dirty read disk */ 2752 2762 status = ocfs2_read_blocks_sync(osb, blkno, 1, &inode_bh); ··· 2783 2793 bail: 2784 2794 brelse(inode_bh); 2785 2795 2786 - mlog_exit(status); 2796 + if (status) 2797 + mlog_errno(status); 2787 2798 return status; 2788 2799 } 2789 2800 ··· 2807 2816 u64 bg_blkno; 2808 2817 int status; 2809 2818 2810 - mlog_entry("blkno: %llu bit: %u\n", (unsigned long long)blkno, 2811 - (unsigned int)bit); 2819 + trace_ocfs2_test_suballoc_bit((unsigned long long)blkno, 2820 + (unsigned int)bit); 2812 2821 2813 2822 alloc_di = (struct ocfs2_dinode *)alloc_bh->b_data; 2814 2823 if ((bit + 1) > ocfs2_bits_per_group(&alloc_di->id2.i_chain)) { ··· 2835 2844 bail: 2836 2845 brelse(group_bh); 2837 2846 2838 - mlog_exit(status); 2847 + if (status) 2848 + mlog_errno(status); 2839 2849 return status; 2840 
2850 } 2841 2851 ··· 2861 2869 struct inode *inode_alloc_inode; 2862 2870 struct buffer_head *alloc_bh = NULL; 2863 2871 2864 - mlog_entry("blkno: %llu", (unsigned long long)blkno); 2872 + trace_ocfs2_test_inode_bit((unsigned long long)blkno); 2865 2873 2866 2874 status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot, 2867 2875 &group_blkno, &suballoc_bit); ··· 2902 2910 iput(inode_alloc_inode); 2903 2911 brelse(alloc_bh); 2904 2912 bail: 2905 - mlog_exit(status); 2913 + if (status) 2914 + mlog_errno(status); 2906 2915 return status; 2907 2916 }
+27 -62
fs/ocfs2/super.c
··· 42 42 #include <linux/seq_file.h> 43 43 #include <linux/quotaops.h> 44 44 45 - #define MLOG_MASK_PREFIX ML_SUPER 45 + #define CREATE_TRACE_POINTS 46 + #include "ocfs2_trace.h" 47 + 46 48 #include <cluster/masklog.h> 47 49 48 50 #include "ocfs2.h" ··· 443 441 int status = 0; 444 442 int i; 445 443 446 - mlog_entry_void(); 447 - 448 444 new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0); 449 445 if (IS_ERR(new)) { 450 446 status = PTR_ERR(new); ··· 478 478 } 479 479 480 480 bail: 481 - mlog_exit(status); 481 + if (status) 482 + mlog_errno(status); 482 483 return status; 483 484 } 484 485 ··· 488 487 struct inode *new = NULL; 489 488 int status = 0; 490 489 int i; 491 - 492 - mlog_entry_void(); 493 490 494 491 for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1; 495 492 i < NUM_SYSTEM_INODES; ··· 507 508 } 508 509 509 510 bail: 510 - mlog_exit(status); 511 + if (status) 512 + mlog_errno(status); 511 513 return status; 512 514 } 513 515 ··· 516 516 { 517 517 int i; 518 518 struct inode *inode; 519 - 520 - mlog_entry_void(); 521 519 522 520 for (i = 0; i < NUM_GLOBAL_SYSTEM_INODES; i++) { 523 521 inode = osb->global_system_inodes[i]; ··· 538 540 } 539 541 540 542 if (!osb->local_system_inodes) 541 - goto out; 543 + return; 542 544 543 545 for (i = 0; i < NUM_LOCAL_SYSTEM_INODES * osb->max_slots; i++) { 544 546 if (osb->local_system_inodes[i]) { ··· 549 551 550 552 kfree(osb->local_system_inodes); 551 553 osb->local_system_inodes = NULL; 552 - 553 - out: 554 - mlog_exit(0); 555 554 } 556 555 557 556 /* We're allocating fs objects, use GFP_NOFS */ ··· 679 684 } 680 685 681 686 if (*flags & MS_RDONLY) { 682 - mlog(0, "Going to ro mode.\n"); 683 687 sb->s_flags |= MS_RDONLY; 684 688 osb->osb_flags |= OCFS2_OSB_SOFT_RO; 685 689 } else { 686 - mlog(0, "Making ro filesystem writeable.\n"); 687 - 688 690 if (osb->osb_flags & OCFS2_OSB_ERROR_FS) { 689 691 mlog(ML_ERROR, "Cannot remount RDWR " 690 692 "filesystem due to previous errors.\n"); ··· 699 707 sb->s_flags &= 
~MS_RDONLY; 700 708 osb->osb_flags &= ~OCFS2_OSB_SOFT_RO; 701 709 } 710 + trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags); 702 711 unlock_osb: 703 712 spin_unlock(&osb->osb_lock); 704 713 /* Enable quota accounting after remounting RW */ ··· 1025 1032 char nodestr[8]; 1026 1033 struct ocfs2_blockcheck_stats stats; 1027 1034 1028 - mlog_entry("%p, %p, %i", sb, data, silent); 1035 + trace_ocfs2_fill_super(sb, data, silent); 1029 1036 1030 1037 if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { 1031 1038 status = -EINVAL; ··· 1201 1208 mlog_errno(status); 1202 1209 atomic_set(&osb->vol_state, VOLUME_DISABLED); 1203 1210 wake_up(&osb->osb_mount_event); 1204 - mlog_exit(status); 1205 1211 return status; 1206 1212 } 1207 1213 } ··· 1214 1222 /* Start this when the mount is almost sure of being successful */ 1215 1223 ocfs2_orphan_scan_start(osb); 1216 1224 1217 - mlog_exit(status); 1218 1225 return status; 1219 1226 1220 1227 read_super_error: ··· 1228 1237 ocfs2_dismount_volume(sb, 1); 1229 1238 } 1230 1239 1231 - mlog_exit(status); 1240 + if (status) 1241 + mlog_errno(status); 1232 1242 return status; 1233 1243 } 1234 1244 ··· 1312 1320 char *p; 1313 1321 u32 tmp; 1314 1322 1315 - mlog_entry("remount: %d, options: \"%s\"\n", is_remount, 1316 - options ? options : "(none)"); 1323 + trace_ocfs2_parse_options(is_remount, options ? 
options : "(none)"); 1317 1324 1318 1325 mopt->commit_interval = 0; 1319 1326 mopt->mount_opt = OCFS2_MOUNT_NOINTR; ··· 1529 1538 status = 1; 1530 1539 1531 1540 bail: 1532 - mlog_exit(status); 1533 1541 return status; 1534 1542 } 1535 1543 ··· 1619 1629 { 1620 1630 int status; 1621 1631 1622 - mlog_entry_void(); 1623 - 1624 1632 ocfs2_print_version(); 1625 1633 1626 1634 status = init_ocfs2_uptodate_cache(); ··· 1652 1664 if (status < 0) { 1653 1665 ocfs2_free_mem_caches(); 1654 1666 exit_ocfs2_uptodate_cache(); 1667 + mlog_errno(status); 1655 1668 } 1656 - 1657 - mlog_exit(status); 1658 1669 1659 1670 if (status >= 0) { 1660 1671 return register_filesystem(&ocfs2_fs_type); ··· 1663 1676 1664 1677 static void __exit ocfs2_exit(void) 1665 1678 { 1666 - mlog_entry_void(); 1667 - 1668 1679 if (ocfs2_wq) { 1669 1680 flush_workqueue(ocfs2_wq); 1670 1681 destroy_workqueue(ocfs2_wq); ··· 1677 1692 unregister_filesystem(&ocfs2_fs_type); 1678 1693 1679 1694 exit_ocfs2_uptodate_cache(); 1680 - 1681 - mlog_exit_void(); 1682 1695 } 1683 1696 1684 1697 static void ocfs2_put_super(struct super_block *sb) 1685 1698 { 1686 - mlog_entry("(0x%p)\n", sb); 1699 + trace_ocfs2_put_super(sb); 1687 1700 1688 1701 ocfs2_sync_blockdev(sb); 1689 1702 ocfs2_dismount_volume(sb, 0); 1690 - 1691 - mlog_exit_void(); 1692 1703 } 1693 1704 1694 1705 static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) ··· 1696 1715 struct buffer_head *bh = NULL; 1697 1716 struct inode *inode = NULL; 1698 1717 1699 - mlog_entry("(%p, %p)\n", dentry->d_sb, buf); 1718 + trace_ocfs2_statfs(dentry->d_sb, buf); 1700 1719 1701 1720 osb = OCFS2_SB(dentry->d_sb); 1702 1721 ··· 1743 1762 if (inode) 1744 1763 iput(inode); 1745 1764 1746 - mlog_exit(status); 1765 + if (status) 1766 + mlog_errno(status); 1747 1767 1748 1768 return status; 1749 1769 } ··· 1864 1882 int unlock_super = 0; 1865 1883 struct ocfs2_super *osb = OCFS2_SB(sb); 1866 1884 1867 - mlog_entry_void(); 1868 - 1869 1885 if 
(ocfs2_is_hard_readonly(osb)) 1870 1886 goto leave; 1871 1887 ··· 1908 1928 if (unlock_super) 1909 1929 ocfs2_super_unlock(osb, 1); 1910 1930 1911 - mlog_exit(status); 1912 1931 return status; 1913 1932 } 1914 1933 ··· 1917 1938 struct ocfs2_super *osb = NULL; 1918 1939 char nodestr[8]; 1919 1940 1920 - mlog_entry("(0x%p)\n", sb); 1941 + trace_ocfs2_dismount_volume(sb); 1921 1942 1922 1943 BUG_ON(!sb); 1923 1944 osb = OCFS2_SB(sb); ··· 2069 2090 struct ocfs2_super *osb; 2070 2091 u64 total_blocks; 2071 2092 2072 - mlog_entry_void(); 2073 - 2074 2093 osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL); 2075 2094 if (!osb) { 2076 2095 status = -ENOMEM; ··· 2132 2155 status = -EINVAL; 2133 2156 goto bail; 2134 2157 } 2135 - mlog(0, "max_slots for this device: %u\n", osb->max_slots); 2136 2158 2137 2159 ocfs2_orphan_scan_init(osb); 2138 2160 ··· 2270 2294 osb->s_clustersize_bits = 2271 2295 le32_to_cpu(di->id2.i_super.s_clustersize_bits); 2272 2296 osb->s_clustersize = 1 << osb->s_clustersize_bits; 2273 - mlog(0, "clusterbits=%d\n", osb->s_clustersize_bits); 2274 2297 2275 2298 if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE || 2276 2299 osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) { ··· 2308 2333 le64_to_cpu(di->id2.i_super.s_first_cluster_group); 2309 2334 osb->fs_generation = le32_to_cpu(di->i_fs_generation); 2310 2335 osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash); 2311 - mlog(0, "vol_label: %s\n", osb->vol_label); 2312 - mlog(0, "uuid: %s\n", osb->uuid_str); 2313 - mlog(0, "root_blkno=%llu, system_dir_blkno=%llu\n", 2314 - (unsigned long long)osb->root_blkno, 2315 - (unsigned long long)osb->system_dir_blkno); 2336 + trace_ocfs2_initialize_super(osb->vol_label, osb->uuid_str, 2337 + (unsigned long long)osb->root_blkno, 2338 + (unsigned long long)osb->system_dir_blkno, 2339 + osb->s_clustersize_bits); 2316 2340 2317 2341 osb->osb_dlm_debug = ocfs2_new_dlm_debug(); 2318 2342 if (!osb->osb_dlm_debug) { ··· 2354 2380 } 2355 2381 2356 2382 bail: 2357 - 
mlog_exit(status); 2358 2383 return status; 2359 2384 } 2360 2385 ··· 2368 2395 struct ocfs2_blockcheck_stats *stats) 2369 2396 { 2370 2397 int status = -EAGAIN; 2371 - 2372 - mlog_entry_void(); 2373 2398 2374 2399 if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE, 2375 2400 strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) { ··· 2423 2452 } 2424 2453 2425 2454 out: 2426 - mlog_exit(status); 2455 + if (status && status != -EAGAIN) 2456 + mlog_errno(status); 2427 2457 return status; 2428 2458 } 2429 2459 ··· 2436 2464 struct ocfs2_dinode *local_alloc = NULL; /* only used if we 2437 2465 * recover 2438 2466 * ourselves. */ 2439 - 2440 - mlog_entry_void(); 2441 2467 2442 2468 /* Init our journal object. */ 2443 2469 status = ocfs2_journal_init(osb->journal, &dirty); ··· 2486 2516 * ourselves as mounted. */ 2487 2517 } 2488 2518 2489 - mlog(0, "Journal loaded.\n"); 2490 - 2491 2519 status = ocfs2_load_local_alloc(osb); 2492 2520 if (status < 0) { 2493 2521 mlog_errno(status); ··· 2517 2549 if (local_alloc) 2518 2550 kfree(local_alloc); 2519 2551 2520 - mlog_exit(status); 2552 + if (status) 2553 + mlog_errno(status); 2521 2554 return status; 2522 2555 } 2523 2556 ··· 2530 2561 */ 2531 2562 static void ocfs2_delete_osb(struct ocfs2_super *osb) 2532 2563 { 2533 - mlog_entry_void(); 2534 - 2535 2564 /* This function assumes that the caller has the main osb resource */ 2536 2565 2537 2566 ocfs2_free_slot_info(osb); ··· 2547 2580 kfree(osb->uuid_str); 2548 2581 ocfs2_put_dlm_debug(osb->osb_dlm_debug); 2549 2582 memset(osb, 0, sizeof(struct ocfs2_super)); 2550 - 2551 - mlog_exit_void(); 2552 2583 } 2553 2584 2554 2585 /* Put OCFS2 into a readonly state, or (if the user specifies it),
+4 -10
fs/ocfs2/symlink.c
··· 40 40 #include <linux/pagemap.h> 41 41 #include <linux/namei.h> 42 42 43 - #define MLOG_MASK_PREFIX ML_NAMEI 44 43 #include <cluster/masklog.h> 45 44 46 45 #include "ocfs2.h" ··· 61 62 char *link = NULL; 62 63 struct ocfs2_dinode *fe; 63 64 64 - mlog_entry_void(); 65 - 66 65 status = ocfs2_read_inode_block(inode, bh); 67 66 if (status < 0) { 68 67 mlog_errno(status); ··· 71 74 fe = (struct ocfs2_dinode *) (*bh)->b_data; 72 75 link = (char *) fe->id2.i_symlink; 73 76 bail: 74 - mlog_exit(status); 75 77 76 78 return link; 77 79 } ··· 83 87 char *link; 84 88 struct buffer_head *bh = NULL; 85 89 struct inode *inode = dentry->d_inode; 86 - 87 - mlog_entry_void(); 88 90 89 91 link = ocfs2_fast_symlink_getlink(inode, &bh); 90 92 if (IS_ERR(link)) { ··· 98 104 99 105 brelse(bh); 100 106 out: 101 - mlog_exit(ret); 107 + if (ret < 0) 108 + mlog_errno(ret); 102 109 return ret; 103 110 } 104 111 ··· 111 116 char *target, *link = ERR_PTR(-ENOMEM); 112 117 struct inode *inode = dentry->d_inode; 113 118 struct buffer_head *bh = NULL; 114 - 115 - mlog_entry_void(); 116 119 117 120 BUG_ON(!ocfs2_inode_is_fast_symlink(inode)); 118 121 target = ocfs2_fast_symlink_getlink(inode, &bh); ··· 135 142 nd_set_link(nd, status ? ERR_PTR(status) : link); 136 143 brelse(bh); 137 144 138 - mlog_exit(status); 145 + if (status) 146 + mlog_errno(status); 139 147 return NULL; 140 148 } 141 149
-1
fs/ocfs2/sysfile.c
··· 27 27 #include <linux/types.h> 28 28 #include <linux/highmem.h> 29 29 30 - #define MLOG_MASK_PREFIX ML_INODE 31 30 #include <cluster/masklog.h> 32 31 33 32 #include "ocfs2.h"
+36 -37
fs/ocfs2/uptodate.c
··· 54 54 #include <linux/buffer_head.h> 55 55 #include <linux/rbtree.h> 56 56 57 - #define MLOG_MASK_PREFIX ML_UPTODATE 58 - 59 57 #include <cluster/masklog.h> 60 58 61 59 #include "ocfs2.h" 62 60 63 61 #include "inode.h" 64 62 #include "uptodate.h" 63 + #include "ocfs2_trace.h" 65 64 66 65 struct ocfs2_meta_cache_item { 67 66 struct rb_node c_node; ··· 151 152 while ((node = rb_last(root)) != NULL) { 152 153 item = rb_entry(node, struct ocfs2_meta_cache_item, c_node); 153 154 154 - mlog(0, "Purge item %llu\n", 155 - (unsigned long long) item->c_block); 155 + trace_ocfs2_purge_copied_metadata_tree( 156 + (unsigned long long) item->c_block); 156 157 157 158 rb_erase(&item->c_node, root); 158 159 kmem_cache_free(ocfs2_uptodate_cachep, item); ··· 179 180 tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE); 180 181 to_purge = ci->ci_num_cached; 181 182 182 - mlog(0, "Purge %u %s items from Owner %llu\n", to_purge, 183 - tree ? "array" : "tree", 184 - (unsigned long long)ocfs2_metadata_cache_owner(ci)); 183 + trace_ocfs2_metadata_cache_purge( 184 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 185 + to_purge, tree); 185 186 186 187 /* If we're a tree, save off the root so that we can safely 187 188 * initialize the cache. 
We do the work to free tree members ··· 248 249 249 250 ocfs2_metadata_cache_lock(ci); 250 251 251 - mlog(0, "Owner %llu, query block %llu (inline = %u)\n", 252 - (unsigned long long)ocfs2_metadata_cache_owner(ci), 253 - (unsigned long long) bh->b_blocknr, 254 - !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE)); 252 + trace_ocfs2_buffer_cached_begin( 253 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 254 + (unsigned long long) bh->b_blocknr, 255 + !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE)); 255 256 256 257 if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) 257 258 index = ocfs2_search_cache_array(ci, bh->b_blocknr); ··· 260 261 261 262 ocfs2_metadata_cache_unlock(ci); 262 263 263 - mlog(0, "index = %d, item = %p\n", index, item); 264 + trace_ocfs2_buffer_cached_end(index, item); 264 265 265 266 return (index != -1) || (item != NULL); 266 267 } ··· 305 306 { 306 307 BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY); 307 308 308 - mlog(0, "block %llu takes position %u\n", (unsigned long long) block, 309 - ci->ci_num_cached); 309 + trace_ocfs2_append_cache_array( 310 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 311 + (unsigned long long)block, ci->ci_num_cached); 310 312 311 313 ci->ci_cache.ci_array[ci->ci_num_cached] = block; 312 314 ci->ci_num_cached++; ··· 324 324 struct rb_node **p = &ci->ci_cache.ci_tree.rb_node; 325 325 struct ocfs2_meta_cache_item *tmp; 326 326 327 - mlog(0, "Insert block %llu num = %u\n", (unsigned long long) block, 328 - ci->ci_num_cached); 327 + trace_ocfs2_insert_cache_tree( 328 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 329 + (unsigned long long)block, ci->ci_num_cached); 329 330 330 331 while(*p) { 331 332 parent = *p; ··· 390 389 tree[i] = NULL; 391 390 } 392 391 393 - mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n", 394 - (unsigned long long)ocfs2_metadata_cache_owner(ci), 395 - ci->ci_flags, ci->ci_num_cached); 392 + trace_ocfs2_expand_cache( 393 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 394 
+ ci->ci_flags, ci->ci_num_cached); 396 395 } 397 396 398 397 /* Slow path function - memory allocation is necessary. See the ··· 406 405 struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] = 407 406 { NULL, }; 408 407 409 - mlog(0, "Owner %llu, block %llu, expand = %d\n", 410 - (unsigned long long)ocfs2_metadata_cache_owner(ci), 411 - (unsigned long long)block, expand_tree); 408 + trace_ocfs2_set_buffer_uptodate( 409 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 410 + (unsigned long long)block, expand_tree); 412 411 413 412 new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); 414 413 if (!new) { ··· 434 433 435 434 ocfs2_metadata_cache_lock(ci); 436 435 if (ocfs2_insert_can_use_array(ci)) { 437 - mlog(0, "Someone cleared the tree underneath us\n"); 438 436 /* Ok, items were removed from the cache in between 439 437 * locks. Detect this and revert back to the fast path */ 440 438 ocfs2_append_cache_array(ci, block); ··· 490 490 if (ocfs2_buffer_cached(ci, bh)) 491 491 return; 492 492 493 - mlog(0, "Owner %llu, inserting block %llu\n", 494 - (unsigned long long)ocfs2_metadata_cache_owner(ci), 495 - (unsigned long long)bh->b_blocknr); 493 + trace_ocfs2_set_buffer_uptodate_begin( 494 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 495 + (unsigned long long)bh->b_blocknr); 496 496 497 497 /* No need to recheck under spinlock - insertion is guarded by 498 498 * co_io_lock() */ ··· 542 542 BUG_ON(index >= ci->ci_num_cached); 543 543 BUG_ON(!ci->ci_num_cached); 544 544 545 - mlog(0, "remove index %d (num_cached = %u\n", index, 546 - ci->ci_num_cached); 545 + trace_ocfs2_remove_metadata_array( 546 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 547 + index, ci->ci_num_cached); 547 548 548 549 ci->ci_num_cached--; 549 550 ··· 560 559 static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci, 561 560 struct ocfs2_meta_cache_item *item) 562 561 { 563 - mlog(0, "remove block %llu from tree\n", 564 - (unsigned long long) 
item->c_block); 562 + trace_ocfs2_remove_metadata_tree( 563 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 564 + (unsigned long long)item->c_block); 565 565 566 566 rb_erase(&item->c_node, &ci->ci_cache.ci_tree); 567 567 ci->ci_num_cached--; ··· 575 573 struct ocfs2_meta_cache_item *item = NULL; 576 574 577 575 ocfs2_metadata_cache_lock(ci); 578 - mlog(0, "Owner %llu, remove %llu, items = %u, array = %u\n", 579 - (unsigned long long)ocfs2_metadata_cache_owner(ci), 580 - (unsigned long long) block, ci->ci_num_cached, 581 - ci->ci_flags & OCFS2_CACHE_FL_INLINE); 576 + trace_ocfs2_remove_block_from_cache( 577 + (unsigned long long)ocfs2_metadata_cache_owner(ci), 578 + (unsigned long long) block, ci->ci_num_cached, 579 + ci->ci_flags); 582 580 583 581 if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { 584 582 index = ocfs2_search_cache_array(ci, block); ··· 627 625 0, SLAB_HWCACHE_ALIGN, NULL); 628 626 if (!ocfs2_uptodate_cachep) 629 627 return -ENOMEM; 630 - 631 - mlog(0, "%u inlined cache items per inode.\n", 632 - OCFS2_CACHE_INFO_MAX_ARRAY); 633 628 634 629 return 0; 635 630 }
+75 -80
fs/ocfs2/xattr.c
··· 37 37 #include <linux/string.h> 38 38 #include <linux/security.h> 39 39 40 - #define MLOG_MASK_PREFIX ML_XATTR 41 40 #include <cluster/masklog.h> 42 41 43 42 #include "ocfs2.h" ··· 56 57 #include "xattr.h" 57 58 #include "refcounttree.h" 58 59 #include "acl.h" 60 + #include "ocfs2_trace.h" 59 61 60 62 struct ocfs2_xattr_def_value_root { 61 63 struct ocfs2_xattr_value_root xv; ··· 474 474 struct ocfs2_xattr_block *xb = 475 475 (struct ocfs2_xattr_block *)bh->b_data; 476 476 477 - mlog(0, "Validating xattr block %llu\n", 478 - (unsigned long long)bh->b_blocknr); 477 + trace_ocfs2_validate_xattr_block((unsigned long long)bh->b_blocknr); 479 478 480 479 BUG_ON(!buffer_uptodate(bh)); 481 480 ··· 714 715 u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters); 715 716 struct ocfs2_extent_tree et; 716 717 717 - mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add); 718 - 719 718 ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); 720 719 721 720 while (clusters_to_add) { 721 + trace_ocfs2_xattr_extend_allocation(clusters_to_add); 722 + 722 723 status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, 723 724 OCFS2_JOURNAL_ACCESS_WRITE); 724 725 if (status < 0) { ··· 753 754 */ 754 755 BUG_ON(why == RESTART_META); 755 756 756 - mlog(0, "restarting xattr value extension for %u" 757 - " clusters,.\n", clusters_to_add); 758 757 credits = ocfs2_calc_extend_credits(inode->i_sb, 759 758 &vb->vb_xv->xr_list, 760 759 clusters_to_add); ··· 3243 3246 } 3244 3247 3245 3248 meta_add += extra_meta; 3246 - mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, " 3247 - "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits); 3249 + trace_ocfs2_init_xattr_set_ctxt(xi->xi_name, meta_add, 3250 + clusters_add, *credits); 3248 3251 3249 3252 if (meta_add) { 3250 3253 ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, ··· 3884 3887 3885 3888 if (found) { 3886 3889 xs->here = &xs->header->xh_entries[index]; 3887 - mlog(0, "find 
xattr %s in bucket %llu, entry = %u\n", name, 3888 - (unsigned long long)bucket_blkno(xs->bucket), index); 3890 + trace_ocfs2_xattr_bucket_find(OCFS2_I(inode)->ip_blkno, 3891 + name, name_index, name_hash, 3892 + (unsigned long long)bucket_blkno(xs->bucket), 3893 + index); 3889 3894 } else 3890 3895 ret = -ENODATA; 3891 3896 ··· 3914 3915 if (le16_to_cpu(el->l_next_free_rec) == 0) 3915 3916 return -ENODATA; 3916 3917 3917 - mlog(0, "find xattr %s, hash = %u, index = %d in xattr tree\n", 3918 - name, name_hash, name_index); 3918 + trace_ocfs2_xattr_index_block_find(OCFS2_I(inode)->ip_blkno, 3919 + name, name_index, name_hash, 3920 + (unsigned long long)root_bh->b_blocknr, 3921 + -1); 3919 3922 3920 3923 ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash, 3921 3924 &num_clusters, el); ··· 3928 3927 3929 3928 BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash); 3930 3929 3931 - mlog(0, "find xattr extent rec %u clusters from %llu, the first hash " 3932 - "in the rec is %u\n", num_clusters, (unsigned long long)p_blkno, 3933 - first_hash); 3930 + trace_ocfs2_xattr_index_block_find_rec(OCFS2_I(inode)->ip_blkno, 3931 + name, name_index, first_hash, 3932 + (unsigned long long)p_blkno, 3933 + num_clusters); 3934 3934 3935 3935 ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash, 3936 3936 p_blkno, first_hash, num_clusters, xs); ··· 3957 3955 return -ENOMEM; 3958 3956 } 3959 3957 3960 - mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n", 3961 - clusters, (unsigned long long)blkno); 3958 + trace_ocfs2_iterate_xattr_buckets( 3959 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 3960 + (unsigned long long)blkno, clusters); 3962 3961 3963 3962 for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) { 3964 3963 ret = ocfs2_read_xattr_bucket(bucket, blkno); ··· 3975 3972 if (i == 0) 3976 3973 num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets); 3977 3974 3978 - mlog(0, "iterating xattr bucket %llu, first 
hash %u\n", 3979 - (unsigned long long)blkno, 3975 + trace_ocfs2_iterate_xattr_bucket((unsigned long long)blkno, 3980 3976 le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash)); 3981 3977 if (func) { 3982 3978 ret = func(inode, bucket, para); ··· 4175 4173 char *src = xb_bh->b_data; 4176 4174 char *target = bucket_block(bucket, blks - 1); 4177 4175 4178 - mlog(0, "cp xattr from block %llu to bucket %llu\n", 4179 - (unsigned long long)xb_bh->b_blocknr, 4180 - (unsigned long long)bucket_blkno(bucket)); 4176 + trace_ocfs2_cp_xattr_block_to_bucket_begin( 4177 + (unsigned long long)xb_bh->b_blocknr, 4178 + (unsigned long long)bucket_blkno(bucket)); 4181 4179 4182 4180 for (i = 0; i < blks; i++) 4183 4181 memset(bucket_block(bucket, i), 0, blocksize); ··· 4213 4211 for (i = 0; i < count; i++) 4214 4212 le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change); 4215 4213 4216 - mlog(0, "copy entry: start = %u, size = %u, offset_change = %u\n", 4217 - offset, size, off_change); 4214 + trace_ocfs2_cp_xattr_block_to_bucket_end(offset, size, off_change); 4218 4215 4219 4216 sort(target + offset, count, sizeof(struct ocfs2_xattr_entry), 4220 4217 cmp_xe, swap_xe); ··· 4262 4261 struct ocfs2_xattr_tree_root *xr; 4263 4262 u16 xb_flags = le16_to_cpu(xb->xb_flags); 4264 4263 4265 - mlog(0, "create xattr index block for %llu\n", 4266 - (unsigned long long)xb_bh->b_blocknr); 4264 + trace_ocfs2_xattr_create_index_block_begin( 4265 + (unsigned long long)xb_bh->b_blocknr); 4267 4266 4268 4267 BUG_ON(xb_flags & OCFS2_XATTR_INDEXED); 4269 4268 BUG_ON(!xs->bucket); ··· 4296 4295 */ 4297 4296 blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); 4298 4297 4299 - mlog(0, "allocate 1 cluster from %llu to xattr block\n", 4300 - (unsigned long long)blkno); 4298 + trace_ocfs2_xattr_create_index_block((unsigned long long)blkno); 4301 4299 4302 4300 ret = ocfs2_init_xattr_bucket(xs->bucket, blkno); 4303 4301 if (ret) { ··· 4400 4400 entries = (char *)xh->xh_entries; 4401 4401 
xh_free_start = le16_to_cpu(xh->xh_free_start); 4402 4402 4403 - mlog(0, "adjust xattr bucket in %llu, count = %u, " 4404 - "xh_free_start = %u, xh_name_value_len = %u.\n", 4403 + trace_ocfs2_defrag_xattr_bucket( 4405 4404 (unsigned long long)blkno, le16_to_cpu(xh->xh_count), 4406 4405 xh_free_start, le16_to_cpu(xh->xh_name_value_len)); 4407 4406 ··· 4502 4503 BUG_ON(le16_to_cpu(bucket_xh(first)->xh_num_buckets) < num_buckets); 4503 4504 BUG_ON(OCFS2_XATTR_BUCKET_SIZE == OCFS2_SB(sb)->s_clustersize); 4504 4505 4505 - mlog(0, "move half of xattrs in cluster %llu to %llu\n", 4506 - (unsigned long long)last_cluster_blkno, (unsigned long long)new_blkno); 4506 + trace_ocfs2_mv_xattr_bucket_cross_cluster( 4507 + (unsigned long long)last_cluster_blkno, 4508 + (unsigned long long)new_blkno); 4507 4509 4508 4510 ret = ocfs2_mv_xattr_buckets(inode, handle, bucket_blkno(first), 4509 4511 last_cluster_blkno, new_blkno, ··· 4614 4614 struct ocfs2_xattr_entry *xe; 4615 4615 int blocksize = inode->i_sb->s_blocksize; 4616 4616 4617 - mlog(0, "move some of xattrs from bucket %llu to %llu\n", 4618 - (unsigned long long)blk, (unsigned long long)new_blk); 4617 + trace_ocfs2_divide_xattr_bucket_begin((unsigned long long)blk, 4618 + (unsigned long long)new_blk); 4619 4619 4620 4620 s_bucket = ocfs2_xattr_bucket_new(inode); 4621 4621 t_bucket = ocfs2_xattr_bucket_new(inode); ··· 4714 4714 */ 4715 4715 xe = &xh->xh_entries[start]; 4716 4716 len = sizeof(struct ocfs2_xattr_entry) * (count - start); 4717 - mlog(0, "mv xattr entry len %d from %d to %d\n", len, 4718 - (int)((char *)xe - (char *)xh), 4719 - (int)((char *)xh->xh_entries - (char *)xh)); 4717 + trace_ocfs2_divide_xattr_bucket_move(len, 4718 + (int)((char *)xe - (char *)xh), 4719 + (int)((char *)xh->xh_entries - (char *)xh)); 4720 4720 memmove((char *)xh->xh_entries, (char *)xe, len); 4721 4721 xe = &xh->xh_entries[count - start]; 4722 4722 len = sizeof(struct ocfs2_xattr_entry) * start; ··· 4788 4788 4789 4789 BUG_ON(s_blkno == 
t_blkno); 4790 4790 4791 - mlog(0, "cp bucket %llu to %llu, target is %d\n", 4792 - (unsigned long long)s_blkno, (unsigned long long)t_blkno, 4793 - t_is_new); 4791 + trace_ocfs2_cp_xattr_bucket((unsigned long long)s_blkno, 4792 + (unsigned long long)t_blkno, 4793 + t_is_new); 4794 4794 4795 4795 s_bucket = ocfs2_xattr_bucket_new(inode); 4796 4796 t_bucket = ocfs2_xattr_bucket_new(inode); ··· 4862 4862 int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); 4863 4863 struct ocfs2_xattr_bucket *old_first, *new_first; 4864 4864 4865 - mlog(0, "mv xattrs from cluster %llu to %llu\n", 4866 - (unsigned long long)last_blk, (unsigned long long)to_blk); 4865 + trace_ocfs2_mv_xattr_buckets((unsigned long long)last_blk, 4866 + (unsigned long long)to_blk); 4867 4867 4868 4868 BUG_ON(start_bucket >= num_buckets); 4869 4869 if (start_bucket) { ··· 5013 5013 { 5014 5014 int ret; 5015 5015 5016 - mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n", 5017 - (unsigned long long)bucket_blkno(first), prev_clusters, 5018 - (unsigned long long)new_blk); 5016 + trace_ocfs2_adjust_xattr_cross_cluster( 5017 + (unsigned long long)bucket_blkno(first), 5018 + (unsigned long long)new_blk, prev_clusters); 5019 5019 5020 5020 if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) { 5021 5021 ret = ocfs2_mv_xattr_bucket_cross_cluster(inode, ··· 5088 5088 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 5089 5089 struct ocfs2_extent_tree et; 5090 5090 5091 - mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, " 5092 - "previous xattr blkno = %llu\n", 5093 - (unsigned long long)OCFS2_I(inode)->ip_blkno, 5094 - prev_cpos, (unsigned long long)bucket_blkno(first)); 5091 + trace_ocfs2_add_new_xattr_cluster_begin( 5092 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 5093 + (unsigned long long)bucket_blkno(first), 5094 + prev_cpos, prev_clusters); 5095 5095 5096 5096 ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh); 5097 5097 ··· 5113 5113 BUG_ON(num_bits 
> clusters_to_add); 5114 5114 5115 5115 block = ocfs2_clusters_to_blocks(osb->sb, bit_off); 5116 - mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n", 5117 - num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno); 5116 + trace_ocfs2_add_new_xattr_cluster((unsigned long long)block, num_bits); 5118 5117 5119 5118 if (bucket_blkno(first) + (prev_clusters * bpc) == block && 5120 5119 (prev_clusters + num_bits) << osb->s_clustersize_bits <= ··· 5129 5130 */ 5130 5131 v_start = prev_cpos + prev_clusters; 5131 5132 *num_clusters = prev_clusters + num_bits; 5132 - mlog(0, "Add contiguous %u clusters to previous extent rec.\n", 5133 - num_bits); 5134 5133 } else { 5135 5134 ret = ocfs2_adjust_xattr_cross_cluster(inode, 5136 5135 handle, ··· 5144 5147 } 5145 5148 } 5146 5149 5147 - mlog(0, "Insert %u clusters at block %llu for xattr at %u\n", 5148 - num_bits, (unsigned long long)block, v_start); 5150 + trace_ocfs2_add_new_xattr_cluster_insert((unsigned long long)block, 5151 + v_start, num_bits); 5149 5152 ret = ocfs2_insert_extent(handle, &et, v_start, block, 5150 5153 num_bits, 0, ctxt->meta_ac); 5151 5154 if (ret < 0) { ··· 5180 5183 u64 end_blk; 5181 5184 u16 new_bucket = le16_to_cpu(bucket_xh(first)->xh_num_buckets); 5182 5185 5183 - mlog(0, "extend xattr bucket in %llu, xattr extend rec starting " 5184 - "from %llu, len = %u\n", (unsigned long long)target_blk, 5185 - (unsigned long long)bucket_blkno(first), num_clusters); 5186 + trace_ocfs2_extend_xattr_bucket((unsigned long long)target_blk, 5187 + (unsigned long long)bucket_blkno(first), 5188 + num_clusters, new_bucket); 5186 5189 5187 5190 /* The extent must have room for an additional bucket */ 5188 5191 BUG_ON(new_bucket >= ··· 5262 5265 /* The bucket at the front of the extent */ 5263 5266 struct ocfs2_xattr_bucket *first; 5264 5267 5265 - mlog(0, "Add new xattr bucket starting from %llu\n", 5266 - (unsigned long long)bucket_blkno(target)); 5268 + trace_ocfs2_add_new_xattr_bucket( 5269 
+ (unsigned long long)bucket_blkno(target)); 5267 5270 5268 5271 /* The first bucket of the original extent */ 5269 5272 first = ocfs2_xattr_bucket_new(inode); ··· 5379 5382 * modified something. We have to assume they did, and dirty 5380 5383 * the whole bucket. This leaves us in a consistent state. 5381 5384 */ 5382 - mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n", 5383 - xe_off, (unsigned long long)bucket_blkno(bucket), len); 5385 + trace_ocfs2_xattr_bucket_value_truncate( 5386 + (unsigned long long)bucket_blkno(bucket), xe_off, len); 5384 5387 ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt); 5385 5388 if (ret) { 5386 5389 mlog_errno(ret); ··· 5430 5433 5431 5434 ocfs2_init_dealloc_ctxt(&dealloc); 5432 5435 5433 - mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n", 5434 - cpos, len, (unsigned long long)blkno); 5436 + trace_ocfs2_rm_xattr_cluster( 5437 + (unsigned long long)OCFS2_I(inode)->ip_blkno, 5438 + (unsigned long long)blkno, cpos, len); 5435 5439 5436 5440 ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno, 5437 5441 len); ··· 5536 5538 int ret; 5537 5539 struct ocfs2_xa_loc loc; 5538 5540 5539 - mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name); 5541 + trace_ocfs2_xattr_set_entry_bucket(xi->xi_name); 5540 5542 5541 5543 ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket, 5542 5544 xs->not_found ? 
NULL : xs->here); ··· 5568 5570 5569 5571 5570 5572 out: 5571 - mlog_exit(ret); 5572 5573 return ret; 5573 5574 } 5574 5575 ··· 5578 5581 { 5579 5582 int ret; 5580 5583 5581 - mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name); 5584 + trace_ocfs2_xattr_set_entry_index_block(xi->xi_name); 5582 5585 5583 5586 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); 5584 5587 if (!ret) ··· 5634 5637 mlog_errno(ret); 5635 5638 5636 5639 out: 5637 - mlog_exit(ret); 5638 5640 return ret; 5639 5641 } 5640 5642 ··· 6037 6041 if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb))) 6038 6042 p = &refcount; 6039 6043 6040 - mlog(0, "refcount bucket %llu, count = %u\n", 6041 - (unsigned long long)bucket_blkno(bucket), 6042 - le16_to_cpu(xh->xh_count)); 6044 + trace_ocfs2_xattr_bucket_value_refcount( 6045 + (unsigned long long)bucket_blkno(bucket), 6046 + le16_to_cpu(xh->xh_count)); 6043 6047 for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { 6044 6048 xe = &xh->xh_entries[i]; 6045 6049 ··· 6335 6339 u32 clusters, cpos, p_cluster, num_clusters; 6336 6340 unsigned int ext_flags = 0; 6337 6341 6338 - mlog(0, "reflink xattr in container %llu, count = %u\n", 6339 - (unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count)); 6342 + trace_ocfs2_reflink_xattr_header((unsigned long long)old_bh->b_blocknr, 6343 + le16_to_cpu(xh->xh_count)); 6340 6344 6341 6345 last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)]; 6342 6346 for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) { ··· 6536 6540 goto out; 6537 6541 } 6538 6542 6539 - mlog(0, "create new xattr block for inode %llu, index = %d\n", 6540 - (unsigned long long)fe_bh->b_blocknr, indexed); 6543 + trace_ocfs2_create_empty_xattr_block( 6544 + (unsigned long long)fe_bh->b_blocknr, indexed); 6541 6545 ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed, 6542 6546 ret_bh); 6543 6547 if (ret) ··· 6948 6952 if (ret) 6949 6953 mlog_errno(ret); 6950 6954 6951 - mlog(0, "insert new xattr extent rec start %llu len %u 
to %u\n", 6952 - (unsigned long long)new_blkno, num_clusters, reflink_cpos); 6955 + trace_ocfs2_reflink_xattr_buckets((unsigned long long)new_blkno, 6956 + num_clusters, reflink_cpos); 6953 6957 6954 6958 len -= num_clusters; 6955 6959 blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters); ··· 6978 6982 struct ocfs2_alloc_context *data_ac = NULL; 6979 6983 struct ocfs2_extent_tree et; 6980 6984 6981 - mlog(0, "reflink xattr buckets %llu len %u\n", 6982 - (unsigned long long)blkno, len); 6985 + trace_ocfs2_reflink_xattr_rec((unsigned long long)blkno, len); 6983 6986 6984 6987 ocfs2_init_xattr_tree_extent_tree(&et, 6985 6988 INODE_CACHE(args->reflink->new_inode),