Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

hfs/hfsplus: convert dprint to hfs_dbg

Use a more current logging style.

Rename macro and uses.
Add do {} while (0) to macro.
Add DBG_ to macro.
Add and use hfs_dbg_cont variant where appropriate.

Signed-off-by: Joe Perches <joe@perches.com>
Cc: Vyacheslav Dubeyko <slava@dubeyko.com>
Cc: Hin-Tak Leung <htl10@users.sourceforge.net>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Joe Perches; committed by Linus Torvalds.
c2b3e1f7 5f3726f9

+127 -98
+4 -2
fs/hfs/bfind.c
··· 22 22 return -ENOMEM; 23 23 fd->search_key = ptr; 24 24 fd->key = ptr + tree->max_key_len + 2; 25 - dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); 25 + hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n", 26 + tree->cnid, __builtin_return_address(0)); 26 27 mutex_lock(&tree->tree_lock); 27 28 return 0; 28 29 } ··· 32 31 { 33 32 hfs_bnode_put(fd->bnode); 34 33 kfree(fd->search_key); 35 - dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0)); 34 + hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n", 35 + fd->tree->cnid, __builtin_return_address(0)); 36 36 mutex_unlock(&fd->tree->tree_lock); 37 37 fd->tree = NULL; 38 38 }
+2 -2
fs/hfs/bitmap.c
··· 158 158 } 159 159 } 160 160 161 - dprint(DBG_BITMAP, "alloc_bits: %u,%u\n", pos, *num_bits); 161 + hfs_dbg(BITMAP, "alloc_bits: %u,%u\n", pos, *num_bits); 162 162 HFS_SB(sb)->free_ablocks -= *num_bits; 163 163 hfs_bitmap_dirty(sb); 164 164 out: ··· 200 200 if (!count) 201 201 return 0; 202 202 203 - dprint(DBG_BITMAP, "clear_bits: %u,%u\n", start, count); 203 + hfs_dbg(BITMAP, "clear_bits: %u,%u\n", start, count); 204 204 /* are all of the bits in range? */ 205 205 if ((start + count) > HFS_SB(sb)->fs_ablocks) 206 206 return -2;
+19 -16
fs/hfs/bnode.c
··· 100 100 struct hfs_btree *tree; 101 101 struct page *src_page, *dst_page; 102 102 103 - dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); 103 + hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); 104 104 if (!len) 105 105 return; 106 106 tree = src_node->tree; ··· 120 120 struct page *page; 121 121 void *ptr; 122 122 123 - dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); 123 + hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); 124 124 if (!len) 125 125 return; 126 126 src += node->page_offset; ··· 138 138 __be32 cnid; 139 139 int i, off, key_off; 140 140 141 - dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this); 141 + hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this); 142 142 hfs_bnode_read(node, &desc, 0, sizeof(desc)); 143 - dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n", 143 + hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n", 144 144 be32_to_cpu(desc.next), be32_to_cpu(desc.prev), 145 145 desc.type, desc.height, be16_to_cpu(desc.num_recs)); 146 146 147 147 off = node->tree->node_size - 2; 148 148 for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) { 149 149 key_off = hfs_bnode_read_u16(node, off); 150 - dprint(DBG_BNODE_MOD, " %d", key_off); 150 + hfs_dbg_cont(BNODE_MOD, " %d", key_off); 151 151 if (i && node->type == HFS_NODE_INDEX) { 152 152 int tmp; 153 153 ··· 155 155 tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1; 156 156 else 157 157 tmp = node->tree->max_key_len + 1; 158 - dprint(DBG_BNODE_MOD, " (%d,%d", tmp, hfs_bnode_read_u8(node, key_off)); 158 + hfs_dbg_cont(BNODE_MOD, " (%d,%d", 159 + tmp, hfs_bnode_read_u8(node, key_off)); 159 160 hfs_bnode_read(node, &cnid, key_off + tmp, 4); 160 - dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid)); 161 + hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid)); 161 162 } else if (i && node->type == HFS_NODE_LEAF) { 162 163 int tmp; 163 164 164 165 tmp = hfs_bnode_read_u8(node, key_off); 165 - dprint(DBG_BNODE_MOD, " (%d)", tmp); 166 + hfs_dbg_cont(BNODE_MOD, " (%d)", tmp); 166 
167 } 167 168 } 168 - dprint(DBG_BNODE_MOD, "\n"); 169 + hfs_dbg_cont(BNODE_MOD, "\n"); 169 170 } 170 171 171 172 void hfs_bnode_unlink(struct hfs_bnode *node) ··· 258 257 node->this = cnid; 259 258 set_bit(HFS_BNODE_NEW, &node->flags); 260 259 atomic_set(&node->refcnt, 1); 261 - dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n", 262 - node->tree->cnid, node->this); 260 + hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n", 261 + node->tree->cnid, node->this); 263 262 init_waitqueue_head(&node->lock_wq); 264 263 spin_lock(&tree->hash_lock); 265 264 node2 = hfs_bnode_findhash(tree, cnid); ··· 302 301 { 303 302 struct hfs_bnode **p; 304 303 305 - dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n", 304 + hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n", 306 305 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 307 306 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; 308 307 *p && *p != node; p = &(*p)->next_hash) ··· 444 443 { 445 444 if (node) { 446 445 atomic_inc(&node->refcnt); 447 - dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n", 448 - node->tree->cnid, node->this, atomic_read(&node->refcnt)); 446 + hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n", 447 + node->tree->cnid, node->this, 448 + atomic_read(&node->refcnt)); 449 449 } 450 450 } 451 451 ··· 457 455 struct hfs_btree *tree = node->tree; 458 456 int i; 459 457 460 - dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n", 461 - node->tree->cnid, node->this, atomic_read(&node->refcnt)); 458 + hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n", 459 + node->tree->cnid, node->this, 460 + atomic_read(&node->refcnt)); 462 461 BUG_ON(!atomic_read(&node->refcnt)); 463 462 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) 464 463 return;
+7 -4
fs/hfs/brec.c
··· 94 94 end_rec_off = tree->node_size - (node->num_recs + 1) * 2; 95 95 end_off = hfs_bnode_read_u16(node, end_rec_off); 96 96 end_rec_off -= 2; 97 - dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off); 97 + hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", 98 + rec, size, end_off, end_rec_off); 98 99 if (size > end_rec_off - end_off) { 99 100 if (new_node) 100 101 panic("not enough room!\n"); ··· 191 190 mark_inode_dirty(tree->inode); 192 191 } 193 192 hfs_bnode_dump(node); 194 - dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength); 193 + hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n", 194 + fd->record, fd->keylength + fd->entrylength); 195 195 if (!--node->num_recs) { 196 196 hfs_bnode_unlink(node); 197 197 if (!node->parent) ··· 242 240 if (IS_ERR(new_node)) 243 241 return new_node; 244 242 hfs_bnode_get(node); 245 - dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n", 243 + hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n", 246 244 node->this, new_node->this, node->next); 247 245 new_node->next = node->next; 248 246 new_node->prev = node->this; ··· 376 374 newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1; 377 375 else 378 376 fd->keylength = newkeylen = tree->max_key_len + 1; 379 - dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen); 377 + hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n", 378 + rec, fd->keylength, newkeylen); 380 379 381 380 rec_off = tree->node_size - (rec + 2) * 2; 382 381 end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
+1 -1
fs/hfs/btree.c
··· 316 316 u32 nidx; 317 317 u8 *data, byte, m; 318 318 319 - dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this); 319 + hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this); 320 320 tree = node->tree; 321 321 nidx = node->this; 322 322 node = hfs_bnode_find(tree, 0);
+5 -3
fs/hfs/catalog.c
··· 87 87 int entry_size; 88 88 int err; 89 89 90 - dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink); 90 + hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n", 91 + str->name, cnid, inode->i_nlink); 91 92 if (dir->i_size >= HFS_MAX_VALENCE) 92 93 return -ENOSPC; 93 94 ··· 215 214 struct list_head *pos; 216 215 int res, type; 217 216 218 - dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid); 217 + hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid); 219 218 sb = dir->i_sb; 220 219 res = hfs_find_init(HFS_SB(sb)->cat_tree, &fd); 221 220 if (res) ··· 283 282 int entry_size, type; 284 283 int err; 285 284 286 - dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name, 285 + hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", 286 + cnid, src_dir->i_ino, src_name->name, 287 287 dst_dir->i_ino, dst_name->name); 288 288 sb = src_dir->i_sb; 289 289 err = hfs_find_init(HFS_SB(sb)->cat_tree, &src_fd);
+11 -9
fs/hfs/extent.c
··· 205 205 { 206 206 int i; 207 207 208 - dprint(DBG_EXTENT, " "); 208 + hfs_dbg(EXTENT, " "); 209 209 for (i = 0; i < 3; i++) 210 - dprint(DBG_EXTENT, " %u:%u", be16_to_cpu(extent[i].block), 211 - be16_to_cpu(extent[i].count)); 212 - dprint(DBG_EXTENT, "\n"); 210 + hfs_dbg_cont(EXTENT, " %u:%u", 211 + be16_to_cpu(extent[i].block), 212 + be16_to_cpu(extent[i].count)); 213 + hfs_dbg_cont(EXTENT, "\n"); 213 214 } 214 215 215 216 static int hfs_add_extent(struct hfs_extent *extent, u16 offset, ··· 405 404 goto out; 406 405 } 407 406 408 - dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); 407 + hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); 409 408 if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) { 410 409 if (!HFS_I(inode)->first_blocks) { 411 - dprint(DBG_EXTENT, "first extents\n"); 410 + hfs_dbg(EXTENT, "first extents\n"); 412 411 /* no extents yet */ 413 412 HFS_I(inode)->first_extents[0].block = cpu_to_be16(start); 414 413 HFS_I(inode)->first_extents[0].count = cpu_to_be16(len); ··· 450 449 return res; 451 450 452 451 insert_extent: 453 - dprint(DBG_EXTENT, "insert new extent\n"); 452 + hfs_dbg(EXTENT, "insert new extent\n"); 454 453 res = hfs_ext_write_extent(inode); 455 454 if (res) 456 455 goto out; ··· 475 474 u32 size; 476 475 int res; 477 476 478 - dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n", inode->i_ino, 479 - (long long)HFS_I(inode)->phys_size, inode->i_size); 477 + hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n", 478 + inode->i_ino, (long long)HFS_I(inode)->phys_size, 479 + inode->i_size); 480 480 if (inode->i_size > HFS_I(inode)->phys_size) { 481 481 struct address_space *mapping = inode->i_mapping; 482 482 void *fsdata;
+12 -2
fs/hfs/hfs_fs.h
··· 34 34 //#define DBG_MASK (DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT) 35 35 #define DBG_MASK (0) 36 36 37 - #define dprint(flg, fmt, args...) \ 38 - if (flg & DBG_MASK) printk(fmt , ## args) 37 + #define hfs_dbg(flg, fmt, ...) \ 38 + do { \ 39 + if (DBG_##flg & DBG_MASK) \ 40 + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ 41 + } while (0) 42 + 43 + #define hfs_dbg_cont(flg, fmt, ...) \ 44 + do { \ 45 + if (DBG_##flg & DBG_MASK) \ 46 + printk(KERN_CONT fmt, ##__VA_ARGS__); \ 47 + } while (0) 48 + 39 49 40 50 /* 41 51 * struct hfs_inode_info
+2 -2
fs/hfs/inode.c
··· 237 237 { 238 238 struct super_block *sb = inode->i_sb; 239 239 240 - dprint(DBG_INODE, "delete_inode: %lu\n", inode->i_ino); 240 + hfs_dbg(INODE, "delete_inode: %lu\n", inode->i_ino); 241 241 if (S_ISDIR(inode->i_mode)) { 242 242 HFS_SB(sb)->folder_count--; 243 243 if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID)) ··· 418 418 hfs_cat_rec rec; 419 419 int res; 420 420 421 - dprint(DBG_INODE, "hfs_write_inode: %lu\n", inode->i_ino); 421 + hfs_dbg(INODE, "hfs_write_inode: %lu\n", inode->i_ino); 422 422 res = hfs_ext_write_extent(inode); 423 423 if (res) 424 424 return res;
+4 -4
fs/hfsplus/attributes.c
··· 166 166 { 167 167 int err = 0; 168 168 169 - dprint(DBG_ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid); 169 + hfs_dbg(ATTR_MOD, "find_attr: %s,%d\n", name ? name : NULL, cnid); 170 170 171 171 if (!HFSPLUS_SB(sb)->attr_tree) { 172 172 printk(KERN_ERR "hfs: attributes file doesn't exist\n"); ··· 228 228 int entry_size; 229 229 int err; 230 230 231 - dprint(DBG_ATTR_MOD, "create_attr: %s,%ld\n", 231 + hfs_dbg(ATTR_MOD, "create_attr: %s,%ld\n", 232 232 name ? name : NULL, inode->i_ino); 233 233 234 234 if (!HFSPLUS_SB(sb)->attr_tree) { ··· 328 328 struct super_block *sb = inode->i_sb; 329 329 struct hfs_find_data fd; 330 330 331 - dprint(DBG_ATTR_MOD, "delete_attr: %s,%ld\n", 331 + hfs_dbg(ATTR_MOD, "delete_attr: %s,%ld\n", 332 332 name ? name : NULL, inode->i_ino); 333 333 334 334 if (!HFSPLUS_SB(sb)->attr_tree) { ··· 369 369 int err = 0; 370 370 struct hfs_find_data fd; 371 371 372 - dprint(DBG_ATTR_MOD, "delete_all_attrs: %d\n", cnid); 372 + hfs_dbg(ATTR_MOD, "delete_all_attrs: %d\n", cnid); 373 373 374 374 if (!HFSPLUS_SB(dir->i_sb)->attr_tree) { 375 375 printk(KERN_ERR "hfs: attributes file doesn't exist\n");
+2 -2
fs/hfsplus/bfind.c
··· 22 22 return -ENOMEM; 23 23 fd->search_key = ptr; 24 24 fd->key = ptr + tree->max_key_len + 2; 25 - dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", 25 + hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n", 26 26 tree->cnid, __builtin_return_address(0)); 27 27 switch (tree->cnid) { 28 28 case HFSPLUS_CAT_CNID: ··· 44 44 { 45 45 hfs_bnode_put(fd->bnode); 46 46 kfree(fd->search_key); 47 - dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", 47 + hfs_dbg(BNODE_REFS, "find_exit: %d (%p)\n", 48 48 fd->tree->cnid, __builtin_return_address(0)); 49 49 mutex_unlock(&fd->tree->tree_lock); 50 50 fd->tree = NULL;
+5 -5
fs/hfsplus/bitmap.c
··· 30 30 if (!len) 31 31 return size; 32 32 33 - dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); 33 + hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); 34 34 mutex_lock(&sbi->alloc_mutex); 35 35 mapping = sbi->alloc_file->i_mapping; 36 36 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); ··· 89 89 else 90 90 end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32; 91 91 } 92 - dprint(DBG_BITMAP, "bitmap full\n"); 92 + hfs_dbg(BITMAP, "bitmap full\n"); 93 93 start = size; 94 94 goto out; 95 95 96 96 found: 97 97 start = offset + (curr - pptr) * 32 + i; 98 98 if (start >= size) { 99 - dprint(DBG_BITMAP, "bitmap full\n"); 99 + hfs_dbg(BITMAP, "bitmap full\n"); 100 100 goto out; 101 101 } 102 102 /* do any partial u32 at the start */ ··· 154 154 *max = offset + (curr - pptr) * 32 + i - start; 155 155 sbi->free_blocks -= *max; 156 156 hfsplus_mark_mdb_dirty(sb); 157 - dprint(DBG_BITMAP, "-> %u,%u\n", start, *max); 157 + hfs_dbg(BITMAP, "-> %u,%u\n", start, *max); 158 158 out: 159 159 mutex_unlock(&sbi->alloc_mutex); 160 160 return start; ··· 173 173 if (!count) 174 174 return 0; 175 175 176 - dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count); 176 + hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count); 177 177 /* are all of the bits in range? */ 178 178 if ((offset + count) > sbi->total_blocks) 179 179 return -ENOENT;
+15 -15
fs/hfsplus/bnode.c
··· 130 130 struct page **src_page, **dst_page; 131 131 int l; 132 132 133 - dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); 133 + hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len); 134 134 if (!len) 135 135 return; 136 136 tree = src_node->tree; ··· 188 188 struct page **src_page, **dst_page; 189 189 int l; 190 190 191 - dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); 191 + hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); 192 192 if (!len) 193 193 return; 194 194 src += node->page_offset; ··· 302 302 __be32 cnid; 303 303 int i, off, key_off; 304 304 305 - dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this); 305 + hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this); 306 306 hfs_bnode_read(node, &desc, 0, sizeof(desc)); 307 - dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n", 307 + hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n", 308 308 be32_to_cpu(desc.next), be32_to_cpu(desc.prev), 309 309 desc.type, desc.height, be16_to_cpu(desc.num_recs)); 310 310 311 311 off = node->tree->node_size - 2; 312 312 for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) { 313 313 key_off = hfs_bnode_read_u16(node, off); 314 - dprint(DBG_BNODE_MOD, " %d", key_off); 314 + hfs_dbg(BNODE_MOD, " %d", key_off); 315 315 if (i && node->type == HFS_NODE_INDEX) { 316 316 int tmp; 317 317 ··· 320 320 tmp = hfs_bnode_read_u16(node, key_off) + 2; 321 321 else 322 322 tmp = node->tree->max_key_len + 2; 323 - dprint(DBG_BNODE_MOD, " (%d", tmp); 323 + hfs_dbg_cont(BNODE_MOD, " (%d", tmp); 324 324 hfs_bnode_read(node, &cnid, key_off + tmp, 4); 325 - dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid)); 325 + hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid)); 326 326 } else if (i && node->type == HFS_NODE_LEAF) { 327 327 int tmp; 328 328 329 329 tmp = hfs_bnode_read_u16(node, key_off); 330 - dprint(DBG_BNODE_MOD, " (%d)", tmp); 330 + hfs_dbg_cont(BNODE_MOD, " (%d)", tmp); 331 331 } 332 332 } 333 - dprint(DBG_BNODE_MOD, "\n"); 333 + hfs_dbg_cont(BNODE_MOD, "\n"); 334 
334 } 335 335 336 336 void hfs_bnode_unlink(struct hfs_bnode *node) ··· 366 366 367 367 /* move down? */ 368 368 if (!node->prev && !node->next) 369 - dprint(DBG_BNODE_MOD, "hfs_btree_del_level\n"); 369 + hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n"); 370 370 if (!node->parent) { 371 371 tree->root = 0; 372 372 tree->depth = 0; ··· 425 425 node->this = cnid; 426 426 set_bit(HFS_BNODE_NEW, &node->flags); 427 427 atomic_set(&node->refcnt, 1); 428 - dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n", 429 - node->tree->cnid, node->this); 428 + hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n", 429 + node->tree->cnid, node->this); 430 430 init_waitqueue_head(&node->lock_wq); 431 431 spin_lock(&tree->hash_lock); 432 432 node2 = hfs_bnode_findhash(tree, cnid); ··· 470 470 { 471 471 struct hfs_bnode **p; 472 472 473 - dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n", 473 + hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n", 474 474 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 475 475 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; 476 476 *p && *p != node; p = &(*p)->next_hash) ··· 620 620 { 621 621 if (node) { 622 622 atomic_inc(&node->refcnt); 623 - dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n", 623 + hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n", 624 624 node->tree->cnid, node->this, 625 625 atomic_read(&node->refcnt)); 626 626 } ··· 633 633 struct hfs_btree *tree = node->tree; 634 634 int i; 635 635 636 - dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n", 636 + hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n", 637 637 node->tree->cnid, node->this, 638 638 atomic_read(&node->refcnt)); 639 639 BUG_ON(!atomic_read(&node->refcnt));
+5 -5
fs/hfsplus/brec.c
··· 90 90 end_rec_off = tree->node_size - (node->num_recs + 1) * 2; 91 91 end_off = hfs_bnode_read_u16(node, end_rec_off); 92 92 end_rec_off -= 2; 93 - dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", 93 + hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", 94 94 rec, size, end_off, end_rec_off); 95 95 if (size > end_rec_off - end_off) { 96 96 if (new_node) ··· 191 191 mark_inode_dirty(tree->inode); 192 192 } 193 193 hfs_bnode_dump(node); 194 - dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", 194 + hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n", 195 195 fd->record, fd->keylength + fd->entrylength); 196 196 if (!--node->num_recs) { 197 197 hfs_bnode_unlink(node); ··· 244 244 if (IS_ERR(new_node)) 245 245 return new_node; 246 246 hfs_bnode_get(node); 247 - dprint(DBG_BNODE_MOD, "split_nodes: %d - %d - %d\n", 247 + hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n", 248 248 node->this, new_node->this, node->next); 249 249 new_node->next = node->next; 250 250 new_node->prev = node->this; ··· 379 379 newkeylen = hfs_bnode_read_u16(node, 14) + 2; 380 380 else 381 381 fd->keylength = newkeylen = tree->max_key_len + 2; 382 - dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", 382 + hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n", 383 383 rec, fd->keylength, newkeylen); 384 384 385 385 rec_off = tree->node_size - (rec + 2) * 2; ··· 391 391 end_off = hfs_bnode_read_u16(parent, end_rec_off); 392 392 if (end_rec_off - end_off < diff) { 393 393 394 - dprint(DBG_BNODE_MOD, "hfs: splitting index node.\n"); 394 + hfs_dbg(BNODE_MOD, "splitting index node\n"); 395 395 fd->bnode = parent; 396 396 new_node = hfs_bnode_split(fd); 397 397 if (IS_ERR(new_node))
+2 -2
fs/hfsplus/btree.c
··· 303 303 kunmap(*pagep); 304 304 nidx = node->next; 305 305 if (!nidx) { 306 - dprint(DBG_BNODE_MOD, "hfs: create new bmap node.\n"); 306 + hfs_dbg(BNODE_MOD, "create new bmap node\n"); 307 307 next_node = hfs_bmap_new_bmap(node, idx); 308 308 } else 309 309 next_node = hfs_bnode_find(tree, nidx); ··· 329 329 u32 nidx; 330 330 u8 *data, byte, m; 331 331 332 - dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this); 332 + hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this); 333 333 BUG_ON(!node->this); 334 334 tree = node->tree; 335 335 nidx = node->this;
+3 -4
fs/hfsplus/catalog.c
··· 212 212 int entry_size; 213 213 int err; 214 214 215 - dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", 215 + hfs_dbg(CAT_MOD, "create_cat: %s,%u(%d)\n", 216 216 str->name, cnid, inode->i_nlink); 217 217 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); 218 218 if (err) ··· 271 271 int err, off; 272 272 u16 type; 273 273 274 - dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", 275 - str ? str->name : NULL, cnid); 274 + hfs_dbg(CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid); 276 275 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); 277 276 if (err) 278 277 return err; ··· 360 361 int entry_size, type; 361 362 int err; 362 363 363 - dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", 364 + hfs_dbg(CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", 364 365 cnid, src_dir->i_ino, src_name->name, 365 366 dst_dir->i_ino, dst_name->name); 366 367 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
+13 -13
fs/hfsplus/extents.c
··· 265 265 mutex_unlock(&hip->extents_lock); 266 266 267 267 done: 268 - dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", 268 + hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n", 269 269 inode->i_ino, (long long)iblock, dblock); 270 270 271 271 mask = (1 << sbi->fs_shift) - 1; ··· 288 288 { 289 289 int i; 290 290 291 - dprint(DBG_EXTENT, " "); 291 + hfs_dbg(EXTENT, " "); 292 292 for (i = 0; i < 8; i++) 293 - dprint(DBG_EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block), 294 - be32_to_cpu(extent[i].block_count)); 295 - dprint(DBG_EXTENT, "\n"); 293 + hfs_dbg_cont(EXTENT, " %u:%u", 294 + be32_to_cpu(extent[i].start_block), 295 + be32_to_cpu(extent[i].block_count)); 296 + hfs_dbg_cont(EXTENT, "\n"); 296 297 } 297 298 298 299 static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset, ··· 350 349 err = hfsplus_block_free(sb, start, count); 351 350 if (err) { 352 351 printk(KERN_ERR "hfs: can't free extent\n"); 353 - dprint(DBG_EXTENT, " start: %u count: %u\n", 352 + hfs_dbg(EXTENT, " start: %u count: %u\n", 354 353 start, count); 355 354 } 356 355 extent->block_count = 0; ··· 361 360 err = hfsplus_block_free(sb, start + count, block_nr); 362 361 if (err) { 363 362 printk(KERN_ERR "hfs: can't free extent\n"); 364 - dprint(DBG_EXTENT, " start: %u count: %u\n", 363 + hfs_dbg(EXTENT, " start: %u count: %u\n", 365 364 start, count); 366 365 } 367 366 extent->block_count = cpu_to_be32(count); ··· 460 459 } 461 460 } 462 461 463 - dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); 462 + hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); 464 463 465 464 if (hip->alloc_blocks <= hip->first_blocks) { 466 465 if (!hip->first_blocks) { 467 - dprint(DBG_EXTENT, "first extents\n"); 466 + hfs_dbg(EXTENT, "first extents\n"); 468 467 /* no extents yet */ 469 468 hip->first_extents[0].start_block = cpu_to_be32(start); 470 469 hip->first_extents[0].block_count = cpu_to_be32(len); ··· 501 500 return res; 502 501 503 502 insert_extent: 504 - 
dprint(DBG_EXTENT, "insert new extent\n"); 503 + hfs_dbg(EXTENT, "insert new extent\n"); 505 504 res = hfsplus_ext_write_extent_locked(inode); 506 505 if (res) 507 506 goto out; ··· 526 525 u32 alloc_cnt, blk_cnt, start; 527 526 int res; 528 527 529 - dprint(DBG_INODE, "truncate: %lu, %llu -> %llu\n", 530 - inode->i_ino, (long long)hip->phys_size, 531 - inode->i_size); 528 + hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n", 529 + inode->i_ino, (long long)hip->phys_size, inode->i_size); 532 530 533 531 if (inode->i_size > hip->phys_size) { 534 532 struct address_space *mapping = inode->i_mapping;
+11 -3
fs/hfsplus/hfsplus_fs.h
··· 32 32 #endif 33 33 #define DBG_MASK (0) 34 34 35 - #define dprint(flg, fmt, args...) \ 36 - if (flg & DBG_MASK) \ 37 - printk(fmt , ## args) 35 + #define hfs_dbg(flg, fmt, ...) \ 36 + do { \ 37 + if (DBG_##flg & DBG_MASK) \ 38 + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ 39 + } while (0) 40 + 41 + #define hfs_dbg_cont(flg, fmt, ...) \ 42 + do { \ 43 + if (DBG_##flg & DBG_MASK) \ 44 + printk(KERN_CONT fmt, ##__VA_ARGS__); \ 45 + } while (0) 38 46 39 47 /* Runtime config options */ 40 48 #define HFSPLUS_DEF_CR_TYPE 0x3F3F3F3F /* '????' */
+4 -4
fs/hfsplus/super.c
··· 145 145 { 146 146 int err; 147 147 148 - dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino); 148 + hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino); 149 149 150 150 err = hfsplus_ext_write_extent(inode); 151 151 if (err) ··· 160 160 161 161 static void hfsplus_evict_inode(struct inode *inode) 162 162 { 163 - dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino); 163 + hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino); 164 164 truncate_inode_pages(&inode->i_data, 0); 165 165 clear_inode(inode); 166 166 if (HFSPLUS_IS_RSRC(inode)) { ··· 179 179 if (!wait) 180 180 return 0; 181 181 182 - dprint(DBG_SUPER, "hfsplus_sync_fs\n"); 182 + hfs_dbg(SUPER, "hfsplus_sync_fs\n"); 183 183 184 184 /* 185 185 * Explicitly write out the special metadata inodes. ··· 275 275 { 276 276 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); 277 277 278 - dprint(DBG_SUPER, "hfsplus_put_super\n"); 278 + hfs_dbg(SUPER, "hfsplus_put_super\n"); 279 279 280 280 cancel_delayed_work_sync(&sbi->sync_work); 281 281