Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ceph: print cluster fsid and client global_id in all debug logs

Multiple CephFS mounts on a host are increasingly common, so
disambiguating messages like these is necessary and will make it
easier to debug issues.

At the same time, this improves the debug logs to make issues easier
to troubleshoot, for example by printing the ino# instead of only the
memory addresses of the corresponding inodes, and by printing dentry
names instead of the corresponding memory addresses for dentries, etc.

Link: https://tracker.ceph.com/issues/61590
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>

authored by

Xiubo Li and committed by
Ilya Dryomov
38d46409 5995d90d

+1746 -1311
+4 -2
fs/ceph/acl.c
··· 15 15 #include <linux/slab.h> 16 16 17 17 #include "super.h" 18 + #include "mds_client.h" 18 19 19 20 static inline void ceph_set_cached_acl(struct inode *inode, 20 21 int type, struct posix_acl *acl) ··· 32 31 33 32 struct posix_acl *ceph_get_acl(struct inode *inode, int type, bool rcu) 34 33 { 34 + struct ceph_client *cl = ceph_inode_to_client(inode); 35 35 int size; 36 36 unsigned int retry_cnt = 0; 37 37 const char *name; ··· 74 72 } else if (size == -ENODATA || size == 0) { 75 73 acl = NULL; 76 74 } else { 77 - pr_err_ratelimited("get acl %llx.%llx failed, err=%d\n", 78 - ceph_vinop(inode), size); 75 + pr_err_ratelimited_client(cl, "%llx.%llx failed, err=%d\n", 76 + ceph_vinop(inode), size); 79 77 acl = ERR_PTR(-EIO); 80 78 } 81 79
+153 -126
fs/ceph/addr.c
··· 79 79 */ 80 80 static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio) 81 81 { 82 - struct inode *inode; 82 + struct inode *inode = mapping->host; 83 + struct ceph_client *cl = ceph_inode_to_client(inode); 83 84 struct ceph_inode_info *ci; 84 85 struct ceph_snap_context *snapc; 85 86 86 87 if (folio_test_dirty(folio)) { 87 - dout("%p dirty_folio %p idx %lu -- already dirty\n", 88 - mapping->host, folio, folio->index); 88 + doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n", 89 + ceph_vinop(inode), folio, folio->index); 89 90 VM_BUG_ON_FOLIO(!folio_test_private(folio), folio); 90 91 return false; 91 92 } 92 93 93 - inode = mapping->host; 94 94 ci = ceph_inode(inode); 95 95 96 96 /* dirty the head */ ··· 111 111 if (ci->i_wrbuffer_ref == 0) 112 112 ihold(inode); 113 113 ++ci->i_wrbuffer_ref; 114 - dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d " 115 - "snapc %p seq %lld (%d snaps)\n", 116 - mapping->host, folio, folio->index, 117 - ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 118 - ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 119 - snapc, snapc->seq, snapc->num_snaps); 114 + doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d " 115 + "snapc %p seq %lld (%d snaps)\n", 116 + ceph_vinop(inode), folio, folio->index, 117 + ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, 118 + ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 119 + snapc, snapc->seq, snapc->num_snaps); 120 120 spin_unlock(&ci->i_ceph_lock); 121 121 122 122 /* ··· 137 137 static void ceph_invalidate_folio(struct folio *folio, size_t offset, 138 138 size_t length) 139 139 { 140 - struct inode *inode; 141 - struct ceph_inode_info *ci; 140 + struct inode *inode = folio->mapping->host; 141 + struct ceph_client *cl = ceph_inode_to_client(inode); 142 + struct ceph_inode_info *ci = ceph_inode(inode); 142 143 struct ceph_snap_context *snapc; 143 144 144 - inode = folio->mapping->host; 145 - ci = ceph_inode(inode); 146 145 147 146 if (offset != 0 || length != folio_size(folio)) { 
148 - dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n", 149 - inode, folio->index, offset, length); 147 + doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n", 148 + ceph_vinop(inode), folio->index, offset, length); 150 149 return; 151 150 } 152 151 153 152 WARN_ON(!folio_test_locked(folio)); 154 153 if (folio_test_private(folio)) { 155 - dout("%p invalidate_folio idx %lu full dirty page\n", 156 - inode, folio->index); 154 + doutc(cl, "%llx.%llx idx %lu full dirty page\n", 155 + ceph_vinop(inode), folio->index); 157 156 158 157 snapc = folio_detach_private(folio); 159 158 ceph_put_wrbuffer_cap_refs(ci, 1, snapc); ··· 165 166 static bool ceph_release_folio(struct folio *folio, gfp_t gfp) 166 167 { 167 168 struct inode *inode = folio->mapping->host; 169 + struct ceph_client *cl = ceph_inode_to_client(inode); 168 170 169 - dout("%llx:%llx release_folio idx %lu (%sdirty)\n", 170 - ceph_vinop(inode), 171 - folio->index, folio_test_dirty(folio) ? "" : "not "); 171 + doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode), 172 + folio->index, folio_test_dirty(folio) ? 
"" : "not "); 172 173 173 174 if (folio_test_private(folio)) 174 175 return false; ··· 244 245 { 245 246 struct inode *inode = req->r_inode; 246 247 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 248 + struct ceph_client *cl = fsc->client; 247 249 struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0); 248 250 struct netfs_io_subrequest *subreq = req->r_priv; 249 251 struct ceph_osd_req_op *op = &req->r_ops[0]; ··· 254 254 ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency, 255 255 req->r_end_latency, osd_data->length, err); 256 256 257 - dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result, 258 - subreq->len, i_size_read(req->r_inode)); 257 + doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result, 258 + subreq->len, i_size_read(req->r_inode)); 259 259 260 260 /* no object means success but no data */ 261 261 if (err == -ENOENT) ··· 349 349 struct inode *inode = rreq->inode; 350 350 struct ceph_inode_info *ci = ceph_inode(inode); 351 351 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 352 + struct ceph_client *cl = fsc->client; 352 353 struct ceph_osd_request *req = NULL; 353 354 struct ceph_vino vino = ceph_vino(inode); 354 355 struct iov_iter iter; ··· 384 383 goto out; 385 384 } 386 385 387 - dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len); 386 + doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n", 387 + ceph_vinop(inode), subreq->start, subreq->len, len); 388 388 389 389 iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len); 390 390 ··· 402 400 403 401 err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off); 404 402 if (err < 0) { 405 - dout("%s: iov_ter_get_pages_alloc returned %d\n", 406 - __func__, err); 403 + doutc(cl, "%llx.%llx failed to allocate pages, %d\n", 404 + ceph_vinop(inode), err); 407 405 goto out; 408 406 } 409 407 ··· 431 429 ceph_osdc_put_request(req); 432 430 if (err) 433 431 
netfs_subreq_terminated(subreq, err, false); 434 - dout("%s: result %d\n", __func__, err); 432 + doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err); 435 433 } 436 434 437 435 static int ceph_init_request(struct netfs_io_request *rreq, struct file *file) 438 436 { 439 437 struct inode *inode = rreq->inode; 438 + struct ceph_client *cl = ceph_inode_to_client(inode); 440 439 int got = 0, want = CEPH_CAP_FILE_CACHE; 441 440 struct ceph_netfs_request_data *priv; 442 441 int ret = 0; ··· 469 466 */ 470 467 ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got); 471 468 if (ret < 0) { 472 - dout("start_read %p, error getting cap\n", inode); 469 + doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode)); 473 470 goto out; 474 471 } 475 472 476 473 if (!(got & want)) { 477 - dout("start_read %p, no cache cap\n", inode); 474 + doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode)); 478 475 ret = -EACCES; 479 476 goto out; 480 477 } ··· 566 563 struct ceph_snap_context *page_snapc) 567 564 { 568 565 struct ceph_inode_info *ci = ceph_inode(inode); 566 + struct ceph_client *cl = ceph_inode_to_client(inode); 569 567 struct ceph_snap_context *snapc = NULL; 570 568 struct ceph_cap_snap *capsnap = NULL; 571 569 572 570 spin_lock(&ci->i_ceph_lock); 573 571 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 574 - dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, 575 - capsnap->context, capsnap->dirty_pages); 572 + doutc(cl, " capsnap %p snapc %p has %d dirty pages\n", 573 + capsnap, capsnap->context, capsnap->dirty_pages); 576 574 if (!capsnap->dirty_pages) 577 575 continue; 578 576 ··· 605 601 } 606 602 if (!snapc && ci->i_wrbuffer_ref_head) { 607 603 snapc = ceph_get_snap_context(ci->i_head_snapc); 608 - dout(" head snapc %p has %d dirty pages\n", 609 - snapc, ci->i_wrbuffer_ref_head); 604 + doutc(cl, " head snapc %p has %d dirty pages\n", snapc, 605 + ci->i_wrbuffer_ref_head); 610 606 if (ctl) { 611 607 ctl->i_size = 
i_size_read(inode); 612 608 ctl->truncate_size = ci->i_truncate_size; ··· 663 659 struct inode *inode = page->mapping->host; 664 660 struct ceph_inode_info *ci = ceph_inode(inode); 665 661 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 662 + struct ceph_client *cl = fsc->client; 666 663 struct ceph_snap_context *snapc, *oldest; 667 664 loff_t page_off = page_offset(page); 668 665 int err; ··· 675 670 bool caching = ceph_is_cache_enabled(inode); 676 671 struct page *bounce_page = NULL; 677 672 678 - dout("writepage %p idx %lu\n", page, page->index); 673 + doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page, 674 + page->index); 679 675 680 676 if (ceph_inode_is_shutdown(inode)) 681 677 return -EIO; ··· 684 678 /* verify this is a writeable snap context */ 685 679 snapc = page_snap_context(page); 686 680 if (!snapc) { 687 - dout("writepage %p page %p not dirty?\n", inode, page); 681 + doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode), 682 + page); 688 683 return 0; 689 684 } 690 685 oldest = get_oldest_context(inode, &ceph_wbc, snapc); 691 686 if (snapc->seq > oldest->seq) { 692 - dout("writepage %p page %p snapc %p not writeable - noop\n", 693 - inode, page, snapc); 687 + doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n", 688 + ceph_vinop(inode), page, snapc); 694 689 /* we should only noop if called by kswapd */ 695 690 WARN_ON(!(current->flags & PF_MEMALLOC)); 696 691 ceph_put_snap_context(oldest); ··· 702 695 703 696 /* is this a partial page at end of file? */ 704 697 if (page_off >= ceph_wbc.i_size) { 705 - dout("folio at %lu beyond eof %llu\n", folio->index, 706 - ceph_wbc.i_size); 698 + doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n", 699 + ceph_vinop(inode), folio->index, ceph_wbc.i_size); 707 700 folio_invalidate(folio, 0, folio_size(folio)); 708 701 return 0; 709 702 } ··· 712 705 len = ceph_wbc.i_size - page_off; 713 706 714 707 wlen = IS_ENCRYPTED(inode) ? 
round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len; 715 - dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n", 716 - inode, page, page->index, page_off, wlen, snapc, snapc->seq); 708 + doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n", 709 + ceph_vinop(inode), page, page->index, page_off, wlen, snapc, 710 + snapc->seq); 717 711 718 712 if (atomic_long_inc_return(&fsc->writeback_count) > 719 713 CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) ··· 755 747 osd_req_op_extent_osd_data_pages(req, 0, 756 748 bounce_page ? &bounce_page : &page, wlen, 0, 757 749 false, false); 758 - dout("writepage %llu~%llu (%llu bytes, %sencrypted)\n", 759 - page_off, len, wlen, IS_ENCRYPTED(inode) ? "" : "not "); 750 + doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n", 751 + ceph_vinop(inode), page_off, len, wlen, 752 + IS_ENCRYPTED(inode) ? "" : "not "); 760 753 761 754 req->r_mtime = inode->i_mtime; 762 755 ceph_osdc_start_request(osdc, req); ··· 776 767 wbc = &tmp_wbc; 777 768 if (err == -ERESTARTSYS) { 778 769 /* killed by SIGKILL */ 779 - dout("writepage interrupted page %p\n", page); 770 + doutc(cl, "%llx.%llx interrupted page %p\n", 771 + ceph_vinop(inode), page); 780 772 redirty_page_for_writepage(wbc, page); 781 773 end_page_writeback(page); 782 774 return err; 783 775 } 784 776 if (err == -EBLOCKLISTED) 785 777 fsc->blocklisted = true; 786 - dout("writepage setting page/mapping error %d %p\n", 787 - err, page); 778 + doutc(cl, "%llx.%llx setting page/mapping error %d %p\n", 779 + ceph_vinop(inode), err, page); 788 780 mapping_set_error(&inode->i_data, err); 789 781 wbc->pages_skipped++; 790 782 } else { 791 - dout("writepage cleaned page %p\n", page); 783 + doutc(cl, "%llx.%llx cleaned page %p\n", 784 + ceph_vinop(inode), page); 792 785 err = 0; /* vfs expects us to return 0 */ 793 786 } 794 787 oldest = detach_page_private(page); ··· 840 829 { 841 830 struct inode *inode = req->r_inode; 842 831 struct ceph_inode_info *ci = 
ceph_inode(inode); 832 + struct ceph_client *cl = ceph_inode_to_client(inode); 843 833 struct ceph_osd_data *osd_data; 844 834 struct page *page; 845 835 int num_pages, total_pages = 0; ··· 852 840 unsigned int len = 0; 853 841 bool remove_page; 854 842 855 - dout("writepages_finish %p rc %d\n", inode, rc); 843 + doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc); 856 844 if (rc < 0) { 857 845 mapping_set_error(mapping, rc); 858 846 ceph_set_error_write(ci); ··· 874 862 /* clean all pages */ 875 863 for (i = 0; i < req->r_num_ops; i++) { 876 864 if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) { 877 - pr_warn("%s incorrect op %d req %p index %d tid %llu\n", 878 - __func__, req->r_ops[i].op, req, i, req->r_tid); 865 + pr_warn_client(cl, 866 + "%llx.%llx incorrect op %d req %p index %d tid %llu\n", 867 + ceph_vinop(inode), req->r_ops[i].op, req, i, 868 + req->r_tid); 879 869 break; 880 870 } 881 871 ··· 904 890 905 891 ceph_put_snap_context(detach_page_private(page)); 906 892 end_page_writeback(page); 907 - dout("unlocking %p\n", page); 893 + doutc(cl, "unlocking %p\n", page); 908 894 909 895 if (remove_page) 910 896 generic_error_remove_page(inode->i_mapping, ··· 912 898 913 899 unlock_page(page); 914 900 } 915 - dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n", 916 - inode, osd_data->length, rc >= 0 ? num_pages : 0); 901 + doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n", 902 + ceph_vinop(inode), osd_data->length, 903 + rc >= 0 ? 
num_pages : 0); 917 904 918 905 release_pages(osd_data->pages, num_pages); 919 906 } ··· 942 927 struct inode *inode = mapping->host; 943 928 struct ceph_inode_info *ci = ceph_inode(inode); 944 929 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 930 + struct ceph_client *cl = fsc->client; 945 931 struct ceph_vino vino = ceph_vino(inode); 946 932 pgoff_t index, start_index, end = -1; 947 933 struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc; ··· 960 944 fsc->write_congested) 961 945 return 0; 962 946 963 - dout("writepages_start %p (mode=%s)\n", inode, 964 - wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 965 - (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 947 + doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode), 948 + wbc->sync_mode == WB_SYNC_NONE ? "NONE" : 949 + (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD")); 966 950 967 951 if (ceph_inode_is_shutdown(inode)) { 968 952 if (ci->i_wrbuffer_ref > 0) { 969 - pr_warn_ratelimited( 970 - "writepage_start %p %lld forced umount\n", 971 - inode, ceph_ino(inode)); 953 + pr_warn_ratelimited_client(cl, 954 + "%llx.%llx %lld forced umount\n", 955 + ceph_vinop(inode), ceph_ino(inode)); 972 956 } 973 957 mapping_set_error(mapping, -EIO); 974 958 return -EIO; /* we're in a forced umount, don't write! */ ··· 992 976 if (!snapc) { 993 977 /* hmm, why does writepages get called when there 994 978 is no dirty data? 
*/ 995 - dout(" no snap context with dirty data?\n"); 979 + doutc(cl, " no snap context with dirty data?\n"); 996 980 goto out; 997 981 } 998 - dout(" oldest snapc is %p seq %lld (%d snaps)\n", 999 - snapc, snapc->seq, snapc->num_snaps); 982 + doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n", snapc, 983 + snapc->seq, snapc->num_snaps); 1000 984 1001 985 should_loop = false; 1002 986 if (ceph_wbc.head_snapc && snapc != last_snapc) { ··· 1006 990 end = -1; 1007 991 if (index > 0) 1008 992 should_loop = true; 1009 - dout(" cyclic, start at %lu\n", index); 993 + doutc(cl, " cyclic, start at %lu\n", index); 1010 994 } else { 1011 995 index = wbc->range_start >> PAGE_SHIFT; 1012 996 end = wbc->range_end >> PAGE_SHIFT; 1013 997 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 1014 998 range_whole = true; 1015 - dout(" not cyclic, %lu to %lu\n", index, end); 999 + doutc(cl, " not cyclic, %lu to %lu\n", index, end); 1016 1000 } 1017 1001 } else if (!ceph_wbc.head_snapc) { 1018 1002 /* Do not respect wbc->range_{start,end}. Dirty pages ··· 1021 1005 * associated with 'snapc' get written */ 1022 1006 if (index > 0) 1023 1007 should_loop = true; 1024 - dout(" non-head snapc, range whole\n"); 1008 + doutc(cl, " non-head snapc, range whole\n"); 1025 1009 } 1026 1010 1027 1011 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) ··· 1044 1028 get_more_pages: 1045 1029 nr_folios = filemap_get_folios_tag(mapping, &index, 1046 1030 end, tag, &fbatch); 1047 - dout("pagevec_lookup_range_tag got %d\n", nr_folios); 1031 + doutc(cl, "pagevec_lookup_range_tag got %d\n", nr_folios); 1048 1032 if (!nr_folios && !locked_pages) 1049 1033 break; 1050 1034 for (i = 0; i < nr_folios && locked_pages < max_pages; i++) { 1051 1035 page = &fbatch.folios[i]->page; 1052 - dout("? %p idx %lu\n", page, page->index); 1036 + doutc(cl, "? 
%p idx %lu\n", page, page->index); 1053 1037 if (locked_pages == 0) 1054 1038 lock_page(page); /* first page */ 1055 1039 else if (!trylock_page(page)) ··· 1058 1042 /* only dirty pages, or our accounting breaks */ 1059 1043 if (unlikely(!PageDirty(page)) || 1060 1044 unlikely(page->mapping != mapping)) { 1061 - dout("!dirty or !mapping %p\n", page); 1045 + doutc(cl, "!dirty or !mapping %p\n", page); 1062 1046 unlock_page(page); 1063 1047 continue; 1064 1048 } 1065 1049 /* only if matching snap context */ 1066 1050 pgsnapc = page_snap_context(page); 1067 1051 if (pgsnapc != snapc) { 1068 - dout("page snapc %p %lld != oldest %p %lld\n", 1069 - pgsnapc, pgsnapc->seq, snapc, snapc->seq); 1052 + doutc(cl, "page snapc %p %lld != oldest %p %lld\n", 1053 + pgsnapc, pgsnapc->seq, snapc, snapc->seq); 1070 1054 if (!should_loop && 1071 1055 !ceph_wbc.head_snapc && 1072 1056 wbc->sync_mode != WB_SYNC_NONE) ··· 1077 1061 if (page_offset(page) >= ceph_wbc.i_size) { 1078 1062 struct folio *folio = page_folio(page); 1079 1063 1080 - dout("folio at %lu beyond eof %llu\n", 1081 - folio->index, ceph_wbc.i_size); 1064 + doutc(cl, "folio at %lu beyond eof %llu\n", 1065 + folio->index, ceph_wbc.i_size); 1082 1066 if ((ceph_wbc.size_stable || 1083 1067 folio_pos(folio) >= i_size_read(inode)) && 1084 1068 folio_clear_dirty_for_io(folio)) ··· 1088 1072 continue; 1089 1073 } 1090 1074 if (strip_unit_end && (page->index > strip_unit_end)) { 1091 - dout("end of strip unit %p\n", page); 1075 + doutc(cl, "end of strip unit %p\n", page); 1092 1076 unlock_page(page); 1093 1077 break; 1094 1078 } 1095 1079 if (PageWriteback(page) || PageFsCache(page)) { 1096 1080 if (wbc->sync_mode == WB_SYNC_NONE) { 1097 - dout("%p under writeback\n", page); 1081 + doutc(cl, "%p under writeback\n", page); 1098 1082 unlock_page(page); 1099 1083 continue; 1100 1084 } 1101 - dout("waiting on writeback %p\n", page); 1085 + doutc(cl, "waiting on writeback %p\n", page); 1102 1086 wait_on_page_writeback(page); 1103 
1087 wait_on_page_fscache(page); 1104 1088 } 1105 1089 1106 1090 if (!clear_page_dirty_for_io(page)) { 1107 - dout("%p !clear_page_dirty_for_io\n", page); 1091 + doutc(cl, "%p !clear_page_dirty_for_io\n", page); 1108 1092 unlock_page(page); 1109 1093 continue; 1110 1094 } ··· 1159 1143 } 1160 1144 1161 1145 /* note position of first page in fbatch */ 1162 - dout("%p will write page %p idx %lu\n", 1163 - inode, page, page->index); 1146 + doutc(cl, "%llx.%llx will write page %p idx %lu\n", 1147 + ceph_vinop(inode), page, page->index); 1164 1148 1165 1149 if (atomic_long_inc_return(&fsc->writeback_count) > 1166 1150 CONGESTION_ON_THRESH( ··· 1174 1158 locked_pages ? GFP_NOWAIT : GFP_NOFS); 1175 1159 if (IS_ERR(pages[locked_pages])) { 1176 1160 if (PTR_ERR(pages[locked_pages]) == -EINVAL) 1177 - pr_err("%s: inode->i_blkbits=%hhu\n", 1178 - __func__, inode->i_blkbits); 1161 + pr_err_client(cl, 1162 + "inode->i_blkbits=%hhu\n", 1163 + inode->i_blkbits); 1179 1164 /* better not fail on first page! 
*/ 1180 1165 BUG_ON(locked_pages == 0); 1181 1166 pages[locked_pages] = NULL; ··· 1210 1193 1211 1194 if (nr_folios && i == nr_folios && 1212 1195 locked_pages < max_pages) { 1213 - dout("reached end fbatch, trying for more\n"); 1196 + doutc(cl, "reached end fbatch, trying for more\n"); 1214 1197 folio_batch_release(&fbatch); 1215 1198 goto get_more_pages; 1216 1199 } ··· 1271 1254 /* Start a new extent */ 1272 1255 osd_req_op_extent_dup_last(req, op_idx, 1273 1256 cur_offset - offset); 1274 - dout("writepages got pages at %llu~%llu\n", 1275 - offset, len); 1257 + doutc(cl, "got pages at %llu~%llu\n", offset, 1258 + len); 1276 1259 osd_req_op_extent_osd_data_pages(req, op_idx, 1277 1260 data_pages, len, 0, 1278 1261 from_pool, false); ··· 1305 1288 if (IS_ENCRYPTED(inode)) 1306 1289 len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE); 1307 1290 1308 - dout("writepages got pages at %llu~%llu\n", offset, len); 1291 + doutc(cl, "got pages at %llu~%llu\n", offset, len); 1309 1292 1310 1293 if (IS_ENCRYPTED(inode) && 1311 1294 ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK)) 1312 - pr_warn("%s: bad encrypted write offset=%lld len=%llu\n", 1313 - __func__, offset, len); 1295 + pr_warn_client(cl, 1296 + "bad encrypted write offset=%lld len=%llu\n", 1297 + offset, len); 1314 1298 1315 1299 osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, 1316 1300 0, from_pool, false); ··· 1363 1345 done = true; 1364 1346 1365 1347 release_folios: 1366 - dout("folio_batch release on %d folios (%p)\n", (int)fbatch.nr, 1367 - fbatch.nr ? fbatch.folios[0] : NULL); 1348 + doutc(cl, "folio_batch release on %d folios (%p)\n", 1349 + (int)fbatch.nr, fbatch.nr ? 
fbatch.folios[0] : NULL); 1368 1350 folio_batch_release(&fbatch); 1369 1351 } 1370 1352 1371 1353 if (should_loop && !done) { 1372 1354 /* more to do; loop back to beginning of file */ 1373 - dout("writepages looping back to beginning of file\n"); 1355 + doutc(cl, "looping back to beginning of file\n"); 1374 1356 end = start_index - 1; /* OK even when start_index == 0 */ 1375 1357 1376 1358 /* to write dirty pages associated with next snapc, ··· 1408 1390 out: 1409 1391 ceph_osdc_put_request(req); 1410 1392 ceph_put_snap_context(last_snapc); 1411 - dout("writepages dend - startone, rc = %d\n", rc); 1393 + doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode), 1394 + rc); 1412 1395 return rc; 1413 1396 } 1414 1397 ··· 1443 1424 ceph_find_incompatible(struct page *page) 1444 1425 { 1445 1426 struct inode *inode = page->mapping->host; 1427 + struct ceph_client *cl = ceph_inode_to_client(inode); 1446 1428 struct ceph_inode_info *ci = ceph_inode(inode); 1447 1429 1448 1430 if (ceph_inode_is_shutdown(inode)) { 1449 - dout(" page %p %llx:%llx is shutdown\n", page, 1450 - ceph_vinop(inode)); 1431 + doutc(cl, " %llx.%llx page %p is shutdown\n", 1432 + ceph_vinop(inode), page); 1451 1433 return ERR_PTR(-ESTALE); 1452 1434 } 1453 1435 ··· 1469 1449 if (snapc->seq > oldest->seq) { 1470 1450 /* not writeable -- return it for the caller to deal with */ 1471 1451 ceph_put_snap_context(oldest); 1472 - dout(" page %p snapc %p not current or oldest\n", page, snapc); 1452 + doutc(cl, " %llx.%llx page %p snapc %p not current or oldest\n", 1453 + ceph_vinop(inode), page, snapc); 1473 1454 return ceph_get_snap_context(snapc); 1474 1455 } 1475 1456 ceph_put_snap_context(oldest); 1476 1457 1477 1458 /* yay, writeable, do it now (without dropping page lock) */ 1478 - dout(" page %p snapc %p not current, but oldest\n", page, snapc); 1459 + doutc(cl, " %llx.%llx page %p snapc %p not current, but oldest\n", 1460 + ceph_vinop(inode), page, snapc); 1479 1461 if 
(clear_page_dirty_for_io(page)) { 1480 1462 int r = writepage_nounlock(page, NULL); 1481 1463 if (r < 0) ··· 1546 1524 { 1547 1525 struct folio *folio = page_folio(subpage); 1548 1526 struct inode *inode = file_inode(file); 1527 + struct ceph_client *cl = ceph_inode_to_client(inode); 1549 1528 bool check_cap = false; 1550 1529 1551 - dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file, 1552 - inode, folio, (int)pos, (int)copied, (int)len); 1530 + doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode), 1531 + file, folio, (int)pos, (int)copied, (int)len); 1553 1532 1554 1533 if (!folio_test_uptodate(folio)) { 1555 1534 /* just return that nothing was copied on a short copy */ ··· 1610 1587 struct vm_area_struct *vma = vmf->vma; 1611 1588 struct inode *inode = file_inode(vma->vm_file); 1612 1589 struct ceph_inode_info *ci = ceph_inode(inode); 1590 + struct ceph_client *cl = ceph_inode_to_client(inode); 1613 1591 struct ceph_file_info *fi = vma->vm_file->private_data; 1614 1592 loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT; 1615 1593 int want, got, err; ··· 1622 1598 1623 1599 ceph_block_sigs(&oldset); 1624 1600 1625 - dout("filemap_fault %p %llx.%llx %llu trying to get caps\n", 1626 - inode, ceph_vinop(inode), off); 1601 + doutc(cl, "%llx.%llx %llu trying to get caps\n", 1602 + ceph_vinop(inode), off); 1627 1603 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1628 1604 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 1629 1605 else ··· 1634 1610 if (err < 0) 1635 1611 goto out_restore; 1636 1612 1637 - dout("filemap_fault %p %llu got cap refs on %s\n", 1638 - inode, off, ceph_cap_string(got)); 1613 + doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode), 1614 + off, ceph_cap_string(got)); 1639 1615 1640 1616 if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || 1641 1617 !ceph_has_inline_data(ci)) { ··· 1643 1619 ceph_add_rw_context(fi, &rw_ctx); 1644 1620 ret = filemap_fault(vmf); 1645 1621 ceph_del_rw_context(fi, &rw_ctx); 1646 - 
dout("filemap_fault %p %llu drop cap refs %s ret %x\n", 1647 - inode, off, ceph_cap_string(got), ret); 1622 + doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n", 1623 + ceph_vinop(inode), off, ceph_cap_string(got), ret); 1648 1624 } else 1649 1625 err = -EAGAIN; 1650 1626 ··· 1685 1661 ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED; 1686 1662 out_inline: 1687 1663 filemap_invalidate_unlock_shared(mapping); 1688 - dout("filemap_fault %p %llu read inline data ret %x\n", 1689 - inode, off, ret); 1664 + doutc(cl, "%llx.%llx %llu read inline data ret %x\n", 1665 + ceph_vinop(inode), off, ret); 1690 1666 } 1691 1667 out_restore: 1692 1668 ceph_restore_sigs(&oldset); ··· 1700 1676 { 1701 1677 struct vm_area_struct *vma = vmf->vma; 1702 1678 struct inode *inode = file_inode(vma->vm_file); 1679 + struct ceph_client *cl = ceph_inode_to_client(inode); 1703 1680 struct ceph_inode_info *ci = ceph_inode(inode); 1704 1681 struct ceph_file_info *fi = vma->vm_file->private_data; 1705 1682 struct ceph_cap_flush *prealloc_cf; ··· 1727 1702 else 1728 1703 len = offset_in_thp(page, size); 1729 1704 1730 - dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", 1731 - inode, ceph_vinop(inode), off, len, size); 1705 + doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n", 1706 + ceph_vinop(inode), off, len, size); 1732 1707 if (fi->fmode & CEPH_FILE_MODE_LAZY) 1733 1708 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; 1734 1709 else ··· 1739 1714 if (err < 0) 1740 1715 goto out_free; 1741 1716 1742 - dout("page_mkwrite %p %llu~%zd got cap refs on %s\n", 1743 - inode, off, len, ceph_cap_string(got)); 1717 + doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode), 1718 + off, len, ceph_cap_string(got)); 1744 1719 1745 1720 /* Update time before taking page lock */ 1746 1721 file_update_time(vma->vm_file); ··· 1788 1763 __mark_inode_dirty(inode, dirty); 1789 1764 } 1790 1765 1791 - dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n", 1792 - inode, 
off, len, ceph_cap_string(got), ret); 1766 + doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n", 1767 + ceph_vinop(inode), off, len, ceph_cap_string(got), ret); 1793 1768 ceph_put_cap_refs_async(ci, got); 1794 1769 out_free: 1795 1770 ceph_restore_sigs(&oldset); ··· 1803 1778 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, 1804 1779 char *data, size_t len) 1805 1780 { 1781 + struct ceph_client *cl = ceph_inode_to_client(inode); 1806 1782 struct address_space *mapping = inode->i_mapping; 1807 1783 struct page *page; 1808 1784 ··· 1824 1798 } 1825 1799 } 1826 1800 1827 - dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", 1828 - inode, ceph_vinop(inode), len, locked_page); 1801 + doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode, 1802 + ceph_vinop(inode), len, locked_page); 1829 1803 1830 1804 if (len > 0) { 1831 1805 void *kaddr = kmap_atomic(page); ··· 1850 1824 struct inode *inode = file_inode(file); 1851 1825 struct ceph_inode_info *ci = ceph_inode(inode); 1852 1826 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 1827 + struct ceph_client *cl = fsc->client; 1853 1828 struct ceph_osd_request *req = NULL; 1854 1829 struct ceph_cap_flush *prealloc_cf = NULL; 1855 1830 struct folio *folio = NULL; ··· 1863 1836 inline_version = ci->i_inline_version; 1864 1837 spin_unlock(&ci->i_ceph_lock); 1865 1838 1866 - dout("uninline_data %p %llx.%llx inline_version %llu\n", 1867 - inode, ceph_vinop(inode), inline_version); 1839 + doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode), 1840 + inline_version); 1868 1841 1869 1842 if (ceph_inode_is_shutdown(inode)) { 1870 1843 err = -EIO; ··· 1976 1949 } 1977 1950 out: 1978 1951 ceph_free_cap_flush(prealloc_cf); 1979 - dout("uninline_data %p %llx.%llx inline_version %llu = %d\n", 1980 - inode, ceph_vinop(inode), inline_version, err); 1952 + doutc(cl, "%llx.%llx inline_version %llu = %d\n", 1953 + ceph_vinop(inode), inline_version, err); 1981 1954 return 
err; 1982 1955 } 1983 1956 ··· 2006 1979 { 2007 1980 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode); 2008 1981 struct ceph_mds_client *mdsc = fsc->mdsc; 1982 + struct ceph_client *cl = fsc->client; 2009 1983 struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; 2010 1984 struct rb_node **p, *parent; 2011 1985 struct ceph_pool_perm *perm; ··· 2041 2013 goto out; 2042 2014 2043 2015 if (pool_ns) 2044 - dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n", 2045 - pool, (int)pool_ns->len, pool_ns->str); 2016 + doutc(cl, "pool %lld ns %.*s no perm cached\n", pool, 2017 + (int)pool_ns->len, pool_ns->str); 2046 2018 else 2047 - dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool); 2019 + doutc(cl, "pool %lld no perm cached\n", pool); 2048 2020 2049 2021 down_write(&mdsc->pool_perm_rwsem); 2050 2022 p = &mdsc->pool_perm_tree.rb_node; ··· 2169 2141 if (!err) 2170 2142 err = have; 2171 2143 if (pool_ns) 2172 - dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n", 2173 - pool, (int)pool_ns->len, pool_ns->str, err); 2144 + doutc(cl, "pool %lld ns %.*s result = %d\n", pool, 2145 + (int)pool_ns->len, pool_ns->str, err); 2174 2146 else 2175 - dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err); 2147 + doutc(cl, "pool %lld result = %d\n", pool, err); 2176 2148 return err; 2177 2149 } 2178 2150 2179 2151 int ceph_pool_perm_check(struct inode *inode, int need) 2180 2152 { 2153 + struct ceph_client *cl = ceph_inode_to_client(inode); 2181 2154 struct ceph_inode_info *ci = ceph_inode(inode); 2182 2155 struct ceph_string *pool_ns; 2183 2156 s64 pool; ··· 2208 2179 check: 2209 2180 if (flags & CEPH_I_POOL_PERM) { 2210 2181 if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) { 2211 - dout("ceph_pool_perm_check pool %lld no read perm\n", 2212 - pool); 2182 + doutc(cl, "pool %lld no read perm\n", pool); 2213 2183 return -EPERM; 2214 2184 } 2215 2185 if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) { 2216 - 
dout("ceph_pool_perm_check pool %lld no write perm\n", 2217 - pool); 2186 + doutc(cl, "pool %lld no write perm\n", pool); 2218 2187 return -EPERM; 2219 2188 } 2220 2189 return 0;
+409 -299
fs/ceph/caps.c
··· 186 186 mdsc->caps_avail_count += nr_caps; 187 187 } 188 188 189 - dout("%s: caps %d = %d used + %d resv + %d avail\n", 190 - __func__, 191 - mdsc->caps_total_count, mdsc->caps_use_count, 192 - mdsc->caps_reserve_count, mdsc->caps_avail_count); 189 + doutc(mdsc->fsc->client, 190 + "caps %d = %d used + %d resv + %d avail\n", 191 + mdsc->caps_total_count, mdsc->caps_use_count, 192 + mdsc->caps_reserve_count, mdsc->caps_avail_count); 193 193 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count + 194 194 mdsc->caps_reserve_count + 195 195 mdsc->caps_avail_count); ··· 202 202 int ceph_reserve_caps(struct ceph_mds_client *mdsc, 203 203 struct ceph_cap_reservation *ctx, int need) 204 204 { 205 + struct ceph_client *cl = mdsc->fsc->client; 205 206 int i, j; 206 207 struct ceph_cap *cap; 207 208 int have; ··· 213 212 struct ceph_mds_session *s; 214 213 LIST_HEAD(newcaps); 215 214 216 - dout("reserve caps ctx=%p need=%d\n", ctx, need); 215 + doutc(cl, "ctx=%p need=%d\n", ctx, need); 217 216 218 217 /* first reserve any caps that are already allocated */ 219 218 spin_lock(&mdsc->caps_list_lock); ··· 273 272 continue; 274 273 } 275 274 276 - pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n", 277 - ctx, need, have + alloc); 275 + pr_warn_client(cl, "ctx=%p ENOMEM need=%d got=%d\n", ctx, need, 276 + have + alloc); 278 277 err = -ENOMEM; 279 278 break; 280 279 } ··· 299 298 300 299 spin_unlock(&mdsc->caps_list_lock); 301 300 302 - dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n", 303 - ctx, mdsc->caps_total_count, mdsc->caps_use_count, 304 - mdsc->caps_reserve_count, mdsc->caps_avail_count); 301 + doutc(cl, "ctx=%p %d = %d used + %d resv + %d avail\n", ctx, 302 + mdsc->caps_total_count, mdsc->caps_use_count, 303 + mdsc->caps_reserve_count, mdsc->caps_avail_count); 305 304 return err; 306 305 } 307 306 308 307 void ceph_unreserve_caps(struct ceph_mds_client *mdsc, 309 308 struct ceph_cap_reservation *ctx) 310 309 { 310 + struct ceph_client *cl = 
mdsc->fsc->client; 311 311 bool reclaim = false; 312 312 if (!ctx->count) 313 313 return; 314 314 315 - dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count); 315 + doutc(cl, "ctx=%p count=%d\n", ctx, ctx->count); 316 316 spin_lock(&mdsc->caps_list_lock); 317 317 __ceph_unreserve_caps(mdsc, ctx->count); 318 318 ctx->count = 0; ··· 330 328 struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc, 331 329 struct ceph_cap_reservation *ctx) 332 330 { 331 + struct ceph_client *cl = mdsc->fsc->client; 333 332 struct ceph_cap *cap = NULL; 334 333 335 334 /* temporary, until we do something about cap import/export */ ··· 362 359 } 363 360 364 361 spin_lock(&mdsc->caps_list_lock); 365 - dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n", 366 - ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count, 367 - mdsc->caps_reserve_count, mdsc->caps_avail_count); 362 + doutc(cl, "ctx=%p (%d) %d = %d used + %d resv + %d avail\n", ctx, 363 + ctx->count, mdsc->caps_total_count, mdsc->caps_use_count, 364 + mdsc->caps_reserve_count, mdsc->caps_avail_count); 368 365 BUG_ON(!ctx->count); 369 366 BUG_ON(ctx->count > mdsc->caps_reserve_count); 370 367 BUG_ON(list_empty(&mdsc->caps_list)); ··· 385 382 386 383 void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap) 387 384 { 385 + struct ceph_client *cl = mdsc->fsc->client; 386 + 388 387 spin_lock(&mdsc->caps_list_lock); 389 - dout("put_cap %p %d = %d used + %d resv + %d avail\n", 390 - cap, mdsc->caps_total_count, mdsc->caps_use_count, 391 - mdsc->caps_reserve_count, mdsc->caps_avail_count); 388 + doutc(cl, "%p %d = %d used + %d resv + %d avail\n", cap, 389 + mdsc->caps_total_count, mdsc->caps_use_count, 390 + mdsc->caps_reserve_count, mdsc->caps_avail_count); 392 391 mdsc->caps_use_count--; 393 392 /* 394 393 * Keep some preallocated caps around (ceph_min_count), to ··· 496 491 static void __cap_set_timeouts(struct ceph_mds_client *mdsc, 497 492 struct ceph_inode_info *ci) 498 493 { 494 + struct inode 
*inode = &ci->netfs.inode; 499 495 struct ceph_mount_options *opt = mdsc->fsc->mount_options; 496 + 500 497 ci->i_hold_caps_max = round_jiffies(jiffies + 501 498 opt->caps_wanted_delay_max * HZ); 502 - dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode, 503 - ci->i_hold_caps_max - jiffies); 499 + doutc(mdsc->fsc->client, "%p %llx.%llx %lu\n", inode, 500 + ceph_vinop(inode), ci->i_hold_caps_max - jiffies); 504 501 } 505 502 506 503 /* ··· 516 509 static void __cap_delay_requeue(struct ceph_mds_client *mdsc, 517 510 struct ceph_inode_info *ci) 518 511 { 519 - dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode, 520 - ci->i_ceph_flags, ci->i_hold_caps_max); 512 + struct inode *inode = &ci->netfs.inode; 513 + 514 + doutc(mdsc->fsc->client, "%p %llx.%llx flags 0x%lx at %lu\n", 515 + inode, ceph_vinop(inode), ci->i_ceph_flags, 516 + ci->i_hold_caps_max); 521 517 if (!mdsc->stopping) { 522 518 spin_lock(&mdsc->cap_delay_lock); 523 519 if (!list_empty(&ci->i_cap_delay_list)) { ··· 543 533 static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc, 544 534 struct ceph_inode_info *ci) 545 535 { 546 - dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode); 536 + struct inode *inode = &ci->netfs.inode; 537 + 538 + doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode, ceph_vinop(inode)); 547 539 spin_lock(&mdsc->cap_delay_lock); 548 540 ci->i_ceph_flags |= CEPH_I_FLUSH; 549 541 if (!list_empty(&ci->i_cap_delay_list)) ··· 562 550 static void __cap_delay_cancel(struct ceph_mds_client *mdsc, 563 551 struct ceph_inode_info *ci) 564 552 { 565 - dout("__cap_delay_cancel %p\n", &ci->netfs.inode); 553 + struct inode *inode = &ci->netfs.inode; 554 + 555 + doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode, ceph_vinop(inode)); 566 556 if (list_empty(&ci->i_cap_delay_list)) 567 557 return; 568 558 spin_lock(&mdsc->cap_delay_lock); ··· 576 562 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, 577 563 unsigned issued) 578 564 { 565 + 
struct inode *inode = &ci->netfs.inode; 566 + struct ceph_client *cl = ceph_inode_to_client(inode); 567 + 579 568 unsigned had = __ceph_caps_issued(ci, NULL); 580 569 581 570 lockdep_assert_held(&ci->i_ceph_lock); ··· 603 586 if (issued & CEPH_CAP_FILE_SHARED) 604 587 atomic_inc(&ci->i_shared_gen); 605 588 if (S_ISDIR(ci->netfs.inode.i_mode)) { 606 - dout(" marking %p NOT complete\n", &ci->netfs.inode); 589 + doutc(cl, " marking %p NOT complete\n", inode); 607 590 __ceph_dir_clear_complete(ci); 608 591 } 609 592 } ··· 653 636 struct ceph_cap **new_cap) 654 637 { 655 638 struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc; 639 + struct ceph_client *cl = ceph_inode_to_client(inode); 656 640 struct ceph_inode_info *ci = ceph_inode(inode); 657 641 struct ceph_cap *cap; 658 642 int mds = session->s_mds; ··· 662 644 663 645 lockdep_assert_held(&ci->i_ceph_lock); 664 646 665 - dout("add_cap %p mds%d cap %llx %s seq %d\n", inode, 666 - session->s_mds, cap_id, ceph_cap_string(issued), seq); 647 + doutc(cl, "%p %llx.%llx mds%d cap %llx %s seq %d\n", inode, 648 + ceph_vinop(inode), session->s_mds, cap_id, 649 + ceph_cap_string(issued), seq); 667 650 668 651 gen = atomic_read(&session->s_cap_gen); 669 652 ··· 742 723 actual_wanted = __ceph_caps_wanted(ci); 743 724 if ((wanted & ~actual_wanted) || 744 725 (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) { 745 - dout(" issued %s, mds wanted %s, actual %s, queueing\n", 746 - ceph_cap_string(issued), ceph_cap_string(wanted), 747 - ceph_cap_string(actual_wanted)); 726 + doutc(cl, "issued %s, mds wanted %s, actual %s, queueing\n", 727 + ceph_cap_string(issued), ceph_cap_string(wanted), 728 + ceph_cap_string(actual_wanted)); 748 729 __cap_delay_requeue(mdsc, ci); 749 730 } 750 731 ··· 761 742 WARN_ON(ci->i_auth_cap == cap); 762 743 } 763 744 764 - dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n", 765 - inode, ceph_vinop(inode), cap, ceph_cap_string(issued), 766 - ceph_cap_string(issued|cap->issued), 
seq, mds); 745 + doutc(cl, "inode %p %llx.%llx cap %p %s now %s seq %d mds%d\n", 746 + inode, ceph_vinop(inode), cap, ceph_cap_string(issued), 747 + ceph_cap_string(issued|cap->issued), seq, mds); 767 748 cap->cap_id = cap_id; 768 749 cap->issued = issued; 769 750 cap->implemented |= issued; ··· 785 766 */ 786 767 static int __cap_is_valid(struct ceph_cap *cap) 787 768 { 769 + struct inode *inode = &cap->ci->netfs.inode; 770 + struct ceph_client *cl = cap->session->s_mdsc->fsc->client; 788 771 unsigned long ttl; 789 772 u32 gen; 790 773 ··· 794 773 ttl = cap->session->s_cap_ttl; 795 774 796 775 if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) { 797 - dout("__cap_is_valid %p cap %p issued %s " 798 - "but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode, 799 - cap, ceph_cap_string(cap->issued), cap->cap_gen, gen); 776 + doutc(cl, "%p %llx.%llx cap %p issued %s but STALE (gen %u vs %u)\n", 777 + inode, ceph_vinop(inode), cap, 778 + ceph_cap_string(cap->issued), cap->cap_gen, gen); 800 779 return 0; 801 780 } 802 781 ··· 810 789 */ 811 790 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented) 812 791 { 792 + struct inode *inode = &ci->netfs.inode; 793 + struct ceph_client *cl = ceph_inode_to_client(inode); 813 794 int have = ci->i_snap_caps; 814 795 struct ceph_cap *cap; 815 796 struct rb_node *p; ··· 822 799 cap = rb_entry(p, struct ceph_cap, ci_node); 823 800 if (!__cap_is_valid(cap)) 824 801 continue; 825 - dout("__ceph_caps_issued %p cap %p issued %s\n", 826 - &ci->netfs.inode, cap, ceph_cap_string(cap->issued)); 802 + doutc(cl, "%p %llx.%llx cap %p issued %s\n", inode, 803 + ceph_vinop(inode), cap, ceph_cap_string(cap->issued)); 827 804 have |= cap->issued; 828 805 if (implemented) 829 806 *implemented |= cap->implemented; ··· 866 843 */ 867 844 static void __touch_cap(struct ceph_cap *cap) 868 845 { 846 + struct inode *inode = &cap->ci->netfs.inode; 869 847 struct ceph_mds_session *s = cap->session; 848 + struct ceph_client *cl = 
s->s_mdsc->fsc->client; 870 849 871 850 spin_lock(&s->s_cap_lock); 872 851 if (!s->s_cap_iterator) { 873 - dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap, 874 - s->s_mds); 852 + doutc(cl, "%p %llx.%llx cap %p mds%d\n", inode, 853 + ceph_vinop(inode), cap, s->s_mds); 875 854 list_move_tail(&cap->session_caps, &s->s_caps); 876 855 } else { 877 - dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n", 878 - &cap->ci->netfs.inode, cap, s->s_mds); 856 + doutc(cl, "%p %llx.%llx cap %p mds%d NOP, iterating over caps\n", 857 + inode, ceph_vinop(inode), cap, s->s_mds); 879 858 } 880 859 spin_unlock(&s->s_cap_lock); 881 860 } ··· 889 864 */ 890 865 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) 891 866 { 867 + struct inode *inode = &ci->netfs.inode; 868 + struct ceph_client *cl = ceph_inode_to_client(inode); 892 869 struct ceph_cap *cap; 893 870 struct rb_node *p; 894 871 int have = ci->i_snap_caps; 895 872 896 873 if ((have & mask) == mask) { 897 - dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s" 898 - " (mask %s)\n", ceph_ino(&ci->netfs.inode), 899 - ceph_cap_string(have), 900 - ceph_cap_string(mask)); 874 + doutc(cl, "mask %p %llx.%llx snap issued %s (mask %s)\n", 875 + inode, ceph_vinop(inode), ceph_cap_string(have), 876 + ceph_cap_string(mask)); 901 877 return 1; 902 878 } 903 879 ··· 907 881 if (!__cap_is_valid(cap)) 908 882 continue; 909 883 if ((cap->issued & mask) == mask) { 910 - dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s" 911 - " (mask %s)\n", ceph_ino(&ci->netfs.inode), cap, 912 - ceph_cap_string(cap->issued), 913 - ceph_cap_string(mask)); 884 + doutc(cl, "mask %p %llx.%llx cap %p issued %s (mask %s)\n", 885 + inode, ceph_vinop(inode), cap, 886 + ceph_cap_string(cap->issued), 887 + ceph_cap_string(mask)); 914 888 if (touch) 915 889 __touch_cap(cap); 916 890 return 1; ··· 919 893 /* does a combination of caps satisfy mask? 
*/ 920 894 have |= cap->issued; 921 895 if ((have & mask) == mask) { 922 - dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s" 923 - " (mask %s)\n", ceph_ino(&ci->netfs.inode), 924 - ceph_cap_string(cap->issued), 925 - ceph_cap_string(mask)); 896 + doutc(cl, "mask %p %llx.%llx combo issued %s (mask %s)\n", 897 + inode, ceph_vinop(inode), 898 + ceph_cap_string(cap->issued), 899 + ceph_cap_string(mask)); 926 900 if (touch) { 927 901 struct rb_node *q; 928 902 ··· 980 954 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask) 981 955 { 982 956 struct inode *inode = &ci->netfs.inode; 957 + struct ceph_client *cl = ceph_inode_to_client(inode); 983 958 int ret; 984 959 985 960 spin_lock(&ci->i_ceph_lock); 986 961 ret = __ceph_caps_revoking_other(ci, NULL, mask); 987 962 spin_unlock(&ci->i_ceph_lock); 988 - dout("ceph_caps_revoking %p %s = %d\n", inode, 989 - ceph_cap_string(mask), ret); 963 + doutc(cl, "%p %llx.%llx %s = %d\n", inode, ceph_vinop(inode), 964 + ceph_cap_string(mask), ret); 990 965 return ret; 991 966 } 992 967 ··· 1134 1107 void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) 1135 1108 { 1136 1109 struct ceph_mds_session *session = cap->session; 1110 + struct ceph_client *cl = session->s_mdsc->fsc->client; 1137 1111 struct ceph_inode_info *ci = cap->ci; 1112 + struct inode *inode = &ci->netfs.inode; 1138 1113 struct ceph_mds_client *mdsc; 1139 1114 int removed = 0; 1140 1115 1141 1116 /* 'ci' being NULL means the remove have already occurred */ 1142 1117 if (!ci) { 1143 - dout("%s: cap inode is NULL\n", __func__); 1118 + doutc(cl, "inode is NULL\n"); 1144 1119 return; 1145 1120 } 1146 1121 1147 1122 lockdep_assert_held(&ci->i_ceph_lock); 1148 1123 1149 - dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode); 1124 + doutc(cl, "%p from %p %llx.%llx\n", cap, inode, ceph_vinop(inode)); 1150 1125 1151 1126 mdsc = ceph_inode_to_fs_client(&ci->netfs.inode)->mdsc; 1152 1127 ··· 1161 1132 spin_lock(&session->s_cap_lock); 1162 1133 if 
(session->s_cap_iterator == cap) { 1163 1134 /* not yet, we are iterating over this very cap */ 1164 - dout("__ceph_remove_cap delaying %p removal from session %p\n", 1165 - cap, cap->session); 1135 + doutc(cl, "delaying %p removal from session %p\n", cap, 1136 + cap->session); 1166 1137 } else { 1167 1138 list_del_init(&cap->session_caps); 1168 1139 session->s_nr_caps--; ··· 1215 1186 1216 1187 /* 'ci' being NULL means the remove have already occurred */ 1217 1188 if (!ci) { 1218 - dout("%s: cap inode is NULL\n", __func__); 1189 + doutc(mdsc->fsc->client, "inode is NULL\n"); 1219 1190 return; 1220 1191 } 1221 1192 ··· 1257 1228 { 1258 1229 struct ceph_mds_caps *fc; 1259 1230 void *p; 1260 - struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc; 1231 + struct ceph_mds_client *mdsc = arg->session->s_mdsc; 1232 + struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; 1261 1233 1262 - dout("%s %s %llx %llx caps %s wanted %s dirty %s seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu xattr_ver %llu xattr_len %d\n", 1263 - __func__, ceph_cap_op_name(arg->op), arg->cid, arg->ino, 1264 - ceph_cap_string(arg->caps), ceph_cap_string(arg->wanted), 1265 - ceph_cap_string(arg->dirty), arg->seq, arg->issue_seq, 1266 - arg->flush_tid, arg->oldest_flush_tid, arg->mseq, arg->follows, 1267 - arg->size, arg->max_size, arg->xattr_version, 1268 - arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0); 1234 + doutc(mdsc->fsc->client, 1235 + "%s %llx %llx caps %s wanted %s dirty %s seq %u/%u" 1236 + " tid %llu/%llu mseq %u follows %lld size %llu/%llu" 1237 + " xattr_ver %llu xattr_len %d\n", 1238 + ceph_cap_op_name(arg->op), arg->cid, arg->ino, 1239 + ceph_cap_string(arg->caps), ceph_cap_string(arg->wanted), 1240 + ceph_cap_string(arg->dirty), arg->seq, arg->issue_seq, 1241 + arg->flush_tid, arg->oldest_flush_tid, arg->mseq, arg->follows, 1242 + arg->size, arg->max_size, arg->xattr_version, 1243 + arg->xattr_buf ? 
(int)arg->xattr_buf->vec.iov_len : 0); 1269 1244 1270 1245 msg->hdr.version = cpu_to_le16(12); 1271 1246 msg->hdr.tid = cpu_to_le64(arg->flush_tid); ··· 1406 1373 { 1407 1374 struct ceph_inode_info *ci = cap->ci; 1408 1375 struct inode *inode = &ci->netfs.inode; 1376 + struct ceph_client *cl = ceph_inode_to_client(inode); 1409 1377 int held, revoking; 1410 1378 1411 1379 lockdep_assert_held(&ci->i_ceph_lock); ··· 1415 1381 revoking = cap->implemented & ~cap->issued; 1416 1382 retain &= ~revoking; 1417 1383 1418 - dout("%s %p cap %p session %p %s -> %s (revoking %s)\n", 1419 - __func__, inode, cap, cap->session, 1420 - ceph_cap_string(held), ceph_cap_string(held & retain), 1421 - ceph_cap_string(revoking)); 1384 + doutc(cl, "%p %llx.%llx cap %p session %p %s -> %s (revoking %s)\n", 1385 + inode, ceph_vinop(inode), cap, cap->session, 1386 + ceph_cap_string(held), ceph_cap_string(held & retain), 1387 + ceph_cap_string(revoking)); 1422 1388 BUG_ON((retain & CEPH_CAP_PIN) == 0); 1423 1389 1424 1390 ci->i_ceph_flags &= ~CEPH_I_FLUSH; ··· 1534 1500 { 1535 1501 struct ceph_msg *msg; 1536 1502 struct inode *inode = &ci->netfs.inode; 1503 + struct ceph_client *cl = ceph_inode_to_client(inode); 1537 1504 1538 1505 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, cap_msg_size(arg), GFP_NOFS, 1539 1506 false); 1540 1507 if (!msg) { 1541 - pr_err("error allocating cap msg: ino (%llx.%llx) flushing %s tid %llu, requeuing cap.\n", 1542 - ceph_vinop(inode), ceph_cap_string(arg->dirty), 1543 - arg->flush_tid); 1508 + pr_err_client(cl, 1509 + "error allocating cap msg: ino (%llx.%llx)" 1510 + " flushing %s tid %llu, requeuing cap.\n", 1511 + ceph_vinop(inode), ceph_cap_string(arg->dirty), 1512 + arg->flush_tid); 1544 1513 spin_lock(&ci->i_ceph_lock); 1545 1514 __cap_delay_requeue(arg->session->s_mdsc, ci); 1546 1515 spin_unlock(&ci->i_ceph_lock); ··· 1632 1595 { 1633 1596 struct inode *inode = &ci->netfs.inode; 1634 1597 struct ceph_mds_client *mdsc = session->s_mdsc; 1598 + struct 
ceph_client *cl = mdsc->fsc->client; 1635 1599 struct ceph_cap_snap *capsnap; 1636 1600 u64 oldest_flush_tid = 0; 1637 1601 u64 first_tid = 1, last_tid = 0; 1638 1602 1639 - dout("__flush_snaps %p session %p\n", inode, session); 1603 + doutc(cl, "%p %llx.%llx session %p\n", inode, ceph_vinop(inode), 1604 + session); 1640 1605 1641 1606 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 1642 1607 /* ··· 1653 1614 1654 1615 /* only flush each capsnap once */ 1655 1616 if (capsnap->cap_flush.tid > 0) { 1656 - dout(" already flushed %p, skipping\n", capsnap); 1617 + doutc(cl, "already flushed %p, skipping\n", capsnap); 1657 1618 continue; 1658 1619 } 1659 1620 ··· 1685 1646 int ret; 1686 1647 1687 1648 if (!(cap && cap->session == session)) { 1688 - dout("__flush_snaps %p auth cap %p not mds%d, " 1689 - "stop\n", inode, cap, session->s_mds); 1649 + doutc(cl, "%p %llx.%llx auth cap %p not mds%d, stop\n", 1650 + inode, ceph_vinop(inode), cap, session->s_mds); 1690 1651 break; 1691 1652 } 1692 1653 ··· 1707 1668 refcount_inc(&capsnap->nref); 1708 1669 spin_unlock(&ci->i_ceph_lock); 1709 1670 1710 - dout("__flush_snaps %p capsnap %p tid %llu %s\n", 1711 - inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty)); 1671 + doutc(cl, "%p %llx.%llx capsnap %p tid %llu %s\n", inode, 1672 + ceph_vinop(inode), capsnap, cf->tid, 1673 + ceph_cap_string(capsnap->dirty)); 1712 1674 1713 1675 ret = __send_flush_snap(inode, session, capsnap, cap->mseq, 1714 1676 oldest_flush_tid); 1715 1677 if (ret < 0) { 1716 - pr_err("__flush_snaps: error sending cap flushsnap, " 1717 - "ino (%llx.%llx) tid %llu follows %llu\n", 1718 - ceph_vinop(inode), cf->tid, capsnap->follows); 1678 + pr_err_client(cl, "error sending cap flushsnap, " 1679 + "ino (%llx.%llx) tid %llu follows %llu\n", 1680 + ceph_vinop(inode), cf->tid, 1681 + capsnap->follows); 1719 1682 } 1720 1683 1721 1684 ceph_put_cap_snap(capsnap); ··· 1730 1689 { 1731 1690 struct inode *inode = &ci->netfs.inode; 1732 1691 struct 
ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc; 1692 + struct ceph_client *cl = ceph_inode_to_client(inode); 1733 1693 struct ceph_mds_session *session = NULL; 1734 1694 bool need_put = false; 1735 1695 int mds; 1736 1696 1737 - dout("ceph_flush_snaps %p\n", inode); 1697 + doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode)); 1738 1698 if (psession) 1739 1699 session = *psession; 1740 1700 retry: 1741 1701 spin_lock(&ci->i_ceph_lock); 1742 1702 if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) { 1743 - dout(" no capsnap needs flush, doing nothing\n"); 1703 + doutc(cl, " no capsnap needs flush, doing nothing\n"); 1744 1704 goto out; 1745 1705 } 1746 1706 if (!ci->i_auth_cap) { 1747 - dout(" no auth cap (migrating?), doing nothing\n"); 1707 + doutc(cl, " no auth cap (migrating?), doing nothing\n"); 1748 1708 goto out; 1749 1709 } 1750 1710 1751 1711 mds = ci->i_auth_cap->session->s_mds; 1752 1712 if (session && session->s_mds != mds) { 1753 - dout(" oops, wrong session %p mutex\n", session); 1713 + doutc(cl, " oops, wrong session %p mutex\n", session); 1754 1714 ceph_put_mds_session(session); 1755 1715 session = NULL; 1756 1716 } ··· 1797 1755 struct ceph_mds_client *mdsc = 1798 1756 ceph_sb_to_fs_client(ci->netfs.inode.i_sb)->mdsc; 1799 1757 struct inode *inode = &ci->netfs.inode; 1758 + struct ceph_client *cl = ceph_inode_to_client(inode); 1800 1759 int was = ci->i_dirty_caps; 1801 1760 int dirty = 0; 1802 1761 1803 1762 lockdep_assert_held(&ci->i_ceph_lock); 1804 1763 1805 1764 if (!ci->i_auth_cap) { 1806 - pr_warn("__mark_dirty_caps %p %llx mask %s, " 1807 - "but no auth cap (session was closed?)\n", 1808 - inode, ceph_ino(inode), ceph_cap_string(mask)); 1765 + pr_warn_client(cl, "%p %llx.%llx mask %s, " 1766 + "but no auth cap (session was closed?)\n", 1767 + inode, ceph_vinop(inode), 1768 + ceph_cap_string(mask)); 1809 1769 return 0; 1810 1770 } 1811 1771 1812 - dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode, 1813 - 
ceph_cap_string(mask), ceph_cap_string(was), 1814 - ceph_cap_string(was | mask)); 1772 + doutc(cl, "%p %llx.%llx %s dirty %s -> %s\n", inode, 1773 + ceph_vinop(inode), ceph_cap_string(mask), 1774 + ceph_cap_string(was), ceph_cap_string(was | mask)); 1815 1775 ci->i_dirty_caps |= mask; 1816 1776 if (was == 0) { 1817 1777 struct ceph_mds_session *session = ci->i_auth_cap->session; ··· 1826 1782 ci->i_head_snapc = ceph_get_snap_context( 1827 1783 ci->i_snap_realm->cached_context); 1828 1784 } 1829 - dout(" inode %p now dirty snapc %p auth cap %p\n", 1830 - &ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap); 1785 + doutc(cl, "%p %llx.%llx now dirty snapc %p auth cap %p\n", 1786 + inode, ceph_vinop(inode), ci->i_head_snapc, 1787 + ci->i_auth_cap); 1831 1788 BUG_ON(!list_empty(&ci->i_dirty_item)); 1832 1789 spin_lock(&mdsc->cap_dirty_lock); 1833 1790 list_add(&ci->i_dirty_item, &session->s_cap_dirty); ··· 1922 1877 u64 *oldest_flush_tid) 1923 1878 { 1924 1879 struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc; 1880 + struct ceph_client *cl = ceph_inode_to_client(inode); 1925 1881 struct ceph_inode_info *ci = ceph_inode(inode); 1926 1882 struct ceph_cap_flush *cf = NULL; 1927 1883 int flushing; ··· 1933 1887 BUG_ON(!ci->i_prealloc_cap_flush); 1934 1888 1935 1889 flushing = ci->i_dirty_caps; 1936 - dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n", 1937 - ceph_cap_string(flushing), 1938 - ceph_cap_string(ci->i_flushing_caps), 1939 - ceph_cap_string(ci->i_flushing_caps | flushing)); 1890 + doutc(cl, "flushing %s, flushing_caps %s -> %s\n", 1891 + ceph_cap_string(flushing), 1892 + ceph_cap_string(ci->i_flushing_caps), 1893 + ceph_cap_string(ci->i_flushing_caps | flushing)); 1940 1894 ci->i_flushing_caps |= flushing; 1941 1895 ci->i_dirty_caps = 0; 1942 - dout(" inode %p now !dirty\n", inode); 1896 + doutc(cl, "%p %llx.%llx now !dirty\n", inode, ceph_vinop(inode)); 1943 1897 1944 1898 swap(cf, ci->i_prealloc_cap_flush); 1945 1899 cf->caps 
= flushing; ··· 1970 1924 __releases(ci->i_ceph_lock) 1971 1925 __acquires(ci->i_ceph_lock) 1972 1926 { 1927 + struct ceph_client *cl = ceph_inode_to_client(inode); 1973 1928 struct ceph_inode_info *ci = ceph_inode(inode); 1974 1929 u32 invalidating_gen = ci->i_rdcache_gen; 1975 1930 ··· 1982 1935 if (inode->i_data.nrpages == 0 && 1983 1936 invalidating_gen == ci->i_rdcache_gen) { 1984 1937 /* success. */ 1985 - dout("try_nonblocking_invalidate %p success\n", inode); 1938 + doutc(cl, "%p %llx.%llx success\n", inode, 1939 + ceph_vinop(inode)); 1986 1940 /* save any racing async invalidate some trouble */ 1987 1941 ci->i_rdcache_revoking = ci->i_rdcache_gen - 1; 1988 1942 return 0; 1989 1943 } 1990 - dout("try_nonblocking_invalidate %p failed\n", inode); 1944 + doutc(cl, "%p %llx.%llx failed\n", inode, ceph_vinop(inode)); 1991 1945 return -1; 1992 1946 } 1993 1947 ··· 2020 1972 { 2021 1973 struct inode *inode = &ci->netfs.inode; 2022 1974 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 1975 + struct ceph_client *cl = ceph_inode_to_client(inode); 2023 1976 struct ceph_cap *cap; 2024 1977 u64 flush_tid, oldest_flush_tid; 2025 1978 int file_wanted, used, cap_used; ··· 2095 2046 } 2096 2047 } 2097 2048 2098 - dout("check_caps %llx.%llx file_want %s used %s dirty %s flushing %s" 2099 - " issued %s revoking %s retain %s %s%s%s\n", ceph_vinop(inode), 2100 - ceph_cap_string(file_wanted), 2049 + doutc(cl, "%p %llx.%llx file_want %s used %s dirty %s " 2050 + "flushing %s issued %s revoking %s retain %s %s%s%s\n", 2051 + inode, ceph_vinop(inode), ceph_cap_string(file_wanted), 2101 2052 ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps), 2102 2053 ceph_cap_string(ci->i_flushing_caps), 2103 2054 ceph_cap_string(issued), ceph_cap_string(revoking), ··· 2118 2069 (revoking & (CEPH_CAP_FILE_CACHE| 2119 2070 CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */ 2120 2071 !tried_invalidate) { 2121 - dout("check_caps trying to invalidate on %llx.%llx\n", 2122 - 
ceph_vinop(inode)); 2072 + doutc(cl, "trying to invalidate on %p %llx.%llx\n", 2073 + inode, ceph_vinop(inode)); 2123 2074 if (try_nonblocking_invalidate(inode) < 0) { 2124 - dout("check_caps queuing invalidate\n"); 2075 + doutc(cl, "queuing invalidate\n"); 2125 2076 queue_invalidate = true; 2126 2077 ci->i_rdcache_revoking = ci->i_rdcache_gen; 2127 2078 } ··· 2149 2100 cap_used &= ~ci->i_auth_cap->issued; 2150 2101 2151 2102 revoking = cap->implemented & ~cap->issued; 2152 - dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n", 2153 - cap->mds, cap, ceph_cap_string(cap_used), 2154 - ceph_cap_string(cap->issued), 2155 - ceph_cap_string(cap->implemented), 2156 - ceph_cap_string(revoking)); 2103 + doutc(cl, " mds%d cap %p used %s issued %s implemented %s revoking %s\n", 2104 + cap->mds, cap, ceph_cap_string(cap_used), 2105 + ceph_cap_string(cap->issued), 2106 + ceph_cap_string(cap->implemented), 2107 + ceph_cap_string(revoking)); 2157 2108 2158 2109 if (cap == ci->i_auth_cap && 2159 2110 (cap->issued & CEPH_CAP_FILE_WR)) { 2160 2111 /* request larger max_size from MDS? */ 2161 2112 if (ci->i_wanted_max_size > ci->i_max_size && 2162 2113 ci->i_wanted_max_size > ci->i_requested_max_size) { 2163 - dout("requesting new max_size\n"); 2114 + doutc(cl, "requesting new max_size\n"); 2164 2115 goto ack; 2165 2116 } 2166 2117 2167 2118 /* approaching file_max? */ 2168 2119 if (__ceph_should_report_size(ci)) { 2169 - dout("i_size approaching max_size\n"); 2120 + doutc(cl, "i_size approaching max_size\n"); 2170 2121 goto ack; 2171 2122 } 2172 2123 } 2173 2124 /* flush anything dirty? 
*/ 2174 2125 if (cap == ci->i_auth_cap) { 2175 2126 if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) { 2176 - dout("flushing dirty caps\n"); 2127 + doutc(cl, "flushing dirty caps\n"); 2177 2128 goto ack; 2178 2129 } 2179 2130 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) { 2180 - dout("flushing snap caps\n"); 2131 + doutc(cl, "flushing snap caps\n"); 2181 2132 goto ack; 2182 2133 } 2183 2134 } ··· 2185 2136 /* completed revocation? going down and there are no caps? */ 2186 2137 if (revoking) { 2187 2138 if ((revoking & cap_used) == 0) { 2188 - dout("completed revocation of %s\n", 2139 + doutc(cl, "completed revocation of %s\n", 2189 2140 ceph_cap_string(cap->implemented & ~cap->issued)); 2190 2141 goto ack; 2191 2142 } ··· 2363 2314 static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode) 2364 2315 { 2365 2316 struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc; 2317 + struct ceph_client *cl = ceph_inode_to_client(inode); 2366 2318 struct ceph_inode_info *ci = ceph_inode(inode); 2367 2319 struct ceph_mds_request *req1 = NULL, *req2 = NULL; 2368 2320 int ret, err = 0; ··· 2453 2403 kfree(sessions); 2454 2404 } 2455 2405 2456 - dout("%s %p wait on tid %llu %llu\n", __func__, 2457 - inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL); 2406 + doutc(cl, "%p %llx.%llx wait on tid %llu %llu\n", inode, 2407 + ceph_vinop(inode), req1 ? req1->r_tid : 0ULL, 2408 + req2 ? req2->r_tid : 0ULL); 2458 2409 if (req1) { 2459 2410 ret = !wait_for_completion_timeout(&req1->r_safe_completion, 2460 2411 ceph_timeout_jiffies(req1->r_timeout)); ··· 2481 2430 { 2482 2431 struct inode *inode = file->f_mapping->host; 2483 2432 struct ceph_inode_info *ci = ceph_inode(inode); 2433 + struct ceph_client *cl = ceph_inode_to_client(inode); 2484 2434 u64 flush_tid; 2485 2435 int ret, err; 2486 2436 int dirty; 2487 2437 2488 - dout("fsync %p%s\n", inode, datasync ? 
" datasync" : ""); 2438 + doutc(cl, "%p %llx.%llx%s\n", inode, ceph_vinop(inode), 2439 + datasync ? " datasync" : ""); 2489 2440 2490 2441 ret = file_write_and_wait_range(file, start, end); 2491 2442 if (datasync) ··· 2498 2445 goto out; 2499 2446 2500 2447 dirty = try_flush_caps(inode, &flush_tid); 2501 - dout("fsync dirty caps are %s\n", ceph_cap_string(dirty)); 2448 + doutc(cl, "dirty caps are %s\n", ceph_cap_string(dirty)); 2502 2449 2503 2450 err = flush_mdlog_and_wait_inode_unsafe_requests(inode); 2504 2451 ··· 2519 2466 if (err < 0) 2520 2467 ret = err; 2521 2468 out: 2522 - dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret); 2469 + doutc(cl, "%p %llx.%llx%s result=%d\n", inode, ceph_vinop(inode), 2470 + datasync ? " datasync" : "", ret); 2523 2471 return ret; 2524 2472 } 2525 2473 ··· 2533 2479 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc) 2534 2480 { 2535 2481 struct ceph_inode_info *ci = ceph_inode(inode); 2482 + struct ceph_client *cl = ceph_inode_to_client(inode); 2536 2483 u64 flush_tid; 2537 2484 int err = 0; 2538 2485 int dirty; 2539 2486 int wait = (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync); 2540 2487 2541 - dout("write_inode %p wait=%d\n", inode, wait); 2488 + doutc(cl, "%p %llx.%llx wait=%d\n", inode, ceph_vinop(inode), wait); 2542 2489 ceph_fscache_unpin_writeback(inode, wbc); 2543 2490 if (wait) { 2544 2491 err = ceph_wait_on_async_create(inode); ··· 2569 2514 __acquires(ci->i_ceph_lock) 2570 2515 { 2571 2516 struct inode *inode = &ci->netfs.inode; 2517 + struct ceph_client *cl = mdsc->fsc->client; 2572 2518 struct ceph_cap *cap; 2573 2519 struct ceph_cap_flush *cf; 2574 2520 int ret; ··· 2595 2539 2596 2540 cap = ci->i_auth_cap; 2597 2541 if (!(cap && cap->session == session)) { 2598 - pr_err("%p auth cap %p not mds%d ???\n", 2599 - inode, cap, session->s_mds); 2542 + pr_err_client(cl, "%p auth cap %p not mds%d ???\n", 2543 + inode, cap, session->s_mds); 2600 2544 break; 2601 2545 } 2602 
2546 ··· 2605 2549 if (!cf->is_capsnap) { 2606 2550 struct cap_msg_args arg; 2607 2551 2608 - dout("kick_flushing_caps %p cap %p tid %llu %s\n", 2609 - inode, cap, cf->tid, ceph_cap_string(cf->caps)); 2552 + doutc(cl, "%p %llx.%llx cap %p tid %llu %s\n", 2553 + inode, ceph_vinop(inode), cap, cf->tid, 2554 + ceph_cap_string(cf->caps)); 2610 2555 __prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH, 2611 2556 (cf->tid < last_snap_flush ? 2612 2557 CEPH_CLIENT_CAPS_PENDING_CAPSNAP : 0), ··· 2621 2564 struct ceph_cap_snap *capsnap = 2622 2565 container_of(cf, struct ceph_cap_snap, 2623 2566 cap_flush); 2624 - dout("kick_flushing_caps %p capsnap %p tid %llu %s\n", 2625 - inode, capsnap, cf->tid, 2626 - ceph_cap_string(capsnap->dirty)); 2567 + doutc(cl, "%p %llx.%llx capsnap %p tid %llu %s\n", 2568 + inode, ceph_vinop(inode), capsnap, cf->tid, 2569 + ceph_cap_string(capsnap->dirty)); 2627 2570 2628 2571 refcount_inc(&capsnap->nref); 2629 2572 spin_unlock(&ci->i_ceph_lock); ··· 2631 2574 ret = __send_flush_snap(inode, session, capsnap, cap->mseq, 2632 2575 oldest_flush_tid); 2633 2576 if (ret < 0) { 2634 - pr_err("kick_flushing_caps: error sending " 2635 - "cap flushsnap, ino (%llx.%llx) " 2636 - "tid %llu follows %llu\n", 2637 - ceph_vinop(inode), cf->tid, 2638 - capsnap->follows); 2577 + pr_err_client(cl, "error sending cap flushsnap," 2578 + " %p %llx.%llx tid %llu follows %llu\n", 2579 + inode, ceph_vinop(inode), cf->tid, 2580 + capsnap->follows); 2639 2581 } 2640 2582 2641 2583 ceph_put_cap_snap(capsnap); ··· 2647 2591 void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, 2648 2592 struct ceph_mds_session *session) 2649 2593 { 2594 + struct ceph_client *cl = mdsc->fsc->client; 2650 2595 struct ceph_inode_info *ci; 2651 2596 struct ceph_cap *cap; 2652 2597 u64 oldest_flush_tid; 2653 2598 2654 - dout("early_kick_flushing_caps mds%d\n", session->s_mds); 2599 + doutc(cl, "mds%d\n", session->s_mds); 2655 2600 2656 2601 spin_lock(&mdsc->cap_dirty_lock); 2657 2602 
oldest_flush_tid = __get_oldest_flush_tid(mdsc); 2658 2603 spin_unlock(&mdsc->cap_dirty_lock); 2659 2604 2660 2605 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2606 + struct inode *inode = &ci->netfs.inode; 2607 + 2661 2608 spin_lock(&ci->i_ceph_lock); 2662 2609 cap = ci->i_auth_cap; 2663 2610 if (!(cap && cap->session == session)) { 2664 - pr_err("%p auth cap %p not mds%d ???\n", 2665 - &ci->netfs.inode, cap, session->s_mds); 2611 + pr_err_client(cl, "%p %llx.%llx auth cap %p not mds%d ???\n", 2612 + inode, ceph_vinop(inode), cap, 2613 + session->s_mds); 2666 2614 spin_unlock(&ci->i_ceph_lock); 2667 2615 continue; 2668 2616 } ··· 2699 2639 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, 2700 2640 struct ceph_mds_session *session) 2701 2641 { 2642 + struct ceph_client *cl = mdsc->fsc->client; 2702 2643 struct ceph_inode_info *ci; 2703 2644 struct ceph_cap *cap; 2704 2645 u64 oldest_flush_tid; 2705 2646 2706 2647 lockdep_assert_held(&session->s_mutex); 2707 2648 2708 - dout("kick_flushing_caps mds%d\n", session->s_mds); 2649 + doutc(cl, "mds%d\n", session->s_mds); 2709 2650 2710 2651 spin_lock(&mdsc->cap_dirty_lock); 2711 2652 oldest_flush_tid = __get_oldest_flush_tid(mdsc); 2712 2653 spin_unlock(&mdsc->cap_dirty_lock); 2713 2654 2714 2655 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2656 + struct inode *inode = &ci->netfs.inode; 2657 + 2715 2658 spin_lock(&ci->i_ceph_lock); 2716 2659 cap = ci->i_auth_cap; 2717 2660 if (!(cap && cap->session == session)) { 2718 - pr_err("%p auth cap %p not mds%d ???\n", 2719 - &ci->netfs.inode, cap, session->s_mds); 2661 + pr_err_client(cl, "%p %llx.%llx auth cap %p not mds%d ???\n", 2662 + inode, ceph_vinop(inode), cap, 2663 + session->s_mds); 2720 2664 spin_unlock(&ci->i_ceph_lock); 2721 2665 continue; 2722 2666 } ··· 2737 2673 { 2738 2674 struct ceph_mds_client *mdsc = session->s_mdsc; 2739 2675 struct ceph_cap *cap = ci->i_auth_cap; 2676 + struct inode *inode = 
&ci->netfs.inode; 2740 2677 2741 2678 lockdep_assert_held(&ci->i_ceph_lock); 2742 2679 2743 - dout("%s %p flushing %s\n", __func__, &ci->netfs.inode, 2744 - ceph_cap_string(ci->i_flushing_caps)); 2680 + doutc(mdsc->fsc->client, "%p %llx.%llx flushing %s\n", 2681 + inode, ceph_vinop(inode), 2682 + ceph_cap_string(ci->i_flushing_caps)); 2745 2683 2746 2684 if (!list_empty(&ci->i_cap_flush_list)) { 2747 2685 u64 oldest_flush_tid; ··· 2765 2699 void ceph_take_cap_refs(struct ceph_inode_info *ci, int got, 2766 2700 bool snap_rwsem_locked) 2767 2701 { 2702 + struct inode *inode = &ci->netfs.inode; 2703 + struct ceph_client *cl = ceph_inode_to_client(inode); 2704 + 2768 2705 lockdep_assert_held(&ci->i_ceph_lock); 2769 2706 2770 2707 if (got & CEPH_CAP_PIN) ··· 2788 2719 } 2789 2720 if (got & CEPH_CAP_FILE_BUFFER) { 2790 2721 if (ci->i_wb_ref == 0) 2791 - ihold(&ci->netfs.inode); 2722 + ihold(inode); 2792 2723 ci->i_wb_ref++; 2793 - dout("%s %p wb %d -> %d (?)\n", __func__, 2794 - &ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref); 2724 + doutc(cl, "%p %llx.%llx wb %d -> %d (?)\n", inode, 2725 + ceph_vinop(inode), ci->i_wb_ref-1, ci->i_wb_ref); 2795 2726 } 2796 2727 } 2797 2728 ··· 2819 2750 { 2820 2751 struct ceph_inode_info *ci = ceph_inode(inode); 2821 2752 struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc; 2753 + struct ceph_client *cl = ceph_inode_to_client(inode); 2822 2754 int ret = 0; 2823 2755 int have, implemented; 2824 2756 bool snap_rwsem_locked = false; 2825 2757 2826 - dout("get_cap_refs %p need %s want %s\n", inode, 2827 - ceph_cap_string(need), ceph_cap_string(want)); 2758 + doutc(cl, "%p %llx.%llx need %s want %s\n", inode, 2759 + ceph_vinop(inode), ceph_cap_string(need), 2760 + ceph_cap_string(want)); 2828 2761 2829 2762 again: 2830 2763 spin_lock(&ci->i_ceph_lock); 2831 2764 2832 2765 if ((flags & CHECK_FILELOCK) && 2833 2766 (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) { 2834 - dout("try_get_cap_refs %p error filelock\n", inode); 2767 + 
doutc(cl, "%p %llx.%llx error filelock\n", inode, 2768 + ceph_vinop(inode)); 2835 2769 ret = -EIO; 2836 2770 goto out_unlock; 2837 2771 } ··· 2854 2782 2855 2783 if (have & need & CEPH_CAP_FILE_WR) { 2856 2784 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) { 2857 - dout("get_cap_refs %p endoff %llu > maxsize %llu\n", 2858 - inode, endoff, ci->i_max_size); 2785 + doutc(cl, "%p %llx.%llx endoff %llu > maxsize %llu\n", 2786 + inode, ceph_vinop(inode), endoff, ci->i_max_size); 2859 2787 if (endoff > ci->i_requested_max_size) 2860 2788 ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN; 2861 2789 goto out_unlock; ··· 2865 2793 * can get a final snapshot value for size+mtime. 2866 2794 */ 2867 2795 if (__ceph_have_pending_cap_snap(ci)) { 2868 - dout("get_cap_refs %p cap_snap_pending\n", inode); 2796 + doutc(cl, "%p %llx.%llx cap_snap_pending\n", inode, 2797 + ceph_vinop(inode)); 2869 2798 goto out_unlock; 2870 2799 } 2871 2800 } ··· 2884 2811 int not = want & ~(have & need); 2885 2812 int revoking = implemented & ~have; 2886 2813 int exclude = revoking & not; 2887 - dout("get_cap_refs %p have %s but not %s (revoking %s)\n", 2888 - inode, ceph_cap_string(have), ceph_cap_string(not), 2889 - ceph_cap_string(revoking)); 2814 + doutc(cl, "%p %llx.%llx have %s but not %s (revoking %s)\n", 2815 + inode, ceph_vinop(inode), ceph_cap_string(have), 2816 + ceph_cap_string(not), ceph_cap_string(revoking)); 2890 2817 if (!exclude || !(exclude & CEPH_CAP_FILE_BUFFER)) { 2891 2818 if (!snap_rwsem_locked && 2892 2819 !ci->i_head_snapc && ··· 2926 2853 spin_unlock(&s->s_cap_lock); 2927 2854 } 2928 2855 if (session_readonly) { 2929 - dout("get_cap_refs %p need %s but mds%d readonly\n", 2930 - inode, ceph_cap_string(need), ci->i_auth_cap->mds); 2856 + doutc(cl, "%p %llx.%llx need %s but mds%d readonly\n", 2857 + inode, ceph_vinop(inode), ceph_cap_string(need), 2858 + ci->i_auth_cap->mds); 2931 2859 ret = -EROFS; 2932 2860 goto out_unlock; 2933 2861 } 2934 2862 2935 2863 if 
(ceph_inode_is_shutdown(inode)) { 2936 - dout("get_cap_refs %p inode is shutdown\n", inode); 2864 + doutc(cl, "%p %llx.%llx inode is shutdown\n", 2865 + inode, ceph_vinop(inode)); 2937 2866 ret = -ESTALE; 2938 2867 goto out_unlock; 2939 2868 } 2940 2869 mds_wanted = __ceph_caps_mds_wanted(ci, false); 2941 2870 if (need & ~mds_wanted) { 2942 - dout("get_cap_refs %p need %s > mds_wanted %s\n", 2943 - inode, ceph_cap_string(need), 2944 - ceph_cap_string(mds_wanted)); 2871 + doutc(cl, "%p %llx.%llx need %s > mds_wanted %s\n", 2872 + inode, ceph_vinop(inode), ceph_cap_string(need), 2873 + ceph_cap_string(mds_wanted)); 2945 2874 ret = -EUCLEAN; 2946 2875 goto out_unlock; 2947 2876 } 2948 2877 2949 - dout("get_cap_refs %p have %s need %s\n", inode, 2950 - ceph_cap_string(have), ceph_cap_string(need)); 2878 + doutc(cl, "%p %llx.%llx have %s need %s\n", inode, 2879 + ceph_vinop(inode), ceph_cap_string(have), 2880 + ceph_cap_string(need)); 2951 2881 } 2952 2882 out_unlock: 2953 2883 ··· 2965 2889 else if (ret == 1) 2966 2890 ceph_update_cap_hit(&mdsc->metric); 2967 2891 2968 - dout("get_cap_refs %p ret %d got %s\n", inode, 2969 - ret, ceph_cap_string(*got)); 2892 + doutc(cl, "%p %llx.%llx ret %d got %s\n", inode, 2893 + ceph_vinop(inode), ret, ceph_cap_string(*got)); 2970 2894 return ret; 2971 2895 } 2972 2896 ··· 2978 2902 static void check_max_size(struct inode *inode, loff_t endoff) 2979 2903 { 2980 2904 struct ceph_inode_info *ci = ceph_inode(inode); 2905 + struct ceph_client *cl = ceph_inode_to_client(inode); 2981 2906 int check = 0; 2982 2907 2983 2908 /* do we need to explicitly request a larger max_size? 
*/ 2984 2909 spin_lock(&ci->i_ceph_lock); 2985 2910 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) { 2986 - dout("write %p at large endoff %llu, req max_size\n", 2987 - inode, endoff); 2911 + doutc(cl, "write %p %llx.%llx at large endoff %llu, req max_size\n", 2912 + inode, ceph_vinop(inode), endoff); 2988 2913 ci->i_wanted_max_size = endoff; 2989 2914 } 2990 2915 /* duplicate ceph_check_caps()'s logic */ ··· 3195 3118 static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci, 3196 3119 struct ceph_cap_snap *capsnap) 3197 3120 { 3121 + struct inode *inode = &ci->netfs.inode; 3122 + struct ceph_client *cl = ceph_inode_to_client(inode); 3123 + 3198 3124 if (!capsnap->need_flush && 3199 3125 !capsnap->writing && !capsnap->dirty_pages) { 3200 - dout("dropping cap_snap %p follows %llu\n", 3201 - capsnap, capsnap->follows); 3126 + doutc(cl, "%p follows %llu\n", capsnap, capsnap->follows); 3202 3127 BUG_ON(capsnap->cap_flush.tid > 0); 3203 3128 ceph_put_snap_context(capsnap->context); 3204 3129 if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps)) ··· 3232 3153 enum put_cap_refs_mode mode) 3233 3154 { 3234 3155 struct inode *inode = &ci->netfs.inode; 3156 + struct ceph_client *cl = ceph_inode_to_client(inode); 3235 3157 int last = 0, put = 0, flushsnaps = 0, wake = 0; 3236 3158 bool check_flushsnaps = false; 3237 3159 ··· 3255 3175 put++; 3256 3176 check_flushsnaps = true; 3257 3177 } 3258 - dout("put_cap_refs %p wb %d -> %d (?)\n", 3259 - inode, ci->i_wb_ref+1, ci->i_wb_ref); 3178 + doutc(cl, "%p %llx.%llx wb %d -> %d (?)\n", inode, 3179 + ceph_vinop(inode), ci->i_wb_ref+1, ci->i_wb_ref); 3260 3180 } 3261 3181 if (had & CEPH_CAP_FILE_WR) { 3262 3182 if (--ci->i_wr_ref == 0) { ··· 3296 3216 } 3297 3217 spin_unlock(&ci->i_ceph_lock); 3298 3218 3299 - dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had), 3300 - last ? " last" : "", put ? 
" put" : ""); 3219 + doutc(cl, "%p %llx.%llx had %s%s%s\n", inode, ceph_vinop(inode), 3220 + ceph_cap_string(had), last ? " last" : "", put ? " put" : ""); 3301 3221 3302 3222 switch (mode) { 3303 3223 case PUT_CAP_REFS_SYNC: ··· 3347 3267 struct ceph_snap_context *snapc) 3348 3268 { 3349 3269 struct inode *inode = &ci->netfs.inode; 3270 + struct ceph_client *cl = ceph_inode_to_client(inode); 3350 3271 struct ceph_cap_snap *capsnap = NULL, *iter; 3351 3272 int put = 0; 3352 3273 bool last = false; ··· 3371 3290 ceph_put_snap_context(ci->i_head_snapc); 3372 3291 ci->i_head_snapc = NULL; 3373 3292 } 3374 - dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n", 3375 - inode, 3376 - ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr, 3377 - ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 3378 - last ? " LAST" : ""); 3293 + doutc(cl, "on %p %llx.%llx head %d/%d -> %d/%d %s\n", 3294 + inode, ceph_vinop(inode), ci->i_wrbuffer_ref+nr, 3295 + ci->i_wrbuffer_ref_head+nr, ci->i_wrbuffer_ref, 3296 + ci->i_wrbuffer_ref_head, last ? " LAST" : ""); 3379 3297 } else { 3380 3298 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) { 3381 3299 if (iter->context == snapc) { ··· 3404 3324 } 3405 3325 } 3406 3326 } 3407 - dout("put_wrbuffer_cap_refs on %p cap_snap %p " 3408 - " snap %lld %d/%d -> %d/%d %s%s\n", 3409 - inode, capsnap, capsnap->context->seq, 3410 - ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, 3411 - ci->i_wrbuffer_ref, capsnap->dirty_pages, 3412 - last ? " (wrbuffer last)" : "", 3413 - complete_capsnap ? " (complete capsnap)" : ""); 3327 + doutc(cl, "%p %llx.%llx cap_snap %p snap %lld %d/%d -> %d/%d %s%s\n", 3328 + inode, ceph_vinop(inode), capsnap, capsnap->context->seq, 3329 + ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, 3330 + ci->i_wrbuffer_ref, capsnap->dirty_pages, 3331 + last ? " (wrbuffer last)" : "", 3332 + complete_capsnap ? 
" (complete capsnap)" : ""); 3414 3333 } 3415 3334 3416 3335 unlock: ··· 3432 3353 */ 3433 3354 static void invalidate_aliases(struct inode *inode) 3434 3355 { 3356 + struct ceph_client *cl = ceph_inode_to_client(inode); 3435 3357 struct dentry *dn, *prev = NULL; 3436 3358 3437 - dout("invalidate_aliases inode %p\n", inode); 3359 + doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode)); 3438 3360 d_prune_aliases(inode); 3439 3361 /* 3440 3362 * For non-directory inode, d_find_alias() only returns ··· 3494 3414 __releases(ci->i_ceph_lock) 3495 3415 __releases(session->s_mdsc->snap_rwsem) 3496 3416 { 3417 + struct ceph_client *cl = ceph_inode_to_client(inode); 3497 3418 struct ceph_inode_info *ci = ceph_inode(inode); 3498 3419 int seq = le32_to_cpu(grant->seq); 3499 3420 int newcaps = le32_to_cpu(grant->caps); ··· 3518 3437 if (IS_ENCRYPTED(inode) && size) 3519 3438 size = extra_info->fscrypt_file_size; 3520 3439 3521 - dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", 3522 - inode, cap, session->s_mds, seq, ceph_cap_string(newcaps)); 3523 - dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, 3524 - i_size_read(inode)); 3440 + doutc(cl, "%p %llx.%llx cap %p mds%d seq %d %s\n", inode, 3441 + ceph_vinop(inode), cap, session->s_mds, seq, 3442 + ceph_cap_string(newcaps)); 3443 + doutc(cl, " size %llu max_size %llu, i_size %llu\n", size, 3444 + max_size, i_size_read(inode)); 3525 3445 3526 3446 3527 3447 /* ··· 3582 3500 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid)); 3583 3501 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid)); 3584 3502 ci->i_btime = extra_info->btime; 3585 - dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 3586 - from_kuid(&init_user_ns, inode->i_uid), 3587 - from_kgid(&init_user_ns, inode->i_gid)); 3503 + doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode, 3504 + ceph_vinop(inode), inode->i_mode, 3505 + from_kuid(&init_user_ns, inode->i_uid), 3506 + from_kgid(&init_user_ns, 
inode->i_gid)); 3588 3507 #if IS_ENABLED(CONFIG_FS_ENCRYPTION) 3589 3508 if (ci->fscrypt_auth_len != extra_info->fscrypt_auth_len || 3590 3509 memcmp(ci->fscrypt_auth, extra_info->fscrypt_auth, 3591 3510 ci->fscrypt_auth_len)) 3592 - pr_warn_ratelimited("%s: cap grant attempt to change fscrypt_auth on non-I_NEW inode (old len %d new len %d)\n", 3593 - __func__, ci->fscrypt_auth_len, 3511 + pr_warn_ratelimited_client(cl, 3512 + "cap grant attempt to change fscrypt_auth on non-I_NEW inode (old len %d new len %d)\n", 3513 + ci->fscrypt_auth_len, 3594 3514 extra_info->fscrypt_auth_len); 3595 3515 #endif 3596 3516 } ··· 3610 3526 u64 version = le64_to_cpu(grant->xattr_version); 3611 3527 3612 3528 if (version > ci->i_xattrs.version) { 3613 - dout(" got new xattrs v%llu on %p len %d\n", 3614 - version, inode, len); 3529 + doutc(cl, " got new xattrs v%llu on %p %llx.%llx len %d\n", 3530 + version, inode, ceph_vinop(inode), len); 3615 3531 if (ci->i_xattrs.blob) 3616 3532 ceph_buffer_put(ci->i_xattrs.blob); 3617 3533 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf); ··· 3662 3578 3663 3579 if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) { 3664 3580 if (max_size != ci->i_max_size) { 3665 - dout("max_size %lld -> %llu\n", 3666 - ci->i_max_size, max_size); 3581 + doutc(cl, "max_size %lld -> %llu\n", ci->i_max_size, 3582 + max_size); 3667 3583 ci->i_max_size = max_size; 3668 3584 if (max_size >= ci->i_wanted_max_size) { 3669 3585 ci->i_wanted_max_size = 0; /* reset */ ··· 3677 3593 wanted = __ceph_caps_wanted(ci); 3678 3594 used = __ceph_caps_used(ci); 3679 3595 dirty = __ceph_caps_dirty(ci); 3680 - dout(" my wanted = %s, used = %s, dirty %s\n", 3681 - ceph_cap_string(wanted), 3682 - ceph_cap_string(used), 3683 - ceph_cap_string(dirty)); 3596 + doutc(cl, " my wanted = %s, used = %s, dirty %s\n", 3597 + ceph_cap_string(wanted), ceph_cap_string(used), 3598 + ceph_cap_string(dirty)); 3684 3599 3685 3600 if ((was_stale || le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) 
&& 3686 3601 (wanted & ~(cap->mds_wanted | newcaps))) { ··· 3700 3617 if (cap->issued & ~newcaps) { 3701 3618 int revoking = cap->issued & ~newcaps; 3702 3619 3703 - dout("revocation: %s -> %s (revoking %s)\n", 3704 - ceph_cap_string(cap->issued), 3705 - ceph_cap_string(newcaps), 3706 - ceph_cap_string(revoking)); 3620 + doutc(cl, "revocation: %s -> %s (revoking %s)\n", 3621 + ceph_cap_string(cap->issued), ceph_cap_string(newcaps), 3622 + ceph_cap_string(revoking)); 3707 3623 if (S_ISREG(inode->i_mode) && 3708 3624 (revoking & used & CEPH_CAP_FILE_BUFFER)) 3709 3625 writeback = true; /* initiate writeback; will delay ack */ ··· 3720 3638 cap->issued = newcaps; 3721 3639 cap->implemented |= newcaps; 3722 3640 } else if (cap->issued == newcaps) { 3723 - dout("caps unchanged: %s -> %s\n", 3724 - ceph_cap_string(cap->issued), ceph_cap_string(newcaps)); 3641 + doutc(cl, "caps unchanged: %s -> %s\n", 3642 + ceph_cap_string(cap->issued), 3643 + ceph_cap_string(newcaps)); 3725 3644 } else { 3726 - dout("grant: %s -> %s\n", ceph_cap_string(cap->issued), 3727 - ceph_cap_string(newcaps)); 3645 + doutc(cl, "grant: %s -> %s\n", ceph_cap_string(cap->issued), 3646 + ceph_cap_string(newcaps)); 3728 3647 /* non-auth MDS is revoking the newly grant caps ? 
*/ 3729 3648 if (cap == ci->i_auth_cap && 3730 3649 __ceph_caps_revoking_other(ci, cap, newcaps)) ··· 3814 3731 { 3815 3732 struct ceph_inode_info *ci = ceph_inode(inode); 3816 3733 struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc; 3734 + struct ceph_client *cl = mdsc->fsc->client; 3817 3735 struct ceph_cap_flush *cf, *tmp_cf; 3818 3736 LIST_HEAD(to_remove); 3819 3737 unsigned seq = le32_to_cpu(m->seq); ··· 3851 3767 } 3852 3768 } 3853 3769 3854 - dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s," 3855 - " flushing %s -> %s\n", 3856 - inode, session->s_mds, seq, ceph_cap_string(dirty), 3857 - ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps), 3858 - ceph_cap_string(ci->i_flushing_caps & ~cleaned)); 3770 + doutc(cl, "%p %llx.%llx mds%d seq %d on %s cleaned %s, flushing %s -> %s\n", 3771 + inode, ceph_vinop(inode), session->s_mds, seq, 3772 + ceph_cap_string(dirty), ceph_cap_string(cleaned), 3773 + ceph_cap_string(ci->i_flushing_caps), 3774 + ceph_cap_string(ci->i_flushing_caps & ~cleaned)); 3859 3775 3860 3776 if (list_empty(&to_remove) && !cleaned) 3861 3777 goto out; ··· 3871 3787 if (list_empty(&ci->i_cap_flush_list)) { 3872 3788 list_del_init(&ci->i_flushing_item); 3873 3789 if (!list_empty(&session->s_cap_flushing)) { 3874 - dout(" mds%d still flushing cap on %p\n", 3875 - session->s_mds, 3876 - &list_first_entry(&session->s_cap_flushing, 3877 - struct ceph_inode_info, 3878 - i_flushing_item)->netfs.inode); 3790 + struct inode *inode = 3791 + &list_first_entry(&session->s_cap_flushing, 3792 + struct ceph_inode_info, 3793 + i_flushing_item)->netfs.inode; 3794 + doutc(cl, " mds%d still flushing cap on %p %llx.%llx\n", 3795 + session->s_mds, inode, ceph_vinop(inode)); 3879 3796 } 3880 3797 } 3881 3798 mdsc->num_cap_flushing--; 3882 - dout(" inode %p now !flushing\n", inode); 3799 + doutc(cl, " %p %llx.%llx now !flushing\n", inode, 3800 + ceph_vinop(inode)); 3883 3801 3884 3802 if (ci->i_dirty_caps == 0) { 3885 - 
dout(" inode %p now clean\n", inode); 3803 + doutc(cl, " %p %llx.%llx now clean\n", inode, 3804 + ceph_vinop(inode)); 3886 3805 BUG_ON(!list_empty(&ci->i_dirty_item)); 3887 3806 drop = true; 3888 3807 if (ci->i_wr_ref == 0 && ··· 3924 3837 { 3925 3838 struct ceph_inode_info *ci = ceph_inode(inode); 3926 3839 struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc; 3840 + struct ceph_client *cl = mdsc->fsc->client; 3927 3841 bool ret; 3928 3842 3929 3843 lockdep_assert_held(&ci->i_ceph_lock); 3930 3844 3931 - dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci); 3845 + doutc(cl, "removing capsnap %p, %p %llx.%llx ci %p\n", capsnap, 3846 + inode, ceph_vinop(inode), ci); 3932 3847 3933 3848 list_del_init(&capsnap->ci_item); 3934 3849 ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush); ··· 3970 3881 { 3971 3882 struct ceph_inode_info *ci = ceph_inode(inode); 3972 3883 struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc; 3884 + struct ceph_client *cl = mdsc->fsc->client; 3973 3885 u64 follows = le64_to_cpu(m->snap_follows); 3974 3886 struct ceph_cap_snap *capsnap = NULL, *iter; 3975 3887 bool wake_ci = false; 3976 3888 bool wake_mdsc = false; 3977 3889 3978 - dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n", 3979 - inode, ci, session->s_mds, follows); 3890 + doutc(cl, "%p %llx.%llx ci %p mds%d follows %lld\n", inode, 3891 + ceph_vinop(inode), ci, session->s_mds, follows); 3980 3892 3981 3893 spin_lock(&ci->i_ceph_lock); 3982 3894 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) { 3983 3895 if (iter->follows == follows) { 3984 3896 if (iter->cap_flush.tid != flush_tid) { 3985 - dout(" cap_snap %p follows %lld tid %lld !=" 3986 - " %lld\n", iter, follows, 3987 - flush_tid, iter->cap_flush.tid); 3897 + doutc(cl, " cap_snap %p follows %lld " 3898 + "tid %lld != %lld\n", iter, 3899 + follows, flush_tid, 3900 + iter->cap_flush.tid); 3988 3901 break; 3989 3902 } 3990 3903 capsnap = iter; 3991 3904 
break; 3992 3905 } else { 3993 - dout(" skipping cap_snap %p follows %lld\n", 3994 - iter, iter->follows); 3906 + doutc(cl, " skipping cap_snap %p follows %lld\n", 3907 + iter, iter->follows); 3995 3908 } 3996 3909 } 3997 3910 if (capsnap) ··· 4022 3931 struct cap_extra_info *extra_info) 4023 3932 { 4024 3933 struct ceph_inode_info *ci = ceph_inode(inode); 3934 + struct ceph_client *cl = ceph_inode_to_client(inode); 4025 3935 int mds = session->s_mds; 4026 3936 int seq = le32_to_cpu(trunc->seq); 4027 3937 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq); ··· 4045 3953 if (IS_ENCRYPTED(inode) && size) 4046 3954 size = extra_info->fscrypt_file_size; 4047 3955 4048 - dout("%s inode %p mds%d seq %d to %lld truncate seq %d\n", 4049 - __func__, inode, mds, seq, truncate_size, truncate_seq); 3956 + doutc(cl, "%p %llx.%llx mds%d seq %d to %lld truncate seq %d\n", 3957 + inode, ceph_vinop(inode), mds, seq, truncate_size, truncate_seq); 4050 3958 queue_trunc = ceph_fill_file_size(inode, issued, 4051 3959 truncate_seq, truncate_size, size); 4052 3960 return queue_trunc; ··· 4065 3973 struct ceph_mds_session *session) 4066 3974 { 4067 3975 struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc; 3976 + struct ceph_client *cl = mdsc->fsc->client; 4068 3977 struct ceph_mds_session *tsession = NULL; 4069 3978 struct ceph_cap *cap, *tcap, *new_cap = NULL; 4070 3979 struct ceph_inode_info *ci = ceph_inode(inode); ··· 4085 3992 target = -1; 4086 3993 } 4087 3994 4088 - dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n", 4089 - inode, ci, mds, mseq, target); 3995 + doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d target %d\n", 3996 + inode, ceph_vinop(inode), ci, mds, mseq, target); 4090 3997 retry: 4091 3998 down_read(&mdsc->snap_rwsem); 4092 3999 spin_lock(&ci->i_ceph_lock); ··· 4106 4013 4107 4014 issued = cap->issued; 4108 4015 if (issued != cap->implemented) 4109 - pr_err_ratelimited("handle_cap_export: issued != implemented: " 4110 - "ino (%llx.%llx) 
mds%d seq %d mseq %d " 4111 - "issued %s implemented %s\n", 4112 - ceph_vinop(inode), mds, cap->seq, cap->mseq, 4113 - ceph_cap_string(issued), 4114 - ceph_cap_string(cap->implemented)); 4016 + pr_err_ratelimited_client(cl, "issued != implemented: " 4017 + "%p %llx.%llx mds%d seq %d mseq %d" 4018 + " issued %s implemented %s\n", 4019 + inode, ceph_vinop(inode), mds, 4020 + cap->seq, cap->mseq, 4021 + ceph_cap_string(issued), 4022 + ceph_cap_string(cap->implemented)); 4115 4023 4116 4024 4117 4025 tcap = __get_cap_for_mds(ci, target); ··· 4120 4026 /* already have caps from the target */ 4121 4027 if (tcap->cap_id == t_cap_id && 4122 4028 ceph_seq_cmp(tcap->seq, t_seq) < 0) { 4123 - dout(" updating import cap %p mds%d\n", tcap, target); 4029 + doutc(cl, " updating import cap %p mds%d\n", tcap, 4030 + target); 4124 4031 tcap->cap_id = t_cap_id; 4125 4032 tcap->seq = t_seq - 1; 4126 4033 tcap->issue_seq = t_seq - 1; ··· 4202 4107 struct ceph_cap **target_cap, int *old_issued) 4203 4108 { 4204 4109 struct ceph_inode_info *ci = ceph_inode(inode); 4110 + struct ceph_client *cl = mdsc->fsc->client; 4205 4111 struct ceph_cap *cap, *ocap, *new_cap = NULL; 4206 4112 int mds = session->s_mds; 4207 4113 int issued; ··· 4223 4127 peer = -1; 4224 4128 } 4225 4129 4226 - dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n", 4227 - inode, ci, mds, mseq, peer); 4130 + doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d peer %d\n", 4131 + inode, ceph_vinop(inode), ci, mds, mseq, peer); 4228 4132 retry: 4229 4133 cap = __get_cap_for_mds(ci, mds); 4230 4134 if (!cap) { ··· 4250 4154 4251 4155 ocap = peer >= 0 ? 
__get_cap_for_mds(ci, peer) : NULL; 4252 4156 if (ocap && ocap->cap_id == p_cap_id) { 4253 - dout(" remove export cap %p mds%d flags %d\n", 4254 - ocap, peer, ph->flags); 4157 + doutc(cl, " remove export cap %p mds%d flags %d\n", 4158 + ocap, peer, ph->flags); 4255 4159 if ((ph->flags & CEPH_CAP_FLAG_AUTH) && 4256 4160 (ocap->seq != le32_to_cpu(ph->seq) || 4257 4161 ocap->mseq != le32_to_cpu(ph->mseq))) { 4258 - pr_err_ratelimited("handle_cap_import: " 4259 - "mismatched seq/mseq: ino (%llx.%llx) " 4260 - "mds%d seq %d mseq %d importer mds%d " 4261 - "has peer seq %d mseq %d\n", 4262 - ceph_vinop(inode), peer, ocap->seq, 4263 - ocap->mseq, mds, le32_to_cpu(ph->seq), 4162 + pr_err_ratelimited_client(cl, "mismatched seq/mseq: " 4163 + "%p %llx.%llx mds%d seq %d mseq %d" 4164 + " importer mds%d has peer seq %d mseq %d\n", 4165 + inode, ceph_vinop(inode), peer, 4166 + ocap->seq, ocap->mseq, mds, 4167 + le32_to_cpu(ph->seq), 4264 4168 le32_to_cpu(ph->mseq)); 4265 4169 } 4266 4170 ceph_remove_cap(mdsc, ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE)); ··· 4326 4230 struct ceph_msg *msg) 4327 4231 { 4328 4232 struct ceph_mds_client *mdsc = session->s_mdsc; 4233 + struct ceph_client *cl = mdsc->fsc->client; 4329 4234 struct inode *inode; 4330 4235 struct ceph_inode_info *ci; 4331 4236 struct ceph_cap *cap; ··· 4345 4248 bool close_sessions = false; 4346 4249 bool do_cap_release = false; 4347 4250 4348 - dout("handle_caps from mds%d\n", session->s_mds); 4251 + doutc(cl, "from mds%d\n", session->s_mds); 4349 4252 4350 4253 if (!ceph_inc_mds_stopping_blocker(mdsc, session)) 4351 4254 return; ··· 4447 4350 4448 4351 /* lookup ino */ 4449 4352 inode = ceph_find_inode(mdsc->fsc->sb, vino); 4450 - dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino, 4451 - vino.snap, inode); 4353 + doutc(cl, " op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), 4354 + vino.ino, vino.snap, inode); 4452 4355 4453 4356 mutex_lock(&session->s_mutex); 4454 - dout(" mds%d seq %lld cap 
seq %u\n", session->s_mds, session->s_seq, 4455 - (unsigned)seq); 4357 + doutc(cl, " mds%d seq %lld cap seq %u\n", session->s_mds, 4358 + session->s_seq, (unsigned)seq); 4456 4359 4457 4360 if (!inode) { 4458 - dout(" i don't have ino %llx\n", vino.ino); 4361 + doutc(cl, " i don't have ino %llx\n", vino.ino); 4459 4362 4460 4363 switch (op) { 4461 4364 case CEPH_CAP_OP_IMPORT: ··· 4510 4413 spin_lock(&ci->i_ceph_lock); 4511 4414 cap = __get_cap_for_mds(ceph_inode(inode), session->s_mds); 4512 4415 if (!cap) { 4513 - dout(" no cap on %p ino %llx.%llx from mds%d\n", 4514 - inode, ceph_ino(inode), ceph_snap(inode), 4515 - session->s_mds); 4416 + doutc(cl, " no cap on %p ino %llx.%llx from mds%d\n", 4417 + inode, ceph_ino(inode), ceph_snap(inode), 4418 + session->s_mds); 4516 4419 spin_unlock(&ci->i_ceph_lock); 4517 4420 switch (op) { 4518 4421 case CEPH_CAP_OP_REVOKE: ··· 4550 4453 4551 4454 default: 4552 4455 spin_unlock(&ci->i_ceph_lock); 4553 - pr_err("ceph_handle_caps: unknown cap op %d %s\n", op, 4554 - ceph_cap_op_name(op)); 4456 + pr_err_client(cl, "unknown cap op %d %s\n", op, 4457 + ceph_cap_op_name(op)); 4555 4458 } 4556 4459 4557 4460 done: ··· 4592 4495 goto done; 4593 4496 4594 4497 bad: 4595 - pr_err("ceph_handle_caps: corrupt message\n"); 4498 + pr_err_client(cl, "corrupt message\n"); 4596 4499 ceph_msg_dump(msg); 4597 4500 goto out; 4598 4501 } ··· 4606 4509 */ 4607 4510 unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc) 4608 4511 { 4512 + struct ceph_client *cl = mdsc->fsc->client; 4609 4513 struct inode *inode; 4610 4514 struct ceph_inode_info *ci; 4611 4515 struct ceph_mount_options *opt = mdsc->fsc->mount_options; ··· 4614 4516 unsigned long loop_start = jiffies; 4615 4517 unsigned long delay = 0; 4616 4518 4617 - dout("check_delayed_caps\n"); 4519 + doutc(cl, "begin\n"); 4618 4520 spin_lock(&mdsc->cap_delay_lock); 4619 4521 while (!list_empty(&mdsc->cap_delay_list)) { 4620 4522 ci = list_first_entry(&mdsc->cap_delay_list, 4621 
4523 struct ceph_inode_info, 4622 4524 i_cap_delay_list); 4623 4525 if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) { 4624 - dout("%s caps added recently. Exiting loop", __func__); 4526 + doutc(cl, "caps added recently. Exiting loop"); 4625 4527 delay = ci->i_hold_caps_max; 4626 4528 break; 4627 4529 } ··· 4633 4535 inode = igrab(&ci->netfs.inode); 4634 4536 if (inode) { 4635 4537 spin_unlock(&mdsc->cap_delay_lock); 4636 - dout("check_delayed_caps on %p\n", inode); 4538 + doutc(cl, "on %p %llx.%llx\n", inode, 4539 + ceph_vinop(inode)); 4637 4540 ceph_check_caps(ci, 0); 4638 4541 iput(inode); 4639 4542 spin_lock(&mdsc->cap_delay_lock); 4640 4543 } 4641 4544 } 4642 4545 spin_unlock(&mdsc->cap_delay_lock); 4546 + doutc(cl, "done\n"); 4643 4547 4644 4548 return delay; 4645 4549 } ··· 4652 4552 static void flush_dirty_session_caps(struct ceph_mds_session *s) 4653 4553 { 4654 4554 struct ceph_mds_client *mdsc = s->s_mdsc; 4555 + struct ceph_client *cl = mdsc->fsc->client; 4655 4556 struct ceph_inode_info *ci; 4656 4557 struct inode *inode; 4657 4558 4658 - dout("flush_dirty_caps\n"); 4559 + doutc(cl, "begin\n"); 4659 4560 spin_lock(&mdsc->cap_dirty_lock); 4660 4561 while (!list_empty(&s->s_cap_dirty)) { 4661 4562 ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info, 4662 4563 i_dirty_item); 4663 4564 inode = &ci->netfs.inode; 4664 4565 ihold(inode); 4665 - dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode)); 4566 + doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode)); 4666 4567 spin_unlock(&mdsc->cap_dirty_lock); 4667 4568 ceph_wait_on_async_create(inode); 4668 4569 ceph_check_caps(ci, CHECK_CAPS_FLUSH); ··· 4671 4570 spin_lock(&mdsc->cap_dirty_lock); 4672 4571 } 4673 4572 spin_unlock(&mdsc->cap_dirty_lock); 4674 - dout("flush_dirty_caps done\n"); 4573 + doutc(cl, "done\n"); 4675 4574 } 4676 4575 4677 4576 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) ··· 4796 4695 int mds, int drop, int unless, int force) 4797 4696 { 4798 4697 
struct ceph_inode_info *ci = ceph_inode(inode); 4698 + struct ceph_client *cl = ceph_inode_to_client(inode); 4799 4699 struct ceph_cap *cap; 4800 4700 struct ceph_mds_request_release *rel = *p; 4801 4701 int used, dirty; ··· 4806 4704 used = __ceph_caps_used(ci); 4807 4705 dirty = __ceph_caps_dirty(ci); 4808 4706 4809 - dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n", 4810 - inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop), 4811 - ceph_cap_string(unless)); 4707 + doutc(cl, "%p %llx.%llx mds%d used|dirty %s drop %s unless %s\n", 4708 + inode, ceph_vinop(inode), mds, ceph_cap_string(used|dirty), 4709 + ceph_cap_string(drop), ceph_cap_string(unless)); 4812 4710 4813 4711 /* only drop unused, clean caps */ 4814 4712 drop &= ~(used | dirty); ··· 4830 4728 if (force || (cap->issued & drop)) { 4831 4729 if (cap->issued & drop) { 4832 4730 int wanted = __ceph_caps_wanted(ci); 4833 - dout("encode_inode_release %p cap %p " 4834 - "%s -> %s, wanted %s -> %s\n", inode, cap, 4835 - ceph_cap_string(cap->issued), 4836 - ceph_cap_string(cap->issued & ~drop), 4837 - ceph_cap_string(cap->mds_wanted), 4838 - ceph_cap_string(wanted)); 4731 + doutc(cl, "%p %llx.%llx cap %p %s -> %s, " 4732 + "wanted %s -> %s\n", inode, 4733 + ceph_vinop(inode), cap, 4734 + ceph_cap_string(cap->issued), 4735 + ceph_cap_string(cap->issued & ~drop), 4736 + ceph_cap_string(cap->mds_wanted), 4737 + ceph_cap_string(wanted)); 4839 4738 4840 4739 cap->issued &= ~drop; 4841 4740 cap->implemented &= ~drop; ··· 4845 4742 !(wanted & CEPH_CAP_ANY_FILE_WR)) 4846 4743 ci->i_requested_max_size = 0; 4847 4744 } else { 4848 - dout("encode_inode_release %p cap %p %s" 4849 - " (force)\n", inode, cap, 4850 - ceph_cap_string(cap->issued)); 4745 + doutc(cl, "%p %llx.%llx cap %p %s (force)\n", 4746 + inode, ceph_vinop(inode), cap, 4747 + ceph_cap_string(cap->issued)); 4851 4748 } 4852 4749 4853 4750 rel->ino = cpu_to_le64(ceph_ino(inode)); ··· 4862 4759 *p += sizeof(*rel); 4863 4760 ret = 
1; 4864 4761 } else { 4865 - dout("encode_inode_release %p cap %p %s (noop)\n", 4866 - inode, cap, ceph_cap_string(cap->issued)); 4762 + doutc(cl, "%p %llx.%llx cap %p %s (noop)\n", 4763 + inode, ceph_vinop(inode), cap, 4764 + ceph_cap_string(cap->issued)); 4867 4765 } 4868 4766 } 4869 4767 spin_unlock(&ci->i_ceph_lock); ··· 4890 4786 struct dentry *parent = NULL; 4891 4787 struct ceph_mds_request_release *rel = *p; 4892 4788 struct ceph_dentry_info *di = ceph_dentry(dentry); 4789 + struct ceph_client *cl; 4893 4790 int force = 0; 4894 4791 int ret; 4895 4792 ··· 4912 4807 ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force); 4913 4808 dput(parent); 4914 4809 4810 + cl = ceph_inode_to_client(dir); 4915 4811 spin_lock(&dentry->d_lock); 4916 4812 if (ret && di->lease_session && di->lease_session->s_mds == mds) { 4917 - dout("encode_dentry_release %p mds%d seq %d\n", 4918 - dentry, mds, (int)di->lease_seq); 4813 + doutc(cl, "%p mds%d seq %d\n", dentry, mds, 4814 + (int)di->lease_seq); 4919 4815 rel->dname_seq = cpu_to_le32(di->lease_seq); 4920 4816 __ceph_mdsc_drop_dentry_lease(dentry); 4921 4817 spin_unlock(&dentry->d_lock); ··· 4942 4836 static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode) 4943 4837 { 4944 4838 struct ceph_inode_info *ci = ceph_inode(inode); 4839 + struct ceph_client *cl = mdsc->fsc->client; 4945 4840 struct ceph_cap_snap *capsnap; 4946 4841 int capsnap_release = 0; 4947 4842 4948 4843 lockdep_assert_held(&ci->i_ceph_lock); 4949 4844 4950 - dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode); 4845 + doutc(cl, "removing capsnaps, ci is %p, %p %llx.%llx\n", 4846 + ci, inode, ceph_vinop(inode)); 4951 4847 4952 4848 while (!list_empty(&ci->i_cap_snaps)) { 4953 4849 capsnap = list_first_entry(&ci->i_cap_snaps, ··· 4968 4860 { 4969 4861 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 4970 4862 struct ceph_mds_client *mdsc = fsc->mdsc; 4863 + struct ceph_client *cl = fsc->client; 4971 4864 struct 
ceph_inode_info *ci = ceph_inode(inode); 4972 4865 bool is_auth; 4973 4866 bool dirty_dropped = false; ··· 4976 4867 4977 4868 lockdep_assert_held(&ci->i_ceph_lock); 4978 4869 4979 - dout("removing cap %p, ci is %p, inode is %p\n", 4980 - cap, ci, &ci->netfs.inode); 4870 + doutc(cl, "removing cap %p, ci is %p, %p %llx.%llx\n", 4871 + cap, ci, inode, ceph_vinop(inode)); 4981 4872 4982 4873 is_auth = (cap == ci->i_auth_cap); 4983 4874 __ceph_remove_cap(cap, false); ··· 5004 4895 } 5005 4896 5006 4897 if (!list_empty(&ci->i_dirty_item)) { 5007 - pr_warn_ratelimited( 5008 - " dropping dirty %s state for %p %lld\n", 4898 + pr_warn_ratelimited_client(cl, 4899 + " dropping dirty %s state for %p %llx.%llx\n", 5009 4900 ceph_cap_string(ci->i_dirty_caps), 5010 - inode, ceph_ino(inode)); 4901 + inode, ceph_vinop(inode)); 5011 4902 ci->i_dirty_caps = 0; 5012 4903 list_del_init(&ci->i_dirty_item); 5013 4904 dirty_dropped = true; 5014 4905 } 5015 4906 if (!list_empty(&ci->i_flushing_item)) { 5016 - pr_warn_ratelimited( 5017 - " dropping dirty+flushing %s state for %p %lld\n", 4907 + pr_warn_ratelimited_client(cl, 4908 + " dropping dirty+flushing %s state for %p %llx.%llx\n", 5018 4909 ceph_cap_string(ci->i_flushing_caps), 5019 - inode, ceph_ino(inode)); 4910 + inode, ceph_vinop(inode)); 5020 4911 ci->i_flushing_caps = 0; 5021 4912 list_del_init(&ci->i_flushing_item); 5022 4913 mdsc->num_cap_flushing--; ··· 5039 4930 if (atomic_read(&ci->i_filelock_ref) > 0) { 5040 4931 /* make further file lock syscall return -EIO */ 5041 4932 ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK; 5042 - pr_warn_ratelimited(" dropping file locks for %p %lld\n", 5043 - inode, ceph_ino(inode)); 4933 + pr_warn_ratelimited_client(cl, 4934 + " dropping file locks for %p %llx.%llx\n", 4935 + inode, ceph_vinop(inode)); 5044 4936 } 5045 4937 5046 4938 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
+26 -13
fs/ceph/crypto.c
··· 211 211 static struct inode *parse_longname(const struct inode *parent, 212 212 const char *name, int *name_len) 213 213 { 214 + struct ceph_client *cl = ceph_inode_to_client(parent); 214 215 struct inode *dir = NULL; 215 216 struct ceph_vino vino = { .snap = CEPH_NOSNAP }; 216 217 char *inode_number; ··· 223 222 name++; 224 223 name_end = strrchr(name, '_'); 225 224 if (!name_end) { 226 - dout("Failed to parse long snapshot name: %s\n", name); 225 + doutc(cl, "failed to parse long snapshot name: %s\n", name); 227 226 return ERR_PTR(-EIO); 228 227 } 229 228 *name_len = (name_end - name); 230 229 if (*name_len <= 0) { 231 - pr_err("Failed to parse long snapshot name\n"); 230 + pr_err_client(cl, "failed to parse long snapshot name\n"); 232 231 return ERR_PTR(-EIO); 233 232 } 234 233 ··· 240 239 return ERR_PTR(-ENOMEM); 241 240 ret = kstrtou64(inode_number, 10, &vino.ino); 242 241 if (ret) { 243 - dout("Failed to parse inode number: %s\n", name); 242 + doutc(cl, "failed to parse inode number: %s\n", name); 244 243 dir = ERR_PTR(ret); 245 244 goto out; 246 245 } ··· 251 250 /* This can happen if we're not mounting cephfs on the root */ 252 251 dir = ceph_get_inode(parent->i_sb, vino, NULL); 253 252 if (IS_ERR(dir)) 254 - dout("Can't find inode %s (%s)\n", inode_number, name); 253 + doutc(cl, "can't find inode %s (%s)\n", inode_number, name); 255 254 } 256 255 257 256 out: ··· 262 261 int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name, 263 262 char *buf) 264 263 { 264 + struct ceph_client *cl = ceph_inode_to_client(parent); 265 265 struct inode *dir = parent; 266 266 struct qstr iname; 267 267 u32 len; ··· 331 329 332 330 /* base64 encode the encrypted name */ 333 331 elen = ceph_base64_encode(cryptbuf, len, buf); 334 - dout("base64-encoded ciphertext name = %.*s\n", elen, buf); 332 + doutc(cl, "base64-encoded ciphertext name = %.*s\n", elen, buf); 335 333 336 334 /* To understand the 240 limit, see CEPH_NOHASH_NAME_MAX comments */ 337 335 
WARN_ON(elen > 240); ··· 506 504 struct page *page, unsigned int len, 507 505 unsigned int offs, u64 lblk_num) 508 506 { 509 - dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num); 507 + struct ceph_client *cl = ceph_inode_to_client(inode); 508 + 509 + doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode, 510 + ceph_vinop(inode), len, offs, lblk_num); 510 511 return fscrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num); 511 512 } 512 513 ··· 518 513 unsigned int offs, u64 lblk_num, 519 514 gfp_t gfp_flags) 520 515 { 521 - dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num); 516 + struct ceph_client *cl = ceph_inode_to_client(inode); 517 + 518 + doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode, 519 + ceph_vinop(inode), len, offs, lblk_num); 522 520 return fscrypt_encrypt_block_inplace(inode, page, len, offs, lblk_num, 523 521 gfp_flags); 524 522 } ··· 590 582 u64 off, struct ceph_sparse_extent *map, 591 583 u32 ext_cnt) 592 584 { 585 + struct ceph_client *cl = ceph_inode_to_client(inode); 593 586 int i, ret = 0; 594 587 struct ceph_inode_info *ci = ceph_inode(inode); 595 588 u64 objno, objoff; ··· 598 589 599 590 /* Nothing to do for empty array */ 600 591 if (ext_cnt == 0) { 601 - dout("%s: empty array, ret 0\n", __func__); 592 + doutc(cl, "%p %llx.%llx empty array, ret 0\n", inode, 593 + ceph_vinop(inode)); 602 594 return 0; 603 595 } 604 596 ··· 613 603 int fret; 614 604 615 605 if ((ext->off | ext->len) & ~CEPH_FSCRYPT_BLOCK_MASK) { 616 - pr_warn("%s: bad encrypted sparse extent idx %d off %llx len %llx\n", 617 - __func__, i, ext->off, ext->len); 606 + pr_warn_client(cl, 607 + "%p %llx.%llx bad encrypted sparse extent " 608 + "idx %d off %llx len %llx\n", 609 + inode, ceph_vinop(inode), i, ext->off, 610 + ext->len); 618 611 return -EIO; 619 612 } 620 613 fret = ceph_fscrypt_decrypt_pages(inode, &page[pgidx], 621 614 off + pgsoff, ext->len); 622 - dout("%s: [%d] 0x%llx~0x%llx fret %d\n", __func__, i, 623 - 
ext->off, ext->len, fret); 615 + doutc(cl, "%p %llx.%llx [%d] 0x%llx~0x%llx fret %d\n", inode, 616 + ceph_vinop(inode), i, ext->off, ext->len, fret); 624 617 if (fret < 0) { 625 618 if (ret == 0) 626 619 ret = fret; ··· 631 618 } 632 619 ret = pgsoff + fret; 633 620 } 634 - dout("%s: ret %d\n", __func__, ret); 621 + doutc(cl, "ret %d\n", ret); 635 622 return ret; 636 623 } 637 624
+4 -2
fs/ceph/debugfs.c
··· 398 398 399 399 void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc) 400 400 { 401 - dout("ceph_fs_debugfs_cleanup\n"); 401 + doutc(fsc->client, "begin\n"); 402 402 debugfs_remove(fsc->debugfs_bdi); 403 403 debugfs_remove(fsc->debugfs_congestion_kb); 404 404 debugfs_remove(fsc->debugfs_mdsmap); ··· 407 407 debugfs_remove(fsc->debugfs_status); 408 408 debugfs_remove(fsc->debugfs_mdsc); 409 409 debugfs_remove_recursive(fsc->debugfs_metrics_dir); 410 + doutc(fsc->client, "done\n"); 410 411 } 411 412 412 413 void ceph_fs_debugfs_init(struct ceph_fs_client *fsc) 413 414 { 414 415 char name[100]; 415 416 416 - dout("ceph_fs_debugfs_init\n"); 417 + doutc(fsc->client, "begin\n"); 417 418 fsc->debugfs_congestion_kb = 418 419 debugfs_create_file("writeback_congestion_kb", 419 420 0600, ··· 470 469 &metrics_size_fops); 471 470 debugfs_create_file("caps", 0400, fsc->debugfs_metrics_dir, fsc, 472 471 &metrics_caps_fops); 472 + doutc(fsc->client, "done\n"); 473 473 } 474 474 475 475
+132 -86
fs/ceph/dir.c
··· 109 109 * regardless of what dir changes take place on the 110 110 * server. 111 111 */ 112 - static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name, 112 + static int note_last_dentry(struct ceph_fs_client *fsc, 113 + struct ceph_dir_file_info *dfi, 114 + const char *name, 113 115 int len, unsigned next_offset) 114 116 { 115 117 char *buf = kmalloc(len+1, GFP_KERNEL); ··· 122 120 memcpy(dfi->last_name, name, len); 123 121 dfi->last_name[len] = 0; 124 122 dfi->next_offset = next_offset; 125 - dout("note_last_dentry '%s'\n", dfi->last_name); 123 + doutc(fsc->client, "'%s'\n", dfi->last_name); 126 124 return 0; 127 125 } 128 126 ··· 132 130 struct ceph_readdir_cache_control *cache_ctl) 133 131 { 134 132 struct inode *dir = d_inode(parent); 133 + struct ceph_client *cl = ceph_inode_to_client(dir); 135 134 struct dentry *dentry; 136 135 unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1; 137 136 loff_t ptr_pos = idx * sizeof(struct dentry *); ··· 145 142 ceph_readdir_cache_release(cache_ctl); 146 143 cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff); 147 144 if (!cache_ctl->page) { 148 - dout(" page %lu not found\n", ptr_pgoff); 145 + doutc(cl, " page %lu not found\n", ptr_pgoff); 149 146 return ERR_PTR(-EAGAIN); 150 147 } 151 148 /* reading/filling the cache are serialized by ··· 188 185 struct ceph_dir_file_info *dfi = file->private_data; 189 186 struct dentry *parent = file->f_path.dentry; 190 187 struct inode *dir = d_inode(parent); 188 + struct ceph_fs_client *fsc = ceph_inode_to_fs_client(dir); 189 + struct ceph_client *cl = ceph_inode_to_client(dir); 191 190 struct dentry *dentry, *last = NULL; 192 191 struct ceph_dentry_info *di; 193 192 struct ceph_readdir_cache_control cache_ctl = {}; 194 193 u64 idx = 0; 195 194 int err = 0; 196 195 197 - dout("__dcache_readdir %p v%u at %llx\n", dir, (unsigned)shared_gen, ctx->pos); 196 + doutc(cl, "%p %llx.%llx v%u at %llx\n", dir, ceph_vinop(dir), 197 + (unsigned)shared_gen, 
ctx->pos); 198 198 199 199 /* search start position */ 200 200 if (ctx->pos > 2) { ··· 227 221 dput(dentry); 228 222 } 229 223 230 - dout("__dcache_readdir %p cache idx %llu\n", dir, idx); 224 + doutc(cl, "%p %llx.%llx cache idx %llu\n", dir, 225 + ceph_vinop(dir), idx); 231 226 } 232 227 233 228 ··· 264 257 spin_unlock(&dentry->d_lock); 265 258 266 259 if (emit_dentry) { 267 - dout(" %llx dentry %p %pd %p\n", di->offset, 268 - dentry, dentry, d_inode(dentry)); 260 + doutc(cl, " %llx dentry %p %pd %p\n", di->offset, 261 + dentry, dentry, d_inode(dentry)); 269 262 ctx->pos = di->offset; 270 263 if (!dir_emit(ctx, dentry->d_name.name, 271 264 dentry->d_name.len, ceph_present_inode(d_inode(dentry)), ··· 288 281 if (last) { 289 282 int ret; 290 283 di = ceph_dentry(last); 291 - ret = note_last_dentry(dfi, last->d_name.name, last->d_name.len, 284 + ret = note_last_dentry(fsc, dfi, last->d_name.name, 285 + last->d_name.len, 292 286 fpos_off(di->offset) + 1); 293 287 if (ret < 0) 294 288 err = ret; ··· 320 312 struct ceph_inode_info *ci = ceph_inode(inode); 321 313 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 322 314 struct ceph_mds_client *mdsc = fsc->mdsc; 315 + struct ceph_client *cl = fsc->client; 323 316 int i; 324 317 int err; 325 318 unsigned frag = -1; 326 319 struct ceph_mds_reply_info_parsed *rinfo; 327 320 328 - dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos); 321 + doutc(cl, "%p %llx.%llx file %p pos %llx\n", inode, 322 + ceph_vinop(inode), file, ctx->pos); 329 323 if (dfi->file_info.flags & CEPH_F_ATEND) 330 324 return 0; 331 325 332 326 /* always start with . and .. 
*/ 333 327 if (ctx->pos == 0) { 334 - dout("readdir off 0 -> '.'\n"); 328 + doutc(cl, "%p %llx.%llx off 0 -> '.'\n", inode, 329 + ceph_vinop(inode)); 335 330 if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode), 336 331 inode->i_mode >> 12)) 337 332 return 0; ··· 348 337 ino = ceph_present_inode(dentry->d_parent->d_inode); 349 338 spin_unlock(&dentry->d_lock); 350 339 351 - dout("readdir off 1 -> '..'\n"); 340 + doutc(cl, "%p %llx.%llx off 1 -> '..'\n", inode, 341 + ceph_vinop(inode)); 352 342 if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12)) 353 343 return 0; 354 344 ctx->pos = 2; ··· 403 391 frag = fpos_frag(ctx->pos); 404 392 } 405 393 406 - dout("readdir fetching %llx.%llx frag %x offset '%s'\n", 407 - ceph_vinop(inode), frag, dfi->last_name); 394 + doutc(cl, "fetching %p %llx.%llx frag %x offset '%s'\n", 395 + inode, ceph_vinop(inode), frag, dfi->last_name); 408 396 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 409 397 if (IS_ERR(req)) 410 398 return PTR_ERR(req); ··· 458 446 ceph_mdsc_put_request(req); 459 447 return err; 460 448 } 461 - dout("readdir got and parsed readdir result=%d on " 462 - "frag %x, end=%d, complete=%d, hash_order=%d\n", 463 - err, frag, 464 - (int)req->r_reply_info.dir_end, 465 - (int)req->r_reply_info.dir_complete, 466 - (int)req->r_reply_info.hash_order); 449 + doutc(cl, "%p %llx.%llx got and parsed readdir result=%d" 450 + "on frag %x, end=%d, complete=%d, hash_order=%d\n", 451 + inode, ceph_vinop(inode), err, frag, 452 + (int)req->r_reply_info.dir_end, 453 + (int)req->r_reply_info.dir_complete, 454 + (int)req->r_reply_info.hash_order); 467 455 468 456 rinfo = &req->r_reply_info; 469 457 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) { ··· 493 481 dfi->dir_ordered_count = req->r_dir_ordered_cnt; 494 482 } 495 483 } else { 496 - dout("readdir !did_prepopulate\n"); 484 + doutc(cl, "%p %llx.%llx !did_prepopulate\n", inode, 485 + ceph_vinop(inode)); 497 486 /* disable readdir cache */ 498 487 dfi->readdir_cache_idx = -1; 
499 488 /* preclude from marking dir complete */ ··· 507 494 rinfo->dir_entries + (rinfo->dir_nr-1); 508 495 unsigned next_offset = req->r_reply_info.dir_end ? 509 496 2 : (fpos_off(rde->offset) + 1); 510 - err = note_last_dentry(dfi, rde->name, rde->name_len, 511 - next_offset); 497 + err = note_last_dentry(fsc, dfi, rde->name, 498 + rde->name_len, next_offset); 512 499 if (err) { 513 500 ceph_mdsc_put_request(dfi->last_readdir); 514 501 dfi->last_readdir = NULL; ··· 521 508 } 522 509 523 510 rinfo = &dfi->last_readdir->r_reply_info; 524 - dout("readdir frag %x num %d pos %llx chunk first %llx\n", 525 - dfi->frag, rinfo->dir_nr, ctx->pos, 526 - rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL); 511 + doutc(cl, "%p %llx.%llx frag %x num %d pos %llx chunk first %llx\n", 512 + inode, ceph_vinop(inode), dfi->frag, rinfo->dir_nr, ctx->pos, 513 + rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL); 527 514 528 515 i = 0; 529 516 /* search start position */ ··· 543 530 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i; 544 531 545 532 if (rde->offset < ctx->pos) { 546 - pr_warn("%s: rde->offset 0x%llx ctx->pos 0x%llx\n", 547 - __func__, rde->offset, ctx->pos); 533 + pr_warn_client(cl, 534 + "%p %llx.%llx rde->offset 0x%llx ctx->pos 0x%llx\n", 535 + inode, ceph_vinop(inode), rde->offset, ctx->pos); 548 536 return -EIO; 549 537 } 550 538 ··· 553 539 return -EIO; 554 540 555 541 ctx->pos = rde->offset; 556 - dout("readdir (%d/%d) -> %llx '%.*s' %p\n", 557 - i, rinfo->dir_nr, ctx->pos, 558 - rde->name_len, rde->name, &rde->inode.in); 542 + doutc(cl, "%p %llx.%llx (%d/%d) -> %llx '%.*s' %p\n", inode, 543 + ceph_vinop(inode), i, rinfo->dir_nr, ctx->pos, 544 + rde->name_len, rde->name, &rde->inode.in); 559 545 560 546 if (!dir_emit(ctx, rde->name, rde->name_len, 561 547 ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)), ··· 566 552 * doesn't have enough memory, etc. So for next readdir 567 553 * it will continue. 
568 554 */ 569 - dout("filldir stopping us...\n"); 555 + doutc(cl, "filldir stopping us...\n"); 570 556 return 0; 571 557 } 572 558 ··· 597 583 kfree(dfi->last_name); 598 584 dfi->last_name = NULL; 599 585 } 600 - dout("readdir next frag is %x\n", frag); 586 + doutc(cl, "%p %llx.%llx next frag is %x\n", inode, 587 + ceph_vinop(inode), frag); 601 588 goto more; 602 589 } 603 590 dfi->file_info.flags |= CEPH_F_ATEND; ··· 613 598 spin_lock(&ci->i_ceph_lock); 614 599 if (dfi->dir_ordered_count == 615 600 atomic64_read(&ci->i_ordered_count)) { 616 - dout(" marking %p complete and ordered\n", inode); 601 + doutc(cl, " marking %p %llx.%llx complete and ordered\n", 602 + inode, ceph_vinop(inode)); 617 603 /* use i_size to track number of entries in 618 604 * readdir cache */ 619 605 BUG_ON(dfi->readdir_cache_idx < 0); 620 606 i_size_write(inode, dfi->readdir_cache_idx * 621 607 sizeof(struct dentry*)); 622 608 } else { 623 - dout(" marking %p complete\n", inode); 609 + doutc(cl, " marking %llx.%llx complete\n", 610 + ceph_vinop(inode)); 624 611 } 625 612 __ceph_dir_set_complete(ci, dfi->dir_release_count, 626 613 dfi->dir_ordered_count); 627 614 spin_unlock(&ci->i_ceph_lock); 628 615 } 629 - dout("readdir %p file %p done.\n", inode, file); 616 + doutc(cl, "%p %llx.%llx file %p done.\n", inode, ceph_vinop(inode), 617 + file); 630 618 return 0; 631 619 } 632 620 ··· 675 657 { 676 658 struct ceph_dir_file_info *dfi = file->private_data; 677 659 struct inode *inode = file->f_mapping->host; 660 + struct ceph_client *cl = ceph_inode_to_client(inode); 678 661 loff_t retval; 679 662 680 663 inode_lock(inode); ··· 695 676 696 677 if (offset >= 0) { 697 678 if (need_reset_readdir(dfi, offset)) { 698 - dout("dir_llseek dropping %p content\n", file); 679 + doutc(cl, "%p %llx.%llx dropping %p content\n", 680 + inode, ceph_vinop(inode), file); 699 681 reset_readdir(dfi); 700 682 } else if (is_hash_order(offset) && offset > file->f_pos) { 701 683 /* for hash offset, we don't know if a 
forward seek ··· 725 705 { 726 706 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb); 727 707 struct inode *parent = d_inode(dentry->d_parent); /* we hold i_rwsem */ 708 + struct ceph_client *cl = ceph_inode_to_client(parent); 728 709 729 710 /* .snap dir? */ 730 711 if (ceph_snap(parent) == CEPH_NOSNAP && ··· 734 713 struct inode *inode = ceph_get_snapdir(parent); 735 714 736 715 res = d_splice_alias(inode, dentry); 737 - dout("ENOENT on snapdir %p '%pd', linking to snapdir %p. Spliced dentry %p\n", 738 - dentry, dentry, inode, res); 716 + doutc(cl, "ENOENT on snapdir %p '%pd', linking to " 717 + "snapdir %p %llx.%llx. Spliced dentry %p\n", 718 + dentry, dentry, inode, ceph_vinop(inode), res); 739 719 if (res) 740 720 dentry = res; 741 721 } ··· 757 735 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, 758 736 struct dentry *dentry, int err) 759 737 { 738 + struct ceph_client *cl = req->r_mdsc->fsc->client; 739 + 760 740 if (err == -ENOENT) { 761 741 /* no trace? 
*/ 762 742 err = 0; 763 743 if (!req->r_reply_info.head->is_dentry) { 764 - dout("ENOENT and no trace, dentry %p inode %p\n", 765 - dentry, d_inode(dentry)); 744 + doutc(cl, 745 + "ENOENT and no trace, dentry %p inode %llx.%llx\n", 746 + dentry, ceph_vinop(d_inode(dentry))); 766 747 if (d_really_is_positive(dentry)) { 767 748 d_drop(dentry); 768 749 err = -ENOENT; ··· 798 773 { 799 774 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb); 800 775 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); 776 + struct ceph_client *cl = fsc->client; 801 777 struct ceph_mds_request *req; 802 778 int op; 803 779 int mask; 804 780 int err; 805 781 806 - dout("lookup %p dentry %p '%pd'\n", 807 - dir, dentry, dentry); 782 + doutc(cl, "%p %llx.%llx/'%pd' dentry %p\n", dir, ceph_vinop(dir), 783 + dentry, dentry); 808 784 809 785 if (dentry->d_name.len > NAME_MAX) 810 786 return ERR_PTR(-ENAMETOOLONG); ··· 828 802 struct ceph_dentry_info *di = ceph_dentry(dentry); 829 803 830 804 spin_lock(&ci->i_ceph_lock); 831 - dout(" dir %p flags are 0x%lx\n", dir, ci->i_ceph_flags); 805 + doutc(cl, " dir %llx.%llx flags are 0x%lx\n", 806 + ceph_vinop(dir), ci->i_ceph_flags); 832 807 if (strncmp(dentry->d_name.name, 833 808 fsc->mount_options->snapdir_name, 834 809 dentry->d_name.len) && ··· 839 812 __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) { 840 813 __ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD); 841 814 spin_unlock(&ci->i_ceph_lock); 842 - dout(" dir %p complete, -ENOENT\n", dir); 815 + doutc(cl, " dir %llx.%llx complete, -ENOENT\n", 816 + ceph_vinop(dir)); 843 817 d_add(dentry, NULL); 844 818 di->lease_shared_gen = atomic_read(&ci->i_shared_gen); 845 819 return NULL; ··· 878 850 } 879 851 dentry = ceph_finish_lookup(req, dentry, err); 880 852 ceph_mdsc_put_request(req); /* will dput(dentry) */ 881 - dout("lookup result=%p\n", dentry); 853 + doutc(cl, "result=%p\n", dentry); 882 854 return dentry; 883 855 } 884 856 ··· 913 885 struct dentry *dentry, umode_t 
mode, dev_t rdev) 914 886 { 915 887 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); 888 + struct ceph_client *cl = mdsc->fsc->client; 916 889 struct ceph_mds_request *req; 917 890 struct ceph_acl_sec_ctx as_ctx = {}; 918 891 int err; ··· 930 901 goto out; 931 902 } 932 903 933 - dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n", 934 - dir, dentry, mode, rdev); 904 + doutc(cl, "%p %llx.%llx/'%pd' dentry %p mode 0%ho rdev %d\n", 905 + dir, ceph_vinop(dir), dentry, dentry, mode, rdev); 935 906 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS); 936 907 if (IS_ERR(req)) { 937 908 err = PTR_ERR(req); ··· 1022 993 struct dentry *dentry, const char *dest) 1023 994 { 1024 995 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); 996 + struct ceph_client *cl = mdsc->fsc->client; 1025 997 struct ceph_mds_request *req; 1026 998 struct ceph_acl_sec_ctx as_ctx = {}; 1027 999 umode_t mode = S_IFLNK | 0777; ··· 1040 1010 goto out; 1041 1011 } 1042 1012 1043 - dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest); 1013 + doutc(cl, "%p %llx.%llx/'%pd' to '%s'\n", dir, ceph_vinop(dir), dentry, 1014 + dest); 1044 1015 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS); 1045 1016 if (IS_ERR(req)) { 1046 1017 err = PTR_ERR(req); ··· 1095 1064 struct dentry *dentry, umode_t mode) 1096 1065 { 1097 1066 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); 1067 + struct ceph_client *cl = mdsc->fsc->client; 1098 1068 struct ceph_mds_request *req; 1099 1069 struct ceph_acl_sec_ctx as_ctx = {}; 1100 1070 int err; ··· 1108 1076 if (ceph_snap(dir) == CEPH_SNAPDIR) { 1109 1077 /* mkdir .snap/foo is a MKSNAP */ 1110 1078 op = CEPH_MDS_OP_MKSNAP; 1111 - dout("mksnap dir %p snap '%pd' dn %p\n", dir, 1112 - dentry, dentry); 1079 + doutc(cl, "mksnap %llx.%llx/'%pd' dentry %p\n", 1080 + ceph_vinop(dir), dentry, dentry); 1113 1081 } else if (ceph_snap(dir) == CEPH_NOSNAP) { 1114 - dout("mkdir dir %p dn %p mode 0%ho\n", dir, 
dentry, mode); 1082 + doutc(cl, "mkdir %llx.%llx/'%pd' dentry %p mode 0%ho\n", 1083 + ceph_vinop(dir), dentry, dentry, mode); 1115 1084 op = CEPH_MDS_OP_MKDIR; 1116 1085 } else { 1117 1086 err = -EROFS; ··· 1177 1144 struct dentry *dentry) 1178 1145 { 1179 1146 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); 1147 + struct ceph_client *cl = mdsc->fsc->client; 1180 1148 struct ceph_mds_request *req; 1181 1149 int err; 1182 1150 ··· 1195 1161 if (err) 1196 1162 return err; 1197 1163 1198 - dout("link in dir %p %llx.%llx old_dentry %p:'%pd' dentry %p:'%pd'\n", 1199 - dir, ceph_vinop(dir), old_dentry, old_dentry, dentry, dentry); 1164 + doutc(cl, "%p %llx.%llx/'%pd' to '%pd'\n", dir, ceph_vinop(dir), 1165 + old_dentry, dentry); 1200 1166 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS); 1201 1167 if (IS_ERR(req)) { 1202 1168 d_drop(dentry); ··· 1234 1200 { 1235 1201 struct dentry *dentry = req->r_dentry; 1236 1202 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb); 1203 + struct ceph_client *cl = fsc->client; 1237 1204 struct ceph_dentry_info *di = ceph_dentry(dentry); 1238 1205 int result = req->r_err ? req->r_err : 1239 1206 le32_to_cpu(req->r_reply_info.head->result); 1240 1207 1241 1208 if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags)) 1242 - pr_warn("%s dentry %p:%pd async unlink bit is not set\n", 1243 - __func__, dentry, dentry); 1209 + pr_warn_client(cl, 1210 + "dentry %p:%pd async unlink bit is not set\n", 1211 + dentry, dentry); 1244 1212 1245 1213 spin_lock(&fsc->async_unlink_conflict_lock); 1246 1214 hash_del_rcu(&di->hnode); ··· 1276 1240 /* mark inode itself for an error (since metadata is bogus) */ 1277 1241 mapping_set_error(req->r_old_inode->i_mapping, result); 1278 1242 1279 - pr_warn("async unlink failure path=(%llx)%s result=%d!\n", 1280 - base, IS_ERR(path) ? "<<bad>>" : path, result); 1243 + pr_warn_client(cl, "failure path=(%llx)%s result=%d!\n", 1244 + base, IS_ERR(path) ? 
"<<bad>>" : path, result); 1281 1245 ceph_mdsc_free_path(path, pathlen); 1282 1246 } 1283 1247 out: ··· 1327 1291 static int ceph_unlink(struct inode *dir, struct dentry *dentry) 1328 1292 { 1329 1293 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb); 1294 + struct ceph_client *cl = fsc->client; 1330 1295 struct ceph_mds_client *mdsc = fsc->mdsc; 1331 1296 struct inode *inode = d_inode(dentry); 1332 1297 struct ceph_mds_request *req; ··· 1337 1300 1338 1301 if (ceph_snap(dir) == CEPH_SNAPDIR) { 1339 1302 /* rmdir .snap/foo is RMSNAP */ 1340 - dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry); 1303 + doutc(cl, "rmsnap %llx.%llx/'%pd' dn\n", ceph_vinop(dir), 1304 + dentry); 1341 1305 op = CEPH_MDS_OP_RMSNAP; 1342 1306 } else if (ceph_snap(dir) == CEPH_NOSNAP) { 1343 - dout("unlink/rmdir dir %p dn %p inode %p\n", 1344 - dir, dentry, inode); 1307 + doutc(cl, "unlink/rmdir %llx.%llx/'%pd' inode %llx.%llx\n", 1308 + ceph_vinop(dir), dentry, ceph_vinop(inode)); 1345 1309 op = d_is_dir(dentry) ? 
1346 1310 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK; 1347 1311 } else ··· 1365 1327 (req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) { 1366 1328 struct ceph_dentry_info *di = ceph_dentry(dentry); 1367 1329 1368 - dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir), 1369 - dentry->d_name.len, dentry->d_name.name, 1370 - ceph_cap_string(req->r_dir_caps)); 1330 + doutc(cl, "async unlink on %llx.%llx/'%pd' caps=%s", 1331 + ceph_vinop(dir), dentry, 1332 + ceph_cap_string(req->r_dir_caps)); 1371 1333 set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags); 1372 1334 req->r_callback = ceph_async_unlink_cb; 1373 1335 req->r_old_inode = d_inode(dentry); ··· 1422 1384 struct dentry *new_dentry, unsigned int flags) 1423 1385 { 1424 1386 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old_dir->i_sb); 1387 + struct ceph_client *cl = mdsc->fsc->client; 1425 1388 struct ceph_mds_request *req; 1426 1389 int op = CEPH_MDS_OP_RENAME; 1427 1390 int err; ··· 1452 1413 if (err) 1453 1414 return err; 1454 1415 1455 - dout("rename dir %p dentry %p to dir %p dentry %p\n", 1456 - old_dir, old_dentry, new_dir, new_dentry); 1416 + doutc(cl, "%llx.%llx/'%pd' to %llx.%llx/'%pd'\n", 1417 + ceph_vinop(old_dir), old_dentry, ceph_vinop(new_dir), 1418 + new_dentry); 1457 1419 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 1458 1420 if (IS_ERR(req)) 1459 1421 return PTR_ERR(req); ··· 1499 1459 void __ceph_dentry_lease_touch(struct ceph_dentry_info *di) 1500 1460 { 1501 1461 struct dentry *dn = di->dentry; 1502 - struct ceph_mds_client *mdsc; 1462 + struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc; 1463 + struct ceph_client *cl = mdsc->fsc->client; 1503 1464 1504 - dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn); 1465 + doutc(cl, "%p %p '%pd'\n", di, dn, dn); 1505 1466 1506 1467 di->flags |= CEPH_DENTRY_LEASE_LIST; 1507 1468 if (di->flags & CEPH_DENTRY_SHRINK_LIST) { ··· 1510 1469 return; 1511 1470 } 1512 1471 1513 - mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc; 1514 
1472 spin_lock(&mdsc->dentry_list_lock); 1515 1473 list_move_tail(&di->lease_list, &mdsc->dentry_leases); 1516 1474 spin_unlock(&mdsc->dentry_list_lock); ··· 1533 1493 void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di) 1534 1494 { 1535 1495 struct dentry *dn = di->dentry; 1536 - struct ceph_mds_client *mdsc; 1496 + struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc; 1497 + struct ceph_client *cl = mdsc->fsc->client; 1537 1498 1538 - dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n", 1539 - di, dn, dn, di->offset); 1499 + doutc(cl, "%p %p '%pd' (offset 0x%llx)\n", di, dn, dn, di->offset); 1540 1500 1541 1501 if (!list_empty(&di->lease_list)) { 1542 1502 if (di->flags & CEPH_DENTRY_LEASE_LIST) { ··· 1556 1516 return; 1557 1517 } 1558 1518 1559 - mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc; 1560 1519 spin_lock(&mdsc->dentry_list_lock); 1561 1520 __dentry_dir_lease_touch(mdsc, di), 1562 1521 spin_unlock(&mdsc->dentry_list_lock); ··· 1796 1757 { 1797 1758 struct ceph_dentry_info *di; 1798 1759 struct ceph_mds_session *session = NULL; 1760 + struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc; 1761 + struct ceph_client *cl = mdsc->fsc->client; 1799 1762 u32 seq = 0; 1800 1763 int valid = 0; 1801 1764 ··· 1830 1789 CEPH_MDS_LEASE_RENEW, seq); 1831 1790 ceph_put_mds_session(session); 1832 1791 } 1833 - dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid); 1792 + doutc(cl, "dentry %p = %d\n", dentry, valid); 1834 1793 return valid; 1835 1794 } 1836 1795 ··· 1873 1832 struct ceph_mds_client *mdsc) 1874 1833 { 1875 1834 struct ceph_inode_info *ci = ceph_inode(dir); 1835 + struct ceph_client *cl = mdsc->fsc->client; 1876 1836 int valid; 1877 1837 int shared_gen; 1878 1838 ··· 1895 1853 valid = 0; 1896 1854 spin_unlock(&dentry->d_lock); 1897 1855 } 1898 - dout("dir_lease_is_valid dir %p v%u dentry %p = %d\n", 1899 - dir, (unsigned)atomic_read(&ci->i_shared_gen), dentry, valid); 1856 + doutc(cl, "dir %p 
%llx.%llx v%u dentry %p '%pd' = %d\n", dir, 1857 + ceph_vinop(dir), (unsigned)atomic_read(&ci->i_shared_gen), 1858 + dentry, dentry, valid); 1900 1859 return valid; 1901 1860 } 1902 1861 ··· 1906 1863 */ 1907 1864 static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) 1908 1865 { 1866 + struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc; 1867 + struct ceph_client *cl = mdsc->fsc->client; 1909 1868 int valid = 0; 1910 1869 struct dentry *parent; 1911 1870 struct inode *dir, *inode; 1912 - struct ceph_mds_client *mdsc; 1913 1871 1914 1872 valid = fscrypt_d_revalidate(dentry, flags); 1915 1873 if (valid <= 0) ··· 1928 1884 inode = d_inode(dentry); 1929 1885 } 1930 1886 1931 - dout("d_revalidate %p '%pd' inode %p offset 0x%llx nokey %d\n", dentry, 1932 - dentry, inode, ceph_dentry(dentry)->offset, 1933 - !!(dentry->d_flags & DCACHE_NOKEY_NAME)); 1887 + doutc(cl, "%p '%pd' inode %p offset 0x%llx nokey %d\n", 1888 + dentry, dentry, inode, ceph_dentry(dentry)->offset, 1889 + !!(dentry->d_flags & DCACHE_NOKEY_NAME)); 1934 1890 1935 1891 mdsc = ceph_sb_to_fs_client(dir->i_sb)->mdsc; 1936 1892 1937 1893 /* always trust cached snapped dentries, snapdir dentry */ 1938 1894 if (ceph_snap(dir) != CEPH_NOSNAP) { 1939 - dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry, 1940 - dentry, inode); 1895 + doutc(cl, "%p '%pd' inode %p is SNAPPED\n", dentry, 1896 + dentry, inode); 1941 1897 valid = 1; 1942 1898 } else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { 1943 1899 valid = 1; ··· 1992 1948 break; 1993 1949 } 1994 1950 ceph_mdsc_put_request(req); 1995 - dout("d_revalidate %p lookup result=%d\n", 1996 - dentry, err); 1951 + doutc(cl, "%p '%pd', lookup result=%d\n", dentry, 1952 + dentry, err); 1997 1953 } 1998 1954 } else { 1999 1955 percpu_counter_inc(&mdsc->metric.d_lease_hit); 2000 1956 } 2001 1957 2002 - dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid"); 1958 + doutc(cl, "%p '%pd' %s\n", dentry, dentry, valid ? 
"valid" : "invalid"); 2003 1959 if (!valid) 2004 1960 ceph_dir_clear_complete(dir); 2005 1961 ··· 2041 1997 struct ceph_dentry_info *di = ceph_dentry(dentry); 2042 1998 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb); 2043 1999 2044 - dout("d_release %p\n", dentry); 2000 + doutc(fsc->client, "dentry %p '%pd'\n", dentry, dentry); 2045 2001 2046 2002 atomic64_dec(&fsc->mdsc->metric.total_dentries); 2047 2003 ··· 2062 2018 */ 2063 2019 static void ceph_d_prune(struct dentry *dentry) 2064 2020 { 2021 + struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dentry->d_sb); 2022 + struct ceph_client *cl = mdsc->fsc->client; 2065 2023 struct ceph_inode_info *dir_ci; 2066 2024 struct ceph_dentry_info *di; 2067 2025 2068 - dout("ceph_d_prune %pd %p\n", dentry, dentry); 2026 + doutc(cl, "dentry %p '%pd'\n", dentry, dentry); 2069 2027 2070 2028 /* do we have a valid parent? */ 2071 2029 if (IS_ROOT(dentry))
+22 -17
fs/ceph/export.c
··· 36 36 static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len, 37 37 struct inode *parent_inode) 38 38 { 39 + struct ceph_client *cl = ceph_inode_to_client(inode); 39 40 static const int snap_handle_length = 40 41 sizeof(struct ceph_nfs_snapfh) >> 2; 41 42 struct ceph_nfs_snapfh *sfh = (void *)rawfh; ··· 80 79 *max_len = snap_handle_length; 81 80 ret = FILEID_BTRFS_WITH_PARENT; 82 81 out: 83 - dout("encode_snapfh %llx.%llx ret=%d\n", ceph_vinop(inode), ret); 82 + doutc(cl, "%p %llx.%llx ret=%d\n", inode, ceph_vinop(inode), ret); 84 83 return ret; 85 84 } 86 85 87 86 static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len, 88 87 struct inode *parent_inode) 89 88 { 89 + struct ceph_client *cl = ceph_inode_to_client(inode); 90 90 static const int handle_length = 91 91 sizeof(struct ceph_nfs_fh) >> 2; 92 92 static const int connected_handle_length = ··· 107 105 108 106 if (parent_inode) { 109 107 struct ceph_nfs_confh *cfh = (void *)rawfh; 110 - dout("encode_fh %llx with parent %llx\n", 111 - ceph_ino(inode), ceph_ino(parent_inode)); 108 + doutc(cl, "%p %llx.%llx with parent %p %llx.%llx\n", inode, 109 + ceph_vinop(inode), parent_inode, ceph_vinop(parent_inode)); 112 110 cfh->ino = ceph_ino(inode); 113 111 cfh->parent_ino = ceph_ino(parent_inode); 114 112 *max_len = connected_handle_length; 115 113 type = FILEID_INO32_GEN_PARENT; 116 114 } else { 117 115 struct ceph_nfs_fh *fh = (void *)rawfh; 118 - dout("encode_fh %llx\n", ceph_ino(inode)); 116 + doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode)); 119 117 fh->ino = ceph_ino(inode); 120 118 *max_len = handle_length; 121 119 type = FILEID_INO32_GEN; ··· 208 206 bool want_parent) 209 207 { 210 208 struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc; 209 + struct ceph_client *cl = mdsc->fsc->client; 211 210 struct ceph_mds_request *req; 212 211 struct inode *inode; 213 212 struct ceph_vino vino; ··· 281 278 ceph_mdsc_put_request(req); 282 279 283 280 if (want_parent) { 
284 - dout("snapfh_to_parent %llx.%llx\n err=%d\n", 285 - vino.ino, vino.snap, err); 281 + doutc(cl, "%llx.%llx\n err=%d\n", vino.ino, vino.snap, err); 286 282 } else { 287 - dout("snapfh_to_dentry %llx.%llx parent %llx hash %x err=%d", 288 - vino.ino, vino.snap, sfh->parent_ino, sfh->hash, err); 283 + doutc(cl, "%llx.%llx parent %llx hash %x err=%d", vino.ino, 284 + vino.snap, sfh->parent_ino, sfh->hash, err); 289 285 } 290 286 if (IS_ERR(inode)) 291 287 return ERR_CAST(inode); ··· 299 297 struct fid *fid, 300 298 int fh_len, int fh_type) 301 299 { 300 + struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb); 302 301 struct ceph_nfs_fh *fh = (void *)fid->raw; 303 302 304 303 if (fh_type == FILEID_BTRFS_WITH_PARENT) { ··· 313 310 if (fh_len < sizeof(*fh) / 4) 314 311 return NULL; 315 312 316 - dout("fh_to_dentry %llx\n", fh->ino); 313 + doutc(fsc->client, "%llx\n", fh->ino); 317 314 return __fh_to_dentry(sb, fh->ino); 318 315 } 319 316 ··· 366 363 static struct dentry *ceph_get_parent(struct dentry *child) 367 364 { 368 365 struct inode *inode = d_inode(child); 366 + struct ceph_client *cl = ceph_inode_to_client(inode); 369 367 struct dentry *dn; 370 368 371 369 if (ceph_snap(inode) != CEPH_NOSNAP) { ··· 406 402 dn = __get_parent(child->d_sb, child, 0); 407 403 } 408 404 out: 409 - dout("get_parent %p ino %llx.%llx err=%ld\n", 410 - child, ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn)); 405 + doutc(cl, "child %p %p %llx.%llx err=%ld\n", child, inode, 406 + ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn)); 411 407 return dn; 412 408 } 413 409 ··· 418 414 struct fid *fid, 419 415 int fh_len, int fh_type) 420 416 { 417 + struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb); 421 418 struct ceph_nfs_confh *cfh = (void *)fid->raw; 422 419 struct dentry *dentry; 423 420 ··· 432 427 if (fh_len < sizeof(*cfh) / 4) 433 428 return NULL; 434 429 435 - dout("fh_to_parent %llx\n", cfh->parent_ino); 430 + doutc(fsc->client, "%llx\n", cfh->parent_ino); 436 431 dentry = 
__get_parent(sb, NULL, cfh->ino); 437 432 if (unlikely(dentry == ERR_PTR(-ENOENT))) 438 433 dentry = __fh_to_dentry(sb, cfh->parent_ino); ··· 531 526 if (req) 532 527 ceph_mdsc_put_request(req); 533 528 kfree(last_name); 534 - dout("get_snap_name %p ino %llx.%llx err=%d\n", 535 - child, ceph_vinop(inode), err); 529 + doutc(fsc->client, "child dentry %p %p %llx.%llx err=%d\n", child, 530 + inode, ceph_vinop(inode), err); 536 531 return err; 537 532 } 538 533 ··· 593 588 ceph_fname_free_buffer(dir, &oname); 594 589 } 595 590 out: 596 - dout("get_name %p ino %llx.%llx err %d %s%s\n", 597 - child, ceph_vinop(inode), err, 598 - err ? "" : "name ", err ? "" : name); 591 + doutc(mdsc->fsc->client, "child dentry %p %p %llx.%llx err %d %s%s\n", 592 + child, inode, ceph_vinop(inode), err, err ? "" : "name ", 593 + err ? "" : name); 599 594 ceph_mdsc_put_request(req); 600 595 return err; 601 596 }
+141 -104
fs/ceph/file.c
··· 19 19 #include "io.h" 20 20 #include "metric.h" 21 21 22 - static __le32 ceph_flags_sys2wire(u32 flags) 22 + static __le32 ceph_flags_sys2wire(struct ceph_mds_client *mdsc, u32 flags) 23 23 { 24 + struct ceph_client *cl = mdsc->fsc->client; 24 25 u32 wire_flags = 0; 25 26 26 27 switch (flags & O_ACCMODE) { ··· 49 48 #undef ceph_sys2wire 50 49 51 50 if (flags) 52 - dout("unused open flags: %x\n", flags); 51 + doutc(cl, "unused open flags: %x\n", flags); 53 52 54 53 return cpu_to_le32(wire_flags); 55 54 } ··· 190 189 if (IS_ERR(req)) 191 190 goto out; 192 191 req->r_fmode = ceph_flags_to_mode(flags); 193 - req->r_args.open.flags = ceph_flags_sys2wire(flags); 192 + req->r_args.open.flags = ceph_flags_sys2wire(mdsc, flags); 194 193 req->r_args.open.mode = cpu_to_le32(create_mode); 195 194 out: 196 195 return req; ··· 202 201 struct ceph_inode_info *ci = ceph_inode(inode); 203 202 struct ceph_mount_options *opt = 204 203 ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options; 204 + struct ceph_client *cl = ceph_inode_to_client(inode); 205 205 struct ceph_file_info *fi; 206 206 int ret; 207 207 208 - dout("%s %p %p 0%o (%s)\n", __func__, inode, file, 209 - inode->i_mode, isdir ? "dir" : "regular"); 208 + doutc(cl, "%p %llx.%llx %p 0%o (%s)\n", inode, ceph_vinop(inode), 209 + file, inode->i_mode, isdir ? 
"dir" : "regular"); 210 210 BUG_ON(inode->i_fop->release != ceph_release); 211 211 212 212 if (isdir) { ··· 261 259 */ 262 260 static int ceph_init_file(struct inode *inode, struct file *file, int fmode) 263 261 { 262 + struct ceph_client *cl = ceph_inode_to_client(inode); 264 263 int ret = 0; 265 264 266 265 switch (inode->i_mode & S_IFMT) { ··· 274 271 break; 275 272 276 273 case S_IFLNK: 277 - dout("init_file %p %p 0%o (symlink)\n", inode, file, 278 - inode->i_mode); 274 + doutc(cl, "%p %llx.%llx %p 0%o (symlink)\n", inode, 275 + ceph_vinop(inode), file, inode->i_mode); 279 276 break; 280 277 281 278 default: 282 - dout("init_file %p %p 0%o (special)\n", inode, file, 283 - inode->i_mode); 279 + doutc(cl, "%p %llx.%llx %p 0%o (special)\n", inode, 280 + ceph_vinop(inode), file, inode->i_mode); 284 281 /* 285 282 * we need to drop the open ref now, since we don't 286 283 * have .release set to ceph_release. ··· 299 296 int ceph_renew_caps(struct inode *inode, int fmode) 300 297 { 301 298 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 299 + struct ceph_client *cl = mdsc->fsc->client; 302 300 struct ceph_inode_info *ci = ceph_inode(inode); 303 301 struct ceph_mds_request *req; 304 302 int err, flags, wanted; ··· 311 307 (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) { 312 308 int issued = __ceph_caps_issued(ci, NULL); 313 309 spin_unlock(&ci->i_ceph_lock); 314 - dout("renew caps %p want %s issued %s updating mds_wanted\n", 315 - inode, ceph_cap_string(wanted), ceph_cap_string(issued)); 310 + doutc(cl, "%p %llx.%llx want %s issued %s updating mds_wanted\n", 311 + inode, ceph_vinop(inode), ceph_cap_string(wanted), 312 + ceph_cap_string(issued)); 316 313 ceph_check_caps(ci, 0); 317 314 return 0; 318 315 } ··· 344 339 err = ceph_mdsc_do_request(mdsc, NULL, req); 345 340 ceph_mdsc_put_request(req); 346 341 out: 347 - dout("renew caps %p open result=%d\n", inode, err); 342 + doutc(cl, "%p %llx.%llx open result=%d\n", inode, ceph_vinop(inode), 343 + err); 
348 344 return err < 0 ? err : 0; 349 345 } 350 346 ··· 359 353 { 360 354 struct ceph_inode_info *ci = ceph_inode(inode); 361 355 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb); 356 + struct ceph_client *cl = fsc->client; 362 357 struct ceph_mds_client *mdsc = fsc->mdsc; 363 358 struct ceph_mds_request *req; 364 359 struct ceph_file_info *fi = file->private_data; ··· 367 360 int flags, fmode, wanted; 368 361 369 362 if (fi) { 370 - dout("open file %p is already opened\n", file); 363 + doutc(cl, "file %p is already opened\n", file); 371 364 return 0; 372 365 } 373 366 ··· 381 374 return err; 382 375 } 383 376 384 - dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, 385 - ceph_vinop(inode), file, flags, file->f_flags); 377 + doutc(cl, "%p %llx.%llx file %p flags %d (%d)\n", inode, 378 + ceph_vinop(inode), file, flags, file->f_flags); 386 379 fmode = ceph_flags_to_mode(flags); 387 380 wanted = ceph_caps_for_mode(fmode); 388 381 ··· 406 399 int mds_wanted = __ceph_caps_mds_wanted(ci, true); 407 400 int issued = __ceph_caps_issued(ci, NULL); 408 401 409 - dout("open %p fmode %d want %s issued %s using existing\n", 410 - inode, fmode, ceph_cap_string(wanted), 411 - ceph_cap_string(issued)); 402 + doutc(cl, "open %p fmode %d want %s issued %s using existing\n", 403 + inode, fmode, ceph_cap_string(wanted), 404 + ceph_cap_string(issued)); 412 405 __ceph_touch_fmode(ci, mdsc, fmode); 413 406 spin_unlock(&ci->i_ceph_lock); 414 407 ··· 428 421 429 422 spin_unlock(&ci->i_ceph_lock); 430 423 431 - dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 424 + doutc(cl, "open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); 432 425 req = prepare_open_request(inode->i_sb, flags, 0); 433 426 if (IS_ERR(req)) { 434 427 err = PTR_ERR(req); ··· 442 435 if (!err) 443 436 err = ceph_init_file(inode, file, req->r_fmode); 444 437 ceph_mdsc_put_request(req); 445 - dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); 438 + doutc(cl, 
"open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); 446 439 out: 447 440 return err; 448 441 } ··· 522 515 523 516 static void restore_deleg_ino(struct inode *dir, u64 ino) 524 517 { 518 + struct ceph_client *cl = ceph_inode_to_client(dir); 525 519 struct ceph_inode_info *ci = ceph_inode(dir); 526 520 struct ceph_mds_session *s = NULL; 527 521 ··· 533 525 if (s) { 534 526 int err = ceph_restore_deleg_ino(s, ino); 535 527 if (err) 536 - pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n", 528 + pr_warn_client(cl, 529 + "unable to restore delegated ino 0x%llx to session: %d\n", 537 530 ino, err); 538 531 ceph_put_mds_session(s); 539 532 } ··· 566 557 static void ceph_async_create_cb(struct ceph_mds_client *mdsc, 567 558 struct ceph_mds_request *req) 568 559 { 560 + struct ceph_client *cl = mdsc->fsc->client; 569 561 struct dentry *dentry = req->r_dentry; 570 562 struct inode *dinode = d_inode(dentry); 571 563 struct inode *tinode = req->r_target_inode; ··· 587 577 char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen, 588 578 &base, 0); 589 579 590 - pr_warn("async create failure path=(%llx)%s result=%d!\n", 580 + pr_warn_client(cl, 581 + "async create failure path=(%llx)%s result=%d!\n", 591 582 base, IS_ERR(path) ? "<<bad>>" : path, result); 592 583 ceph_mdsc_free_path(path, pathlen); 593 584 ··· 607 596 u64 ino = ceph_vino(tinode).ino; 608 597 609 598 if (req->r_deleg_ino != ino) 610 - pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n", 611 - __func__, req->r_err, req->r_deleg_ino, ino); 599 + pr_warn_client(cl, 600 + "inode number mismatch! 
err=%d deleg_ino=0x%llx target=0x%llx\n", 601 + req->r_err, req->r_deleg_ino, ino); 612 602 613 603 mapping_set_error(tinode->i_mapping, result); 614 604 wake_async_create_waiters(tinode, req->r_session); 615 605 } else if (!result) { 616 - pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__, 617 - req->r_deleg_ino); 606 + pr_warn_client(cl, "no req->r_target_inode for 0x%llx\n", 607 + req->r_deleg_ino); 618 608 } 619 609 out: 620 610 ceph_mdsc_release_dir_caps(req); ··· 637 625 struct timespec64 now; 638 626 struct ceph_string *pool_ns; 639 627 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); 628 + struct ceph_client *cl = mdsc->fsc->client; 640 629 struct ceph_vino vino = { .ino = req->r_deleg_ino, 641 630 .snap = CEPH_NOSNAP }; 642 631 ··· 696 683 req->r_fmode, NULL); 697 684 up_read(&mdsc->snap_rwsem); 698 685 if (ret) { 699 - dout("%s failed to fill inode: %d\n", __func__, ret); 686 + doutc(cl, "failed to fill inode: %d\n", ret); 700 687 ceph_dir_clear_complete(dir); 701 688 if (!d_unhashed(dentry)) 702 689 d_drop(dentry); ··· 704 691 } else { 705 692 struct dentry *dn; 706 693 707 - dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__, 708 - vino.ino, ceph_ino(dir), dentry->d_name.name); 694 + doutc(cl, "d_adding new inode 0x%llx to 0x%llx/%s\n", 695 + vino.ino, ceph_ino(dir), dentry->d_name.name); 709 696 ceph_dir_clear_ordered(dir); 710 697 ceph_init_inode_acls(inode, as_ctx); 711 698 if (inode->i_state & I_NEW) { ··· 744 731 struct file *file, unsigned flags, umode_t mode) 745 732 { 746 733 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb); 734 + struct ceph_client *cl = fsc->client; 747 735 struct ceph_mds_client *mdsc = fsc->mdsc; 748 736 struct ceph_mds_request *req; 749 737 struct inode *new_inode = NULL; ··· 754 740 int mask; 755 741 int err; 756 742 757 - dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n", 758 - dir, dentry, dentry, 759 - d_unhashed(dentry) ? 
"unhashed" : "hashed", flags, mode); 743 + doutc(cl, "%p %llx.%llx dentry %p '%pd' %s flags %d mode 0%o\n", 744 + dir, ceph_vinop(dir), dentry, dentry, 745 + d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode); 760 746 761 747 if (dentry->d_name.len > NAME_MAX) 762 748 return -ENAMETOOLONG; ··· 894 880 goto out_req; 895 881 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { 896 882 /* make vfs retry on splice, ENOENT, or symlink */ 897 - dout("atomic_open finish_no_open on dn %p\n", dn); 883 + doutc(cl, "finish_no_open on dn %p\n", dn); 898 884 err = finish_no_open(file, dn); 899 885 } else { 900 886 if (IS_ENCRYPTED(dir) && 901 887 !fscrypt_has_permitted_context(dir, d_inode(dentry))) { 902 - pr_warn("Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n", 888 + pr_warn_client(cl, 889 + "Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n", 903 890 ceph_vinop(dir), ceph_vinop(d_inode(dentry))); 904 891 goto out_req; 905 892 } 906 893 907 - dout("atomic_open finish_open on dn %p\n", dn); 894 + doutc(cl, "finish_open on dn %p\n", dn); 908 895 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) { 909 896 struct inode *newino = d_inode(dentry); 910 897 ··· 920 905 iput(new_inode); 921 906 out_ctx: 922 907 ceph_release_acl_sec_ctx(&as_ctx); 923 - dout("atomic_open result=%d\n", err); 908 + doutc(cl, "result=%d\n", err); 924 909 return err; 925 910 } 926 911 927 912 int ceph_release(struct inode *inode, struct file *file) 928 913 { 914 + struct ceph_client *cl = ceph_inode_to_client(inode); 929 915 struct ceph_inode_info *ci = ceph_inode(inode); 930 916 931 917 if (S_ISDIR(inode->i_mode)) { 932 918 struct ceph_dir_file_info *dfi = file->private_data; 933 - dout("release inode %p dir file %p\n", inode, file); 919 + doutc(cl, "%p %llx.%llx dir file %p\n", inode, 920 + ceph_vinop(inode), file); 934 921 WARN_ON(!list_empty(&dfi->file_info.rw_contexts)); 935 922 936 923 ceph_put_fmode(ci, 
dfi->file_info.fmode, 1); ··· 944 927 kmem_cache_free(ceph_dir_file_cachep, dfi); 945 928 } else { 946 929 struct ceph_file_info *fi = file->private_data; 947 - dout("release inode %p regular file %p\n", inode, file); 930 + doutc(cl, "%p %llx.%llx regular file %p\n", inode, 931 + ceph_vinop(inode), file); 948 932 WARN_ON(!list_empty(&fi->rw_contexts)); 949 933 950 934 ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE); ··· 981 963 { 982 964 struct ceph_inode_info *ci = ceph_inode(inode); 983 965 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 966 + struct ceph_client *cl = fsc->client; 984 967 struct ceph_osd_client *osdc = &fsc->client->osdc; 985 968 ssize_t ret; 986 969 u64 off = *ki_pos; ··· 990 971 bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD); 991 972 u64 objver = 0; 992 973 993 - dout("sync_read on inode %p %llx~%llx\n", inode, *ki_pos, len); 974 + doutc(cl, "on inode %p %llx.%llx %llx~%llx\n", inode, 975 + ceph_vinop(inode), *ki_pos, len); 994 976 995 977 if (ceph_inode_is_shutdown(inode)) 996 978 return -EIO; ··· 1025 1005 /* determine new offset/length if encrypted */ 1026 1006 ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len); 1027 1007 1028 - dout("sync_read orig %llu~%llu reading %llu~%llu", 1029 - off, len, read_off, read_len); 1008 + doutc(cl, "orig %llu~%llu reading %llu~%llu", off, len, 1009 + read_off, read_len); 1030 1010 1031 1011 req = ceph_osdc_new_request(osdc, &ci->i_layout, 1032 1012 ci->i_vino, read_off, &read_len, 0, 1, ··· 1079 1059 objver = req->r_version; 1080 1060 1081 1061 i_size = i_size_read(inode); 1082 - dout("sync_read %llu~%llu got %zd i_size %llu%s\n", 1083 - off, len, ret, i_size, (more ? " MORE" : "")); 1062 + doutc(cl, "%llu~%llu got %zd i_size %llu%s\n", off, len, 1063 + ret, i_size, (more ? 
" MORE" : "")); 1084 1064 1085 1065 /* Fix it to go to end of extent map */ 1086 1066 if (sparse && ret >= 0) ··· 1121 1101 int zlen = min(len - ret, i_size - off - ret); 1122 1102 int zoff = page_off + ret; 1123 1103 1124 - dout("sync_read zero gap %llu~%llu\n", 1125 - off + ret, off + ret + zlen); 1104 + doutc(cl, "zero gap %llu~%llu\n", off + ret, 1105 + off + ret + zlen); 1126 1106 ceph_zero_page_vector_range(zoff, zlen, pages); 1127 1107 ret += zlen; 1128 1108 } ··· 1171 1151 if (last_objver) 1172 1152 *last_objver = objver; 1173 1153 } 1174 - dout("sync_read result %zd retry_op %d\n", ret, *retry_op); 1154 + doutc(cl, "result %zd retry_op %d\n", ret, *retry_op); 1175 1155 return ret; 1176 1156 } 1177 1157 ··· 1180 1160 { 1181 1161 struct file *file = iocb->ki_filp; 1182 1162 struct inode *inode = file_inode(file); 1163 + struct ceph_client *cl = ceph_inode_to_client(inode); 1183 1164 1184 - dout("sync_read on file %p %llx~%zx %s\n", file, iocb->ki_pos, 1185 - iov_iter_count(to), (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 1165 + doutc(cl, "on file %p %llx~%zx %s\n", file, iocb->ki_pos, 1166 + iov_iter_count(to), 1167 + (file->f_flags & O_DIRECT) ? 
"O_DIRECT" : ""); 1186 1168 1187 1169 return __ceph_sync_read(inode, &iocb->ki_pos, to, retry_op, NULL); 1188 1170 } ··· 1212 1190 static void ceph_aio_complete(struct inode *inode, 1213 1191 struct ceph_aio_request *aio_req) 1214 1192 { 1193 + struct ceph_client *cl = ceph_inode_to_client(inode); 1215 1194 struct ceph_inode_info *ci = ceph_inode(inode); 1216 1195 int ret; 1217 1196 ··· 1226 1203 if (!ret) 1227 1204 ret = aio_req->total_len; 1228 1205 1229 - dout("ceph_aio_complete %p rc %d\n", inode, ret); 1206 + doutc(cl, "%p %llx.%llx rc %d\n", inode, ceph_vinop(inode), ret); 1230 1207 1231 1208 if (ret >= 0 && aio_req->write) { 1232 1209 int dirty; ··· 1265 1242 struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric; 1266 1243 unsigned int len = osd_data->bvec_pos.iter.bi_size; 1267 1244 bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ); 1245 + struct ceph_client *cl = ceph_inode_to_client(inode); 1268 1246 1269 1247 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS); 1270 1248 BUG_ON(!osd_data->num_bvecs); 1271 1249 1272 - dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len); 1250 + doutc(cl, "req %p inode %p %llx.%llx, rc %d bytes %u\n", req, 1251 + inode, ceph_vinop(inode), rc, len); 1273 1252 1274 1253 if (rc == -EOLDSNAPC) { 1275 1254 struct ceph_aio_work *aio_work; ··· 1412 1387 struct inode *inode = file_inode(file); 1413 1388 struct ceph_inode_info *ci = ceph_inode(inode); 1414 1389 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 1390 + struct ceph_client *cl = fsc->client; 1415 1391 struct ceph_client_metric *metric = &fsc->mdsc->metric; 1416 1392 struct ceph_vino vino; 1417 1393 struct ceph_osd_request *req; ··· 1431 1405 if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP) 1432 1406 return -EROFS; 1433 1407 1434 - dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n", 1435 - (write ? "write" : "read"), file, pos, (unsigned)count, 1436 - snapc, snapc ? 
snapc->seq : 0); 1408 + doutc(cl, "sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n", 1409 + (write ? "write" : "read"), file, pos, (unsigned)count, 1410 + snapc, snapc ? snapc->seq : 0); 1437 1411 1438 1412 if (write) { 1439 1413 int ret2; ··· 1444 1418 pos >> PAGE_SHIFT, 1445 1419 (pos + count - 1) >> PAGE_SHIFT); 1446 1420 if (ret2 < 0) 1447 - dout("invalidate_inode_pages2_range returned %d\n", ret2); 1421 + doutc(cl, "invalidate_inode_pages2_range returned %d\n", 1422 + ret2); 1448 1423 1449 1424 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE; 1450 1425 } else { ··· 1638 1611 struct inode *inode = file_inode(file); 1639 1612 struct ceph_inode_info *ci = ceph_inode(inode); 1640 1613 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 1614 + struct ceph_client *cl = fsc->client; 1641 1615 struct ceph_osd_client *osdc = &fsc->client->osdc; 1642 1616 struct ceph_osd_request *req; 1643 1617 struct page **pages; ··· 1653 1625 if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) 1654 1626 return -EROFS; 1655 1627 1656 - dout("sync_write on file %p %lld~%u snapc %p seq %lld\n", 1657 - file, pos, (unsigned)count, snapc, snapc->seq); 1628 + doutc(cl, "on file %p %lld~%u snapc %p seq %lld\n", file, pos, 1629 + (unsigned)count, snapc, snapc->seq); 1658 1630 1659 1631 ret = filemap_write_and_wait_range(inode->i_mapping, 1660 1632 pos, pos + count - 1); ··· 1698 1670 last = (pos + len) != (write_pos + write_len); 1699 1671 rmw = first || last; 1700 1672 1701 - dout("sync_write ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n", 1702 - ci->i_vino.ino, pos, len, write_pos, write_len, 1703 - rmw ? "" : "no "); 1673 + doutc(cl, "ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n", 1674 + ci->i_vino.ino, pos, len, write_pos, write_len, 1675 + rmw ? 
"" : "no "); 1704 1676 1705 1677 /* 1706 1678 * The data is emplaced into the page as it would be if it were ··· 1909 1881 left -= ret; 1910 1882 } 1911 1883 if (ret < 0) { 1912 - dout("sync_write write failed with %d\n", ret); 1884 + doutc(cl, "write failed with %d\n", ret); 1913 1885 ceph_release_page_vector(pages, num_pages); 1914 1886 break; 1915 1887 } ··· 1919 1891 write_pos, write_len, 1920 1892 GFP_KERNEL); 1921 1893 if (ret < 0) { 1922 - dout("encryption failed with %d\n", ret); 1894 + doutc(cl, "encryption failed with %d\n", ret); 1923 1895 ceph_release_page_vector(pages, num_pages); 1924 1896 break; 1925 1897 } ··· 1938 1910 break; 1939 1911 } 1940 1912 1941 - dout("sync_write write op %lld~%llu\n", write_pos, write_len); 1913 + doutc(cl, "write op %lld~%llu\n", write_pos, write_len); 1942 1914 osd_req_op_extent_osd_data_pages(req, rmw ? 1 : 0, pages, write_len, 1943 1915 offset_in_page(write_pos), false, 1944 1916 true); ··· 1969 1941 req->r_end_latency, len, ret); 1970 1942 ceph_osdc_put_request(req); 1971 1943 if (ret != 0) { 1972 - dout("sync_write osd write returned %d\n", ret); 1944 + doutc(cl, "osd write returned %d\n", ret); 1973 1945 /* Version changed! 
Must re-do the rmw cycle */ 1974 1946 if ((assert_ver && (ret == -ERANGE || ret == -EOVERFLOW)) || 1975 1947 (!assert_ver && ret == -EEXIST)) { ··· 1999 1971 pos >> PAGE_SHIFT, 2000 1972 (pos + len - 1) >> PAGE_SHIFT); 2001 1973 if (ret < 0) { 2002 - dout("invalidate_inode_pages2_range returned %d\n", 2003 - ret); 1974 + doutc(cl, "invalidate_inode_pages2_range returned %d\n", 1975 + ret); 2004 1976 ret = 0; 2005 1977 } 2006 1978 pos += len; 2007 1979 written += len; 2008 - dout("sync_write written %d\n", written); 1980 + doutc(cl, "written %d\n", written); 2009 1981 if (pos > i_size_read(inode)) { 2010 1982 check_caps = ceph_inode_set_size(inode, pos); 2011 1983 if (check_caps) ··· 2019 1991 ret = written; 2020 1992 iocb->ki_pos = pos; 2021 1993 } 2022 - dout("sync_write returning %d\n", ret); 1994 + doutc(cl, "returning %d\n", ret); 2023 1995 return ret; 2024 1996 } 2025 1997 ··· 2038 2010 struct inode *inode = file_inode(filp); 2039 2011 struct ceph_inode_info *ci = ceph_inode(inode); 2040 2012 bool direct_lock = iocb->ki_flags & IOCB_DIRECT; 2013 + struct ceph_client *cl = ceph_inode_to_client(inode); 2041 2014 ssize_t ret; 2042 2015 int want = 0, got = 0; 2043 2016 int retry_op = 0, read = 0; 2044 2017 2045 2018 again: 2046 - dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", 2047 - inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); 2019 + doutc(cl, "%llu~%u trying to get caps on %p %llx.%llx\n", 2020 + iocb->ki_pos, (unsigned)len, inode, ceph_vinop(inode)); 2048 2021 2049 2022 if (ceph_inode_is_shutdown(inode)) 2050 2023 return -ESTALE; ··· 2073 2044 (iocb->ki_flags & IOCB_DIRECT) || 2074 2045 (fi->flags & CEPH_F_SYNC)) { 2075 2046 2076 - dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n", 2077 - inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 2078 - ceph_cap_string(got)); 2047 + doutc(cl, "sync %p %llx.%llx %llu~%u got cap refs on %s\n", 2048 + inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 2049 + 
ceph_cap_string(got)); 2079 2050 2080 2051 if (!ceph_has_inline_data(ci)) { 2081 2052 if (!retry_op && ··· 2093 2064 } 2094 2065 } else { 2095 2066 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got); 2096 - dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", 2097 - inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 2098 - ceph_cap_string(got)); 2067 + doutc(cl, "async %p %llx.%llx %llu~%u got cap refs on %s\n", 2068 + inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, 2069 + ceph_cap_string(got)); 2099 2070 ceph_add_rw_context(fi, &rw_ctx); 2100 2071 ret = generic_file_read_iter(iocb, to); 2101 2072 ceph_del_rw_context(fi, &rw_ctx); 2102 2073 } 2103 2074 2104 - dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", 2105 - inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 2075 + doutc(cl, "%p %llx.%llx dropping cap refs on %s = %d\n", 2076 + inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); 2106 2077 ceph_put_cap_refs(ci, got); 2107 2078 2108 2079 if (direct_lock) ··· 2162 2133 /* hit EOF or hole? */ 2163 2134 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && 2164 2135 ret < len) { 2165 - dout("sync_read hit hole, ppos %lld < size %lld" 2166 - ", reading more\n", iocb->ki_pos, i_size); 2136 + doutc(cl, "hit hole, ppos %lld < size %lld, reading more\n", 2137 + iocb->ki_pos, i_size); 2167 2138 2168 2139 read += ret; 2169 2140 len -= ret; ··· 2258 2229 struct inode *inode = file_inode(file); 2259 2230 struct ceph_inode_info *ci = ceph_inode(inode); 2260 2231 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 2232 + struct ceph_client *cl = fsc->client; 2261 2233 struct ceph_osd_client *osdc = &fsc->client->osdc; 2262 2234 struct ceph_cap_flush *prealloc_cf; 2263 2235 ssize_t count, written = 0; ··· 2326 2296 if (err) 2327 2297 goto out; 2328 2298 2329 - dout("aio_write %p %llx.%llx %llu~%zd getting caps. 
i_size %llu\n", 2330 - inode, ceph_vinop(inode), pos, count, i_size_read(inode)); 2299 + doutc(cl, "%p %llx.%llx %llu~%zd getting caps. i_size %llu\n", 2300 + inode, ceph_vinop(inode), pos, count, 2301 + i_size_read(inode)); 2331 2302 if (!(fi->flags & CEPH_F_SYNC) && !direct_lock) 2332 2303 want |= CEPH_CAP_FILE_BUFFER; 2333 2304 if (fi->fmode & CEPH_FILE_MODE_LAZY) ··· 2344 2313 2345 2314 inode_inc_iversion_raw(inode); 2346 2315 2347 - dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", 2348 - inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 2316 + doutc(cl, "%p %llx.%llx %llu~%zd got cap refs on %s\n", 2317 + inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); 2349 2318 2350 2319 if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || 2351 2320 (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) || ··· 2405 2374 ceph_check_caps(ci, CHECK_CAPS_FLUSH); 2406 2375 } 2407 2376 2408 - dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", 2409 - inode, ceph_vinop(inode), pos, (unsigned)count, 2410 - ceph_cap_string(got)); 2377 + doutc(cl, "%p %llx.%llx %llu~%u dropping cap refs on %s\n", 2378 + inode, ceph_vinop(inode), pos, (unsigned)count, 2379 + ceph_cap_string(got)); 2411 2380 ceph_put_cap_refs(ci, got); 2412 2381 2413 2382 if (written == -EOLDSNAPC) { 2414 - dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n", 2415 - inode, ceph_vinop(inode), pos, (unsigned)count); 2383 + doutc(cl, "%p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n", 2384 + inode, ceph_vinop(inode), pos, (unsigned)count); 2416 2385 goto retry_snap; 2417 2386 } 2418 2387 ··· 2584 2553 struct inode *inode = file_inode(file); 2585 2554 struct ceph_inode_info *ci = ceph_inode(inode); 2586 2555 struct ceph_cap_flush *prealloc_cf; 2556 + struct ceph_client *cl = ceph_inode_to_client(inode); 2587 2557 int want, got = 0; 2588 2558 int dirty; 2589 2559 int ret = 0; 2590 2560 loff_t endoff = 0; 2591 2561 loff_t size; 2592 2562 2593 - 
dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__, 2594 - inode, ceph_vinop(inode), mode, offset, length); 2563 + doutc(cl, "%p %llx.%llx mode %x, offset %llu length %llu\n", 2564 + inode, ceph_vinop(inode), mode, offset, length); 2595 2565 2596 2566 if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 2597 2567 return -EOPNOTSUPP; ··· 2721 2689 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode, 2722 2690 loff_t src_off, loff_t dst_off, size_t len) 2723 2691 { 2692 + struct ceph_client *cl = ceph_inode_to_client(src_inode); 2724 2693 loff_t size, endoff; 2725 2694 2726 2695 size = i_size_read(src_inode); ··· 2732 2699 * inode. 2733 2700 */ 2734 2701 if (src_off + len > size) { 2735 - dout("Copy beyond EOF (%llu + %zu > %llu)\n", 2736 - src_off, len, size); 2702 + doutc(cl, "Copy beyond EOF (%llu + %zu > %llu)\n", src_off, 2703 + len, size); 2737 2704 return -EOPNOTSUPP; 2738 2705 } 2739 2706 size = i_size_read(dst_inode); ··· 2809 2776 u64 src_objnum, src_objoff, dst_objnum, dst_objoff; 2810 2777 u32 src_objlen, dst_objlen; 2811 2778 u32 object_size = src_ci->i_layout.object_size; 2779 + struct ceph_client *cl = fsc->client; 2812 2780 int ret; 2813 2781 2814 2782 src_oloc.pool = src_ci->i_layout.pool_id; ··· 2851 2817 if (ret) { 2852 2818 if (ret == -EOPNOTSUPP) { 2853 2819 fsc->have_copy_from2 = false; 2854 - pr_notice("OSDs don't support copy-from2; disabling copy offload\n"); 2820 + pr_notice_client(cl, 2821 + "OSDs don't support copy-from2; disabling copy offload\n"); 2855 2822 } 2856 - dout("ceph_osdc_copy_from returned %d\n", ret); 2823 + doutc(cl, "returned %d\n", ret); 2857 2824 if (!bytes) 2858 2825 bytes = ret; 2859 2826 goto out; ··· 2881 2846 struct ceph_inode_info *dst_ci = ceph_inode(dst_inode); 2882 2847 struct ceph_cap_flush *prealloc_cf; 2883 2848 struct ceph_fs_client *src_fsc = ceph_inode_to_fs_client(src_inode); 2849 + struct ceph_client *cl = src_fsc->client; 2884 2850 loff_t size; 2885 2851 
ssize_t ret = -EIO, bytes; 2886 2852 u64 src_objnum, dst_objnum, src_objoff, dst_objoff; ··· 2924 2888 (src_ci->i_layout.stripe_count != 1) || 2925 2889 (dst_ci->i_layout.stripe_count != 1) || 2926 2890 (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) { 2927 - dout("Invalid src/dst files layout\n"); 2891 + doutc(cl, "Invalid src/dst files layout\n"); 2928 2892 return -EOPNOTSUPP; 2929 2893 } 2930 2894 ··· 2942 2906 /* Start by sync'ing the source and destination files */ 2943 2907 ret = file_write_and_wait_range(src_file, src_off, (src_off + len)); 2944 2908 if (ret < 0) { 2945 - dout("failed to write src file (%zd)\n", ret); 2909 + doutc(cl, "failed to write src file (%zd)\n", ret); 2946 2910 goto out; 2947 2911 } 2948 2912 ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len)); 2949 2913 if (ret < 0) { 2950 - dout("failed to write dst file (%zd)\n", ret); 2914 + doutc(cl, "failed to write dst file (%zd)\n", ret); 2951 2915 goto out; 2952 2916 } 2953 2917 ··· 2959 2923 err = get_rd_wr_caps(src_file, &src_got, 2960 2924 dst_file, (dst_off + len), &dst_got); 2961 2925 if (err < 0) { 2962 - dout("get_rd_wr_caps returned %d\n", err); 2926 + doutc(cl, "get_rd_wr_caps returned %d\n", err); 2963 2927 ret = -EOPNOTSUPP; 2964 2928 goto out; 2965 2929 } ··· 2974 2938 dst_off >> PAGE_SHIFT, 2975 2939 (dst_off + len) >> PAGE_SHIFT); 2976 2940 if (ret < 0) { 2977 - dout("Failed to invalidate inode pages (%zd)\n", ret); 2941 + doutc(cl, "Failed to invalidate inode pages (%zd)\n", 2942 + ret); 2978 2943 ret = 0; /* XXX */ 2979 2944 } 2980 2945 ceph_calc_file_object_mapping(&src_ci->i_layout, src_off, ··· 2996 2959 * starting at the src_off 2997 2960 */ 2998 2961 if (src_objoff) { 2999 - dout("Initial partial copy of %u bytes\n", src_objlen); 2962 + doutc(cl, "Initial partial copy of %u bytes\n", src_objlen); 3000 2963 3001 2964 /* 3002 2965 * we need to temporarily drop all caps as we'll be calling ··· 3007 2970 &dst_off, src_objlen, flags); 3008 2971 
/* Abort on short copies or on error */ 3009 2972 if (ret < (long)src_objlen) { 3010 - dout("Failed partial copy (%zd)\n", ret); 2973 + doutc(cl, "Failed partial copy (%zd)\n", ret); 3011 2974 goto out; 3012 2975 } 3013 2976 len -= ret; ··· 3029 2992 ret = bytes; 3030 2993 goto out_caps; 3031 2994 } 3032 - dout("Copied %zu bytes out of %zu\n", bytes, len); 2995 + doutc(cl, "Copied %zu bytes out of %zu\n", bytes, len); 3033 2996 len -= bytes; 3034 2997 ret += bytes; 3035 2998 ··· 3057 3020 * there were errors in remote object copies (len >= object_size). 3058 3021 */ 3059 3022 if (len && (len < src_ci->i_layout.object_size)) { 3060 - dout("Final partial copy of %zu bytes\n", len); 3023 + doutc(cl, "Final partial copy of %zu bytes\n", len); 3061 3024 bytes = do_splice_direct(src_file, &src_off, dst_file, 3062 3025 &dst_off, len, flags); 3063 3026 if (bytes > 0) 3064 3027 ret += bytes; 3065 3028 else 3066 - dout("Failed partial copy (%zd)\n", bytes); 3029 + doutc(cl, "Failed partial copy (%zd)\n", bytes); 3067 3030 } 3068 3031 3069 3032 out:
+267 -218
fs/ceph/inode.c
··· 129 129 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino, 130 130 struct inode *newino) 131 131 { 132 + struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb); 133 + struct ceph_client *cl = mdsc->fsc->client; 132 134 struct inode *inode; 133 135 134 136 if (ceph_vino_is_reserved(vino)) ··· 147 145 } 148 146 149 147 if (!inode) { 150 - dout("No inode found for %llx.%llx\n", vino.ino, vino.snap); 148 + doutc(cl, "no inode found for %llx.%llx\n", vino.ino, vino.snap); 151 149 return ERR_PTR(-ENOMEM); 152 150 } 153 151 154 - dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode), 155 - ceph_vinop(inode), inode, !!(inode->i_state & I_NEW)); 152 + doutc(cl, "on %llx=%llx.%llx got %p new %d\n", 153 + ceph_present_inode(inode), ceph_vinop(inode), inode, 154 + !!(inode->i_state & I_NEW)); 156 155 return inode; 157 156 } 158 157 ··· 162 159 */ 163 160 struct inode *ceph_get_snapdir(struct inode *parent) 164 161 { 162 + struct ceph_client *cl = ceph_inode_to_client(parent); 165 163 struct ceph_vino vino = { 166 164 .ino = ceph_ino(parent), 167 165 .snap = CEPH_SNAPDIR, ··· 175 171 return inode; 176 172 177 173 if (!S_ISDIR(parent->i_mode)) { 178 - pr_warn_once("bad snapdir parent type (mode=0%o)\n", 179 - parent->i_mode); 174 + pr_warn_once_client(cl, "bad snapdir parent type (mode=0%o)\n", 175 + parent->i_mode); 180 176 goto err; 181 177 } 182 178 183 179 if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) { 184 - pr_warn_once("bad snapdir inode type (mode=0%o)\n", 185 - inode->i_mode); 180 + pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n", 181 + inode->i_mode); 186 182 goto err; 187 183 } 188 184 ··· 207 203 inode->i_flags |= S_ENCRYPTED; 208 204 ci->fscrypt_auth_len = pci->fscrypt_auth_len; 209 205 } else { 210 - dout("Failed to alloc snapdir fscrypt_auth\n"); 206 + doutc(cl, "Failed to alloc snapdir fscrypt_auth\n"); 211 207 ret = -ENOMEM; 212 208 goto err; 213 209 } ··· 253 249 static struct 
ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, 254 250 u32 f) 255 251 { 252 + struct inode *inode = &ci->netfs.inode; 253 + struct ceph_client *cl = ceph_inode_to_client(inode); 256 254 struct rb_node **p; 257 255 struct rb_node *parent = NULL; 258 256 struct ceph_inode_frag *frag; ··· 285 279 rb_link_node(&frag->node, parent, p); 286 280 rb_insert_color(&frag->node, &ci->i_fragtree); 287 281 288 - dout("get_or_create_frag added %llx.%llx frag %x\n", 289 - ceph_vinop(&ci->netfs.inode), f); 282 + doutc(cl, "added %p %llx.%llx frag %x\n", inode, ceph_vinop(inode), f); 290 283 return frag; 291 284 } 292 285 ··· 318 313 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v, 319 314 struct ceph_inode_frag *pfrag, int *found) 320 315 { 316 + struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode); 321 317 u32 t = ceph_frag_make(0, 0); 322 318 struct ceph_inode_frag *frag; 323 319 unsigned nway, i; ··· 342 336 343 337 /* choose child */ 344 338 nway = 1 << frag->split_by; 345 - dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t, 346 - frag->split_by, nway); 339 + doutc(cl, "frag(%x) %x splits by %d (%d ways)\n", v, t, 340 + frag->split_by, nway); 347 341 for (i = 0; i < nway; i++) { 348 342 n = ceph_frag_make_child(t, frag->split_by, i); 349 343 if (ceph_frag_contains_value(n, v)) { ··· 353 347 } 354 348 BUG_ON(i == nway); 355 349 } 356 - dout("choose_frag(%x) = %x\n", v, t); 350 + doutc(cl, "frag(%x) = %x\n", v, t); 357 351 358 352 return t; 359 353 } ··· 377 371 struct ceph_mds_reply_dirfrag *dirinfo) 378 372 { 379 373 struct ceph_inode_info *ci = ceph_inode(inode); 374 + struct ceph_client *cl = ceph_inode_to_client(inode); 380 375 struct ceph_inode_frag *frag; 381 376 u32 id = le32_to_cpu(dirinfo->frag); 382 377 int mds = le32_to_cpu(dirinfo->auth); ··· 402 395 goto out; 403 396 if (frag->split_by == 0) { 404 397 /* tree leaf, remove */ 405 - dout("fill_dirfrag removed %llx.%llx frag %x" 406 - " (no ref)\n", 
ceph_vinop(inode), id); 398 + doutc(cl, "removed %p %llx.%llx frag %x (no ref)\n", 399 + inode, ceph_vinop(inode), id); 407 400 rb_erase(&frag->node, &ci->i_fragtree); 408 401 kfree(frag); 409 402 } else { 410 403 /* tree branch, keep and clear */ 411 - dout("fill_dirfrag cleared %llx.%llx frag %x" 412 - " referral\n", ceph_vinop(inode), id); 404 + doutc(cl, "cleared %p %llx.%llx frag %x referral\n", 405 + inode, ceph_vinop(inode), id); 413 406 frag->mds = -1; 414 407 frag->ndist = 0; 415 408 } ··· 422 415 if (IS_ERR(frag)) { 423 416 /* this is not the end of the world; we can continue 424 417 with bad/inaccurate delegation info */ 425 - pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n", 426 - ceph_vinop(inode), le32_to_cpu(dirinfo->frag)); 418 + pr_err_client(cl, "ENOMEM on mds ref %p %llx.%llx fg %x\n", 419 + inode, ceph_vinop(inode), 420 + le32_to_cpu(dirinfo->frag)); 427 421 err = -ENOMEM; 428 422 goto out; 429 423 } ··· 433 425 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP); 434 426 for (i = 0; i < frag->ndist; i++) 435 427 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]); 436 - dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n", 437 - ceph_vinop(inode), frag->frag, frag->ndist); 428 + doutc(cl, "%p %llx.%llx frag %x ndist=%d\n", inode, 429 + ceph_vinop(inode), frag->frag, frag->ndist); 438 430 439 431 out: 440 432 mutex_unlock(&ci->i_fragtree_mutex); ··· 462 454 struct ceph_frag_tree_head *fragtree, 463 455 struct ceph_mds_reply_dirfrag *dirinfo) 464 456 { 457 + struct ceph_client *cl = ceph_inode_to_client(inode); 465 458 struct ceph_inode_info *ci = ceph_inode(inode); 466 459 struct ceph_inode_frag *frag, *prev_frag = NULL; 467 460 struct rb_node *rb_node; ··· 498 489 frag_tree_split_cmp, NULL); 499 490 } 500 491 501 - dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode)); 492 + doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode)); 502 493 rb_node = rb_first(&ci->i_fragtree); 503 494 for (i = 0; i < nsplits; i++) { 504 495 id = 
le32_to_cpu(fragtree->splits[i].frag); 505 496 split_by = le32_to_cpu(fragtree->splits[i].by); 506 497 if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) { 507 - pr_err("fill_fragtree %llx.%llx invalid split %d/%u, " 508 - "frag %x split by %d\n", ceph_vinop(inode), 509 - i, nsplits, id, split_by); 498 + pr_err_client(cl, "%p %llx.%llx invalid split %d/%u, " 499 + "frag %x split by %d\n", inode, 500 + ceph_vinop(inode), i, nsplits, id, split_by); 510 501 continue; 511 502 } 512 503 frag = NULL; ··· 538 529 if (frag->split_by == 0) 539 530 ci->i_fragtree_nsplits++; 540 531 frag->split_by = split_by; 541 - dout(" frag %x split by %d\n", frag->frag, frag->split_by); 532 + doutc(cl, " frag %x split by %d\n", frag->frag, frag->split_by); 542 533 prev_frag = frag; 543 534 } 544 535 while (rb_node) { ··· 563 554 */ 564 555 struct inode *ceph_alloc_inode(struct super_block *sb) 565 556 { 557 + struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb); 566 558 struct ceph_inode_info *ci; 567 559 int i; 568 560 ··· 571 561 if (!ci) 572 562 return NULL; 573 563 574 - dout("alloc_inode %p\n", &ci->netfs.inode); 564 + doutc(fsc->client, "%p\n", &ci->netfs.inode); 575 565 576 566 /* Set parameters for the netfs library */ 577 567 netfs_inode_init(&ci->netfs, &ceph_netfs_ops); ··· 685 675 { 686 676 struct ceph_inode_info *ci = ceph_inode(inode); 687 677 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 678 + struct ceph_client *cl = ceph_inode_to_client(inode); 688 679 struct ceph_inode_frag *frag; 689 680 struct rb_node *n; 690 681 691 - dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode)); 682 + doutc(cl, "%p ino %llx.%llx\n", inode, ceph_vinop(inode)); 692 683 693 684 percpu_counter_dec(&mdsc->metric.total_inodes); 694 685 ··· 712 701 */ 713 702 if (ci->i_snap_realm) { 714 703 if (ceph_snap(inode) == CEPH_NOSNAP) { 715 - dout(" dropping residual ref to snap realm %p\n", 716 - ci->i_snap_realm); 704 + doutc(cl, " dropping residual ref to snap realm 
%p\n", 705 + ci->i_snap_realm); 717 706 ceph_change_snap_realm(inode, NULL); 718 707 } else { 719 708 ceph_put_snapid_map(mdsc, ci->i_snapid_map); ··· 754 743 int ceph_fill_file_size(struct inode *inode, int issued, 755 744 u32 truncate_seq, u64 truncate_size, u64 size) 756 745 { 746 + struct ceph_client *cl = ceph_inode_to_client(inode); 757 747 struct ceph_inode_info *ci = ceph_inode(inode); 758 748 int queue_trunc = 0; 759 749 loff_t isize = i_size_read(inode); 760 750 761 751 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || 762 752 (truncate_seq == ci->i_truncate_seq && size > isize)) { 763 - dout("size %lld -> %llu\n", isize, size); 753 + doutc(cl, "size %lld -> %llu\n", isize, size); 764 754 if (size > 0 && S_ISDIR(inode->i_mode)) { 765 - pr_err("fill_file_size non-zero size for directory\n"); 755 + pr_err_client(cl, "non-zero size for directory\n"); 766 756 size = 0; 767 757 } 768 758 i_size_write(inode, size); ··· 776 764 ceph_fscache_update(inode); 777 765 ci->i_reported_size = size; 778 766 if (truncate_seq != ci->i_truncate_seq) { 779 - dout("%s truncate_seq %u -> %u\n", __func__, 780 - ci->i_truncate_seq, truncate_seq); 767 + doutc(cl, "truncate_seq %u -> %u\n", 768 + ci->i_truncate_seq, truncate_seq); 781 769 ci->i_truncate_seq = truncate_seq; 782 770 783 771 /* the MDS should have revoked these caps */ ··· 806 794 * anyway. 
807 795 */ 808 796 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) { 809 - dout("%s truncate_size %lld -> %llu, encrypted %d\n", __func__, 810 - ci->i_truncate_size, truncate_size, !!IS_ENCRYPTED(inode)); 797 + doutc(cl, "truncate_size %lld -> %llu, encrypted %d\n", 798 + ci->i_truncate_size, truncate_size, 799 + !!IS_ENCRYPTED(inode)); 811 800 812 801 ci->i_truncate_size = truncate_size; 813 802 814 803 if (IS_ENCRYPTED(inode)) { 815 - dout("%s truncate_pagecache_size %lld -> %llu\n", 816 - __func__, ci->i_truncate_pagecache_size, size); 804 + doutc(cl, "truncate_pagecache_size %lld -> %llu\n", 805 + ci->i_truncate_pagecache_size, size); 817 806 ci->i_truncate_pagecache_size = size; 818 807 } else { 819 808 ci->i_truncate_pagecache_size = truncate_size; ··· 827 814 u64 time_warp_seq, struct timespec64 *ctime, 828 815 struct timespec64 *mtime, struct timespec64 *atime) 829 816 { 817 + struct ceph_client *cl = ceph_inode_to_client(inode); 830 818 struct ceph_inode_info *ci = ceph_inode(inode); 831 819 struct timespec64 ictime = inode_get_ctime(inode); 832 820 int warn = 0; ··· 839 825 CEPH_CAP_XATTR_EXCL)) { 840 826 if (ci->i_version == 0 || 841 827 timespec64_compare(ctime, &ictime) > 0) { 842 - dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n", 828 + doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n", 843 829 ictime.tv_sec, ictime.tv_nsec, 844 830 ctime->tv_sec, ctime->tv_nsec); 845 831 inode_set_ctime_to_ts(inode, *ctime); ··· 847 833 if (ci->i_version == 0 || 848 834 ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 849 835 /* the MDS did a utimes() */ 850 - dout("mtime %lld.%09ld -> %lld.%09ld " 851 - "tw %d -> %d\n", 852 - inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 853 - mtime->tv_sec, mtime->tv_nsec, 854 - ci->i_time_warp_seq, (int)time_warp_seq); 836 + doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n", 837 + inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 838 + mtime->tv_sec, mtime->tv_nsec, 839 + ci->i_time_warp_seq, 
(int)time_warp_seq); 855 840 856 841 inode->i_mtime = *mtime; 857 842 inode->i_atime = *atime; ··· 858 845 } else if (time_warp_seq == ci->i_time_warp_seq) { 859 846 /* nobody did utimes(); take the max */ 860 847 if (timespec64_compare(mtime, &inode->i_mtime) > 0) { 861 - dout("mtime %lld.%09ld -> %lld.%09ld inc\n", 862 - inode->i_mtime.tv_sec, 863 - inode->i_mtime.tv_nsec, 864 - mtime->tv_sec, mtime->tv_nsec); 848 + doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n", 849 + inode->i_mtime.tv_sec, 850 + inode->i_mtime.tv_nsec, 851 + mtime->tv_sec, mtime->tv_nsec); 865 852 inode->i_mtime = *mtime; 866 853 } 867 854 if (timespec64_compare(atime, &inode->i_atime) > 0) { 868 - dout("atime %lld.%09ld -> %lld.%09ld inc\n", 869 - inode->i_atime.tv_sec, 870 - inode->i_atime.tv_nsec, 871 - atime->tv_sec, atime->tv_nsec); 855 + doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n", 856 + inode->i_atime.tv_sec, 857 + inode->i_atime.tv_nsec, 858 + atime->tv_sec, atime->tv_nsec); 872 859 inode->i_atime = *atime; 873 860 } 874 861 } else if (issued & CEPH_CAP_FILE_EXCL) { ··· 888 875 } 889 876 } 890 877 if (warn) /* time_warp_seq shouldn't go backwards */ 891 - dout("%p mds time_warp_seq %llu < %u\n", 892 - inode, time_warp_seq, ci->i_time_warp_seq); 878 + doutc(cl, "%p mds time_warp_seq %llu < %u\n", inode, 879 + time_warp_seq, ci->i_time_warp_seq); 893 880 } 894 881 895 882 #if IS_ENABLED(CONFIG_FS_ENCRYPTION) 896 - static int decode_encrypted_symlink(const char *encsym, int enclen, u8 **decsym) 883 + static int decode_encrypted_symlink(struct ceph_mds_client *mdsc, 884 + const char *encsym, 885 + int enclen, u8 **decsym) 897 886 { 887 + struct ceph_client *cl = mdsc->fsc->client; 898 888 int declen; 899 889 u8 *sym; 900 890 ··· 907 891 908 892 declen = ceph_base64_decode(encsym, enclen, sym); 909 893 if (declen < 0) { 910 - pr_err("%s: can't decode symlink (%d). Content: %.*s\n", 911 - __func__, declen, enclen, encsym); 894 + pr_err_client(cl, 895 + "can't decode symlink (%d). 
Content: %.*s\n", 896 + declen, enclen, encsym); 912 897 kfree(sym); 913 898 return -EIO; 914 899 } ··· 918 901 return declen; 919 902 } 920 903 #else 921 - static int decode_encrypted_symlink(const char *encsym, int symlen, u8 **decsym) 904 + static int decode_encrypted_symlink(struct ceph_mds_client *mdsc, 905 + const char *encsym, 906 + int symlen, u8 **decsym) 922 907 { 923 908 return -EOPNOTSUPP; 924 909 } ··· 937 918 struct ceph_cap_reservation *caps_reservation) 938 919 { 939 920 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 921 + struct ceph_client *cl = mdsc->fsc->client; 940 922 struct ceph_mds_reply_inode *info = iinfo->in; 941 923 struct ceph_inode_info *ci = ceph_inode(inode); 942 924 int issued, new_issued, info_caps; ··· 956 936 957 937 lockdep_assert_held(&mdsc->snap_rwsem); 958 938 959 - dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__, 960 - inode, ceph_vinop(inode), le64_to_cpu(info->version), 961 - ci->i_version); 939 + doutc(cl, "%p ino %llx.%llx v %llu had %llu\n", inode, ceph_vinop(inode), 940 + le64_to_cpu(info->version), ci->i_version); 962 941 963 942 /* Once I_NEW is cleared, we can't change type or dev numbers */ 964 943 if (inode->i_state & I_NEW) { 965 944 inode->i_mode = mode; 966 945 } else { 967 946 if (inode_wrong_type(inode, mode)) { 968 - pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n", 969 - ceph_vinop(inode), inode->i_mode, mode); 947 + pr_warn_once_client(cl, 948 + "inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n", 949 + ceph_vinop(inode), inode->i_mode, mode); 970 950 return -ESTALE; 971 951 } 972 952 973 953 if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) { 974 - pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n", 975 - ceph_vinop(inode), MAJOR(inode->i_rdev), 976 - MINOR(inode->i_rdev), MAJOR(rdev), 977 - MINOR(rdev)); 954 + pr_warn_once_client(cl, 955 + "dev inode rdev changed! 
(ino %llx.%llx is %u:%u, mds says %u:%u)\n", 956 + ceph_vinop(inode), MAJOR(inode->i_rdev), 957 + MINOR(inode->i_rdev), MAJOR(rdev), 958 + MINOR(rdev)); 978 959 return -ESTALE; 979 960 } 980 961 } ··· 997 976 if (iinfo->xattr_len > 4) { 998 977 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS); 999 978 if (!xattr_blob) 1000 - pr_err("%s ENOMEM xattr blob %d bytes\n", __func__, 1001 - iinfo->xattr_len); 979 + pr_err_client(cl, "ENOMEM xattr blob %d bytes\n", 980 + iinfo->xattr_len); 1002 981 } 1003 982 1004 983 if (iinfo->pool_ns_len > 0) ··· 1052 1031 inode->i_mode = mode; 1053 1032 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid)); 1054 1033 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid)); 1055 - dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 1056 - from_kuid(&init_user_ns, inode->i_uid), 1057 - from_kgid(&init_user_ns, inode->i_gid)); 1034 + doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode, 1035 + ceph_vinop(inode), inode->i_mode, 1036 + from_kuid(&init_user_ns, inode->i_uid), 1037 + from_kgid(&init_user_ns, inode->i_gid)); 1058 1038 ceph_decode_timespec64(&ci->i_btime, &iinfo->btime); 1059 1039 ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime); 1060 1040 } ··· 1111 1089 if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) { 1112 1090 size = fsize; 1113 1091 } else { 1114 - pr_warn("fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n", 1092 + pr_warn_client(cl, 1093 + "fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n", 1115 1094 info->size, size); 1116 1095 } 1117 1096 } ··· 1124 1101 /* only update max_size on auth cap */ 1125 1102 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) && 1126 1103 ci->i_max_size != le64_to_cpu(info->max_size)) { 1127 - dout("max_size %lld -> %llu\n", ci->i_max_size, 1128 - le64_to_cpu(info->max_size)); 1104 + doutc(cl, "max_size %lld -> %llu\n", 1105 + ci->i_max_size, le64_to_cpu(info->max_size)); 1129 
1106 ci->i_max_size = le64_to_cpu(info->max_size); 1130 1107 } 1131 1108 } ··· 1188 1165 1189 1166 if (IS_ENCRYPTED(inode)) { 1190 1167 if (symlen != i_size_read(inode)) 1191 - pr_err("%s %llx.%llx BAD symlink size %lld\n", 1192 - __func__, ceph_vinop(inode), 1168 + pr_err_client(cl, 1169 + "%p %llx.%llx BAD symlink size %lld\n", 1170 + inode, ceph_vinop(inode), 1193 1171 i_size_read(inode)); 1194 1172 1195 - err = decode_encrypted_symlink(iinfo->symlink, 1173 + err = decode_encrypted_symlink(mdsc, iinfo->symlink, 1196 1174 symlen, (u8 **)&sym); 1197 1175 if (err < 0) { 1198 - pr_err("%s decoding encrypted symlink failed: %d\n", 1199 - __func__, err); 1176 + pr_err_client(cl, 1177 + "decoding encrypted symlink failed: %d\n", 1178 + err); 1200 1179 goto out; 1201 1180 } 1202 1181 symlen = err; ··· 1206 1181 inode->i_blocks = calc_inode_blocks(symlen); 1207 1182 } else { 1208 1183 if (symlen != i_size_read(inode)) { 1209 - pr_err("%s %llx.%llx BAD symlink size %lld\n", 1210 - __func__, ceph_vinop(inode), 1184 + pr_err_client(cl, 1185 + "%p %llx.%llx BAD symlink size %lld\n", 1186 + inode, ceph_vinop(inode), 1211 1187 i_size_read(inode)); 1212 1188 i_size_write(inode, symlen); 1213 1189 inode->i_blocks = calc_inode_blocks(symlen); ··· 1243 1217 inode->i_fop = &ceph_dir_fops; 1244 1218 break; 1245 1219 default: 1246 - pr_err("%s %llx.%llx BAD mode 0%o\n", __func__, 1247 - ceph_vinop(inode), inode->i_mode); 1220 + pr_err_client(cl, "%p %llx.%llx BAD mode 0%o\n", inode, 1221 + ceph_vinop(inode), inode->i_mode); 1248 1222 } 1249 1223 1250 1224 /* were we issued a capability? 
*/ ··· 1265 1239 (info_caps & CEPH_CAP_FILE_SHARED) && 1266 1240 (issued & CEPH_CAP_FILE_EXCL) == 0 && 1267 1241 !__ceph_dir_is_complete(ci)) { 1268 - dout(" marking %p complete (empty)\n", inode); 1242 + doutc(cl, " marking %p complete (empty)\n", 1243 + inode); 1269 1244 i_size_write(inode, 0); 1270 1245 __ceph_dir_set_complete(ci, 1271 1246 atomic64_read(&ci->i_release_count), ··· 1275 1248 1276 1249 wake = true; 1277 1250 } else { 1278 - dout(" %p got snap_caps %s\n", inode, 1279 - ceph_cap_string(info_caps)); 1251 + doutc(cl, " %p got snap_caps %s\n", inode, 1252 + ceph_cap_string(info_caps)); 1280 1253 ci->i_snap_caps |= info_caps; 1281 1254 } 1282 1255 } ··· 1292 1265 1293 1266 if (cap_fmode >= 0) { 1294 1267 if (!info_caps) 1295 - pr_warn("mds issued no caps on %llx.%llx\n", 1296 - ceph_vinop(inode)); 1268 + pr_warn_client(cl, "mds issued no caps on %llx.%llx\n", 1269 + ceph_vinop(inode)); 1297 1270 __ceph_touch_fmode(ci, mdsc, cap_fmode); 1298 1271 } 1299 1272 ··· 1339 1312 unsigned long from_time, 1340 1313 struct ceph_mds_session **old_lease_session) 1341 1314 { 1315 + struct ceph_client *cl = ceph_inode_to_client(dir); 1342 1316 struct ceph_dentry_info *di = ceph_dentry(dentry); 1343 1317 unsigned mask = le16_to_cpu(lease->mask); 1344 1318 long unsigned duration = le32_to_cpu(lease->duration_ms); 1345 1319 long unsigned ttl = from_time + (duration * HZ) / 1000; 1346 1320 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000; 1347 1321 1348 - dout("update_dentry_lease %p duration %lu ms ttl %lu\n", 1349 - dentry, duration, ttl); 1322 + doutc(cl, "%p duration %lu ms ttl %lu\n", dentry, duration, ttl); 1350 1323 1351 1324 /* only track leases on regular dentries */ 1352 1325 if (ceph_snap(dir) != CEPH_NOSNAP) ··· 1447 1420 */ 1448 1421 static int splice_dentry(struct dentry **pdn, struct inode *in) 1449 1422 { 1423 + struct ceph_client *cl = ceph_inode_to_client(in); 1450 1424 struct dentry *dn = *pdn; 1451 1425 struct dentry *realdn; 1452 1426 
··· 1479 1451 d_drop(dn); 1480 1452 realdn = d_splice_alias(in, dn); 1481 1453 if (IS_ERR(realdn)) { 1482 - pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", 1483 - PTR_ERR(realdn), dn, in, ceph_vinop(in)); 1454 + pr_err_client(cl, "error %ld %p inode %p ino %llx.%llx\n", 1455 + PTR_ERR(realdn), dn, in, ceph_vinop(in)); 1484 1456 return PTR_ERR(realdn); 1485 1457 } 1486 1458 1487 1459 if (realdn) { 1488 - dout("dn %p (%d) spliced with %p (%d) " 1489 - "inode %p ino %llx.%llx\n", 1490 - dn, d_count(dn), 1491 - realdn, d_count(realdn), 1492 - d_inode(realdn), ceph_vinop(d_inode(realdn))); 1460 + doutc(cl, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n", 1461 + dn, d_count(dn), realdn, d_count(realdn), 1462 + d_inode(realdn), ceph_vinop(d_inode(realdn))); 1493 1463 dput(dn); 1494 1464 *pdn = realdn; 1495 1465 } else { 1496 1466 BUG_ON(!ceph_dentry(dn)); 1497 - dout("dn %p attached to %p ino %llx.%llx\n", 1498 - dn, d_inode(dn), ceph_vinop(d_inode(dn))); 1467 + doutc(cl, "dn %p attached to %p ino %llx.%llx\n", dn, 1468 + d_inode(dn), ceph_vinop(d_inode(dn))); 1499 1469 } 1500 1470 return 0; 1501 1471 } ··· 1516 1490 struct inode *in = NULL; 1517 1491 struct ceph_vino tvino, dvino; 1518 1492 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb); 1493 + struct ceph_client *cl = fsc->client; 1519 1494 int err = 0; 1520 1495 1521 - dout("fill_trace %p is_dentry %d is_target %d\n", req, 1522 - rinfo->head->is_dentry, rinfo->head->is_target); 1496 + doutc(cl, "%p is_dentry %d is_target %d\n", req, 1497 + rinfo->head->is_dentry, rinfo->head->is_target); 1523 1498 1524 1499 if (!rinfo->head->is_target && !rinfo->head->is_dentry) { 1525 - dout("fill_trace reply is empty!\n"); 1500 + doutc(cl, "reply is empty!\n"); 1526 1501 if (rinfo->head->result == 0 && req->r_parent) 1527 1502 ceph_invalidate_dir_request(req); 1528 1503 return 0; ··· 1580 1553 tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid); 1581 1554 retry_lookup: 1582 1555 dn = d_lookup(parent, 
&dname); 1583 - dout("d_lookup on parent=%p name=%.*s got %p\n", 1584 - parent, dname.len, dname.name, dn); 1556 + doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n", 1557 + parent, dname.len, dname.name, dn); 1585 1558 1586 1559 if (!dn) { 1587 1560 dn = d_alloc(parent, &dname); 1588 - dout("d_alloc %p '%.*s' = %p\n", parent, 1589 - dname.len, dname.name, dn); 1561 + doutc(cl, "d_alloc %p '%.*s' = %p\n", parent, 1562 + dname.len, dname.name, dn); 1590 1563 if (!dn) { 1591 1564 dput(parent); 1592 1565 ceph_fname_free_buffer(dir, &oname); ··· 1602 1575 } else if (d_really_is_positive(dn) && 1603 1576 (ceph_ino(d_inode(dn)) != tvino.ino || 1604 1577 ceph_snap(d_inode(dn)) != tvino.snap)) { 1605 - dout(" dn %p points to wrong inode %p\n", 1606 - dn, d_inode(dn)); 1578 + doutc(cl, " dn %p points to wrong inode %p\n", 1579 + dn, d_inode(dn)); 1607 1580 ceph_dir_clear_ordered(dir); 1608 1581 d_delete(dn); 1609 1582 dput(dn); ··· 1628 1601 rinfo->head->result == 0) ? req->r_fmode : -1, 1629 1602 &req->r_caps_reservation); 1630 1603 if (err < 0) { 1631 - pr_err("ceph_fill_inode badness %p %llx.%llx\n", 1632 - in, ceph_vinop(in)); 1604 + pr_err_client(cl, "badness %p %llx.%llx\n", in, 1605 + ceph_vinop(in)); 1633 1606 req->r_target_inode = NULL; 1634 1607 if (in->i_state & I_NEW) 1635 1608 discard_new_inode(in); ··· 1679 1652 have_lease = have_dir_cap || 1680 1653 le32_to_cpu(rinfo->dlease->duration_ms); 1681 1654 if (!have_lease) 1682 - dout("fill_trace no dentry lease or dir cap\n"); 1655 + doutc(cl, "no dentry lease or dir cap\n"); 1683 1656 1684 1657 /* rename? 
*/ 1685 1658 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) { 1686 1659 struct inode *olddir = req->r_old_dentry_dir; 1687 1660 BUG_ON(!olddir); 1688 1661 1689 - dout(" src %p '%pd' dst %p '%pd'\n", 1690 - req->r_old_dentry, 1691 - req->r_old_dentry, 1692 - dn, dn); 1693 - dout("fill_trace doing d_move %p -> %p\n", 1694 - req->r_old_dentry, dn); 1662 + doutc(cl, " src %p '%pd' dst %p '%pd'\n", 1663 + req->r_old_dentry, req->r_old_dentry, dn, dn); 1664 + doutc(cl, "doing d_move %p -> %p\n", req->r_old_dentry, dn); 1695 1665 1696 1666 /* d_move screws up sibling dentries' offsets */ 1697 1667 ceph_dir_clear_ordered(dir); 1698 1668 ceph_dir_clear_ordered(olddir); 1699 1669 1700 1670 d_move(req->r_old_dentry, dn); 1701 - dout(" src %p '%pd' dst %p '%pd'\n", 1702 - req->r_old_dentry, 1703 - req->r_old_dentry, 1704 - dn, dn); 1671 + doutc(cl, " src %p '%pd' dst %p '%pd'\n", 1672 + req->r_old_dentry, req->r_old_dentry, dn, dn); 1705 1673 1706 1674 /* ensure target dentry is invalidated, despite 1707 1675 rehashing bug in vfs_rename_dir */ 1708 1676 ceph_invalidate_dentry_lease(dn); 1709 1677 1710 - dout("dn %p gets new offset %lld\n", req->r_old_dentry, 1711 - ceph_dentry(req->r_old_dentry)->offset); 1678 + doutc(cl, "dn %p gets new offset %lld\n", 1679 + req->r_old_dentry, 1680 + ceph_dentry(req->r_old_dentry)->offset); 1712 1681 1713 1682 /* swap r_dentry and r_old_dentry in case that 1714 1683 * splice_dentry() gets called later. This is safe ··· 1716 1693 1717 1694 /* null dentry? 
*/ 1718 1695 if (!rinfo->head->is_target) { 1719 - dout("fill_trace null dentry\n"); 1696 + doutc(cl, "null dentry\n"); 1720 1697 if (d_really_is_positive(dn)) { 1721 - dout("d_delete %p\n", dn); 1698 + doutc(cl, "d_delete %p\n", dn); 1722 1699 ceph_dir_clear_ordered(dir); 1723 1700 d_delete(dn); 1724 1701 } else if (have_lease) { ··· 1742 1719 goto done; 1743 1720 dn = req->r_dentry; /* may have spliced */ 1744 1721 } else if (d_really_is_positive(dn) && d_inode(dn) != in) { 1745 - dout(" %p links to %p %llx.%llx, not %llx.%llx\n", 1746 - dn, d_inode(dn), ceph_vinop(d_inode(dn)), 1747 - ceph_vinop(in)); 1722 + doutc(cl, " %p links to %p %llx.%llx, not %llx.%llx\n", 1723 + dn, d_inode(dn), ceph_vinop(d_inode(dn)), 1724 + ceph_vinop(in)); 1748 1725 d_invalidate(dn); 1749 1726 have_lease = false; 1750 1727 } ··· 1754 1731 rinfo->dlease, session, 1755 1732 req->r_request_started); 1756 1733 } 1757 - dout(" final dn %p\n", dn); 1734 + doutc(cl, " final dn %p\n", dn); 1758 1735 } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || 1759 1736 req->r_op == CEPH_MDS_OP_MKSNAP) && 1760 1737 test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) && ··· 1765 1742 BUG_ON(!dir); 1766 1743 BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR); 1767 1744 BUG_ON(!req->r_dentry); 1768 - dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry); 1745 + doutc(cl, " linking snapped dir %p to dn %p\n", in, 1746 + req->r_dentry); 1769 1747 ceph_dir_clear_ordered(dir); 1770 1748 ihold(in); 1771 1749 err = splice_dentry(&req->r_dentry, in); ··· 1788 1764 &dvino, ptvino); 1789 1765 } 1790 1766 done: 1791 - dout("fill_trace done err=%d\n", err); 1767 + doutc(cl, "done err=%d\n", err); 1792 1768 return err; 1793 1769 } 1794 1770 ··· 1799 1775 struct ceph_mds_session *session) 1800 1776 { 1801 1777 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1778 + struct ceph_client *cl = session->s_mdsc->fsc->client; 1802 1779 int i, err = 0; 1803 1780 1804 1781 for (i = 0; i < rinfo->dir_nr; i++) { ··· 
1814 1789 in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL); 1815 1790 if (IS_ERR(in)) { 1816 1791 err = PTR_ERR(in); 1817 - dout("new_inode badness got %d\n", err); 1792 + doutc(cl, "badness got %d\n", err); 1818 1793 continue; 1819 1794 } 1820 1795 rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session, 1821 1796 -1, &req->r_caps_reservation); 1822 1797 if (rc < 0) { 1823 - pr_err("ceph_fill_inode badness on %p got %d\n", 1824 - in, rc); 1798 + pr_err_client(cl, "inode badness on %p got %d\n", in, 1799 + rc); 1825 1800 err = rc; 1826 1801 if (in->i_state & I_NEW) { 1827 1802 ihold(in); ··· 1850 1825 struct ceph_readdir_cache_control *ctl, 1851 1826 struct ceph_mds_request *req) 1852 1827 { 1828 + struct ceph_client *cl = ceph_inode_to_client(dir); 1853 1829 struct ceph_inode_info *ci = ceph_inode(dir); 1854 1830 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*); 1855 1831 unsigned idx = ctl->index % nsize; ··· 1876 1850 1877 1851 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && 1878 1852 req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) { 1879 - dout("readdir cache dn %p idx %d\n", dn, ctl->index); 1853 + doutc(cl, "dn %p idx %d\n", dn, ctl->index); 1880 1854 ctl->dentries[idx] = dn; 1881 1855 ctl->index++; 1882 1856 } else { 1883 - dout("disable readdir cache\n"); 1857 + doutc(cl, "disable readdir cache\n"); 1884 1858 ctl->index = -1; 1885 1859 } 1886 1860 return 0; ··· 1893 1867 struct inode *inode = d_inode(parent); 1894 1868 struct ceph_inode_info *ci = ceph_inode(inode); 1895 1869 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1870 + struct ceph_client *cl = session->s_mdsc->fsc->client; 1896 1871 struct qstr dname; 1897 1872 struct dentry *dn; 1898 1873 struct inode *in; ··· 1921 1894 1922 1895 if (rinfo->dir_dir && 1923 1896 le32_to_cpu(rinfo->dir_dir->frag) != frag) { 1924 - dout("readdir_prepopulate got new frag %x -> %x\n", 1925 - frag, le32_to_cpu(rinfo->dir_dir->frag)); 1897 + doutc(cl, "got new 
frag %x -> %x\n", frag, 1898 + le32_to_cpu(rinfo->dir_dir->frag)); 1926 1899 frag = le32_to_cpu(rinfo->dir_dir->frag); 1927 1900 if (!rinfo->hash_order) 1928 1901 req->r_readdir_offset = 2; 1929 1902 } 1930 1903 1931 1904 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { 1932 - dout("readdir_prepopulate %d items under SNAPDIR dn %p\n", 1933 - rinfo->dir_nr, parent); 1905 + doutc(cl, "%d items under SNAPDIR dn %p\n", 1906 + rinfo->dir_nr, parent); 1934 1907 } else { 1935 - dout("readdir_prepopulate %d items under dn %p\n", 1936 - rinfo->dir_nr, parent); 1908 + doutc(cl, "%d items under dn %p\n", rinfo->dir_nr, parent); 1937 1909 if (rinfo->dir_dir) 1938 1910 ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir); 1939 1911 ··· 1976 1950 1977 1951 retry_lookup: 1978 1952 dn = d_lookup(parent, &dname); 1979 - dout("d_lookup on parent=%p name=%.*s got %p\n", 1980 - parent, dname.len, dname.name, dn); 1953 + doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n", 1954 + parent, dname.len, dname.name, dn); 1981 1955 1982 1956 if (!dn) { 1983 1957 dn = d_alloc(parent, &dname); 1984 - dout("d_alloc %p '%.*s' = %p\n", parent, 1985 - dname.len, dname.name, dn); 1958 + doutc(cl, "d_alloc %p '%.*s' = %p\n", parent, 1959 + dname.len, dname.name, dn); 1986 1960 if (!dn) { 1987 - dout("d_alloc badness\n"); 1961 + doutc(cl, "d_alloc badness\n"); 1988 1962 err = -ENOMEM; 1989 1963 goto out; 1990 1964 } ··· 1997 1971 (ceph_ino(d_inode(dn)) != tvino.ino || 1998 1972 ceph_snap(d_inode(dn)) != tvino.snap)) { 1999 1973 struct ceph_dentry_info *di = ceph_dentry(dn); 2000 - dout(" dn %p points to wrong inode %p\n", 2001 - dn, d_inode(dn)); 1974 + doutc(cl, " dn %p points to wrong inode %p\n", 1975 + dn, d_inode(dn)); 2002 1976 2003 1977 spin_lock(&dn->d_lock); 2004 1978 if (di->offset > 0 && ··· 2020 1994 } else { 2021 1995 in = ceph_get_inode(parent->d_sb, tvino, NULL); 2022 1996 if (IS_ERR(in)) { 2023 - dout("new_inode badness\n"); 1997 + doutc(cl, "new_inode badness\n"); 2024 1998 
d_drop(dn); 2025 1999 dput(dn); 2026 2000 err = PTR_ERR(in); ··· 2031 2005 ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session, 2032 2006 -1, &req->r_caps_reservation); 2033 2007 if (ret < 0) { 2034 - pr_err("ceph_fill_inode badness on %p\n", in); 2008 + pr_err_client(cl, "badness on %p %llx.%llx\n", in, 2009 + ceph_vinop(in)); 2035 2010 if (d_really_is_negative(dn)) { 2036 2011 if (in->i_state & I_NEW) { 2037 2012 ihold(in); ··· 2049 2022 2050 2023 if (d_really_is_negative(dn)) { 2051 2024 if (ceph_security_xattr_deadlock(in)) { 2052 - dout(" skip splicing dn %p to inode %p" 2053 - " (security xattr deadlock)\n", dn, in); 2025 + doutc(cl, " skip splicing dn %p to inode %p" 2026 + " (security xattr deadlock)\n", dn, in); 2054 2027 iput(in); 2055 2028 skipped++; 2056 2029 goto next_item; ··· 2082 2055 req->r_readdir_cache_idx = cache_ctl.index; 2083 2056 } 2084 2057 ceph_readdir_cache_release(&cache_ctl); 2085 - dout("readdir_prepopulate done\n"); 2058 + doutc(cl, "done\n"); 2086 2059 return err; 2087 2060 } 2088 2061 2089 2062 bool ceph_inode_set_size(struct inode *inode, loff_t size) 2090 2063 { 2064 + struct ceph_client *cl = ceph_inode_to_client(inode); 2091 2065 struct ceph_inode_info *ci = ceph_inode(inode); 2092 2066 bool ret; 2093 2067 2094 2068 spin_lock(&ci->i_ceph_lock); 2095 - dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size); 2069 + doutc(cl, "set_size %p %llu -> %llu\n", inode, i_size_read(inode), size); 2096 2070 i_size_write(inode, size); 2097 2071 ceph_fscache_update(inode); 2098 2072 inode->i_blocks = calc_inode_blocks(size); ··· 2108 2080 void ceph_queue_inode_work(struct inode *inode, int work_bit) 2109 2081 { 2110 2082 struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 2083 + struct ceph_client *cl = fsc->client; 2111 2084 struct ceph_inode_info *ci = ceph_inode(inode); 2112 2085 set_bit(work_bit, &ci->i_work_mask); 2113 2086 2114 2087 ihold(inode); 2115 2088 if (queue_work(fsc->inode_wq, &ci->i_work)) { 2116 - 
dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask); 2089 + doutc(cl, "%p %llx.%llx mask=%lx\n", inode, 2090 + ceph_vinop(inode), ci->i_work_mask); 2117 2091 } else { 2118 - dout("queue_inode_work %p already queued, mask=%lx\n", 2119 - inode, ci->i_work_mask); 2092 + doutc(cl, "%p %llx.%llx already queued, mask=%lx\n", 2093 + inode, ceph_vinop(inode), ci->i_work_mask); 2120 2094 iput(inode); 2121 2095 } 2122 2096 } 2123 2097 2124 2098 static void ceph_do_invalidate_pages(struct inode *inode) 2125 2099 { 2100 + struct ceph_client *cl = ceph_inode_to_client(inode); 2126 2101 struct ceph_inode_info *ci = ceph_inode(inode); 2127 2102 u32 orig_gen; 2128 2103 int check = 0; ··· 2135 2104 mutex_lock(&ci->i_truncate_mutex); 2136 2105 2137 2106 if (ceph_inode_is_shutdown(inode)) { 2138 - pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n", 2139 - __func__, ceph_vinop(inode)); 2107 + pr_warn_ratelimited_client(cl, 2108 + "%p %llx.%llx is shut down\n", inode, 2109 + ceph_vinop(inode)); 2140 2110 mapping_set_error(inode->i_mapping, -EIO); 2141 2111 truncate_pagecache(inode, 0); 2142 2112 mutex_unlock(&ci->i_truncate_mutex); ··· 2145 2113 } 2146 2114 2147 2115 spin_lock(&ci->i_ceph_lock); 2148 - dout("invalidate_pages %p gen %d revoking %d\n", inode, 2149 - ci->i_rdcache_gen, ci->i_rdcache_revoking); 2116 + doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode, 2117 + ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking); 2150 2118 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 2151 2119 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 2152 2120 check = 1; ··· 2158 2126 spin_unlock(&ci->i_ceph_lock); 2159 2127 2160 2128 if (invalidate_inode_pages2(inode->i_mapping) < 0) { 2161 - pr_err("invalidate_inode_pages2 %llx.%llx failed\n", 2162 - ceph_vinop(inode)); 2129 + pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n", 2130 + ceph_vinop(inode)); 2163 2131 } 2164 2132 2165 2133 spin_lock(&ci->i_ceph_lock); 2166 2134 if (orig_gen 
== ci->i_rdcache_gen && 2167 2135 orig_gen == ci->i_rdcache_revoking) { 2168 - dout("invalidate_pages %p gen %d successful\n", inode, 2169 - ci->i_rdcache_gen); 2136 + doutc(cl, "%p %llx.%llx gen %d successful\n", inode, 2137 + ceph_vinop(inode), ci->i_rdcache_gen); 2170 2138 ci->i_rdcache_revoking--; 2171 2139 check = 1; 2172 2140 } else { 2173 - dout("invalidate_pages %p gen %d raced, now %d revoking %d\n", 2174 - inode, orig_gen, ci->i_rdcache_gen, 2175 - ci->i_rdcache_revoking); 2141 + doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n", 2142 + inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen, 2143 + ci->i_rdcache_revoking); 2176 2144 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 2177 2145 check = 1; 2178 2146 } ··· 2189 2157 */ 2190 2158 void __ceph_do_pending_vmtruncate(struct inode *inode) 2191 2159 { 2160 + struct ceph_client *cl = ceph_inode_to_client(inode); 2192 2161 struct ceph_inode_info *ci = ceph_inode(inode); 2193 2162 u64 to; 2194 2163 int wrbuffer_refs, finish = 0; ··· 2198 2165 retry: 2199 2166 spin_lock(&ci->i_ceph_lock); 2200 2167 if (ci->i_truncate_pending == 0) { 2201 - dout("%s %p none pending\n", __func__, inode); 2168 + doutc(cl, "%p %llx.%llx none pending\n", inode, 2169 + ceph_vinop(inode)); 2202 2170 spin_unlock(&ci->i_ceph_lock); 2203 2171 mutex_unlock(&ci->i_truncate_mutex); 2204 2172 return; ··· 2211 2177 */ 2212 2178 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { 2213 2179 spin_unlock(&ci->i_ceph_lock); 2214 - dout("%s %p flushing snaps first\n", __func__, inode); 2180 + doutc(cl, "%p %llx.%llx flushing snaps first\n", inode, 2181 + ceph_vinop(inode)); 2215 2182 filemap_write_and_wait_range(&inode->i_data, 0, 2216 2183 inode->i_sb->s_maxbytes); 2217 2184 goto retry; ··· 2223 2188 2224 2189 to = ci->i_truncate_pagecache_size; 2225 2190 wrbuffer_refs = ci->i_wrbuffer_ref; 2226 - dout("%s %p (%d) to %lld\n", __func__, inode, 2227 - ci->i_truncate_pending, to); 2191 + doutc(cl, "%p %llx.%llx (%d) to 
%lld\n", inode, ceph_vinop(inode), 2192 + ci->i_truncate_pending, to); 2228 2193 spin_unlock(&ci->i_ceph_lock); 2229 2194 2230 2195 ceph_fscache_resize(inode, to); ··· 2252 2217 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 2253 2218 i_work); 2254 2219 struct inode *inode = &ci->netfs.inode; 2220 + struct ceph_client *cl = ceph_inode_to_client(inode); 2255 2221 2256 2222 if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) { 2257 - dout("writeback %p\n", inode); 2223 + doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode)); 2258 2224 filemap_fdatawrite(&inode->i_data); 2259 2225 } 2260 2226 if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask)) ··· 2327 2291 struct ceph_mds_request *req, 2328 2292 struct iattr *attr) 2329 2293 { 2294 + struct ceph_client *cl = ceph_inode_to_client(inode); 2330 2295 struct ceph_inode_info *ci = ceph_inode(inode); 2331 2296 int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE; 2332 2297 loff_t pos, orig_pos = round_down(attr->ia_size, ··· 2350 2313 2351 2314 issued = __ceph_caps_issued(ci, NULL); 2352 2315 2353 - dout("%s size %lld -> %lld got cap refs on %s, issued %s\n", __func__, 2354 - i_size, attr->ia_size, ceph_cap_string(got), 2355 - ceph_cap_string(issued)); 2316 + doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n", 2317 + i_size, attr->ia_size, ceph_cap_string(got), 2318 + ceph_cap_string(issued)); 2356 2319 2357 2320 /* Try to writeback the dirty pagecaches */ 2358 2321 if (issued & (CEPH_CAP_FILE_BUFFER)) { ··· 2407 2370 * If the Rados object doesn't exist, it will be set to 0. 
2408 2371 */ 2409 2372 if (!objver) { 2410 - dout("%s hit hole, ppos %lld < size %lld\n", __func__, 2411 - pos, i_size); 2373 + doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size); 2412 2374 2413 2375 header.data_len = cpu_to_le32(8 + 8 + 4); 2414 2376 header.file_offset = 0; ··· 2416 2380 header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE); 2417 2381 header.file_offset = cpu_to_le64(orig_pos); 2418 2382 2419 - dout("%s encrypt block boff/bsize %d/%lu\n", __func__, 2420 - boff, CEPH_FSCRYPT_BLOCK_SIZE); 2383 + doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff, 2384 + CEPH_FSCRYPT_BLOCK_SIZE); 2421 2385 2422 2386 /* truncate and zero out the extra contents for the last block */ 2423 2387 memset(iov.iov_base + boff, 0, PAGE_SIZE - boff); ··· 2445 2409 } 2446 2410 req->r_pagelist = pagelist; 2447 2411 out: 2448 - dout("%s %p size dropping cap refs on %s\n", __func__, 2449 - inode, ceph_cap_string(got)); 2412 + doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode, 2413 + ceph_vinop(inode), ceph_cap_string(got)); 2450 2414 ceph_put_cap_refs(ci, got); 2451 2415 if (iov.iov_base) 2452 2416 kunmap_local(iov.iov_base); ··· 2464 2428 unsigned int ia_valid = attr->ia_valid; 2465 2429 struct ceph_mds_request *req; 2466 2430 struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc; 2431 + struct ceph_client *cl = ceph_inode_to_client(inode); 2467 2432 struct ceph_cap_flush *prealloc_cf; 2468 2433 loff_t isize = i_size_read(inode); 2469 2434 int issued; ··· 2503 2466 } 2504 2467 } 2505 2468 2506 - dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); 2469 + doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode), 2470 + ceph_cap_string(issued)); 2507 2471 #if IS_ENABLED(CONFIG_FS_ENCRYPTION) 2508 2472 if (cia && cia->fscrypt_auth) { 2509 2473 u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth); ··· 2515 2477 goto out; 2516 2478 } 2517 2479 2518 - dout("setattr %llx:%llx fscrypt_auth len %u to %u)\n", 2519 - 
ceph_vinop(inode), ci->fscrypt_auth_len, len); 2480 + doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u)\n", inode, 2481 + ceph_vinop(inode), ci->fscrypt_auth_len, len); 2520 2482 2521 2483 /* It should never be re-set once set */ 2522 2484 WARN_ON_ONCE(ci->fscrypt_auth); ··· 2544 2506 #endif /* CONFIG_FS_ENCRYPTION */ 2545 2507 2546 2508 if (ia_valid & ATTR_UID) { 2547 - dout("setattr %p uid %d -> %d\n", inode, 2548 - from_kuid(&init_user_ns, inode->i_uid), 2549 - from_kuid(&init_user_ns, attr->ia_uid)); 2509 + doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode, 2510 + ceph_vinop(inode), 2511 + from_kuid(&init_user_ns, inode->i_uid), 2512 + from_kuid(&init_user_ns, attr->ia_uid)); 2550 2513 if (issued & CEPH_CAP_AUTH_EXCL) { 2551 2514 inode->i_uid = attr->ia_uid; 2552 2515 dirtied |= CEPH_CAP_AUTH_EXCL; ··· 2560 2521 } 2561 2522 } 2562 2523 if (ia_valid & ATTR_GID) { 2563 - dout("setattr %p gid %d -> %d\n", inode, 2564 - from_kgid(&init_user_ns, inode->i_gid), 2565 - from_kgid(&init_user_ns, attr->ia_gid)); 2524 + doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode, 2525 + ceph_vinop(inode), 2526 + from_kgid(&init_user_ns, inode->i_gid), 2527 + from_kgid(&init_user_ns, attr->ia_gid)); 2566 2528 if (issued & CEPH_CAP_AUTH_EXCL) { 2567 2529 inode->i_gid = attr->ia_gid; 2568 2530 dirtied |= CEPH_CAP_AUTH_EXCL; ··· 2576 2536 } 2577 2537 } 2578 2538 if (ia_valid & ATTR_MODE) { 2579 - dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode, 2580 - attr->ia_mode); 2539 + doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode, 2540 + ceph_vinop(inode), inode->i_mode, attr->ia_mode); 2581 2541 if (issued & CEPH_CAP_AUTH_EXCL) { 2582 2542 inode->i_mode = attr->ia_mode; 2583 2543 dirtied |= CEPH_CAP_AUTH_EXCL; ··· 2591 2551 } 2592 2552 2593 2553 if (ia_valid & ATTR_ATIME) { 2594 - dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode, 2595 - inode->i_atime.tv_sec, inode->i_atime.tv_nsec, 2596 - attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec); 2554 + doutc(cl, "%p %llx.%llx atime %lld.%ld 
-> %lld.%ld\n", 2555 + inode, ceph_vinop(inode), inode->i_atime.tv_sec, 2556 + inode->i_atime.tv_nsec, attr->ia_atime.tv_sec, 2557 + attr->ia_atime.tv_nsec); 2597 2558 if (issued & CEPH_CAP_FILE_EXCL) { 2598 2559 ci->i_time_warp_seq++; 2599 2560 inode->i_atime = attr->ia_atime; ··· 2614 2573 } 2615 2574 } 2616 2575 if (ia_valid & ATTR_SIZE) { 2617 - dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size); 2576 + doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode, 2577 + ceph_vinop(inode), isize, attr->ia_size); 2618 2578 /* 2619 2579 * Only when the new size is smaller and not aligned to 2620 2580 * CEPH_FSCRYPT_BLOCK_SIZE will the RMW is needed. ··· 2666 2624 } 2667 2625 } 2668 2626 if (ia_valid & ATTR_MTIME) { 2669 - dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode, 2670 - inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 2671 - attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec); 2627 + doutc(cl, "%p %llx.%llx mtime %lld.%ld -> %lld.%ld\n", 2628 + inode, ceph_vinop(inode), inode->i_mtime.tv_sec, 2629 + inode->i_mtime.tv_nsec, attr->ia_mtime.tv_sec, 2630 + attr->ia_mtime.tv_nsec); 2672 2631 if (issued & CEPH_CAP_FILE_EXCL) { 2673 2632 ci->i_time_warp_seq++; 2674 2633 inode->i_mtime = attr->ia_mtime; ··· 2693 2650 if (ia_valid & ATTR_CTIME) { 2694 2651 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME| 2695 2652 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0; 2696 - dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode, 2697 - inode_get_ctime(inode).tv_sec, 2698 - inode_get_ctime(inode).tv_nsec, 2699 - attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 2700 - only ? "ctime only" : "ignored"); 2653 + doutc(cl, "%p %llx.%llx ctime %lld.%ld -> %lld.%ld (%s)\n", 2654 + inode, ceph_vinop(inode), inode_get_ctime(inode).tv_sec, 2655 + inode_get_ctime(inode).tv_nsec, 2656 + attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 2657 + only ? 
"ctime only" : "ignored"); 2658 + 2701 2659 if (only) { 2702 2660 /* 2703 2661 * if kernel wants to dirty ctime but nothing else, ··· 2716 2672 } 2717 2673 } 2718 2674 if (ia_valid & ATTR_FILE) 2719 - dout("setattr %p ATTR_FILE ... hrm!\n", inode); 2675 + doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode, 2676 + ceph_vinop(inode)); 2720 2677 2721 2678 if (dirtied) { 2722 2679 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, ··· 2758 2713 */ 2759 2714 err = ceph_mdsc_do_request(mdsc, NULL, req); 2760 2715 if (err == -EAGAIN && truncate_retry--) { 2761 - dout("setattr %p result=%d (%s locally, %d remote), retry it!\n", 2762 - inode, err, ceph_cap_string(dirtied), mask); 2716 + doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n", 2717 + inode, ceph_vinop(inode), err, 2718 + ceph_cap_string(dirtied), mask); 2763 2719 ceph_mdsc_put_request(req); 2764 2720 ceph_free_cap_flush(prealloc_cf); 2765 2721 goto retry; 2766 2722 } 2767 2723 } 2768 2724 out: 2769 - dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 2770 - ceph_cap_string(dirtied), mask); 2725 + doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode, 2726 + ceph_vinop(inode), err, ceph_cap_string(dirtied), mask); 2771 2727 2772 2728 ceph_mdsc_put_request(req); 2773 2729 ceph_free_cap_flush(prealloc_cf); ··· 2857 2811 int mask, bool force) 2858 2812 { 2859 2813 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb); 2814 + struct ceph_client *cl = fsc->client; 2860 2815 struct ceph_mds_client *mdsc = fsc->mdsc; 2861 2816 struct ceph_mds_request *req; 2862 2817 int mode; 2863 2818 int err; 2864 2819 2865 2820 if (ceph_snap(inode) == CEPH_SNAPDIR) { 2866 - dout("do_getattr inode %p SNAPDIR\n", inode); 2821 + doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode, 2822 + ceph_vinop(inode)); 2867 2823 return 0; 2868 2824 } 2869 2825 2870 - dout("do_getattr inode %p mask %s mode 0%o\n", 2871 - inode, ceph_cap_string(mask), inode->i_mode); 2826 + doutc(cl, 
"inode %p %llx.%llx mask %s mode 0%o\n", inode, 2827 + ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode); 2872 2828 if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1)) 2873 2829 return 0; 2874 2830 ··· 2897 2849 } 2898 2850 } 2899 2851 ceph_mdsc_put_request(req); 2900 - dout("do_getattr result=%d\n", err); 2852 + doutc(cl, "result=%d\n", err); 2901 2853 return err; 2902 2854 } 2903 2855 ··· 2905 2857 size_t size) 2906 2858 { 2907 2859 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb); 2860 + struct ceph_client *cl = fsc->client; 2908 2861 struct ceph_mds_client *mdsc = fsc->mdsc; 2909 2862 struct ceph_mds_request *req; 2910 2863 int mode = USE_AUTH_MDS; ··· 2935 2886 xattr_value = req->r_reply_info.xattr_info.xattr_value; 2936 2887 xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len; 2937 2888 2938 - dout("do_getvxattr xattr_value_len:%zu, size:%zu\n", xattr_value_len, size); 2889 + doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size); 2939 2890 2940 2891 err = (int)xattr_value_len; 2941 2892 if (size == 0) ··· 2950 2901 put: 2951 2902 ceph_mdsc_put_request(req); 2952 2903 out: 2953 - dout("do_getvxattr result=%d\n", err); 2904 + doutc(cl, "result=%d\n", err); 2954 2905 return err; 2955 2906 } 2956 2907
+9 -4
fs/ceph/ioctl.c
··· 245 245 struct inode *inode = file_inode(file); 246 246 struct ceph_inode_info *ci = ceph_inode(inode); 247 247 struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc; 248 + struct ceph_client *cl = mdsc->fsc->client; 248 249 249 250 if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) { 250 251 spin_lock(&ci->i_ceph_lock); ··· 253 252 ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++; 254 253 __ceph_touch_fmode(ci, mdsc, fi->fmode); 255 254 spin_unlock(&ci->i_ceph_lock); 256 - dout("ioctl_layzio: file %p marked lazy\n", file); 255 + doutc(cl, "file %p %p %llx.%llx marked lazy\n", file, inode, 256 + ceph_vinop(inode)); 257 257 258 258 ceph_check_caps(ci, 0); 259 259 } else { 260 - dout("ioctl_layzio: file %p already lazy\n", file); 260 + doutc(cl, "file %p %p %llx.%llx already lazy\n", file, inode, 261 + ceph_vinop(inode)); 261 262 } 262 263 return 0; 263 264 } ··· 358 355 359 356 long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 360 357 { 358 + struct inode *inode = file_inode(file); 359 + struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode); 361 360 int ret; 362 361 363 - dout("ioctl file %p cmd %s arg %lu\n", file, 364 - ceph_ioctl_cmd_name(cmd), arg); 362 + doutc(fsc->client, "file %p %p %llx.%llx cmd %s arg %lu\n", file, 363 + inode, ceph_vinop(inode), ceph_ioctl_cmd_name(cmd), arg); 365 364 switch (cmd) { 366 365 case CEPH_IOC_GET_LAYOUT: 367 366 return ceph_ioctl_get_layout(file, (void __user *)arg);
+33 -24
fs/ceph/locks.c
··· 77 77 int cmd, u8 wait, struct file_lock *fl) 78 78 { 79 79 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 80 + struct ceph_client *cl = mdsc->fsc->client; 80 81 struct ceph_mds_request *req; 81 82 int err; 82 83 u64 length = 0; ··· 112 111 113 112 owner = secure_addr(fl->fl_owner); 114 113 115 - dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, " 116 - "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type, 117 - (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length, 118 - wait, fl->fl_type); 114 + doutc(cl, "rule: %d, op: %d, owner: %llx, pid: %llu, " 115 + "start: %llu, length: %llu, wait: %d, type: %d\n", 116 + (int)lock_type, (int)operation, owner, (u64)fl->fl_pid, 117 + fl->fl_start, length, wait, fl->fl_type); 119 118 120 119 req->r_args.filelock_change.rule = lock_type; 121 120 req->r_args.filelock_change.type = cmd; ··· 148 147 149 148 } 150 149 ceph_mdsc_put_request(req); 151 - dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " 152 - "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type, 153 - (int)operation, (u64)fl->fl_pid, fl->fl_start, 154 - length, wait, fl->fl_type, err); 150 + doutc(cl, "rule: %d, op: %d, pid: %llu, start: %llu, " 151 + "length: %llu, wait: %d, type: %d, err code %d\n", 152 + (int)lock_type, (int)operation, (u64)fl->fl_pid, 153 + fl->fl_start, length, wait, fl->fl_type, err); 155 154 return err; 156 155 } 157 156 158 157 static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc, 159 158 struct ceph_mds_request *req) 160 159 { 160 + struct ceph_client *cl = mdsc->fsc->client; 161 161 struct ceph_mds_request *intr_req; 162 162 struct inode *inode = req->r_inode; 163 163 int err, lock_type; ··· 176 174 if (!err) 177 175 return 0; 178 176 179 - dout("ceph_lock_wait_for_completion: request %llu was interrupted\n", 180 - req->r_tid); 177 + doutc(cl, "request %llu was interrupted\n", req->r_tid); 181 178 182 179 mutex_lock(&mdsc->mutex); 183 180 
if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { ··· 247 246 { 248 247 struct inode *inode = file_inode(file); 249 248 struct ceph_inode_info *ci = ceph_inode(inode); 249 + struct ceph_client *cl = ceph_inode_to_client(inode); 250 250 int err = 0; 251 251 u16 op = CEPH_MDS_OP_SETFILELOCK; 252 252 u8 wait = 0; ··· 259 257 if (ceph_inode_is_shutdown(inode)) 260 258 return -ESTALE; 261 259 262 - dout("ceph_lock, fl_owner: %p\n", fl->fl_owner); 260 + doutc(cl, "fl_owner: %p\n", fl->fl_owner); 263 261 264 262 /* set wait bit as appropriate, then make command as Ceph expects it*/ 265 263 if (IS_GETLK(cmd)) ··· 294 292 err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl); 295 293 if (!err) { 296 294 if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) { 297 - dout("mds locked, locking locally\n"); 295 + doutc(cl, "locking locally\n"); 298 296 err = posix_lock_file(file, fl, NULL); 299 297 if (err) { 300 298 /* undo! This should only happen if ··· 302 300 * deadlock. 
*/ 303 301 ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, 304 302 CEPH_LOCK_UNLOCK, 0, fl); 305 - dout("got %d on posix_lock_file, undid lock\n", 306 - err); 303 + doutc(cl, "got %d on posix_lock_file, undid lock\n", 304 + err); 307 305 } 308 306 } 309 307 } ··· 314 312 { 315 313 struct inode *inode = file_inode(file); 316 314 struct ceph_inode_info *ci = ceph_inode(inode); 315 + struct ceph_client *cl = ceph_inode_to_client(inode); 317 316 int err = 0; 318 317 u8 wait = 0; 319 318 u8 lock_cmd; ··· 325 322 if (ceph_inode_is_shutdown(inode)) 326 323 return -ESTALE; 327 324 328 - dout("ceph_flock, fl_file: %p\n", fl->fl_file); 325 + doutc(cl, "fl_file: %p\n", fl->fl_file); 329 326 330 327 spin_lock(&ci->i_ceph_lock); 331 328 if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) { ··· 362 359 ceph_lock_message(CEPH_LOCK_FLOCK, 363 360 CEPH_MDS_OP_SETFILELOCK, 364 361 inode, CEPH_LOCK_UNLOCK, 0, fl); 365 - dout("got %d on locks_lock_file_wait, undid lock\n", err); 362 + doutc(cl, "got %d on locks_lock_file_wait, undid lock\n", 363 + err); 366 364 } 367 365 } 368 366 return err; ··· 375 371 */ 376 372 void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) 377 373 { 374 + struct ceph_client *cl = ceph_inode_to_client(inode); 378 375 struct file_lock *lock; 379 376 struct file_lock_context *ctx; 380 377 ··· 391 386 ++(*flock_count); 392 387 spin_unlock(&ctx->flc_lock); 393 388 } 394 - dout("counted %d flock locks and %d fcntl locks\n", 395 - *flock_count, *fcntl_count); 389 + doutc(cl, "counted %d flock locks and %d fcntl locks\n", 390 + *flock_count, *fcntl_count); 396 391 } 397 392 398 393 /* 399 394 * Given a pointer to a lock, convert it to a ceph filelock 400 395 */ 401 - static int lock_to_ceph_filelock(struct file_lock *lock, 396 + static int lock_to_ceph_filelock(struct inode *inode, 397 + struct file_lock *lock, 402 398 struct ceph_filelock *cephlock) 403 399 { 400 + struct ceph_client *cl = ceph_inode_to_client(inode); 404 401 int err = 0; 402 + 
405 403 cephlock->start = cpu_to_le64(lock->fl_start); 406 404 cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1); 407 405 cephlock->client = cpu_to_le64(0); ··· 422 414 cephlock->type = CEPH_LOCK_UNLOCK; 423 415 break; 424 416 default: 425 - dout("Have unknown lock type %d\n", lock->fl_type); 417 + doutc(cl, "Have unknown lock type %d\n", lock->fl_type); 426 418 err = -EINVAL; 427 419 } 428 420 ··· 440 432 { 441 433 struct file_lock *lock; 442 434 struct file_lock_context *ctx = locks_inode_context(inode); 435 + struct ceph_client *cl = ceph_inode_to_client(inode); 443 436 int err = 0; 444 437 int seen_fcntl = 0; 445 438 int seen_flock = 0; 446 439 int l = 0; 447 440 448 - dout("encoding %d flock and %d fcntl locks\n", num_flock_locks, 449 - num_fcntl_locks); 441 + doutc(cl, "encoding %d flock and %d fcntl locks\n", num_flock_locks, 442 + num_fcntl_locks); 450 443 451 444 if (!ctx) 452 445 return 0; ··· 459 450 err = -ENOSPC; 460 451 goto fail; 461 452 } 462 - err = lock_to_ceph_filelock(lock, &flocks[l]); 453 + err = lock_to_ceph_filelock(inode, lock, &flocks[l]); 463 454 if (err) 464 455 goto fail; 465 456 ++l; ··· 470 461 err = -ENOSPC; 471 462 goto fail; 472 463 } 473 - err = lock_to_ceph_filelock(lock, &flocks[l]); 464 + err = lock_to_ceph_filelock(inode, lock, &flocks[l]); 474 465 if (err) 475 466 goto fail; 476 467 ++l;
+315 -243
fs/ceph/mds_client.c
··· 411 411 u64 features) 412 412 { 413 413 struct ceph_mds_reply_info_parsed *info = &req->r_reply_info; 414 + struct ceph_client *cl = req->r_mdsc->fsc->client; 414 415 u32 num, i = 0; 415 416 int err; 416 417 ··· 434 433 BUG_ON(!info->dir_entries); 435 434 if ((unsigned long)(info->dir_entries + num) > 436 435 (unsigned long)info->dir_entries + info->dir_buf_size) { 437 - pr_err("dir contents are larger than expected\n"); 436 + pr_err_client(cl, "dir contents are larger than expected\n"); 438 437 WARN_ON(1); 439 438 goto bad; 440 439 } ··· 455 454 ceph_decode_need(p, end, _name_len, bad); 456 455 _name = *p; 457 456 *p += _name_len; 458 - dout("parsed dir dname '%.*s'\n", _name_len, _name); 457 + doutc(cl, "parsed dir dname '%.*s'\n", _name_len, _name); 459 458 460 459 if (info->hash_order) 461 460 rde->raw_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, ··· 515 514 rde->is_nokey = false; 516 515 err = ceph_fname_to_usr(&fname, &tname, &oname, &rde->is_nokey); 517 516 if (err) { 518 - pr_err("%s unable to decode %.*s, got %d\n", __func__, 519 - _name_len, _name, err); 517 + pr_err_client(cl, "unable to decode %.*s, got %d\n", 518 + _name_len, _name, err); 520 519 goto out_bad; 521 520 } 522 521 rde->name = oname.name; ··· 540 539 bad: 541 540 err = -EIO; 542 541 out_bad: 543 - pr_err("problem parsing dir contents %d\n", err); 542 + pr_err_client(cl, "problem parsing dir contents %d\n", err); 544 543 return err; 545 544 } 546 545 ··· 571 570 static int ceph_parse_deleg_inos(void **p, void *end, 572 571 struct ceph_mds_session *s) 573 572 { 573 + struct ceph_client *cl = s->s_mdsc->fsc->client; 574 574 u32 sets; 575 575 576 576 ceph_decode_32_safe(p, end, sets, bad); 577 - dout("got %u sets of delegated inodes\n", sets); 577 + doutc(cl, "got %u sets of delegated inodes\n", sets); 578 578 while (sets--) { 579 579 u64 start, len; 580 580 ··· 584 582 585 583 /* Don't accept a delegation of system inodes */ 586 584 if (start < CEPH_INO_SYSTEM_BASE) { 587 - 
pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n", 588 - start, len); 585 + pr_warn_ratelimited_client(cl, 586 + "ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n", 587 + start, len); 589 588 continue; 590 589 } 591 590 while (len--) { ··· 594 591 DELEGATED_INO_AVAILABLE, 595 592 GFP_KERNEL); 596 593 if (!err) { 597 - dout("added delegated inode 0x%llx\n", 598 - start - 1); 594 + doutc(cl, "added delegated inode 0x%llx\n", start - 1); 599 595 } else if (err == -EBUSY) { 600 - pr_warn("MDS delegated inode 0x%llx more than once.\n", 596 + pr_warn_client(cl, 597 + "MDS delegated inode 0x%llx more than once.\n", 601 598 start - 1); 602 599 } else { 603 600 return err; ··· 747 744 struct ceph_mds_request *req, u64 features) 748 745 { 749 746 struct ceph_mds_reply_info_parsed *info = &req->r_reply_info; 747 + struct ceph_client *cl = s->s_mdsc->fsc->client; 750 748 void *p, *end; 751 749 u32 len; 752 750 int err; ··· 787 783 bad: 788 784 err = -EIO; 789 785 out_bad: 790 - pr_err("mds parse_reply err %d\n", err); 786 + pr_err_client(cl, "mds parse_reply err %d\n", err); 791 787 ceph_msg_dump(msg); 792 788 return err; 793 789 } ··· 835 831 int ceph_wait_on_conflict_unlink(struct dentry *dentry) 836 832 { 837 833 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb); 834 + struct ceph_client *cl = fsc->client; 838 835 struct dentry *pdentry = dentry->d_parent; 839 836 struct dentry *udentry, *found = NULL; 840 837 struct ceph_dentry_info *di; ··· 860 855 goto next; 861 856 862 857 if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags)) 863 - pr_warn("%s dentry %p:%pd async unlink bit is not set\n", 864 - __func__, dentry, dentry); 858 + pr_warn_client(cl, "dentry %p:%pd async unlink bit is not set\n", 859 + dentry, dentry); 865 860 866 861 if (!d_same_name(udentry, pdentry, &dname)) 867 862 goto next; ··· 877 872 if (likely(!found)) 878 873 return 0; 879 874 880 - dout("%s dentry %p:%pd conflict 
with old %p:%pd\n", __func__, 881 - dentry, dentry, found, found); 875 + doutc(cl, "dentry %p:%pd conflict with old %p:%pd\n", dentry, dentry, 876 + found, found); 882 877 883 878 err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT, 884 879 TASK_KILLABLE); ··· 962 957 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, 963 958 int mds) 964 959 { 960 + struct ceph_client *cl = mdsc->fsc->client; 965 961 struct ceph_mds_session *s; 966 962 967 963 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) ··· 979 973 int newmax = 1 << get_count_order(mds + 1); 980 974 struct ceph_mds_session **sa; 981 975 982 - dout("%s: realloc to %d\n", __func__, newmax); 976 + doutc(cl, "realloc to %d\n", newmax); 983 977 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); 984 978 if (!sa) 985 979 goto fail_realloc; ··· 992 986 mdsc->max_sessions = newmax; 993 987 } 994 988 995 - dout("%s: mds%d\n", __func__, mds); 989 + doutc(cl, "mds%d\n", mds); 996 990 s->s_mdsc = mdsc; 997 991 s->s_mds = mds; 998 992 s->s_state = CEPH_MDS_SESSION_NEW; ··· 1035 1029 static void __unregister_session(struct ceph_mds_client *mdsc, 1036 1030 struct ceph_mds_session *s) 1037 1031 { 1038 - dout("__unregister_session mds%d %p\n", s->s_mds, s); 1032 + doutc(mdsc->fsc->client, "mds%d %p\n", s->s_mds, s); 1039 1033 BUG_ON(mdsc->sessions[s->s_mds] != s); 1040 1034 mdsc->sessions[s->s_mds] = NULL; 1041 1035 ceph_con_close(&s->s_con); ··· 1161 1155 struct ceph_mds_request *req, 1162 1156 struct inode *dir) 1163 1157 { 1158 + struct ceph_client *cl = mdsc->fsc->client; 1164 1159 int ret = 0; 1165 1160 1166 1161 req->r_tid = ++mdsc->last_tid; ··· 1169 1162 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation, 1170 1163 req->r_num_caps); 1171 1164 if (ret < 0) { 1172 - pr_err("__register_request %p " 1173 - "failed to reserve caps: %d\n", req, ret); 1165 + pr_err_client(cl, "%p failed to reserve caps: %d\n", 1166 + req, ret); 1174 1167 /* set req->r_err to fail early from 
__do_request */ 1175 1168 req->r_err = ret; 1176 1169 return; 1177 1170 } 1178 1171 } 1179 - dout("__register_request %p tid %lld\n", req, req->r_tid); 1172 + doutc(cl, "%p tid %lld\n", req, req->r_tid); 1180 1173 ceph_mdsc_get_request(req); 1181 1174 insert_request(&mdsc->request_tree, req); 1182 1175 ··· 1199 1192 static void __unregister_request(struct ceph_mds_client *mdsc, 1200 1193 struct ceph_mds_request *req) 1201 1194 { 1202 - dout("__unregister_request %p tid %lld\n", req, req->r_tid); 1195 + doutc(mdsc->fsc->client, "%p tid %lld\n", req, req->r_tid); 1203 1196 1204 1197 /* Never leave an unregistered request on an unsafe list! */ 1205 1198 list_del_init(&req->r_unsafe_item); ··· 1285 1278 int mds = -1; 1286 1279 u32 hash = req->r_direct_hash; 1287 1280 bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags); 1281 + struct ceph_client *cl = mdsc->fsc->client; 1288 1282 1289 1283 if (random) 1290 1284 *random = false; ··· 1297 1289 if (req->r_resend_mds >= 0 && 1298 1290 (__have_session(mdsc, req->r_resend_mds) || 1299 1291 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { 1300 - dout("%s using resend_mds mds%d\n", __func__, 1301 - req->r_resend_mds); 1292 + doutc(cl, "using resend_mds mds%d\n", req->r_resend_mds); 1302 1293 return req->r_resend_mds; 1303 1294 } 1304 1295 ··· 1314 1307 rcu_read_lock(); 1315 1308 inode = get_nonsnap_parent(req->r_dentry); 1316 1309 rcu_read_unlock(); 1317 - dout("%s using snapdir's parent %p\n", __func__, inode); 1310 + doutc(cl, "using snapdir's parent %p %llx.%llx\n", 1311 + inode, ceph_vinop(inode)); 1318 1312 } 1319 1313 } else if (req->r_dentry) { 1320 1314 /* ignore race with rename; old or new d_parent is okay */ ··· 1335 1327 /* direct snapped/virtual snapdir requests 1336 1328 * based on parent dir inode */ 1337 1329 inode = get_nonsnap_parent(parent); 1338 - dout("%s using nonsnap parent %p\n", __func__, inode); 1330 + doutc(cl, "using nonsnap parent %p %llx.%llx\n", 1331 + inode, 
ceph_vinop(inode)); 1339 1332 } else { 1340 1333 /* dentry target */ 1341 1334 inode = d_inode(req->r_dentry); ··· 1352 1343 rcu_read_unlock(); 1353 1344 } 1354 1345 1355 - dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash, 1356 - hash, mode); 1357 1346 if (!inode) 1358 1347 goto random; 1348 + 1349 + doutc(cl, "%p %llx.%llx is_hash=%d (0x%x) mode %d\n", inode, 1350 + ceph_vinop(inode), (int)is_hash, hash, mode); 1359 1351 ci = ceph_inode(inode); 1360 1352 1361 1353 if (is_hash && S_ISDIR(inode->i_mode)) { ··· 1372 1362 get_random_bytes(&r, 1); 1373 1363 r %= frag.ndist; 1374 1364 mds = frag.dist[r]; 1375 - dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n", 1376 - __func__, inode, ceph_vinop(inode), 1377 - frag.frag, mds, (int)r, frag.ndist); 1365 + doutc(cl, "%p %llx.%llx frag %u mds%d (%d/%d)\n", 1366 + inode, ceph_vinop(inode), frag.frag, 1367 + mds, (int)r, frag.ndist); 1378 1368 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= 1379 1369 CEPH_MDS_STATE_ACTIVE && 1380 1370 !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds)) ··· 1387 1377 if (frag.mds >= 0) { 1388 1378 /* choose auth mds */ 1389 1379 mds = frag.mds; 1390 - dout("%s %p %llx.%llx frag %u mds%d (auth)\n", 1391 - __func__, inode, ceph_vinop(inode), 1392 - frag.frag, mds); 1380 + doutc(cl, "%p %llx.%llx frag %u mds%d (auth)\n", 1381 + inode, ceph_vinop(inode), frag.frag, mds); 1393 1382 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= 1394 1383 CEPH_MDS_STATE_ACTIVE) { 1395 1384 if (!ceph_mdsmap_is_laggy(mdsc->mdsmap, ··· 1412 1403 goto random; 1413 1404 } 1414 1405 mds = cap->session->s_mds; 1415 - dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__, 1416 - inode, ceph_vinop(inode), mds, 1417 - cap == ci->i_auth_cap ? "auth " : "", cap); 1406 + doutc(cl, "%p %llx.%llx mds%d (%scap %p)\n", inode, 1407 + ceph_vinop(inode), mds, 1408 + cap == ci->i_auth_cap ? 
"auth " : "", cap); 1418 1409 spin_unlock(&ci->i_ceph_lock); 1419 1410 out: 1420 1411 iput(inode); ··· 1425 1416 *random = true; 1426 1417 1427 1418 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); 1428 - dout("%s chose random mds%d\n", __func__, mds); 1419 + doutc(cl, "chose random mds%d\n", mds); 1429 1420 return mds; 1430 1421 } 1431 1422 ··· 1538 1529 int metadata_key_count = 0; 1539 1530 struct ceph_options *opt = mdsc->fsc->client->options; 1540 1531 struct ceph_mount_options *fsopt = mdsc->fsc->mount_options; 1532 + struct ceph_client *cl = mdsc->fsc->client; 1541 1533 size_t size, count; 1542 1534 void *p, *end; 1543 1535 int ret; ··· 1577 1567 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes, 1578 1568 GFP_NOFS, false); 1579 1569 if (!msg) { 1580 - pr_err("ENOMEM creating session open msg\n"); 1570 + pr_err_client(cl, "ENOMEM creating session open msg\n"); 1581 1571 return ERR_PTR(-ENOMEM); 1582 1572 } 1583 1573 p = msg->front.iov_base; ··· 1617 1607 1618 1608 ret = encode_supported_features(&p, end); 1619 1609 if (ret) { 1620 - pr_err("encode_supported_features failed!\n"); 1610 + pr_err_client(cl, "encode_supported_features failed!\n"); 1621 1611 ceph_msg_put(msg); 1622 1612 return ERR_PTR(ret); 1623 1613 } 1624 1614 1625 1615 ret = encode_metric_spec(&p, end); 1626 1616 if (ret) { 1627 - pr_err("encode_metric_spec failed!\n"); 1617 + pr_err_client(cl, "encode_metric_spec failed!\n"); 1628 1618 ceph_msg_put(msg); 1629 1619 return ERR_PTR(ret); 1630 1620 } ··· 1652 1642 1653 1643 /* wait for mds to go active? 
*/ 1654 1644 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); 1655 - dout("open_session to mds%d (%s)\n", mds, 1656 - ceph_mds_state_name(mstate)); 1645 + doutc(mdsc->fsc->client, "open_session to mds%d (%s)\n", mds, 1646 + ceph_mds_state_name(mstate)); 1657 1647 session->s_state = CEPH_MDS_SESSION_OPENING; 1658 1648 session->s_renew_requested = jiffies; 1659 1649 ··· 1696 1686 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target) 1697 1687 { 1698 1688 struct ceph_mds_session *session; 1689 + struct ceph_client *cl = mdsc->fsc->client; 1699 1690 1700 - dout("open_export_target_session to mds%d\n", target); 1691 + doutc(cl, "to mds%d\n", target); 1701 1692 1702 1693 mutex_lock(&mdsc->mutex); 1703 1694 session = __open_export_target_session(mdsc, target); ··· 1713 1702 struct ceph_mds_info *mi; 1714 1703 struct ceph_mds_session *ts; 1715 1704 int i, mds = session->s_mds; 1705 + struct ceph_client *cl = mdsc->fsc->client; 1716 1706 1717 1707 if (mds >= mdsc->mdsmap->possible_max_rank) 1718 1708 return; 1719 1709 1720 1710 mi = &mdsc->mdsmap->m_info[mds]; 1721 - dout("open_export_target_sessions for mds%d (%d targets)\n", 1722 - session->s_mds, mi->num_export_targets); 1711 + doutc(cl, "for mds%d (%d targets)\n", session->s_mds, 1712 + mi->num_export_targets); 1723 1713 1724 1714 for (i = 0; i < mi->num_export_targets; i++) { 1725 1715 ts = __open_export_target_session(mdsc, mi->export_targets[i]); ··· 1743 1731 static void detach_cap_releases(struct ceph_mds_session *session, 1744 1732 struct list_head *target) 1745 1733 { 1734 + struct ceph_client *cl = session->s_mdsc->fsc->client; 1735 + 1746 1736 lockdep_assert_held(&session->s_cap_lock); 1747 1737 1748 1738 list_splice_init(&session->s_cap_releases, target); 1749 1739 session->s_num_cap_releases = 0; 1750 - dout("dispose_cap_releases mds%d\n", session->s_mds); 1740 + doutc(cl, "mds%d\n", session->s_mds); 1751 1741 } 1752 1742 1753 1743 static void dispose_cap_releases(struct 
ceph_mds_client *mdsc, ··· 1767 1753 static void cleanup_session_requests(struct ceph_mds_client *mdsc, 1768 1754 struct ceph_mds_session *session) 1769 1755 { 1756 + struct ceph_client *cl = mdsc->fsc->client; 1770 1757 struct ceph_mds_request *req; 1771 1758 struct rb_node *p; 1772 1759 1773 - dout("cleanup_session_requests mds%d\n", session->s_mds); 1760 + doutc(cl, "mds%d\n", session->s_mds); 1774 1761 mutex_lock(&mdsc->mutex); 1775 1762 while (!list_empty(&session->s_unsafe)) { 1776 1763 req = list_first_entry(&session->s_unsafe, 1777 1764 struct ceph_mds_request, r_unsafe_item); 1778 - pr_warn_ratelimited(" dropping unsafe request %llu\n", 1779 - req->r_tid); 1765 + pr_warn_ratelimited_client(cl, " dropping unsafe request %llu\n", 1766 + req->r_tid); 1780 1767 if (req->r_target_inode) 1781 1768 mapping_set_error(req->r_target_inode->i_mapping, -EIO); 1782 1769 if (req->r_unsafe_dir) ··· 1806 1791 int (*cb)(struct inode *, int mds, void *), 1807 1792 void *arg) 1808 1793 { 1794 + struct ceph_client *cl = session->s_mdsc->fsc->client; 1809 1795 struct list_head *p; 1810 1796 struct ceph_cap *cap; 1811 1797 struct inode *inode, *last_inode = NULL; 1812 1798 struct ceph_cap *old_cap = NULL; 1813 1799 int ret; 1814 1800 1815 - dout("iterate_session_caps %p mds%d\n", session, session->s_mds); 1801 + doutc(cl, "%p mds%d\n", session, session->s_mds); 1816 1802 spin_lock(&session->s_cap_lock); 1817 1803 p = session->s_caps.next; 1818 1804 while (p != &session->s_caps) { ··· 1844 1828 spin_lock(&session->s_cap_lock); 1845 1829 p = p->next; 1846 1830 if (!cap->ci) { 1847 - dout("iterate_session_caps finishing cap %p removal\n", 1848 - cap); 1831 + doutc(cl, "finishing cap %p removal\n", cap); 1849 1832 BUG_ON(cap->session != session); 1850 1833 cap->session = NULL; 1851 1834 list_del_init(&cap->session_caps); ··· 1873 1858 static int remove_session_caps_cb(struct inode *inode, int mds, void *arg) 1874 1859 { 1875 1860 struct ceph_inode_info *ci = ceph_inode(inode); 1861 
+ struct ceph_client *cl = ceph_inode_to_client(inode); 1876 1862 bool invalidate = false; 1877 1863 struct ceph_cap *cap; 1878 1864 int iputs = 0; ··· 1881 1865 spin_lock(&ci->i_ceph_lock); 1882 1866 cap = __get_cap_for_mds(ci, mds); 1883 1867 if (cap) { 1884 - dout(" removing cap %p, ci is %p, inode is %p\n", 1885 - cap, ci, &ci->netfs.inode); 1868 + doutc(cl, " removing cap %p, ci is %p, inode is %p\n", 1869 + cap, ci, &ci->netfs.inode); 1886 1870 1887 1871 iputs = ceph_purge_inode_cap(inode, cap, &invalidate); 1888 1872 } ··· 1906 1890 struct super_block *sb = fsc->sb; 1907 1891 LIST_HEAD(dispose); 1908 1892 1909 - dout("remove_session_caps on %p\n", session); 1893 + doutc(fsc->client, "on %p\n", session); 1910 1894 ceph_iterate_session_caps(session, remove_session_caps_cb, fsc); 1911 1895 1912 1896 wake_up_all(&fsc->mdsc->cap_flushing_wq); ··· 1987 1971 1988 1972 static void wake_up_session_caps(struct ceph_mds_session *session, int ev) 1989 1973 { 1990 - dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); 1974 + struct ceph_client *cl = session->s_mdsc->fsc->client; 1975 + 1976 + doutc(cl, "session %p mds%d\n", session, session->s_mds); 1991 1977 ceph_iterate_session_caps(session, wake_up_session_cb, 1992 1978 (void *)(unsigned long)ev); 1993 1979 } ··· 2003 1985 static int send_renew_caps(struct ceph_mds_client *mdsc, 2004 1986 struct ceph_mds_session *session) 2005 1987 { 1988 + struct ceph_client *cl = mdsc->fsc->client; 2006 1989 struct ceph_msg *msg; 2007 1990 int state; 2008 1991 2009 1992 if (time_after_eq(jiffies, session->s_cap_ttl) && 2010 1993 time_after_eq(session->s_cap_ttl, session->s_renew_requested)) 2011 - pr_info("mds%d caps stale\n", session->s_mds); 1994 + pr_info_client(cl, "mds%d caps stale\n", session->s_mds); 2012 1995 session->s_renew_requested = jiffies; 2013 1996 2014 1997 /* do not try to renew caps until a recovering mds has reconnected 2015 1998 * with its clients. 
*/ 2016 1999 state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); 2017 2000 if (state < CEPH_MDS_STATE_RECONNECT) { 2018 - dout("send_renew_caps ignoring mds%d (%s)\n", 2019 - session->s_mds, ceph_mds_state_name(state)); 2001 + doutc(cl, "ignoring mds%d (%s)\n", session->s_mds, 2002 + ceph_mds_state_name(state)); 2020 2003 return 0; 2021 2004 } 2022 2005 2023 - dout("send_renew_caps to mds%d (%s)\n", session->s_mds, 2024 - ceph_mds_state_name(state)); 2006 + doutc(cl, "to mds%d (%s)\n", session->s_mds, 2007 + ceph_mds_state_name(state)); 2025 2008 msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, 2026 2009 ++session->s_renew_seq); 2027 2010 if (!msg) ··· 2034 2015 static int send_flushmsg_ack(struct ceph_mds_client *mdsc, 2035 2016 struct ceph_mds_session *session, u64 seq) 2036 2017 { 2018 + struct ceph_client *cl = mdsc->fsc->client; 2037 2019 struct ceph_msg *msg; 2038 2020 2039 - dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n", 2040 - session->s_mds, ceph_session_state_name(session->s_state), seq); 2021 + doutc(cl, "to mds%d (%s)s seq %lld\n", session->s_mds, 2022 + ceph_session_state_name(session->s_state), seq); 2041 2023 msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); 2042 2024 if (!msg) 2043 2025 return -ENOMEM; ··· 2055 2035 static void renewed_caps(struct ceph_mds_client *mdsc, 2056 2036 struct ceph_mds_session *session, int is_renew) 2057 2037 { 2038 + struct ceph_client *cl = mdsc->fsc->client; 2058 2039 int was_stale; 2059 2040 int wake = 0; 2060 2041 ··· 2067 2046 2068 2047 if (was_stale) { 2069 2048 if (time_before(jiffies, session->s_cap_ttl)) { 2070 - pr_info("mds%d caps renewed\n", session->s_mds); 2049 + pr_info_client(cl, "mds%d caps renewed\n", 2050 + session->s_mds); 2071 2051 wake = 1; 2072 2052 } else { 2073 - pr_info("mds%d caps still stale\n", session->s_mds); 2053 + pr_info_client(cl, "mds%d caps still stale\n", 2054 + session->s_mds); 2074 2055 } 2075 2056 } 2076 - dout("renewed_caps mds%d ttl now 
%lu, was %s, now %s\n", 2077 - session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh", 2078 - time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh"); 2057 + doutc(cl, "mds%d ttl now %lu, was %s, now %s\n", session->s_mds, 2058 + session->s_cap_ttl, was_stale ? "stale" : "fresh", 2059 + time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh"); 2079 2060 spin_unlock(&session->s_cap_lock); 2080 2061 2081 2062 if (wake) ··· 2089 2066 */ 2090 2067 static int request_close_session(struct ceph_mds_session *session) 2091 2068 { 2069 + struct ceph_client *cl = session->s_mdsc->fsc->client; 2092 2070 struct ceph_msg *msg; 2093 2071 2094 - dout("request_close_session mds%d state %s seq %lld\n", 2095 - session->s_mds, ceph_session_state_name(session->s_state), 2096 - session->s_seq); 2072 + doutc(cl, "mds%d state %s seq %lld\n", session->s_mds, 2073 + ceph_session_state_name(session->s_state), session->s_seq); 2097 2074 msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE, 2098 2075 session->s_seq); 2099 2076 if (!msg) ··· 2150 2127 static int trim_caps_cb(struct inode *inode, int mds, void *arg) 2151 2128 { 2152 2129 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 2130 + struct ceph_client *cl = mdsc->fsc->client; 2153 2131 int *remaining = arg; 2154 2132 struct ceph_inode_info *ci = ceph_inode(inode); 2155 2133 int used, wanted, oissued, mine; ··· 2170 2146 wanted = __ceph_caps_file_wanted(ci); 2171 2147 oissued = __ceph_caps_issued_other(ci, cap); 2172 2148 2173 - dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n", 2174 - inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), 2175 - ceph_cap_string(used), ceph_cap_string(wanted)); 2149 + doutc(cl, "%p %llx.%llx cap %p mine %s oissued %s used %s wanted %s\n", 2150 + inode, ceph_vinop(inode), cap, ceph_cap_string(mine), 2151 + ceph_cap_string(oissued), ceph_cap_string(used), 2152 + ceph_cap_string(wanted)); 2176 2153 if (cap == ci->i_auth_cap) { 2177 2154 if 
(ci->i_dirty_caps || ci->i_flushing_caps || 2178 2155 !list_empty(&ci->i_cap_snaps)) ··· 2213 2188 count = atomic_read(&inode->i_count); 2214 2189 if (count == 1) 2215 2190 (*remaining)--; 2216 - dout("trim_caps_cb %p cap %p pruned, count now %d\n", 2217 - inode, cap, count); 2191 + doutc(cl, "%p %llx.%llx cap %p pruned, count now %d\n", 2192 + inode, ceph_vinop(inode), cap, count); 2218 2193 } else { 2219 2194 dput(dentry); 2220 2195 } ··· 2233 2208 struct ceph_mds_session *session, 2234 2209 int max_caps) 2235 2210 { 2211 + struct ceph_client *cl = mdsc->fsc->client; 2236 2212 int trim_caps = session->s_nr_caps - max_caps; 2237 2213 2238 - dout("trim_caps mds%d start: %d / %d, trim %d\n", 2239 - session->s_mds, session->s_nr_caps, max_caps, trim_caps); 2214 + doutc(cl, "mds%d start: %d / %d, trim %d\n", session->s_mds, 2215 + session->s_nr_caps, max_caps, trim_caps); 2240 2216 if (trim_caps > 0) { 2241 2217 int remaining = trim_caps; 2242 2218 2243 2219 ceph_iterate_session_caps(session, trim_caps_cb, &remaining); 2244 - dout("trim_caps mds%d done: %d / %d, trimmed %d\n", 2245 - session->s_mds, session->s_nr_caps, max_caps, 2246 - trim_caps - remaining); 2220 + doutc(cl, "mds%d done: %d / %d, trimmed %d\n", 2221 + session->s_mds, session->s_nr_caps, max_caps, 2222 + trim_caps - remaining); 2247 2223 } 2248 2224 2249 2225 ceph_flush_cap_releases(mdsc, session); ··· 2254 2228 static int check_caps_flush(struct ceph_mds_client *mdsc, 2255 2229 u64 want_flush_tid) 2256 2230 { 2231 + struct ceph_client *cl = mdsc->fsc->client; 2257 2232 int ret = 1; 2258 2233 2259 2234 spin_lock(&mdsc->cap_dirty_lock); ··· 2263 2236 list_first_entry(&mdsc->cap_flush_list, 2264 2237 struct ceph_cap_flush, g_list); 2265 2238 if (cf->tid <= want_flush_tid) { 2266 - dout("check_caps_flush still flushing tid " 2267 - "%llu <= %llu\n", cf->tid, want_flush_tid); 2239 + doutc(cl, "still flushing tid %llu <= %llu\n", 2240 + cf->tid, want_flush_tid); 2268 2241 ret = 0; 2269 2242 } 2270 2243 } 
··· 2280 2253 static void wait_caps_flush(struct ceph_mds_client *mdsc, 2281 2254 u64 want_flush_tid) 2282 2255 { 2283 - dout("check_caps_flush want %llu\n", want_flush_tid); 2256 + struct ceph_client *cl = mdsc->fsc->client; 2257 + 2258 + doutc(cl, "want %llu\n", want_flush_tid); 2284 2259 2285 2260 wait_event(mdsc->cap_flushing_wq, 2286 2261 check_caps_flush(mdsc, want_flush_tid)); 2287 2262 2288 - dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid); 2263 + doutc(cl, "ok, flushed thru %llu\n", want_flush_tid); 2289 2264 } 2290 2265 2291 2266 /* ··· 2296 2267 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc, 2297 2268 struct ceph_mds_session *session) 2298 2269 { 2270 + struct ceph_client *cl = mdsc->fsc->client; 2299 2271 struct ceph_msg *msg = NULL; 2300 2272 struct ceph_mds_cap_release *head; 2301 2273 struct ceph_mds_cap_item *item; ··· 2355 2325 msg->front.iov_len += sizeof(*cap_barrier); 2356 2326 2357 2327 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2358 - dout("send_cap_releases mds%d %p\n", session->s_mds, msg); 2328 + doutc(cl, "mds%d %p\n", session->s_mds, msg); 2359 2329 ceph_con_send(&session->s_con, msg); 2360 2330 msg = NULL; 2361 2331 } ··· 2375 2345 msg->front.iov_len += sizeof(*cap_barrier); 2376 2346 2377 2347 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2378 - dout("send_cap_releases mds%d %p\n", session->s_mds, msg); 2348 + doutc(cl, "mds%d %p\n", session->s_mds, msg); 2379 2349 ceph_con_send(&session->s_con, msg); 2380 2350 } 2381 2351 return; 2382 2352 out_err: 2383 - pr_err("send_cap_releases mds%d, failed to allocate message\n", 2384 - session->s_mds); 2353 + pr_err_client(cl, "mds%d, failed to allocate message\n", 2354 + session->s_mds); 2385 2355 spin_lock(&session->s_cap_lock); 2386 2356 list_splice(&tmp_list, &session->s_cap_releases); 2387 2357 session->s_num_cap_releases += num_cap_releases; ··· 2404 2374 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc, 2405 2375 struct 
ceph_mds_session *session) 2406 2376 { 2377 + struct ceph_client *cl = mdsc->fsc->client; 2407 2378 if (mdsc->stopping) 2408 2379 return; 2409 2380 2410 2381 ceph_get_mds_session(session); 2411 2382 if (queue_work(mdsc->fsc->cap_wq, 2412 2383 &session->s_cap_release_work)) { 2413 - dout("cap release work queued\n"); 2384 + doutc(cl, "cap release work queued\n"); 2414 2385 } else { 2415 2386 ceph_put_mds_session(session); 2416 - dout("failed to queue cap release work\n"); 2387 + doutc(cl, "failed to queue cap release work\n"); 2417 2388 } 2418 2389 } 2419 2390 ··· 2442 2411 2443 2412 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc) 2444 2413 { 2414 + struct ceph_client *cl = mdsc->fsc->client; 2445 2415 if (mdsc->stopping) 2446 2416 return; 2447 2417 2448 2418 if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) { 2449 - dout("caps reclaim work queued\n"); 2419 + doutc(cl, "caps reclaim work queued\n"); 2450 2420 } else { 2451 - dout("failed to queue caps release work\n"); 2421 + doutc(cl, "failed to queue caps release work\n"); 2452 2422 } 2453 2423 } 2454 2424 ··· 2644 2612 char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry, 2645 2613 int *plen, u64 *pbase, int for_wire) 2646 2614 { 2615 + struct ceph_client *cl = mdsc->fsc->client; 2647 2616 struct dentry *cur; 2648 2617 struct inode *inode; 2649 2618 char *path; ··· 2670 2637 spin_lock(&cur->d_lock); 2671 2638 inode = d_inode(cur); 2672 2639 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { 2673 - dout("build_path path+%d: %p SNAPDIR\n", 2674 - pos, cur); 2640 + doutc(cl, "path+%d: %p SNAPDIR\n", pos, cur); 2675 2641 spin_unlock(&cur->d_lock); 2676 2642 parent = dget_parent(cur); 2677 2643 } else if (for_wire && inode && dentry != cur && ··· 2748 2716 * A rename didn't occur, but somehow we didn't end up where 2749 2717 * we thought we would. Throw a warning and try again. 
2750 2718 */ 2751 - pr_warn("build_path did not end path lookup where expected (pos = %d)\n", 2752 - pos); 2719 + pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n", 2720 + pos); 2753 2721 goto retry; 2754 2722 } 2755 2723 2756 2724 *pbase = base; 2757 2725 *plen = PATH_MAX - 1 - pos; 2758 - dout("build_path on %p %d built %llx '%.*s'\n", 2759 - dentry, d_count(dentry), base, *plen, path + pos); 2726 + doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry), 2727 + base, *plen, path + pos); 2760 2728 return path + pos; 2761 2729 } 2762 2730 ··· 2819 2787 int *pathlen, u64 *ino, bool *freepath, 2820 2788 bool parent_locked) 2821 2789 { 2790 + struct ceph_client *cl = mdsc->fsc->client; 2822 2791 int r = 0; 2823 2792 2824 2793 if (rinode) { 2825 2794 r = build_inode_path(rinode, ppath, pathlen, ino, freepath); 2826 - dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), 2827 - ceph_snap(rinode)); 2795 + doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode), 2796 + ceph_snap(rinode)); 2828 2797 } else if (rdentry) { 2829 2798 r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino, 2830 2799 freepath, parent_locked); 2831 - dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, 2832 - *ppath); 2800 + doutc(cl, " dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath); 2833 2801 } else if (rpath || rino) { 2834 2802 *ino = rino; 2835 2803 *ppath = rpath; 2836 2804 *pathlen = rpath ? 
strlen(rpath) : 0; 2837 - dout(" path %.*s\n", *pathlen, rpath); 2805 + doutc(cl, " path %.*s\n", *pathlen, rpath); 2838 2806 } 2839 2807 2840 2808 return r; ··· 3135 3103 { 3136 3104 int mds = session->s_mds; 3137 3105 struct ceph_mds_client *mdsc = session->s_mdsc; 3106 + struct ceph_client *cl = mdsc->fsc->client; 3138 3107 struct ceph_mds_request_head_legacy *lhead; 3139 3108 struct ceph_mds_request_head *nhead; 3140 3109 struct ceph_msg *msg; ··· 3154 3121 old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE); 3155 3122 if ((old_version && req->r_attempts >= old_max_retry) || 3156 3123 ((uint32_t)req->r_attempts >= U32_MAX)) { 3157 - pr_warn_ratelimited("%s request tid %llu seq overflow\n", 3158 - __func__, req->r_tid); 3124 + pr_warn_ratelimited_client(cl, "request tid %llu seq overflow\n", 3125 + req->r_tid); 3159 3126 return -EMULTIHOP; 3160 3127 } 3161 3128 } ··· 3170 3137 else 3171 3138 req->r_sent_on_mseq = -1; 3172 3139 } 3173 - dout("%s %p tid %lld %s (attempt %d)\n", __func__, req, 3174 - req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); 3140 + doutc(cl, "%p tid %lld %s (attempt %d)\n", req, req->r_tid, 3141 + ceph_mds_op_name(req->r_op), req->r_attempts); 3175 3142 3176 3143 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 3177 3144 void *p; ··· 3239 3206 nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1); 3240 3207 } 3241 3208 3242 - dout(" r_parent = %p\n", req->r_parent); 3209 + doutc(cl, " r_parent = %p\n", req->r_parent); 3243 3210 return 0; 3244 3211 } 3245 3212 ··· 3267 3234 static void __do_request(struct ceph_mds_client *mdsc, 3268 3235 struct ceph_mds_request *req) 3269 3236 { 3237 + struct ceph_client *cl = mdsc->fsc->client; 3270 3238 struct ceph_mds_session *session = NULL; 3271 3239 int mds = -1; 3272 3240 int err = 0; ··· 3280 3246 } 3281 3247 3282 3248 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) { 3283 - dout("do_request metadata corrupted\n"); 3249 + doutc(cl, "metadata corrupted\n"); 3284 3250 
err = -EIO; 3285 3251 goto finish; 3286 3252 } 3287 3253 if (req->r_timeout && 3288 3254 time_after_eq(jiffies, req->r_started + req->r_timeout)) { 3289 - dout("do_request timed out\n"); 3255 + doutc(cl, "timed out\n"); 3290 3256 err = -ETIMEDOUT; 3291 3257 goto finish; 3292 3258 } 3293 3259 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 3294 - dout("do_request forced umount\n"); 3260 + doutc(cl, "forced umount\n"); 3295 3261 err = -EIO; 3296 3262 goto finish; 3297 3263 } 3298 3264 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { 3299 3265 if (mdsc->mdsmap_err) { 3300 3266 err = mdsc->mdsmap_err; 3301 - dout("do_request mdsmap err %d\n", err); 3267 + doutc(cl, "mdsmap err %d\n", err); 3302 3268 goto finish; 3303 3269 } 3304 3270 if (mdsc->mdsmap->m_epoch == 0) { 3305 - dout("do_request no mdsmap, waiting for map\n"); 3271 + doutc(cl, "no mdsmap, waiting for map\n"); 3306 3272 list_add(&req->r_wait, &mdsc->waiting_for_map); 3307 3273 return; 3308 3274 } ··· 3323 3289 err = -EJUKEBOX; 3324 3290 goto finish; 3325 3291 } 3326 - dout("do_request no mds or not active, waiting for map\n"); 3292 + doutc(cl, "no mds or not active, waiting for map\n"); 3327 3293 list_add(&req->r_wait, &mdsc->waiting_for_map); 3328 3294 return; 3329 3295 } ··· 3339 3305 } 3340 3306 req->r_session = ceph_get_mds_session(session); 3341 3307 3342 - dout("do_request mds%d session %p state %s\n", mds, session, 3343 - ceph_session_state_name(session->s_state)); 3308 + doutc(cl, "mds%d session %p state %s\n", mds, session, 3309 + ceph_session_state_name(session->s_state)); 3344 3310 3345 3311 /* 3346 3312 * The old ceph will crash the MDSs when see unknown OPs ··· 3431 3397 spin_lock(&ci->i_ceph_lock); 3432 3398 cap = ci->i_auth_cap; 3433 3399 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) { 3434 - dout("do_request session changed for auth cap %d -> %d\n", 3435 - cap->session->s_mds, session->s_mds); 3400 + doutc(cl, "session changed for auth cap %d -> 
%d\n", 3401 + cap->session->s_mds, session->s_mds); 3436 3402 3437 3403 /* Remove the auth cap from old session */ 3438 3404 spin_lock(&cap->session->s_cap_lock); ··· 3459 3425 ceph_put_mds_session(session); 3460 3426 finish: 3461 3427 if (err) { 3462 - dout("__do_request early error %d\n", err); 3428 + doutc(cl, "early error %d\n", err); 3463 3429 req->r_err = err; 3464 3430 complete_request(mdsc, req); 3465 3431 __unregister_request(mdsc, req); ··· 3473 3439 static void __wake_requests(struct ceph_mds_client *mdsc, 3474 3440 struct list_head *head) 3475 3441 { 3442 + struct ceph_client *cl = mdsc->fsc->client; 3476 3443 struct ceph_mds_request *req; 3477 3444 LIST_HEAD(tmp_list); 3478 3445 ··· 3483 3448 req = list_entry(tmp_list.next, 3484 3449 struct ceph_mds_request, r_wait); 3485 3450 list_del_init(&req->r_wait); 3486 - dout(" wake request %p tid %llu\n", req, req->r_tid); 3451 + doutc(cl, " wake request %p tid %llu\n", req, 3452 + req->r_tid); 3487 3453 __do_request(mdsc, req); 3488 3454 } 3489 3455 } ··· 3495 3459 */ 3496 3460 static void kick_requests(struct ceph_mds_client *mdsc, int mds) 3497 3461 { 3462 + struct ceph_client *cl = mdsc->fsc->client; 3498 3463 struct ceph_mds_request *req; 3499 3464 struct rb_node *p = rb_first(&mdsc->request_tree); 3500 3465 3501 - dout("kick_requests mds%d\n", mds); 3466 + doutc(cl, "kick_requests mds%d\n", mds); 3502 3467 while (p) { 3503 3468 req = rb_entry(p, struct ceph_mds_request, r_node); 3504 3469 p = rb_next(p); ··· 3509 3472 continue; /* only new requests */ 3510 3473 if (req->r_session && 3511 3474 req->r_session->s_mds == mds) { 3512 - dout(" kicking tid %llu\n", req->r_tid); 3475 + doutc(cl, " kicking tid %llu\n", req->r_tid); 3513 3476 list_del_init(&req->r_wait); 3514 3477 __do_request(mdsc, req); 3515 3478 } ··· 3519 3482 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir, 3520 3483 struct ceph_mds_request *req) 3521 3484 { 3485 + struct ceph_client *cl = mdsc->fsc->client; 3522 
3486 int err = 0; 3523 3487 3524 3488 /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */ ··· 3541 3503 if (req->r_inode) { 3542 3504 err = ceph_wait_on_async_create(req->r_inode); 3543 3505 if (err) { 3544 - dout("%s: wait for async create returned: %d\n", 3545 - __func__, err); 3506 + doutc(cl, "wait for async create returned: %d\n", err); 3546 3507 return err; 3547 3508 } 3548 3509 } ··· 3549 3512 if (!err && req->r_old_inode) { 3550 3513 err = ceph_wait_on_async_create(req->r_old_inode); 3551 3514 if (err) { 3552 - dout("%s: wait for async create returned: %d\n", 3553 - __func__, err); 3515 + doutc(cl, "wait for async create returned: %d\n", err); 3554 3516 return err; 3555 3517 } 3556 3518 } 3557 3519 3558 - dout("submit_request on %p for inode %p\n", req, dir); 3520 + doutc(cl, "submit_request on %p for inode %p\n", req, dir); 3559 3521 mutex_lock(&mdsc->mutex); 3560 3522 __register_request(mdsc, req, dir); 3561 3523 __do_request(mdsc, req); ··· 3567 3531 struct ceph_mds_request *req, 3568 3532 ceph_mds_request_wait_callback_t wait_func) 3569 3533 { 3534 + struct ceph_client *cl = mdsc->fsc->client; 3570 3535 int err; 3571 3536 3572 3537 /* wait */ 3573 - dout("do_request waiting\n"); 3538 + doutc(cl, "do_request waiting\n"); 3574 3539 if (wait_func) { 3575 3540 err = wait_func(mdsc, req); 3576 3541 } else { ··· 3585 3548 else 3586 3549 err = timeleft; /* killed */ 3587 3550 } 3588 - dout("do_request waited, got %d\n", err); 3551 + doutc(cl, "do_request waited, got %d\n", err); 3589 3552 mutex_lock(&mdsc->mutex); 3590 3553 3591 3554 /* only abort if we didn't race with a real reply */ 3592 3555 if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { 3593 3556 err = le32_to_cpu(req->r_reply_info.head->result); 3594 3557 } else if (err < 0) { 3595 - dout("aborted request %lld with %d\n", req->r_tid, err); 3558 + doutc(cl, "aborted request %lld with %d\n", req->r_tid, err); 3596 3559 3597 3560 /* 3598 3561 * ensure we aren't running concurrently with 
··· 3623 3586 struct inode *dir, 3624 3587 struct ceph_mds_request *req) 3625 3588 { 3589 + struct ceph_client *cl = mdsc->fsc->client; 3626 3590 int err; 3627 3591 3628 - dout("do_request on %p\n", req); 3592 + doutc(cl, "do_request on %p\n", req); 3629 3593 3630 3594 /* issue */ 3631 3595 err = ceph_mdsc_submit_request(mdsc, dir, req); 3632 3596 if (!err) 3633 3597 err = ceph_mdsc_wait_request(mdsc, req, NULL); 3634 - dout("do_request %p done, result %d\n", req, err); 3598 + doutc(cl, "do_request %p done, result %d\n", req, err); 3635 3599 return err; 3636 3600 } 3637 3601 ··· 3644 3606 { 3645 3607 struct inode *dir = req->r_parent; 3646 3608 struct inode *old_dir = req->r_old_dentry_dir; 3609 + struct ceph_client *cl = req->r_mdsc->fsc->client; 3647 3610 3648 - dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir); 3611 + doutc(cl, "invalidate_dir_request %p %p (complete, lease(s))\n", 3612 + dir, old_dir); 3649 3613 3650 3614 ceph_dir_clear_complete(dir); 3651 3615 if (old_dir) ··· 3668 3628 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) 3669 3629 { 3670 3630 struct ceph_mds_client *mdsc = session->s_mdsc; 3631 + struct ceph_client *cl = mdsc->fsc->client; 3671 3632 struct ceph_mds_request *req; 3672 3633 struct ceph_mds_reply_head *head = msg->front.iov_base; 3673 3634 struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ ··· 3679 3638 bool close_sessions = false; 3680 3639 3681 3640 if (msg->front.iov_len < sizeof(*head)) { 3682 - pr_err("mdsc_handle_reply got corrupt (short) reply\n"); 3641 + pr_err_client(cl, "got corrupt (short) reply\n"); 3683 3642 ceph_msg_dump(msg); 3684 3643 return; 3685 3644 } ··· 3689 3648 mutex_lock(&mdsc->mutex); 3690 3649 req = lookup_get_request(mdsc, tid); 3691 3650 if (!req) { 3692 - dout("handle_reply on unknown tid %llu\n", tid); 3651 + doutc(cl, "on unknown tid %llu\n", tid); 3693 3652 mutex_unlock(&mdsc->mutex); 3694 3653 return; 3695 3654 } 3696 - 
dout("handle_reply %p\n", req); 3655 + doutc(cl, "handle_reply %p\n", req); 3697 3656 3698 3657 /* correct session? */ 3699 3658 if (req->r_session != session) { 3700 - pr_err("mdsc_handle_reply got %llu on session mds%d" 3701 - " not mds%d\n", tid, session->s_mds, 3702 - req->r_session ? req->r_session->s_mds : -1); 3659 + pr_err_client(cl, "got %llu on session mds%d not mds%d\n", 3660 + tid, session->s_mds, 3661 + req->r_session ? req->r_session->s_mds : -1); 3703 3662 mutex_unlock(&mdsc->mutex); 3704 3663 goto out; 3705 3664 } ··· 3707 3666 /* dup? */ 3708 3667 if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || 3709 3668 (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { 3710 - pr_warn("got a dup %s reply on %llu from mds%d\n", 3711 - head->safe ? "safe" : "unsafe", tid, mds); 3669 + pr_warn_client(cl, "got a dup %s reply on %llu from mds%d\n", 3670 + head->safe ? "safe" : "unsafe", tid, mds); 3712 3671 mutex_unlock(&mdsc->mutex); 3713 3672 goto out; 3714 3673 } 3715 3674 if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) { 3716 - pr_warn("got unsafe after safe on %llu from mds%d\n", 3717 - tid, mds); 3675 + pr_warn_client(cl, "got unsafe after safe on %llu from mds%d\n", 3676 + tid, mds); 3718 3677 mutex_unlock(&mdsc->mutex); 3719 3678 goto out; 3720 3679 } ··· 3737 3696 * response. And even if it did, there is nothing 3738 3697 * useful we could do with a revised return value. 
3739 3698 */ 3740 - dout("got safe reply %llu, mds%d\n", tid, mds); 3699 + doutc(cl, "got safe reply %llu, mds%d\n", tid, mds); 3741 3700 3742 3701 mutex_unlock(&mdsc->mutex); 3743 3702 goto out; ··· 3747 3706 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); 3748 3707 } 3749 3708 3750 - dout("handle_reply tid %lld result %d\n", tid, result); 3709 + doutc(cl, "tid %lld result %d\n", tid, result); 3751 3710 if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features)) 3752 3711 err = parse_reply_info(session, msg, req, (u64)-1); 3753 3712 else ··· 3787 3746 3788 3747 mutex_lock(&session->s_mutex); 3789 3748 if (err < 0) { 3790 - pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); 3749 + pr_err_client(cl, "got corrupt reply mds%d(tid:%lld)\n", 3750 + mds, tid); 3791 3751 ceph_msg_dump(msg); 3792 3752 goto out_err; 3793 3753 } ··· 3852 3810 set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags); 3853 3811 } 3854 3812 } else { 3855 - dout("reply arrived after request %lld was aborted\n", tid); 3813 + doutc(cl, "reply arrived after request %lld was aborted\n", tid); 3856 3814 } 3857 3815 mutex_unlock(&mdsc->mutex); 3858 3816 ··· 3881 3839 struct ceph_mds_session *session, 3882 3840 struct ceph_msg *msg) 3883 3841 { 3842 + struct ceph_client *cl = mdsc->fsc->client; 3884 3843 struct ceph_mds_request *req; 3885 3844 u64 tid = le64_to_cpu(msg->hdr.tid); 3886 3845 u32 next_mds; ··· 3899 3856 req = lookup_get_request(mdsc, tid); 3900 3857 if (!req) { 3901 3858 mutex_unlock(&mdsc->mutex); 3902 - dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); 3859 + doutc(cl, "forward tid %llu to mds%d - req dne\n", tid, next_mds); 3903 3860 return; /* dup reply? 
*/ 3904 3861 } 3905 3862 3906 3863 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { 3907 - dout("forward tid %llu aborted, unregistering\n", tid); 3864 + doutc(cl, "forward tid %llu aborted, unregistering\n", tid); 3908 3865 __unregister_request(mdsc, req); 3909 3866 } else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) { 3910 3867 /* ··· 3920 3877 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); 3921 3878 mutex_unlock(&req->r_fill_mutex); 3922 3879 aborted = true; 3923 - pr_warn_ratelimited("forward tid %llu seq overflow\n", tid); 3880 + pr_warn_ratelimited_client(cl, "forward tid %llu seq overflow\n", 3881 + tid); 3924 3882 } else { 3925 3883 /* resend. forward race not possible; mds would drop */ 3926 - dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); 3884 + doutc(cl, "forward tid %llu to mds%d (we resend)\n", tid, next_mds); 3927 3885 BUG_ON(req->r_err); 3928 3886 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)); 3929 3887 req->r_attempts = 0; ··· 3942 3898 return; 3943 3899 3944 3900 bad: 3945 - pr_err("mdsc_handle_forward decode error err=%d\n", err); 3901 + pr_err_client(cl, "decode error err=%d\n", err); 3946 3902 ceph_msg_dump(msg); 3947 3903 } 3948 3904 ··· 3981 3937 struct ceph_msg *msg) 3982 3938 { 3983 3939 struct ceph_mds_client *mdsc = session->s_mdsc; 3940 + struct ceph_client *cl = mdsc->fsc->client; 3984 3941 int mds = session->s_mds; 3985 3942 int msg_version = le16_to_cpu(msg->hdr.version); 3986 3943 void *p = msg->front.iov_base; ··· 4029 3984 /* version >= 5, flags */ 4030 3985 ceph_decode_32_safe(&p, end, flags, bad); 4031 3986 if (flags & CEPH_SESSION_BLOCKLISTED) { 4032 - pr_warn("mds%d session blocklisted\n", session->s_mds); 3987 + pr_warn_client(cl, "mds%d session blocklisted\n", 3988 + session->s_mds); 4033 3989 blocklisted = true; 4034 3990 } 4035 3991 } ··· 4046 4000 4047 4001 mutex_lock(&session->s_mutex); 4048 4002 4049 - dout("handle_session mds%d %s %p state %s seq %llu\n", 4050 - mds, 
ceph_session_op_name(op), session, 4051 - ceph_session_state_name(session->s_state), seq); 4003 + doutc(cl, "mds%d %s %p state %s seq %llu\n", mds, 4004 + ceph_session_op_name(op), session, 4005 + ceph_session_state_name(session->s_state), seq); 4052 4006 4053 4007 if (session->s_state == CEPH_MDS_SESSION_HUNG) { 4054 4008 session->s_state = CEPH_MDS_SESSION_OPEN; 4055 - pr_info("mds%d came back\n", session->s_mds); 4009 + pr_info_client(cl, "mds%d came back\n", session->s_mds); 4056 4010 } 4057 4011 4058 4012 switch (op) { 4059 4013 case CEPH_SESSION_OPEN: 4060 4014 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) 4061 - pr_info("mds%d reconnect success\n", session->s_mds); 4015 + pr_info_client(cl, "mds%d reconnect success\n", 4016 + session->s_mds); 4062 4017 4063 4018 if (session->s_state == CEPH_MDS_SESSION_OPEN) { 4064 - pr_notice("mds%d is already opened\n", session->s_mds); 4019 + pr_notice_client(cl, "mds%d is already opened\n", 4020 + session->s_mds); 4065 4021 } else { 4066 4022 session->s_state = CEPH_MDS_SESSION_OPEN; 4067 4023 session->s_features = features; ··· 4093 4045 4094 4046 case CEPH_SESSION_CLOSE: 4095 4047 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) 4096 - pr_info("mds%d reconnect denied\n", session->s_mds); 4048 + pr_info_client(cl, "mds%d reconnect denied\n", 4049 + session->s_mds); 4097 4050 session->s_state = CEPH_MDS_SESSION_CLOSED; 4098 4051 cleanup_session_requests(mdsc, session); 4099 4052 remove_session_caps(session); ··· 4103 4054 break; 4104 4055 4105 4056 case CEPH_SESSION_STALE: 4106 - pr_info("mds%d caps went stale, renewing\n", 4107 - session->s_mds); 4057 + pr_info_client(cl, "mds%d caps went stale, renewing\n", 4058 + session->s_mds); 4108 4059 atomic_inc(&session->s_cap_gen); 4109 4060 session->s_cap_ttl = jiffies - 1; 4110 4061 send_renew_caps(mdsc, session); ··· 4125 4076 break; 4126 4077 4127 4078 case CEPH_SESSION_FORCE_RO: 4128 - dout("force_session_readonly %p\n", session); 4079 + doutc(cl, 
"force_session_readonly %p\n", session); 4129 4080 spin_lock(&session->s_cap_lock); 4130 4081 session->s_readonly = true; 4131 4082 spin_unlock(&session->s_cap_lock); ··· 4134 4085 4135 4086 case CEPH_SESSION_REJECT: 4136 4087 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING); 4137 - pr_info("mds%d rejected session\n", session->s_mds); 4088 + pr_info_client(cl, "mds%d rejected session\n", 4089 + session->s_mds); 4138 4090 session->s_state = CEPH_MDS_SESSION_REJECTED; 4139 4091 cleanup_session_requests(mdsc, session); 4140 4092 remove_session_caps(session); ··· 4145 4095 break; 4146 4096 4147 4097 default: 4148 - pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); 4098 + pr_err_client(cl, "bad op %d mds%d\n", op, mds); 4149 4099 WARN_ON(1); 4150 4100 } 4151 4101 ··· 4162 4112 return; 4163 4113 4164 4114 bad: 4165 - pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, 4166 - (int)msg->front.iov_len); 4115 + pr_err_client(cl, "corrupt message mds%d len %d\n", mds, 4116 + (int)msg->front.iov_len); 4167 4117 ceph_msg_dump(msg); 4168 4118 return; 4169 4119 } 4170 4120 4171 4121 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req) 4172 4122 { 4123 + struct ceph_client *cl = req->r_mdsc->fsc->client; 4173 4124 int dcaps; 4174 4125 4175 4126 dcaps = xchg(&req->r_dir_caps, 0); 4176 4127 if (dcaps) { 4177 - dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps)); 4128 + doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps)); 4178 4129 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps); 4179 4130 } 4180 4131 } 4181 4132 4182 4133 void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req) 4183 4134 { 4135 + struct ceph_client *cl = req->r_mdsc->fsc->client; 4184 4136 int dcaps; 4185 4137 4186 4138 dcaps = xchg(&req->r_dir_caps, 0); 4187 4139 if (dcaps) { 4188 - dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps)); 4140 + doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps)); 4189 4141 
ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent), 4190 4142 dcaps); 4191 4143 } ··· 4202 4150 struct ceph_mds_request *req, *nreq; 4203 4151 struct rb_node *p; 4204 4152 4205 - dout("replay_unsafe_requests mds%d\n", session->s_mds); 4153 + doutc(mdsc->fsc->client, "mds%d\n", session->s_mds); 4206 4154 4207 4155 mutex_lock(&mdsc->mutex); 4208 4156 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) ··· 4347 4295 static int reconnect_caps_cb(struct inode *inode, int mds, void *arg) 4348 4296 { 4349 4297 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 4298 + struct ceph_client *cl = ceph_inode_to_client(inode); 4350 4299 union { 4351 4300 struct ceph_mds_cap_reconnect v2; 4352 4301 struct ceph_mds_cap_reconnect_v1 v1; ··· 4384 4331 err = 0; 4385 4332 goto out_err; 4386 4333 } 4387 - dout(" adding %p ino %llx.%llx cap %p %lld %s\n", 4388 - inode, ceph_vinop(inode), cap, cap->cap_id, 4389 - ceph_cap_string(cap->issued)); 4334 + doutc(cl, " adding %p ino %llx.%llx cap %p %lld %s\n", inode, 4335 + ceph_vinop(inode), cap, cap->cap_id, 4336 + ceph_cap_string(cap->issued)); 4390 4337 4391 4338 cap->seq = 0; /* reset cap seq */ 4392 4339 cap->issue_seq = 0; /* and issue_seq */ ··· 4536 4483 { 4537 4484 struct rb_node *p; 4538 4485 struct ceph_pagelist *pagelist = recon_state->pagelist; 4486 + struct ceph_client *cl = mdsc->fsc->client; 4539 4487 int err = 0; 4540 4488 4541 4489 if (recon_state->msg_version >= 4) { ··· 4575 4521 ceph_pagelist_encode_32(pagelist, sizeof(sr_rec)); 4576 4522 } 4577 4523 4578 - dout(" adding snap realm %llx seq %lld parent %llx\n", 4579 - realm->ino, realm->seq, realm->parent_ino); 4524 + doutc(cl, " adding snap realm %llx seq %lld parent %llx\n", 4525 + realm->ino, realm->seq, realm->parent_ino); 4580 4526 sr_rec.ino = cpu_to_le64(realm->ino); 4581 4527 sr_rec.seq = cpu_to_le64(realm->seq); 4582 4528 sr_rec.parent = cpu_to_le64(realm->parent_ino); ··· 4605 4551 static void send_mds_reconnect(struct 
ceph_mds_client *mdsc, 4606 4552 struct ceph_mds_session *session) 4607 4553 { 4554 + struct ceph_client *cl = mdsc->fsc->client; 4608 4555 struct ceph_msg *reply; 4609 4556 int mds = session->s_mds; 4610 4557 int err = -ENOMEM; ··· 4614 4559 }; 4615 4560 LIST_HEAD(dispose); 4616 4561 4617 - pr_info("mds%d reconnect start\n", mds); 4562 + pr_info_client(cl, "mds%d reconnect start\n", mds); 4618 4563 4619 4564 recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS); 4620 4565 if (!recon_state.pagelist) ··· 4630 4575 session->s_state = CEPH_MDS_SESSION_RECONNECTING; 4631 4576 session->s_seq = 0; 4632 4577 4633 - dout("session %p state %s\n", session, 4634 - ceph_session_state_name(session->s_state)); 4578 + doutc(cl, "session %p state %s\n", session, 4579 + ceph_session_state_name(session->s_state)); 4635 4580 4636 4581 atomic_inc(&session->s_cap_gen); 4637 4582 ··· 4765 4710 fail_nomsg: 4766 4711 ceph_pagelist_release(recon_state.pagelist); 4767 4712 fail_nopagelist: 4768 - pr_err("error %d preparing reconnect for mds%d\n", err, mds); 4713 + pr_err_client(cl, "error %d preparing reconnect for mds%d\n", 4714 + err, mds); 4769 4715 return; 4770 4716 } 4771 4717 ··· 4785 4729 int oldstate, newstate; 4786 4730 struct ceph_mds_session *s; 4787 4731 unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0}; 4732 + struct ceph_client *cl = mdsc->fsc->client; 4788 4733 4789 - dout("check_new_map new %u old %u\n", 4790 - newmap->m_epoch, oldmap->m_epoch); 4734 + doutc(cl, "new %u old %u\n", newmap->m_epoch, oldmap->m_epoch); 4791 4735 4792 4736 if (newmap->m_info) { 4793 4737 for (i = 0; i < newmap->possible_max_rank; i++) { ··· 4803 4747 oldstate = ceph_mdsmap_get_state(oldmap, i); 4804 4748 newstate = ceph_mdsmap_get_state(newmap, i); 4805 4749 4806 - dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n", 4807 - i, ceph_mds_state_name(oldstate), 4808 - ceph_mdsmap_is_laggy(oldmap, i) ? 
" (laggy)" : "", 4809 - ceph_mds_state_name(newstate), 4810 - ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", 4811 - ceph_session_state_name(s->s_state)); 4750 + doutc(cl, "mds%d state %s%s -> %s%s (session %s)\n", 4751 + i, ceph_mds_state_name(oldstate), 4752 + ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "", 4753 + ceph_mds_state_name(newstate), 4754 + ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", 4755 + ceph_session_state_name(s->s_state)); 4812 4756 4813 4757 if (i >= newmap->possible_max_rank) { 4814 4758 /* force close session for stopped mds */ ··· 4861 4805 newstate >= CEPH_MDS_STATE_ACTIVE) { 4862 4806 if (oldstate != CEPH_MDS_STATE_CREATING && 4863 4807 oldstate != CEPH_MDS_STATE_STARTING) 4864 - pr_info("mds%d recovery completed\n", s->s_mds); 4808 + pr_info_client(cl, "mds%d recovery completed\n", 4809 + s->s_mds); 4865 4810 kick_requests(mdsc, i); 4866 4811 mutex_unlock(&mdsc->mutex); 4867 4812 mutex_lock(&s->s_mutex); ··· 4906 4849 s = __open_export_target_session(mdsc, i); 4907 4850 if (IS_ERR(s)) { 4908 4851 err = PTR_ERR(s); 4909 - pr_err("failed to open export target session, err %d\n", 4910 - err); 4852 + pr_err_client(cl, 4853 + "failed to open export target session, err %d\n", 4854 + err); 4911 4855 continue; 4912 4856 } 4913 4857 } 4914 - dout("send reconnect to export target mds.%d\n", i); 4858 + doutc(cl, "send reconnect to export target mds.%d\n", i); 4915 4859 mutex_unlock(&mdsc->mutex); 4916 4860 send_mds_reconnect(mdsc, s); 4917 4861 ceph_put_mds_session(s); ··· 4928 4870 if (s->s_state == CEPH_MDS_SESSION_OPEN || 4929 4871 s->s_state == CEPH_MDS_SESSION_HUNG || 4930 4872 s->s_state == CEPH_MDS_SESSION_CLOSING) { 4931 - dout(" connecting to export targets of laggy mds%d\n", 4932 - i); 4873 + doutc(cl, " connecting to export targets of laggy mds%d\n", i); 4933 4874 __open_export_target_sessions(mdsc, s); 4934 4875 } 4935 4876 } ··· 4955 4898 struct ceph_mds_session *session, 4956 4899 struct ceph_msg *msg) 4957 4900 { 4901 + 
struct ceph_client *cl = mdsc->fsc->client; 4958 4902 struct super_block *sb = mdsc->fsc->sb; 4959 4903 struct inode *inode; 4960 4904 struct dentry *parent, *dentry; ··· 4967 4909 struct qstr dname; 4968 4910 int release = 0; 4969 4911 4970 - dout("handle_lease from mds%d\n", mds); 4912 + doutc(cl, "from mds%d\n", mds); 4971 4913 4972 4914 if (!ceph_inc_mds_stopping_blocker(mdsc, session)) 4973 4915 return; ··· 4985 4927 4986 4928 /* lookup inode */ 4987 4929 inode = ceph_find_inode(sb, vino); 4988 - dout("handle_lease %s, ino %llx %p %.*s\n", 4989 - ceph_lease_op_name(h->action), vino.ino, inode, 4990 - dname.len, dname.name); 4930 + doutc(cl, "%s, ino %llx %p %.*s\n", ceph_lease_op_name(h->action), 4931 + vino.ino, inode, dname.len, dname.name); 4991 4932 4992 4933 mutex_lock(&session->s_mutex); 4993 4934 if (!inode) { 4994 - dout("handle_lease no inode %llx\n", vino.ino); 4935 + doutc(cl, "no inode %llx\n", vino.ino); 4995 4936 goto release; 4996 4937 } 4997 4938 4998 4939 /* dentry */ 4999 4940 parent = d_find_alias(inode); 5000 4941 if (!parent) { 5001 - dout("no parent dentry on inode %p\n", inode); 4942 + doutc(cl, "no parent dentry on inode %p\n", inode); 5002 4943 WARN_ON(1); 5003 4944 goto release; /* hrm... 
*/ 5004 4945 } ··· 5057 5000 bad: 5058 5001 ceph_dec_mds_stopping_blocker(mdsc); 5059 5002 5060 - pr_err("corrupt lease message\n"); 5003 + pr_err_client(cl, "corrupt lease message\n"); 5061 5004 ceph_msg_dump(msg); 5062 5005 } 5063 5006 ··· 5065 5008 struct dentry *dentry, char action, 5066 5009 u32 seq) 5067 5010 { 5011 + struct ceph_client *cl = session->s_mdsc->fsc->client; 5068 5012 struct ceph_msg *msg; 5069 5013 struct ceph_mds_lease *lease; 5070 5014 struct inode *dir; 5071 5015 int len = sizeof(*lease) + sizeof(u32) + NAME_MAX; 5072 5016 5073 - dout("lease_send_msg identry %p %s to mds%d\n", 5074 - dentry, ceph_lease_op_name(action), session->s_mds); 5017 + doutc(cl, "identry %p %s to mds%d\n", dentry, ceph_lease_op_name(action), 5018 + session->s_mds); 5075 5019 5076 5020 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); 5077 5021 if (!msg) ··· 5105 5047 5106 5048 static void maybe_recover_session(struct ceph_mds_client *mdsc) 5107 5049 { 5050 + struct ceph_client *cl = mdsc->fsc->client; 5108 5051 struct ceph_fs_client *fsc = mdsc->fsc; 5109 5052 5110 5053 if (!ceph_test_mount_opt(fsc, CLEANRECOVER)) ··· 5117 5058 if (!READ_ONCE(fsc->blocklisted)) 5118 5059 return; 5119 5060 5120 - pr_info("auto reconnect after blocklisted\n"); 5061 + pr_info_client(cl, "auto reconnect after blocklisted\n"); 5121 5062 ceph_force_reconnect(fsc->sb); 5122 5063 } 5123 5064 5124 5065 bool check_session_state(struct ceph_mds_session *s) 5125 5066 { 5067 + struct ceph_client *cl = s->s_mdsc->fsc->client; 5068 + 5126 5069 switch (s->s_state) { 5127 5070 case CEPH_MDS_SESSION_OPEN: 5128 5071 if (s->s_ttl && time_after(jiffies, s->s_ttl)) { 5129 5072 s->s_state = CEPH_MDS_SESSION_HUNG; 5130 - pr_info("mds%d hung\n", s->s_mds); 5073 + pr_info_client(cl, "mds%d hung\n", s->s_mds); 5131 5074 } 5132 5075 break; 5133 5076 case CEPH_MDS_SESSION_CLOSING: ··· 5149 5088 */ 5150 5089 void inc_session_sequence(struct ceph_mds_session *s) 5151 5090 { 5091 + struct ceph_client 
*cl = s->s_mdsc->fsc->client; 5092 + 5152 5093 lockdep_assert_held(&s->s_mutex); 5153 5094 5154 5095 s->s_seq++; ··· 5158 5095 if (s->s_state == CEPH_MDS_SESSION_CLOSING) { 5159 5096 int ret; 5160 5097 5161 - dout("resending session close request for mds%d\n", s->s_mds); 5098 + doutc(cl, "resending session close request for mds%d\n", s->s_mds); 5162 5099 ret = request_close_session(s); 5163 5100 if (ret < 0) 5164 - pr_err("unable to close session to mds%d: %d\n", 5165 - s->s_mds, ret); 5101 + pr_err_client(cl, "unable to close session to mds%d: %d\n", 5102 + s->s_mds, ret); 5166 5103 } 5167 5104 } 5168 5105 ··· 5191 5128 int renew_caps; 5192 5129 int i; 5193 5130 5194 - dout("mdsc delayed_work\n"); 5131 + doutc(mdsc->fsc->client, "mdsc delayed_work\n"); 5195 5132 5196 5133 if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED) 5197 5134 return; ··· 5320 5257 */ 5321 5258 static void wait_requests(struct ceph_mds_client *mdsc) 5322 5259 { 5260 + struct ceph_client *cl = mdsc->fsc->client; 5323 5261 struct ceph_options *opts = mdsc->fsc->client->options; 5324 5262 struct ceph_mds_request *req; 5325 5263 ··· 5328 5264 if (__get_oldest_req(mdsc)) { 5329 5265 mutex_unlock(&mdsc->mutex); 5330 5266 5331 - dout("wait_requests waiting for requests\n"); 5267 + doutc(cl, "waiting for requests\n"); 5332 5268 wait_for_completion_timeout(&mdsc->safe_umount_waiters, 5333 5269 ceph_timeout_jiffies(opts->mount_timeout)); 5334 5270 5335 5271 /* tear down remaining requests */ 5336 5272 mutex_lock(&mdsc->mutex); 5337 5273 while ((req = __get_oldest_req(mdsc))) { 5338 - dout("wait_requests timed out on tid %llu\n", 5339 - req->r_tid); 5274 + doutc(cl, "timed out on tid %llu\n", req->r_tid); 5340 5275 list_del_init(&req->r_wait); 5341 5276 __unregister_request(mdsc, req); 5342 5277 } 5343 5278 } 5344 5279 mutex_unlock(&mdsc->mutex); 5345 - dout("wait_requests done\n"); 5280 + doutc(cl, "done\n"); 5346 5281 } 5347 5282 5348 5283 void send_flush_mdlog(struct ceph_mds_session *s) 5349 5284 { 
5285 + struct ceph_client *cl = s->s_mdsc->fsc->client; 5350 5286 struct ceph_msg *msg; 5351 5287 5352 5288 /* ··· 5356 5292 return; 5357 5293 5358 5294 mutex_lock(&s->s_mutex); 5359 - dout("request mdlog flush to mds%d (%s)s seq %lld\n", s->s_mds, 5360 - ceph_session_state_name(s->s_state), s->s_seq); 5295 + doutc(cl, "request mdlog flush to mds%d (%s)s seq %lld\n", 5296 + s->s_mds, ceph_session_state_name(s->s_state), s->s_seq); 5361 5297 msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG, 5362 5298 s->s_seq); 5363 5299 if (!msg) { 5364 - pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n", 5365 - s->s_mds, ceph_session_state_name(s->s_state), s->s_seq); 5300 + pr_err_client(cl, "failed to request mdlog flush to mds%d (%s) seq %lld\n", 5301 + s->s_mds, ceph_session_state_name(s->s_state), s->s_seq); 5366 5302 } else { 5367 5303 ceph_con_send(&s->s_con, msg); 5368 5304 } ··· 5375 5311 */ 5376 5312 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) 5377 5313 { 5378 - dout("pre_umount\n"); 5314 + doutc(mdsc->fsc->client, "begin\n"); 5379 5315 mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN; 5380 5316 5381 5317 ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true); ··· 5390 5326 ceph_msgr_flush(); 5391 5327 5392 5328 ceph_cleanup_quotarealms_inodes(mdsc); 5329 + doutc(mdsc->fsc->client, "done\n"); 5393 5330 } 5394 5331 5395 5332 /* ··· 5399 5334 static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc, 5400 5335 u64 want_tid) 5401 5336 { 5337 + struct ceph_client *cl = mdsc->fsc->client; 5402 5338 struct ceph_mds_request *req = NULL, *nextreq; 5403 5339 struct ceph_mds_session *last_session = NULL; 5404 5340 struct rb_node *n; 5405 5341 5406 5342 mutex_lock(&mdsc->mutex); 5407 - dout("%s want %lld\n", __func__, want_tid); 5343 + doutc(cl, "want %lld\n", want_tid); 5408 5344 restart: 5409 5345 req = __get_oldest_req(mdsc); 5410 5346 while (req && req->r_tid <= want_tid) { ··· 5439 5373 } else { 5440 5374 
ceph_put_mds_session(s); 5441 5375 } 5442 - dout("%s wait on %llu (want %llu)\n", __func__, 5443 - req->r_tid, want_tid); 5376 + doutc(cl, "wait on %llu (want %llu)\n", 5377 + req->r_tid, want_tid); 5444 5378 wait_for_completion(&req->r_safe_completion); 5445 5379 5446 5380 mutex_lock(&mdsc->mutex); ··· 5458 5392 } 5459 5393 mutex_unlock(&mdsc->mutex); 5460 5394 ceph_put_mds_session(last_session); 5461 - dout("%s done\n", __func__); 5395 + doutc(cl, "done\n"); 5462 5396 } 5463 5397 5464 5398 void ceph_mdsc_sync(struct ceph_mds_client *mdsc) 5465 5399 { 5400 + struct ceph_client *cl = mdsc->fsc->client; 5466 5401 u64 want_tid, want_flush; 5467 5402 5468 5403 if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) 5469 5404 return; 5470 5405 5471 - dout("sync\n"); 5406 + doutc(cl, "sync\n"); 5472 5407 mutex_lock(&mdsc->mutex); 5473 5408 want_tid = mdsc->last_tid; 5474 5409 mutex_unlock(&mdsc->mutex); ··· 5485 5418 } 5486 5419 spin_unlock(&mdsc->cap_dirty_lock); 5487 5420 5488 - dout("sync want tid %lld flush_seq %lld\n", 5489 - want_tid, want_flush); 5421 + doutc(cl, "sync want tid %lld flush_seq %lld\n", want_tid, want_flush); 5490 5422 5491 5423 flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid); 5492 5424 wait_caps_flush(mdsc, want_flush); ··· 5507 5441 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) 5508 5442 { 5509 5443 struct ceph_options *opts = mdsc->fsc->client->options; 5444 + struct ceph_client *cl = mdsc->fsc->client; 5510 5445 struct ceph_mds_session *session; 5511 5446 int i; 5512 5447 int skipped = 0; 5513 5448 5514 - dout("close_sessions\n"); 5449 + doutc(cl, "begin\n"); 5515 5450 5516 5451 /* close sessions */ 5517 5452 mutex_lock(&mdsc->mutex); ··· 5530 5463 } 5531 5464 mutex_unlock(&mdsc->mutex); 5532 5465 5533 - dout("waiting for sessions to close\n"); 5466 + doutc(cl, "waiting for sessions to close\n"); 5534 5467 wait_event_timeout(mdsc->session_close_wq, 5535 5468 done_closing_sessions(mdsc, skipped), 5536 5469 
ceph_timeout_jiffies(opts->mount_timeout)); ··· 5558 5491 cancel_work_sync(&mdsc->cap_reclaim_work); 5559 5492 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ 5560 5493 5561 - dout("stopped\n"); 5494 + doutc(cl, "done\n"); 5562 5495 } 5563 5496 5564 5497 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) ··· 5566 5499 struct ceph_mds_session *session; 5567 5500 int mds; 5568 5501 5569 - dout("force umount\n"); 5502 + doutc(mdsc->fsc->client, "force umount\n"); 5570 5503 5571 5504 mutex_lock(&mdsc->mutex); 5572 5505 for (mds = 0; mds < mdsc->max_sessions; mds++) { ··· 5597 5530 5598 5531 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) 5599 5532 { 5600 - dout("stop\n"); 5533 + doutc(mdsc->fsc->client, "stop\n"); 5601 5534 /* 5602 5535 * Make sure the delayed work stopped before releasing 5603 5536 * the resources. ··· 5618 5551 void ceph_mdsc_destroy(struct ceph_fs_client *fsc) 5619 5552 { 5620 5553 struct ceph_mds_client *mdsc = fsc->mdsc; 5621 - dout("mdsc_destroy %p\n", mdsc); 5554 + doutc(fsc->client, "%p\n", mdsc); 5622 5555 5623 5556 if (!mdsc) 5624 5557 return; ··· 5632 5565 5633 5566 fsc->mdsc = NULL; 5634 5567 kfree(mdsc); 5635 - dout("mdsc_destroy %p done\n", mdsc); 5568 + doutc(fsc->client, "%p done\n", mdsc); 5636 5569 } 5637 5570 5638 5571 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) 5639 5572 { 5640 5573 struct ceph_fs_client *fsc = mdsc->fsc; 5574 + struct ceph_client *cl = fsc->client; 5641 5575 const char *mds_namespace = fsc->mount_options->mds_namespace; 5642 5576 void *p = msg->front.iov_base; 5643 5577 void *end = p + msg->front.iov_len; ··· 5650 5582 ceph_decode_need(&p, end, sizeof(u32), bad); 5651 5583 epoch = ceph_decode_32(&p); 5652 5584 5653 - dout("handle_fsmap epoch %u\n", epoch); 5585 + doutc(cl, "epoch %u\n", epoch); 5654 5586 5655 5587 /* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */ 5656 5588 ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad); ··· 
5695 5627 return; 5696 5628 5697 5629 bad: 5698 - pr_err("error decoding fsmap %d. Shutting down mount.\n", err); 5630 + pr_err_client(cl, "error decoding fsmap %d. Shutting down mount.\n", 5631 + err); 5699 5632 ceph_umount_begin(mdsc->fsc->sb); 5700 5633 ceph_msg_dump(msg); 5701 5634 err_out: ··· 5711 5642 */ 5712 5643 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) 5713 5644 { 5645 + struct ceph_client *cl = mdsc->fsc->client; 5714 5646 u32 epoch; 5715 5647 u32 maplen; 5716 5648 void *p = msg->front.iov_base; ··· 5726 5656 return; 5727 5657 epoch = ceph_decode_32(&p); 5728 5658 maplen = ceph_decode_32(&p); 5729 - dout("handle_map epoch %u len %d\n", epoch, (int)maplen); 5659 + doutc(cl, "epoch %u len %d\n", epoch, (int)maplen); 5730 5660 5731 5661 /* do we need it? */ 5732 5662 mutex_lock(&mdsc->mutex); 5733 5663 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { 5734 - dout("handle_map epoch %u <= our %u\n", 5735 - epoch, mdsc->mdsmap->m_epoch); 5664 + doutc(cl, "epoch %u <= our %u\n", epoch, mdsc->mdsmap->m_epoch); 5736 5665 mutex_unlock(&mdsc->mutex); 5737 5666 return; 5738 5667 } ··· 5765 5696 bad_unlock: 5766 5697 mutex_unlock(&mdsc->mutex); 5767 5698 bad: 5768 - pr_err("error decoding mdsmap %d. Shutting down mount.\n", err); 5699 + pr_err_client(cl, "error decoding mdsmap %d. 
Shutting down mount.\n", 5700 + err); 5769 5701 ceph_umount_begin(mdsc->fsc->sb); 5770 5702 ceph_msg_dump(msg); 5771 5703 return; ··· 5797 5727 struct ceph_mds_session *s = con->private; 5798 5728 struct ceph_mds_client *mdsc = s->s_mdsc; 5799 5729 5800 - pr_warn("mds%d closed our session\n", s->s_mds); 5730 + pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n", 5731 + s->s_mds); 5801 5732 if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO) 5802 5733 send_mds_reconnect(mdsc, s); 5803 5734 } ··· 5807 5736 { 5808 5737 struct ceph_mds_session *s = con->private; 5809 5738 struct ceph_mds_client *mdsc = s->s_mdsc; 5739 + struct ceph_client *cl = mdsc->fsc->client; 5810 5740 int type = le16_to_cpu(msg->hdr.type); 5811 5741 5812 5742 mutex_lock(&mdsc->mutex); ··· 5847 5775 break; 5848 5776 5849 5777 default: 5850 - pr_err("received unknown message type %d %s\n", type, 5851 - ceph_msg_type_name(type)); 5778 + pr_err_client(cl, "received unknown message type %d %s\n", 5779 + type, ceph_msg_type_name(type)); 5852 5780 } 5853 5781 out: 5854 5782 ceph_msg_put(msg);
+12 -12
fs/ceph/mdsmap.c
··· 11 11 #include <linux/ceph/messenger.h> 12 12 #include <linux/ceph/decode.h> 13 13 14 + #include "mds_client.h" 14 15 #include "super.h" 15 16 16 17 #define CEPH_MDS_IS_READY(i, ignore_laggy) \ ··· 118 117 struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p, 119 118 void *end, bool msgr2) 120 119 { 120 + struct ceph_client *cl = mdsc->fsc->client; 121 121 struct ceph_mdsmap *m; 122 122 const void *start = *p; 123 123 int i, j, n; ··· 236 234 *p = info_end; 237 235 } 238 236 239 - dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s%s\n", 240 - i+1, n, global_id, mds, inc, 241 - ceph_pr_addr(&addr), 242 - ceph_mds_state_name(state), 243 - laggy ? "(laggy)" : ""); 237 + doutc(cl, "%d/%d %lld mds%d.%d %s %s%s\n", i+1, n, global_id, 238 + mds, inc, ceph_pr_addr(&addr), 239 + ceph_mds_state_name(state), laggy ? "(laggy)" : ""); 244 240 245 241 if (mds < 0 || mds >= m->possible_max_rank) { 246 - pr_warn("mdsmap_decode got incorrect mds(%d)\n", mds); 242 + pr_warn_client(cl, "got incorrect mds(%d)\n", mds); 247 243 continue; 248 244 } 249 245 250 246 if (state <= 0) { 251 - dout("mdsmap_decode got incorrect state(%s)\n", 252 - ceph_mds_state_name(state)); 247 + doutc(cl, "got incorrect state(%s)\n", 248 + ceph_mds_state_name(state)); 253 249 continue; 254 250 } 255 251 ··· 386 386 m->m_max_xattr_size = 0; 387 387 } 388 388 bad_ext: 389 - dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n", 390 - !!m->m_enabled, !!m->m_damaged, m->m_num_laggy); 389 + doutc(cl, "m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n", 390 + !!m->m_enabled, !!m->m_damaged, m->m_num_laggy); 391 391 *p = end; 392 - dout("mdsmap_decode success epoch %u\n", m->m_epoch); 392 + doutc(cl, "success epoch %u\n", m->m_epoch); 393 393 return m; 394 394 nomem: 395 395 err = -ENOMEM; 396 396 goto out_err; 397 397 corrupt: 398 - pr_err("corrupt mdsmap\n"); 398 + pr_err_client(cl, "corrupt mdsmap\n"); 399 399 print_hex_dump(KERN_DEBUG, "mdsmap: ", 400 400 
DUMP_PREFIX_OFFSET, 16, 1, 401 401 start, end - start, true);
+3 -2
fs/ceph/metric.c
··· 31 31 struct ceph_client_metric *m = &mdsc->metric; 32 32 u64 nr_caps = atomic64_read(&m->total_caps); 33 33 u32 header_len = sizeof(struct ceph_metric_header); 34 + struct ceph_client *cl = mdsc->fsc->client; 34 35 struct ceph_msg *msg; 35 36 s64 sum; 36 37 s32 items = 0; ··· 52 51 53 52 msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true); 54 53 if (!msg) { 55 - pr_err("send metrics to mds%d, failed to allocate message\n", 56 - s->s_mds); 54 + pr_err_client(cl, "to mds%d, failed to allocate message\n", 55 + s->s_mds); 57 56 return false; 58 57 } 59 58
+18 -11
fs/ceph/quota.c
··· 43 43 { 44 44 struct super_block *sb = mdsc->fsc->sb; 45 45 struct ceph_mds_quota *h = msg->front.iov_base; 46 + struct ceph_client *cl = mdsc->fsc->client; 46 47 struct ceph_vino vino; 47 48 struct inode *inode; 48 49 struct ceph_inode_info *ci; ··· 52 51 return; 53 52 54 53 if (msg->front.iov_len < sizeof(*h)) { 55 - pr_err("%s corrupt message mds%d len %d\n", __func__, 56 - session->s_mds, (int)msg->front.iov_len); 54 + pr_err_client(cl, "corrupt message mds%d len %d\n", 55 + session->s_mds, (int)msg->front.iov_len); 57 56 ceph_msg_dump(msg); 58 57 goto out; 59 58 } ··· 63 62 vino.snap = CEPH_NOSNAP; 64 63 inode = ceph_find_inode(sb, vino); 65 64 if (!inode) { 66 - pr_warn("Failed to find inode %llu\n", vino.ino); 65 + pr_warn_client(cl, "failed to find inode %llx\n", vino.ino); 67 66 goto out; 68 67 } 69 68 ci = ceph_inode(inode); ··· 86 85 { 87 86 struct ceph_quotarealm_inode *qri = NULL; 88 87 struct rb_node **node, *parent = NULL; 88 + struct ceph_client *cl = mdsc->fsc->client; 89 89 90 90 mutex_lock(&mdsc->quotarealms_inodes_mutex); 91 91 node = &(mdsc->quotarealms_inodes.rb_node); ··· 112 110 rb_link_node(&qri->node, parent, node); 113 111 rb_insert_color(&qri->node, &mdsc->quotarealms_inodes); 114 112 } else 115 - pr_warn("Failed to alloc quotarealms_inode\n"); 113 + pr_warn_client(cl, "Failed to alloc quotarealms_inode\n"); 116 114 } 117 115 mutex_unlock(&mdsc->quotarealms_inodes_mutex); 118 116 ··· 131 129 struct super_block *sb, 132 130 struct ceph_snap_realm *realm) 133 131 { 132 + struct ceph_client *cl = mdsc->fsc->client; 134 133 struct ceph_quotarealm_inode *qri; 135 134 struct inode *in; 136 135 ··· 164 161 } 165 162 166 163 if (IS_ERR(in)) { 167 - dout("Can't lookup inode %llx (err: %ld)\n", 168 - realm->ino, PTR_ERR(in)); 164 + doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino, 165 + PTR_ERR(in)); 169 166 qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */ 170 167 } else { 171 168 qri->timeout = 0; ··· 216 213 enum 
quota_get_realm which_quota, 217 214 bool retry) 218 215 { 216 + struct ceph_client *cl = mdsc->fsc->client; 219 217 struct ceph_inode_info *ci = NULL; 220 218 struct ceph_snap_realm *realm, *next; 221 219 struct inode *in; ··· 230 226 if (realm) 231 227 ceph_get_snap_realm(mdsc, realm); 232 228 else 233 - pr_err_ratelimited("get_quota_realm: ino (%llx.%llx) " 234 - "null i_snap_realm\n", ceph_vinop(inode)); 229 + pr_err_ratelimited_client(cl, 230 + "%p %llx.%llx null i_snap_realm\n", 231 + inode, ceph_vinop(inode)); 235 232 while (realm) { 236 233 bool has_inode; 237 234 ··· 322 317 loff_t delta) 323 318 { 324 319 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 320 + struct ceph_client *cl = mdsc->fsc->client; 325 321 struct ceph_inode_info *ci; 326 322 struct ceph_snap_realm *realm, *next; 327 323 struct inode *in; ··· 338 332 if (realm) 339 333 ceph_get_snap_realm(mdsc, realm); 340 334 else 341 - pr_err_ratelimited("check_quota_exceeded: ino (%llx.%llx) " 342 - "null i_snap_realm\n", ceph_vinop(inode)); 335 + pr_err_ratelimited_client(cl, 336 + "%p %llx.%llx null i_snap_realm\n", 337 + inode, ceph_vinop(inode)); 343 338 while (realm) { 344 339 bool has_inode; 345 340 ··· 390 383 break; 391 384 default: 392 385 /* Shouldn't happen */ 393 - pr_warn("Invalid quota check op (%d)\n", op); 386 + pr_warn_client(cl, "Invalid quota check op (%d)\n", op); 394 387 exceeded = true; /* Just break the loop */ 395 388 } 396 389 iput(in);
+95 -79
fs/ceph/snap.c
··· 138 138 __insert_snap_realm(&mdsc->snap_realms, realm); 139 139 mdsc->num_snap_realms++; 140 140 141 - dout("%s %llx %p\n", __func__, realm->ino, realm); 141 + doutc(mdsc->fsc->client, "%llx %p\n", realm->ino, realm); 142 142 return realm; 143 143 } 144 144 ··· 150 150 static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc, 151 151 u64 ino) 152 152 { 153 + struct ceph_client *cl = mdsc->fsc->client; 153 154 struct rb_node *n = mdsc->snap_realms.rb_node; 154 155 struct ceph_snap_realm *r; 155 156 ··· 163 162 else if (ino > r->ino) 164 163 n = n->rb_right; 165 164 else { 166 - dout("%s %llx %p\n", __func__, r->ino, r); 165 + doutc(cl, "%llx %p\n", r->ino, r); 167 166 return r; 168 167 } 169 168 } ··· 189 188 static void __destroy_snap_realm(struct ceph_mds_client *mdsc, 190 189 struct ceph_snap_realm *realm) 191 190 { 191 + struct ceph_client *cl = mdsc->fsc->client; 192 192 lockdep_assert_held_write(&mdsc->snap_rwsem); 193 193 194 - dout("%s %p %llx\n", __func__, realm, realm->ino); 194 + doutc(cl, "%p %llx\n", realm, realm->ino); 195 195 196 196 rb_erase(&realm->node, &mdsc->snap_realms); 197 197 mdsc->num_snap_realms--; ··· 292 290 struct ceph_snap_realm *realm, 293 291 u64 parentino) 294 292 { 293 + struct ceph_client *cl = mdsc->fsc->client; 295 294 struct ceph_snap_realm *parent; 296 295 297 296 lockdep_assert_held_write(&mdsc->snap_rwsem); ··· 306 303 if (IS_ERR(parent)) 307 304 return PTR_ERR(parent); 308 305 } 309 - dout("%s %llx %p: %llx %p -> %llx %p\n", __func__, realm->ino, 310 - realm, realm->parent_ino, realm->parent, parentino, parent); 306 + doutc(cl, "%llx %p: %llx %p -> %llx %p\n", realm->ino, realm, 307 + realm->parent_ino, realm->parent, parentino, parent); 311 308 if (realm->parent) { 312 309 list_del_init(&realm->child_item); 313 310 ceph_put_snap_realm(mdsc, realm->parent); ··· 337 334 struct list_head *realm_queue, 338 335 struct list_head *dirty_realms) 339 336 { 337 + struct ceph_client *cl = mdsc->fsc->client; 
340 338 struct ceph_snap_realm *parent = realm->parent; 341 339 struct ceph_snap_context *snapc; 342 340 int err = 0; ··· 365 361 realm->cached_context->seq == realm->seq && 366 362 (!parent || 367 363 realm->cached_context->seq >= parent->cached_context->seq)) { 368 - dout("%s %llx %p: %p seq %lld (%u snaps) (unchanged)\n", 369 - __func__, realm->ino, realm, realm->cached_context, 370 - realm->cached_context->seq, 371 - (unsigned int)realm->cached_context->num_snaps); 364 + doutc(cl, "%llx %p: %p seq %lld (%u snaps) (unchanged)\n", 365 + realm->ino, realm, realm->cached_context, 366 + realm->cached_context->seq, 367 + (unsigned int)realm->cached_context->num_snaps); 372 368 return 0; 373 369 } 374 370 ··· 405 401 406 402 sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL); 407 403 snapc->num_snaps = num; 408 - dout("%s %llx %p: %p seq %lld (%u snaps)\n", __func__, realm->ino, 409 - realm, snapc, snapc->seq, (unsigned int) snapc->num_snaps); 404 + doutc(cl, "%llx %p: %p seq %lld (%u snaps)\n", realm->ino, realm, 405 + snapc, snapc->seq, (unsigned int) snapc->num_snaps); 410 406 411 407 ceph_put_snap_context(realm->cached_context); 412 408 realm->cached_context = snapc; ··· 423 419 ceph_put_snap_context(realm->cached_context); 424 420 realm->cached_context = NULL; 425 421 } 426 - pr_err("%s %llx %p fail %d\n", __func__, realm->ino, realm, err); 422 + pr_err_client(cl, "%llx %p fail %d\n", realm->ino, realm, err); 427 423 return err; 428 424 } 429 425 ··· 434 430 struct ceph_snap_realm *realm, 435 431 struct list_head *dirty_realms) 436 432 { 433 + struct ceph_client *cl = mdsc->fsc->client; 437 434 LIST_HEAD(realm_queue); 438 435 int last = 0; 439 436 bool skip = false; ··· 460 455 461 456 last = build_snap_context(mdsc, _realm, &realm_queue, 462 457 dirty_realms); 463 - dout("%s %llx %p, %s\n", __func__, _realm->ino, _realm, 464 - last > 0 ? "is deferred" : !last ? "succeeded" : "failed"); 458 + doutc(cl, "%llx %p, %s\n", realm->ino, realm, 459 + last > 0 ? 
"is deferred" : !last ? "succeeded" : "failed"); 465 460 466 461 /* is any child in the list ? */ 467 462 list_for_each_entry(child, &_realm->children, child_item) { ··· 531 526 struct ceph_cap_snap **pcapsnap) 532 527 { 533 528 struct inode *inode = &ci->netfs.inode; 529 + struct ceph_client *cl = ceph_inode_to_client(inode); 534 530 struct ceph_snap_context *old_snapc, *new_snapc; 535 531 struct ceph_cap_snap *capsnap = *pcapsnap; 536 532 struct ceph_buffer *old_blob = NULL; ··· 557 551 as no new writes are allowed to start when pending, so any 558 552 writes in progress now were started before the previous 559 553 cap_snap. lucky us. */ 560 - dout("%s %p %llx.%llx already pending\n", 561 - __func__, inode, ceph_vinop(inode)); 554 + doutc(cl, "%p %llx.%llx already pending\n", inode, 555 + ceph_vinop(inode)); 562 556 goto update_snapc; 563 557 } 564 558 if (ci->i_wrbuffer_ref_head == 0 && 565 559 !(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) { 566 - dout("%s %p %llx.%llx nothing dirty|writing\n", 567 - __func__, inode, ceph_vinop(inode)); 560 + doutc(cl, "%p %llx.%llx nothing dirty|writing\n", inode, 561 + ceph_vinop(inode)); 568 562 goto update_snapc; 569 563 } 570 564 ··· 584 578 } else { 585 579 if (!(used & CEPH_CAP_FILE_WR) && 586 580 ci->i_wrbuffer_ref_head == 0) { 587 - dout("%s %p %llx.%llx no new_snap|dirty_page|writing\n", 588 - __func__, inode, ceph_vinop(inode)); 581 + doutc(cl, "%p %llx.%llx no new_snap|dirty_page|writing\n", 582 + inode, ceph_vinop(inode)); 589 583 goto update_snapc; 590 584 } 591 585 } 592 586 593 - dout("%s %p %llx.%llx cap_snap %p queuing under %p %s %s\n", 594 - __func__, inode, ceph_vinop(inode), capsnap, old_snapc, 595 - ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush"); 587 + doutc(cl, "%p %llx.%llx cap_snap %p queuing under %p %s %s\n", 588 + inode, ceph_vinop(inode), capsnap, old_snapc, 589 + ceph_cap_string(dirty), capsnap->need_flush ? 
"" : "no_flush"); 596 590 ihold(inode); 597 591 598 592 capsnap->follows = old_snapc->seq; ··· 624 618 list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); 625 619 626 620 if (used & CEPH_CAP_FILE_WR) { 627 - dout("%s %p %llx.%llx cap_snap %p snapc %p seq %llu used WR," 628 - " now pending\n", __func__, inode, ceph_vinop(inode), 629 - capsnap, old_snapc, old_snapc->seq); 621 + doutc(cl, "%p %llx.%llx cap_snap %p snapc %p seq %llu used WR," 622 + " now pending\n", inode, ceph_vinop(inode), capsnap, 623 + old_snapc, old_snapc->seq); 630 624 capsnap->writing = 1; 631 625 } else { 632 626 /* note mtime, size NOW. */ ··· 643 637 ci->i_head_snapc = NULL; 644 638 } else { 645 639 ci->i_head_snapc = ceph_get_snap_context(new_snapc); 646 - dout(" new snapc is %p\n", new_snapc); 640 + doutc(cl, " new snapc is %p\n", new_snapc); 647 641 } 648 642 spin_unlock(&ci->i_ceph_lock); 649 643 ··· 664 658 { 665 659 struct inode *inode = &ci->netfs.inode; 666 660 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); 661 + struct ceph_client *cl = mdsc->fsc->client; 667 662 668 663 BUG_ON(capsnap->writing); 669 664 capsnap->size = i_size_read(inode); ··· 677 670 capsnap->truncate_size = ci->i_truncate_size; 678 671 capsnap->truncate_seq = ci->i_truncate_seq; 679 672 if (capsnap->dirty_pages) { 680 - dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu " 681 - "still has %d dirty pages\n", __func__, inode, 682 - ceph_vinop(inode), capsnap, capsnap->context, 683 - capsnap->context->seq, ceph_cap_string(capsnap->dirty), 684 - capsnap->size, capsnap->dirty_pages); 673 + doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s " 674 + "s=%llu still has %d dirty pages\n", inode, 675 + ceph_vinop(inode), capsnap, capsnap->context, 676 + capsnap->context->seq, 677 + ceph_cap_string(capsnap->dirty), 678 + capsnap->size, capsnap->dirty_pages); 685 679 return 0; 686 680 } 687 681 ··· 691 683 * And trigger to flush the buffer immediately. 
692 684 */ 693 685 if (ci->i_wrbuffer_ref) { 694 - dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu " 695 - "used WRBUFFER, delaying\n", __func__, inode, 696 - ceph_vinop(inode), capsnap, capsnap->context, 697 - capsnap->context->seq, ceph_cap_string(capsnap->dirty), 698 - capsnap->size); 686 + doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s " 687 + "s=%llu used WRBUFFER, delaying\n", inode, 688 + ceph_vinop(inode), capsnap, capsnap->context, 689 + capsnap->context->seq, ceph_cap_string(capsnap->dirty), 690 + capsnap->size); 699 691 ceph_queue_writeback(inode); 700 692 return 0; 701 693 } 702 694 703 695 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS; 704 - dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n", 705 - __func__, inode, ceph_vinop(inode), capsnap, capsnap->context, 706 - capsnap->context->seq, ceph_cap_string(capsnap->dirty), 707 - capsnap->size); 696 + doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n", 697 + inode, ceph_vinop(inode), capsnap, capsnap->context, 698 + capsnap->context->seq, ceph_cap_string(capsnap->dirty), 699 + capsnap->size); 708 700 709 701 spin_lock(&mdsc->snap_flush_lock); 710 702 if (list_empty(&ci->i_snap_flush_item)) { ··· 722 714 static void queue_realm_cap_snaps(struct ceph_mds_client *mdsc, 723 715 struct ceph_snap_realm *realm) 724 716 { 717 + struct ceph_client *cl = mdsc->fsc->client; 725 718 struct ceph_inode_info *ci; 726 719 struct inode *lastinode = NULL; 727 720 struct ceph_cap_snap *capsnap = NULL; 728 721 729 - dout("%s %p %llx inode\n", __func__, realm, realm->ino); 722 + doutc(cl, "%p %llx inode\n", realm, realm->ino); 730 723 731 724 spin_lock(&realm->inodes_with_caps_lock); 732 725 list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) { ··· 746 737 if (!capsnap) { 747 738 capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS); 748 739 if (!capsnap) { 749 - pr_err("ENOMEM allocating ceph_cap_snap on %p\n", 750 - inode); 740 + pr_err_client(cl, 741 + "ENOMEM 
allocating ceph_cap_snap on %p\n", 742 + inode); 751 743 return; 752 744 } 753 745 } ··· 766 756 767 757 if (capsnap) 768 758 kmem_cache_free(ceph_cap_snap_cachep, capsnap); 769 - dout("%s %p %llx done\n", __func__, realm, realm->ino); 759 + doutc(cl, "%p %llx done\n", realm, realm->ino); 770 760 } 771 761 772 762 /* ··· 780 770 void *p, void *e, bool deletion, 781 771 struct ceph_snap_realm **realm_ret) 782 772 { 773 + struct ceph_client *cl = mdsc->fsc->client; 783 774 struct ceph_mds_snap_realm *ri; /* encoded */ 784 775 __le64 *snaps; /* encoded */ 785 776 __le64 *prior_parent_snaps; /* encoded */ ··· 795 784 796 785 lockdep_assert_held_write(&mdsc->snap_rwsem); 797 786 798 - dout("%s deletion=%d\n", __func__, deletion); 787 + doutc(cl, "deletion=%d\n", deletion); 799 788 more: 800 789 realm = NULL; 801 790 rebuild_snapcs = 0; ··· 825 814 rebuild_snapcs += err; 826 815 827 816 if (le64_to_cpu(ri->seq) > realm->seq) { 828 - dout("%s updating %llx %p %lld -> %lld\n", __func__, 829 - realm->ino, realm, realm->seq, le64_to_cpu(ri->seq)); 817 + doutc(cl, "updating %llx %p %lld -> %lld\n", realm->ino, 818 + realm, realm->seq, le64_to_cpu(ri->seq)); 830 819 /* update realm parameters, snap lists */ 831 820 realm->seq = le64_to_cpu(ri->seq); 832 821 realm->created = le64_to_cpu(ri->created); ··· 849 838 850 839 rebuild_snapcs = 1; 851 840 } else if (!realm->cached_context) { 852 - dout("%s %llx %p seq %lld new\n", __func__, 853 - realm->ino, realm, realm->seq); 841 + doutc(cl, "%llx %p seq %lld new\n", realm->ino, realm, 842 + realm->seq); 854 843 rebuild_snapcs = 1; 855 844 } else { 856 - dout("%s %llx %p seq %lld unchanged\n", __func__, 857 - realm->ino, realm, realm->seq); 845 + doutc(cl, "%llx %p seq %lld unchanged\n", realm->ino, realm, 846 + realm->seq); 858 847 } 859 848 860 - dout("done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino, 861 - realm, rebuild_snapcs, p, e); 849 + doutc(cl, "done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino, 850 + 
realm, rebuild_snapcs, p, e); 862 851 863 852 /* 864 853 * this will always track the uppest parent realm from which ··· 906 895 ceph_put_snap_realm(mdsc, realm); 907 896 if (first_realm) 908 897 ceph_put_snap_realm(mdsc, first_realm); 909 - pr_err("%s error %d\n", __func__, err); 898 + pr_err_client(cl, "error %d\n", err); 910 899 911 900 /* 912 901 * When receiving a corrupted snap trace we don't know what ··· 920 909 WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO); 921 910 ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr); 922 911 if (ret) 923 - pr_err("%s failed to blocklist %s: %d\n", __func__, 924 - ceph_pr_addr(&client->msgr.inst.addr), ret); 912 + pr_err_client(cl, "failed to blocklist %s: %d\n", 913 + ceph_pr_addr(&client->msgr.inst.addr), ret); 925 914 926 - WARN(1, "%s: %s%sdo remount to continue%s", 927 - __func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr), 915 + WARN(1, "[client.%lld] %s %s%sdo remount to continue%s", 916 + client->monc.auth->global_id, __func__, 917 + ret ? "" : ceph_pr_addr(&client->msgr.inst.addr), 928 918 ret ? "" : " was blocklisted, ", 929 919 err == -EIO ? 
" after corrupted snaptrace is fixed" : ""); 930 920 ··· 941 929 */ 942 930 static void flush_snaps(struct ceph_mds_client *mdsc) 943 931 { 932 + struct ceph_client *cl = mdsc->fsc->client; 944 933 struct ceph_inode_info *ci; 945 934 struct inode *inode; 946 935 struct ceph_mds_session *session = NULL; 947 936 948 - dout("%s\n", __func__); 937 + doutc(cl, "begin\n"); 949 938 spin_lock(&mdsc->snap_flush_lock); 950 939 while (!list_empty(&mdsc->snap_flush_list)) { 951 940 ci = list_first_entry(&mdsc->snap_flush_list, ··· 961 948 spin_unlock(&mdsc->snap_flush_lock); 962 949 963 950 ceph_put_mds_session(session); 964 - dout("%s done\n", __func__); 951 + doutc(cl, "done\n"); 965 952 } 966 953 967 954 /** ··· 1017 1004 struct ceph_mds_session *session, 1018 1005 struct ceph_msg *msg) 1019 1006 { 1007 + struct ceph_client *cl = mdsc->fsc->client; 1020 1008 struct super_block *sb = mdsc->fsc->sb; 1021 1009 int mds = session->s_mds; 1022 1010 u64 split; ··· 1048 1034 trace_len = le32_to_cpu(h->trace_len); 1049 1035 p += sizeof(*h); 1050 1036 1051 - dout("%s from mds%d op %s split %llx tracelen %d\n", __func__, 1052 - mds, ceph_snap_op_name(op), split, trace_len); 1037 + doutc(cl, "from mds%d op %s split %llx tracelen %d\n", mds, 1038 + ceph_snap_op_name(op), split, trace_len); 1053 1039 1054 1040 down_write(&mdsc->snap_rwsem); 1055 1041 locked_rwsem = 1; ··· 1080 1066 goto out; 1081 1067 } 1082 1068 1083 - dout("splitting snap_realm %llx %p\n", realm->ino, realm); 1069 + doutc(cl, "splitting snap_realm %llx %p\n", realm->ino, realm); 1084 1070 for (i = 0; i < num_split_inos; i++) { 1085 1071 struct ceph_vino vino = { 1086 1072 .ino = le64_to_cpu(split_inos[i]), ··· 1105 1091 */ 1106 1092 if (ci->i_snap_realm->created > 1107 1093 le64_to_cpu(ri->created)) { 1108 - dout(" leaving %p %llx.%llx in newer realm %llx %p\n", 1109 - inode, ceph_vinop(inode), ci->i_snap_realm->ino, 1110 - ci->i_snap_realm); 1094 + doutc(cl, " leaving %p %llx.%llx in newer realm %llx %p\n", 1095 + 
inode, ceph_vinop(inode), ci->i_snap_realm->ino, 1096 + ci->i_snap_realm); 1111 1097 goto skip_inode; 1112 1098 } 1113 - dout(" will move %p %llx.%llx to split realm %llx %p\n", 1114 - inode, ceph_vinop(inode), realm->ino, realm); 1099 + doutc(cl, " will move %p %llx.%llx to split realm %llx %p\n", 1100 + inode, ceph_vinop(inode), realm->ino, realm); 1115 1101 1116 1102 ceph_get_snap_realm(mdsc, realm); 1117 1103 ceph_change_snap_realm(inode, realm); ··· 1172 1158 return; 1173 1159 1174 1160 bad: 1175 - pr_err("%s corrupt snap message from mds%d\n", __func__, mds); 1161 + pr_err_client(cl, "corrupt snap message from mds%d\n", mds); 1176 1162 ceph_msg_dump(msg); 1177 1163 out: 1178 1164 if (locked_rwsem) ··· 1188 1174 struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc, 1189 1175 u64 snap) 1190 1176 { 1177 + struct ceph_client *cl = mdsc->fsc->client; 1191 1178 struct ceph_snapid_map *sm, *exist; 1192 1179 struct rb_node **p, *parent; 1193 1180 int ret; ··· 1211 1196 } 1212 1197 spin_unlock(&mdsc->snapid_map_lock); 1213 1198 if (exist) { 1214 - dout("%s found snapid map %llx -> %x\n", __func__, 1215 - exist->snap, exist->dev); 1199 + doutc(cl, "found snapid map %llx -> %x\n", exist->snap, 1200 + exist->dev); 1216 1201 return exist; 1217 1202 } 1218 1203 ··· 1256 1241 if (exist) { 1257 1242 free_anon_bdev(sm->dev); 1258 1243 kfree(sm); 1259 - dout("%s found snapid map %llx -> %x\n", __func__, 1260 - exist->snap, exist->dev); 1244 + doutc(cl, "found snapid map %llx -> %x\n", exist->snap, 1245 + exist->dev); 1261 1246 return exist; 1262 1247 } 1263 1248 1264 - dout("%s create snapid map %llx -> %x\n", __func__, 1265 - sm->snap, sm->dev); 1249 + doutc(cl, "create snapid map %llx -> %x\n", sm->snap, sm->dev); 1266 1250 return sm; 1267 1251 } 1268 1252 ··· 1286 1272 1287 1273 void ceph_trim_snapid_map(struct ceph_mds_client *mdsc) 1288 1274 { 1275 + struct ceph_client *cl = mdsc->fsc->client; 1289 1276 struct ceph_snapid_map *sm; 1290 1277 unsigned 
long now; 1291 1278 LIST_HEAD(to_free); ··· 1308 1293 while (!list_empty(&to_free)) { 1309 1294 sm = list_first_entry(&to_free, struct ceph_snapid_map, lru); 1310 1295 list_del(&sm->lru); 1311 - dout("trim snapid map %llx -> %x\n", sm->snap, sm->dev); 1296 + doutc(cl, "trim snapid map %llx -> %x\n", sm->snap, sm->dev); 1312 1297 free_anon_bdev(sm->dev); 1313 1298 kfree(sm); 1314 1299 } ··· 1316 1301 1317 1302 void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc) 1318 1303 { 1304 + struct ceph_client *cl = mdsc->fsc->client; 1319 1305 struct ceph_snapid_map *sm; 1320 1306 struct rb_node *p; 1321 1307 LIST_HEAD(to_free); ··· 1335 1319 list_del(&sm->lru); 1336 1320 free_anon_bdev(sm->dev); 1337 1321 if (WARN_ON_ONCE(atomic_read(&sm->ref))) { 1338 - pr_err("snapid map %llx -> %x still in use\n", 1339 - sm->snap, sm->dev); 1322 + pr_err_client(cl, "snapid map %llx -> %x still in use\n", 1323 + sm->snap, sm->dev); 1340 1324 } 1341 1325 kfree(sm); 1342 1326 }
+40 -30
fs/ceph/super.c
··· 46 46 { 47 47 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s); 48 48 49 - dout("put_super\n"); 49 + doutc(fsc->client, "begin\n"); 50 50 ceph_fscrypt_free_dummy_policy(fsc); 51 51 ceph_mdsc_close_sessions(fsc->mdsc); 52 + doutc(fsc->client, "done\n"); 52 53 } 53 54 54 55 static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) ··· 60 59 int i, err; 61 60 u64 data_pool; 62 61 62 + doutc(fsc->client, "begin\n"); 63 63 if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) { 64 64 data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0]; 65 65 } else { 66 66 data_pool = CEPH_NOPOOL; 67 67 } 68 68 69 - dout("statfs\n"); 70 69 err = ceph_monc_do_statfs(monc, data_pool, &st); 71 70 if (err < 0) 72 71 return err; ··· 114 113 /* fold the fs_cluster_id into the upper bits */ 115 114 buf->f_fsid.val[1] = monc->fs_cluster_id; 116 115 116 + doutc(fsc->client, "done\n"); 117 117 return 0; 118 118 } 119 119 120 120 static int ceph_sync_fs(struct super_block *sb, int wait) 121 121 { 122 122 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb); 123 + struct ceph_client *cl = fsc->client; 123 124 124 125 if (!wait) { 125 - dout("sync_fs (non-blocking)\n"); 126 + doutc(cl, "(non-blocking)\n"); 126 127 ceph_flush_dirty_caps(fsc->mdsc); 127 - dout("sync_fs (non-blocking) done\n"); 128 + doutc(cl, "(non-blocking) done\n"); 128 129 return 0; 129 130 } 130 131 131 - dout("sync_fs (blocking)\n"); 132 + doutc(cl, "(blocking)\n"); 132 133 ceph_osdc_sync(&fsc->client->osdc); 133 134 ceph_mdsc_sync(fsc->mdsc); 134 - dout("sync_fs (blocking) done\n"); 135 + doutc(cl, "(blocking) done\n"); 135 136 return 0; 136 137 } 137 138 ··· 344 341 char *dev_name = param->string, *dev_name_end; 345 342 int ret; 346 343 347 - dout("%s '%s'\n", __func__, dev_name); 344 + dout("'%s'\n", dev_name); 348 345 if (!dev_name || !*dev_name) 349 346 return invalfc(fc, "Empty source"); 350 347 ··· 416 413 return ret; 417 414 418 415 token = fs_parse(fc, ceph_mount_parameters, param, &result); 419 - dout("%s 
fs_parse '%s' token %d\n", __func__, param->key, token); 416 + dout("%s: fs_parse '%s' token %d\n",__func__, param->key, token); 420 417 if (token < 0) 421 418 return token; 422 419 ··· 884 881 885 882 static void destroy_fs_client(struct ceph_fs_client *fsc) 886 883 { 887 - dout("destroy_fs_client %p\n", fsc); 884 + doutc(fsc->client, "%p\n", fsc); 888 885 889 886 spin_lock(&ceph_fsc_lock); 890 887 list_del(&fsc->metric_wakeup); ··· 899 896 ceph_destroy_client(fsc->client); 900 897 901 898 kfree(fsc); 902 - dout("destroy_fs_client %p done\n", fsc); 899 + dout("%s: %p done\n", __func__, fsc); 903 900 } 904 901 905 902 /* ··· 1020 1017 { 1021 1018 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb); 1022 1019 1023 - dout("ceph_umount_begin - starting forced umount\n"); 1020 + doutc(fsc->client, "starting forced umount\n"); 1024 1021 if (!fsc) 1025 1022 return; 1026 1023 fsc->mount_state = CEPH_MOUNT_SHUTDOWN; ··· 1048 1045 const char *path, 1049 1046 unsigned long started) 1050 1047 { 1048 + struct ceph_client *cl = fsc->client; 1051 1049 struct ceph_mds_client *mdsc = fsc->mdsc; 1052 1050 struct ceph_mds_request *req = NULL; 1053 1051 int err; 1054 1052 struct dentry *root; 1055 1053 1056 1054 /* open dir */ 1057 - dout("open_root_inode opening '%s'\n", path); 1055 + doutc(cl, "opening '%s'\n", path); 1058 1056 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1059 1057 if (IS_ERR(req)) 1060 1058 return ERR_CAST(req); ··· 1075 1071 if (err == 0) { 1076 1072 struct inode *inode = req->r_target_inode; 1077 1073 req->r_target_inode = NULL; 1078 - dout("open_root_inode success\n"); 1074 + doutc(cl, "success\n"); 1079 1075 root = d_make_root(inode); 1080 1076 if (!root) { 1081 1077 root = ERR_PTR(-ENOMEM); 1082 1078 goto out; 1083 1079 } 1084 - dout("open_root_inode success, root dentry is %p\n", root); 1080 + doutc(cl, "success, root dentry is %p\n", root); 1085 1081 } else { 1086 1082 root = ERR_PTR(err); 1087 1083 } ··· 1140 1136 static struct 
dentry *ceph_real_mount(struct ceph_fs_client *fsc, 1141 1137 struct fs_context *fc) 1142 1138 { 1139 + struct ceph_client *cl = fsc->client; 1143 1140 int err; 1144 1141 unsigned long started = jiffies; /* note the start time */ 1145 1142 struct dentry *root; 1146 1143 1147 - dout("mount start %p\n", fsc); 1144 + doutc(cl, "mount start %p\n", fsc); 1148 1145 mutex_lock(&fsc->client->mount_mutex); 1149 1146 1150 1147 if (!fsc->sb->s_root) { ··· 1168 1163 if (err) 1169 1164 goto out; 1170 1165 1171 - dout("mount opening path '%s'\n", path); 1166 + doutc(cl, "mount opening path '%s'\n", path); 1172 1167 1173 1168 ceph_fs_debugfs_init(fsc); 1174 1169 ··· 1183 1178 } 1184 1179 1185 1180 fsc->mount_state = CEPH_MOUNT_MOUNTED; 1186 - dout("mount success\n"); 1181 + doutc(cl, "mount success\n"); 1187 1182 mutex_unlock(&fsc->client->mount_mutex); 1188 1183 return root; 1189 1184 ··· 1196 1191 static int ceph_set_super(struct super_block *s, struct fs_context *fc) 1197 1192 { 1198 1193 struct ceph_fs_client *fsc = s->s_fs_info; 1194 + struct ceph_client *cl = fsc->client; 1199 1195 int ret; 1200 1196 1201 - dout("set_super %p\n", s); 1197 + doutc(cl, "%p\n", s); 1202 1198 1203 1199 s->s_maxbytes = MAX_LFS_FILESIZE; 1204 1200 ··· 1233 1227 struct ceph_mount_options *fsopt = new->mount_options; 1234 1228 struct ceph_options *opt = new->client->options; 1235 1229 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb); 1230 + struct ceph_client *cl = fsc->client; 1236 1231 1237 - dout("ceph_compare_super %p\n", sb); 1232 + doutc(cl, "%p\n", sb); 1238 1233 1239 1234 if (compare_mount_options(fsopt, opt, fsc)) { 1240 - dout("monitor(s)/mount options don't match\n"); 1235 + doutc(cl, "monitor(s)/mount options don't match\n"); 1241 1236 return 0; 1242 1237 } 1243 1238 if ((opt->flags & CEPH_OPT_FSID) && 1244 1239 ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) { 1245 - dout("fsid doesn't match\n"); 1240 + doutc(cl, "fsid doesn't match\n"); 1246 1241 return 0; 1247 1242 } 1248 
1243 if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) { 1249 - dout("flags differ\n"); 1244 + doutc(cl, "flags differ\n"); 1250 1245 return 0; 1251 1246 } 1252 1247 1253 1248 if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) { 1254 - dout("client is blocklisted (and CLEANRECOVER is not set)\n"); 1249 + doutc(cl, "client is blocklisted (and CLEANRECOVER is not set)\n"); 1255 1250 return 0; 1256 1251 } 1257 1252 1258 1253 if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) { 1259 - dout("client has been forcibly unmounted\n"); 1254 + doutc(cl, "client has been forcibly unmounted\n"); 1260 1255 return 0; 1261 1256 } 1262 1257 ··· 1345 1338 err = PTR_ERR(res); 1346 1339 goto out_splat; 1347 1340 } 1348 - dout("root %p inode %p ino %llx.%llx\n", res, 1349 - d_inode(res), ceph_vinop(d_inode(res))); 1341 + 1342 + doutc(fsc->client, "root %p inode %p ino %llx.%llx\n", res, 1343 + d_inode(res), ceph_vinop(d_inode(res))); 1350 1344 fc->root = fsc->sb->s_root; 1351 1345 return 0; 1352 1346 ··· 1405 1397 kfree(fsc->mount_options->mon_addr); 1406 1398 fsc->mount_options->mon_addr = fsopt->mon_addr; 1407 1399 fsopt->mon_addr = NULL; 1408 - pr_notice("ceph: monitor addresses recorded, but not used for reconnection"); 1400 + pr_notice_client(fsc->client, 1401 + "monitor addresses recorded, but not used for reconnection"); 1409 1402 } 1410 1403 1411 1404 sync_filesystem(sb); ··· 1526 1517 static void ceph_kill_sb(struct super_block *s) 1527 1518 { 1528 1519 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s); 1520 + struct ceph_client *cl = fsc->client; 1529 1521 struct ceph_mds_client *mdsc = fsc->mdsc; 1530 1522 bool wait; 1531 1523 1532 - dout("kill_sb %p\n", s); 1524 + doutc(cl, "%p\n", s); 1533 1525 1534 1526 ceph_mdsc_pre_umount(mdsc); 1535 1527 flush_fs_workqueues(fsc); ··· 1561 1551 &mdsc->stopping_waiter, 1562 1552 fsc->client->options->mount_timeout); 1563 1553 if (!timeleft) /* timed out */ 1564 - pr_warn("umount timed out, %ld\n", timeleft); 1554 + 
pr_warn_client(cl, "umount timed out, %ld\n", timeleft); 1565 1555 else if (timeleft < 0) /* killed */ 1566 - pr_warn("umount was killed, %ld\n", timeleft); 1556 + pr_warn_client(cl, "umount was killed, %ld\n", timeleft); 1567 1557 } 1568 1558 1569 1559 mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
+6
fs/ceph/super.h
··· 505 505 return (struct ceph_mds_client *)ceph_sb_to_fs_client(sb)->mdsc; 506 506 } 507 507 508 + static inline struct ceph_client * 509 + ceph_inode_to_client(const struct inode *inode) 510 + { 511 + return (struct ceph_client *)ceph_inode_to_fs_client(inode)->client; 512 + } 513 + 508 514 static inline struct ceph_vino 509 515 ceph_vino(const struct inode *inode) 510 516 {
+57 -39
fs/ceph/xattr.c
··· 58 58 size_t size) 59 59 { 60 60 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb); 61 + struct ceph_client *cl = fsc->client; 61 62 struct ceph_osd_client *osdc = &fsc->client->osdc; 62 63 struct ceph_string *pool_ns; 63 64 s64 pool = ci->i_layout.pool_id; ··· 70 69 71 70 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); 72 71 73 - dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode); 72 + doutc(cl, "%p\n", &ci->netfs.inode); 74 73 down_read(&osdc->lock); 75 74 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); 76 75 if (pool_name) { ··· 571 570 int flags, int update_xattr, 572 571 struct ceph_inode_xattr **newxattr) 573 572 { 573 + struct inode *inode = &ci->netfs.inode; 574 + struct ceph_client *cl = ceph_inode_to_client(inode); 574 575 struct rb_node **p; 575 576 struct rb_node *parent = NULL; 576 577 struct ceph_inode_xattr *xattr = NULL; ··· 629 626 xattr->should_free_name = update_xattr; 630 627 631 628 ci->i_xattrs.count++; 632 - dout("%s count=%d\n", __func__, ci->i_xattrs.count); 629 + doutc(cl, "count=%d\n", ci->i_xattrs.count); 633 630 } else { 634 631 kfree(*newxattr); 635 632 *newxattr = NULL; ··· 657 654 if (new) { 658 655 rb_link_node(&xattr->node, parent, p); 659 656 rb_insert_color(&xattr->node, &ci->i_xattrs.index); 660 - dout("%s p=%p\n", __func__, p); 657 + doutc(cl, "p=%p\n", p); 661 658 } 662 659 663 - dout("%s added %llx.%llx xattr %p %.*s=%.*s%s\n", __func__, 664 - ceph_vinop(&ci->netfs.inode), xattr, name_len, name, 665 - min(val_len, MAX_XATTR_VAL_PRINT_LEN), val, 666 - val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : ""); 660 + doutc(cl, "added %p %llx.%llx xattr %p %.*s=%.*s%s\n", inode, 661 + ceph_vinop(inode), xattr, name_len, name, min(val_len, 662 + MAX_XATTR_VAL_PRINT_LEN), val, 663 + val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." 
: ""); 667 664 668 665 return 0; 669 666 } ··· 671 668 static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci, 672 669 const char *name) 673 670 { 671 + struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode); 674 672 struct rb_node **p; 675 673 struct rb_node *parent = NULL; 676 674 struct ceph_inode_xattr *xattr = NULL; ··· 692 688 else { 693 689 int len = min(xattr->val_len, MAX_XATTR_VAL_PRINT_LEN); 694 690 695 - dout("%s %s: found %.*s%s\n", __func__, name, len, 696 - xattr->val, xattr->val_len > len ? "..." : ""); 691 + doutc(cl, "%s found %.*s%s\n", name, len, xattr->val, 692 + xattr->val_len > len ? "..." : ""); 697 693 return xattr; 698 694 } 699 695 } 700 696 701 - dout("%s %s: not found\n", __func__, name); 697 + doutc(cl, "%s not found\n", name); 702 698 703 699 return NULL; 704 700 } ··· 739 735 static char *__copy_xattr_names(struct ceph_inode_info *ci, 740 736 char *dest) 741 737 { 738 + struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode); 742 739 struct rb_node *p; 743 740 struct ceph_inode_xattr *xattr = NULL; 744 741 745 742 p = rb_first(&ci->i_xattrs.index); 746 - dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count); 743 + doutc(cl, "count=%d\n", ci->i_xattrs.count); 747 744 748 745 while (p) { 749 746 xattr = rb_entry(p, struct ceph_inode_xattr, node); 750 747 memcpy(dest, xattr->name, xattr->name_len); 751 748 dest[xattr->name_len] = '\0'; 752 749 753 - dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name, 754 - xattr->name_len, ci->i_xattrs.names_size); 750 + doutc(cl, "dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name, 751 + xattr->name_len, ci->i_xattrs.names_size); 755 752 756 753 dest += xattr->name_len + 1; 757 754 p = rb_next(p); ··· 763 758 764 759 void __ceph_destroy_xattrs(struct ceph_inode_info *ci) 765 760 { 761 + struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode); 766 762 struct rb_node *p, *tmp; 767 763 struct ceph_inode_xattr *xattr = NULL; 768 764 769 765 p = 
rb_first(&ci->i_xattrs.index); 770 766 771 - dout("__ceph_destroy_xattrs p=%p\n", p); 767 + doutc(cl, "p=%p\n", p); 772 768 773 769 while (p) { 774 770 xattr = rb_entry(p, struct ceph_inode_xattr, node); 775 771 tmp = p; 776 772 p = rb_next(tmp); 777 - dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p, 778 - xattr->name_len, xattr->name); 773 + doutc(cl, "next p=%p (%.*s)\n", p, xattr->name_len, xattr->name); 779 774 rb_erase(tmp, &ci->i_xattrs.index); 780 775 781 776 __free_xattr(xattr); ··· 792 787 __releases(ci->i_ceph_lock) 793 788 __acquires(ci->i_ceph_lock) 794 789 { 790 + struct ceph_client *cl = ceph_inode_to_client(inode); 795 791 u32 namelen; 796 792 u32 numattr = 0; 797 793 void *p, *end; ··· 804 798 int err = 0; 805 799 int i; 806 800 807 - dout("__build_xattrs() len=%d\n", 808 - ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0); 801 + doutc(cl, "len=%d\n", 802 + ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0); 809 803 810 804 if (ci->i_xattrs.index_version >= ci->i_xattrs.version) 811 805 return 0; /* already built */ ··· 880 874 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size, 881 875 int val_size) 882 876 { 877 + struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode); 878 + 883 879 /* 884 880 * 4 bytes for the length, and additional 4 bytes per each xattr name, 885 881 * 4 bytes per each value ··· 889 881 int size = 4 + ci->i_xattrs.count*(4 + 4) + 890 882 ci->i_xattrs.names_size + 891 883 ci->i_xattrs.vals_size; 892 - dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n", 893 - ci->i_xattrs.count, ci->i_xattrs.names_size, 894 - ci->i_xattrs.vals_size); 884 + doutc(cl, "c=%d names.size=%d vals.size=%d\n", ci->i_xattrs.count, 885 + ci->i_xattrs.names_size, ci->i_xattrs.vals_size); 895 886 896 887 if (name_size) 897 888 size += 4 + 4 + name_size + val_size; ··· 906 899 */ 907 900 struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci) 908 901 { 902 + struct 
inode *inode = &ci->netfs.inode; 903 + struct ceph_client *cl = ceph_inode_to_client(inode); 909 904 struct rb_node *p; 910 905 struct ceph_inode_xattr *xattr = NULL; 911 906 struct ceph_buffer *old_blob = NULL; 912 907 void *dest; 913 908 914 - dout("__build_xattrs_blob %p\n", &ci->netfs.inode); 909 + doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode)); 915 910 if (ci->i_xattrs.dirty) { 916 911 int need = __get_required_blob_size(ci, 0, 0); 917 912 ··· 971 962 ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value, 972 963 size_t size) 973 964 { 965 + struct ceph_client *cl = ceph_inode_to_client(inode); 974 966 struct ceph_inode_info *ci = ceph_inode(inode); 975 967 struct ceph_inode_xattr *xattr; 976 968 struct ceph_vxattr *vxattr; ··· 1010 1000 req_mask = __get_request_mask(inode); 1011 1001 1012 1002 spin_lock(&ci->i_ceph_lock); 1013 - dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name, 1014 - ci->i_xattrs.version, ci->i_xattrs.index_version); 1003 + doutc(cl, "%p %llx.%llx name '%s' ver=%lld index_ver=%lld\n", inode, 1004 + ceph_vinop(inode), name, ci->i_xattrs.version, 1005 + ci->i_xattrs.index_version); 1015 1006 1016 1007 if (ci->i_xattrs.version == 0 || 1017 1008 !((req_mask & CEPH_CAP_XATTR_SHARED) || ··· 1021 1010 1022 1011 /* security module gets xattr while filling trace */ 1023 1012 if (current->journal_info) { 1024 - pr_warn_ratelimited("sync getxattr %p " 1025 - "during filling trace\n", inode); 1013 + pr_warn_ratelimited_client(cl, 1014 + "sync %p %llx.%llx during filling trace\n", 1015 + inode, ceph_vinop(inode)); 1026 1016 return -EBUSY; 1027 1017 } 1028 1018 ··· 1065 1053 ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size) 1066 1054 { 1067 1055 struct inode *inode = d_inode(dentry); 1056 + struct ceph_client *cl = ceph_inode_to_client(inode); 1068 1057 struct ceph_inode_info *ci = ceph_inode(inode); 1069 1058 bool len_only = (size == 0); 1070 1059 u32 namelen; 1071 1060 int err; 1072 
1061 1073 1062 spin_lock(&ci->i_ceph_lock); 1074 - dout("listxattr %p ver=%lld index_ver=%lld\n", inode, 1075 - ci->i_xattrs.version, ci->i_xattrs.index_version); 1063 + doutc(cl, "%p %llx.%llx ver=%lld index_ver=%lld\n", inode, 1064 + ceph_vinop(inode), ci->i_xattrs.version, 1065 + ci->i_xattrs.index_version); 1076 1066 1077 1067 if (ci->i_xattrs.version == 0 || 1078 1068 !__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) { ··· 1109 1095 const char *value, size_t size, int flags) 1110 1096 { 1111 1097 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb); 1098 + struct ceph_client *cl = ceph_inode_to_client(inode); 1112 1099 struct ceph_inode_info *ci = ceph_inode(inode); 1113 1100 struct ceph_mds_request *req; 1114 1101 struct ceph_mds_client *mdsc = fsc->mdsc; ··· 1134 1119 flags |= CEPH_XATTR_REMOVE; 1135 1120 } 1136 1121 1137 - dout("setxattr value size: %zu\n", size); 1122 + doutc(cl, "name %s value size %zu\n", name, size); 1138 1123 1139 1124 /* do request */ 1140 1125 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); ··· 1163 1148 req->r_num_caps = 1; 1164 1149 req->r_inode_drop = CEPH_CAP_XATTR_SHARED; 1165 1150 1166 - dout("xattr.ver (before): %lld\n", ci->i_xattrs.version); 1151 + doutc(cl, "xattr.ver (before): %lld\n", ci->i_xattrs.version); 1167 1152 err = ceph_mdsc_do_request(mdsc, NULL, req); 1168 1153 ceph_mdsc_put_request(req); 1169 - dout("xattr.ver (after): %lld\n", ci->i_xattrs.version); 1154 + doutc(cl, "xattr.ver (after): %lld\n", ci->i_xattrs.version); 1170 1155 1171 1156 out: 1172 1157 if (pagelist) ··· 1177 1162 int __ceph_setxattr(struct inode *inode, const char *name, 1178 1163 const void *value, size_t size, int flags) 1179 1164 { 1165 + struct ceph_client *cl = ceph_inode_to_client(inode); 1180 1166 struct ceph_vxattr *vxattr; 1181 1167 struct ceph_inode_info *ci = ceph_inode(inode); 1182 1168 struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc; ··· 1236 1220 required_blob_size = 
__get_required_blob_size(ci, name_len, val_len); 1237 1221 if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) || 1238 1222 (required_blob_size > mdsc->mdsmap->m_max_xattr_size)) { 1239 - dout("%s do sync setxattr: version: %llu size: %d max: %llu\n", 1240 - __func__, ci->i_xattrs.version, required_blob_size, 1241 - mdsc->mdsmap->m_max_xattr_size); 1223 + doutc(cl, "sync version: %llu size: %d max: %llu\n", 1224 + ci->i_xattrs.version, required_blob_size, 1225 + mdsc->mdsmap->m_max_xattr_size); 1242 1226 goto do_sync; 1243 1227 } 1244 1228 ··· 1252 1236 } 1253 1237 } 1254 1238 1255 - dout("setxattr %p name '%s' issued %s\n", inode, name, 1256 - ceph_cap_string(issued)); 1239 + doutc(cl, "%p %llx.%llx name '%s' issued %s\n", inode, 1240 + ceph_vinop(inode), name, ceph_cap_string(issued)); 1257 1241 __build_xattrs(inode); 1258 1242 1259 1243 if (!ci->i_xattrs.prealloc_blob || ··· 1262 1246 1263 1247 spin_unlock(&ci->i_ceph_lock); 1264 1248 ceph_buffer_put(old_blob); /* Shouldn't be required */ 1265 - dout(" pre-allocating new blob size=%d\n", required_blob_size); 1249 + doutc(cl, " pre-allocating new blob size=%d\n", 1250 + required_blob_size); 1266 1251 blob = ceph_buffer_new(required_blob_size, GFP_NOFS); 1267 1252 if (!blob) 1268 1253 goto do_sync_unlocked; ··· 1302 1285 1303 1286 /* security module set xattr while filling trace */ 1304 1287 if (current->journal_info) { 1305 - pr_warn_ratelimited("sync setxattr %p " 1306 - "during filling trace\n", inode); 1288 + pr_warn_ratelimited_client(cl, 1289 + "sync %p %llx.%llx during filling trace\n", 1290 + inode, ceph_vinop(inode)); 1307 1291 err = -EBUSY; 1308 1292 } else { 1309 1293 err = ceph_sync_setxattr(inode, name, value, size, flags);