Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ceph: blocklist the kclient when receiving corrupted snap trace

When receiving a corrupted snap trace we don't know what exactly has
happened on the MDS side, and we shouldn't continue IO and metadata
access to the MDS, which may corrupt data or return incorrect contents.

This patch will just block all the further IO/MDS requests
immediately and then evict the kclient itself.

The reason we still need to evict the kclient immediately after
blocking all further IOs is so that the MDS can revoke the caps
faster.

Link: https://tracker.ceph.com/issues/57686
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Venky Shankar <vshankar@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>

Authored by Xiubo Li and committed by Ilya Dryomov
a68e564a b38b17b6

+93 -10
+15 -2
fs/ceph/addr.c
··· 305 305 struct inode *inode = rreq->inode; 306 306 struct ceph_inode_info *ci = ceph_inode(inode); 307 307 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 308 - struct ceph_osd_request *req; 308 + struct ceph_osd_request *req = NULL; 309 309 struct ceph_vino vino = ceph_vino(inode); 310 310 struct iov_iter iter; 311 311 struct page **pages; 312 312 size_t page_off; 313 313 int err = 0; 314 314 u64 len = subreq->len; 315 + 316 + if (ceph_inode_is_shutdown(inode)) { 317 + err = -EIO; 318 + goto out; 319 + } 315 320 316 321 if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq)) 317 322 return; ··· 567 562 bool caching = ceph_is_cache_enabled(inode); 568 563 569 564 dout("writepage %p idx %lu\n", page, page->index); 565 + 566 + if (ceph_inode_is_shutdown(inode)) 567 + return -EIO; 570 568 571 569 /* verify this is a writeable snap context */ 572 570 snapc = page_snap_context(page); ··· 1651 1643 struct ceph_inode_info *ci = ceph_inode(inode); 1652 1644 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1653 1645 struct ceph_osd_request *req = NULL; 1654 - struct ceph_cap_flush *prealloc_cf; 1646 + struct ceph_cap_flush *prealloc_cf = NULL; 1655 1647 struct folio *folio = NULL; 1656 1648 u64 inline_version = CEPH_INLINE_NONE; 1657 1649 struct page *pages[1]; ··· 1664 1656 1665 1657 dout("uninline_data %p %llx.%llx inline_version %llu\n", 1666 1658 inode, ceph_vinop(inode), inline_version); 1659 + 1660 + if (ceph_inode_is_shutdown(inode)) { 1661 + err = -EIO; 1662 + goto out; 1663 + } 1667 1664 1668 1665 if (inline_version == CEPH_INLINE_NONE) 1669 1666 return 0;
+13 -3
fs/ceph/caps.c
··· 4078 4078 void *p, *end; 4079 4079 struct cap_extra_info extra_info = {}; 4080 4080 bool queue_trunc; 4081 + bool close_sessions = false; 4081 4082 4082 4083 dout("handle_caps from mds%d\n", session->s_mds); 4083 4084 ··· 4216 4215 realm = NULL; 4217 4216 if (snaptrace_len) { 4218 4217 down_write(&mdsc->snap_rwsem); 4219 - ceph_update_snap_trace(mdsc, snaptrace, 4220 - snaptrace + snaptrace_len, 4221 - false, &realm); 4218 + if (ceph_update_snap_trace(mdsc, snaptrace, 4219 + snaptrace + snaptrace_len, 4220 + false, &realm)) { 4221 + up_write(&mdsc->snap_rwsem); 4222 + close_sessions = true; 4223 + goto done; 4224 + } 4222 4225 downgrade_write(&mdsc->snap_rwsem); 4223 4226 } else { 4224 4227 down_read(&mdsc->snap_rwsem); ··· 4282 4277 iput(inode); 4283 4278 out: 4284 4279 ceph_put_string(extra_info.pool_ns); 4280 + 4281 + /* Defer closing the sessions after s_mutex lock being released */ 4282 + if (close_sessions) 4283 + ceph_mdsc_close_sessions(mdsc); 4284 + 4285 4285 return; 4286 4286 4287 4287 flush_cap_releases:
+3
fs/ceph/file.c
··· 2011 2011 loff_t zero = 0; 2012 2012 int op; 2013 2013 2014 + if (ceph_inode_is_shutdown(inode)) 2015 + return -EIO; 2016 + 2014 2017 if (!length) { 2015 2018 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE; 2016 2019 length = &zero;
+27 -3
fs/ceph/mds_client.c
··· 806 806 { 807 807 struct ceph_mds_session *s; 808 808 809 + if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) 810 + return ERR_PTR(-EIO); 811 + 809 812 if (mds >= mdsc->mdsmap->possible_max_rank) 810 813 return ERR_PTR(-EINVAL); 811 814 ··· 1480 1477 struct ceph_msg *msg; 1481 1478 int mstate; 1482 1479 int mds = session->s_mds; 1480 + 1481 + if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) 1482 + return -EIO; 1483 1483 1484 1484 /* wait for mds to go active? */ 1485 1485 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); ··· 2866 2860 return; 2867 2861 } 2868 2862 2863 + if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) { 2864 + dout("do_request metadata corrupted\n"); 2865 + err = -EIO; 2866 + goto finish; 2867 + } 2869 2868 if (req->r_timeout && 2870 2869 time_after_eq(jiffies, req->r_started + req->r_timeout)) { 2871 2870 dout("do_request timed out\n"); ··· 3256 3245 u64 tid; 3257 3246 int err, result; 3258 3247 int mds = session->s_mds; 3248 + bool close_sessions = false; 3259 3249 3260 3250 if (msg->front.iov_len < sizeof(*head)) { 3261 3251 pr_err("mdsc_handle_reply got corrupt (short) reply\n"); ··· 3363 3351 realm = NULL; 3364 3352 if (rinfo->snapblob_len) { 3365 3353 down_write(&mdsc->snap_rwsem); 3366 - ceph_update_snap_trace(mdsc, rinfo->snapblob, 3354 + err = ceph_update_snap_trace(mdsc, rinfo->snapblob, 3367 3355 rinfo->snapblob + rinfo->snapblob_len, 3368 3356 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, 3369 3357 &realm); 3358 + if (err) { 3359 + up_write(&mdsc->snap_rwsem); 3360 + close_sessions = true; 3361 + if (err == -EIO) 3362 + ceph_msg_dump(msg); 3363 + goto out_err; 3364 + } 3370 3365 downgrade_write(&mdsc->snap_rwsem); 3371 3366 } else { 3372 3367 down_read(&mdsc->snap_rwsem); ··· 3431 3412 req->r_end_latency, err); 3432 3413 out: 3433 3414 ceph_mdsc_put_request(req); 3415 + 3416 + /* Defer closing the sessions after s_mutex lock being released */ 3417 + if (close_sessions) 3418 + 
ceph_mdsc_close_sessions(mdsc); 3434 3419 return; 3435 3420 } 3436 3421 ··· 5034 5011 } 5035 5012 5036 5013 /* 5037 - * called after sb is ro. 5014 + * called after sb is ro or when metadata corrupted. 5038 5015 */ 5039 5016 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) 5040 5017 { ··· 5324 5301 struct ceph_mds_client *mdsc = s->s_mdsc; 5325 5302 5326 5303 pr_warn("mds%d closed our session\n", s->s_mds); 5327 - send_mds_reconnect(mdsc, s); 5304 + if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO) 5305 + send_mds_reconnect(mdsc, s); 5328 5306 } 5329 5307 5330 5308 static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+34 -2
fs/ceph/snap.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <linux/ceph/ceph_debug.h> 3 3 4 + #include <linux/fs.h> 4 5 #include <linux/sort.h> 5 6 #include <linux/slab.h> 6 7 #include <linux/iversion.h> ··· 767 766 struct ceph_snap_realm *realm; 768 767 struct ceph_snap_realm *first_realm = NULL; 769 768 struct ceph_snap_realm *realm_to_rebuild = NULL; 769 + struct ceph_client *client = mdsc->fsc->client; 770 770 int rebuild_snapcs; 771 771 int err = -ENOMEM; 772 + int ret; 772 773 LIST_HEAD(dirty_realms); 773 774 774 775 lockdep_assert_held_write(&mdsc->snap_rwsem); ··· 887 884 if (first_realm) 888 885 ceph_put_snap_realm(mdsc, first_realm); 889 886 pr_err("%s error %d\n", __func__, err); 887 + 888 + /* 889 + * When receiving a corrupted snap trace we don't know what 890 + * exactly has happened in MDS side. And we shouldn't continue 891 + * writing to OSD, which may corrupt the snapshot contents. 892 + * 893 + * Just try to blocklist this kclient and then this kclient 894 + * must be remounted to continue after the corrupted metadata 895 + * fixed in the MDS side. 896 + */ 897 + WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO); 898 + ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr); 899 + if (ret) 900 + pr_err("%s failed to blocklist %s: %d\n", __func__, 901 + ceph_pr_addr(&client->msgr.inst.addr), ret); 902 + 903 + WARN(1, "%s: %s%sdo remount to continue%s", 904 + __func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr), 905 + ret ? "" : " was blocklisted, ", 906 + err == -EIO ? " after corrupted snaptrace is fixed" : ""); 907 + 890 908 return err; 891 909 } 892 910 ··· 1008 984 __le64 *split_inos = NULL, *split_realms = NULL; 1009 985 int i; 1010 986 int locked_rwsem = 0; 987 + bool close_sessions = false; 1011 988 1012 989 /* decode */ 1013 990 if (msg->front.iov_len < sizeof(*h)) ··· 1117 1092 * update using the provided snap trace. if we are deleting a 1118 1093 * snap, we can avoid queueing cap_snaps. 
1119 1094 */ 1120 - ceph_update_snap_trace(mdsc, p, e, 1121 - op == CEPH_SNAP_OP_DESTROY, NULL); 1095 + if (ceph_update_snap_trace(mdsc, p, e, 1096 + op == CEPH_SNAP_OP_DESTROY, 1097 + NULL)) { 1098 + close_sessions = true; 1099 + goto bad; 1100 + } 1122 1101 1123 1102 if (op == CEPH_SNAP_OP_SPLIT) 1124 1103 /* we took a reference when we created the realm, above */ ··· 1141 1112 out: 1142 1113 if (locked_rwsem) 1143 1114 up_write(&mdsc->snap_rwsem); 1115 + 1116 + if (close_sessions) 1117 + ceph_mdsc_close_sessions(mdsc); 1144 1118 return; 1145 1119 } 1146 1120
+1
fs/ceph/super.h
··· 108 108 CEPH_MOUNT_UNMOUNTED, 109 109 CEPH_MOUNT_SHUTDOWN, 110 110 CEPH_MOUNT_RECOVER, 111 + CEPH_MOUNT_FENCE_IO, 111 112 }; 112 113 113 114 #define CEPH_ASYNC_CREATE_CONFLICT_BITS 8