Merge tag 'ceph-for-5.10-rc3' of git://github.com/ceph/ceph-client

Pull ceph fix from Ilya Dryomov:
"A fix for a potential stall on umount caused by the MDS dropping our
REQUEST_CLOSE message. The code that handled this case was
inadvertently disabled in 5.9; this patch removes it entirely and
fixes the problem in a way that is consistent with ceph-fuse"

* tag 'ceph-for-5.10-rc3' of git://github.com/ceph/ceph-client:
ceph: check session state after bumping session->s_seq

+39 -18
+1 -1
fs/ceph/caps.c
···
4074 4074 		     vino.snap, inode);
4075 4075 
4076 4076 	mutex_lock(&session->s_mutex);
4077      -	session->s_seq++;
     4077 +	inc_session_sequence(session);
4078 4078 	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
4079 4079 	     (unsigned)seq);
4080 4080 
+35 -15
fs/ceph/mds_client.c
···
4231 4231 	     dname.len, dname.name);
4232 4232 
4233 4233 	mutex_lock(&session->s_mutex);
4234      -	session->s_seq++;
     4234 +	inc_session_sequence(session);
4235 4235 
4236 4236 	if (!inode) {
4237 4237 		dout("handle_lease no inode %llx\n", vino.ino);
···
4385 4385 
4386 4386 bool check_session_state(struct ceph_mds_session *s)
4387 4387 {
4388      -	if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
4389      -		dout("resending session close request for mds%d\n",
4390      -		     s->s_mds);
4391      -		request_close_session(s);
4392      -		return false;
4393      -	}
4394      -	if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4395      -		if (s->s_state == CEPH_MDS_SESSION_OPEN) {
     4388 +	switch (s->s_state) {
     4389 +	case CEPH_MDS_SESSION_OPEN:
     4390 +		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4396 4391 			s->s_state = CEPH_MDS_SESSION_HUNG;
4397 4392 			pr_info("mds%d hung\n", s->s_mds);
4398 4393 		}
4399      -	}
4400      -	if (s->s_state == CEPH_MDS_SESSION_NEW ||
4401      -	    s->s_state == CEPH_MDS_SESSION_RESTARTING ||
4402      -	    s->s_state == CEPH_MDS_SESSION_CLOSED ||
4403      -	    s->s_state == CEPH_MDS_SESSION_REJECTED)
4404      -		/* this mds is failed or recovering, just wait */
     4394 +		break;
     4395 +	case CEPH_MDS_SESSION_CLOSING:
     4396 +		/* Should never reach this when we're unmounting */
     4397 +		WARN_ON_ONCE(true);
     4398 +		fallthrough;
     4399 +	case CEPH_MDS_SESSION_NEW:
     4400 +	case CEPH_MDS_SESSION_RESTARTING:
     4401 +	case CEPH_MDS_SESSION_CLOSED:
     4402 +	case CEPH_MDS_SESSION_REJECTED:
4405 4403 		return false;
     4404 +	}
4406 4405 
4407 4406 	return true;
     4407 +}
     4408 +
     4409 +/*
     4410 + * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
     4411 + * then we need to retransmit that request.
     4412 + */
     4413 +void inc_session_sequence(struct ceph_mds_session *s)
     4414 +{
     4415 +	lockdep_assert_held(&s->s_mutex);
     4416 +
     4417 +	s->s_seq++;
     4418 +
     4419 +	if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
     4420 +		int ret;
     4421 +
     4422 +		dout("resending session close request for mds%d\n", s->s_mds);
     4423 +		ret = request_close_session(s);
     4424 +		if (ret < 0)
     4425 +			pr_err("unable to close session to mds%d: %d\n",
     4426 +			       s->s_mds, ret);
     4427 +	}
4408 4428 }
4409 4429 
4410 4430 /*
+1
fs/ceph/mds_client.h
···
480 480 extern const char *ceph_mds_op_name(int op);
481 481 
482 482 extern bool check_session_state(struct ceph_mds_session *s);
    483 +void inc_session_sequence(struct ceph_mds_session *s);
483 484 
484 485 extern struct ceph_mds_session *
485 486 __ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
+1 -1
fs/ceph/quota.c
···
53 53 
54 54 	/* increment msg sequence number */
55 55 	mutex_lock(&session->s_mutex);
56    -	session->s_seq++;
   56 +	inc_session_sequence(session);
57 57 	mutex_unlock(&session->s_mutex);
58 58 
59 59 	/* lookup inode */
+1 -1
fs/ceph/snap.c
···
873 873 	     ceph_snap_op_name(op), split, trace_len);
874 874 
875 875 	mutex_lock(&session->s_mutex);
876    -	session->s_seq++;
   876 +	inc_session_sequence(session);
877 877 	mutex_unlock(&session->s_mutex);
878 878 
879 879 	down_write(&mdsc->snap_rwsem);