Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag '6.3-rc2-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs client fixes from Steve French:
"Seven cifs/smb3 client fixes, all also for stable:

- four DFS fixes

- multichannel reconnect fix

- fix smb1 stats for cancel command

- fix for set file size error path"

* tag '6.3-rc2-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
cifs: use DFS root session instead of tcon ses
cifs: return DFS root session id in DebugData
cifs: fix use-after-free bug in refresh_cache_worker()
cifs: set DFS root session in cifs_get_smb_ses()
cifs: generate signkey for the channel that's reconnecting
cifs: Fix smb2_set_path_size()
cifs: Move the in_send statistic to __smb_send_rqst()

+118 -195
+5
fs/cifs/cifs_debug.c
··· 420 420 from_kuid(&init_user_ns, ses->linux_uid), 421 421 from_kuid(&init_user_ns, ses->cred_uid)); 422 422 423 + if (ses->dfs_root_ses) { 424 + seq_printf(m, "\n\tDFS root session id: 0x%llx", 425 + ses->dfs_root_ses->Suid); 426 + } 427 + 423 428 spin_lock(&ses->chan_lock); 424 429 if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0)) 425 430 seq_puts(m, "\tPrimary channel: DISCONNECTED ");
+1
fs/cifs/cifs_dfs_ref.c
··· 179 179 tmp.source = full_path; 180 180 tmp.leaf_fullpath = NULL; 181 181 tmp.UNC = tmp.prepath = NULL; 182 + tmp.dfs_root_ses = NULL; 182 183 183 184 rc = smb3_fs_context_dup(ctx, &tmp); 184 185 if (rc) {
-2
fs/cifs/cifs_fs_sb.h
··· 61 61 /* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */ 62 62 char *prepath; 63 63 64 - /* randomly generated 128-bit number for indexing dfs mount groups in referral cache */ 65 - uuid_t dfs_mount_id; 66 64 /* 67 65 * Indicate whether serverino option was turned off later 68 66 * (cifs_autodisable_serverino) in order to match new mounts.
+2 -2
fs/cifs/cifsglob.h
··· 1233 1233 /* BB add field for back pointer to sb struct(s)? */ 1234 1234 #ifdef CONFIG_CIFS_DFS_UPCALL 1235 1235 struct list_head ulist; /* cache update list */ 1236 + struct list_head dfs_ses_list; 1236 1237 #endif 1237 1238 struct delayed_work query_interfaces; /* query interfaces workqueue job */ 1238 1239 }; ··· 1750 1749 struct TCP_Server_Info *server; 1751 1750 struct cifs_ses *ses; 1752 1751 struct cifs_tcon *tcon; 1753 - struct cifs_ses *root_ses; 1754 - uuid_t mount_id; 1755 1752 char *origin_fullpath, *leaf_fullpath; 1753 + struct list_head dfs_ses_list; 1756 1754 }; 1757 1755 1758 1756 static inline void free_dfs_info_param(struct dfs_info3_param *param)
+4 -6
fs/cifs/connect.c
··· 2229 2229 * need to lock before changing something in the session. 2230 2230 */ 2231 2231 spin_lock(&cifs_tcp_ses_lock); 2232 + ses->dfs_root_ses = ctx->dfs_root_ses; 2232 2233 list_add(&ses->smb_ses_list, &server->smb_ses_list); 2233 2234 spin_unlock(&cifs_tcp_ses_lock); 2234 2235 ··· 3408 3407 bool isdfs; 3409 3408 int rc; 3410 3409 3411 - uuid_gen(&mnt_ctx.mount_id); 3410 + INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list); 3411 + 3412 3412 rc = dfs_mount_share(&mnt_ctx, &isdfs); 3413 3413 if (rc) 3414 3414 goto error; ··· 3429 3427 kfree(cifs_sb->prepath); 3430 3428 cifs_sb->prepath = ctx->prepath; 3431 3429 ctx->prepath = NULL; 3432 - uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id); 3433 3430 3434 3431 out: 3435 3432 cifs_try_adding_channels(cifs_sb, mnt_ctx.ses); ··· 3440 3439 return rc; 3441 3440 3442 3441 error: 3443 - dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id); 3442 + dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list); 3444 3443 kfree(mnt_ctx.origin_fullpath); 3445 3444 kfree(mnt_ctx.leaf_fullpath); 3446 3445 cifs_mount_put_conns(&mnt_ctx); ··· 3638 3637 spin_unlock(&cifs_sb->tlink_tree_lock); 3639 3638 3640 3639 kfree(cifs_sb->prepath); 3641 - #ifdef CONFIG_CIFS_DFS_UPCALL 3642 - dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id); 3643 - #endif 3644 3640 call_rcu(&cifs_sb->rcu, delayed_free); 3645 3641 } 3646 3642
+45 -22
fs/cifs/dfs.c
··· 95 95 ctx->leaf_fullpath = (char *)full_path; 96 96 rc = cifs_mount_get_session(mnt_ctx); 97 97 ctx->leaf_fullpath = NULL; 98 - if (!rc) { 99 - struct cifs_ses *ses = mnt_ctx->ses; 100 98 101 - mutex_lock(&ses->session_mutex); 102 - ses->dfs_root_ses = mnt_ctx->root_ses; 103 - mutex_unlock(&ses->session_mutex); 104 - } 105 99 return rc; 106 100 } 107 101 108 - static void set_root_ses(struct cifs_mount_ctx *mnt_ctx) 102 + static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx) 109 103 { 110 - if (mnt_ctx->ses) { 104 + struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; 105 + struct dfs_root_ses *root_ses; 106 + struct cifs_ses *ses = mnt_ctx->ses; 107 + 108 + if (ses) { 109 + root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL); 110 + if (!root_ses) 111 + return -ENOMEM; 112 + 113 + INIT_LIST_HEAD(&root_ses->list); 114 + 111 115 spin_lock(&cifs_tcp_ses_lock); 112 - mnt_ctx->ses->ses_count++; 116 + ses->ses_count++; 113 117 spin_unlock(&cifs_tcp_ses_lock); 114 - dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses); 118 + root_ses->ses = ses; 119 + list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list); 115 120 } 116 - mnt_ctx->root_ses = mnt_ctx->ses; 121 + ctx->dfs_root_ses = ses; 122 + return 0; 117 123 } 118 124 119 125 static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, const char *full_path, ··· 127 121 { 128 122 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; 129 123 struct dfs_info3_param ref = {}; 130 - int rc; 124 + bool is_refsrv = false; 125 + int rc, rc2; 131 126 132 127 rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref); 133 128 if (rc) ··· 143 136 if (rc) 144 137 goto out; 145 138 146 - if (ref.flags & DFSREF_REFERRAL_SERVER) 147 - set_root_ses(mnt_ctx); 139 + is_refsrv = !!(ref.flags & DFSREF_REFERRAL_SERVER); 148 140 149 141 rc = -EREMOTE; 150 142 if (ref.flags & DFSREF_STORAGE_SERVER) { ··· 152 146 goto out; 153 147 154 148 /* some servers may not advertise referral capability under ref.flags */ 155 - if (!(ref.flags & DFSREF_REFERRAL_SERVER) && 156 - is_tcon_dfs(mnt_ctx->tcon)) 157 - set_root_ses(mnt_ctx); 149 + is_refsrv |= is_tcon_dfs(mnt_ctx->tcon); 158 150 159 151 rc = cifs_is_path_remote(mnt_ctx); 152 + } 153 + 154 + if (rc == -EREMOTE && is_refsrv) { 155 + rc2 = get_root_smb_session(mnt_ctx); 156 + if (rc2) 157 + rc = rc2; 160 158 } 161 159 162 160 out: ··· 175 165 char *ref_path = NULL, *full_path = NULL; 176 166 struct dfs_cache_tgt_iterator *tit; 177 167 struct TCP_Server_Info *server; 168 + struct cifs_tcon *tcon; 178 169 char *origin_fullpath = NULL; 179 170 int num_links = 0; 180 171 int rc; ··· 245 234 246 235 if (!rc) { 247 236 server = mnt_ctx->server; 237 + tcon = mnt_ctx->tcon; 248 238 249 239 mutex_lock(&server->refpath_lock); 250 - server->origin_fullpath = origin_fullpath; 251 - server->current_fullpath = server->leaf_fullpath; 240 + if (!server->origin_fullpath) { 241 + server->origin_fullpath = origin_fullpath; 242 + server->current_fullpath = server->leaf_fullpath; 243 + origin_fullpath = NULL; 244 + } 252 245 mutex_unlock(&server->refpath_lock); 253 - origin_fullpath = NULL; 246 + 247 + if (list_empty(&tcon->dfs_ses_list)) { 248 + list_replace_init(&mnt_ctx->dfs_ses_list, 249 + &tcon->dfs_ses_list); 250 + } else { 251 + dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list); 252 + } 254 253 } 255 254 256 255 out: ··· 281 260 rc = get_session(mnt_ctx, NULL); 282 261 if (rc) 283 262 return rc; 284 - mnt_ctx->root_ses = mnt_ctx->ses; 263 + ctx->dfs_root_ses = mnt_ctx->ses; 285 264 /* 286 265 * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally 287 266 * try to get an DFS referral (even cached) to determine whether it is an DFS mount. ··· 301 280 } 302 281 303 282 *isdfs = true; 304 - set_root_ses(mnt_ctx); 283 + rc = get_root_smb_session(mnt_ctx); 284 + if (rc) 285 + return rc; 305 286 306 287 return __dfs_mount_share(mnt_ctx); 307 288 }
+18 -1
fs/cifs/dfs.h
··· 10 10 #include "fs_context.h" 11 11 #include "cifs_unicode.h" 12 12 13 + struct dfs_root_ses { 14 + struct list_head list; 15 + struct cifs_ses *ses; 16 + }; 17 + 13 18 int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref, 14 19 struct smb3_fs_context *ctx); 15 20 int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs); ··· 27 22 static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *path, 28 23 struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tl) 29 24 { 25 + struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; 30 26 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; 31 27 32 - return dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls, 28 + return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls, 33 29 cifs_remap(cifs_sb), path, ref, tl); 34 30 } 35 31 ··· 47 41 server->origin_fullpath, 48 42 strlen(server->origin_fullpath), 49 43 true); 44 + } 45 + 46 + static inline void dfs_put_root_smb_sessions(struct list_head *head) 47 + { 48 + struct dfs_root_ses *root, *tmp; 49 + 50 + list_for_each_entry_safe(root, tmp, head, list) { 51 + list_del_init(&root->list); 52 + cifs_put_smb_ses(root->ses); 53 + kfree(root); 54 + } 50 55 } 51 56 52 57 #endif /* _CIFS_DFS_H */
-140
fs/cifs/dfs_cache.c
··· 49 49 struct cache_dfs_tgt *tgthint; 50 50 }; 51 51 52 - /* List of referral server sessions per dfs mount */ 53 - struct mount_group { 54 - struct list_head list; 55 - uuid_t id; 56 - struct cifs_ses *sessions[CACHE_MAX_ENTRIES]; 57 - int num_sessions; 58 - spinlock_t lock; 59 - struct list_head refresh_list; 60 - struct kref refcount; 61 - }; 62 - 63 52 static struct kmem_cache *cache_slab __read_mostly; 64 53 static struct workqueue_struct *dfscache_wq __read_mostly; 65 54 ··· 65 76 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE]; 66 77 static DECLARE_RWSEM(htable_rw_lock); 67 78 68 - static LIST_HEAD(mount_group_list); 69 - static DEFINE_MUTEX(mount_group_list_lock); 70 - 71 79 static void refresh_cache_worker(struct work_struct *work); 72 80 73 81 static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker); 74 - 75 - static void __mount_group_release(struct mount_group *mg) 76 - { 77 - int i; 78 - 79 - for (i = 0; i < mg->num_sessions; i++) 80 - cifs_put_smb_ses(mg->sessions[i]); 81 - kfree(mg); 82 - } 83 - 84 - static void mount_group_release(struct kref *kref) 85 - { 86 - struct mount_group *mg = container_of(kref, struct mount_group, refcount); 87 - 88 - mutex_lock(&mount_group_list_lock); 89 - list_del(&mg->list); 90 - mutex_unlock(&mount_group_list_lock); 91 - __mount_group_release(mg); 92 - } 93 - 94 - static struct mount_group *find_mount_group_locked(const uuid_t *id) 95 - { 96 - struct mount_group *mg; 97 - 98 - list_for_each_entry(mg, &mount_group_list, list) { 99 - if (uuid_equal(&mg->id, id)) 100 - return mg; 101 - } 102 - return ERR_PTR(-ENOENT); 103 - } 104 - 105 - static struct mount_group *__get_mount_group_locked(const uuid_t *id) 106 - { 107 - struct mount_group *mg; 108 - 109 - mg = find_mount_group_locked(id); 110 - if (!IS_ERR(mg)) 111 - return mg; 112 - 113 - mg = kmalloc(sizeof(*mg), GFP_KERNEL); 114 - if (!mg) 115 - return ERR_PTR(-ENOMEM); 116 - kref_init(&mg->refcount); 117 - uuid_copy(&mg->id, id); 118 - mg->num_sessions = 0; 119 - spin_lock_init(&mg->lock); 120 - list_add(&mg->list, &mount_group_list); 121 - return mg; 122 - } 123 - 124 - static struct mount_group *get_mount_group(const uuid_t *id) 125 - { 126 - struct mount_group *mg; 127 - 128 - mutex_lock(&mount_group_list_lock); 129 - mg = __get_mount_group_locked(id); 130 - if (!IS_ERR(mg)) 131 - kref_get(&mg->refcount); 132 - mutex_unlock(&mount_group_list_lock); 133 - 134 - return mg; 135 - } 136 - 137 - static void free_mount_group_list(void) 138 - { 139 - struct mount_group *mg, *tmp_mg; 140 - 141 - list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) { 142 - list_del_init(&mg->list); 143 - __mount_group_release(mg); 144 - } 145 - } 146 82 147 83 /** 148 84 * dfs_cache_canonical_path - get a canonical DFS path ··· 618 704 { 619 705 cancel_delayed_work_sync(&refresh_task); 620 706 unload_nls(cache_cp); 621 - free_mount_group_list(); 622 707 flush_cache_ents(); 623 708 kmem_cache_destroy(cache_slab); 624 709 destroy_workqueue(dfscache_wq); ··· 1024 1111 return rc; 1025 1112 } 1026 1113 1027 - /** 1028 - * dfs_cache_add_refsrv_session - add SMB session of referral server 1029 - * 1030 - * @mount_id: mount group uuid to lookup. 1031 - * @ses: reference counted SMB session of referral server. 1032 - */ 1033 - void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses) 1034 - { 1035 - struct mount_group *mg; 1036 - 1037 - if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses)) 1038 - return; 1039 - 1040 - mg = get_mount_group(mount_id); 1041 - if (WARN_ON_ONCE(IS_ERR(mg))) 1042 - return; 1043 - 1044 - spin_lock(&mg->lock); 1045 - if (mg->num_sessions < ARRAY_SIZE(mg->sessions)) 1046 - mg->sessions[mg->num_sessions++] = ses; 1047 - spin_unlock(&mg->lock); 1048 - kref_put(&mg->refcount, mount_group_release); 1049 - } 1050 - 1051 - /** 1052 - * dfs_cache_put_refsrv_sessions - put all referral server sessions 1053 - * 1054 - * Put all SMB sessions from the given mount group id. 1055 - * 1056 - * @mount_id: mount group uuid to lookup. 1057 - */ 1058 - void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id) 1059 - { 1060 - struct mount_group *mg; 1061 - 1062 - if (!mount_id || uuid_is_null(mount_id)) 1063 - return; 1064 - 1065 - mutex_lock(&mount_group_list_lock); 1066 - mg = find_mount_group_locked(mount_id); 1067 - if (IS_ERR(mg)) { 1068 - mutex_unlock(&mount_group_list_lock); 1069 - return; 1070 - } 1071 - mutex_unlock(&mount_group_list_lock); 1072 - kref_put(&mg->refcount, mount_group_release); 1073 - } 1074 - 1075 1114 /* Extract share from DFS target and return a pointer to prefix path or NULL */ 1076 1115 static const char *parse_target_share(const char *target, char **share) 1077 1116 { ··· 1248 1383 if (!server->origin_fullpath) { 1249 1384 cifs_dbg(FYI, "%s: not a dfs mount\n", __func__); 1250 1385 return 0; 1251 - } 1252 - 1253 - if (uuid_is_null(&cifs_sb->dfs_mount_id)) { 1254 - cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__); 1255 - return -EINVAL; 1256 1386 } 1257 1387 /* 1258 1388 * After reconnecting to a different server, unique ids won't match anymore, so we disable
-2
fs/cifs/dfs_cache.h
··· 40 40 struct dfs_info3_param *ref); 41 41 int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share, 42 42 char **prefix); 43 - void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id); 44 - void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses); 45 43 char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap); 46 44 int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb); 47 45
+1
fs/cifs/fs_context.h
··· 265 265 bool rootfs:1; /* if it's a SMB root file system */ 266 266 bool witness:1; /* use witness protocol */ 267 267 char *leaf_fullpath; 268 + struct cifs_ses *dfs_root_ses; 268 269 }; 269 270 270 271 extern const struct fs_parameter_spec smb3_fs_parameters[];
+8
fs/cifs/misc.c
··· 22 22 #ifdef CONFIG_CIFS_DFS_UPCALL 23 23 #include "dns_resolve.h" 24 24 #include "dfs_cache.h" 25 + #include "dfs.h" 25 26 #endif 26 27 #include "fs_context.h" 27 28 #include "cached_dir.h" ··· 135 134 spin_lock_init(&ret_buf->stat_lock); 136 135 atomic_set(&ret_buf->num_local_opens, 0); 137 136 atomic_set(&ret_buf->num_remote_opens, 0); 137 + #ifdef CONFIG_CIFS_DFS_UPCALL 138 + INIT_LIST_HEAD(&ret_buf->dfs_ses_list); 139 + #endif 138 140 139 141 return ret_buf; 140 142 } ··· 153 149 atomic_dec(&tconInfoAllocCount); 154 150 kfree(tcon->nativeFileSystem); 155 151 kfree_sensitive(tcon->password); 152 + #ifdef CONFIG_CIFS_DFS_UPCALL 153 + dfs_put_root_smb_sessions(&tcon->dfs_ses_list); 154 + #endif 156 155 kfree(tcon); 157 156 } 158 157 ··· 1262 1255 * removing cached DFS targets that the client would eventually 1263 1256 * need during failover. 1264 1257 */ 1258 + ses = CIFS_DFS_ROOT_SES(ses); 1265 1259 if (ses->server->ops->get_dfs_refer && 1266 1260 !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs, 1267 1261 &num_refs, cifs_sb->local_nls,
+24 -7
fs/cifs/smb2inode.c
··· 234 234 size[0] = 8; /* sizeof __le64 */ 235 235 data[0] = ptr; 236 236 237 - rc = SMB2_set_info_init(tcon, server, 238 - &rqst[num_rqst], COMPOUND_FID, 239 - COMPOUND_FID, current->tgid, 240 - FILE_END_OF_FILE_INFORMATION, 241 - SMB2_O_INFO_FILE, 0, data, size); 237 + if (cfile) { 238 + rc = SMB2_set_info_init(tcon, server, 239 + &rqst[num_rqst], 240 + cfile->fid.persistent_fid, 241 + cfile->fid.volatile_fid, 242 + current->tgid, 243 + FILE_END_OF_FILE_INFORMATION, 244 + SMB2_O_INFO_FILE, 0, 245 + data, size); 246 + } else { 247 + rc = SMB2_set_info_init(tcon, server, 248 + &rqst[num_rqst], 249 + COMPOUND_FID, 250 + COMPOUND_FID, 251 + current->tgid, 252 + FILE_END_OF_FILE_INFORMATION, 253 + SMB2_O_INFO_FILE, 0, 254 + data, size); 255 + if (!rc) { 256 + smb2_set_next_command(tcon, &rqst[num_rqst]); 257 + smb2_set_related(&rqst[num_rqst]); 258 + } 259 + } 242 260 if (rc) 243 261 goto finished; 244 - smb2_set_next_command(tcon, &rqst[num_rqst]); 245 - smb2_set_related(&rqst[num_rqst++]); 262 + num_rqst++; 246 263 trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path); 247 264 break; 248 265 case SMB2_OP_SET_INFO:
+1 -1
fs/cifs/smb2transport.c
··· 425 425 426 426 /* safe to access primary channel, since it will never go away */ 427 427 spin_lock(&ses->chan_lock); 428 - memcpy(ses->chans[0].signkey, ses->smb3signingkey, 428 + memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey, 429 429 SMB3_SIGN_KEY_SIZE); 430 430 spin_unlock(&ses->chan_lock); 431 431
+9 -12
fs/cifs/transport.c
··· 278 278 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, 279 279 struct smb_rqst *rqst) 280 280 { 281 - int rc = 0; 281 + int rc; 282 282 struct kvec *iov; 283 283 int n_vec; 284 284 unsigned int send_length = 0; ··· 289 289 struct msghdr smb_msg = {}; 290 290 __be32 rfc1002_marker; 291 291 292 + cifs_in_send_inc(server); 292 293 if (cifs_rdma_enabled(server)) { 293 294 /* return -EAGAIN when connecting or reconnecting */ 294 295 rc = -EAGAIN; ··· 298 297 goto smbd_done; 299 298 } 300 299 300 + rc = -EAGAIN; 301 301 if (ssocket == NULL) 302 - return -EAGAIN; 302 + goto out; 303 303 304 + rc = -ERESTARTSYS; 304 305 if (fatal_signal_pending(current)) { 305 306 cifs_dbg(FYI, "signal pending before send request\n"); 306 - return -ERESTARTSYS; 307 + goto out; 307 308 } 308 309 310 + rc = 0; 309 311 /* cork the socket */ 310 312 tcp_sock_set_cork(ssocket->sk, true); 311 313 ··· 411 407 rc); 412 408 else if (rc > 0) 413 409 rc = 0; 414 - 410 + out: 411 + cifs_in_send_dec(server); 415 412 return rc; 416 413 } 417 414 ··· 831 826 * I/O response may come back and free the mid entry on another thread. 832 827 */ 833 828 cifs_save_when_sent(mid); 834 - cifs_in_send_inc(server); 835 829 rc = smb_send_rqst(server, 1, rqst, flags); 836 - cifs_in_send_dec(server); 837 830 838 831 if (rc < 0) { 839 832 revert_current_mid(server, mid->credits); ··· 1147 1144 else 1148 1145 midQ[i]->callback = cifs_compound_last_callback; 1149 1146 } 1150 - cifs_in_send_inc(server); 1151 1147 rc = smb_send_rqst(server, num_rqst, rqst, flags); 1152 - cifs_in_send_dec(server); 1153 1148 1154 1149 for (i = 0; i < num_rqst; i++) 1155 1150 cifs_save_when_sent(midQ[i]); ··· 1397 1396 1398 1397 midQ->mid_state = MID_REQUEST_SUBMITTED; 1399 1398 1400 - cifs_in_send_inc(server); 1401 1399 rc = smb_send(server, in_buf, len); 1402 - cifs_in_send_dec(server); 1403 1400 cifs_save_when_sent(midQ); 1404 1401 1405 1402 if (rc < 0) ··· 1538 1539 } 1539 1540 1540 1541 midQ->mid_state = MID_REQUEST_SUBMITTED; 1541 - cifs_in_send_inc(server); 1542 1542 rc = smb_send(server, in_buf, len); 1543 - cifs_in_send_dec(server); 1544 1543 cifs_save_when_sent(midQ); 1545 1544 1546 1545 if (rc < 0)