Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'nfs-for-6.17-3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client fixes from Trond Myklebust:
"Stable patches:

- Revert "SUNRPC: Don't allow waiting for exiting tasks" as it is
breaking ltp tests

Bugfixes:

- Another set of fixes to the tracking of NFSv4 server capabilities
when crossing filesystem boundaries

- Localio fix to restore credentials and prevent triggering a
BUG_ON()

- Fix to prevent flapping of the localio on/off trigger

- Protections against 'eof page pollution' as demonstrated in
xfstests generic/363

- Series of patches to ensure correct ordering of O_DIRECT i/o and
truncate, fallocate and copy functions

- Fix a NULL pointer check in flexfiles reads that regresses 6.17

- Correct a typo that breaks flexfiles layout segment processing"

* tag 'nfs-for-6.17-3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
NFSv4/flexfiles: Fix layout merge mirror check.
SUNRPC: call xs_sock_process_cmsg for all cmsg
Revert "SUNRPC: Don't allow waiting for exiting tasks"
NFS: Fix the marking of the folio as up to date
NFS: nfs_invalidate_folio() must observe the offset and size arguments
NFSv4.2: Serialise O_DIRECT i/o and copy range
NFSv4.2: Serialise O_DIRECT i/o and clone range
NFSv4.2: Serialise O_DIRECT i/o and fallocate()
NFS: Serialise O_DIRECT i/o and truncate()
NFSv4.2: Protect copy offload and clone against 'eof page pollution'
NFS: Protect against 'eof page pollution'
flexfiles/pNFS: fix NULL checks on result of ff_layout_choose_ds_for_read
nfs/localio: avoid bouncing LOCALIO if nfs_client_is_local()
nfs/localio: restore creds before releasing pageio data
NFSv4: Clear the NFS_CAP_XATTR flag if not supported by the server
NFSv4: Clear NFS_CAP_OPEN_XOR and NFS_CAP_DELEGTIME if not supported
NFSv4: Clear the NFS_CAP_FS_LOCATIONS flag if it is not set
NFSv4: Don't clear capabilities that won't be reset

+129 -99
+2
fs/nfs/client.c
··· 888 888 889 889 if (fsinfo->xattr_support) 890 890 server->caps |= NFS_CAP_XATTR; 891 + else 892 + server->caps &= ~NFS_CAP_XATTR; 891 893 #endif 892 894 } 893 895
+37 -3
fs/nfs/file.c
··· 28 28 #include <linux/mm.h> 29 29 #include <linux/pagemap.h> 30 30 #include <linux/gfp.h> 31 + #include <linux/rmap.h> 31 32 #include <linux/swap.h> 32 33 #include <linux/compaction.h> 33 34 ··· 281 280 } 282 281 EXPORT_SYMBOL_GPL(nfs_file_fsync); 283 282 283 + void nfs_truncate_last_folio(struct address_space *mapping, loff_t from, 284 + loff_t to) 285 + { 286 + struct folio *folio; 287 + 288 + if (from >= to) 289 + return; 290 + 291 + folio = filemap_lock_folio(mapping, from >> PAGE_SHIFT); 292 + if (IS_ERR(folio)) 293 + return; 294 + 295 + if (folio_mkclean(folio)) 296 + folio_mark_dirty(folio); 297 + 298 + if (folio_test_uptodate(folio)) { 299 + loff_t fpos = folio_pos(folio); 300 + size_t offset = from - fpos; 301 + size_t end = folio_size(folio); 302 + 303 + if (to - fpos < end) 304 + end = to - fpos; 305 + folio_zero_segment(folio, offset, end); 306 + trace_nfs_size_truncate_folio(mapping->host, to); 307 + } 308 + 309 + folio_unlock(folio); 310 + folio_put(folio); 311 + } 312 + EXPORT_SYMBOL_GPL(nfs_truncate_last_folio); 313 + 284 314 /* 285 315 * Decide whether a read/modify/write cycle may be more efficient 286 316 * then a modify/write/read cycle when writing to a page in the ··· 388 356 389 357 dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n", 390 358 file, mapping->host->i_ino, len, (long long) pos); 359 + nfs_truncate_last_folio(mapping, i_size_read(mapping->host), pos); 391 360 392 361 fgp |= fgf_set_order(len); 393 362 start: ··· 475 442 dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n", 476 443 folio->index, offset, length); 477 444 478 - if (offset != 0 || length < folio_size(folio)) 479 - return; 480 445 /* Cancel any unstarted writes on this page */ 481 - nfs_wb_folio_cancel(inode, folio); 446 + if (offset != 0 || length < folio_size(folio)) 447 + nfs_wb_folio(inode, folio); 448 + else 449 + nfs_wb_folio_cancel(inode, folio); 482 450 folio_wait_private_2(folio); /* [DEPRECATED] */ 483 451 
trace_nfs_invalidate_folio(inode, folio_pos(folio) + offset, length); 484 452 }
+13 -8
fs/nfs/flexfilelayout/flexfilelayout.c
··· 293 293 struct pnfs_layout_segment *l2) 294 294 { 295 295 const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1); 296 - const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1); 296 + const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2); 297 297 u32 i; 298 298 299 299 if (fl1->mirror_array_cnt != fl2->mirror_array_cnt) ··· 773 773 continue; 774 774 775 775 if (check_device && 776 - nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) 776 + nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) { 777 + // reinitialize the error state in case this is the last iteration 778 + ds = ERR_PTR(-EINVAL); 777 779 continue; 780 + } 778 781 779 782 *best_idx = idx; 780 783 break; ··· 807 804 struct nfs4_pnfs_ds *ds; 808 805 809 806 ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx); 810 - if (ds) 807 + if (!IS_ERR(ds)) 811 808 return ds; 812 809 return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx); 813 810 } ··· 821 818 822 819 ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx, 823 820 best_idx); 824 - if (ds || !pgio->pg_mirror_idx) 821 + if (!IS_ERR(ds) || !pgio->pg_mirror_idx) 825 822 return ds; 826 823 return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx); 827 824 } ··· 871 868 req->wb_nio = 0; 872 869 873 870 ds = ff_layout_get_ds_for_read(pgio, &ds_idx); 874 - if (!ds) { 871 + if (IS_ERR(ds)) { 875 872 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg)) 876 873 goto out_mds; 877 874 pnfs_generic_pg_cleanup(pgio); ··· 1075 1072 { 1076 1073 u32 idx = hdr->pgio_mirror_idx + 1; 1077 1074 u32 new_idx = 0; 1075 + struct nfs4_pnfs_ds *ds; 1078 1076 1079 - if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx)) 1080 - ff_layout_send_layouterror(hdr->lseg); 1081 - else 1077 + ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx); 1078 + if (IS_ERR(ds)) 1082 1079 pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg); 1080 + else 1081 + 
ff_layout_send_layouterror(hdr->lseg); 1083 1082 pnfs_read_resend_pnfs(hdr, new_idx); 1084 1083 } 1085 1084
+10 -3
fs/nfs/inode.c
··· 716 716 { 717 717 struct inode *inode = d_inode(dentry); 718 718 struct nfs_fattr *fattr; 719 + loff_t oldsize = i_size_read(inode); 719 720 int error = 0; 720 721 721 722 nfs_inc_stats(inode, NFSIOS_VFSSETATTR); ··· 732 731 if (error) 733 732 return error; 734 733 735 - if (attr->ia_size == i_size_read(inode)) 734 + if (attr->ia_size == oldsize) 736 735 attr->ia_valid &= ~ATTR_SIZE; 737 736 } 738 737 ··· 768 767 trace_nfs_setattr_enter(inode); 769 768 770 769 /* Write all dirty data */ 771 - if (S_ISREG(inode->i_mode)) 770 + if (S_ISREG(inode->i_mode)) { 771 + nfs_file_block_o_direct(NFS_I(inode)); 772 772 nfs_sync_inode(inode); 773 + } 773 774 774 775 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 775 776 if (fattr == NULL) { ··· 780 777 } 781 778 782 779 error = NFS_PROTO(inode)->setattr(dentry, fattr, attr); 783 - if (error == 0) 780 + if (error == 0) { 781 + if (attr->ia_valid & ATTR_SIZE) 782 + nfs_truncate_last_folio(inode->i_mapping, oldsize, 783 + attr->ia_size); 784 784 error = nfs_refresh_inode(inode, fattr); 785 + } 785 786 nfs_free_fattr(fattr); 786 787 out: 787 788 trace_nfs_setattr_exit(inode, error);
+12
fs/nfs/internal.h
··· 437 437 int nfs_lock(struct file *, int, struct file_lock *); 438 438 int nfs_flock(struct file *, int, struct file_lock *); 439 439 int nfs_check_flags(int); 440 + void nfs_truncate_last_folio(struct address_space *mapping, loff_t from, 441 + loff_t to); 440 442 441 443 /* inode.c */ 442 444 extern struct workqueue_struct *nfsiod_workqueue; ··· 531 529 { 532 530 return test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0; 533 531 } 532 + 533 + /* Must be called with exclusively locked inode->i_rwsem */ 534 + static inline void nfs_file_block_o_direct(struct nfs_inode *nfsi) 535 + { 536 + if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) { 537 + clear_bit(NFS_INO_ODIRECT, &nfsi->flags); 538 + inode_dio_wait(&nfsi->vfs_inode); 539 + } 540 + } 541 + 534 542 535 543 /* namespace.c */ 536 544 #define NFS_PATH_CANONICAL 1
+2 -11
fs/nfs/io.c
··· 14 14 15 15 #include "internal.h" 16 16 17 - /* Call with exclusively locked inode->i_rwsem */ 18 - static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode) 19 - { 20 - if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) { 21 - clear_bit(NFS_INO_ODIRECT, &nfsi->flags); 22 - inode_dio_wait(inode); 23 - } 24 - } 25 - 26 17 /** 27 18 * nfs_start_io_read - declare the file is being used for buffered reads 28 19 * @inode: file inode ··· 48 57 err = down_write_killable(&inode->i_rwsem); 49 58 if (err) 50 59 return err; 51 - nfs_block_o_direct(nfsi, inode); 60 + nfs_file_block_o_direct(nfsi); 52 61 downgrade_write(&inode->i_rwsem); 53 62 54 63 return 0; ··· 81 90 82 91 err = down_write_killable(&inode->i_rwsem); 83 92 if (!err) 84 - nfs_block_o_direct(NFS_I(inode), inode); 93 + nfs_file_block_o_direct(NFS_I(inode)); 85 94 return err; 86 95 } 87 96
+11 -10
fs/nfs/localio.c
··· 180 180 return; 181 181 } 182 182 183 - if (nfs_client_is_local(clp)) { 184 - /* If already enabled, disable and re-enable */ 185 - nfs_localio_disable_client(clp); 186 - } 183 + if (nfs_client_is_local(clp)) 184 + return; 187 185 188 186 if (!nfs_uuid_begin(&clp->cl_uuid)) 189 187 return; ··· 242 244 case -ENOMEM: 243 245 case -ENXIO: 244 246 case -ENOENT: 245 - /* Revalidate localio, will disable if unsupported */ 247 + /* Revalidate localio */ 248 + nfs_localio_disable_client(clp); 246 249 nfs_local_probe(clp); 247 250 } 248 251 } ··· 452 453 nfs_local_iter_init(&iter, iocb, READ); 453 454 454 455 status = filp->f_op->read_iter(&iocb->kiocb, &iter); 456 + 457 + revert_creds(save_cred); 458 + 455 459 if (status != -EIOCBQUEUED) { 456 460 nfs_local_read_done(iocb, status); 457 461 nfs_local_pgio_release(iocb); 458 462 } 459 - 460 - revert_creds(save_cred); 461 463 } 462 464 463 465 static int ··· 648 648 file_start_write(filp); 649 649 status = filp->f_op->write_iter(&iocb->kiocb, &iter); 650 650 file_end_write(filp); 651 + 652 + revert_creds(save_cred); 653 + current->flags = old_flags; 654 + 651 655 if (status != -EIOCBQUEUED) { 652 656 nfs_local_write_done(iocb, status); 653 657 nfs_local_vfs_getattr(iocb); 654 658 nfs_local_pgio_release(iocb); 655 659 } 656 - 657 - revert_creds(save_cred); 658 - current->flags = old_flags; 659 660 } 660 661 661 662 static int
+26 -9
fs/nfs/nfs42proc.c
··· 114 114 exception.inode = inode; 115 115 exception.state = lock->open_context->state; 116 116 117 + nfs_file_block_o_direct(NFS_I(inode)); 117 118 err = nfs_sync_inode(inode); 118 119 if (err) 119 120 goto out; ··· 138 137 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE], 139 138 }; 140 139 struct inode *inode = file_inode(filep); 140 + loff_t oldsize = i_size_read(inode); 141 141 int err; 142 142 143 143 if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE)) ··· 147 145 inode_lock(inode); 148 146 149 147 err = nfs42_proc_fallocate(&msg, filep, offset, len); 150 - if (err == -EOPNOTSUPP) 148 + 149 + if (err == 0) 150 + nfs_truncate_last_folio(inode->i_mapping, oldsize, 151 + offset + len); 152 + else if (err == -EOPNOTSUPP) 151 153 NFS_SERVER(inode)->caps &= ~(NFS_CAP_ALLOCATE | 152 154 NFS_CAP_ZERO_RANGE); 153 155 ··· 189 183 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ZERO_RANGE], 190 184 }; 191 185 struct inode *inode = file_inode(filep); 186 + loff_t oldsize = i_size_read(inode); 192 187 int err; 193 188 194 189 if (!nfs_server_capable(inode, NFS_CAP_ZERO_RANGE)) ··· 198 191 inode_lock(inode); 199 192 200 193 err = nfs42_proc_fallocate(&msg, filep, offset, len); 201 - if (err == 0) 194 + if (err == 0) { 195 + nfs_truncate_last_folio(inode->i_mapping, oldsize, 196 + offset + len); 202 197 truncate_pagecache_range(inode, offset, (offset + len) -1); 203 - if (err == -EOPNOTSUPP) 198 + } else if (err == -EOPNOTSUPP) 204 199 NFS_SERVER(inode)->caps &= ~NFS_CAP_ZERO_RANGE; 205 200 206 201 inode_unlock(inode); ··· 363 354 364 355 /** 365 356 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload 366 - * @inode: pointer to destination inode 357 + * @file: pointer to destination file 367 358 * @pos: destination offset 368 359 * @len: copy length 360 + * @oldsize: length of the file prior to clone/copy 369 361 * 370 362 * Punch a hole in the inode page cache, so that the NFS client will 371 363 * know to retrieve new data. 
372 364 * Update the file size if necessary, and then mark the inode as having 373 365 * invalid cached values for change attribute, ctime, mtime and space used. 374 366 */ 375 - static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len) 367 + static void nfs42_copy_dest_done(struct file *file, loff_t pos, loff_t len, 368 + loff_t oldsize) 376 369 { 370 + struct inode *inode = file_inode(file); 371 + struct address_space *mapping = file->f_mapping; 377 372 loff_t newsize = pos + len; 378 373 loff_t end = newsize - 1; 379 374 380 - WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping, 381 - pos >> PAGE_SHIFT, end >> PAGE_SHIFT)); 375 + nfs_truncate_last_folio(mapping, oldsize, pos); 376 + WARN_ON_ONCE(invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, 377 + end >> PAGE_SHIFT)); 382 378 383 379 spin_lock(&inode->i_lock); 384 380 if (newsize > i_size_read(inode)) ··· 416 402 struct nfs_server *src_server = NFS_SERVER(src_inode); 417 403 loff_t pos_src = args->src_pos; 418 404 loff_t pos_dst = args->dst_pos; 405 + loff_t oldsize_dst = i_size_read(dst_inode); 419 406 size_t count = args->count; 420 407 ssize_t status; 421 408 ··· 445 430 return status; 446 431 } 447 432 433 + nfs_file_block_o_direct(NFS_I(dst_inode)); 448 434 status = nfs_sync_inode(dst_inode); 449 435 if (status) 450 436 return status; ··· 491 475 goto out; 492 476 } 493 477 494 - nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count); 478 + nfs42_copy_dest_done(dst, pos_dst, res->write_res.count, oldsize_dst); 495 479 nfs_invalidate_atime(src_inode); 496 480 status = res->write_res.count; 497 481 out: ··· 1258 1242 struct nfs42_clone_res res = { 1259 1243 .server = server, 1260 1244 }; 1245 + loff_t oldsize_dst = i_size_read(dst_inode); 1261 1246 int status; 1262 1247 1263 1248 msg->rpc_argp = &args; ··· 1293 1276 /* a zero-length count means clone to EOF in src */ 1294 1277 if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE) 1295 1278 count = 
nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset; 1296 - nfs42_copy_dest_done(dst_inode, dst_offset, count); 1279 + nfs42_copy_dest_done(dst_f, dst_offset, count, oldsize_dst); 1297 1280 status = nfs_post_op_update_inode(dst_inode, res.dst_fattr); 1298 1281 } 1299 1282
+2
fs/nfs/nfs4file.c
··· 278 278 lock_two_nondirectories(src_inode, dst_inode); 279 279 /* flush all pending writes on both src and dst so that server 280 280 * has the latest data */ 281 + nfs_file_block_o_direct(NFS_I(src_inode)); 281 282 ret = nfs_sync_inode(src_inode); 282 283 if (ret) 283 284 goto out_unlock; 285 + nfs_file_block_o_direct(NFS_I(dst_inode)); 284 286 ret = nfs_sync_inode(dst_inode); 285 287 if (ret) 286 288 goto out_unlock;
+4 -3
fs/nfs/nfs4proc.c
··· 4013 4013 res.attr_bitmask[2]; 4014 4014 } 4015 4015 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 4016 - server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | 4017 - NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL); 4016 + server->caps &= 4017 + ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS | 4018 + NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS | 4019 + NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME); 4018 4020 server->fattr_valid = NFS_ATTR_FATTR_V4; 4019 4021 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 4020 4022 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) ··· 4094 4092 }; 4095 4093 int err; 4096 4094 4097 - nfs_server_set_init_caps(server); 4098 4095 do { 4099 4096 err = nfs4_handle_exception(server, 4100 4097 _nfs4_server_capabilities(server, fhandle),
+1
fs/nfs/nfstrace.h
··· 272 272 TP_ARGS(inode, new_size)) 273 273 274 274 DEFINE_NFS_UPDATE_SIZE_EVENT(truncate); 275 + DEFINE_NFS_UPDATE_SIZE_EVENT(truncate_folio); 275 276 DEFINE_NFS_UPDATE_SIZE_EVENT(wcc); 276 277 DEFINE_NFS_UPDATE_SIZE_EVENT(update); 277 278 DEFINE_NFS_UPDATE_SIZE_EVENT(grow);
+6 -47
fs/nfs/write.c
··· 237 237 } 238 238 239 239 /* 240 - * nfs_page_group_search_locked 241 - * @head - head request of page group 242 - * @page_offset - offset into page 240 + * nfs_page_covers_folio 241 + * @req: struct nfs_page 243 242 * 244 - * Search page group with head @head to find a request that contains the 245 - * page offset @page_offset. 246 - * 247 - * Returns a pointer to the first matching nfs request, or NULL if no 248 - * match is found. 249 - * 250 - * Must be called with the page group lock held 251 - */ 252 - static struct nfs_page * 253 - nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) 254 - { 255 - struct nfs_page *req; 256 - 257 - req = head; 258 - do { 259 - if (page_offset >= req->wb_pgbase && 260 - page_offset < (req->wb_pgbase + req->wb_bytes)) 261 - return req; 262 - 263 - req = req->wb_this_page; 264 - } while (req != head); 265 - 266 - return NULL; 267 - } 268 - 269 - /* 270 - * nfs_page_group_covers_page 271 - * @head - head request of page group 272 - * 273 - * Return true if the page group with head @head covers the whole page, 274 - * returns false otherwise 243 + * Return true if the request covers the whole folio. 
244 + * Note that the caller should ensure all subrequests have been joined 275 245 */ 276 246 static bool nfs_page_group_covers_page(struct nfs_page *req) 277 247 { 278 248 unsigned int len = nfs_folio_length(nfs_page_to_folio(req)); 279 - struct nfs_page *tmp; 280 - unsigned int pos = 0; 281 249 282 - nfs_page_group_lock(req); 283 - 284 - for (;;) { 285 - tmp = nfs_page_group_search_locked(req->wb_head, pos); 286 - if (!tmp) 287 - break; 288 - pos = tmp->wb_pgbase + tmp->wb_bytes; 289 - } 290 - 291 - nfs_page_group_unlock(req); 292 - return pos >= len; 250 + return req->wb_pgbase == 0 && req->wb_bytes == len; 293 251 } 294 252 295 253 /* We can set the PG_uptodate flag if we see that a write request ··· 2003 2045 * release it */ 2004 2046 nfs_inode_remove_request(req); 2005 2047 nfs_unlock_and_release_request(req); 2048 + folio_cancel_dirty(folio); 2006 2049 } 2007 2050 2008 2051 return ret;
-2
net/sunrpc/sched.c
··· 276 276 277 277 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) 278 278 { 279 - if (unlikely(current->flags & PF_EXITING)) 280 - return -EINTR; 281 279 schedule(); 282 280 if (signal_pending_state(mode, current)) 283 281 return -ERESTARTSYS;
+3 -3
net/sunrpc/xprtsock.c
··· 407 407 iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1, 408 408 alert_kvec.iov_len); 409 409 ret = sock_recvmsg(sock, &msg, flags); 410 - if (ret > 0 && 411 - tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) { 412 - iov_iter_revert(&msg.msg_iter, ret); 410 + if (ret > 0) { 411 + if (tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) 412 + iov_iter_revert(&msg.msg_iter, ret); 413 413 ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg, 414 414 -EAGAIN); 415 415 }