Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'v6.18-rc-part2-smb-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull more smb client updates from Steve French:

- fix i_size in fallocate

- two truncate fixes

- utime fix

- minor cleanups

- SMB1 fixes

- improve error check in read

- improve perf of copy_file_range (copy_chunk)

* tag 'v6.18-rc-part2-smb-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
cifs: update internal version number
cifs: Add comments for DeletePending assignments in open functions
cifs: Add fallback code path for cifs_mkdir_setinfo()
cifs: Allow fallback code in smb_set_file_info() also for directories
cifs: Query EA $LXMOD in cifs_query_path_info() for WSL reparse points
smb: client: remove cfids_invalidation_worker
smb: client: remove redundant assignment in cifs_strict_fsync()
smb: client: fix race with fallocate(2) and AIO+DIO
smb: client: fix missing timestamp updates after utime(2)
smb: client: fix missing timestamp updates after ftruncate(2)
smb: client: fix missing timestamp updates with O_TRUNC
cifs: Fix copy_to_iter return value check
smb: client: batch SRV_COPYCHUNK entries to cut round trips
smb: client: Omit an if branch in smb2_find_smb_tcon()
smb: client: Return directly after a failed genlmsg_new() in cifs_swn_send_register_message()
smb: client: Use common code in cifs_do_create()
smb: client: Improve unlocking of a mutex in cifs_get_swn_reg()
smb: client: Return a status code only as a constant in cifs_spnego_key_instantiate()
smb: client: Use common code in cifs_lookup()
smb: client: Reduce the scopes for a few variables in two functions

+535 -342
+9 -28
fs/smb/client/cached_dir.c
··· 562 562 563 563 /* 564 564 * Mark all the cfids as closed, and move them to the cfids->dying list. 565 - * They'll be cleaned up later by cfids_invalidation_worker. Take 566 - * a reference to each cfid during this process. 565 + * They'll be cleaned up by laundromat. Take a reference to each cfid 566 + * during this process. 567 567 */ 568 568 spin_lock(&cfids->cfid_list_lock); 569 569 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { ··· 580 580 } else 581 581 kref_get(&cfid->refcount); 582 582 } 583 - /* 584 - * Queue dropping of the dentries once locks have been dropped 585 - */ 586 - if (!list_empty(&cfids->dying)) 587 - queue_work(cfid_put_wq, &cfids->invalidation_work); 588 583 spin_unlock(&cfids->cfid_list_lock); 584 + 585 + /* run laundromat unconditionally now as there might have been previously queued work */ 586 + mod_delayed_work(cfid_put_wq, &cfids->laundromat_work, 0); 587 + flush_delayed_work(&cfids->laundromat_work); 589 588 } 590 589 591 590 static void ··· 714 715 kfree(cfid); 715 716 } 716 717 717 - static void cfids_invalidation_worker(struct work_struct *work) 718 - { 719 - struct cached_fids *cfids = container_of(work, struct cached_fids, 720 - invalidation_work); 721 - struct cached_fid *cfid, *q; 722 - LIST_HEAD(entry); 723 - 724 - spin_lock(&cfids->cfid_list_lock); 725 - /* move cfids->dying to the local list */ 726 - list_cut_before(&entry, &cfids->dying, &cfids->dying); 727 - spin_unlock(&cfids->cfid_list_lock); 728 - 729 - list_for_each_entry_safe(cfid, q, &entry, entry) { 730 - list_del(&cfid->entry); 731 - /* Drop the ref-count acquired in invalidate_all_cached_dirs */ 732 - kref_put(&cfid->refcount, smb2_close_cached_fid); 733 - } 734 - } 735 - 736 718 static void cfids_laundromat_worker(struct work_struct *work) 737 719 { 738 720 struct cached_fids *cfids; ··· 723 743 cfids = container_of(work, struct cached_fids, laundromat_work.work); 724 744 725 745 spin_lock(&cfids->cfid_list_lock); 746 + /* move cfids->dying to 
the local list */ 747 + list_cut_before(&entry, &cfids->dying, &cfids->dying); 748 + 726 749 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { 727 750 if (cfid->last_access_time && 728 751 time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) { ··· 779 796 INIT_LIST_HEAD(&cfids->entries); 780 797 INIT_LIST_HEAD(&cfids->dying); 781 798 782 - INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker); 783 799 INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker); 784 800 queue_delayed_work(cfid_put_wq, &cfids->laundromat_work, 785 801 dir_cache_timeout * HZ); ··· 802 820 return; 803 821 804 822 cancel_delayed_work_sync(&cfids->laundromat_work); 805 - cancel_work_sync(&cfids->invalidation_work); 806 823 807 824 spin_lock(&cfids->cfid_list_lock); 808 825 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
-1
fs/smb/client/cached_dir.h
··· 62 62 int num_entries; 63 63 struct list_head entries; 64 64 struct list_head dying; 65 - struct work_struct invalidation_work; 66 65 struct delayed_work laundromat_work; 67 66 /* aggregate accounting for all cached dirents under this tcon */ 68 67 atomic_long_t total_dirents_entries;
+3 -9
fs/smb/client/cifs_spnego.c
··· 24 24 static int 25 25 cifs_spnego_key_instantiate(struct key *key, struct key_preparsed_payload *prep) 26 26 { 27 - char *payload; 28 - int ret; 27 + char *payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL); 29 28 30 - ret = -ENOMEM; 31 - payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL); 32 29 if (!payload) 33 - goto error; 30 + return -ENOMEM; 34 31 35 32 /* attach the data */ 36 33 key->payload.data[0] = payload; 37 - ret = 0; 38 - 39 - error: 40 - return ret; 34 + return 0; 41 35 } 42 36 43 37 static void
+8 -12
fs/smb/client/cifs_swn.c
··· 82 82 int ret; 83 83 84 84 skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 85 - if (skb == NULL) { 86 - ret = -ENOMEM; 87 - goto fail; 88 - } 85 + if (!skb) 86 + return -ENOMEM; 89 87 90 88 hdr = genlmsg_put(skb, 0, 0, &cifs_genl_family, 0, CIFS_GENL_CMD_SWN_REGISTER); 91 89 if (hdr == NULL) { ··· 170 172 nlmsg_fail: 171 173 genlmsg_cancel(skb, hdr); 172 174 nlmsg_free(skb); 173 - fail: 174 175 return ret; 175 176 } 176 177 ··· 310 313 reg = cifs_find_swn_reg(tcon); 311 314 if (!IS_ERR(reg)) { 312 315 kref_get(&reg->ref_count); 313 - mutex_unlock(&cifs_swnreg_idr_mutex); 314 - return reg; 316 + goto unlock; 315 317 } else if (PTR_ERR(reg) != -EEXIST) { 316 - mutex_unlock(&cifs_swnreg_idr_mutex); 317 - return reg; 318 + goto unlock; 318 319 } 319 320 320 321 reg = kmalloc(sizeof(struct cifs_swn_reg), GFP_ATOMIC); 321 322 if (reg == NULL) { 322 - mutex_unlock(&cifs_swnreg_idr_mutex); 323 - return ERR_PTR(-ENOMEM); 323 + ret = -ENOMEM; 324 + goto fail_unlock; 324 325 } 325 326 326 327 kref_init(&reg->ref_count); ··· 349 354 reg->ip_notify = (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT); 350 355 351 356 reg->tcon = tcon; 352 - 357 + unlock: 353 358 mutex_unlock(&cifs_swnreg_idr_mutex); 354 359 355 360 return reg; ··· 360 365 idr_remove(&cifs_swnreg_idr, reg->id); 361 366 fail: 362 367 kfree(reg); 368 + fail_unlock: 363 369 mutex_unlock(&cifs_swnreg_idr_mutex); 364 370 return ERR_PTR(ret); 365 371 }
+19 -3
fs/smb/client/cifsfs.c
··· 392 392 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); 393 393 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 394 394 struct TCP_Server_Info *server = tcon->ses->server; 395 + struct inode *inode = file_inode(file); 396 + int rc; 395 397 396 - if (server->ops->fallocate) 397 - return server->ops->fallocate(file, tcon, mode, off, len); 398 + if (!server->ops->fallocate) 399 + return -EOPNOTSUPP; 398 400 399 - return -EOPNOTSUPP; 401 + rc = inode_lock_killable(inode); 402 + if (rc) 403 + return rc; 404 + 405 + netfs_wait_for_outstanding_io(inode); 406 + 407 + rc = file_modified(file); 408 + if (rc) 409 + goto out_unlock; 410 + 411 + rc = server->ops->fallocate(file, tcon, mode, off, len); 412 + 413 + out_unlock: 414 + inode_unlock(inode); 415 + return rc; 400 416 } 401 417 402 418 static int cifs_permission(struct mnt_idmap *idmap,
+2 -2
fs/smb/client/cifsfs.h
··· 145 145 #endif /* CONFIG_CIFS_NFSD_EXPORT */ 146 146 147 147 /* when changing internal version - update following two lines at same time */ 148 - #define SMB3_PRODUCT_BUILD 56 149 - #define CIFS_VERSION "2.56" 148 + #define SMB3_PRODUCT_BUILD 57 149 + #define CIFS_VERSION "2.57" 150 150 #endif /* _CIFSFS_H */
+5
fs/smb/client/cifsglob.h
··· 1566 1566 void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr, 1567 1567 bool offload); 1568 1568 void cifsFileInfo_put(struct cifsFileInfo *cifs_file); 1569 + int cifs_file_flush(const unsigned int xid, struct inode *inode, 1570 + struct cifsFileInfo *cfile); 1571 + int cifs_file_set_size(const unsigned int xid, struct dentry *dentry, 1572 + const char *full_path, struct cifsFileInfo *open_file, 1573 + loff_t size); 1569 1574 1570 1575 #define CIFS_CACHE_READ_FLG 1 1571 1576 #define CIFS_CACHE_HANDLE_FLG 2
+2 -2
fs/smb/client/cifssmb.c
··· 1163 1163 cpu_to_le64(le32_to_cpu(pSMBr->EndOfFile)); 1164 1164 pfile_info->EndOfFile = pfile_info->AllocationSize; 1165 1165 pfile_info->NumberOfLinks = cpu_to_le32(1); 1166 - pfile_info->DeletePending = 0; 1166 + pfile_info->DeletePending = 0; /* successful open = not delete pending */ 1167 1167 } 1168 1168 } 1169 1169 ··· 1288 1288 buf->AllocationSize = rsp->AllocationSize; 1289 1289 buf->EndOfFile = rsp->EndOfFile; 1290 1290 buf->NumberOfLinks = cpu_to_le32(1); 1291 - buf->DeletePending = 0; 1291 + buf->DeletePending = 0; /* successful open = not delete pending */ 1292 1292 } 1293 1293 1294 1294 cifs_buf_release(req);
+20 -18
fs/smb/client/dir.c
··· 200 200 201 201 full_path = build_path_from_dentry(direntry, page); 202 202 if (IS_ERR(full_path)) { 203 - free_dentry_path(page); 204 - return PTR_ERR(full_path); 203 + rc = PTR_ERR(full_path); 204 + goto out; 205 205 } 206 206 207 207 /* If we're caching, we need to be able to fill in around partial writes. */ ··· 678 678 const char *full_path; 679 679 void *page; 680 680 int retry_count = 0; 681 - struct cached_fid *cfid = NULL; 681 + struct dentry *de; 682 682 683 683 xid = get_xid(); 684 684 ··· 690 690 cifs_sb = CIFS_SB(parent_dir_inode->i_sb); 691 691 tlink = cifs_sb_tlink(cifs_sb); 692 692 if (IS_ERR(tlink)) { 693 - free_xid(xid); 694 - return ERR_CAST(tlink); 693 + de = ERR_CAST(tlink); 694 + goto free_xid; 695 695 } 696 696 pTcon = tlink_tcon(tlink); 697 697 698 698 rc = check_name(direntry, pTcon); 699 699 if (unlikely(rc)) { 700 - cifs_put_tlink(tlink); 701 - free_xid(xid); 702 - return ERR_PTR(rc); 700 + de = ERR_PTR(rc); 701 + goto put_tlink; 703 702 } 704 703 705 704 /* can not grab the rename sem here since it would ··· 707 708 page = alloc_dentry_path(); 708 709 full_path = build_path_from_dentry(direntry, page); 709 710 if (IS_ERR(full_path)) { 710 - cifs_put_tlink(tlink); 711 - free_xid(xid); 712 - free_dentry_path(page); 713 - return ERR_CAST(full_path); 711 + de = ERR_CAST(full_path); 712 + goto free_dentry_path; 714 713 } 715 714 716 715 if (d_really_is_positive(direntry)) { 717 716 cifs_dbg(FYI, "non-NULL inode in lookup\n"); 718 717 } else { 718 + struct cached_fid *cfid = NULL; 719 + 719 720 cifs_dbg(FYI, "NULL inode in lookup\n"); 720 721 721 722 /* ··· 774 775 } 775 776 776 777 out: 778 + de = d_splice_alias(newInode, direntry); 779 + free_dentry_path: 777 780 free_dentry_path(page); 781 + put_tlink: 778 782 cifs_put_tlink(tlink); 783 + free_xid: 779 784 free_xid(xid); 780 - return d_splice_alias(newInode, direntry); 785 + return de; 781 786 } 782 787 783 788 static int 784 789 cifs_d_revalidate(struct inode *dir, const struct qstr 
*name, 785 790 struct dentry *direntry, unsigned int flags) 786 791 { 787 - struct inode *inode = NULL; 788 - struct cached_fid *cfid; 789 - int rc; 790 - 791 792 if (flags & LOOKUP_RCU) 792 793 return -ECHILD; 793 794 794 795 if (d_really_is_positive(direntry)) { 795 - inode = d_inode(direntry); 796 + int rc; 797 + struct inode *inode = d_inode(direntry); 798 + 796 799 if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) 797 800 CIFS_I(inode)->time = 0; /* force reval */ 798 801 ··· 837 836 } else { 838 837 struct cifs_sb_info *cifs_sb = CIFS_SB(dir->i_sb); 839 838 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); 839 + struct cached_fid *cfid; 840 840 841 841 if (!open_cached_dir_by_dentry(tcon, direntry->d_parent, &cfid)) { 842 842 /*
+72 -33
fs/smb/client/file.c
··· 952 952 } 953 953 } 954 954 955 + int cifs_file_flush(const unsigned int xid, struct inode *inode, 956 + struct cifsFileInfo *cfile) 957 + { 958 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 959 + struct cifs_tcon *tcon; 960 + int rc; 961 + 962 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC) 963 + return 0; 964 + 965 + if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) { 966 + tcon = tlink_tcon(cfile->tlink); 967 + return tcon->ses->server->ops->flush(xid, tcon, 968 + &cfile->fid); 969 + } 970 + rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile); 971 + if (!rc) { 972 + tcon = tlink_tcon(cfile->tlink); 973 + rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid); 974 + cifsFileInfo_put(cfile); 975 + } else if (rc == -EBADF) { 976 + rc = 0; 977 + } 978 + return rc; 979 + } 980 + 981 + static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry) 982 + { 983 + struct cifsInodeInfo *cinode = CIFS_I(d_inode(dentry)); 984 + struct inode *inode = d_inode(dentry); 985 + struct cifsFileInfo *cfile = NULL; 986 + struct TCP_Server_Info *server; 987 + struct cifs_tcon *tcon; 988 + int rc; 989 + 990 + rc = filemap_write_and_wait(inode->i_mapping); 991 + if (is_interrupt_error(rc)) 992 + return -ERESTARTSYS; 993 + mapping_set_error(inode->i_mapping, rc); 994 + 995 + cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY); 996 + rc = cifs_file_flush(xid, inode, cfile); 997 + if (!rc) { 998 + if (cfile) { 999 + tcon = tlink_tcon(cfile->tlink); 1000 + server = tcon->ses->server; 1001 + rc = server->ops->set_file_size(xid, tcon, 1002 + cfile, 0, false); 1003 + } 1004 + if (!rc) { 1005 + netfs_resize_file(&cinode->netfs, 0, true); 1006 + cifs_setsize(inode, 0); 1007 + inode->i_blocks = 0; 1008 + } 1009 + } 1010 + if (cfile) 1011 + cifsFileInfo_put(cfile); 1012 + return rc; 1013 + } 1014 + 955 1015 int cifs_open(struct inode *inode, struct file *file) 956 1016 957 1017 { ··· 1062 1002 file->f_op = &cifs_file_direct_nobrl_ops; 1063 
1003 else 1064 1004 file->f_op = &cifs_file_direct_ops; 1005 + } 1006 + 1007 + if (file->f_flags & O_TRUNC) { 1008 + rc = cifs_do_truncate(xid, file_dentry(file)); 1009 + if (rc) 1010 + goto out; 1065 1011 } 1066 1012 1067 1013 /* Get the cached handle as SMB2 close is deferred */ ··· 2751 2685 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end, 2752 2686 int datasync) 2753 2687 { 2754 - unsigned int xid; 2755 - int rc = 0; 2756 - struct cifs_tcon *tcon; 2757 - struct TCP_Server_Info *server; 2758 2688 struct cifsFileInfo *smbfile = file->private_data; 2759 2689 struct inode *inode = file_inode(file); 2760 - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 2690 + unsigned int xid; 2691 + int rc; 2761 2692 2762 2693 rc = file_write_and_wait_range(file, start, end); 2763 2694 if (rc) { ··· 2762 2699 return rc; 2763 2700 } 2764 2701 2765 - xid = get_xid(); 2766 - 2767 - cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n", 2768 - file, datasync); 2702 + cifs_dbg(FYI, "%s: name=%pD datasync=0x%x\n", __func__, file, datasync); 2769 2703 2770 2704 if (!CIFS_CACHE_READ(CIFS_I(inode))) { 2771 2705 rc = cifs_zap_mapping(inode); 2772 - if (rc) { 2773 - cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc); 2774 - rc = 0; /* don't care about it in fsync */ 2775 - } 2706 + cifs_dbg(FYI, "%s: invalidate mapping: rc = %d\n", __func__, rc); 2776 2707 } 2777 2708 2778 - tcon = tlink_tcon(smbfile->tlink); 2779 - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { 2780 - server = tcon->ses->server; 2781 - if (server->ops->flush == NULL) { 2782 - rc = -ENOSYS; 2783 - goto strict_fsync_exit; 2784 - } 2785 - 2786 - if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { 2787 - smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); 2788 - if (smbfile) { 2789 - rc = server->ops->flush(xid, tcon, &smbfile->fid); 2790 - cifsFileInfo_put(smbfile); 2791 - } else 2792 - cifs_dbg(FYI, "ignore fsync for file not open for write\n"); 2793 - } else 2794 - rc = 
server->ops->flush(xid, tcon, &smbfile->fid); 2795 - } 2796 - 2797 - strict_fsync_exit: 2709 + xid = get_xid(); 2710 + rc = cifs_file_flush(xid, inode, smbfile); 2798 2711 free_xid(xid); 2799 2712 return rc; 2800 2713 }
+84 -67
fs/smb/client/inode.c
··· 3007 3007 3008 3008 void cifs_setsize(struct inode *inode, loff_t offset) 3009 3009 { 3010 - struct cifsInodeInfo *cifs_i = CIFS_I(inode); 3011 - 3012 3010 spin_lock(&inode->i_lock); 3013 3011 i_size_write(inode, offset); 3014 3012 spin_unlock(&inode->i_lock); 3015 - 3016 - /* Cached inode must be refreshed on truncate */ 3017 - cifs_i->time = 0; 3013 + inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 3018 3014 truncate_pagecache(inode, offset); 3015 + netfs_wait_for_outstanding_io(inode); 3019 3016 } 3020 3017 3021 - static int 3022 - cifs_set_file_size(struct inode *inode, struct iattr *attrs, 3023 - unsigned int xid, const char *full_path, struct dentry *dentry) 3018 + int cifs_file_set_size(const unsigned int xid, struct dentry *dentry, 3019 + const char *full_path, struct cifsFileInfo *open_file, 3020 + loff_t size) 3024 3021 { 3025 - int rc; 3026 - struct cifsFileInfo *open_file; 3027 - struct cifsInodeInfo *cifsInode = CIFS_I(inode); 3022 + struct inode *inode = d_inode(dentry); 3028 3023 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 3024 + struct cifsInodeInfo *cifsInode = CIFS_I(inode); 3029 3025 struct tcon_link *tlink = NULL; 3030 3026 struct cifs_tcon *tcon = NULL; 3031 3027 struct TCP_Server_Info *server; 3028 + int rc = -EINVAL; 3032 3029 3033 3030 /* 3034 3031 * To avoid spurious oplock breaks from server, in the case of ··· 3036 3039 * writebehind data than the SMB timeout for the SetPathInfo 3037 3040 * request would allow 3038 3041 */ 3039 - open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); 3040 - if (open_file) { 3042 + if (open_file && (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE)) { 3041 3043 tcon = tlink_tcon(open_file->tlink); 3042 3044 server = tcon->ses->server; 3043 - if (server->ops->set_file_size) 3044 - rc = server->ops->set_file_size(xid, tcon, open_file, 3045 - attrs->ia_size, false); 3046 - else 3047 - rc = -ENOSYS; 3048 - cifsFileInfo_put(open_file); 3049 - cifs_dbg(FYI, "SetFSize for attrs rc = 
%d\n", rc); 3050 - } else 3051 - rc = -EINVAL; 3045 + rc = server->ops->set_file_size(xid, tcon, 3046 + open_file, 3047 + size, false); 3048 + cifs_dbg(FYI, "%s: set_file_size: rc = %d\n", __func__, rc); 3049 + } else { 3050 + open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); 3051 + if (open_file) { 3052 + tcon = tlink_tcon(open_file->tlink); 3053 + server = tcon->ses->server; 3054 + rc = server->ops->set_file_size(xid, tcon, 3055 + open_file, 3056 + size, false); 3057 + cifs_dbg(FYI, "%s: set_file_size: rc = %d\n", __func__, rc); 3058 + cifsFileInfo_put(open_file); 3059 + } 3060 + } 3052 3061 3053 3062 if (!rc) 3054 3063 goto set_size_out; ··· 3072 3069 * valid, writeable file handle for it was found or because there was 3073 3070 * an error setting it by handle. 3074 3071 */ 3075 - if (server->ops->set_path_size) 3076 - rc = server->ops->set_path_size(xid, tcon, full_path, 3077 - attrs->ia_size, cifs_sb, false, dentry); 3078 - else 3079 - rc = -ENOSYS; 3080 - cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc); 3081 - 3082 - if (tlink) 3083 - cifs_put_tlink(tlink); 3072 + rc = server->ops->set_path_size(xid, tcon, full_path, size, 3073 + cifs_sb, false, dentry); 3074 + cifs_dbg(FYI, "%s: SetEOF by path (setattrs) rc = %d\n", __func__, rc); 3075 + cifs_put_tlink(tlink); 3084 3076 3085 3077 set_size_out: 3086 3078 if (rc == 0) { 3087 - netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true); 3088 - cifs_setsize(inode, attrs->ia_size); 3079 + netfs_resize_file(&cifsInode->netfs, size, true); 3080 + cifs_setsize(inode, size); 3089 3081 /* 3090 3082 * i_blocks is not related to (i_size / i_blksize), but instead 3091 3083 * 512 byte (2**9) size is required for calculating num blocks. 
··· 3088 3090 * this is best estimate we have for blocks allocated for a file 3089 3091 * Number of blocks must be rounded up so size 1 is not 0 blocks 3090 3092 */ 3091 - inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9; 3092 - 3093 - /* 3094 - * The man page of truncate says if the size changed, 3095 - * then the st_ctime and st_mtime fields for the file 3096 - * are updated. 3097 - */ 3098 - attrs->ia_ctime = attrs->ia_mtime = current_time(inode); 3099 - attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME; 3093 + inode->i_blocks = (512 - 1 + size) >> 9; 3100 3094 } 3101 3095 3102 3096 return rc; ··· 3108 3118 struct tcon_link *tlink; 3109 3119 struct cifs_tcon *pTcon; 3110 3120 struct cifs_unix_set_info_args *args = NULL; 3111 - struct cifsFileInfo *open_file; 3121 + struct cifsFileInfo *open_file = NULL; 3112 3122 3113 3123 cifs_dbg(FYI, "setattr_unix on file %pd attrs->ia_valid=0x%x\n", 3114 3124 direntry, attrs->ia_valid); ··· 3121 3131 rc = setattr_prepare(&nop_mnt_idmap, direntry, attrs); 3122 3132 if (rc < 0) 3123 3133 goto out; 3134 + 3135 + if (attrs->ia_valid & ATTR_FILE) 3136 + open_file = attrs->ia_file->private_data; 3124 3137 3125 3138 full_path = build_path_from_dentry(direntry, page); 3126 3139 if (IS_ERR(full_path)) { ··· 3152 3159 rc = 0; 3153 3160 3154 3161 if (attrs->ia_valid & ATTR_SIZE) { 3155 - rc = cifs_set_file_size(inode, attrs, xid, full_path, direntry); 3162 + rc = cifs_file_set_size(xid, direntry, full_path, 3163 + open_file, attrs->ia_size); 3156 3164 if (rc != 0) 3157 3165 goto out; 3166 + /* 3167 + * Avoid setting timestamps on the server for ftruncate(2) to 3168 + * prevent it from disabling automatic timestamp updates as per 3169 + * MS-FSA 2.1.4.17. 
3170 + */ 3171 + attrs->ia_valid &= ~(ATTR_CTIME | ATTR_MTIME); 3158 3172 } 3159 3173 3160 3174 /* skip mode change if it's just for clearing setuid/setgid */ ··· 3206 3206 args->ctime = NO_CHANGE_64; 3207 3207 3208 3208 args->device = 0; 3209 - open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); 3210 - if (open_file) { 3211 - u16 nfid = open_file->fid.netfid; 3212 - u32 npid = open_file->pid; 3209 + rc = -EINVAL; 3210 + if (open_file && (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE)) { 3213 3211 pTcon = tlink_tcon(open_file->tlink); 3214 - rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid); 3215 - cifsFileInfo_put(open_file); 3212 + rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, 3213 + open_file->fid.netfid, 3214 + open_file->pid); 3216 3215 } else { 3216 + open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); 3217 + if (open_file) { 3218 + pTcon = tlink_tcon(open_file->tlink); 3219 + rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, 3220 + open_file->fid.netfid, 3221 + open_file->pid); 3222 + cifsFileInfo_put(open_file); 3223 + } 3224 + } 3225 + 3226 + if (rc) { 3217 3227 tlink = cifs_sb_tlink(cifs_sb); 3218 3228 if (IS_ERR(tlink)) { 3219 3229 rc = PTR_ERR(tlink); ··· 3231 3221 } 3232 3222 pTcon = tlink_tcon(tlink); 3233 3223 rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, 3234 - cifs_sb->local_nls, 3235 - cifs_remap(cifs_sb)); 3224 + cifs_sb->local_nls, 3225 + cifs_remap(cifs_sb)); 3236 3226 cifs_put_tlink(tlink); 3237 3227 } 3238 3228 ··· 3274 3264 struct inode *inode = d_inode(direntry); 3275 3265 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 3276 3266 struct cifsInodeInfo *cifsInode = CIFS_I(inode); 3277 - struct cifsFileInfo *wfile; 3278 - struct cifs_tcon *tcon; 3267 + struct cifsFileInfo *cfile = NULL; 3279 3268 const char *full_path; 3280 3269 void *page = alloc_dentry_path(); 3281 3270 int rc = -EACCES; ··· 3293 3284 rc = setattr_prepare(&nop_mnt_idmap, direntry, attrs); 3294 3285 if (rc < 0) 3295 3286 goto 
cifs_setattr_exit; 3287 + 3288 + if (attrs->ia_valid & ATTR_FILE) 3289 + cfile = attrs->ia_file->private_data; 3296 3290 3297 3291 full_path = build_path_from_dentry(direntry, page); 3298 3292 if (IS_ERR(full_path)) { ··· 3323 3311 3324 3312 rc = 0; 3325 3313 3326 - if ((attrs->ia_valid & ATTR_MTIME) && 3327 - !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) { 3328 - rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile); 3329 - if (!rc) { 3330 - tcon = tlink_tcon(wfile->tlink); 3331 - rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid); 3332 - cifsFileInfo_put(wfile); 3333 - if (rc) 3334 - goto cifs_setattr_exit; 3335 - } else if (rc != -EBADF) 3314 + if (attrs->ia_valid & ATTR_MTIME) { 3315 + rc = cifs_file_flush(xid, inode, cfile); 3316 + if (rc) 3336 3317 goto cifs_setattr_exit; 3337 - else 3338 - rc = 0; 3339 3318 } 3340 3319 3341 3320 if (attrs->ia_valid & ATTR_SIZE) { 3342 - rc = cifs_set_file_size(inode, attrs, xid, full_path, direntry); 3321 + rc = cifs_file_set_size(xid, direntry, full_path, 3322 + cfile, attrs->ia_size); 3343 3323 if (rc != 0) 3344 3324 goto cifs_setattr_exit; 3325 + /* 3326 + * Avoid setting timestamps on the server for ftruncate(2) to 3327 + * prevent it from disabling automatic timestamp updates as per 3328 + * MS-FSA 2.1.4.17. 3329 + */ 3330 + attrs->ia_valid &= ~(ATTR_CTIME | ATTR_MTIME); 3345 3331 } 3346 3332 3347 3333 if (attrs->ia_valid & ATTR_UID) ··· 3469 3459 3470 3460 if (unlikely(cifs_forced_shutdown(cifs_sb))) 3471 3461 return -EIO; 3462 + /* 3463 + * Avoid setting [cm]time with O_TRUNC to prevent the server from 3464 + * disabling automatic timestamp updates as specified in 3465 + * MS-FSA 2.1.4.17. 3466 + */ 3467 + if (attrs->ia_valid & ATTR_OPEN) 3468 + return 0; 3472 3469 3473 3470 do { 3474 3471 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+66 -3
fs/smb/client/smb1ops.c
··· 652 652 653 653 #ifdef CONFIG_CIFS_XATTR 654 654 /* 655 + * For non-symlink WSL reparse points it is required to fetch 656 + * EA $LXMOD which contains in its S_DT part the mandatory file type. 657 + */ 658 + if (!rc && data->reparse_point) { 659 + struct smb2_file_full_ea_info *ea; 660 + u32 next = 0; 661 + 662 + ea = (struct smb2_file_full_ea_info *)data->wsl.eas; 663 + do { 664 + ea = (void *)((u8 *)ea + next); 665 + next = le32_to_cpu(ea->next_entry_offset); 666 + } while (next); 667 + if (le16_to_cpu(ea->ea_value_length)) { 668 + ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) + 669 + ea->ea_name_length + 1 + 670 + le16_to_cpu(ea->ea_value_length), 4)); 671 + ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset)); 672 + } 673 + 674 + rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_MODE, 675 + &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1], 676 + SMB2_WSL_XATTR_MODE_SIZE, cifs_sb); 677 + if (rc == SMB2_WSL_XATTR_MODE_SIZE) { 678 + ea->next_entry_offset = cpu_to_le32(0); 679 + ea->flags = 0; 680 + ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN; 681 + ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_MODE_SIZE); 682 + memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_MODE, SMB2_WSL_XATTR_NAME_LEN + 1); 683 + data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 + 684 + SMB2_WSL_XATTR_MODE_SIZE, 4); 685 + rc = 0; 686 + } else if (rc >= 0) { 687 + /* It is an error if EA $LXMOD has wrong size. */ 688 + rc = -EINVAL; 689 + } else { 690 + /* 691 + * In all other cases ignore error if fetching 692 + * of EA $LXMOD failed. It is needed only for 693 + * non-symlink WSL reparse points and wsl_to_fattr() 694 + * handle the case when EA is missing. 695 + */ 696 + rc = 0; 697 + } 698 + } 699 + 700 + /* 655 701 * For WSL CHR and BLK reparse points it is required to fetch 656 702 * EA $LXDEV which contains major and minor device numbers. 
657 703 */ 658 704 if (!rc && data->reparse_point) { 659 705 struct smb2_file_full_ea_info *ea; 706 + u32 next = 0; 660 707 661 708 ea = (struct smb2_file_full_ea_info *)data->wsl.eas; 709 + do { 710 + ea = (void *)((u8 *)ea + next); 711 + next = le32_to_cpu(ea->next_entry_offset); 712 + } while (next); 713 + if (le16_to_cpu(ea->ea_value_length)) { 714 + ea->next_entry_offset = cpu_to_le32(ALIGN(sizeof(*ea) + 715 + ea->ea_name_length + 1 + 716 + le16_to_cpu(ea->ea_value_length), 4)); 717 + ea = (void *)((u8 *)ea + le32_to_cpu(ea->next_entry_offset)); 718 + } 719 + 662 720 rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV, 663 721 &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1], 664 722 SMB2_WSL_XATTR_DEV_SIZE, cifs_sb); ··· 726 668 ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN; 727 669 ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE); 728 670 memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1); 729 - data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 + 730 - SMB2_WSL_XATTR_DEV_SIZE; 671 + data->wsl.eas_len += ALIGN(sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 + 672 + SMB2_WSL_XATTR_MODE_SIZE, 4); 731 673 rc = 0; 732 674 } else if (rc >= 0) { 733 675 /* It is an error if EA $LXDEV has wrong size. 
*/ ··· 876 818 info.Attributes = cpu_to_le32(dosattrs); 877 819 rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls, 878 820 cifs_sb); 821 + if (rc == -EOPNOTSUPP || rc == -EINVAL) 822 + rc = SMBSetInformation(xid, tcon, full_path, 823 + info.Attributes, 824 + 0 /* do not change write time */, 825 + cifs_sb->local_nls, cifs_sb); 879 826 if (rc == 0) 880 827 cifsInode->cifsAttrs = dosattrs; 881 828 } ··· 1037 974 .tcon = tcon, 1038 975 .cifs_sb = cifs_sb, 1039 976 .desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES, 1040 - .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR), 977 + .create_options = cifs_create_options(cifs_sb, 0), 1041 978 .disposition = FILE_OPEN, 1042 979 .path = full_path, 1043 980 .fid = &fid,
+13 -11
fs/smb/client/smb2inode.c
··· 676 676 idata->fi.EndOfFile = create_rsp->EndofFile; 677 677 if (le32_to_cpu(idata->fi.NumberOfLinks) == 0) 678 678 idata->fi.NumberOfLinks = cpu_to_le32(1); /* dummy value */ 679 - idata->fi.DeletePending = 0; 679 + idata->fi.DeletePending = 0; /* successful open = not delete pending */ 680 680 idata->fi.Directory = !!(le32_to_cpu(create_rsp->FileAttributes) & ATTR_DIRECTORY); 681 681 682 682 /* smb2_parse_contexts() fills idata->fi.IndexNumber */ ··· 1382 1382 smb2_set_file_info(struct inode *inode, const char *full_path, 1383 1383 FILE_BASIC_INFO *buf, const unsigned int xid) 1384 1384 { 1385 - struct cifs_open_parms oparms; 1385 + struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), }; 1386 1386 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1387 + struct cifsFileInfo *cfile = NULL; 1388 + struct cifs_open_parms oparms; 1387 1389 struct tcon_link *tlink; 1388 1390 struct cifs_tcon *tcon; 1389 - struct cifsFileInfo *cfile; 1390 - struct kvec in_iov = { .iov_base = buf, .iov_len = sizeof(*buf), }; 1391 - int rc; 1392 - 1393 - if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && 1394 - (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) && 1395 - (buf->Attributes == 0)) 1396 - return 0; /* would be a no op, no sense sending this */ 1391 + int rc = 0; 1397 1392 1398 1393 tlink = cifs_sb_tlink(cifs_sb); 1399 1394 if (IS_ERR(tlink)) 1400 1395 return PTR_ERR(tlink); 1401 1396 tcon = tlink_tcon(tlink); 1402 1397 1403 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1398 + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && 1399 + (buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) { 1400 + if (buf->Attributes == 0) 1401 + goto out; /* would be a no op, no sense sending this */ 1402 + cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1403 + } 1404 + 1404 1405 oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES, 1405 1406 FILE_OPEN, 0, ACL_NO_MODE); 1406 1407 rc = smb2_compound_op(xid, 
tcon, cifs_sb, 1407 1408 full_path, &oparms, &in_iov, 1408 1409 &(int){SMB2_OP_SET_INFO}, 1, 1409 1410 cfile, NULL, NULL, NULL); 1411 + out: 1410 1412 cifs_put_tlink(tlink); 1411 1413 return rc; 1412 1414 }
+220 -140
fs/smb/client/smb2ops.c
··· 1803 1803 return rc; 1804 1804 } 1805 1805 1806 + /** 1807 + * calc_chunk_count - calculates the number chunks to be filled in the Chunks[] 1808 + * array of struct copychunk_ioctl 1809 + * 1810 + * @tcon: destination file tcon 1811 + * @bytes_left: how many bytes are left to copy 1812 + * 1813 + * Return: maximum number of chunks with which Chunks[] can be filled. 1814 + */ 1815 + static inline u32 1816 + calc_chunk_count(struct cifs_tcon *tcon, u64 bytes_left) 1817 + { 1818 + u32 max_chunks = READ_ONCE(tcon->max_chunks); 1819 + u32 max_bytes_copy = READ_ONCE(tcon->max_bytes_copy); 1820 + u32 max_bytes_chunk = READ_ONCE(tcon->max_bytes_chunk); 1821 + u64 need; 1822 + u32 allowed; 1823 + 1824 + if (!max_bytes_chunk || !max_bytes_copy || !max_chunks) 1825 + return 0; 1826 + 1827 + /* chunks needed for the remaining bytes */ 1828 + need = DIV_ROUND_UP_ULL(bytes_left, max_bytes_chunk); 1829 + /* chunks allowed per cc request */ 1830 + allowed = DIV_ROUND_UP(max_bytes_copy, max_bytes_chunk); 1831 + 1832 + return (u32)umin(need, umin(max_chunks, allowed)); 1833 + } 1834 + 1835 + /** 1836 + * smb2_copychunk_range - server-side copy of data range 1837 + * 1838 + * @xid: transaction id 1839 + * @src_file: source file 1840 + * @dst_file: destination file 1841 + * @src_off: source file byte offset 1842 + * @len: number of bytes to copy 1843 + * @dst_off: destination file byte offset 1844 + * 1845 + * Obtains a resume key for @src_file and issues FSCTL_SRV_COPYCHUNK_WRITE 1846 + * IOCTLs, splitting the request into chunks limited by tcon->max_*. 1847 + * 1848 + * Return: @len on success; negative errno on failure. 
1849 + */ 1806 1850 static ssize_t 1807 1851 smb2_copychunk_range(const unsigned int xid, 1808 - struct cifsFileInfo *srcfile, 1809 - struct cifsFileInfo *trgtfile, u64 src_off, 1810 - u64 len, u64 dest_off) 1852 + struct cifsFileInfo *src_file, 1853 + struct cifsFileInfo *dst_file, 1854 + u64 src_off, 1855 + u64 len, 1856 + u64 dst_off) 1811 1857 { 1812 - int rc; 1813 - unsigned int ret_data_len; 1814 - struct copychunk_ioctl *pcchunk; 1815 - struct copychunk_ioctl_rsp *retbuf = NULL; 1858 + int rc = 0; 1859 + unsigned int ret_data_len = 0; 1860 + struct copychunk_ioctl *cc_req = NULL; 1861 + struct copychunk_ioctl_rsp *cc_rsp = NULL; 1816 1862 struct cifs_tcon *tcon; 1817 - int chunks_copied = 0; 1818 - bool chunk_sizes_updated = false; 1819 - ssize_t bytes_written, total_bytes_written = 0; 1863 + struct copychunk *chunk; 1864 + u32 chunks, chunk_count, chunk_bytes; 1865 + u32 copy_bytes, copy_bytes_left; 1866 + u32 chunks_written, bytes_written; 1867 + u64 total_bytes_left = len; 1868 + u64 src_off_prev, dst_off_prev; 1869 + u32 retries = 0; 1820 1870 1821 - pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); 1822 - if (pcchunk == NULL) 1823 - return -ENOMEM; 1871 + tcon = tlink_tcon(dst_file->tlink); 1824 1872 1825 - cifs_dbg(FYI, "%s: about to call request res key\n", __func__); 1826 - /* Request a key from the server to identify the source of the copy */ 1827 - rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink), 1828 - srcfile->fid.persistent_fid, 1829 - srcfile->fid.volatile_fid, pcchunk); 1873 + trace_smb3_copychunk_enter(xid, src_file->fid.volatile_fid, 1874 + dst_file->fid.volatile_fid, tcon->tid, 1875 + tcon->ses->Suid, src_off, dst_off, len); 1830 1876 1831 - /* Note: request_res_key sets res_key null only if rc !=0 */ 1832 - if (rc) 1833 - goto cchunk_out; 1834 - 1835 - /* For now array only one chunk long, will make more flexible later */ 1836 - pcchunk->ChunkCount = cpu_to_le32(1); 1837 - pcchunk->Reserved = 0; 1838 - 
pcchunk->Reserved2 = 0; 1839 - 1840 - tcon = tlink_tcon(trgtfile->tlink); 1841 - 1842 - trace_smb3_copychunk_enter(xid, srcfile->fid.volatile_fid, 1843 - trgtfile->fid.volatile_fid, tcon->tid, 1844 - tcon->ses->Suid, src_off, dest_off, len); 1845 - 1846 - while (len > 0) { 1847 - pcchunk->SourceOffset = cpu_to_le64(src_off); 1848 - pcchunk->TargetOffset = cpu_to_le64(dest_off); 1849 - pcchunk->Length = 1850 - cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk)); 1851 - 1852 - /* Request server copy to target from src identified by key */ 1853 - kfree(retbuf); 1854 - retbuf = NULL; 1855 - rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, 1856 - trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, 1857 - (char *)pcchunk, sizeof(struct copychunk_ioctl), 1858 - CIFSMaxBufSize, (char **)&retbuf, &ret_data_len); 1859 - if (rc == 0) { 1860 - if (ret_data_len != 1861 - sizeof(struct copychunk_ioctl_rsp)) { 1862 - cifs_tcon_dbg(VFS, "Invalid cchunk response size\n"); 1863 - rc = -EIO; 1864 - goto cchunk_out; 1865 - } 1866 - if (retbuf->TotalBytesWritten == 0) { 1867 - cifs_dbg(FYI, "no bytes copied\n"); 1868 - rc = -EIO; 1869 - goto cchunk_out; 1870 - } 1871 - /* 1872 - * Check if server claimed to write more than we asked 1873 - */ 1874 - if (le32_to_cpu(retbuf->TotalBytesWritten) > 1875 - le32_to_cpu(pcchunk->Length)) { 1876 - cifs_tcon_dbg(VFS, "Invalid copy chunk response\n"); 1877 - rc = -EIO; 1878 - goto cchunk_out; 1879 - } 1880 - if (le32_to_cpu(retbuf->ChunksWritten) != 1) { 1881 - cifs_tcon_dbg(VFS, "Invalid num chunks written\n"); 1882 - rc = -EIO; 1883 - goto cchunk_out; 1884 - } 1885 - chunks_copied++; 1886 - 1887 - bytes_written = le32_to_cpu(retbuf->TotalBytesWritten); 1888 - src_off += bytes_written; 1889 - dest_off += bytes_written; 1890 - len -= bytes_written; 1891 - total_bytes_written += bytes_written; 1892 - 1893 - cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n", 1894 - le32_to_cpu(retbuf->ChunksWritten), 1895 - 
le32_to_cpu(retbuf->ChunkBytesWritten), 1896 - bytes_written); 1897 - trace_smb3_copychunk_done(xid, srcfile->fid.volatile_fid, 1898 - trgtfile->fid.volatile_fid, tcon->tid, 1899 - tcon->ses->Suid, src_off, dest_off, len); 1900 - } else if (rc == -EINVAL) { 1901 - if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) 1902 - goto cchunk_out; 1903 - 1904 - cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n", 1905 - le32_to_cpu(retbuf->ChunksWritten), 1906 - le32_to_cpu(retbuf->ChunkBytesWritten), 1907 - le32_to_cpu(retbuf->TotalBytesWritten)); 1908 - 1909 - /* 1910 - * Check if this is the first request using these sizes, 1911 - * (ie check if copy succeed once with original sizes 1912 - * and check if the server gave us different sizes after 1913 - * we already updated max sizes on previous request). 1914 - * if not then why is the server returning an error now 1915 - */ 1916 - if ((chunks_copied != 0) || chunk_sizes_updated) 1917 - goto cchunk_out; 1918 - 1919 - /* Check that server is not asking us to grow size */ 1920 - if (le32_to_cpu(retbuf->ChunkBytesWritten) < 1921 - tcon->max_bytes_chunk) 1922 - tcon->max_bytes_chunk = 1923 - le32_to_cpu(retbuf->ChunkBytesWritten); 1924 - else 1925 - goto cchunk_out; /* server gave us bogus size */ 1926 - 1927 - /* No need to change MaxChunks since already set to 1 */ 1928 - chunk_sizes_updated = true; 1929 - } else 1930 - goto cchunk_out; 1877 + retry: 1878 + chunk_count = calc_chunk_count(tcon, total_bytes_left); 1879 + if (!chunk_count) { 1880 + rc = -EOPNOTSUPP; 1881 + goto out; 1931 1882 } 1932 1883 1933 - cchunk_out: 1934 - kfree(pcchunk); 1935 - kfree(retbuf); 1884 + cc_req = kzalloc(struct_size(cc_req, Chunks, chunk_count), GFP_KERNEL); 1885 + if (!cc_req) { 1886 + rc = -ENOMEM; 1887 + goto out; 1888 + } 1889 + 1890 + /* Request a key from the server to identify the source of the copy */ 1891 + rc = SMB2_request_res_key(xid, 1892 + tlink_tcon(src_file->tlink), 1893 + src_file->fid.persistent_fid, 1894 + 
src_file->fid.volatile_fid, 1895 + cc_req); 1896 + 1897 + /* Note: request_res_key sets res_key null only if rc != 0 */ 1936 1898 if (rc) 1899 + goto out; 1900 + 1901 + while (total_bytes_left > 0) { 1902 + 1903 + /* Store previous offsets to allow rewind */ 1904 + src_off_prev = src_off; 1905 + dst_off_prev = dst_off; 1906 + 1907 + chunks = 0; 1908 + copy_bytes = 0; 1909 + copy_bytes_left = umin(total_bytes_left, tcon->max_bytes_copy); 1910 + while (copy_bytes_left > 0 && chunks < chunk_count) { 1911 + chunk = &cc_req->Chunks[chunks++]; 1912 + 1913 + chunk->SourceOffset = cpu_to_le64(src_off); 1914 + chunk->TargetOffset = cpu_to_le64(dst_off); 1915 + 1916 + chunk_bytes = umin(copy_bytes_left, tcon->max_bytes_chunk); 1917 + 1918 + chunk->Length = cpu_to_le32(chunk_bytes); 1919 + /* Buffer is zeroed, no need to set chunk->Reserved = 0 */ 1920 + 1921 + src_off += chunk_bytes; 1922 + dst_off += chunk_bytes; 1923 + 1924 + copy_bytes_left -= chunk_bytes; 1925 + copy_bytes += chunk_bytes; 1926 + } 1927 + 1928 + cc_req->ChunkCount = cpu_to_le32(chunks); 1929 + /* Buffer is zeroed, no need to set cc_req->Reserved = 0 */ 1930 + 1931 + /* Request server copy to target from src identified by key */ 1932 + kfree(cc_rsp); 1933 + cc_rsp = NULL; 1934 + rc = SMB2_ioctl(xid, tcon, dst_file->fid.persistent_fid, 1935 + dst_file->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, 1936 + (char *)cc_req, struct_size(cc_req, Chunks, chunks), 1937 + CIFSMaxBufSize, (char **)&cc_rsp, &ret_data_len); 1938 + 1939 + if (rc && rc != -EINVAL) 1940 + goto out; 1941 + 1942 + if (unlikely(ret_data_len != sizeof(*cc_rsp))) { 1943 + cifs_tcon_dbg(VFS, "Copychunk invalid response: size %u/%zu\n", 1944 + ret_data_len, sizeof(*cc_rsp)); 1945 + rc = -EIO; 1946 + goto out; 1947 + } 1948 + 1949 + bytes_written = le32_to_cpu(cc_rsp->TotalBytesWritten); 1950 + chunks_written = le32_to_cpu(cc_rsp->ChunksWritten); 1951 + chunk_bytes = le32_to_cpu(cc_rsp->ChunkBytesWritten); 1952 + 1953 + if (rc == 0) { 1954 + /* 
Check if server claimed to write more than we asked */ 1955 + if (unlikely(!bytes_written || bytes_written > copy_bytes || 1956 + !chunks_written || chunks_written > chunks)) { 1957 + cifs_tcon_dbg(VFS, "Copychunk invalid response: bytes written %u/%u, chunks written %u/%u\n", 1958 + bytes_written, copy_bytes, chunks_written, chunks); 1959 + rc = -EIO; 1960 + goto out; 1961 + } 1962 + 1963 + /* Partial write: rewind */ 1964 + if (bytes_written < copy_bytes) { 1965 + u32 delta = copy_bytes - bytes_written; 1966 + 1967 + src_off -= delta; 1968 + dst_off -= delta; 1969 + } 1970 + 1971 + total_bytes_left -= bytes_written; 1972 + continue; 1973 + } 1974 + 1975 + /* 1976 + * Check if server is not asking us to reduce size. 1977 + * 1978 + * Note: As per MS-SMB2 2.2.32.1, the values returned 1979 + * in cc_rsp are not strictly lower than what existed 1980 + * before. 1981 + */ 1982 + if (bytes_written < tcon->max_bytes_copy) { 1983 + cifs_tcon_dbg(FYI, "Copychunk MaxBytesCopy updated: %u -> %u\n", 1984 + tcon->max_bytes_copy, bytes_written); 1985 + tcon->max_bytes_copy = bytes_written; 1986 + } 1987 + 1988 + if (chunks_written < tcon->max_chunks) { 1989 + cifs_tcon_dbg(FYI, "Copychunk MaxChunks updated: %u -> %u\n", 1990 + tcon->max_chunks, chunks_written); 1991 + tcon->max_chunks = chunks_written; 1992 + } 1993 + 1994 + if (chunk_bytes < tcon->max_bytes_chunk) { 1995 + cifs_tcon_dbg(FYI, "Copychunk MaxBytesChunk updated: %u -> %u\n", 1996 + tcon->max_bytes_chunk, chunk_bytes); 1997 + tcon->max_bytes_chunk = chunk_bytes; 1998 + } 1999 + 2000 + /* reset to last offsets */ 2001 + if (retries++ < 2) { 2002 + src_off = src_off_prev; 2003 + dst_off = dst_off_prev; 2004 + kfree(cc_req); 2005 + cc_req = NULL; 2006 + goto retry; 2007 + } 2008 + 2009 + break; 2010 + } 2011 + 2012 + out: 2013 + kfree(cc_req); 2014 + kfree(cc_rsp); 2015 + if (rc) { 2016 + trace_smb3_copychunk_err(xid, src_file->fid.volatile_fid, 2017 + dst_file->fid.volatile_fid, tcon->tid, 2018 + tcon->ses->Suid, 
src_off, dst_off, len, rc); 1937 2019 return rc; 1938 - else 1939 - return total_bytes_written; 2020 + } else { 2021 + trace_smb3_copychunk_done(xid, src_file->fid.volatile_fid, 2022 + dst_file->fid.volatile_fid, tcon->tid, 2023 + tcon->ses->Suid, src_off, dst_off, len); 2024 + return len; 2025 + } 1940 2026 } 1941 2027 1942 2028 static int ··· 3367 3281 trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid, 3368 3282 ses->Suid, offset, len); 3369 3283 3370 - inode_lock(inode); 3371 3284 filemap_invalidate_lock(inode->i_mapping); 3372 3285 3373 3286 i_size = i_size_read(inode); ··· 3384 3299 * first, otherwise the data may be inconsistent with the server. 3385 3300 */ 3386 3301 truncate_pagecache_range(inode, offset, offset + len - 1); 3302 + netfs_wait_for_outstanding_io(inode); 3387 3303 3388 3304 /* if file not oplocked can't be sure whether asking to extend size */ 3389 3305 rc = -EOPNOTSUPP; ··· 3413 3327 3414 3328 zero_range_exit: 3415 3329 filemap_invalidate_unlock(inode->i_mapping); 3416 - inode_unlock(inode); 3417 3330 free_xid(xid); 3418 3331 if (rc) 3419 3332 trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid, ··· 3436 3351 3437 3352 xid = get_xid(); 3438 3353 3439 - inode_lock(inode); 3440 3354 /* Need to make file sparse, if not already, before freeing range. */ 3441 3355 /* Consider adding equivalent for compressed since it could also work */ 3442 3356 if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) { ··· 3449 3365 * caches first, otherwise the data may be inconsistent with the server. 
3450 3366 */ 3451 3367 truncate_pagecache_range(inode, offset, offset + len - 1); 3368 + netfs_wait_for_outstanding_io(inode); 3452 3369 3453 3370 cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); 3454 3371 ··· 3484 3399 unlock: 3485 3400 filemap_invalidate_unlock(inode->i_mapping); 3486 3401 out: 3487 - inode_unlock(inode); 3488 3402 free_xid(xid); 3489 3403 return rc; 3490 3404 } ··· 3747 3663 3748 3664 xid = get_xid(); 3749 3665 3750 - inode_lock(inode); 3751 - 3752 3666 old_eof = i_size_read(inode); 3753 3667 if ((off >= old_eof) || 3754 3668 off + len >= old_eof) { ··· 3761 3679 3762 3680 truncate_pagecache_range(inode, off, old_eof); 3763 3681 ictx->zero_point = old_eof; 3682 + netfs_wait_for_outstanding_io(inode); 3764 3683 3765 3684 rc = smb2_copychunk_range(xid, cfile, cfile, off + len, 3766 3685 old_eof - off - len, off); ··· 3782 3699 fscache_resize_cookie(cifs_inode_cookie(inode), new_eof); 3783 3700 out_2: 3784 3701 filemap_invalidate_unlock(inode->i_mapping); 3785 - out: 3786 - inode_unlock(inode); 3702 + out: 3787 3703 free_xid(xid); 3788 3704 return rc; 3789 3705 } ··· 3799 3717 3800 3718 xid = get_xid(); 3801 3719 3802 - inode_lock(inode); 3803 - 3804 3720 old_eof = i_size_read(inode); 3805 3721 if (off >= old_eof) { 3806 3722 rc = -EINVAL; ··· 3813 3733 if (rc < 0) 3814 3734 goto out_2; 3815 3735 truncate_pagecache_range(inode, off, old_eof); 3736 + netfs_wait_for_outstanding_io(inode); 3816 3737 3817 3738 rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, 3818 3739 cfile->fid.volatile_fid, cfile->pid, new_eof); ··· 3836 3755 rc = 0; 3837 3756 out_2: 3838 3757 filemap_invalidate_unlock(inode->i_mapping); 3839 - out: 3840 - inode_unlock(inode); 3758 + out: 3841 3759 free_xid(xid); 3842 3760 return rc; 3843 3761 } ··· 4730 4650 unsigned int pad_len; 4731 4651 struct cifs_io_subrequest *rdata = mid->callback_data; 4732 4652 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 4733 - int length; 4653 + size_t copied; 4734 4654 bool use_rdma_mr = 
false; 4735 4655 4736 4656 if (shdr->Command != SMB2_READ) { ··· 4843 4763 } else if (buf_len >= data_offset + data_len) { 4844 4764 /* read response payload is in buf */ 4845 4765 WARN_ONCE(buffer, "read data can be either in buf or in buffer"); 4846 - length = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter); 4847 - if (length < 0) 4848 - return length; 4849 - rdata->got_bytes = data_len; 4766 + copied = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter); 4767 + if (copied == 0) 4768 + return -EIO; 4769 + rdata->got_bytes = copied; 4850 4770 } else { 4851 4771 /* read response payload cannot be in both buf and pages */ 4852 4772 WARN_ONCE(1, "buf can not contain only a part of read data");
+1 -1
fs/smb/client/smb2pdu.c
··· 3277 3277 buf->EndOfFile = rsp->EndofFile; 3278 3278 buf->Attributes = rsp->FileAttributes; 3279 3279 buf->NumberOfLinks = cpu_to_le32(1); 3280 - buf->DeletePending = 0; 3280 + buf->DeletePending = 0; /* successful open = not delete pending */ 3281 3281 } 3282 3282 3283 3283
+10 -6
fs/smb/client/smb2pdu.h
··· 201 201 char Context[]; /* ignored, Windows sets to 4 bytes of zero */ 202 202 } __packed; 203 203 204 + 205 + struct copychunk { 206 + __le64 SourceOffset; 207 + __le64 TargetOffset; 208 + __le32 Length; 209 + __le32 Reserved; 210 + } __packed; 211 + 204 212 /* this goes in the ioctl buffer when doing a copychunk request */ 205 213 struct copychunk_ioctl { 206 214 char SourceKey[COPY_CHUNK_RES_KEY_SIZE]; 207 - __le32 ChunkCount; /* we are only sending 1 */ 215 + __le32 ChunkCount; 208 216 __le32 Reserved; 209 - /* array will only be one chunk long for us */ 210 - __le64 SourceOffset; 211 - __le64 TargetOffset; 212 - __le32 Length; /* how many bytes to copy */ 213 - __u32 Reserved2; 217 + struct copychunk Chunks[]; 214 218 } __packed; 215 219 216 220 struct copychunk_ioctl_rsp {
-5
fs/smb/client/smb2transport.c
··· 240 240 return NULL; 241 241 } 242 242 tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid); 243 - if (!tcon) { 244 - spin_unlock(&cifs_tcp_ses_lock); 245 - cifs_put_smb_ses(ses); 246 - return NULL; 247 - } 248 243 spin_unlock(&cifs_tcp_ses_lock); 249 244 /* tcon already has a ref to ses, so we don't need ses anymore */ 250 245 cifs_put_smb_ses(ses);
+1 -1
fs/smb/client/trace.h
··· 266 266 TP_ARGS(xid, src_fid, target_fid, tid, sesid, src_offset, target_offset, len, rc)) 267 267 268 268 DEFINE_SMB3_COPY_RANGE_ERR_EVENT(clone_err); 269 - /* TODO: Add SMB3_COPY_RANGE_ERR_EVENT(copychunk_err) */ 269 + DEFINE_SMB3_COPY_RANGE_ERR_EVENT(copychunk_err); 270 270 271 271 DECLARE_EVENT_CLASS(smb3_copy_range_done_class, 272 272 TP_PROTO(unsigned int xid,