Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fdget(), trivial conversions

fdget() is the first thing done in scope, all matching fdput() are
immediately followed by leaving the scope.

Reviewed-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

+164 -339
+5 -16
arch/powerpc/kvm/book3s_64_vio.c
··· 115 115 struct iommu_table_group *table_group; 116 116 long i; 117 117 struct kvmppc_spapr_tce_iommu_table *stit; 118 - struct fd f; 118 + CLASS(fd, f)(tablefd); 119 119 120 - f = fdget(tablefd); 121 - if (!fd_file(f)) 120 + if (fd_empty(f)) 122 121 return -EBADF; 123 122 124 123 rcu_read_lock(); ··· 129 130 } 130 131 rcu_read_unlock(); 131 132 132 - if (!found) { 133 - fdput(f); 133 + if (!found) 134 134 return -EINVAL; 135 - } 136 135 137 136 table_group = iommu_group_get_iommudata(grp); 138 - if (WARN_ON(!table_group)) { 139 - fdput(f); 137 + if (WARN_ON(!table_group)) 140 138 return -EFAULT; 141 - } 142 139 143 140 for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { 144 141 struct iommu_table *tbltmp = table_group->tables[i]; ··· 155 160 break; 156 161 } 157 162 } 158 - if (!tbl) { 159 - fdput(f); 163 + if (!tbl) 160 164 return -EINVAL; 161 - } 162 165 163 166 rcu_read_lock(); 164 167 list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { ··· 167 174 /* stit is being destroyed */ 168 175 iommu_tce_table_put(tbl); 169 176 rcu_read_unlock(); 170 - fdput(f); 171 177 return -ENOTTY; 172 178 } 173 179 /* ··· 174 182 * its KVM reference counter and can return. 175 183 */ 176 184 rcu_read_unlock(); 177 - fdput(f); 178 185 return 0; 179 186 } 180 187 rcu_read_unlock(); ··· 181 190 stit = kzalloc(sizeof(*stit), GFP_KERNEL); 182 191 if (!stit) { 183 192 iommu_tce_table_put(tbl); 184 - fdput(f); 185 193 return -ENOMEM; 186 194 } 187 195 ··· 189 199 190 200 list_add_rcu(&stit->next, &stt->iommu_tables); 191 201 192 - fdput(f); 193 202 return 0; 194 203 } 195 204
+7 -17
arch/powerpc/kvm/powerpc.c
··· 1933 1933 #endif 1934 1934 #ifdef CONFIG_KVM_MPIC 1935 1935 case KVM_CAP_IRQ_MPIC: { 1936 - struct fd f; 1936 + CLASS(fd, f)(cap->args[0]); 1937 1937 struct kvm_device *dev; 1938 1938 1939 1939 r = -EBADF; 1940 - f = fdget(cap->args[0]); 1941 - if (!fd_file(f)) 1940 + if (fd_empty(f)) 1942 1941 break; 1943 1942 1944 1943 r = -EPERM; ··· 1945 1946 if (dev) 1946 1947 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); 1947 1948 1948 - fdput(f); 1949 1949 break; 1950 1950 } 1951 1951 #endif 1952 1952 #ifdef CONFIG_KVM_XICS 1953 1953 case KVM_CAP_IRQ_XICS: { 1954 - struct fd f; 1954 + CLASS(fd, f)(cap->args[0]); 1955 1955 struct kvm_device *dev; 1956 1956 1957 1957 r = -EBADF; 1958 - f = fdget(cap->args[0]); 1959 - if (!fd_file(f)) 1958 + if (fd_empty(f)) 1960 1959 break; 1961 1960 1962 1961 r = -EPERM; ··· 1965 1968 else 1966 1969 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); 1967 1970 } 1968 - 1969 - fdput(f); 1970 1971 break; 1971 1972 } 1972 1973 #endif /* CONFIG_KVM_XICS */ 1973 1974 #ifdef CONFIG_KVM_XIVE 1974 1975 case KVM_CAP_PPC_IRQ_XIVE: { 1975 - struct fd f; 1976 + CLASS(fd, f)(cap->args[0]); 1976 1977 struct kvm_device *dev; 1977 1978 1978 1979 r = -EBADF; 1979 - f = fdget(cap->args[0]); 1980 - if (!fd_file(f)) 1980 + if (fd_empty(f)) 1981 1981 break; 1982 1982 1983 1983 r = -ENXIO; 1984 - if (!xive_enabled()) { 1985 - fdput(f); 1984 + if (!xive_enabled()) 1986 1985 break; 1987 - } 1988 1986 1989 1987 r = -EPERM; 1990 1988 dev = kvm_device_from_filp(fd_file(f)); 1991 1989 if (dev) 1992 1990 r = kvmppc_xive_native_connect_vcpu(dev, vcpu, 1993 1991 cap->args[1]); 1994 - 1995 - fdput(f); 1996 1992 break; 1997 1993 } 1998 1994 #endif /* CONFIG_KVM_XIVE */
+2 -4
arch/powerpc/platforms/cell/spu_syscalls.c
··· 64 64 return -ENOSYS; 65 65 66 66 if (flags & SPU_CREATE_AFFINITY_SPU) { 67 - struct fd neighbor = fdget(neighbor_fd); 67 + CLASS(fd, neighbor)(neighbor_fd); 68 68 ret = -EBADF; 69 - if (fd_file(neighbor)) { 69 + if (!fd_empty(neighbor)) 70 70 ret = calls->create_thread(name, flags, mode, fd_file(neighbor)); 71 - fdput(neighbor); 72 - } 73 71 } else 74 72 ret = calls->create_thread(name, flags, mode, NULL); 75 73
+3 -7
arch/x86/kernel/cpu/sgx/main.c
··· 901 901 int sgx_set_attribute(unsigned long *allowed_attributes, 902 902 unsigned int attribute_fd) 903 903 { 904 - struct fd f = fdget(attribute_fd); 904 + CLASS(fd, f)(attribute_fd); 905 905 906 - if (!fd_file(f)) 906 + if (fd_empty(f)) 907 907 return -EINVAL; 908 908 909 - if (fd_file(f)->f_op != &sgx_provision_fops) { 910 - fdput(f); 909 + if (fd_file(f)->f_op != &sgx_provision_fops) 911 910 return -EINVAL; 912 - } 913 911 914 912 *allowed_attributes |= SGX_ATTR_PROVISIONKEY; 915 - 916 - fdput(f); 917 913 return 0; 918 914 } 919 915 EXPORT_SYMBOL_GPL(sgx_set_attribute);
+13 -26
arch/x86/kvm/svm/sev.c
··· 530 530 531 531 static int __sev_issue_cmd(int fd, int id, void *data, int *error) 532 532 { 533 - struct fd f; 534 - int ret; 533 + CLASS(fd, f)(fd); 535 534 536 - f = fdget(fd); 537 - if (!fd_file(f)) 535 + if (fd_empty(f)) 538 536 return -EBADF; 539 537 540 - ret = sev_issue_cmd_external_user(fd_file(f), id, data, error); 541 - 542 - fdput(f); 543 - return ret; 538 + return sev_issue_cmd_external_user(fd_file(f), id, data, error); 544 539 } 545 540 546 541 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) ··· 2068 2073 { 2069 2074 struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info; 2070 2075 struct kvm_sev_info *src_sev, *cg_cleanup_sev; 2071 - struct fd f = fdget(source_fd); 2076 + CLASS(fd, f)(source_fd); 2072 2077 struct kvm *source_kvm; 2073 2078 bool charged = false; 2074 2079 int ret; 2075 2080 2076 - if (!fd_file(f)) 2081 + if (fd_empty(f)) 2077 2082 return -EBADF; 2078 2083 2079 - if (!file_is_kvm(fd_file(f))) { 2080 - ret = -EBADF; 2081 - goto out_fput; 2082 - } 2084 + if (!file_is_kvm(fd_file(f))) 2085 + return -EBADF; 2083 2086 2084 2087 source_kvm = fd_file(f)->private_data; 2085 2088 ret = sev_lock_two_vms(kvm, source_kvm); 2086 2089 if (ret) 2087 - goto out_fput; 2090 + return ret; 2088 2091 2089 2092 if (kvm->arch.vm_type != source_kvm->arch.vm_type || 2090 2093 sev_guest(kvm) || !sev_guest(source_kvm)) { ··· 2129 2136 cg_cleanup_sev->misc_cg = NULL; 2130 2137 out_unlock: 2131 2138 sev_unlock_two_vms(kvm, source_kvm); 2132 - out_fput: 2133 - fdput(f); 2134 2139 return ret; 2135 2140 } 2136 2141 ··· 2789 2798 2790 2799 int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd) 2791 2800 { 2792 - struct fd f = fdget(source_fd); 2801 + CLASS(fd, f)(source_fd); 2793 2802 struct kvm *source_kvm; 2794 2803 struct kvm_sev_info *source_sev, *mirror_sev; 2795 2804 int ret; 2796 2805 2797 - if (!fd_file(f)) 2806 + if (fd_empty(f)) 2798 2807 return -EBADF; 2799 2808 2800 - if (!file_is_kvm(fd_file(f))) { 2801 - ret = -EBADF; 2802 - goto e_source_fput; 2803 - } 2809 + if (!file_is_kvm(fd_file(f))) 2810 + return -EBADF; 2804 2811 2805 2812 source_kvm = fd_file(f)->private_data; 2806 2813 ret = sev_lock_two_vms(kvm, source_kvm); 2807 2814 if (ret) 2808 - goto e_source_fput; 2815 + return ret; 2809 2816 2810 2817 /* 2811 2818 * Mirrors of mirrors should work, but let's not get silly. Also ··· 2846 2857 2847 2858 e_unlock: 2848 2859 sev_unlock_two_vms(kvm, source_kvm); 2849 - e_source_fput: 2850 - fdput(f); 2851 2860 return ret; 2852 2861 } 2853 2862
+7 -16
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
··· 35 35 int fd, 36 36 int32_t priority) 37 37 { 38 - struct fd f = fdget(fd); 38 + CLASS(fd, f)(fd); 39 39 struct amdgpu_fpriv *fpriv; 40 40 struct amdgpu_ctx_mgr *mgr; 41 41 struct amdgpu_ctx *ctx; 42 42 uint32_t id; 43 43 int r; 44 44 45 - if (!fd_file(f)) 45 + if (fd_empty(f)) 46 46 return -EINVAL; 47 47 48 48 r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); 49 - if (r) { 50 - fdput(f); 49 + if (r) 51 50 return r; 52 - } 53 51 54 52 mgr = &fpriv->ctx_mgr; 55 53 mutex_lock(&mgr->lock); ··· 55 57 amdgpu_ctx_priority_override(ctx, priority); 56 58 mutex_unlock(&mgr->lock); 57 59 58 - fdput(f); 59 60 return 0; 60 61 } 61 62 ··· 63 66 unsigned ctx_id, 64 67 int32_t priority) 65 68 { 66 - struct fd f = fdget(fd); 69 + CLASS(fd, f)(fd); 67 70 struct amdgpu_fpriv *fpriv; 68 71 struct amdgpu_ctx *ctx; 69 72 int r; 70 73 71 - if (!fd_file(f)) 74 + if (fd_empty(f)) 72 75 return -EINVAL; 73 76 74 77 r = amdgpu_file_to_fpriv(fd_file(f), &fpriv); 75 - if (r) { 76 - fdput(f); 78 + if (r) 77 79 return r; 78 - } 79 80 80 81 ctx = amdgpu_ctx_get(fpriv, ctx_id); 81 82 82 - if (!ctx) { 83 - fdput(f); 83 + if (!ctx) 84 84 return -EINVAL; 85 - } 86 85 87 86 amdgpu_ctx_priority_override(ctx, priority); 88 87 amdgpu_ctx_put(ctx); 89 - fdput(f); 90 - 91 88 return 0; 92 89 } 93 90
+3 -6
drivers/gpu/drm/drm_syncobj.c
··· 712 712 int fd, u32 *handle) 713 713 { 714 714 struct drm_syncobj *syncobj; 715 - struct fd f = fdget(fd); 715 + CLASS(fd, f)(fd); 716 716 int ret; 717 717 718 - if (!fd_file(f)) 718 + if (fd_empty(f)) 719 719 return -EINVAL; 720 720 721 - if (fd_file(f)->f_op != &drm_syncobj_file_fops) { 722 - fdput(f); 721 + if (fd_file(f)->f_op != &drm_syncobj_file_fops) 723 722 return -EINVAL; 724 - } 725 723 726 724 /* take a reference to put in the idr */ 727 725 syncobj = fd_file(f)->private_data; ··· 737 739 } else 738 740 drm_syncobj_put(syncobj); 739 741 740 - fdput(f); 741 742 return ret; 742 743 } 743 744
+4 -9
drivers/media/rc/lirc_dev.c
··· 815 815 816 816 struct rc_dev *rc_dev_get_from_fd(int fd, bool write) 817 817 { 818 - struct fd f = fdget(fd); 818 + CLASS(fd, f)(fd); 819 819 struct lirc_fh *fh; 820 820 struct rc_dev *dev; 821 821 822 - if (!fd_file(f)) 822 + if (fd_empty(f)) 823 823 return ERR_PTR(-EBADF); 824 824 825 - if (fd_file(f)->f_op != &lirc_fops) { 826 - fdput(f); 825 + if (fd_file(f)->f_op != &lirc_fops) 827 826 return ERR_PTR(-EINVAL); 828 - } 829 827 830 - if (write && !(fd_file(f)->f_mode & FMODE_WRITE)) { 831 - fdput(f); 828 + if (write && !(fd_file(f)->f_mode & FMODE_WRITE)) 832 829 return ERR_PTR(-EPERM); 833 - } 834 830 835 831 fh = fd_file(f)->private_data; 836 832 dev = fh->rc; 837 833 838 834 get_device(&dev->dev); 839 - fdput(f); 840 835 841 836 return dev; 842 837 }
+2 -3
fs/btrfs/ioctl.c
··· 1308 1308 ret = btrfs_mksubvol(&file->f_path, idmap, name, 1309 1309 namelen, NULL, readonly, inherit); 1310 1310 } else { 1311 - struct fd src = fdget(fd); 1311 + CLASS(fd, src)(fd); 1312 1312 struct inode *src_inode; 1313 - if (!fd_file(src)) { 1313 + if (fd_empty(src)) { 1314 1314 ret = -EINVAL; 1315 1315 goto out_drop_write; 1316 1316 } ··· 1341 1341 BTRFS_I(src_inode)->root, 1342 1342 readonly, inherit); 1343 1343 } 1344 - fdput(src); 1345 1344 } 1346 1345 out_drop_write: 1347 1346 mnt_drop_write_file(file);
+3 -6
fs/eventfd.c
··· 347 347 */ 348 348 struct eventfd_ctx *eventfd_ctx_fdget(int fd) 349 349 { 350 - struct eventfd_ctx *ctx; 351 - struct fd f = fdget(fd); 352 - if (!fd_file(f)) 350 + CLASS(fd, f)(fd); 351 + if (fd_empty(f)) 353 352 return ERR_PTR(-EBADF); 354 - ctx = eventfd_ctx_fileget(fd_file(f)); 355 - fdput(f); 356 - return ctx; 353 + return eventfd_ctx_fileget(fd_file(f)); 357 354 } 358 355 EXPORT_SYMBOL_GPL(eventfd_ctx_fdget); 359 356
+7 -16
fs/eventpoll.c
··· 2254 2254 { 2255 2255 int error; 2256 2256 int full_check = 0; 2257 - struct fd f, tf; 2258 2257 struct eventpoll *ep; 2259 2258 struct epitem *epi; 2260 2259 struct eventpoll *tep = NULL; 2261 2260 2262 - error = -EBADF; 2263 - f = fdget(epfd); 2264 - if (!fd_file(f)) 2265 - goto error_return; 2261 + CLASS(fd, f)(epfd); 2262 + if (fd_empty(f)) 2263 + return -EBADF; 2266 2264 2267 2265 /* Get the "struct file *" for the target file */ 2268 - tf = fdget(fd); 2269 - if (!fd_file(tf)) 2270 - goto error_fput; 2266 + CLASS(fd, tf)(fd); 2267 + if (fd_empty(tf)) 2268 + return -EBADF; 2271 2269 2272 2270 /* The target file descriptor must support poll */ 2273 - error = -EPERM; 2274 2271 if (!file_can_poll(fd_file(tf))) 2275 - goto error_tgt_fput; 2272 + return -EPERM; 2276 2273 2277 2274 /* Check if EPOLLWAKEUP is allowed */ 2278 2275 if (ep_op_has_event(op)) ··· 2388 2391 loop_check_gen++; 2389 2392 mutex_unlock(&epnested_mutex); 2390 2393 } 2391 - 2392 - fdput(tf); 2393 - error_fput: 2394 - fdput(f); 2395 - error_return: 2396 - 2397 2394 return error; 2398 2395 } 2399 2396
+2 -3
fs/fhandle.c
··· 139 139 path_get(root); 140 140 spin_unlock(&fs->lock); 141 141 } else { 142 - struct fd f = fdget(fd); 143 - if (!fd_file(f)) 142 + CLASS(fd, f)(fd); 143 + if (fd_empty(f)) 144 144 return -EBADF; 145 145 *root = fd_file(f)->f_path; 146 146 path_get(root); 147 - fdput(f); 148 147 } 149 148 150 149 return 0;
+8 -15
fs/ioctl.c
··· 231 231 static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd, 232 232 u64 off, u64 olen, u64 destoff) 233 233 { 234 - struct fd src_file = fdget(srcfd); 234 + CLASS(fd, src_file)(srcfd); 235 235 loff_t cloned; 236 236 int ret; 237 237 238 - if (!fd_file(src_file)) 238 + if (fd_empty(src_file)) 239 239 return -EBADF; 240 240 cloned = vfs_clone_file_range(fd_file(src_file), off, dst_file, destoff, 241 241 olen, 0); ··· 245 245 ret = -EINVAL; 246 246 else 247 247 ret = 0; 248 - fdput(src_file); 249 248 return ret; 250 249 } 251 250 ··· 891 892 892 893 SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) 893 894 { 894 - struct fd f = fdget(fd); 895 + CLASS(fd, f)(fd); 895 896 int error; 896 897 897 - if (!fd_file(f)) 898 + if (fd_empty(f)) 898 899 return -EBADF; 899 900 900 901 error = security_file_ioctl(fd_file(f), cmd, arg); 901 902 if (error) 902 - goto out; 903 + return error; 903 904 904 905 error = do_vfs_ioctl(fd_file(f), fd, cmd, arg); 905 906 if (error == -ENOIOCTLCMD) 906 907 error = vfs_ioctl(fd_file(f), cmd, arg); 907 908 908 - out: 909 - fdput(f); 910 909 return error; 911 910 } 912 911 ··· 947 950 COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, 948 951 compat_ulong_t, arg) 949 952 { 950 - struct fd f = fdget(fd); 953 + CLASS(fd, f)(fd); 951 954 int error; 952 955 953 - if (!fd_file(f)) 956 + if (fd_empty(f)) 954 957 return -EBADF; 955 958 956 959 error = security_file_ioctl_compat(fd_file(f), cmd, arg); 957 960 if (error) 958 - goto out; 961 + return error; 959 962 960 963 switch (cmd) { 961 964 /* FICLONE takes an int argument, so don't use compat_ptr() */ ··· 1006 1009 error = -ENOTTY; 1007 1010 break; 1008 1011 } 1009 - 1010 - out: 1011 - fdput(f); 1012 - 1013 1012 return error; 1014 1013 } 1015 1014 #endif
+4 -8
fs/kernel_read_file.c
··· 175 175 size_t buf_size, size_t *file_size, 176 176 enum kernel_read_file_id id) 177 177 { 178 - struct fd f = fdget(fd); 179 - ssize_t ret = -EBADF; 178 + CLASS(fd, f)(fd); 180 179 181 - if (!fd_file(f) || !(fd_file(f)->f_mode & FMODE_READ)) 182 - goto out; 180 + if (fd_empty(f) || !(fd_file(f)->f_mode & FMODE_READ)) 181 + return -EBADF; 183 182 184 - ret = kernel_read_file(fd_file(f), offset, buf, buf_size, file_size, id); 185 - out: 186 - fdput(f); 187 - return ret; 183 + return kernel_read_file(fd_file(f), offset, buf, buf_size, file_size, id); 188 184 } 189 185 EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
+5 -10
fs/notify/fanotify/fanotify_user.c
··· 1003 1003 dfd, filename, flags); 1004 1004 1005 1005 if (filename == NULL) { 1006 - struct fd f = fdget(dfd); 1006 + CLASS(fd, f)(dfd); 1007 1007 1008 - ret = -EBADF; 1009 - if (!fd_file(f)) 1010 - goto out; 1008 + if (fd_empty(f)) 1009 + return -EBADF; 1011 1010 1012 - ret = -ENOTDIR; 1013 1011 if ((flags & FAN_MARK_ONLYDIR) && 1014 - !(S_ISDIR(file_inode(fd_file(f))->i_mode))) { 1015 - fdput(f); 1016 - goto out; 1017 - } 1012 + !(S_ISDIR(file_inode(fd_file(f))->i_mode))) 1013 + return -ENOTDIR; 1018 1014 1019 1015 *path = fd_file(f)->f_path; 1020 1016 path_get(path); 1021 - fdput(f); 1022 1017 } else { 1023 1018 unsigned int lookup_flags = 0; 1024 1019
+5 -12
fs/notify/inotify/inotify_user.c
··· 794 794 { 795 795 struct fsnotify_group *group; 796 796 struct inotify_inode_mark *i_mark; 797 - struct fd f; 798 - int ret = -EINVAL; 797 + CLASS(fd, f)(fd); 799 798 800 - f = fdget(fd); 801 - if (unlikely(!fd_file(f))) 799 + if (fd_empty(f)) 802 800 return -EBADF; 803 801 804 802 /* verify that this is indeed an inotify instance */ 805 803 if (unlikely(fd_file(f)->f_op != &inotify_fops)) 806 - goto out; 804 + return -EINVAL; 807 805 808 806 group = fd_file(f)->private_data; 809 807 810 808 i_mark = inotify_idr_find(group, wd); 811 809 if (unlikely(!i_mark)) 812 - goto out; 813 - 814 - ret = 0; 810 + return -EINVAL; 815 811 816 812 fsnotify_destroy_mark(&i_mark->fsn_mark, group); 817 813 818 814 /* match ref taken by inotify_idr_find */ 819 815 fsnotify_put_mark(&i_mark->fsn_mark); 820 - 821 - out: 822 - fdput(f); 823 - return ret; 816 + return 0; 824 817 } 825 818 826 819 /*
+15 -21
fs/open.c
··· 349 349 350 350 int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len) 351 351 { 352 - struct fd f = fdget(fd); 353 - int error = -EBADF; 352 + CLASS(fd, f)(fd); 354 353 355 - if (fd_file(f)) { 356 - error = vfs_fallocate(fd_file(f), mode, offset, len); 357 - fdput(f); 358 - } 359 - return error; 354 + if (fd_empty(f)) 355 + return -EBADF; 356 + 357 + return vfs_fallocate(fd_file(f), mode, offset, len); 360 358 } 361 359 362 360 SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len) ··· 664 666 665 667 SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) 666 668 { 667 - struct fd f = fdget(fd); 668 - int err = -EBADF; 669 + CLASS(fd, f)(fd); 669 670 670 - if (fd_file(f)) { 671 - err = vfs_fchmod(fd_file(f), mode); 672 - fdput(f); 673 - } 674 - return err; 671 + if (fd_empty(f)) 672 + return -EBADF; 673 + 674 + return vfs_fchmod(fd_file(f), mode); 675 675 } 676 676 677 677 static int do_fchmodat(int dfd, const char __user *filename, umode_t mode, ··· 856 860 857 861 int ksys_fchown(unsigned int fd, uid_t user, gid_t group) 858 862 { 859 - struct fd f = fdget(fd); 860 - int error = -EBADF; 863 + CLASS(fd, f)(fd); 861 864 862 - if (fd_file(f)) { 863 - error = vfs_fchown(fd_file(f), user, group); 864 - fdput(f); 865 - } 866 - return error; 865 + if (fd_empty(f)) 866 + return -EBADF; 867 + 868 + return vfs_fchown(fd_file(f), user, group); 867 869 } 868 870 869 871 SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
+9 -19
fs/read_write.c
··· 1663 1663 { 1664 1664 loff_t pos_in; 1665 1665 loff_t pos_out; 1666 - struct fd f_in; 1667 - struct fd f_out; 1668 1666 ssize_t ret = -EBADF; 1669 1667 1670 - f_in = fdget(fd_in); 1671 - if (!fd_file(f_in)) 1672 - goto out2; 1668 + CLASS(fd, f_in)(fd_in); 1669 + if (fd_empty(f_in)) 1670 + return -EBADF; 1673 1671 1674 - f_out = fdget(fd_out); 1675 - if (!fd_file(f_out)) 1676 - goto out1; 1672 + CLASS(fd, f_out)(fd_out); 1673 + if (fd_empty(f_out)) 1674 + return -EBADF; 1677 1675 1678 - ret = -EFAULT; 1679 1676 if (off_in) { 1680 1677 if (copy_from_user(&pos_in, off_in, sizeof(loff_t))) 1681 - goto out; 1678 + return -EFAULT; 1682 1679 } else { 1683 1680 pos_in = fd_file(f_in)->f_pos; 1684 1681 } 1685 1682 1686 1683 if (off_out) { 1687 1684 if (copy_from_user(&pos_out, off_out, sizeof(loff_t))) 1688 - goto out; 1685 + return -EFAULT; 1689 1686 } else { 1690 1687 pos_out = fd_file(f_out)->f_pos; 1691 1688 } 1692 1689 1693 - ret = -EINVAL; 1694 1690 if (flags != 0) 1695 - goto out; 1691 + return -EINVAL; 1696 1692 1697 1693 ret = vfs_copy_file_range(fd_file(f_in), pos_in, fd_file(f_out), pos_out, len, 1698 1694 flags); ··· 1710 1714 fd_file(f_out)->f_pos = pos_out; 1711 1715 } 1712 1716 } 1713 - 1714 - out: 1715 - fdput(f_out); 1716 - out1: 1717 - fdput(f_in); 1718 - out2: 1719 1717 return ret; 1720 1718 } 1721 1719
+3 -6
fs/signalfd.c
··· 288 288 289 289 fd_install(ufd, file); 290 290 } else { 291 - struct fd f = fdget(ufd); 292 - if (!fd_file(f)) 291 + CLASS(fd, f)(ufd); 292 + if (fd_empty(f)) 293 293 return -EBADF; 294 294 ctx = fd_file(f)->private_data; 295 - if (fd_file(f)->f_op != &signalfd_fops) { 296 - fdput(f); 295 + if (fd_file(f)->f_op != &signalfd_fops) 297 296 return -EINVAL; 298 - } 299 297 spin_lock_irq(&current->sighand->siglock); 300 298 ctx->sigmask = *mask; 301 299 spin_unlock_irq(&current->sighand->siglock); 302 300 303 301 wake_up(&current->sighand->signalfd_wqh); 304 - fdput(f); 305 302 } 306 303 307 304 return ufd;
+11 -18
fs/sync.c
··· 148 148 */ 149 149 SYSCALL_DEFINE1(syncfs, int, fd) 150 150 { 151 - struct fd f = fdget(fd); 151 + CLASS(fd, f)(fd); 152 152 struct super_block *sb; 153 153 int ret, ret2; 154 154 155 - if (!fd_file(f)) 155 + if (fd_empty(f)) 156 156 return -EBADF; 157 157 sb = fd_file(f)->f_path.dentry->d_sb; 158 158 ··· 162 162 163 163 ret2 = errseq_check_and_advance(&sb->s_wb_err, &fd_file(f)->f_sb_err); 164 164 165 - fdput(f); 166 165 return ret ? ret : ret2; 167 166 } 168 167 ··· 204 205 205 206 static int do_fsync(unsigned int fd, int datasync) 206 207 { 207 - struct fd f = fdget(fd); 208 - int ret = -EBADF; 208 + CLASS(fd, f)(fd); 209 209 210 - if (fd_file(f)) { 211 - ret = vfs_fsync(fd_file(f), datasync); 212 - fdput(f); 213 - } 214 - return ret; 210 + if (fd_empty(f)) 211 + return -EBADF; 212 + 213 + return vfs_fsync(fd_file(f), datasync); 215 214 } 216 215 217 216 SYSCALL_DEFINE1(fsync, unsigned int, fd) ··· 352 355 int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes, 353 356 unsigned int flags) 354 357 { 355 - int ret; 356 - struct fd f; 358 + CLASS(fd, f)(fd); 357 359 358 - ret = -EBADF; 359 - f = fdget(fd); 360 - if (fd_file(f)) 361 - ret = sync_file_range(fd_file(f), offset, nbytes, flags); 360 + if (fd_empty(f)) 361 + return -EBADF; 362 362 363 - fdput(f); 364 - return ret; 363 + return sync_file_range(fd_file(f), offset, nbytes, flags); 365 364 } 366 365 367 366 SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
+8 -21
io_uring/sqpoll.c
··· 106 106 { 107 107 struct io_ring_ctx *ctx_attach; 108 108 struct io_sq_data *sqd; 109 - struct fd f; 109 + CLASS(fd, f)(p->wq_fd); 110 110 111 - f = fdget(p->wq_fd); 112 - if (!fd_file(f)) 111 + if (fd_empty(f)) 113 112 return ERR_PTR(-ENXIO); 114 - if (!io_is_uring_fops(fd_file(f))) { 115 - fdput(f); 113 + if (!io_is_uring_fops(fd_file(f))) 116 114 return ERR_PTR(-EINVAL); 117 - } 118 115 119 116 ctx_attach = fd_file(f)->private_data; 120 117 sqd = ctx_attach->sq_data; 121 - if (!sqd) { 122 - fdput(f); 118 + if (!sqd) 123 119 return ERR_PTR(-EINVAL); 124 - } 125 - if (sqd->task_tgid != current->tgid) { 126 - fdput(f); 120 + if (sqd->task_tgid != current->tgid) 127 121 return ERR_PTR(-EPERM); 128 - } 129 122 130 123 refcount_inc(&sqd->refs); 131 - fdput(f); 132 124 return sqd; 133 125 } 134 126 ··· 409 417 /* Retain compatibility with failing for an invalid attach attempt */ 410 418 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) == 411 419 IORING_SETUP_ATTACH_WQ) { 412 - struct fd f; 413 - 414 - f = fdget(p->wq_fd); 415 - if (!fd_file(f)) 420 + CLASS(fd, f)(p->wq_fd); 421 + if (fd_empty(f)) 416 422 return -ENXIO; 417 - if (!io_is_uring_fops(fd_file(f))) { 418 - fdput(f); 423 + if (!io_is_uring_fops(fd_file(f))) 419 424 return -EINVAL; 420 - } 421 - fdput(f); 422 425 } 423 426 if (ctx->flags & IORING_SETUP_SQPOLL) { 424 427 struct task_struct *tsk;
+5 -9
kernel/events/core.c
··· 966 966 { 967 967 struct perf_cgroup *cgrp; 968 968 struct cgroup_subsys_state *css; 969 - struct fd f = fdget(fd); 969 + CLASS(fd, f)(fd); 970 970 int ret = 0; 971 971 972 - if (!fd_file(f)) 972 + if (fd_empty(f)) 973 973 return -EBADF; 974 974 975 975 css = css_tryget_online_from_dir(fd_file(f)->f_path.dentry, 976 976 &perf_event_cgrp_subsys); 977 - if (IS_ERR(css)) { 978 - ret = PTR_ERR(css); 979 - goto out; 980 - } 977 + if (IS_ERR(css)) 978 + return PTR_ERR(css); 981 979 982 980 ret = perf_cgroup_ensure_storage(event, css); 983 981 if (ret) 984 - goto out; 982 + return ret; 985 983 986 984 cgrp = container_of(css, struct perf_cgroup, css); 987 985 event->cgrp = cgrp; ··· 993 995 perf_detach_cgroup(event); 994 996 ret = -EINVAL; 995 997 } 996 - out: 997 - fdput(f); 998 998 return ret; 999 999 } 1000 1000
+2 -3
kernel/nsproxy.c
··· 545 545 546 546 SYSCALL_DEFINE2(setns, int, fd, int, flags) 547 547 { 548 - struct fd f = fdget(fd); 548 + CLASS(fd, f)(fd); 549 549 struct ns_common *ns = NULL; 550 550 struct nsset nsset = {}; 551 551 int err = 0; 552 552 553 - if (!fd_file(f)) 553 + if (fd_empty(f)) 554 554 return -EBADF; 555 555 556 556 if (proc_ns_file(fd_file(f))) { ··· 580 580 } 581 581 put_nsset(&nsset); 582 582 out: 583 - fdput(f); 584 583 return err; 585 584 } 586 585
+2 -5
kernel/pid.c
··· 536 536 537 537 struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags) 538 538 { 539 - struct fd f; 539 + CLASS(fd, f)(fd); 540 540 struct pid *pid; 541 541 542 - f = fdget(fd); 543 - if (!fd_file(f)) 542 + if (fd_empty(f)) 544 543 return ERR_PTR(-EBADF); 545 544 546 545 pid = pidfd_pid(fd_file(f)); ··· 547 548 get_pid(pid); 548 549 *flags = fd_file(f)->f_flags; 549 550 } 550 - 551 - fdput(f); 552 551 return pid; 553 552 } 554 553
+5 -10
kernel/sys.c
··· 1911 1911 1912 1912 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) 1913 1913 { 1914 - struct fd exe; 1914 + CLASS(fd, exe)(fd); 1915 1915 struct inode *inode; 1916 1916 int err; 1917 1917 1918 - exe = fdget(fd); 1919 - if (!fd_file(exe)) 1918 + if (fd_empty(exe)) 1920 1919 return -EBADF; 1921 1920 1922 1921 inode = file_inode(fd_file(exe)); ··· 1925 1926 * sure that this one is executable as well, to avoid breaking an 1926 1927 * overall picture. 1927 1928 */ 1928 - err = -EACCES; 1929 1929 if (!S_ISREG(inode->i_mode) || path_noexec(&fd_file(exe)->f_path)) 1930 - goto exit; 1930 + return -EACCES; 1931 1931 1932 1932 err = file_permission(fd_file(exe), MAY_EXEC); 1933 1933 if (err) 1934 - goto exit; 1934 + return err; 1935 1935 1936 - err = replace_mm_exe_file(mm, fd_file(exe)); 1937 - exit: 1938 - fdput(exe); 1939 - return err; 1936 + return replace_mm_exe_file(mm, fd_file(exe)); 1940 1937 } 1941 1938 1942 1939 /*
+2 -4
kernel/watch_queue.c
··· 663 663 { 664 664 struct pipe_inode_info *pipe; 665 665 struct watch_queue *wqueue = ERR_PTR(-EINVAL); 666 - struct fd f; 666 + CLASS(fd, f)(fd); 667 667 668 - f = fdget(fd); 669 - if (fd_file(f)) { 668 + if (!fd_empty(f)) { 670 669 pipe = get_pipe_info(fd_file(f), false); 671 670 if (pipe && pipe->watch_queue) { 672 671 wqueue = pipe->watch_queue; 673 672 kref_get(&wqueue->usage); 674 673 } 675 - fdput(f); 676 674 } 677 675 678 676 return wqueue;
+3 -7
mm/fadvise.c
··· 190 190 191 191 int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) 192 192 { 193 - struct fd f = fdget(fd); 194 - int ret; 193 + CLASS(fd, f)(fd); 195 194 196 - if (!fd_file(f)) 195 + if (fd_empty(f)) 197 196 return -EBADF; 198 197 199 - ret = vfs_fadvise(fd_file(f), offset, len, advice); 200 - 201 - fdput(f); 202 - return ret; 198 + return vfs_fadvise(fd_file(f), offset, len, advice); 203 199 } 204 200 205 201 SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
+5 -12
mm/readahead.c
··· 673 673 674 674 ssize_t ksys_readahead(int fd, loff_t offset, size_t count) 675 675 { 676 - ssize_t ret; 677 - struct fd f; 676 + CLASS(fd, f)(fd); 678 677 679 - ret = -EBADF; 680 - f = fdget(fd); 681 - if (!fd_file(f) || !(fd_file(f)->f_mode & FMODE_READ)) 682 - goto out; 678 + if (fd_empty(f) || !(fd_file(f)->f_mode & FMODE_READ)) 679 + return -EBADF; 683 680 684 681 /* 685 682 * The readahead() syscall is intended to run only on files 686 683 * that can execute readahead. If readahead is not possible 687 684 * on this file, then we must return -EINVAL. 688 685 */ 689 - ret = -EINVAL; 690 686 if (!fd_file(f)->f_mapping || !fd_file(f)->f_mapping->a_ops || 691 687 (!S_ISREG(file_inode(fd_file(f))->i_mode) && 692 688 !S_ISBLK(file_inode(fd_file(f))->i_mode))) 693 - goto out; 689 + return -EINVAL; 694 690 695 - ret = vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED); 696 - out: 697 - fdput(f); 698 - return ret; 691 + return vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED); 699 692 } 700 693 701 694 SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
+4 -6
net/core/net_namespace.c
··· 694 694 695 695 struct net *get_net_ns_by_fd(int fd) 696 696 { 697 - struct fd f = fdget(fd); 698 - struct net *net = ERR_PTR(-EINVAL); 697 + CLASS(fd, f)(fd); 699 698 700 - if (!fd_file(f)) 699 + if (fd_empty(f)) 701 700 return ERR_PTR(-EBADF); 702 701 703 702 if (proc_ns_file(fd_file(f))) { 704 703 struct ns_common *ns = get_proc_ns(file_inode(fd_file(f))); 705 704 if (ns->ops == &netns_operations) 706 - net = get_net(container_of(ns, struct net, ns)); 705 + return get_net(container_of(ns, struct net, ns)); 707 706 } 708 - fdput(f); 709 707 710 - return net; 708 + return ERR_PTR(-EINVAL); 711 709 } 712 710 EXPORT_SYMBOL_GPL(get_net_ns_by_fd); 713 711 #endif
+8 -18
security/landlock/syscalls.c
··· 241 241 static struct landlock_ruleset *get_ruleset_from_fd(const int fd, 242 242 const fmode_t mode) 243 243 { 244 - struct fd ruleset_f; 244 + CLASS(fd, ruleset_f)(fd); 245 245 struct landlock_ruleset *ruleset; 246 246 247 - ruleset_f = fdget(fd); 248 - if (!fd_file(ruleset_f)) 247 + if (fd_empty(ruleset_f)) 249 248 return ERR_PTR(-EBADF); 250 249 251 250 /* Checks FD type and access right. */ 252 - if (fd_file(ruleset_f)->f_op != &ruleset_fops) { 253 - ruleset = ERR_PTR(-EBADFD); 254 - goto out_fdput; 255 - } 256 - if (!(fd_file(ruleset_f)->f_mode & mode)) { 257 - ruleset = ERR_PTR(-EPERM); 258 - goto out_fdput; 259 - } 251 + if (fd_file(ruleset_f)->f_op != &ruleset_fops) 252 + return ERR_PTR(-EBADFD); 253 + if (!(fd_file(ruleset_f)->f_mode & mode)) 254 + return ERR_PTR(-EPERM); 260 255 ruleset = fd_file(ruleset_f)->private_data; 261 - if (WARN_ON_ONCE(ruleset->num_layers != 1)) { 262 - ruleset = ERR_PTR(-EINVAL); 263 - goto out_fdput; 264 - } 256 + if (WARN_ON_ONCE(ruleset->num_layers != 1)) 257 + return ERR_PTR(-EINVAL); 265 258 landlock_get_ruleset(ruleset); 266 - 267 - out_fdput: 268 - fdput(ruleset_f); 269 259 return ruleset; 270 260 } 271 261
+2 -6
virt/kvm/vfio.c
··· 190 190 { 191 191 struct kvm_vfio *kv = dev->private; 192 192 struct kvm_vfio_file *kvf; 193 - struct fd f; 193 + CLASS(fd, f)(fd); 194 194 int ret; 195 195 196 - f = fdget(fd); 197 - if (!fd_file(f)) 196 + if (fd_empty(f)) 198 197 return -EBADF; 199 198 200 199 ret = -ENOENT; ··· 219 220 kvm_vfio_update_coherency(dev); 220 221 221 222 mutex_unlock(&kv->lock); 222 - 223 - fdput(f); 224 - 225 223 return ret; 226 224 } 227 225