// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"
#include "mount.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @idmap: idmap of the mount the inode was found from
 * @request_mask: statx request_mask
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then
 * take care to map the inode according to @idmap before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
		      struct inode *inode, struct kstat *stat)
{
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode->i_atime;
	stat->mtime = inode->i_mtime;
	stat->ctime = inode_get_ctime(inode);
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
		stat->result_mask |= STATX_CHANGE_COOKIE;
		stat->change_cookie = inode_query_iversion(inode);
	}

}
EXPORT_SYMBOL(generic_fillattr);

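/*
 * A filesystem that provides its own ->getattr usually still calls
 * generic_fillattr() for the common fields and then overrides or extends the
 * result.  A minimal, hypothetical sketch (foo_getattr and FOO_I are
 * illustrative names only, not part of this file):
 *
 *	static int foo_getattr(struct mnt_idmap *idmap, const struct path *path,
 *			       struct kstat *stat, u32 request_mask,
 *			       unsigned int query_flags)
 *	{
 *		struct inode *inode = d_inode(path->dentry);
 *
 *		generic_fillattr(idmap, request_mask, inode, stat);
 *		stat->btime = FOO_I(inode)->i_crtime;	// add creation time
 *		stat->result_mask |= STATX_BTIME;
 *		return 0;
 *	}
 */
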
/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct mnt_idmap *idmap;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	idmap = mnt_idmap(path->mnt);
	if (inode->i_op->getattr)
		return inode->i_op->getattr(idmap, path, stat,
					    request_mask, query_flags);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/*
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must indicate in
 * request_mask and query_flags what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieved.  Any such attribute not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);

/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	struct fd f;
	int error;

	f = fdget_raw(fd);
	if (!f.file)
		return -EBADF;
	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
	fdput(f);
	return error;
}

int getname_statx_lookup_flags(int flags)
{
	int lookup_flags = 0;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	return lookup_flags;
}

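/*
 * The mapping above gives, for example: flags == 0 (statx() with no AT_*
 * flags) -> LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT; AT_SYMLINK_NOFOLLOW |
 * AT_NO_AUTOMOUNT -> no extra lookup flags; and AT_EMPTY_PATH additionally
 * sets LOOKUP_EMPTY so that an empty pathname refers to the dfd itself.
 */
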
/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
		     struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		goto out;

	error = vfs_getattr(&path, stat, request_mask, flags);

	stat->mnt_id = real_mount(path.mnt)->mnt_id;
	stat->result_mask |= STATX_MNT_ID;

	if (path.mnt->mnt_root == path.dentry)
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;

	/* Handle STATX_DIOALIGN for block devices. */
	if (request_mask & STATX_DIOALIGN) {
		struct inode *inode = d_backing_inode(path.dentry);

		if (S_ISBLK(inode->i_mode))
			bdev_statx_dioalign(inode, stat);
	}

	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name;

	/*
	 * Work around glibc turning fstat() into fstatat(AT_EMPTY_PATH)
	 *
	 * If AT_EMPTY_PATH is set, we expect the common case to be that
	 * empty path, and avoid doing all the extra pathname work.
	 */
	if (dfd >= 0 && flags == AT_EMPTY_PATH) {
		char c;

		ret = get_user(c, filename);
		if (unlikely(ret))
			return ret;

		if (likely(!c))
			return vfs_fstat(dfd, stat);
	}

	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}

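/*
 * The fast path in vfs_fstatat() above matches the way glibc implements
 * fstat(fd, &st) on many architectures, roughly (illustrative userspace
 * sketch, not part of the kernel API):
 *
 *	struct stat st;
 *	fstatat(fd, "", &st, AT_EMPTY_PATH);
 *
 * i.e. an empty pathname plus AT_EMPTY_PATH, which the check on dfd and the
 * first byte of @filename turns back into a plain vfs_fstat() call.
 */
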
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

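/*
 * The MAX_NON_LFS checks in cp_old_stat() and cp_new_stat() are what make a
 * 32-bit, non-LFS binary see EOVERFLOW rather than a truncated st_size: for
 * example, stat() of a 3 GiB file through this path fails with -EOVERFLOW
 * because the size does not fit in a signed 32-bit off_t, and the application
 * is expected to use the 64-bit interfaces (stat64()/statx()) instead.
 */
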
SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	int error;
	int empty = 0;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
	if (!error) {
		struct inode *inode = d_backing_inode(path.dentry);

		error = empty ? -ENOENT : -EINVAL;
		/*
		 * AFS mountpoints allow readlink(2) but are not symlinks
		 */
		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
			error = security_inode_readlink(path.dentry);
			if (!error) {
				touch_atime(&path);
				error = vfs_readlink(path.dentry, buf, bufsiz);
			}
		}
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}

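/*
 * As with readlink(2) generally, the buffer filled in here is not
 * NUL-terminated; the return value is the number of bytes written.  A typical
 * userspace call therefore looks roughly like this (illustrative sketch only):
 *
 *	char buf[PATH_MAX];
 *	ssize_t len = readlinkat(AT_FDCWD, "/etc/localtime", buf, sizeof(buf) - 1);
 *	if (len >= 0)
 *		buf[len] = '\0';
 */
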
/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	/* STATX_CHANGE_COOKIE is kernel-only for now */
	tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
	tmp.stx_blksize = stat->blksize;
	/* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
	tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;
	tmp.stx_dio_mem_align = stat->dio_mem_align;
	tmp.stx_dio_offset_align = stat->dio_offset_align;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/* STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

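/*
 * AT_STATX_SYNC_TYPE is the mask covering AT_STATX_FORCE_SYNC and
 * AT_STATX_DONT_SYNC, so the check in do_statx() rejects a caller that asks
 * for both at once; passing neither selects the default
 * AT_STATX_SYNC_AS_STAT behaviour.
 */
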
/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	struct filename *name;

	name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}

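/*
 * For instance, the fstat()-style emulation mentioned above looks roughly
 * like this from userspace (illustrative sketch only):
 *
 *	struct statx stx;
 *
 *	if (statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS | STATX_BTIME,
 *		  &stx) == 0 && (stx.stx_mask & STATX_BTIME))
 *		printf("created: %lld\n", (long long)stx.stx_btime.tv_sec);
 */
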
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);

void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);
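
/*
 * The pairing of i_blocks and i_bytes above keeps the byte count split into
 * whole 512-byte units plus a remainder.  For example, inode_set_bytes() with
 * bytes == 1300 stores i_blocks = 2 (1300 >> 9) and i_bytes = 276
 * (1300 & 511); a subsequent __inode_add_bytes() of 300 bytes adds nothing to
 * i_blocks directly but pushes i_bytes to 576, so the carry branch bumps
 * i_blocks to 3 and leaves i_bytes at 64.
 */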