1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _LINUX_FS_H 3#define _LINUX_FS_H 4 5#include <linux/linkage.h> 6#include <linux/wait_bit.h> 7#include <linux/kdev_t.h> 8#include <linux/dcache.h> 9#include <linux/path.h> 10#include <linux/stat.h> 11#include <linux/cache.h> 12#include <linux/list.h> 13#include <linux/list_lru.h> 14#include <linux/llist.h> 15#include <linux/radix-tree.h> 16#include <linux/xarray.h> 17#include <linux/rbtree.h> 18#include <linux/init.h> 19#include <linux/pid.h> 20#include <linux/bug.h> 21#include <linux/mutex.h> 22#include <linux/rwsem.h> 23#include <linux/mm_types.h> 24#include <linux/capability.h> 25#include <linux/semaphore.h> 26#include <linux/fcntl.h> 27#include <linux/rculist_bl.h> 28#include <linux/atomic.h> 29#include <linux/shrinker.h> 30#include <linux/migrate_mode.h> 31#include <linux/uidgid.h> 32#include <linux/lockdep.h> 33#include <linux/percpu-rwsem.h> 34#include <linux/workqueue.h> 35#include <linux/delayed_call.h> 36#include <linux/uuid.h> 37#include <linux/errseq.h> 38#include <linux/ioprio.h> 39#include <linux/fs_types.h> 40#include <linux/build_bug.h> 41#include <linux/stddef.h> 42#include <linux/mount.h> 43#include <linux/cred.h> 44#include <linux/mnt_idmapping.h> 45#include <linux/slab.h> 46 47#include <asm/byteorder.h> 48#include <uapi/linux/fs.h> 49 50struct backing_dev_info; 51struct bdi_writeback; 52struct bio; 53struct io_comp_batch; 54struct export_operations; 55struct fiemap_extent_info; 56struct hd_geometry; 57struct iovec; 58struct kiocb; 59struct kobject; 60struct pipe_inode_info; 61struct poll_table_struct; 62struct kstatfs; 63struct vm_area_struct; 64struct vfsmount; 65struct cred; 66struct swap_info_struct; 67struct seq_file; 68struct workqueue_struct; 69struct iov_iter; 70struct fscrypt_info; 71struct fscrypt_operations; 72struct fsverity_info; 73struct fsverity_operations; 74struct fs_context; 75struct fs_parameter_spec; 76struct fileattr; 77 78extern void __init inode_init(void); 79extern void __init inode_init_early(void); 80extern void __init files_init(void); 81extern void __init files_maxfiles_init(void); 82 83extern unsigned long get_max_files(void); 84extern unsigned int sysctl_nr_open; 85 86typedef __kernel_rwf_t rwf_t; 87 88struct buffer_head; 89typedef int (get_block_t)(struct inode *inode, sector_t iblock, 90 struct buffer_head *bh_result, int create); 91typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, 92 ssize_t bytes, void *private); 93 94#define MAY_EXEC 0x00000001 95#define MAY_WRITE 0x00000002 96#define MAY_READ 0x00000004 97#define MAY_APPEND 0x00000008 98#define MAY_ACCESS 0x00000010 99#define MAY_OPEN 0x00000020 100#define MAY_CHDIR 0x00000040 101/* called from RCU mode, don't block */ 102#define MAY_NOT_BLOCK 0x00000080 103 104/* 105 * flags in file.f_mode. 
Note that FMODE_READ and FMODE_WRITE must correspond 106 * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open() 107 */ 108 109/* file is open for reading */ 110#define FMODE_READ ((__force fmode_t)0x1) 111/* file is open for writing */ 112#define FMODE_WRITE ((__force fmode_t)0x2) 113/* file is seekable */ 114#define FMODE_LSEEK ((__force fmode_t)0x4) 115/* file can be accessed using pread */ 116#define FMODE_PREAD ((__force fmode_t)0x8) 117/* file can be accessed using pwrite */ 118#define FMODE_PWRITE ((__force fmode_t)0x10) 119/* File is opened for execution with sys_execve / sys_uselib */ 120#define FMODE_EXEC ((__force fmode_t)0x20) 121/* File is opened with O_NDELAY (only set for block devices) */ 122#define FMODE_NDELAY ((__force fmode_t)0x40) 123/* File is opened with O_EXCL (only set for block devices) */ 124#define FMODE_EXCL ((__force fmode_t)0x80) 125/* File is opened using open(.., 3, ..) and is writeable only for ioctls 126 (specialy hack for floppy.c) */ 127#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100) 128/* 32bit hashes as llseek() offset (for directories) */ 129#define FMODE_32BITHASH ((__force fmode_t)0x200) 130/* 64bit hashes as llseek() offset (for directories) */ 131#define FMODE_64BITHASH ((__force fmode_t)0x400) 132 133/* 134 * Don't update ctime and mtime. 135 * 136 * Currently a special hack for the XFS open_by_handle ioctl, but we'll 137 * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. 138 */ 139#define FMODE_NOCMTIME ((__force fmode_t)0x800) 140 141/* Expect random access pattern */ 142#define FMODE_RANDOM ((__force fmode_t)0x1000) 143 144/* File is huge (eg. /dev/mem): treat loff_t as unsigned */ 145#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) 146 147/* File is opened with O_PATH; almost nothing can be done with it */ 148#define FMODE_PATH ((__force fmode_t)0x4000) 149 150/* File needs atomic accesses to f_pos */ 151#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) 152/* Write access to underlying fs */ 153#define FMODE_WRITER ((__force fmode_t)0x10000) 154/* Has read method(s) */ 155#define FMODE_CAN_READ ((__force fmode_t)0x20000) 156/* Has write method(s) */ 157#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) 158 159#define FMODE_OPENED ((__force fmode_t)0x80000) 160#define FMODE_CREATED ((__force fmode_t)0x100000) 161 162/* File is stream-like */ 163#define FMODE_STREAM ((__force fmode_t)0x200000) 164 165/* File was opened by fanotify and shouldn't generate fanotify events */ 166#define FMODE_NONOTIFY ((__force fmode_t)0x4000000) 167 168/* File is capable of returning -EAGAIN if I/O will block */ 169#define FMODE_NOWAIT ((__force fmode_t)0x8000000) 170 171/* File represents mount that needs unmounting */ 172#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000) 173 174/* File does not contribute to nr_files count */ 175#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) 176 177/* File supports async buffered reads */ 178#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) 179 180/* 181 * Attribute flags. These should be or-ed together to figure out what 182 * has been changed! 
 */
#define ATTR_MODE	(1 << 0)
#define ATTR_UID	(1 << 1)
#define ATTR_GID	(1 << 2)
#define ATTR_SIZE	(1 << 3)
#define ATTR_ATIME	(1 << 4)
#define ATTR_MTIME	(1 << 5)
#define ATTR_CTIME	(1 << 6)
#define ATTR_ATIME_SET	(1 << 7)
#define ATTR_MTIME_SET	(1 << 8)
#define ATTR_FORCE	(1 << 9) /* Not a change, but a change it */
#define ATTR_KILL_SUID	(1 << 11)
#define ATTR_KILL_SGID	(1 << 12)
#define ATTR_FILE	(1 << 13)
#define ATTR_KILL_PRIV	(1 << 14)
#define ATTR_OPEN	(1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET	(1 << 16)
#define ATTR_TOUCH	(1 << 17)

/*
 * Whiteout is represented by a char device. The following constants define the
 * mode and device number to use.
 */
#define WHITEOUT_MODE 0
#define WHITEOUT_DEV 0

/*
 * This is the Inode Attributes structure, used for notify_change(). It
 * uses the above definitions as flags, to know which values have changed.
 * Also, in this manner, a Filesystem can look at only the values it cares
 * about. Basically, these are the attributes that the VFS layer can
 * request to change from the FS layer.
 *
 * Derek Atkins <warlord@MIT.EDU> 94-10-20
 */
struct iattr {
	unsigned int	ia_valid;
	umode_t		ia_mode;
	kuid_t		ia_uid;
	kgid_t		ia_gid;
	loff_t		ia_size;
	struct timespec64 ia_atime;
	struct timespec64 ia_mtime;
	struct timespec64 ia_ctime;

	/*
	 * Not an attribute, but an auxiliary info for filesystems wanting to
	 * implement an ftruncate() like method. NOTE: filesystem should
	 * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
	 */
	struct file	*ia_file;
};

/*
 * Includes for diskquotas.
 */
#include <linux/quota.h>

/*
 * Maximum number of layers of fs stack. Needs to be limited to
 * prevent kernel stack overflow
 */
#define FILESYSTEM_MAX_STACK_DEPTH 2

/**
 * enum positive_aop_returns - aop return codes with specific semantics
 *
 * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
 *			    completed, that the page is still locked, and
 *			    should be considered active. The VM uses this hint
 *			    to return the page to the active list -- it won't
 *			    be a candidate for writeback again in the near
 *			    future. Other callers must be careful to unlock
 *			    the page if they get this return. Returned by
 *			    writepage();
 *
 * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
 *			unlocked it and the page might have been truncated.
 *			The caller should back up to acquiring a new page and
 *			trying again. The aop will be taking reasonable
 *			precautions not to livelock. If the caller held a page
 *			reference, it should drop it before retrying. Returned
 *			by readpage().
 *
 * address_space_operation functions return these large constants to indicate
 * special semantics to the caller. These are much larger than the bytes in a
 * page to allow for functions that return the number of bytes operated on in a
 * given page.
 */

enum positive_aop_returns {
	AOP_WRITEPAGE_ACTIVATE	= 0x80000,
	AOP_TRUNCATED_PAGE	= 0x80001,
};

#define AOP_FLAG_NOFS			0x0002 /* used by filesystem to direct
						* helper code (eg buffer layer)
						* to clear GFP_FS from alloc */

/*
 * oh the beauties of C type declarations.
 */
struct page;
struct address_space;
struct writeback_control;
struct readahead_control;
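/*
 * A minimal sketch of the AOP_TRUNCATED_PAGE retry convention documented in
 * the enum positive_aop_returns kernel-doc above. The helper name is
 * hypothetical; it assumes find_or_create_page() from <linux/pagemap.h> and
 * put_page() from <linux/mm.h>, and it omits waiting for the read to finish
 * and checking the uptodate state.
 */
static inline int example_read_one_page(struct file *file,
					struct address_space *mapping,
					pgoff_t index)
{
	struct page *page;
	int err;

retry:
	page = find_or_create_page(mapping, index, mapping->gfp_mask);
	if (!page)
		return -ENOMEM;

	err = mapping->a_ops->readpage(file, page);
	if (err == AOP_TRUNCATED_PAGE) {
		/* The aop already unlocked the page; drop the reference and retry. */
		put_page(page);
		goto retry;
	}
	return err;
}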

/*
 * Write life time hint values.
 * Stored in struct inode as u8.
 */
enum rw_hint {
	WRITE_LIFE_NOT_SET	= 0,
	WRITE_LIFE_NONE		= RWH_WRITE_LIFE_NONE,
	WRITE_LIFE_SHORT	= RWH_WRITE_LIFE_SHORT,
	WRITE_LIFE_MEDIUM	= RWH_WRITE_LIFE_MEDIUM,
	WRITE_LIFE_LONG		= RWH_WRITE_LIFE_LONG,
	WRITE_LIFE_EXTREME	= RWH_WRITE_LIFE_EXTREME,
};

/* Match RWF_* bits to IOCB bits */
#define IOCB_HIPRI		(__force int) RWF_HIPRI
#define IOCB_DSYNC		(__force int) RWF_DSYNC
#define IOCB_SYNC		(__force int) RWF_SYNC
#define IOCB_NOWAIT		(__force int) RWF_NOWAIT
#define IOCB_APPEND		(__force int) RWF_APPEND

/* non-RWF related bits - start at 16 */
#define IOCB_EVENTFD		(1 << 16)
#define IOCB_DIRECT		(1 << 17)
#define IOCB_WRITE		(1 << 18)
/* iocb->ki_waitq is valid */
#define IOCB_WAITQ		(1 << 19)
#define IOCB_NOIO		(1 << 20)
/* can use bio alloc cache */
#define IOCB_ALLOC_CACHE	(1 << 21)

struct kiocb {
	struct file		*ki_filp;

	/* The 'ki_filp' pointer is shared in a union for aio */
	randomized_struct_fields_start

	loff_t			ki_pos;
	void (*ki_complete)(struct kiocb *iocb, long ret);
	void			*private;
	int			ki_flags;
	u16			ki_ioprio; /* See linux/ioprio.h */
	struct wait_page_queue	*ki_waitq; /* for async buffered IO */
	randomized_struct_fields_end
};

static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
	return kiocb->ki_complete == NULL;
}
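/*
 * A hedged sketch of how a ->read_iter() implementation might branch on
 * is_sync_kiocb(): a synchronous submitter has no ->ki_complete and expects
 * the result on return, while an asynchronous one (aio, io_uring) gets it
 * later via iocb->ki_complete(). All example_* names are hypothetical.
 */
ssize_t example_do_blocking_read(struct file *file, struct iov_iter *to,
				 loff_t *ppos);				/* hypothetical */
void example_queue_async_read(struct kiocb *iocb, struct iov_iter *to);	/* hypothetical */

static inline ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (is_sync_kiocb(iocb))
		return example_do_blocking_read(iocb->ki_filp, to, &iocb->ki_pos);

	/* Queue the I/O; completion will call iocb->ki_complete(iocb, ret). */
	example_queue_async_read(iocb, to);
	return -EIOCBQUEUED;
}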

struct address_space_operations {
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	int (*readpage)(struct file *, struct page *);

	/* Write back some dirty pages from this mapping. */
	int (*writepages)(struct address_space *, struct writeback_control *);

	/* Mark a folio dirty. Return true if this dirtied it */
	bool (*dirty_folio)(struct address_space *, struct folio *);

	void (*readahead)(struct readahead_control *);

	int (*write_begin)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata);
	int (*write_end)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata);

	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
	sector_t (*bmap)(struct address_space *, sector_t);
	void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
	int (*releasepage) (struct page *, gfp_t);
	void (*freepage)(struct page *);
	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
	/*
	 * migrate the contents of a page to the specified target. If
	 * migrate_mode is MIGRATE_ASYNC, it must not block.
	 */
	int (*migratepage) (struct address_space *,
			struct page *, struct page *, enum migrate_mode);
	bool (*isolate_page)(struct page *, isolate_mode_t);
	void (*putback_page)(struct page *);
	int (*launder_folio)(struct folio *);
	bool (*is_partially_uptodate) (struct folio *, size_t from,
			size_t count);
	void (*is_dirty_writeback) (struct page *, bool *, bool *);
	int (*error_remove_page)(struct address_space *, struct page *);

	/* swapfile support */
	int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
				sector_t *span);
	void (*swap_deactivate)(struct file *file);
};

extern const struct address_space_operations empty_aops;

/*
 * pagecache_write_begin/pagecache_write_end must be used by general code
 * to write into the pagecache.
 */
int pagecache_write_begin(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata);

int pagecache_write_end(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata);

/**
 * struct address_space - Contents of a cacheable, mappable object.
 * @host: Owner, either the inode or the block_device.
 * @i_pages: Cached pages.
 * @invalidate_lock: Guards coherency between page cache contents and
 *	file offset->disk block mappings in the filesystem during invalidates.
 *	It is also used to block modification of page cache contents through
 *	memory mappings.
 * @gfp_mask: Memory allocation flags to use for allocating pages.
 * @i_mmap_writable: Number of VM_SHARED mappings.
 * @nr_thps: Number of THPs in the pagecache (non-shmem only).
 * @i_mmap: Tree of private and shared mappings.
 * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
 * @nrpages: Number of page entries, protected by the i_pages lock.
 * @writeback_index: Writeback starts here.
 * @a_ops: Methods.
 * @flags: Error bits and flags (AS_*).
 * @wb_err: The most recent error which has occurred.
 * @private_lock: For use by the owner of the address_space.
 * @private_list: For use by the owner of the address_space.
 * @private_data: For use by the owner of the address_space.
 */
struct address_space {
	struct inode		*host;
	struct xarray		i_pages;
	struct rw_semaphore	invalidate_lock;
	gfp_t			gfp_mask;
	atomic_t		i_mmap_writable;
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	/* number of thp, only for non-shmem files */
	atomic_t		nr_thps;
#endif
	struct rb_root_cached	i_mmap;
	struct rw_semaphore	i_mmap_rwsem;
	unsigned long		nrpages;
	pgoff_t			writeback_index;
	const struct address_space_operations *a_ops;
	unsigned long		flags;
	errseq_t		wb_err;
	spinlock_t		private_lock;
	struct list_head	private_list;
	void			*private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
	/*
	 * On most architectures that alignment is already the case; but
	 * must be enforced here for CRIS, to let the least significant bit
	 * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
	 */

/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
#define PAGECACHE_TAG_DIRTY	XA_MARK_0
#define PAGECACHE_TAG_WRITEBACK	XA_MARK_1
#define PAGECACHE_TAG_TOWRITE	XA_MARK_2

/*
 * Returns true if any of the pages in the mapping are marked with the tag.
455 */ 456static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag) 457{ 458 return xa_marked(&mapping->i_pages, tag); 459} 460 461static inline void i_mmap_lock_write(struct address_space *mapping) 462{ 463 down_write(&mapping->i_mmap_rwsem); 464} 465 466static inline int i_mmap_trylock_write(struct address_space *mapping) 467{ 468 return down_write_trylock(&mapping->i_mmap_rwsem); 469} 470 471static inline void i_mmap_unlock_write(struct address_space *mapping) 472{ 473 up_write(&mapping->i_mmap_rwsem); 474} 475 476static inline void i_mmap_lock_read(struct address_space *mapping) 477{ 478 down_read(&mapping->i_mmap_rwsem); 479} 480 481static inline void i_mmap_unlock_read(struct address_space *mapping) 482{ 483 up_read(&mapping->i_mmap_rwsem); 484} 485 486static inline void i_mmap_assert_locked(struct address_space *mapping) 487{ 488 lockdep_assert_held(&mapping->i_mmap_rwsem); 489} 490 491static inline void i_mmap_assert_write_locked(struct address_space *mapping) 492{ 493 lockdep_assert_held_write(&mapping->i_mmap_rwsem); 494} 495 496/* 497 * Might pages of this file be mapped into userspace? 498 */ 499static inline int mapping_mapped(struct address_space *mapping) 500{ 501 return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root); 502} 503 504/* 505 * Might pages of this file have been modified in userspace? 506 * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap 507 * marks vma as VM_SHARED if it is shared, and the file was opened for 508 * writing i.e. vma may be mprotected writable even if now readonly. 509 * 510 * If i_mmap_writable is negative, no new writable mappings are allowed. You 511 * can only deny writable mappings, if none exists right now. 512 */ 513static inline int mapping_writably_mapped(struct address_space *mapping) 514{ 515 return atomic_read(&mapping->i_mmap_writable) > 0; 516} 517 518static inline int mapping_map_writable(struct address_space *mapping) 519{ 520 return atomic_inc_unless_negative(&mapping->i_mmap_writable) ? 521 0 : -EPERM; 522} 523 524static inline void mapping_unmap_writable(struct address_space *mapping) 525{ 526 atomic_dec(&mapping->i_mmap_writable); 527} 528 529static inline int mapping_deny_writable(struct address_space *mapping) 530{ 531 return atomic_dec_unless_positive(&mapping->i_mmap_writable) ? 532 0 : -EBUSY; 533} 534 535static inline void mapping_allow_writable(struct address_space *mapping) 536{ 537 atomic_inc(&mapping->i_mmap_writable); 538} 539 540/* 541 * Use sequence counter to get consistent i_size on 32-bit processors. 542 */ 543#if BITS_PER_LONG==32 && defined(CONFIG_SMP) 544#include <linux/seqlock.h> 545#define __NEED_I_SIZE_ORDERED 546#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount) 547#else 548#define i_size_ordered_init(inode) do { } while (0) 549#endif 550 551struct posix_acl; 552#define ACL_NOT_CACHED ((void *)(-1)) 553/* 554 * ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to 555 * cache the ACL. This also means that ->get_acl() can be called in RCU mode 556 * with the LOOKUP_RCU flag. 
557 */ 558#define ACL_DONT_CACHE ((void *)(-3)) 559 560static inline struct posix_acl * 561uncached_acl_sentinel(struct task_struct *task) 562{ 563 return (void *)task + 1; 564} 565 566static inline bool 567is_uncached_acl(struct posix_acl *acl) 568{ 569 return (long)acl & 1; 570} 571 572#define IOP_FASTPERM 0x0001 573#define IOP_LOOKUP 0x0002 574#define IOP_NOFOLLOW 0x0004 575#define IOP_XATTR 0x0008 576#define IOP_DEFAULT_READLINK 0x0010 577 578struct fsnotify_mark_connector; 579 580/* 581 * Keep mostly read-only and often accessed (especially for 582 * the RCU path lookup and 'stat' data) fields at the beginning 583 * of the 'struct inode' 584 */ 585struct inode { 586 umode_t i_mode; 587 unsigned short i_opflags; 588 kuid_t i_uid; 589 kgid_t i_gid; 590 unsigned int i_flags; 591 592#ifdef CONFIG_FS_POSIX_ACL 593 struct posix_acl *i_acl; 594 struct posix_acl *i_default_acl; 595#endif 596 597 const struct inode_operations *i_op; 598 struct super_block *i_sb; 599 struct address_space *i_mapping; 600 601#ifdef CONFIG_SECURITY 602 void *i_security; 603#endif 604 605 /* Stat data, not accessed from path walking */ 606 unsigned long i_ino; 607 /* 608 * Filesystems may only read i_nlink directly. They shall use the 609 * following functions for modification: 610 * 611 * (set|clear|inc|drop)_nlink 612 * inode_(inc|dec)_link_count 613 */ 614 union { 615 const unsigned int i_nlink; 616 unsigned int __i_nlink; 617 }; 618 dev_t i_rdev; 619 loff_t i_size; 620 struct timespec64 i_atime; 621 struct timespec64 i_mtime; 622 struct timespec64 i_ctime; 623 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 624 unsigned short i_bytes; 625 u8 i_blkbits; 626 u8 i_write_hint; 627 blkcnt_t i_blocks; 628 629#ifdef __NEED_I_SIZE_ORDERED 630 seqcount_t i_size_seqcount; 631#endif 632 633 /* Misc */ 634 unsigned long i_state; 635 struct rw_semaphore i_rwsem; 636 637 unsigned long dirtied_when; /* jiffies of first dirtying */ 638 unsigned long dirtied_time_when; 639 640 struct hlist_node i_hash; 641 struct list_head i_io_list; /* backing dev IO list */ 642#ifdef CONFIG_CGROUP_WRITEBACK 643 struct bdi_writeback *i_wb; /* the associated cgroup wb */ 644 645 /* foreign inode detection, see wbc_detach_inode() */ 646 int i_wb_frn_winner; 647 u16 i_wb_frn_avg_time; 648 u16 i_wb_frn_history; 649#endif 650 struct list_head i_lru; /* inode LRU list */ 651 struct list_head i_sb_list; 652 struct list_head i_wb_list; /* backing dev writeback list */ 653 union { 654 struct hlist_head i_dentry; 655 struct rcu_head i_rcu; 656 }; 657 atomic64_t i_version; 658 atomic64_t i_sequence; /* see futex */ 659 atomic_t i_count; 660 atomic_t i_dio_count; 661 atomic_t i_writecount; 662#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) 663 atomic_t i_readcount; /* struct files open RO */ 664#endif 665 union { 666 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 667 void (*free_inode)(struct inode *); 668 }; 669 struct file_lock_context *i_flctx; 670 struct address_space i_data; 671 struct list_head i_devices; 672 union { 673 struct pipe_inode_info *i_pipe; 674 struct cdev *i_cdev; 675 char *i_link; 676 unsigned i_dir_seq; 677 }; 678 679 __u32 i_generation; 680 681#ifdef CONFIG_FSNOTIFY 682 __u32 i_fsnotify_mask; /* all events this inode cares about */ 683 struct fsnotify_mark_connector __rcu *i_fsnotify_marks; 684#endif 685 686#ifdef CONFIG_FS_ENCRYPTION 687 struct fscrypt_info *i_crypt_info; 688#endif 689 690#ifdef CONFIG_FS_VERITY 691 struct fsverity_info *i_verity_info; 692#endif 693 694 void *i_private; 
			; /* fs or device private pointer */
} __randomize_layout;

struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);

static inline unsigned int i_blocksize(const struct inode *node)
{
	return (1 << node->i_blkbits);
}

static inline int inode_unhashed(struct inode *inode)
{
	return hlist_unhashed(&inode->i_hash);
}

/*
 * __mark_inode_dirty expects inodes to be hashed. Since we don't
 * want special inodes in the fileset inode space, we make them
 * appear hashed, but do not put on any lists. hlist_del()
 * will work fine and require no locking.
 */
static inline void inode_fake_hash(struct inode *inode)
{
	hlist_add_fake(&inode->i_hash);
}

/*
 * inode->i_mutex nesting subclasses for the lock validator:
 *
 * 0: the object of the current VFS operation
 * 1: parent
 * 2: child/target
 * 3: xattr
 * 4: second non-directory
 * 5: second parent (when locking independent directories in rename)
 *
 * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
 * non-directories at once.
 *
 * The locking order between these classes is
 * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
 */
enum inode_i_mutex_lock_class
{
	I_MUTEX_NORMAL,
	I_MUTEX_PARENT,
	I_MUTEX_CHILD,
	I_MUTEX_XATTR,
	I_MUTEX_NONDIR2,
	I_MUTEX_PARENT2,
};

static inline void inode_lock(struct inode *inode)
{
	down_write(&inode->i_rwsem);
}

static inline void inode_unlock(struct inode *inode)
{
	up_write(&inode->i_rwsem);
}

static inline void inode_lock_shared(struct inode *inode)
{
	down_read(&inode->i_rwsem);
}

static inline void inode_unlock_shared(struct inode *inode)
{
	up_read(&inode->i_rwsem);
}

static inline int inode_trylock(struct inode *inode)
{
	return down_write_trylock(&inode->i_rwsem);
}

static inline int inode_trylock_shared(struct inode *inode)
{
	return down_read_trylock(&inode->i_rwsem);
}

static inline int inode_is_locked(struct inode *inode)
{
	return rwsem_is_locked(&inode->i_rwsem);
}

static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
{
	down_write_nested(&inode->i_rwsem, subclass);
}

static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
{
	down_read_nested(&inode->i_rwsem, subclass);
}

static inline void filemap_invalidate_lock(struct address_space *mapping)
{
	down_write(&mapping->invalidate_lock);
}

static inline void filemap_invalidate_unlock(struct address_space *mapping)
{
	up_write(&mapping->invalidate_lock);
}

static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
{
	down_read(&mapping->invalidate_lock);
}

static inline int filemap_invalidate_trylock_shared(
					struct address_space *mapping)
{
	return down_read_trylock(&mapping->invalidate_lock);
}

static inline void filemap_invalidate_unlock_shared(
					struct address_space *mapping)
{
	up_read(&mapping->invalidate_lock);
}

void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);

void filemap_invalidate_lock_two(struct address_space *mapping1,
				 struct address_space *mapping2);
void filemap_invalidate_unlock_two(struct address_space *mapping1,
				   struct address_space *mapping2);
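/*
 * A minimal sketch of the invalidate_lock convention described in the
 * struct address_space kernel-doc earlier in this header: hold it for write
 * while removing page cache so that faults and reads cannot re-instantiate
 * the pages. The helper name is hypothetical; truncate_pagecache_range()
 * comes from <linux/mm.h>, and the filesystem-specific block removal is
 * left out.
 */
static inline void example_punch_hole_pagecache(struct inode *inode,
						loff_t start, loff_t end)
{
	filemap_invalidate_lock(inode->i_mapping);
	truncate_pagecache_range(inode, start, end);
	/* ... the filesystem would free the underlying blocks here ... */
	filemap_invalidate_unlock(inode->i_mapping);
}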

/*
 * NOTE: in a 32bit arch with a preemptible kernel and
 * a UP compile the i_size_read/write must be atomic
 * with respect to the local cpu (unlike with preempt disabled),
 * but they don't need to be atomic with respect to other cpus like in
 * true SMP (so they either need to locally disable irq around
 * the read, or, for example on x86, they can still be implemented as a
 * cmpxchg8b without the need of the lock prefix). For SMP compiles
 * and 64bit archs it makes no difference if preempt is enabled or not.
 */
static inline loff_t i_size_read(const struct inode *inode)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	loff_t i_size;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&inode->i_size_seqcount);
		i_size = inode->i_size;
	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
	return i_size;
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
	loff_t i_size;

	preempt_disable();
	i_size = inode->i_size;
	preempt_enable();
	return i_size;
#else
	return inode->i_size;
#endif
}

/*
 * NOTE: unlike i_size_read(), i_size_write() does need locking around it
 * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
 * can be lost, resulting in subsequent i_size_read() calls spinning forever.
 */
static inline void i_size_write(struct inode *inode, loff_t i_size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
	preempt_disable();
	write_seqcount_begin(&inode->i_size_seqcount);
	inode->i_size = i_size;
	write_seqcount_end(&inode->i_size_seqcount);
	preempt_enable();
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
	preempt_disable();
	inode->i_size = i_size;
	preempt_enable();
#else
	inode->i_size = i_size;
#endif
}

static inline unsigned iminor(const struct inode *inode)
{
	return MINOR(inode->i_rdev);
}

static inline unsigned imajor(const struct inode *inode)
{
	return MAJOR(inode->i_rdev);
}

struct fown_struct {
	rwlock_t lock;		/* protects pid, uid, euid fields */
	struct pid *pid;	/* pid or -pgrp where SIGIO should be sent */
	enum pid_type pid_type;	/* Kind of process group SIGIO should be sent to */
	kuid_t uid, euid;	/* uid/euid of process setting the owner */
	int signum;		/* posix.1b rt signal to be delivered on IO */
};

/**
 * struct file_ra_state - Track a file's readahead state.
 * @start: Where the most recent readahead started.
 * @size: Number of pages read in the most recent readahead.
 * @async_size: Number of pages that were/are not needed immediately
 *	and so were/are genuinely "ahead". Start next readahead when
 *	the first of these pages is accessed.
 * @ra_pages: Maximum size of a readahead request, copied from the bdi.
 * @mmap_miss: How many mmap accesses missed in the page cache.
 * @prev_pos: The last byte in the most recent read request.
 *
 * When this structure is passed to ->readahead(), the "most recent"
 * readahead means the current readahead.
 */
struct file_ra_state {
	pgoff_t start;
	unsigned int size;
	unsigned int async_size;
	unsigned int ra_pages;
	unsigned int mmap_miss;
	loff_t prev_pos;
};
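/*
 * A minimal sketch of the serialization that the i_size_write() comment
 * above asks for: callers normally update i_size under the inode lock.
 * The helper name is hypothetical.
 */
static inline void example_set_size(struct inode *inode, loff_t new_size)
{
	inode_lock(inode);
	i_size_write(inode, new_size);
	inode_unlock(inode);
}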

/*
 * Check if @index falls in the readahead windows.
 */
static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
{
	return (index >= ra->start &&
		index <  ra->start + ra->size);
}

struct file {
	union {
		struct llist_node	fu_llist;
		struct rcu_head 	fu_rcuhead;
	} f_u;
	struct path		f_path;
	struct inode		*f_inode;	/* cached value */
	const struct file_operations	*f_op;

	/*
	 * Protects f_ep, f_flags.
	 * Must not be taken from IRQ context.
	 */
	spinlock_t		f_lock;
	atomic_long_t		f_count;
	unsigned int 		f_flags;
	fmode_t			f_mode;
	struct mutex		f_pos_lock;
	loff_t			f_pos;
	struct fown_struct	f_owner;
	const struct cred	*f_cred;
	struct file_ra_state	f_ra;

	u64			f_version;
#ifdef CONFIG_SECURITY
	void			*f_security;
#endif
	/* needed for tty driver, and maybe others */
	void			*private_data;

#ifdef CONFIG_EPOLL
	/* Used by fs/eventpoll.c to link all the hooks to this file */
	struct hlist_head	*f_ep;
#endif /* #ifdef CONFIG_EPOLL */
	struct address_space	*f_mapping;
	errseq_t		f_wb_err;
	errseq_t		f_sb_err; /* for syncfs */
} __randomize_layout
  __attribute__((aligned(4)));	/* lest something weird decides that 2 is OK */

struct file_handle {
	__u32 handle_bytes;
	int handle_type;
	/* file identifier */
	unsigned char f_handle[];
};

static inline struct file *get_file(struct file *f)
{
	atomic_long_inc(&f->f_count);
	return f;
}
#define get_file_rcu_many(x, cnt)	\
	atomic_long_add_unless(&(x)->f_count, (cnt), 0)
#define get_file_rcu(x) get_file_rcu_many((x), 1)
#define file_count(x)	atomic_long_read(&(x)->f_count)

#define	MAX_NON_LFS	((1UL<<31) - 1)

/* Page cache limit. The filesystems should put that into their s_maxbytes
   limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
#define MAX_LFS_FILESIZE	((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64
#define MAX_LFS_FILESIZE 	((loff_t)LLONG_MAX)
#endif

#define FL_POSIX	1
#define FL_FLOCK	2
#define FL_DELEG	4	/* NFSv4 delegation */
#define FL_ACCESS	8	/* not trying to lock, just looking */
#define FL_EXISTS	16	/* when unlocking, test for existence */
#define FL_LEASE	32	/* lease held on this file */
#define FL_CLOSE	64	/* unlock on close */
#define FL_SLEEP	128	/* A blocking lock */
#define FL_DOWNGRADE_PENDING	256 /* Lease is being downgraded */
#define FL_UNLOCK_PENDING	512 /* Lease is being broken */
#define FL_OFDLCK	1024	/* lock is "owned" by struct file */
#define FL_LAYOUT	2048	/* outstanding pNFS layout */
#define FL_RECLAIM	4096	/* reclaiming from a reboot server */

#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
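/*
 * A minimal sketch of taking an extra reference on a struct file with
 * get_file() and dropping it again with fput() (declared in <linux/file.h>).
 * The helper is hypothetical and assumes the caller already owns one
 * reference to @filp.
 */
static inline void example_borrow_file(struct file *filp)
{
	get_file(filp);		/* at least two references exist now */
	/* ... use filp, possibly handing it to another context ... */
	fput(filp);		/* drop the extra reference */
}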

/*
 * Special return value from posix_lock_file() and vfs_lock_file() for
 * asynchronous locking.
 */
#define FILE_LOCK_DEFERRED 1

/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;

struct file_lock;

struct file_lock_operations {
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
	void (*fl_release_private)(struct file_lock *);
};

struct lock_manager_operations {
	fl_owner_t (*lm_get_owner)(fl_owner_t);
	void (*lm_put_owner)(fl_owner_t);
	void (*lm_notify)(struct file_lock *);	/* unblock callback */
	int (*lm_grant)(struct file_lock *, int);
	bool (*lm_break)(struct file_lock *);
	int (*lm_change)(struct file_lock *, int, struct list_head *);
	void (*lm_setup)(struct file_lock *, void **);
	bool (*lm_breaker_owns_lease)(struct file_lock *);
};

struct lock_manager {
	struct list_head list;
	/*
	 * NFSv4 and up also want opens blocked during the grace period;
	 * NLM doesn't care:
	 */
	bool block_opens;
};

struct net;
void locks_start_grace(struct net *, struct lock_manager *);
void locks_end_grace(struct lock_manager *);
bool locks_in_grace(struct net *);
bool opens_in_grace(struct net *);

/* that will die - we need it for nfs_lock_info */
#include <linux/nfs_fs_i.h>

/*
 * struct file_lock represents a generic "file lock". It's used to represent
 * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
 * note that the same struct is used to represent both a request for a lock and
 * the lock itself, but the same object is never used for both.
 *
 * FIXME: should we create a separate "struct lock_request" to help distinguish
 * these two uses?
 *
 * The various i_flctx lists are ordered by:
 *
 * 1) lock owner
 * 2) lock range start
 * 3) lock range end
 *
 * Obviously, the last two criteria only matter for POSIX locks.
 */
struct file_lock {
	struct file_lock *fl_blocker;	/* The lock, that is blocking us */
	struct list_head fl_list;	/* link into file_lock_context */
	struct hlist_node fl_link;	/* node in global lists */
	struct list_head fl_blocked_requests;	/* list of requests with
						 * ->fl_blocker pointing here
						 */
	struct list_head fl_blocked_member;	/* node in
						 * ->fl_blocker->fl_blocked_requests
						 */
	fl_owner_t fl_owner;
	unsigned int fl_flags;
	unsigned char fl_type;
	unsigned int fl_pid;
	int fl_link_cpu;		/* what cpu's list is this on?
*/ 1092 wait_queue_head_t fl_wait; 1093 struct file *fl_file; 1094 loff_t fl_start; 1095 loff_t fl_end; 1096 1097 struct fasync_struct * fl_fasync; /* for lease break notifications */ 1098 /* for lease breaks: */ 1099 unsigned long fl_break_time; 1100 unsigned long fl_downgrade_time; 1101 1102 const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ 1103 const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ 1104 union { 1105 struct nfs_lock_info nfs_fl; 1106 struct nfs4_lock_info nfs4_fl; 1107 struct { 1108 struct list_head link; /* link in AFS vnode's pending_locks list */ 1109 int state; /* state of grant or error if -ve */ 1110 unsigned int debug_id; 1111 } afs; 1112 } fl_u; 1113} __randomize_layout; 1114 1115struct file_lock_context { 1116 spinlock_t flc_lock; 1117 struct list_head flc_flock; 1118 struct list_head flc_posix; 1119 struct list_head flc_lease; 1120}; 1121 1122/* The following constant reflects the upper bound of the file/locking space */ 1123#ifndef OFFSET_MAX 1124#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) 1125#define OFFSET_MAX INT_LIMIT(loff_t) 1126#define OFFT_OFFSET_MAX INT_LIMIT(off_t) 1127#endif 1128 1129extern void send_sigio(struct fown_struct *fown, int fd, int band); 1130 1131#define locks_inode(f) file_inode(f) 1132 1133#ifdef CONFIG_FILE_LOCKING 1134extern int fcntl_getlk(struct file *, unsigned int, struct flock *); 1135extern int fcntl_setlk(unsigned int, struct file *, unsigned int, 1136 struct flock *); 1137 1138#if BITS_PER_LONG == 32 1139extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *); 1140extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, 1141 struct flock64 *); 1142#endif 1143 1144extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); 1145extern int fcntl_getlease(struct file *filp); 1146 1147/* fs/locks.c */ 1148void locks_free_lock_context(struct inode *inode); 1149void locks_free_lock(struct file_lock *fl); 1150extern void locks_init_lock(struct file_lock *); 1151extern struct file_lock * locks_alloc_lock(void); 1152extern void locks_copy_lock(struct file_lock *, struct file_lock *); 1153extern void locks_copy_conflock(struct file_lock *, struct file_lock *); 1154extern void locks_remove_posix(struct file *, fl_owner_t); 1155extern void locks_remove_file(struct file *); 1156extern void locks_release_private(struct file_lock *); 1157extern void posix_test_lock(struct file *, struct file_lock *); 1158extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); 1159extern int locks_delete_block(struct file_lock *); 1160extern int vfs_test_lock(struct file *, struct file_lock *); 1161extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); 1162extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); 1163extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); 1164extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); 1165extern void lease_get_mtime(struct inode *, struct timespec64 *time); 1166extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); 1167extern int vfs_setlease(struct file *, long, struct file_lock **, void **); 1168extern int lease_modify(struct file_lock *, int, struct list_head *); 1169 1170struct notifier_block; 1171extern int lease_register_notifier(struct notifier_block *); 1172extern void lease_unregister_notifier(struct notifier_block *); 1173 1174struct files_struct; 
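/*
 * A rough sketch of probing for a conflicting POSIX lock with
 * posix_test_lock(), loosely modelled on what fcntl(F_GETLK) does. It
 * assumes a classic per-process lock owner (current->files, which needs
 * <linux/sched.h> and <linux/fdtable.h>); OFD-style owners and error
 * handling are omitted, and the helper name is hypothetical.
 */
static inline bool example_whole_file_write_lockable(struct file *filp)
{
	struct file_lock fl;

	locks_init_lock(&fl);
	fl.fl_flags = FL_POSIX;
	fl.fl_type  = F_WRLCK;
	fl.fl_start = 0;
	fl.fl_end   = OFFSET_MAX;
	fl.fl_owner = current->files;
	fl.fl_file  = filp;

	posix_test_lock(filp, &fl);
	/* posix_test_lock() sets fl_type to F_UNLCK when nothing conflicts. */
	return fl.fl_type == F_UNLCK;
}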
1175extern void show_fd_locks(struct seq_file *f, 1176 struct file *filp, struct files_struct *files); 1177#else /* !CONFIG_FILE_LOCKING */ 1178static inline int fcntl_getlk(struct file *file, unsigned int cmd, 1179 struct flock __user *user) 1180{ 1181 return -EINVAL; 1182} 1183 1184static inline int fcntl_setlk(unsigned int fd, struct file *file, 1185 unsigned int cmd, struct flock __user *user) 1186{ 1187 return -EACCES; 1188} 1189 1190#if BITS_PER_LONG == 32 1191static inline int fcntl_getlk64(struct file *file, unsigned int cmd, 1192 struct flock64 *user) 1193{ 1194 return -EINVAL; 1195} 1196 1197static inline int fcntl_setlk64(unsigned int fd, struct file *file, 1198 unsigned int cmd, struct flock64 *user) 1199{ 1200 return -EACCES; 1201} 1202#endif 1203static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) 1204{ 1205 return -EINVAL; 1206} 1207 1208static inline int fcntl_getlease(struct file *filp) 1209{ 1210 return F_UNLCK; 1211} 1212 1213static inline void 1214locks_free_lock_context(struct inode *inode) 1215{ 1216} 1217 1218static inline void locks_init_lock(struct file_lock *fl) 1219{ 1220 return; 1221} 1222 1223static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) 1224{ 1225 return; 1226} 1227 1228static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl) 1229{ 1230 return; 1231} 1232 1233static inline void locks_remove_posix(struct file *filp, fl_owner_t owner) 1234{ 1235 return; 1236} 1237 1238static inline void locks_remove_file(struct file *filp) 1239{ 1240 return; 1241} 1242 1243static inline void posix_test_lock(struct file *filp, struct file_lock *fl) 1244{ 1245 return; 1246} 1247 1248static inline int posix_lock_file(struct file *filp, struct file_lock *fl, 1249 struct file_lock *conflock) 1250{ 1251 return -ENOLCK; 1252} 1253 1254static inline int locks_delete_block(struct file_lock *waiter) 1255{ 1256 return -ENOENT; 1257} 1258 1259static inline int vfs_test_lock(struct file *filp, struct file_lock *fl) 1260{ 1261 return 0; 1262} 1263 1264static inline int vfs_lock_file(struct file *filp, unsigned int cmd, 1265 struct file_lock *fl, struct file_lock *conf) 1266{ 1267 return -ENOLCK; 1268} 1269 1270static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) 1271{ 1272 return 0; 1273} 1274 1275static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) 1276{ 1277 return -ENOLCK; 1278} 1279 1280static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) 1281{ 1282 return 0; 1283} 1284 1285static inline void lease_get_mtime(struct inode *inode, 1286 struct timespec64 *time) 1287{ 1288 return; 1289} 1290 1291static inline int generic_setlease(struct file *filp, long arg, 1292 struct file_lock **flp, void **priv) 1293{ 1294 return -EINVAL; 1295} 1296 1297static inline int vfs_setlease(struct file *filp, long arg, 1298 struct file_lock **lease, void **priv) 1299{ 1300 return -EINVAL; 1301} 1302 1303static inline int lease_modify(struct file_lock *fl, int arg, 1304 struct list_head *dispose) 1305{ 1306 return -EINVAL; 1307} 1308 1309struct files_struct; 1310static inline void show_fd_locks(struct seq_file *f, 1311 struct file *filp, struct files_struct *files) {} 1312#endif /* !CONFIG_FILE_LOCKING */ 1313 1314static inline struct inode *file_inode(const struct file *f) 1315{ 1316 return f->f_inode; 1317} 1318 1319static inline struct dentry *file_dentry(const struct file *file) 1320{ 1321 return d_real(file->f_path.dentry, 
file_inode(file)); 1322} 1323 1324static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) 1325{ 1326 return locks_lock_inode_wait(locks_inode(filp), fl); 1327} 1328 1329struct fasync_struct { 1330 rwlock_t fa_lock; 1331 int magic; 1332 int fa_fd; 1333 struct fasync_struct *fa_next; /* singly linked list */ 1334 struct file *fa_file; 1335 struct rcu_head fa_rcu; 1336}; 1337 1338#define FASYNC_MAGIC 0x4601 1339 1340/* SMP safe fasync helpers: */ 1341extern int fasync_helper(int, struct file *, int, struct fasync_struct **); 1342extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *); 1343extern int fasync_remove_entry(struct file *, struct fasync_struct **); 1344extern struct fasync_struct *fasync_alloc(void); 1345extern void fasync_free(struct fasync_struct *); 1346 1347/* can be called from interrupts */ 1348extern void kill_fasync(struct fasync_struct **, int, int); 1349 1350extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); 1351extern int f_setown(struct file *filp, unsigned long arg, int force); 1352extern void f_delown(struct file *filp); 1353extern pid_t f_getown(struct file *filp); 1354extern int send_sigurg(struct fown_struct *fown); 1355 1356/* 1357 * sb->s_flags. Note that these mirror the equivalent MS_* flags where 1358 * represented in both. 1359 */ 1360#define SB_RDONLY 1 /* Mount read-only */ 1361#define SB_NOSUID 2 /* Ignore suid and sgid bits */ 1362#define SB_NODEV 4 /* Disallow access to device special files */ 1363#define SB_NOEXEC 8 /* Disallow program execution */ 1364#define SB_SYNCHRONOUS 16 /* Writes are synced at once */ 1365#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */ 1366#define SB_DIRSYNC 128 /* Directory modifications are synchronous */ 1367#define SB_NOATIME 1024 /* Do not update access times. 
*/ 1368#define SB_NODIRATIME 2048 /* Do not update directory access times */ 1369#define SB_SILENT 32768 1370#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */ 1371#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */ 1372#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */ 1373#define SB_I_VERSION (1<<23) /* Update inode I_version field */ 1374#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ 1375 1376/* These sb flags are internal to the kernel */ 1377#define SB_SUBMOUNT (1<<26) 1378#define SB_FORCE (1<<27) 1379#define SB_NOSEC (1<<28) 1380#define SB_BORN (1<<29) 1381#define SB_ACTIVE (1<<30) 1382#define SB_NOUSER (1<<31) 1383 1384/* These flags relate to encoding and casefolding */ 1385#define SB_ENC_STRICT_MODE_FL (1 << 0) 1386 1387#define sb_has_strict_encoding(sb) \ 1388 (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL) 1389 1390/* 1391 * Umount options 1392 */ 1393 1394#define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */ 1395#define MNT_DETACH 0x00000002 /* Just detach from the tree */ 1396#define MNT_EXPIRE 0x00000004 /* Mark for expiry */ 1397#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ 1398#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ 1399 1400/* sb->s_iflags */ 1401#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ 1402#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ 1403#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ 1404#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */ 1405 1406/* sb->s_iflags to limit user namespace mounts */ 1407#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ 1408#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 1409#define SB_I_UNTRUSTED_MOUNTER 0x00000040 1410 1411#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ 1412#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ 1413#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */ 1414 1415/* Possible states of 'frozen' field */ 1416enum { 1417 SB_UNFROZEN = 0, /* FS is unfrozen */ 1418 SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */ 1419 SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */ 1420 SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop 1421 * internal threads if needed) */ 1422 SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */ 1423}; 1424 1425#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) 1426 1427struct sb_writers { 1428 int frozen; /* Is sb frozen? 
*/ 1429 wait_queue_head_t wait_unfrozen; /* wait for thaw */ 1430 struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS]; 1431}; 1432 1433struct super_block { 1434 struct list_head s_list; /* Keep this first */ 1435 dev_t s_dev; /* search index; _not_ kdev_t */ 1436 unsigned char s_blocksize_bits; 1437 unsigned long s_blocksize; 1438 loff_t s_maxbytes; /* Max file size */ 1439 struct file_system_type *s_type; 1440 const struct super_operations *s_op; 1441 const struct dquot_operations *dq_op; 1442 const struct quotactl_ops *s_qcop; 1443 const struct export_operations *s_export_op; 1444 unsigned long s_flags; 1445 unsigned long s_iflags; /* internal SB_I_* flags */ 1446 unsigned long s_magic; 1447 struct dentry *s_root; 1448 struct rw_semaphore s_umount; 1449 int s_count; 1450 atomic_t s_active; 1451#ifdef CONFIG_SECURITY 1452 void *s_security; 1453#endif 1454 const struct xattr_handler **s_xattr; 1455#ifdef CONFIG_FS_ENCRYPTION 1456 const struct fscrypt_operations *s_cop; 1457 struct key *s_master_keys; /* master crypto keys in use */ 1458#endif 1459#ifdef CONFIG_FS_VERITY 1460 const struct fsverity_operations *s_vop; 1461#endif 1462#if IS_ENABLED(CONFIG_UNICODE) 1463 struct unicode_map *s_encoding; 1464 __u16 s_encoding_flags; 1465#endif 1466 struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ 1467 struct list_head s_mounts; /* list of mounts; _not_ for fs use */ 1468 struct block_device *s_bdev; 1469 struct backing_dev_info *s_bdi; 1470 struct mtd_info *s_mtd; 1471 struct hlist_node s_instances; 1472 unsigned int s_quota_types; /* Bitmask of supported quota types */ 1473 struct quota_info s_dquot; /* Diskquota specific options */ 1474 1475 struct sb_writers s_writers; 1476 1477 /* 1478 * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and 1479 * s_fsnotify_marks together for cache efficiency. They are frequently 1480 * accessed and rarely modified. 1481 */ 1482 void *s_fs_info; /* Filesystem private info */ 1483 1484 /* Granularity of c/m/atime in ns (cannot be worse than a second) */ 1485 u32 s_time_gran; 1486 /* Time limits for c/m/atime in seconds */ 1487 time64_t s_time_min; 1488 time64_t s_time_max; 1489#ifdef CONFIG_FSNOTIFY 1490 __u32 s_fsnotify_mask; 1491 struct fsnotify_mark_connector __rcu *s_fsnotify_marks; 1492#endif 1493 1494 char s_id[32]; /* Informational name */ 1495 uuid_t s_uuid; /* UUID */ 1496 1497 unsigned int s_max_links; 1498 fmode_t s_mode; 1499 1500 /* 1501 * The next field is for VFS *only*. No filesystems have any business 1502 * even looking at it. You had been warned. 1503 */ 1504 struct mutex s_vfs_rename_mutex; /* Kludge */ 1505 1506 /* 1507 * Filesystem subtype. If non-empty the filesystem type field 1508 * in /proc/mounts will be "type.subtype" 1509 */ 1510 const char *s_subtype; 1511 1512 const struct dentry_operations *s_d_op; /* default d_op for dentries */ 1513 1514 struct shrinker s_shrink; /* per-sb shrinker handle */ 1515 1516 /* Number of inodes with nlink == 0 but still referenced */ 1517 atomic_long_t s_remove_count; 1518 1519 /* 1520 * Number of inode/mount/sb objects that are being watched, note that 1521 * inodes objects are currently double-accounted. 
1522 */ 1523 atomic_long_t s_fsnotify_connectors; 1524 1525 /* Being remounted read-only */ 1526 int s_readonly_remount; 1527 1528 /* per-sb errseq_t for reporting writeback errors via syncfs */ 1529 errseq_t s_wb_err; 1530 1531 /* AIO completions deferred from interrupt context */ 1532 struct workqueue_struct *s_dio_done_wq; 1533 struct hlist_head s_pins; 1534 1535 /* 1536 * Owning user namespace and default context in which to 1537 * interpret filesystem uids, gids, quotas, device nodes, 1538 * xattrs and security labels. 1539 */ 1540 struct user_namespace *s_user_ns; 1541 1542 /* 1543 * The list_lru structure is essentially just a pointer to a table 1544 * of per-node lru lists, each of which has its own spinlock. 1545 * There is no need to put them into separate cachelines. 1546 */ 1547 struct list_lru s_dentry_lru; 1548 struct list_lru s_inode_lru; 1549 struct rcu_head rcu; 1550 struct work_struct destroy_work; 1551 1552 struct mutex s_sync_lock; /* sync serialisation lock */ 1553 1554 /* 1555 * Indicates how deep in a filesystem stack this SB is 1556 */ 1557 int s_stack_depth; 1558 1559 /* s_inode_list_lock protects s_inodes */ 1560 spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; 1561 struct list_head s_inodes; /* all inodes */ 1562 1563 spinlock_t s_inode_wblist_lock; 1564 struct list_head s_inodes_wb; /* writeback inodes */ 1565} __randomize_layout; 1566 1567static inline struct user_namespace *i_user_ns(const struct inode *inode) 1568{ 1569 return inode->i_sb->s_user_ns; 1570} 1571 1572/* Helper functions so that in most cases filesystems will 1573 * not need to deal directly with kuid_t and kgid_t and can 1574 * instead deal with the raw numeric values that are stored 1575 * in the filesystem. 1576 */ 1577static inline uid_t i_uid_read(const struct inode *inode) 1578{ 1579 return from_kuid(i_user_ns(inode), inode->i_uid); 1580} 1581 1582static inline gid_t i_gid_read(const struct inode *inode) 1583{ 1584 return from_kgid(i_user_ns(inode), inode->i_gid); 1585} 1586 1587static inline void i_uid_write(struct inode *inode, uid_t uid) 1588{ 1589 inode->i_uid = make_kuid(i_user_ns(inode), uid); 1590} 1591 1592static inline void i_gid_write(struct inode *inode, gid_t gid) 1593{ 1594 inode->i_gid = make_kgid(i_user_ns(inode), gid); 1595} 1596 1597/** 1598 * i_uid_into_mnt - map an inode's i_uid down into a mnt_userns 1599 * @mnt_userns: user namespace of the mount the inode was found from 1600 * @inode: inode to map 1601 * 1602 * Return: the inode's i_uid mapped down according to @mnt_userns. 1603 * If the inode's i_uid has no mapping INVALID_UID is returned. 1604 */ 1605static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns, 1606 const struct inode *inode) 1607{ 1608 return mapped_kuid_fs(mnt_userns, i_user_ns(inode), inode->i_uid); 1609} 1610 1611/** 1612 * i_gid_into_mnt - map an inode's i_gid down into a mnt_userns 1613 * @mnt_userns: user namespace of the mount the inode was found from 1614 * @inode: inode to map 1615 * 1616 * Return: the inode's i_gid mapped down according to @mnt_userns. 1617 * If the inode's i_gid has no mapping INVALID_GID is returned. 
1618 */ 1619static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns, 1620 const struct inode *inode) 1621{ 1622 return mapped_kgid_fs(mnt_userns, i_user_ns(inode), inode->i_gid); 1623} 1624 1625/** 1626 * inode_fsuid_set - initialize inode's i_uid field with callers fsuid 1627 * @inode: inode to initialize 1628 * @mnt_userns: user namespace of the mount the inode was found from 1629 * 1630 * Initialize the i_uid field of @inode. If the inode was found/created via 1631 * an idmapped mount map the caller's fsuid according to @mnt_users. 1632 */ 1633static inline void inode_fsuid_set(struct inode *inode, 1634 struct user_namespace *mnt_userns) 1635{ 1636 inode->i_uid = mapped_fsuid(mnt_userns, i_user_ns(inode)); 1637} 1638 1639/** 1640 * inode_fsgid_set - initialize inode's i_gid field with callers fsgid 1641 * @inode: inode to initialize 1642 * @mnt_userns: user namespace of the mount the inode was found from 1643 * 1644 * Initialize the i_gid field of @inode. If the inode was found/created via 1645 * an idmapped mount map the caller's fsgid according to @mnt_users. 1646 */ 1647static inline void inode_fsgid_set(struct inode *inode, 1648 struct user_namespace *mnt_userns) 1649{ 1650 inode->i_gid = mapped_fsgid(mnt_userns, i_user_ns(inode)); 1651} 1652 1653/** 1654 * fsuidgid_has_mapping() - check whether caller's fsuid/fsgid is mapped 1655 * @sb: the superblock we want a mapping in 1656 * @mnt_userns: user namespace of the relevant mount 1657 * 1658 * Check whether the caller's fsuid and fsgid have a valid mapping in the 1659 * s_user_ns of the superblock @sb. If the caller is on an idmapped mount map 1660 * the caller's fsuid and fsgid according to the @mnt_userns first. 1661 * 1662 * Return: true if fsuid and fsgid is mapped, false if not. 1663 */ 1664static inline bool fsuidgid_has_mapping(struct super_block *sb, 1665 struct user_namespace *mnt_userns) 1666{ 1667 struct user_namespace *fs_userns = sb->s_user_ns; 1668 kuid_t kuid; 1669 kgid_t kgid; 1670 1671 kuid = mapped_fsuid(mnt_userns, fs_userns); 1672 if (!uid_valid(kuid)) 1673 return false; 1674 kgid = mapped_fsgid(mnt_userns, fs_userns); 1675 if (!gid_valid(kgid)) 1676 return false; 1677 return kuid_has_mapping(fs_userns, kuid) && 1678 kgid_has_mapping(fs_userns, kgid); 1679} 1680 1681extern struct timespec64 current_time(struct inode *inode); 1682 1683/* 1684 * Snapshotting support. 1685 */ 1686 1687/* 1688 * These are internal functions, please use sb_start_{write,pagefault,intwrite} 1689 * instead. 1690 */ 1691static inline void __sb_end_write(struct super_block *sb, int level) 1692{ 1693 percpu_up_read(sb->s_writers.rw_sem + level-1); 1694} 1695 1696static inline void __sb_start_write(struct super_block *sb, int level) 1697{ 1698 percpu_down_read(sb->s_writers.rw_sem + level - 1); 1699} 1700 1701static inline bool __sb_start_write_trylock(struct super_block *sb, int level) 1702{ 1703 return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1); 1704} 1705 1706#define __sb_writers_acquired(sb, lev) \ 1707 percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) 1708#define __sb_writers_release(sb, lev) \ 1709 percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) 1710 1711/** 1712 * sb_end_write - drop write access to a superblock 1713 * @sb: the super we wrote to 1714 * 1715 * Decrement number of writers to the filesystem. Wake up possible waiters 1716 * wanting to freeze the filesystem. 
 */
static inline void sb_end_write(struct super_block *sb)
{
	__sb_end_write(sb, SB_FREEZE_WRITE);
}

/**
 * sb_end_pagefault - drop write access to a superblock from a page fault
 * @sb: the super we wrote to
 *
 * Decrement number of processes handling write page fault to the filesystem.
 * Wake up possible waiters wanting to freeze the filesystem.
 */
static inline void sb_end_pagefault(struct super_block *sb)
{
	__sb_end_write(sb, SB_FREEZE_PAGEFAULT);
}

/**
 * sb_end_intwrite - drop write access to a superblock for internal fs purposes
 * @sb: the super we wrote to
 *
 * Decrement fs-internal number of writers to the filesystem. Wake up possible
 * waiters wanting to freeze the filesystem.
 */
static inline void sb_end_intwrite(struct super_block *sb)
{
	__sb_end_write(sb, SB_FREEZE_FS);
}

/**
 * sb_start_write - get write access to a superblock
 * @sb: the super we write to
 *
 * When a process wants to write data or metadata to a file system (i.e. dirty
 * a page or an inode), it should embed the operation in a sb_start_write() -
 * sb_end_write() pair to get exclusion against file system freezing. This
 * function increments number of writers preventing freezing. If the file
 * system is already frozen, the function waits until the file system is
 * thawed.
 *
 * Since freeze protection behaves as a lock, users have to preserve
 * ordering of freeze protection and other filesystem locks. Generally,
 * freeze protection should be the outermost lock. In particular, we have:
 *
 * sb_start_write
 *   -> i_mutex			(write path, truncate, directory ops, ...)
 *   -> s_umount		(freeze_super, thaw_super)
 */
static inline void sb_start_write(struct super_block *sb)
{
	__sb_start_write(sb, SB_FREEZE_WRITE);
}

static inline bool sb_start_write_trylock(struct super_block *sb)
{
	return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
}

/**
 * sb_start_pagefault - get write access to a superblock from a page fault
 * @sb: the super we write to
 *
 * When a process starts handling write page fault, it should embed the
 * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
 * exclusion against file system freezing. This is needed since the page fault
 * is going to dirty a page. This function increments number of running page
 * faults preventing freezing. If the file system is already frozen, the
 * function waits until the file system is thawed.
 *
 * Since page fault freeze protection behaves as a lock, users have to preserve
 * ordering of freeze protection and other filesystem locks. It is advised to
 * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
 * handling code implies lock dependency:
 *
 * mmap_lock
 *   -> sb_start_pagefault
 */
static inline void sb_start_pagefault(struct super_block *sb)
{
	__sb_start_write(sb, SB_FREEZE_PAGEFAULT);
}
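/*
 * A minimal sketch of the pairing and lock ordering described in the
 * sb_start_write() kernel-doc above: freeze protection is taken outermost,
 * before the inode lock. Both example_* names are hypothetical.
 */
int example_do_modification(struct inode *inode);	/* hypothetical */

static inline int example_modify_inode(struct inode *inode)
{
	int ret;

	sb_start_write(inode->i_sb);
	inode_lock(inode);
	ret = example_do_modification(inode);
	inode_unlock(inode);
	sb_end_write(inode->i_sb);
	return ret;
}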
1807 * 1808 * For example filesystem can call sb_start_intwrite() when starting a 1809 * transaction which somewhat eases handling of freezing for internal sources 1810 * of filesystem changes (internal fs threads, discarding preallocation on file 1811 * close, etc.). 1812 */ 1813static inline void sb_start_intwrite(struct super_block *sb) 1814{ 1815 __sb_start_write(sb, SB_FREEZE_FS); 1816} 1817 1818static inline bool sb_start_intwrite_trylock(struct super_block *sb) 1819{ 1820 return __sb_start_write_trylock(sb, SB_FREEZE_FS); 1821} 1822 1823bool inode_owner_or_capable(struct user_namespace *mnt_userns, 1824 const struct inode *inode); 1825 1826/* 1827 * VFS helper functions.. 1828 */ 1829int vfs_create(struct user_namespace *, struct inode *, 1830 struct dentry *, umode_t, bool); 1831int vfs_mkdir(struct user_namespace *, struct inode *, 1832 struct dentry *, umode_t); 1833int vfs_mknod(struct user_namespace *, struct inode *, struct dentry *, 1834 umode_t, dev_t); 1835int vfs_symlink(struct user_namespace *, struct inode *, 1836 struct dentry *, const char *); 1837int vfs_link(struct dentry *, struct user_namespace *, struct inode *, 1838 struct dentry *, struct inode **); 1839int vfs_rmdir(struct user_namespace *, struct inode *, struct dentry *); 1840int vfs_unlink(struct user_namespace *, struct inode *, struct dentry *, 1841 struct inode **); 1842 1843/** 1844 * struct renamedata - contains all information required for renaming 1845 * @old_mnt_userns: old user namespace of the mount the inode was found from 1846 * @old_dir: parent of source 1847 * @old_dentry: source 1848 * @new_mnt_userns: new user namespace of the mount the inode was found from 1849 * @new_dir: parent of destination 1850 * @new_dentry: destination 1851 * @delegated_inode: returns an inode needing a delegation break 1852 * @flags: rename flags 1853 */ 1854struct renamedata { 1855 struct user_namespace *old_mnt_userns; 1856 struct inode *old_dir; 1857 struct dentry *old_dentry; 1858 struct user_namespace *new_mnt_userns; 1859 struct inode *new_dir; 1860 struct dentry *new_dentry; 1861 struct inode **delegated_inode; 1862 unsigned int flags; 1863} __randomize_layout; 1864 1865int vfs_rename(struct renamedata *); 1866 1867static inline int vfs_whiteout(struct user_namespace *mnt_userns, 1868 struct inode *dir, struct dentry *dentry) 1869{ 1870 return vfs_mknod(mnt_userns, dir, dentry, S_IFCHR | WHITEOUT_MODE, 1871 WHITEOUT_DEV); 1872} 1873 1874struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns, 1875 struct dentry *dentry, umode_t mode, int open_flag); 1876 1877int vfs_mkobj(struct dentry *, umode_t, 1878 int (*f)(struct dentry *, umode_t, void *), 1879 void *); 1880 1881int vfs_fchown(struct file *file, uid_t user, gid_t group); 1882int vfs_fchmod(struct file *file, umode_t mode); 1883int vfs_utimes(const struct path *path, struct timespec64 *times); 1884 1885extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 1886 1887#ifdef CONFIG_COMPAT 1888extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, 1889 unsigned long arg); 1890#else 1891#define compat_ptr_ioctl NULL 1892#endif 1893 1894/* 1895 * VFS file helper functions. 1896 */ 1897void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode, 1898 const struct inode *dir, umode_t mode); 1899extern bool may_open_dev(const struct path *path); 1900 1901/* 1902 * This is the "filldir" function type, used by readdir() to let 1903 * the kernel specify what kind of dirent layout it wants to have. 
1904 * This allows the kernel to read directories into kernel space or 1905 * to have different dirent layouts depending on the binary type. 1906 */ 1907struct dir_context; 1908typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, 1909 unsigned); 1910 1911struct dir_context { 1912 filldir_t actor; 1913 loff_t pos; 1914}; 1915 1916/* 1917 * These flags let !MMU mmap() govern direct device mapping vs immediate 1918 * copying more easily for MAP_PRIVATE, especially for ROM filesystems. 1919 * 1920 * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE) 1921 * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED) 1922 * NOMMU_MAP_READ: Can be mapped for reading 1923 * NOMMU_MAP_WRITE: Can be mapped for writing 1924 * NOMMU_MAP_EXEC: Can be mapped for execution 1925 */ 1926#define NOMMU_MAP_COPY 0x00000001 1927#define NOMMU_MAP_DIRECT 0x00000008 1928#define NOMMU_MAP_READ VM_MAYREAD 1929#define NOMMU_MAP_WRITE VM_MAYWRITE 1930#define NOMMU_MAP_EXEC VM_MAYEXEC 1931 1932#define NOMMU_VMFLAGS \ 1933 (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC) 1934 1935/* 1936 * These flags control the behavior of the remap_file_range function pointer. 1937 * If it is called with len == 0 that means "remap to end of source file". 1938 * See Documentation/filesystems/vfs.rst for more details about this call. 1939 * 1940 * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate) 1941 * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request 1942 */ 1943#define REMAP_FILE_DEDUP (1 << 0) 1944#define REMAP_FILE_CAN_SHORTEN (1 << 1) 1945 1946/* 1947 * These flags signal that the caller is ok with altering various aspects of 1948 * the behavior of the remap operation. The changes must be made by the 1949 * implementation; the vfs remap helper functions can take advantage of them. 1950 * Flags in this category exist to preserve the quirky behavior of the hoisted 1951 * btrfs clone/dedupe ioctls. 
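 *
 * Illustrative sketch (not taken from any particular filesystem): a
 * ->remap_file_range() implementation typically starts by rejecting flags
 * it does not understand,
 *
 *	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
 *		return -EINVAL;
 *
 * and, when REMAP_FILE_CAN_SHORTEN was passed, may return a byte count
 * smaller than the one requested.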
1952 */ 1953#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) 1954 1955struct iov_iter; 1956 1957struct file_operations { 1958 struct module *owner; 1959 loff_t (*llseek) (struct file *, loff_t, int); 1960 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); 1961 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); 1962 ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); 1963 ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); 1964 int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *, 1965 unsigned int flags); 1966 int (*iterate) (struct file *, struct dir_context *); 1967 int (*iterate_shared) (struct file *, struct dir_context *); 1968 __poll_t (*poll) (struct file *, struct poll_table_struct *); 1969 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); 1970 long (*compat_ioctl) (struct file *, unsigned int, unsigned long); 1971 int (*mmap) (struct file *, struct vm_area_struct *); 1972 unsigned long mmap_supported_flags; 1973 int (*open) (struct inode *, struct file *); 1974 int (*flush) (struct file *, fl_owner_t id); 1975 int (*release) (struct inode *, struct file *); 1976 int (*fsync) (struct file *, loff_t, loff_t, int datasync); 1977 int (*fasync) (int, struct file *, int); 1978 int (*lock) (struct file *, int, struct file_lock *); 1979 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); 1980 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 1981 int (*check_flags)(int); 1982 int (*flock) (struct file *, int, struct file_lock *); 1983 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); 1984 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); 1985 int (*setlease)(struct file *, long, struct file_lock **, void **); 1986 long (*fallocate)(struct file *file, int mode, loff_t offset, 1987 loff_t len); 1988 void (*show_fdinfo)(struct seq_file *m, struct file *f); 1989#ifndef CONFIG_MMU 1990 unsigned (*mmap_capabilities)(struct file *); 1991#endif 1992 ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, 1993 loff_t, size_t, unsigned int); 1994 loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in, 1995 struct file *file_out, loff_t pos_out, 1996 loff_t len, unsigned int remap_flags); 1997 int (*fadvise)(struct file *, loff_t, loff_t, int); 1998} __randomize_layout; 1999 2000struct inode_operations { 2001 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); 2002 const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); 2003 int (*permission) (struct user_namespace *, struct inode *, int); 2004 struct posix_acl * (*get_acl)(struct inode *, int, bool); 2005 2006 int (*readlink) (struct dentry *, char __user *,int); 2007 2008 int (*create) (struct user_namespace *, struct inode *,struct dentry *, 2009 umode_t, bool); 2010 int (*link) (struct dentry *,struct inode *,struct dentry *); 2011 int (*unlink) (struct inode *,struct dentry *); 2012 int (*symlink) (struct user_namespace *, struct inode *,struct dentry *, 2013 const char *); 2014 int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *, 2015 umode_t); 2016 int (*rmdir) (struct inode *,struct dentry *); 2017 int (*mknod) (struct user_namespace *, struct inode *,struct dentry *, 2018 umode_t,dev_t); 2019 int (*rename) (struct user_namespace *, struct inode *, struct dentry *, 2020 struct inode *, struct dentry *, 
unsigned int); 2021 int (*setattr) (struct user_namespace *, struct dentry *, 2022 struct iattr *); 2023 int (*getattr) (struct user_namespace *, const struct path *, 2024 struct kstat *, u32, unsigned int); 2025 ssize_t (*listxattr) (struct dentry *, char *, size_t); 2026 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, 2027 u64 len); 2028 int (*update_time)(struct inode *, struct timespec64 *, int); 2029 int (*atomic_open)(struct inode *, struct dentry *, 2030 struct file *, unsigned open_flag, 2031 umode_t create_mode); 2032 int (*tmpfile) (struct user_namespace *, struct inode *, 2033 struct dentry *, umode_t); 2034 int (*set_acl)(struct user_namespace *, struct inode *, 2035 struct posix_acl *, int); 2036 int (*fileattr_set)(struct user_namespace *mnt_userns, 2037 struct dentry *dentry, struct fileattr *fa); 2038 int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa); 2039} ____cacheline_aligned; 2040 2041static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, 2042 struct iov_iter *iter) 2043{ 2044 return file->f_op->read_iter(kio, iter); 2045} 2046 2047static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, 2048 struct iov_iter *iter) 2049{ 2050 return file->f_op->write_iter(kio, iter); 2051} 2052 2053static inline int call_mmap(struct file *file, struct vm_area_struct *vma) 2054{ 2055 return file->f_op->mmap(file, vma); 2056} 2057 2058extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); 2059extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); 2060extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, 2061 loff_t, size_t, unsigned int); 2062extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, 2063 struct file *file_out, loff_t pos_out, 2064 size_t len, unsigned int flags); 2065extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, 2066 struct file *file_out, loff_t pos_out, 2067 loff_t *count, 2068 unsigned int remap_flags); 2069extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, 2070 struct file *file_out, loff_t pos_out, 2071 loff_t len, unsigned int remap_flags); 2072extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in, 2073 struct file *file_out, loff_t pos_out, 2074 loff_t len, unsigned int remap_flags); 2075extern int vfs_dedupe_file_range(struct file *file, 2076 struct file_dedupe_range *same); 2077extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, 2078 struct file *dst_file, loff_t dst_pos, 2079 loff_t len, unsigned int remap_flags); 2080 2081 2082struct super_operations { 2083 struct inode *(*alloc_inode)(struct super_block *sb); 2084 void (*destroy_inode)(struct inode *); 2085 void (*free_inode)(struct inode *); 2086 2087 void (*dirty_inode) (struct inode *, int flags); 2088 int (*write_inode) (struct inode *, struct writeback_control *wbc); 2089 int (*drop_inode) (struct inode *); 2090 void (*evict_inode) (struct inode *); 2091 void (*put_super) (struct super_block *); 2092 int (*sync_fs)(struct super_block *sb, int wait); 2093 int (*freeze_super) (struct super_block *); 2094 int (*freeze_fs) (struct super_block *); 2095 int (*thaw_super) (struct super_block *); 2096 int (*unfreeze_fs) (struct super_block *); 2097 int (*statfs) (struct dentry *, struct kstatfs *); 2098 int (*remount_fs) (struct super_block *, int *, char *); 2099 void (*umount_begin) (struct super_block *); 2100 2101 int (*show_options)(struct seq_file *, struct 
dentry *); 2102 int (*show_devname)(struct seq_file *, struct dentry *); 2103 int (*show_path)(struct seq_file *, struct dentry *); 2104 int (*show_stats)(struct seq_file *, struct dentry *); 2105#ifdef CONFIG_QUOTA 2106 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); 2107 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); 2108 struct dquot **(*get_dquots)(struct inode *); 2109#endif 2110 long (*nr_cached_objects)(struct super_block *, 2111 struct shrink_control *); 2112 long (*free_cached_objects)(struct super_block *, 2113 struct shrink_control *); 2114}; 2115 2116/* 2117 * Inode flags - they have no relation to superblock flags now 2118 */ 2119#define S_SYNC (1 << 0) /* Writes are synced at once */ 2120#define S_NOATIME (1 << 1) /* Do not update access times */ 2121#define S_APPEND (1 << 2) /* Append-only file */ 2122#define S_IMMUTABLE (1 << 3) /* Immutable file */ 2123#define S_DEAD (1 << 4) /* removed, but still open directory */ 2124#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */ 2125#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */ 2126#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */ 2127#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */ 2128#define S_PRIVATE (1 << 9) /* Inode is fs-internal */ 2129#define S_IMA (1 << 10) /* Inode has an associated IMA struct */ 2130#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */ 2131#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */ 2132#ifdef CONFIG_FS_DAX 2133#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */ 2134#else 2135#define S_DAX 0 /* Make all the DAX code disappear */ 2136#endif 2137#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */ 2138#define S_CASEFOLD (1 << 15) /* Casefolded file */ 2139#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */ 2140#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. fs/cachefiles) */ 2141 2142/* 2143 * Note that nosuid etc flags are inode-specific: setting some file-system 2144 * flags just means all the inodes inherit those flags by default. It might be 2145 * possible to override it selectively if you really wanted to with some 2146 * ioctl() that is not currently implemented. 2147 * 2148 * Exception: SB_RDONLY is always applied to the entire file system. 2149 * 2150 * Unfortunately, it is possible to change a filesystems flags with it mounted 2151 * with files in use. This means that all of the inodes will not have their 2152 * i_flags updated. Hence, i_flags no longer inherit the superblock mount 2153 * flags, so these have to be checked separately. 
-- rmk@arm.uk.linux.org 2154 */ 2155#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg)) 2156 2157static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; } 2158#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb) 2159#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \ 2160 ((inode)->i_flags & S_SYNC)) 2161#define IS_DIRSYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS|SB_DIRSYNC) || \ 2162 ((inode)->i_flags & (S_SYNC|S_DIRSYNC))) 2163#define IS_MANDLOCK(inode) __IS_FLG(inode, SB_MANDLOCK) 2164#define IS_NOATIME(inode) __IS_FLG(inode, SB_RDONLY|SB_NOATIME) 2165#define IS_I_VERSION(inode) __IS_FLG(inode, SB_I_VERSION) 2166 2167#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) 2168#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) 2169#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) 2170#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL) 2171 2172#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) 2173#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) 2174#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE) 2175#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE) 2176#define IS_IMA(inode) ((inode)->i_flags & S_IMA) 2177#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) 2178#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) 2179#define IS_DAX(inode) ((inode)->i_flags & S_DAX) 2180#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED) 2181#define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD) 2182#define IS_VERITY(inode) ((inode)->i_flags & S_VERITY) 2183 2184#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ 2185 (inode)->i_rdev == WHITEOUT_DEV) 2186 2187static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns, 2188 struct inode *inode) 2189{ 2190 return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) || 2191 !gid_valid(i_gid_into_mnt(mnt_userns, inode)); 2192} 2193 2194static inline int iocb_flags(struct file *file); 2195 2196static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) 2197{ 2198 *kiocb = (struct kiocb) { 2199 .ki_filp = filp, 2200 .ki_flags = iocb_flags(filp), 2201 .ki_ioprio = get_current_ioprio(), 2202 }; 2203} 2204 2205static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, 2206 struct file *filp) 2207{ 2208 *kiocb = (struct kiocb) { 2209 .ki_filp = filp, 2210 .ki_flags = kiocb_src->ki_flags, 2211 .ki_ioprio = kiocb_src->ki_ioprio, 2212 .ki_pos = kiocb_src->ki_pos, 2213 }; 2214} 2215 2216/* 2217 * Inode state bits. Protected by inode->i_lock 2218 * 2219 * Four bits determine the dirty state of the inode: I_DIRTY_SYNC, 2220 * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME. 2221 * 2222 * Four bits define the lifetime of an inode. Initially, inodes are I_NEW, 2223 * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at 2224 * various stages of removing an inode. 2225 * 2226 * Two bits are used for locking and completion notification, I_NEW and I_SYNC. 2227 * 2228 * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on 2229 * fdatasync() (unless I_DIRTY_DATASYNC is also set). 2230 * Timestamp updates are the usual cause. 2231 * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of 2232 * these changes separately from I_DIRTY_SYNC so that we 2233 * don't have to write inode on fdatasync() when only 2234 * e.g. the timestamps have changed. 2235 * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. 
2236 * I_DIRTY_TIME The inode itself only has dirty timestamps, and the 2237 * lazytime mount option is enabled. We keep track of this 2238 * separately from I_DIRTY_SYNC in order to implement 2239 * lazytime. This gets cleared if I_DIRTY_INODE 2240 * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. I.e. 2241 * either I_DIRTY_TIME *or* I_DIRTY_INODE can be set in 2242 * i_state, but not both. I_DIRTY_PAGES may still be set. 2243 * I_NEW Serves as both a mutex and completion notification. 2244 * New inodes set I_NEW. If two processes both create 2245 * the same inode, one of them will release its inode and 2246 * wait for I_NEW to be released before returning. 2247 * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can 2248 * also cause waiting on I_NEW, without I_NEW actually 2249 * being set. find_inode() uses this to prevent returning 2250 * nearly-dead inodes. 2251 * I_WILL_FREE Must be set when calling write_inode_now() if i_count 2252 * is zero. I_FREEING must be set when I_WILL_FREE is 2253 * cleared. 2254 * I_FREEING Set when inode is about to be freed but still has dirty 2255 * pages or buffers attached or the inode itself is still 2256 * dirty. 2257 * I_CLEAR Added by clear_inode(). In this state the inode is 2258 * clean and can be destroyed. Inode keeps I_FREEING. 2259 * 2260 * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are 2261 * prohibited for many purposes. iget() must wait for 2262 * the inode to be completely released, then create it 2263 * anew. Other functions will just ignore such inodes, 2264 * if appropriate. I_NEW is used for waiting. 2265 * 2266 * I_SYNC Writeback of inode is running. The bit is set during 2267 * data writeback, and cleared with a wakeup on the bit 2268 * address once it is done. The bit is also used to pin 2269 * the inode in memory for flusher thread. 2270 * 2271 * I_REFERENCED Marks the inode as recently references on the LRU list. 2272 * 2273 * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit(). 2274 * 2275 * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to 2276 * synchronize competing switching instances and to tell 2277 * wb stat updates to grab the i_pages lock. See 2278 * inode_switch_wbs_work_fn() for details. 2279 * 2280 * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper 2281 * and work dirs among overlayfs mounts. 2282 * 2283 * I_CREATING New object's inode in the middle of setting up. 2284 * 2285 * I_DONTCACHE Evict inode as soon as it is not used anymore. 2286 * 2287 * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists. 2288 * Used to detect that mark_inode_dirty() should not move 2289 * inode between dirty lists. 2290 * 2291 * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback. 2292 * 2293 * Q: What is the difference between I_WILL_FREE and I_FREEING? 
2294 */ 2295#define I_DIRTY_SYNC (1 << 0) 2296#define I_DIRTY_DATASYNC (1 << 1) 2297#define I_DIRTY_PAGES (1 << 2) 2298#define __I_NEW 3 2299#define I_NEW (1 << __I_NEW) 2300#define I_WILL_FREE (1 << 4) 2301#define I_FREEING (1 << 5) 2302#define I_CLEAR (1 << 6) 2303#define __I_SYNC 7 2304#define I_SYNC (1 << __I_SYNC) 2305#define I_REFERENCED (1 << 8) 2306#define __I_DIO_WAKEUP 9 2307#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) 2308#define I_LINKABLE (1 << 10) 2309#define I_DIRTY_TIME (1 << 11) 2310#define I_WB_SWITCH (1 << 13) 2311#define I_OVL_INUSE (1 << 14) 2312#define I_CREATING (1 << 15) 2313#define I_DONTCACHE (1 << 16) 2314#define I_SYNC_QUEUED (1 << 17) 2315#define I_PINNING_FSCACHE_WB (1 << 18) 2316 2317#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) 2318#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) 2319#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) 2320 2321extern void __mark_inode_dirty(struct inode *, int); 2322static inline void mark_inode_dirty(struct inode *inode) 2323{ 2324 __mark_inode_dirty(inode, I_DIRTY); 2325} 2326 2327static inline void mark_inode_dirty_sync(struct inode *inode) 2328{ 2329 __mark_inode_dirty(inode, I_DIRTY_SYNC); 2330} 2331 2332/* 2333 * Returns true if the given inode itself only has dirty timestamps (its pages 2334 * may still be dirty) and isn't currently being allocated or freed. 2335 * Filesystems should call this if when writing an inode when lazytime is 2336 * enabled, they want to opportunistically write the timestamps of other inodes 2337 * located very nearby on-disk, e.g. in the same inode block. This returns true 2338 * if the given inode is in need of such an opportunistic update. Requires 2339 * i_lock, or at least later re-checking under i_lock. 2340 */ 2341static inline bool inode_is_dirtytime_only(struct inode *inode) 2342{ 2343 return (inode->i_state & (I_DIRTY_TIME | I_NEW | 2344 I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME; 2345} 2346 2347extern void inc_nlink(struct inode *inode); 2348extern void drop_nlink(struct inode *inode); 2349extern void clear_nlink(struct inode *inode); 2350extern void set_nlink(struct inode *inode, unsigned int nlink); 2351 2352static inline void inode_inc_link_count(struct inode *inode) 2353{ 2354 inc_nlink(inode); 2355 mark_inode_dirty(inode); 2356} 2357 2358static inline void inode_dec_link_count(struct inode *inode) 2359{ 2360 drop_nlink(inode); 2361 mark_inode_dirty(inode); 2362} 2363 2364enum file_time_flags { 2365 S_ATIME = 1, 2366 S_MTIME = 2, 2367 S_CTIME = 4, 2368 S_VERSION = 8, 2369}; 2370 2371extern bool atime_needs_update(const struct path *, struct inode *); 2372extern void touch_atime(const struct path *); 2373int inode_update_time(struct inode *inode, struct timespec64 *time, int flags); 2374 2375static inline void file_accessed(struct file *file) 2376{ 2377 if (!(file->f_flags & O_NOATIME)) 2378 touch_atime(&file->f_path); 2379} 2380 2381extern int file_modified(struct file *file); 2382 2383int sync_inode_metadata(struct inode *inode, int wait); 2384 2385struct file_system_type { 2386 const char *name; 2387 int fs_flags; 2388#define FS_REQUIRES_DEV 1 2389#define FS_BINARY_MOUNTDATA 2 2390#define FS_HAS_SUBTYPE 4 2391#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ 2392#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ 2393#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ 2394#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. 
*/ 2395 int (*init_fs_context)(struct fs_context *); 2396 const struct fs_parameter_spec *parameters; 2397 struct dentry *(*mount) (struct file_system_type *, int, 2398 const char *, void *); 2399 void (*kill_sb) (struct super_block *); 2400 struct module *owner; 2401 struct file_system_type * next; 2402 struct hlist_head fs_supers; 2403 2404 struct lock_class_key s_lock_key; 2405 struct lock_class_key s_umount_key; 2406 struct lock_class_key s_vfs_rename_key; 2407 struct lock_class_key s_writers_key[SB_FREEZE_LEVELS]; 2408 2409 struct lock_class_key i_lock_key; 2410 struct lock_class_key i_mutex_key; 2411 struct lock_class_key invalidate_lock_key; 2412 struct lock_class_key i_mutex_dir_key; 2413}; 2414 2415#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) 2416 2417extern struct dentry *mount_bdev(struct file_system_type *fs_type, 2418 int flags, const char *dev_name, void *data, 2419 int (*fill_super)(struct super_block *, void *, int)); 2420extern struct dentry *mount_single(struct file_system_type *fs_type, 2421 int flags, void *data, 2422 int (*fill_super)(struct super_block *, void *, int)); 2423extern struct dentry *mount_nodev(struct file_system_type *fs_type, 2424 int flags, void *data, 2425 int (*fill_super)(struct super_block *, void *, int)); 2426extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); 2427void generic_shutdown_super(struct super_block *sb); 2428void kill_block_super(struct super_block *sb); 2429void kill_anon_super(struct super_block *sb); 2430void kill_litter_super(struct super_block *sb); 2431void deactivate_super(struct super_block *sb); 2432void deactivate_locked_super(struct super_block *sb); 2433int set_anon_super(struct super_block *s, void *data); 2434int set_anon_super_fc(struct super_block *s, struct fs_context *fc); 2435int get_anon_bdev(dev_t *); 2436void free_anon_bdev(dev_t); 2437struct super_block *sget_fc(struct fs_context *fc, 2438 int (*test)(struct super_block *, struct fs_context *), 2439 int (*set)(struct super_block *, struct fs_context *)); 2440struct super_block *sget(struct file_system_type *type, 2441 int (*test)(struct super_block *,void *), 2442 int (*set)(struct super_block *,void *), 2443 int flags, void *data); 2444 2445/* Alas, no aliases. Too much hassle with bringing module.h everywhere */ 2446#define fops_get(fops) \ 2447 (((fops) && try_module_get((fops)->owner) ? (fops) : NULL)) 2448#define fops_put(fops) \ 2449 do { if (fops) module_put((fops)->owner); } while(0) 2450/* 2451 * This one is to be used *ONLY* from ->open() instances. 2452 * fops must be non-NULL, pinned down *and* module dependencies 2453 * should be sufficient to pin the caller down as well. 
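 *
 * Illustrative sketch (hypothetical driver, not part of this header):
 *
 *	static int foo_open(struct inode *inode, struct file *filp)
 *	{
 *		replace_fops(filp, &foo_real_fops);
 *		if (filp->f_op->open)
 *			return filp->f_op->open(inode, filp);
 *		return 0;
 *	}
 *
 * This mirrors the pattern used by dispatchers such as the character
 * device layer, which hand a file over to the fops that will actually
 * service it.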
2454 */ 2455#define replace_fops(f, fops) \ 2456 do { \ 2457 struct file *__file = (f); \ 2458 fops_put(__file->f_op); \ 2459 BUG_ON(!(__file->f_op = (fops))); \ 2460 } while(0) 2461 2462extern int register_filesystem(struct file_system_type *); 2463extern int unregister_filesystem(struct file_system_type *); 2464extern struct vfsmount *kern_mount(struct file_system_type *); 2465extern void kern_unmount(struct vfsmount *mnt); 2466extern int may_umount_tree(struct vfsmount *); 2467extern int may_umount(struct vfsmount *); 2468extern long do_mount(const char *, const char __user *, 2469 const char *, unsigned long, void *); 2470extern struct vfsmount *collect_mounts(const struct path *); 2471extern void drop_collected_mounts(struct vfsmount *); 2472extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, 2473 struct vfsmount *); 2474extern int vfs_statfs(const struct path *, struct kstatfs *); 2475extern int user_statfs(const char __user *, struct kstatfs *); 2476extern int fd_statfs(int, struct kstatfs *); 2477extern int freeze_super(struct super_block *super); 2478extern int thaw_super(struct super_block *super); 2479extern bool our_mnt(struct vfsmount *mnt); 2480extern __printf(2, 3) 2481int super_setup_bdi_name(struct super_block *sb, char *fmt, ...); 2482extern int super_setup_bdi(struct super_block *sb); 2483 2484extern int current_umask(void); 2485 2486extern void ihold(struct inode * inode); 2487extern void iput(struct inode *); 2488extern int generic_update_time(struct inode *, struct timespec64 *, int); 2489 2490/* /sys/fs */ 2491extern struct kobject *fs_kobj; 2492 2493#define MAX_RW_COUNT (INT_MAX & PAGE_MASK) 2494 2495#ifdef CONFIG_FILE_LOCKING 2496static inline int break_lease(struct inode *inode, unsigned int mode) 2497{ 2498 /* 2499 * Since this check is lockless, we must ensure that any refcounts 2500 * taken are done before checking i_flctx->flc_lease. Otherwise, we 2501 * could end up racing with tasks trying to set a new lease on this 2502 * file. 2503 */ 2504 smp_mb(); 2505 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) 2506 return __break_lease(inode, mode, FL_LEASE); 2507 return 0; 2508} 2509 2510static inline int break_deleg(struct inode *inode, unsigned int mode) 2511{ 2512 /* 2513 * Since this check is lockless, we must ensure that any refcounts 2514 * taken are done before checking i_flctx->flc_lease. Otherwise, we 2515 * could end up racing with tasks trying to set a new lease on this 2516 * file. 2517 */ 2518 smp_mb(); 2519 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) 2520 return __break_lease(inode, mode, FL_DELEG); 2521 return 0; 2522} 2523 2524static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode) 2525{ 2526 int ret; 2527 2528 ret = break_deleg(inode, O_WRONLY|O_NONBLOCK); 2529 if (ret == -EWOULDBLOCK && delegated_inode) { 2530 *delegated_inode = inode; 2531 ihold(inode); 2532 } 2533 return ret; 2534} 2535 2536static inline int break_deleg_wait(struct inode **delegated_inode) 2537{ 2538 int ret; 2539 2540 ret = break_deleg(*delegated_inode, O_WRONLY); 2541 iput(*delegated_inode); 2542 *delegated_inode = NULL; 2543 return ret; 2544} 2545 2546static inline int break_layout(struct inode *inode, bool wait) 2547{ 2548 smp_mb(); 2549 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) 2550 return __break_lease(inode, 2551 wait ? 
O_WRONLY : O_WRONLY | O_NONBLOCK, 2552 FL_LAYOUT); 2553 return 0; 2554} 2555 2556#else /* !CONFIG_FILE_LOCKING */ 2557static inline int break_lease(struct inode *inode, unsigned int mode) 2558{ 2559 return 0; 2560} 2561 2562static inline int break_deleg(struct inode *inode, unsigned int mode) 2563{ 2564 return 0; 2565} 2566 2567static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode) 2568{ 2569 return 0; 2570} 2571 2572static inline int break_deleg_wait(struct inode **delegated_inode) 2573{ 2574 BUG(); 2575 return 0; 2576} 2577 2578static inline int break_layout(struct inode *inode, bool wait) 2579{ 2580 return 0; 2581} 2582 2583#endif /* CONFIG_FILE_LOCKING */ 2584 2585/* fs/open.c */ 2586struct audit_names; 2587struct filename { 2588 const char *name; /* pointer to actual string */ 2589 const __user char *uptr; /* original userland pointer */ 2590 int refcnt; 2591 struct audit_names *aname; 2592 const char iname[]; 2593}; 2594static_assert(offsetof(struct filename, iname) % sizeof(long) == 0); 2595 2596static inline struct user_namespace *file_mnt_user_ns(struct file *file) 2597{ 2598 return mnt_user_ns(file->f_path.mnt); 2599} 2600 2601/** 2602 * is_idmapped_mnt - check whether a mount is mapped 2603 * @mnt: the mount to check 2604 * 2605 * If @mnt has an idmapping attached different from the 2606 * filesystem's idmapping then @mnt is mapped. 2607 * 2608 * Return: true if mount is mapped, false if not. 2609 */ 2610static inline bool is_idmapped_mnt(const struct vfsmount *mnt) 2611{ 2612 return mnt_user_ns(mnt) != mnt->mnt_sb->s_user_ns; 2613} 2614 2615extern long vfs_truncate(const struct path *, loff_t); 2616int do_truncate(struct user_namespace *, struct dentry *, loff_t start, 2617 unsigned int time_attrs, struct file *filp); 2618extern int vfs_fallocate(struct file *file, int mode, loff_t offset, 2619 loff_t len); 2620extern long do_sys_open(int dfd, const char __user *filename, int flags, 2621 umode_t mode); 2622extern struct file *file_open_name(struct filename *, int, umode_t); 2623extern struct file *filp_open(const char *, int, umode_t); 2624extern struct file *file_open_root(const struct path *, 2625 const char *, int, umode_t); 2626static inline struct file *file_open_root_mnt(struct vfsmount *mnt, 2627 const char *name, int flags, umode_t mode) 2628{ 2629 return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root}, 2630 name, flags, mode); 2631} 2632extern struct file * dentry_open(const struct path *, int, const struct cred *); 2633extern struct file * open_with_fake_path(const struct path *, int, 2634 struct inode*, const struct cred *); 2635static inline struct file *file_clone_open(struct file *file) 2636{ 2637 return dentry_open(&file->f_path, file->f_flags, file->f_cred); 2638} 2639extern int filp_close(struct file *, fl_owner_t id); 2640 2641extern struct filename *getname_flags(const char __user *, int, int *); 2642extern struct filename *getname_uflags(const char __user *, int); 2643extern struct filename *getname(const char __user *); 2644extern struct filename *getname_kernel(const char *); 2645extern void putname(struct filename *name); 2646 2647extern int finish_open(struct file *file, struct dentry *dentry, 2648 int (*open)(struct inode *, struct file *)); 2649extern int finish_no_open(struct file *file, struct dentry *dentry); 2650 2651/* fs/dcache.c */ 2652extern void __init vfs_caches_init_early(void); 2653extern void __init vfs_caches_init(void); 2654 2655extern struct kmem_cache *names_cachep; 2656 2657#define 
__getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) 2658#define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) 2659 2660extern struct super_block *blockdev_superblock; 2661static inline bool sb_is_blkdev_sb(struct super_block *sb) 2662{ 2663 return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock; 2664} 2665 2666void emergency_thaw_all(void); 2667extern int sync_filesystem(struct super_block *); 2668extern const struct file_operations def_blk_fops; 2669extern const struct file_operations def_chr_fops; 2670 2671/* fs/char_dev.c */ 2672#define CHRDEV_MAJOR_MAX 512 2673/* Marks the bottom of the first segment of free char majors */ 2674#define CHRDEV_MAJOR_DYN_END 234 2675/* Marks the top and bottom of the second segment of free char majors */ 2676#define CHRDEV_MAJOR_DYN_EXT_START 511 2677#define CHRDEV_MAJOR_DYN_EXT_END 384 2678 2679extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); 2680extern int register_chrdev_region(dev_t, unsigned, const char *); 2681extern int __register_chrdev(unsigned int major, unsigned int baseminor, 2682 unsigned int count, const char *name, 2683 const struct file_operations *fops); 2684extern void __unregister_chrdev(unsigned int major, unsigned int baseminor, 2685 unsigned int count, const char *name); 2686extern void unregister_chrdev_region(dev_t, unsigned); 2687extern void chrdev_show(struct seq_file *,off_t); 2688 2689static inline int register_chrdev(unsigned int major, const char *name, 2690 const struct file_operations *fops) 2691{ 2692 return __register_chrdev(major, 0, 256, name, fops); 2693} 2694 2695static inline void unregister_chrdev(unsigned int major, const char *name) 2696{ 2697 __unregister_chrdev(major, 0, 256, name); 2698} 2699 2700extern void init_special_inode(struct inode *, umode_t, dev_t); 2701 2702/* Invalid inode operations -- fs/bad_inode.c */ 2703extern void make_bad_inode(struct inode *); 2704extern bool is_bad_inode(struct inode *); 2705 2706extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart, 2707 loff_t lend); 2708extern int __must_check file_check_and_advance_wb_err(struct file *file); 2709extern int __must_check file_write_and_wait_range(struct file *file, 2710 loff_t start, loff_t end); 2711 2712static inline int file_write_and_wait(struct file *file) 2713{ 2714 return file_write_and_wait_range(file, 0, LLONG_MAX); 2715} 2716 2717extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, 2718 int datasync); 2719extern int vfs_fsync(struct file *file, int datasync); 2720 2721extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes, 2722 unsigned int flags); 2723 2724/* 2725 * Sync the bytes written if this was a synchronous write. Expect ki_pos 2726 * to already be updated for the write, and will return either the amount 2727 * of bytes passed in, or an error if syncing the file failed. 2728 */ 2729static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) 2730{ 2731 if (iocb->ki_flags & IOCB_DSYNC) { 2732 int ret = vfs_fsync_range(iocb->ki_filp, 2733 iocb->ki_pos - count, iocb->ki_pos - 1, 2734 (iocb->ki_flags & IOCB_SYNC) ? 
0 : 1); 2735 if (ret) 2736 return ret; 2737 } 2738 2739 return count; 2740} 2741 2742extern void emergency_sync(void); 2743extern void emergency_remount(void); 2744 2745#ifdef CONFIG_BLOCK 2746extern int bmap(struct inode *inode, sector_t *block); 2747#else 2748static inline int bmap(struct inode *inode, sector_t *block) 2749{ 2750 return -EINVAL; 2751} 2752#endif 2753 2754int notify_change(struct user_namespace *, struct dentry *, 2755 struct iattr *, struct inode **); 2756int inode_permission(struct user_namespace *, struct inode *, int); 2757int generic_permission(struct user_namespace *, struct inode *, int); 2758static inline int file_permission(struct file *file, int mask) 2759{ 2760 return inode_permission(file_mnt_user_ns(file), 2761 file_inode(file), mask); 2762} 2763static inline int path_permission(const struct path *path, int mask) 2764{ 2765 return inode_permission(mnt_user_ns(path->mnt), 2766 d_inode(path->dentry), mask); 2767} 2768int __check_sticky(struct user_namespace *mnt_userns, struct inode *dir, 2769 struct inode *inode); 2770 2771static inline bool execute_ok(struct inode *inode) 2772{ 2773 return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); 2774} 2775 2776static inline bool inode_wrong_type(const struct inode *inode, umode_t mode) 2777{ 2778 return (inode->i_mode ^ mode) & S_IFMT; 2779} 2780 2781static inline void file_start_write(struct file *file) 2782{ 2783 if (!S_ISREG(file_inode(file)->i_mode)) 2784 return; 2785 sb_start_write(file_inode(file)->i_sb); 2786} 2787 2788static inline bool file_start_write_trylock(struct file *file) 2789{ 2790 if (!S_ISREG(file_inode(file)->i_mode)) 2791 return true; 2792 return sb_start_write_trylock(file_inode(file)->i_sb); 2793} 2794 2795static inline void file_end_write(struct file *file) 2796{ 2797 if (!S_ISREG(file_inode(file)->i_mode)) 2798 return; 2799 __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); 2800} 2801 2802/* 2803 * This is used for regular files where some users -- especially the 2804 * currently executed binary in a process, previously handled via 2805 * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap 2806 * read-write shared) accesses. 2807 * 2808 * get_write_access() gets write permission for a file. 2809 * put_write_access() releases this write permission. 2810 * deny_write_access() denies write access to a file. 2811 * allow_write_access() re-enables write access to a file. 2812 * 2813 * The i_writecount field of an inode can have the following values: 2814 * 0: no write access, no denied write access 2815 * < 0: (-i_writecount) users that denied write access to the file. 2816 * > 0: (i_writecount) users that have write access to the file. 2817 * 2818 * Normally we operate on that counter with atomic_{inc,dec} and it's safe 2819 * except for the cases where we don't hold i_writecount yet. Then we need to 2820 * use {get,deny}_write_access() - these functions check the sign and refuse 2821 * to do the change if sign is wrong. 2822 */ 2823static inline int get_write_access(struct inode *inode) 2824{ 2825 return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY; 2826} 2827static inline int deny_write_access(struct file *file) 2828{ 2829 struct inode *inode = file_inode(file); 2830 return atomic_dec_unless_positive(&inode->i_writecount) ? 
0 : -ETXTBSY; 2831} 2832static inline void put_write_access(struct inode * inode) 2833{ 2834 atomic_dec(&inode->i_writecount); 2835} 2836static inline void allow_write_access(struct file *file) 2837{ 2838 if (file) 2839 atomic_inc(&file_inode(file)->i_writecount); 2840} 2841static inline bool inode_is_open_for_write(const struct inode *inode) 2842{ 2843 return atomic_read(&inode->i_writecount) > 0; 2844} 2845 2846#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) 2847static inline void i_readcount_dec(struct inode *inode) 2848{ 2849 BUG_ON(!atomic_read(&inode->i_readcount)); 2850 atomic_dec(&inode->i_readcount); 2851} 2852static inline void i_readcount_inc(struct inode *inode) 2853{ 2854 atomic_inc(&inode->i_readcount); 2855} 2856#else 2857static inline void i_readcount_dec(struct inode *inode) 2858{ 2859 return; 2860} 2861static inline void i_readcount_inc(struct inode *inode) 2862{ 2863 return; 2864} 2865#endif 2866extern int do_pipe_flags(int *, int); 2867 2868extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *); 2869ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos); 2870extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *); 2871extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *); 2872extern struct file * open_exec(const char *); 2873 2874/* fs/dcache.c -- generic fs support functions */ 2875extern bool is_subdir(struct dentry *, struct dentry *); 2876extern bool path_is_under(const struct path *, const struct path *); 2877 2878extern char *file_path(struct file *, char *, int); 2879 2880#include <linux/err.h> 2881 2882/* needed for stackable file system support */ 2883extern loff_t default_llseek(struct file *file, loff_t offset, int whence); 2884 2885extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence); 2886 2887extern int inode_init_always(struct super_block *, struct inode *); 2888extern void inode_init_once(struct inode *); 2889extern void address_space_init_once(struct address_space *mapping); 2890extern struct inode * igrab(struct inode *); 2891extern ino_t iunique(struct super_block *, ino_t); 2892extern int inode_needs_sync(struct inode *inode); 2893extern int generic_delete_inode(struct inode *inode); 2894static inline int generic_drop_inode(struct inode *inode) 2895{ 2896 return !inode->i_nlink || inode_unhashed(inode); 2897} 2898extern void d_mark_dontcache(struct inode *inode); 2899 2900extern struct inode *ilookup5_nowait(struct super_block *sb, 2901 unsigned long hashval, int (*test)(struct inode *, void *), 2902 void *data); 2903extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, 2904 int (*test)(struct inode *, void *), void *data); 2905extern struct inode *ilookup(struct super_block *sb, unsigned long ino); 2906 2907extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval, 2908 int (*test)(struct inode *, void *), 2909 int (*set)(struct inode *, void *), 2910 void *data); 2911extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); 2912extern struct inode * iget_locked(struct super_block *, unsigned long); 2913extern struct inode *find_inode_nowait(struct super_block *, 2914 unsigned long, 2915 int (*match)(struct inode *, 2916 unsigned long, void *), 2917 void *data); 2918extern struct inode *find_inode_rcu(struct super_block *, unsigned long, 2919 int (*)(struct inode *, void *), void *); 2920extern struct inode 
*find_inode_by_ino_rcu(struct super_block *, unsigned long); 2921extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); 2922extern int insert_inode_locked(struct inode *); 2923#ifdef CONFIG_DEBUG_LOCK_ALLOC 2924extern void lockdep_annotate_inode_mutex_key(struct inode *inode); 2925#else 2926static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; 2927#endif 2928extern void unlock_new_inode(struct inode *); 2929extern void discard_new_inode(struct inode *); 2930extern unsigned int get_next_ino(void); 2931extern void evict_inodes(struct super_block *sb); 2932void dump_mapping(const struct address_space *); 2933 2934/* 2935 * Userspace may rely on the inode number being non-zero. For example, glibc 2936 * simply ignores files with zero i_ino in unlink() and other places. 2937 * 2938 * As an additional complication, if userspace was compiled with 2939 * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the 2940 * lower 32 bits, so we need to check that those aren't zero explicitly. With 2941 * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but 2942 * better safe than sorry. 2943 */ 2944static inline bool is_zero_ino(ino_t ino) 2945{ 2946 return (u32)ino == 0; 2947} 2948 2949extern void __iget(struct inode * inode); 2950extern void iget_failed(struct inode *); 2951extern void clear_inode(struct inode *); 2952extern void __destroy_inode(struct inode *); 2953extern struct inode *new_inode_pseudo(struct super_block *sb); 2954extern struct inode *new_inode(struct super_block *sb); 2955extern void free_inode_nonrcu(struct inode *inode); 2956extern int should_remove_suid(struct dentry *); 2957extern int file_remove_privs(struct file *); 2958 2959/* 2960 * This must be used for allocating filesystem-specific inodes to set 2961 * up the inode reclaim context correctly.
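 *
 * Illustrative sketch (hypothetical "foofs", not part of this header):
 *
 *	static struct inode *foofs_alloc_inode(struct super_block *sb)
 *	{
 *		struct foofs_inode_info *fi;
 *
 *		fi = alloc_inode_sb(sb, foofs_inode_cachep, GFP_KERNEL);
 *		if (!fi)
 *			return NULL;
 *		return &fi->vfs_inode;
 *	}
 *
 * i.e. ->alloc_inode() should go through alloc_inode_sb() rather than
 * calling kmem_cache_alloc() directly, so the object is accounted against
 * the superblock's inode LRU.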
2962 */ 2963static inline void * 2964alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp) 2965{ 2966 return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp); 2967} 2968 2969extern void __insert_inode_hash(struct inode *, unsigned long hashval); 2970static inline void insert_inode_hash(struct inode *inode) 2971{ 2972 __insert_inode_hash(inode, inode->i_ino); 2973} 2974 2975extern void __remove_inode_hash(struct inode *); 2976static inline void remove_inode_hash(struct inode *inode) 2977{ 2978 if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash)) 2979 __remove_inode_hash(inode); 2980} 2981 2982extern void inode_sb_list_add(struct inode *inode); 2983extern void inode_add_lru(struct inode *inode); 2984 2985extern int sb_set_blocksize(struct super_block *, int); 2986extern int sb_min_blocksize(struct super_block *, int); 2987 2988extern int generic_file_mmap(struct file *, struct vm_area_struct *); 2989extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); 2990extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); 2991int generic_write_checks_count(struct kiocb *iocb, loff_t *count); 2992extern int generic_write_check_limits(struct file *file, loff_t pos, 2993 loff_t *count); 2994extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); 2995ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to, 2996 ssize_t already_read); 2997extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); 2998extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); 2999extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); 3000extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); 3001ssize_t generic_perform_write(struct kiocb *, struct iov_iter *); 3002 3003ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, 3004 rwf_t flags); 3005ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos, 3006 rwf_t flags); 3007ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb, 3008 struct iov_iter *iter); 3009ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb, 3010 struct iov_iter *iter); 3011 3012/* fs/splice.c */ 3013extern ssize_t generic_file_splice_read(struct file *, loff_t *, 3014 struct pipe_inode_info *, size_t, unsigned int); 3015extern ssize_t iter_file_splice_write(struct pipe_inode_info *, 3016 struct file *, loff_t *, size_t, unsigned int); 3017extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 3018 struct file *out, loff_t *, size_t len, unsigned int flags); 3019extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 3020 loff_t *opos, size_t len, unsigned int flags); 3021 3022 3023extern void 3024file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); 3025extern loff_t noop_llseek(struct file *file, loff_t offset, int whence); 3026extern loff_t no_llseek(struct file *file, loff_t offset, int whence); 3027extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize); 3028extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence); 3029extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, 3030 int whence, loff_t maxsize, loff_t eof); 3031extern loff_t fixed_size_llseek(struct file *file, loff_t offset, 3032 int whence, loff_t size); 3033extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); 3034extern loff_t no_seek_end_llseek(struct file 
*, loff_t, int); 3035int rw_verify_area(int, struct file *, const loff_t *, size_t); 3036extern int generic_file_open(struct inode * inode, struct file * filp); 3037extern int nonseekable_open(struct inode * inode, struct file * filp); 3038extern int stream_open(struct inode * inode, struct file * filp); 3039 3040#ifdef CONFIG_BLOCK 3041typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, 3042 loff_t file_offset); 3043 3044enum { 3045 /* need locking between buffered and direct access */ 3046 DIO_LOCKING = 0x01, 3047 3048 /* filesystem does not support filling holes */ 3049 DIO_SKIP_HOLES = 0x02, 3050}; 3051 3052ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, 3053 struct block_device *bdev, struct iov_iter *iter, 3054 get_block_t get_block, 3055 dio_iodone_t end_io, dio_submit_t submit_io, 3056 int flags); 3057 3058static inline ssize_t blockdev_direct_IO(struct kiocb *iocb, 3059 struct inode *inode, 3060 struct iov_iter *iter, 3061 get_block_t get_block) 3062{ 3063 return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, 3064 get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES); 3065} 3066#endif 3067 3068void inode_dio_wait(struct inode *inode); 3069 3070/** 3071 * inode_dio_begin - signal start of a direct I/O request 3072 * @inode: inode the direct I/O happens on 3073 * 3074 * This is called once we've decided to start a direct I/O request, and is 3075 * used to make inode_dio_wait() callers wait until the request completes. 3076 */ 3077static inline void inode_dio_begin(struct inode *inode) 3078{ 3079 atomic_inc(&inode->i_dio_count); 3080} 3081 3082/** 3083 * inode_dio_end - signal finish of a direct I/O request 3084 * @inode: inode the direct I/O happens on 3085 * 3086 * This is called once we've finished processing a direct I/O request, 3087 * and is used to wake up callers waiting for direct I/O to be quiesced. 3088 */ 3089static inline void inode_dio_end(struct inode *inode) 3090{ 3091 if (atomic_dec_and_test(&inode->i_dio_count)) 3092 wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); 3093} 3094 3095/* 3096 * Warn about a page cache invalidation failure during a direct I/O write.
3097 */ 3098void dio_warn_stale_pagecache(struct file *filp); 3099 3100extern void inode_set_flags(struct inode *inode, unsigned int flags, 3101 unsigned int mask); 3102 3103extern const struct file_operations generic_ro_fops; 3104 3105#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) 3106 3107extern int readlink_copy(char __user *, int, const char *); 3108extern int page_readlink(struct dentry *, char __user *, int); 3109extern const char *page_get_link(struct dentry *, struct inode *, 3110 struct delayed_call *); 3111extern void page_put_link(void *); 3112extern int __page_symlink(struct inode *inode, const char *symname, int len, 3113 int nofs); 3114extern int page_symlink(struct inode *inode, const char *symname, int len); 3115extern const struct inode_operations page_symlink_inode_operations; 3116extern void kfree_link(void *); 3117void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *); 3118void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); 3119extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); 3120extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); 3121void __inode_add_bytes(struct inode *inode, loff_t bytes); 3122void inode_add_bytes(struct inode *inode, loff_t bytes); 3123void __inode_sub_bytes(struct inode *inode, loff_t bytes); 3124void inode_sub_bytes(struct inode *inode, loff_t bytes); 3125static inline loff_t __inode_get_bytes(struct inode *inode) 3126{ 3127 return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes; 3128} 3129loff_t inode_get_bytes(struct inode *inode); 3130void inode_set_bytes(struct inode *inode, loff_t bytes); 3131const char *simple_get_link(struct dentry *, struct inode *, 3132 struct delayed_call *); 3133extern const struct inode_operations simple_symlink_inode_operations; 3134 3135extern int iterate_dir(struct file *, struct dir_context *); 3136 3137int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, 3138 int flags); 3139int vfs_fstat(int fd, struct kstat *stat); 3140 3141static inline int vfs_stat(const char __user *filename, struct kstat *stat) 3142{ 3143 return vfs_fstatat(AT_FDCWD, filename, stat, 0); 3144} 3145static inline int vfs_lstat(const char __user *name, struct kstat *stat) 3146{ 3147 return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW); 3148} 3149 3150extern const char *vfs_get_link(struct dentry *, struct delayed_call *); 3151extern int vfs_readlink(struct dentry *, char __user *, int); 3152 3153extern struct file_system_type *get_filesystem(struct file_system_type *fs); 3154extern void put_filesystem(struct file_system_type *fs); 3155extern struct file_system_type *get_fs_type(const char *name); 3156extern struct super_block *get_super(struct block_device *); 3157extern struct super_block *get_active_super(struct block_device *bdev); 3158extern void drop_super(struct super_block *sb); 3159extern void drop_super_exclusive(struct super_block *sb); 3160extern void iterate_supers(void (*)(struct super_block *, void *), void *); 3161extern void iterate_supers_type(struct file_system_type *, 3162 void (*)(struct super_block *, void *), void *); 3163 3164extern int dcache_dir_open(struct inode *, struct file *); 3165extern int dcache_dir_close(struct inode *, struct file *); 3166extern loff_t dcache_dir_lseek(struct file *, loff_t, int); 3167extern int dcache_readdir(struct file *, struct dir_context *); 3168extern int simple_setattr(struct user_namespace *, struct dentry *, 3169 struct 
iattr *); 3170extern int simple_getattr(struct user_namespace *, const struct path *, 3171 struct kstat *, u32, unsigned int); 3172extern int simple_statfs(struct dentry *, struct kstatfs *); 3173extern int simple_open(struct inode *inode, struct file *file); 3174extern int simple_link(struct dentry *, struct inode *, struct dentry *); 3175extern int simple_unlink(struct inode *, struct dentry *); 3176extern int simple_rmdir(struct inode *, struct dentry *); 3177extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, 3178 struct inode *new_dir, struct dentry *new_dentry); 3179extern int simple_rename(struct user_namespace *, struct inode *, 3180 struct dentry *, struct inode *, struct dentry *, 3181 unsigned int); 3182extern void simple_recursive_removal(struct dentry *, 3183 void (*callback)(struct dentry *)); 3184extern int noop_fsync(struct file *, loff_t, loff_t, int); 3185extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); 3186extern int simple_empty(struct dentry *); 3187extern int simple_write_begin(struct file *file, struct address_space *mapping, 3188 loff_t pos, unsigned len, unsigned flags, 3189 struct page **pagep, void **fsdata); 3190extern const struct address_space_operations ram_aops; 3191extern int always_delete_dentry(const struct dentry *); 3192extern struct inode *alloc_anon_inode(struct super_block *); 3193extern int simple_nosetlease(struct file *, long, struct file_lock **, void **); 3194extern const struct dentry_operations simple_dentry_operations; 3195 3196extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); 3197extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); 3198extern const struct file_operations simple_dir_operations; 3199extern const struct inode_operations simple_dir_inode_operations; 3200extern void make_empty_dir_inode(struct inode *inode); 3201extern bool is_empty_dir_inode(struct inode *inode); 3202struct tree_descr { const char *name; const struct file_operations *ops; int mode; }; 3203struct dentry *d_alloc_name(struct dentry *, const char *); 3204extern int simple_fill_super(struct super_block *, unsigned long, 3205 const struct tree_descr *); 3206extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); 3207extern void simple_release_fs(struct vfsmount **mount, int *count); 3208 3209extern ssize_t simple_read_from_buffer(void __user *to, size_t count, 3210 loff_t *ppos, const void *from, size_t available); 3211extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, 3212 const void __user *from, size_t count); 3213 3214extern int __generic_file_fsync(struct file *, loff_t, loff_t, int); 3215extern int generic_file_fsync(struct file *, loff_t, loff_t, int); 3216 3217extern int generic_check_addressable(unsigned, u64); 3218 3219extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry); 3220 3221#ifdef CONFIG_MIGRATION 3222extern int buffer_migrate_page(struct address_space *, 3223 struct page *, struct page *, 3224 enum migrate_mode); 3225extern int buffer_migrate_page_norefs(struct address_space *, 3226 struct page *, struct page *, 3227 enum migrate_mode); 3228#else 3229#define buffer_migrate_page NULL 3230#define buffer_migrate_page_norefs NULL 3231#endif 3232 3233int may_setattr(struct user_namespace *mnt_userns, struct inode *inode, 3234 unsigned int ia_valid); 3235int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *); 3236extern int 
extern int file_update_time(struct file *file);

static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}

static inline bool vma_is_fsdax(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
		return false;
	if (!vma_is_dax(vma))
		return false;
	inode = file_inode(vma->vm_file);
	if (S_ISCHR(inode->i_mode))
		return false; /* device-dax */
	return true;
}

static inline int iocb_flags(struct file *file)
{
	int res = 0;
	if (file->f_flags & O_APPEND)
		res |= IOCB_APPEND;
	if (file->f_flags & O_DIRECT)
		res |= IOCB_DIRECT;
	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		res |= IOCB_DSYNC;
	if (file->f_flags & __O_SYNC)
		res |= IOCB_SYNC;
	return res;
}

static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
	int kiocb_flags = 0;

	/* make sure there's no overlap between RWF and private IOCB flags */
	BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD);

	if (!flags)
		return 0;
	if (unlikely(flags & ~RWF_SUPPORTED))
		return -EOPNOTSUPP;

	if (flags & RWF_NOWAIT) {
		if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
			return -EOPNOTSUPP;
		kiocb_flags |= IOCB_NOIO;
	}
	kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
	if (flags & RWF_SYNC)
		kiocb_flags |= IOCB_DSYNC;

	ki->ki_flags |= kiocb_flags;
	return 0;
}

static inline ino_t parent_ino(struct dentry *dentry)
{
	ino_t res;

	/*
	 * Don't strictly need d_lock here? If the parent ino could change
	 * then surely we'd have a deeper race in the caller?
	 */
	spin_lock(&dentry->d_lock);
	res = dentry->d_parent->d_inode->i_ino;
	spin_unlock(&dentry->d_lock);
	return res;
}

/* Transaction based IO helpers */

/*
 * An argresp is stored in an allocated page and holds the
 * size of the argument or response, along with its content
 */
struct simple_transaction_argresp {
	ssize_t size;
	char data[];
};

#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))

char *simple_transaction_get(struct file *file, const char __user *buf,
				size_t size);
ssize_t simple_transaction_read(struct file *file, char __user *buf,
				size_t size, loff_t *pos);
int simple_transaction_release(struct inode *inode, struct file *file);

void simple_transaction_set(struct file *file, size_t n);
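/*
 * Illustrative sketch, not part of this header: the usual pattern (along the
 * lines of nfsd's control files) is a write method that hands the argument
 * buffer to a parser which leaves its reply in the same page, while read and
 * release come straight from the helpers above.  "example_*" names are
 * hypothetical; example_handle_request() is assumed to write its reply into
 * "data" and return the reply length or a negative error.
 *
 *	static ssize_t example_write(struct file *file, const char __user *buf,
 *				     size_t size, loff_t *pos)
 *	{
 *		char *data = simple_transaction_get(file, buf, size);
 *		ssize_t rv;
 *
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *
 *		rv = example_handle_request(data, size);
 *		if (rv >= 0) {
 *			simple_transaction_set(file, rv);
 *			rv = size;
 *		}
 *		return rv;
 *	}
 *
 *	static const struct file_operations example_transaction_ops = {
 *		.write		= example_write,
 *		.read		= simple_transaction_read,
 *		.release	= simple_transaction_release,
 *		.llseek		= default_llseek,
 *	};
 */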
/*
 * simple attribute files
 *
 * These attributes behave similarly to those in sysfs:
 *
 * Writing to an attribute immediately sets a value; an open file can be
 * written to multiple times.
 *
 * Reading from an attribute creates a buffer from the value that might get
 * read with multiple read calls. When the attribute has been read
 * completely, no further read calls are possible until the file is opened
 * again.
 *
 * All attributes contain a text representation of a numeric value
 * that is accessed with the get() and set() functions.
 */
#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)		\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return simple_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = simple_attr_release,					\
	.read	 = simple_attr_read,					\
	.write	 = simple_attr_write,					\
	.llseek	 = generic_file_llseek,					\
}

static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
{
	/* don't do anything, just let the compiler check the arguments */
}

int simple_attr_open(struct inode *inode, struct file *file,
		     int (*get)(void *, u64 *), int (*set)(void *, u64),
		     const char *fmt);
int simple_attr_release(struct inode *inode, struct file *file);
ssize_t simple_attr_read(struct file *file, char __user *buf,
			 size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char __user *buf,
			  size_t len, loff_t *ppos);
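/*
 * Illustrative sketch, not part of this header: DEFINE_SIMPLE_ATTRIBUTE()
 * builds the open method and file_operations for one such attribute from a
 * pair of get()/set() callbacks and a printf format for the value; the
 * resulting fops is then handed to whatever creates the file (debugfs, for
 * instance).  "example_*" names are hypothetical.
 *
 *	static u64 example_threshold;
 *
 *	static int example_threshold_get(void *data, u64 *val)
 *	{
 *		*val = example_threshold;
 *		return 0;
 *	}
 *
 *	static int example_threshold_set(void *data, u64 val)
 *	{
 *		example_threshold = val;
 *		return 0;
 *	}
 *	DEFINE_SIMPLE_ATTRIBUTE(example_threshold_fops, example_threshold_get,
 *				example_threshold_set, "%llu\n");
 */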
struct ctl_table;
int __init list_bdev_fs_names(char *buf, size_t size);

#define __FMODE_EXEC		((__force int) FMODE_EXEC)
#define __FMODE_NONOTIFY	((__force int) FMODE_NONOTIFY)

#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
					    (flag & __FMODE_NONOTIFY)))

static inline bool is_sxid(umode_t mode)
{
	return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
}

static inline int check_sticky(struct user_namespace *mnt_userns,
			       struct inode *dir, struct inode *inode)
{
	if (!(dir->i_mode & S_ISVTX))
		return 0;

	return __check_sticky(mnt_userns, dir, inode);
}

static inline void inode_has_no_xattr(struct inode *inode)
{
	if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & SB_NOSEC))
		inode->i_flags |= S_NOSEC;
}

static inline bool is_root_inode(struct inode *inode)
{
	return inode == inode->i_sb->s_root->d_inode;
}

static inline bool dir_emit(struct dir_context *ctx,
			    const char *name, int namelen,
			    u64 ino, unsigned type)
{
	return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
	return ctx->actor(ctx, ".", 1, ctx->pos,
			  file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
}
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
	return ctx->actor(ctx, "..", 2, ctx->pos,
			  parent_ino(file->f_path.dentry), DT_DIR) == 0;
}
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
	if (ctx->pos == 0) {
		if (!dir_emit_dot(file, ctx))
			return false;
		ctx->pos = 1;
	}
	if (ctx->pos == 1) {
		if (!dir_emit_dotdot(file, ctx))
			return false;
		ctx->pos = 2;
	}
	return true;
}
static inline bool dir_relax(struct inode *inode)
{
	inode_unlock(inode);
	inode_lock(inode);
	return !IS_DEADDIR(inode);
}

static inline bool dir_relax_shared(struct inode *inode)
{
	inode_unlock_shared(inode);
	inode_lock_shared(inode);
	return !IS_DEADDIR(inode);
}

extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);

/* mm/fadvise.c */
extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
		       int advice);
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
			   int advice);

#endif /* _LINUX_FS_H */