/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SHMEM_FS_H
#define __SHMEM_FS_H

#include <linux/file.h>
#include <linux/swap.h>
#include <linux/mempolicy.h>
#include <linux/pagemap.h>
#include <linux/percpu_counter.h>
#include <linux/xattr.h>
#include <linux/fs_parser.h>
#include <linux/userfaultfd_k.h>
#include <linux/bits.h>

struct swap_iocb;

/* inode in-kernel data */

#ifdef CONFIG_TMPFS_QUOTA
#define SHMEM_MAXQUOTAS 2
#endif

/* Suppress pre-accounting of the entire object size. */
#define SHMEM_F_NORESERVE	BIT(0)
/* Disallow swapping. */
#define SHMEM_F_LOCKED		BIT(1)
/*
 * Disallow growing, shrinking, or hole punching in the inode. Combined with
 * folio pinning, makes sure the inode's mapping stays fixed.
 *
 * In some ways similar to F_SEAL_GROW | F_SEAL_SHRINK, but can be removed and
 * isn't directly visible to userspace.
 */
#define SHMEM_F_MAPPING_FROZEN	BIT(2)

struct shmem_inode_info {
	spinlock_t		lock;
	unsigned int		seals;		/* shmem seals */
	unsigned long		flags;
	unsigned long		alloced;	/* data pages alloced to file */
	unsigned long		swapped;	/* subtotal assigned to swap */
	union {
		struct offset_ctx	dir_offsets;	/* stable directory offsets */
		struct {
			struct list_head	shrinklist;	/* shrinkable hpage inodes */
			struct list_head	swaplist;	/* chain of maybes on swap */
		};
	};
	struct timespec64	i_crtime;	/* file creation time */
	struct shared_policy	policy;		/* NUMA memory alloc policy */
	struct simple_xattrs	xattrs;		/* list of xattrs */
	pgoff_t			fallocend;	/* highest fallocate endindex */
	unsigned int		fsflags;	/* for FS_IOC_[SG]ETFLAGS */
	atomic_t		stop_eviction;	/* hold when working on inode */
#ifdef CONFIG_TMPFS_QUOTA
	struct dquot __rcu	*i_dquot[MAXQUOTAS];
#endif
	struct inode		vfs_inode;
};

#define SHMEM_FL_USER_VISIBLE		(FS_FL_USER_VISIBLE | FS_CASEFOLD_FL)
#define SHMEM_FL_USER_MODIFIABLE \
	(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)
#define SHMEM_FL_INHERITED		(FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)

struct shmem_quota_limits {
	qsize_t usrquota_bhardlimit;	/* Default user quota block hard limit */
	qsize_t usrquota_ihardlimit;	/* Default user quota inode hard limit */
	qsize_t grpquota_bhardlimit;	/* Default group quota block hard limit */
	qsize_t grpquota_ihardlimit;	/* Default group quota inode hard limit */
};

struct shmem_sb_info {
	unsigned long max_blocks;	/* How many blocks are allowed */
	struct percpu_counter used_blocks; /* How many are allocated */
	unsigned long max_inodes;	/* How many inodes are allowed */
	unsigned long free_ispace;	/* How much ispace left for allocation */
	raw_spinlock_t stat_lock;	/* Serialize shmem_sb_info changes */
	umode_t mode;			/* Mount mode for root directory */
	unsigned char huge;		/* Whether to try for hugepages */
	kuid_t uid;			/* Mount uid for root directory */
	kgid_t gid;			/* Mount gid for root directory */
	bool full_inums;		/* If i_ino should be uint or ino_t */
	bool noswap;			/* ignores VM reclaim / swap requests */
	ino_t next_ino;			/* The next per-sb inode number to use */
	ino_t __percpu *ino_batch;	/* The next per-cpu inode number to use */
	struct mempolicy *mpol;		/* default memory policy for mappings */
	spinlock_t shrinklist_lock;	/* Protects shrinklist */
	struct list_head shrinklist;	/* List of shrinkable inodes */
	unsigned long shrinklist_len;	/* Length of shrinklist */
	struct shmem_quota_limits qlimits; /* Default quota limits */
};
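/*
 * Illustrative sketch, not part of the upstream header: how a caller might
 * test the limits above before charging pages. The helper name is made up,
 * and mm/shmem.c's real accounting is more involved; this only shows the
 * intended relationship between used_blocks and max_blocks.
 *
 *	static bool sketch_shmem_has_space(struct shmem_sb_info *sbinfo,
 *					   long pages)
 *	{
 *		if (!sbinfo->max_blocks)	// 0 means "no limit"
 *			return true;
 *		return percpu_counter_compare(&sbinfo->used_blocks,
 *					      sbinfo->max_blocks - pages) <= 0;
 *	}
 */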
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
{
	return container_of(inode, struct shmem_inode_info, vfs_inode);
}

/*
 * Functions in mm/shmem.c called directly from elsewhere:
 */
extern const struct fs_parameter_spec shmem_fs_parameters[];
extern void shmem_init(void);
extern int shmem_init_fs_context(struct fs_context *fc);
extern struct file *shmem_file_setup(const char *name,
					loff_t size, unsigned long flags);
extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
					    unsigned long flags);
extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
		const char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *vma);
int shmem_zero_setup_desc(struct vm_area_desc *desc);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
bool shmem_mapping(const struct address_space *mapping);
#else
static inline bool shmem_mapping(const struct address_space *mapping)
{
	return false;
}
#endif /* CONFIG_SHMEM */
void shmem_unlock_mapping(struct address_space *mapping);
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp_mask);
int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
		struct list_head *folio_list);
void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long shmem_allowable_huge_orders(struct inode *inode,
				struct vm_area_struct *vma, pgoff_t index,
				loff_t write_end, bool shmem_huge_force);
bool shmem_hpage_pmd_enabled(void);
#else
static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
				struct vm_area_struct *vma, pgoff_t index,
				loff_t write_end, bool shmem_huge_force)
{
	return 0;
}

static inline bool shmem_hpage_pmd_enabled(void)
{
	return false;
}
#endif

#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern void shmem_uncharge(struct inode *inode, long pages);
#else
static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	return 0;
}

static inline void shmem_uncharge(struct inode *inode, long pages)
{
}
#endif
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end);

/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_NOALLOC,	/* similar, but fail on hole or use fallocated page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
		struct folio **foliop, enum sgp_type sgp);
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
		pgoff_t index, gfp_t gfp);

static inline struct folio *shmem_read_folio(struct address_space *mapping,
		pgoff_t index)
{
	return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
}
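/*
 * Illustrative sketch, not part of the upstream header: a kernel user
 * creating an unlinked shmem-backed file and reading back its first folio.
 * The name string and size are arbitrary and error paths are trimmed.
 *
 *	struct file *filp = shmem_kernel_file_setup("example", 1UL << 20, 0);
 *	struct folio *folio;
 *
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	folio = shmem_read_folio(filp->f_mapping, 0);
 *	if (!IS_ERR(folio)) {
 *		// ... use the folio's contents ...
 *		folio_put(folio);	// drop the reference it returned
 *	}
 *	fput(filp);
 */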
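/*
 * Illustrative sketch, not part of the upstream header: looking up (and,
 * with SGP_CACHE, possibly allocating) the folio at @index. On success the
 * folio comes back locked with a reference held, so the caller is expected
 * to unlock and release it.
 *
 *	struct folio *folio;
 *	int err = shmem_get_folio(inode, index, 0, &folio, SGP_CACHE);
 *
 *	if (err)
 *		return err;
 *	// ... operate on the locked folio ...
 *	folio_unlock(folio);
 *	folio_put(folio);
 */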
static inline struct page *shmem_read_mapping_page(
				struct address_space *mapping, pgoff_t index)
{
	return shmem_read_mapping_page_gfp(mapping, index,
					mapping_gfp_mask(mapping));
}

static inline bool shmem_file(struct file *file)
{
	if (!IS_ENABLED(CONFIG_SHMEM))
		return false;
	if (!file || !file->f_mapping)
		return false;
	return shmem_mapping(file->f_mapping);
}

/* Must be called with inode lock taken exclusive. */
static inline void shmem_freeze(struct inode *inode, bool freeze)
{
	if (freeze)
		SHMEM_I(inode)->flags |= SHMEM_F_MAPPING_FROZEN;
	else
		SHMEM_I(inode)->flags &= ~SHMEM_F_MAPPING_FROZEN;
}

/*
 * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages
 * beyond i_size's notion of EOF, which fallocate has committed to reserving:
 * which split_huge_page() must therefore not delete. This use of a single
 * "fallocend" per inode errs on the side of not deleting a reservation when
 * in doubt: there are plenty of cases when it preserves unreserved pages.
 */
static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
{
	return max(eof, SHMEM_I(inode)->fallocend);
}

extern bool shmem_charge(struct inode *inode, long pages);

#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
				  struct vm_area_struct *dst_vma,
				  unsigned long dst_addr,
				  unsigned long src_addr,
				  uffd_flags_t flags,
				  struct folio **foliop);
#else /* !CONFIG_SHMEM */
#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
			       src_addr, flags, foliop) ({ BUG(); 0; })
#endif /* CONFIG_SHMEM */
#endif /* CONFIG_USERFAULTFD */

/*
 * Used space is stored as unsigned 64-bit value in bytes but
 * quota core supports only signed 64-bit values so use that
 * as a limit
 */
#define SHMEM_QUOTA_MAX_SPC_LIMIT 0x7fffffffffffffffLL /* 2^63-1 */
#define SHMEM_QUOTA_MAX_INO_LIMIT 0x7fffffffffffffffLL

#ifdef CONFIG_TMPFS_QUOTA
extern const struct dquot_operations shmem_quota_operations;
extern struct quota_format_type shmem_quota_format;
#endif /* CONFIG_TMPFS_QUOTA */

#endif /* __SHMEM_FS_H */
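/*
 * Illustrative sketch, not part of the upstream header: rejecting non-shmem
 * files and then freezing the mapping, taking the exclusive inode lock that
 * shmem_freeze() requires.
 *
 *	struct inode *inode = file_inode(file);
 *
 *	if (!shmem_file(file))
 *		return -EINVAL;
 *	inode_lock(inode);
 *	shmem_freeze(inode, true);
 *	inode_unlock(inode);
 */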
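/*
 * Illustrative sketch, not part of the upstream header: a truncation or
 * split path extending its end index with shmem_fallocend() so it does not
 * drop pages that fallocate(FALLOC_FL_KEEP_SIZE) reserved beyond EOF.
 *
 *	pgoff_t eof = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 *	pgoff_t end = shmem_fallocend(inode, eof);	// never below eof
 */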