/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>

/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)

typedef unsigned long dax_entry_t;

struct iomap_ops;
struct iomap;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* zero_page_range: required operation. Zero page range */
	int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
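/*
 * Illustrative sketch (not part of this header): a provider fills in a
 * dax_operations table and registers it with alloc_dax(). The my_pmem_*
 * names are hypothetical stand-ins for driver callbacks, modeled loosely
 * on drivers/nvdimm/pmem.c; generic_fsdax_supported() is declared later
 * in this header.
 *
 *	static const struct dax_operations my_pmem_dax_ops = {
 *		.direct_access = my_pmem_dax_direct_access,
 *		.dax_supported = generic_fsdax_supported,
 *		.copy_from_iter = my_pmem_copy_from_iter,
 *		.copy_to_iter = my_pmem_copy_to_iter,
 *		.zero_page_range = my_pmem_dax_zero_page_range,
 *	};
 *
 *	// DAXDEV_F_SYNC: device is synchronous, safe for MAP_SYNC mappings
 *	dax_dev = alloc_dax(my_pmem, disk->disk_name, &my_pmem_dax_ops,
 *			DAXDEV_F_SYNC);
 *	if (IS_ENABLED(CONFIG_DAX) && !dax_dev)
 *		return -ENOMEM;
 */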
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
bool __dax_synchronous(struct dax_device *dax_dev);
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
/*
 * Check if given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
					    struct dax_device *dax_dev)
{
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
#endif

struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
#define generic_fsdax_supported		NULL

static inline bool dax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t len)
{
	return false;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page_range(struct address_space *mapping,
		loff_t start, loff_t end)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif

#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
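/*
 * Illustrative sketch (not part of this header): callers address device
 * pages directly inside a dax_read_lock() section, which pins the device
 * alive for the duration. dax_dev, pgoff, buf and len are assumed to be
 * set up by the caller, with len within the page returned.
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long nr;
 *	int id;
 *
 *	id = dax_read_lock();
 *	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	if (nr < 0) {
 *		dax_read_unlock(id);
 *		return nr;		// device dead or pgoff out of range
 *	}
 *	memcpy(kaddr, buf, len);	// direct store into device memory
 *	dax_flush(dax_dev, kaddr, len);	// write back CPU cache if needed
 *	dax_read_unlock(id);
 */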
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index);
s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_device(int target_nid, struct resource *r);
#else
static inline void hmem_register_device(int target_nid, struct resource *r)
{
}
#endif

#endif
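/*
 * Illustrative sketch (not part of this header): a filesystem typically
 * drives dax_iomap_rw() from its read/write iterators, passing its own
 * iomap_ops. Modeled loosely on fs/ext4/file.c; my_iomap_ops is a
 * hypothetical stand-in, and the IS_DAX(inode) check that selects this
 * path is omitted for brevity.
 *
 *	ssize_t my_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &my_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */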