include/linux/dax.h at v4.19
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
};

extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
static inline void put_dax(struct dax_device *dax_dev)
{
}
static inline void kill_dax(struct dax_device *dax_dev)
{
}
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
#endif

struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return dax_get_by_host(host);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);

struct page *dax_layout_busy_page(struct address_space *mapping);
bool dax_lock_mapping_entry(struct page *page);
void dax_unlock_mapping_entry(struct page *page);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return NULL;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}

static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}

static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}

static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}

static inline bool dax_lock_mapping_entry(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return true;
	return false;
}

static inline void dax_unlock_mapping_entry(struct page *page)
{
}
#endif

int dax_read_lock(void);
void dax_read_unlock(int id);
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);

#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

#endif
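
For reference, a minimal provider-side sketch of the dax_operations / alloc_dax() API declared above, loosely modelled on how a DAX-capable block driver registers itself. The "foo" device, its helpers, and the assumption of a single contiguous, already-mapped physical range are hypothetical; a real driver (for example drivers/nvdimm/pmem.c) additionally handles partial pages, cache flushing variants, and media errors.

/*
 * Sketch only: hypothetical "foo" driver exposing one contiguous,
 * pre-mapped physical range through struct dax_operations.
 */
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/uio.h>
#include <linux/kernel.h>

struct foo_dev {
	struct dax_device *dax_dev;
	phys_addr_t phys_addr;	/* start of the DAX-capable range */
	void *virt_addr;	/* kernel mapping of that range */
	size_t size;		/* range length, page aligned */
};

/* Translate a device page offset into a kernel address and a pfn. */
static long foo_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct foo_dev *foo = dax_get_private(dax_dev);
	resource_size_t offset = PFN_PHYS(pgoff);

	if (offset >= foo->size)
		return -ERANGE;
	if (kaddr)
		*kaddr = foo->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(foo->phys_addr + offset,
				PFN_DEV | PFN_MAP);

	/* Number of whole pages available for DAX from this offset. */
	return min_t(long, nr_pages, PHYS_PFN(foo->size - offset));
}

/* fs-dax direct-i/o copies; a real driver may prefer flushing/mcsafe variants */
static size_t foo_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter(addr, bytes, i);
}

static size_t foo_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

static const struct dax_operations foo_dax_ops = {
	.direct_access = foo_dax_direct_access,
	.copy_from_iter = foo_dax_copy_from_iter,
	.copy_to_iter = foo_dax_copy_to_iter,
};

static int foo_register_dax(struct foo_dev *foo, const char *host)
{
	foo->dax_dev = alloc_dax(foo, host, &foo_dax_ops);
	if (!foo->dax_dev)
		return -ENOMEM;
	return 0;
}

/* Teardown mirrors the header: kill_dax(), then drop the last reference. */
static void foo_unregister_dax(struct foo_dev *foo)
{
	kill_dax(foo->dax_dev);
	put_dax(foo->dax_dev);
}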
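
And a consumer-side sketch of the dax_read_lock() / dax_direct_access() pattern declared near the end of the header. foo_peek() and its pr_info() are again illustrative only; the point is that the read lock keeps the result valid against a concurrent kill_dax() for as long as it is held.

#include <linux/dax.h>
#include <linux/printk.h>

/* Resolve a kernel mapping for one page of a dax_device and report it. */
static int foo_peek(struct dax_device *dax_dev, pgoff_t pgoff)
{
	void *kaddr;
	pfn_t pfn;
	long avail;
	int id;

	id = dax_read_lock();
	avail = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	if (avail < 0) {
		/* device was killed, or pgoff is out of range */
		dax_read_unlock(id);
		return avail;
	}

	/* kaddr is directly addressable memory; use it while the lock is held */
	pr_info("pgoff %lu: %ld page(s) mappable at %p\n",
			pgoff, avail, kaddr);

	dax_read_unlock(id);
	return 0;
}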