/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Berkeley style UIO structures - Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_UBUF,
	ITER_IOVEC,
	ITER_BVEC,
	ITER_KVEC,
	ITER_XARRAY,
	ITER_DISCARD,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool copy_mc;
	bool nofault;
	bool data_source;
	size_t iov_offset;
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		loff_t xarray_start;
	};
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return iter_is_ubuf(i) || iter_is_iovec(i);
}
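
/*
 * Usage sketch (illustrative only, not part of the kernel API): thanks to
 * the overlay above, iter_iov() and the helpers that wrap it behave the
 * same way for ITER_UBUF and ITER_IOVEC, so a caller that only handles
 * user-backed iterators can walk the current segment without caring which
 * of the two types it was handed:
 *
 *	if (user_backed_iter(i)) {
 *		void __user *p = iter_iov_addr(i);	// base + iov_offset
 *		size_t len = iter_iov_len(i);	// bytes left in this segment
 *		...
 *	}
 *
 * For ITER_UBUF, iter_iov() returns the embedded __ubuf_iovec; for
 * ITER_IOVEC it returns the current element of __iov.
 */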

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
		size_t offset, size_t bytes, struct iov_iter *i)
{
	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
}

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
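
/*
 * Usage sketch (illustrative only): copy_from_iter() may copy fewer than
 * @bytes and leaves the iterator advanced by however much was copied,
 * whereas the _full variants revert the iterator on a short copy, giving
 * the caller all-or-nothing semantics.  A driver pulling a fixed-size
 * header out of a write request might do (struct foo_hdr is a made-up
 * example type):
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;		// iterator left where it was
 *	// success: 'from' has advanced by sizeof(hdr)
 */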

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users such as pmem, which depend on _copy_from_iter_flushcache()
 * having stricter semantics than _copy_from_iter_nocache(), must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
static inline void iov_iter_set_copy_mc(struct iov_iter *i)
{
	i->copy_mc = true;
}

static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return i->copy_mc;
}
#else
#define _copy_mc_to_iter _copy_to_iter
static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return false;
}
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * the conversion in the assignment is by definition greater than all
	 * values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
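
/*
 * Typical pattern (sketch; 'limit' stands for whatever bound the caller
 * must enforce): clip the iterator around an operation, then give back
 * whatever was clipped off.  iov_iter_npages_cap() below is exactly this
 * pattern wrapped around iov_iter_npages():
 *
 *	size_t shorted = 0;
 *
 *	if (iov_iter_count(i) > limit) {
 *		shorted = iov_iter_count(i) - limit;
 *		iov_iter_truncate(i, limit);
 *	}
 *	... operate on at most 'limit' bytes ...
 *	iov_iter_reexpand(i, iov_iter_count(i) + shorted);
 */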

/*
 * Re-expand a previously truncated iterator; count must be no more than how
 * much we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.copy_mc = false,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}

/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false as to how, if
 * at all, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO to force fork()
 * to copy the page for the child (the parent must retain the original page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}

struct sg_table;
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
			   struct sg_table *sgtable, unsigned int sg_max,
			   iov_iter_extraction_t extraction_flags);

#endif