/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* set if ITER_BVEC doesn't hold a bv_page ref */
	ITER_BVEC_FLAG_NO_REF = 2,

	/* iter types */
	ITER_IOVEC = 4,
	ITER_KVEC = 8,
	ITER_BVEC = 16,
	ITER_PIPE = 32,
	ITER_DISCARD = 64,
};

struct iov_iter {
	/*
	 * Bit 0 is the read/write bit, set if we're writing.
	 * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
	 * the caller isn't expecting to drop a page reference when done.
	 */
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}

static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i)
{
	return (i->type & ITER_BVEC_FLAG_NO_REF) != 0;
}
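
/*
 * Example (an illustrative sketch, not part of the original header):
 * the accessors above let callers dispatch on the iterator flavour and
 * data direction without poking at the type bits directly.  The helper
 * name below is hypothetical.
 *
 *	static void describe_iter(const struct iov_iter *i)
 *	{
 *		if (iov_iter_rw(i) == WRITE)
 *			pr_debug("iterator is a data source (a write)\n");
 *		if (iter_is_iovec(i))
 *			pr_debug("segments are userspace iovecs\n");
 *		else if (iov_iter_is_kvec(i))
 *			pr_debug("segments are kernel kvecs\n");
 *	}
 */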

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}
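
/*
 * Example (an illustrative sketch, not part of the original header):
 * a read-style path copies kernel data out through the iterator and
 * treats a short copy as a fault; copy_to_iter() returns the number
 * of bytes actually copied.  "kbuf", "len" and "iter" are
 * hypothetical caller state.
 *
 *	size_t copied = copy_to_iter(kbuf, len, &iter);
 *	if (copied != len)
 *		return -EFAULT;
 */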

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on copy_from_iter_flushcache()
 * having stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter at the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
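
/*
 * Example (an illustrative sketch, not part of the original header):
 * building a single-segment kernel-space iterator as a copy
 * destination, then capping it to the size of the object actually
 * available.  All names are hypothetical.
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = buf_size };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, buf_size);
 *	iov_iter_truncate(&iter, object_size);
 */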

/*
 * Reexpand a previously truncated iterator; count must be no more than
 * how much we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);

#endif
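
/*
 * Example (an illustrative sketch, not part of the original header):
 * a typical readv()-style caller imports user iovecs through a small
 * on-stack array; import_iovec() returns 0 on success and leaves *iov
 * either NULL (fast array used) or pointing at a kmalloc'ed array, so
 * an unconditional kfree() is safe.  Names other than the imported
 * APIs and UIO_FASTIOV are hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvector, nr_segs, UIO_FASTIOV,
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	kfree(iov);
 */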