/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	ITER_IOVEC = 0,
	ITER_KVEC = 2,
	ITER_BVEC = 4,
	ITER_PIPE = 8,
	ITER_DISCARD = 16,
};

struct iov_iter {
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}
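
/*
 * Illustrative sketch (not part of the original header), assuming the
 * kernel's usual READ/WRITE direction values: a consumer handed an
 * iov_iter can combine the helpers above to branch on both the iterator
 * flavour and the data direction.  The function name is hypothetical.
 *
 *	static bool example_is_userspace_write(const struct iov_iter *i)
 *	{
 *		return iter_is_iovec(i) && iov_iter_rw(i) == WRITE;
 *	}
 */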

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

#define iov_for_each(iov, iter, start)				\
	if (iov_iter_type(start) == ITER_IOVEC ||		\
	    iov_iter_type(start) == ITER_KVEC)			\
	for (iter = (start);					\
	     (iter).count &&					\
	     ((iov = iov_iter_iovec(&(iter))), 1);		\
	     iov_iter_advance(&(iter), (iov).iov_len))

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}
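
/*
 * Illustrative sketch (not part of the original header): copying a kernel
 * structure into whatever buffers the iterator describes and treating a
 * short copy as a fault.  The struct and variable names are hypothetical.
 *
 *	struct example_stat st = { 0 };
 *
 *	if (copy_to_iter(&st, sizeof(st), iter) != sizeof(st))
 *		return -EFAULT;
 */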

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on copy_from_iter_flushcache() having
 * stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for such.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
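
/*
 * Illustrative sketch (not part of the original header): wrapping a single
 * kernel buffer in a kvec-backed iterator and capping it to a caller-supplied
 * limit before handing it to a ->read_iter()-style consumer.  The buf,
 * buflen and limit names are hypothetical.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = buflen };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, buflen);
 *	iov_iter_truncate(&iter, limit);
 */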

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);

#endif
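
/*
 * Illustrative sketch (not part of the original header): the usual calling
 * pattern for import_iovec() in a readv()-style path, with a small on-stack
 * array for the common case.  import_iovec() may replace *iov with a
 * kmalloc()'ed copy that the caller must free; do_example_io() and uvec
 * are hypothetical.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_example_io(&iter);
 *	kfree(iov);
 *	return ret;
 */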