/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
        void *iov_base; /* and that should *never* hold a userland pointer */
        size_t iov_len;
};

enum {
        ITER_IOVEC = 0,
        ITER_KVEC = 2,
        ITER_BVEC = 4,
        ITER_PIPE = 8,
};

struct iov_iter {
        int type;
        size_t iov_offset;
        size_t count;
        union {
                const struct iovec *iov;
                const struct kvec *kvec;
                const struct bio_vec *bvec;
                struct pipe_inode_info *pipe;
        };
        union {
                unsigned long nr_segs;
                struct {
                        int idx;
                        int start_idx;
                };
        };
};

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
        unsigned long seg;
        size_t ret = 0;

        for (seg = 0; seg < nr_segs; seg++)
                ret += iov[seg].iov_len;
        return ret;
}

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
        return (struct iovec) {
                .iov_base = iter->iov->iov_base + iter->iov_offset,
                .iov_len = min(iter->count,
                               iter->iov->iov_len - iter->iov_offset),
        };
}

#define iov_for_each(iov, iter, start)                                  \
        if (!((start).type & (ITER_BVEC | ITER_PIPE)))                  \
        for (iter = (start);                                            \
             (iter).count &&                                            \
             ((iov = iov_iter_iovec(&(iter))), 1);                      \
             iov_iter_advance(&(iter), (iov).iov_len))

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                           struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter(addr, bytes, i);
}
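/*
 * Editorial example (not part of the upstream header): a minimal sketch of
 * copying a kernel buffer through an ITER_KVEC iterator, assuming
 * hypothetical locals "dst", "src" and "len".  As of v4.17 the direction
 * argument still carries the iterator type, so ITER_KVEC | READ marks an
 * iterator that data is copied *into*:
 *
 *      struct kvec kv = { .iov_base = dst, .iov_len = len };
 *      struct iov_iter iter;
 *      size_t copied;
 *
 *      iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, len);
 *      copied = copy_to_iter(src, len, &iter);
 *
 * copy_to_iter() returns the number of bytes actually copied; it can be
 * short for a user-backed (ITER_IOVEC) iterator that faults, but for a
 * kvec-backed destination it should equal len.
 */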
static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return false;
        else
                return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return false;
        else
                return _copy_from_iter_full_nocache(addr, bytes, i);
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_flushcache(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
                        size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
                        size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
        return i->count;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
        return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
}

/*
 * Get one of READ or WRITE out of iter->type without any other flags OR'd in
 * with it.
 *
 * The ?: is just for type safety.
 */
#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
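/*
 * Editorial example (not part of the upstream header): a sketch of the usual
 * direct-I/O pattern around iov_iter_get_pages() for a user-backed
 * (ITER_IOVEC) iterator.  "pages", "off" and "n" are hypothetical locals and
 * error handling is abbreviated:
 *
 *      struct page *pages[1];
 *      size_t off;
 *      ssize_t n;
 *
 *      n = iov_iter_get_pages(i, pages, PAGE_SIZE, 1, &off);
 *      if (n <= 0)
 *              return n;
 *      ... do I/O against pages[0], starting at offset "off", for n bytes ...
 *      iov_iter_advance(i, n);
 *      put_page(pages[0]);
 *
 * iov_iter_get_pages() does not advance the iterator itself, and the caller
 * is responsible for dropping the page references it takes.
 */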
/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
        /*
         * count doesn't have to fit in size_t - comparison extends both
         * operands to u64 here and any value that would be truncated by
         * conversion in assignment is by definition greater than all
         * values of size_t, including old i->count.
         */
        if (i->count > count)
                i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
        i->count = count;
}
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
                            int (*f)(struct kvec *vec, void *context),
                            void *context);

#endif
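/*
 * Editorial example (not part of the upstream header): a sketch of the
 * common syscall-side pattern around import_iovec(), in the style of the
 * readv() paths.  "uvec" and "nr" are hypothetical, and kfree() requires
 * <linux/slab.h>:
 *
 *      struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *      struct iov_iter iter;
 *      int ret;
 *
 *      ret = import_iovec(READ, uvec, nr, ARRAY_SIZE(iovstack), &iov, &iter);
 *      if (ret < 0)
 *              return ret;
 *      ... consume the data, e.g. via iov_iter_count() and copy_to_iter() ...
 *      kfree(iov);
 *
 * import_iovec() validates the user iovec array, falls back to a heap
 * allocation when nr exceeds fast_segs, and leaves *iov either NULL or
 * pointing at memory the caller must free, so the unconditional kfree()
 * above is safe.
 */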