/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool copy_mc;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that
		 * without also modifying all of the zero-filling iter
		 * init functions. Leave it non-const for now, but it
		 * should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		loff_t xarray_start;
	};
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
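
/*
 * Example (illustrative sketch, not part of this header): per the note
 * above, validate segment lengths before trusting iov_length(). This
 * hypothetical helper accumulates against a caller-supplied cap so the
 * running total can never wrap a size_t.
 */
static inline bool uio_example_iov_length_ok(const struct iovec *iov,
					     unsigned long nr_segs,
					     size_t limit)
{
	size_t total = 0;
	unsigned long seg;

	for (seg = 0; seg < nr_segs; seg++) {
		/* total + iov_len would exceed 'limit' (or overflow) */
		if (iov[seg].iov_len > limit - total)
			return false;
		total += iov[seg].iov_len;
	}
	return true;
}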

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
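
/*
 * Example (illustrative sketch, not part of this header): pulling a
 * fixed-size on-wire header out of an iterator. The struct and helper are
 * hypothetical, and -EFAULT assumes <linux/errno.h> is visible. On failure
 * copy_from_iter_full() reverts the iterator itself, so the source
 * position is unchanged.
 */
struct uio_example_hdr {
	u32 type;
	u32 len;
};

static inline int uio_example_read_hdr(struct uio_example_hdr *hdr,
				       struct iov_iter *from)
{
	if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
		return -EFAULT;
	return 0;
}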

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on _copy_from_iter_flushcache() having
 * stricter semantics than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
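
/*
 * Example (illustrative sketch, not part of this header): the check the
 * note above asks pmem-style users to make. Without
 * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE, _copy_from_iter_flushcache() is just
 * the nocache variant, so the caller must arrange cache flushing itself.
 */
static inline size_t uio_example_copy_flushed(void *addr, size_t bytes,
					      struct iov_iter *i)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
		return _copy_from_iter_flushcache(addr, bytes, i);
	/* data may still be dirty in the CPU cache after this copy */
	return _copy_from_iter_nocache(addr, bytes, i);
}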

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
static inline void iov_iter_set_copy_mc(struct iov_iter *i)
{
	i->copy_mc = true;
}

static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return i->copy_mc;
}
#else
#define _copy_mc_to_iter _copy_to_iter
static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return false;
}
#endif
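
/*
 * Example (illustrative sketch, not part of this header): opting in to
 * machine-check-friendly copies. On architectures without
 * CONFIG_ARCH_HAS_COPY_MC both calls below degrade to their plain
 * counterparts, so this hypothetical helper is safe unconditionally.
 */
static inline size_t uio_example_copy_mc(const void *addr, size_t bytes,
					 struct iov_iter *i)
{
	iov_iter_set_copy_mc(i);	/* no-op without ARCH_HAS_COPY_MC */
	return _copy_mc_to_iter(addr, bytes, i);
}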

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
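
/*
 * Example (illustrative sketch, not part of this header): wrapping two
 * kernel buffers in a kvec-backed iterator and filling them from a single
 * source. ITER_DEST marks the iterator as the destination of the copy; the
 * helper name is hypothetical.
 */
static inline size_t uio_example_fill_two_bufs(void *a, size_t alen,
					       void *b, size_t blen,
					       const void *src)
{
	struct kvec vec[2] = {
		{ .iov_base = a, .iov_len = alen },
		{ .iov_base = b, .iov_len = blen },
	};
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, vec, 2, alen + blen);
	return copy_to_iter(src, alen + blen, &iter);
}
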
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			    size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
				  size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
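
/*
 * Example (illustrative sketch, not part of this header): all-or-nothing
 * semantics built from iov_iter_save_state() and iov_iter_restore(). The
 * snapshot is taken before a copy that may partially succeed; restoring it
 * rewinds the iterator so the caller sees no partial progress.
 */
static inline bool uio_example_copy_all_or_nothing(const void *src,
						   size_t bytes,
						   struct iov_iter *i)
{
	struct iov_iter_state state;

	iov_iter_save_state(i, &state);
	if (copy_to_iter(src, bytes, i) == bytes)
		return true;
	iov_iter_restore(i, &state);	/* undo the partial advance */
	return false;
}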

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}
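
/*
 * Example (illustrative sketch, not part of this header): walking an
 * iterator segment by segment with the iter_iov_addr()/iter_iov_len()
 * accessors defined earlier. Assumes no zero-length segments; the work
 * that would consume each segment's address is elided.
 */
static inline unsigned long uio_example_count_segs(struct iov_iter *i)
{
	unsigned long segs = 0;

	while (iov_iter_count(i)) {
		/* ... use iter_iov_addr(i) for iter_iov_len(i) bytes ... */
		iov_iter_advance(i, iter_iov_len(i));
		segs++;
	}
	return segs;
}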

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it. Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * conversion in the assignment is by definition greater than all
	 * values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must not exceed the value
 * it had before the truncation.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
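
/*
 * Example (illustrative sketch, not part of this header): the usual
 * import_iovec() calling pattern for a writev-style path. On return the
 * iovec array lives either in 'stack' or in a kmalloc'ed buffer; in the
 * former case import_iovec() leaves 'iov' NULL, so the unconditional
 * kfree() is safe. kfree() comes from <linux/slab.h>, which this header
 * does not include.
 */
static inline ssize_t uio_example_import(const struct iovec __user *uvec,
					 unsigned long nr_segs)
{
	struct iovec stack[UIO_FASTIOV];
	struct iovec *iov = stack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(ITER_SOURCE, uvec, nr_segs, UIO_FASTIOV, &iov,
			   &iter);
	if (ret < 0)
		return ret;
	/* ... hand &iter to the actual I/O path before freeing ... */
	kfree(iov);
	return ret;
}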

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.copy_mc = false,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}
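
/*
 * Example (illustrative sketch, not part of this header): a read(2)-style
 * path needs no iovec array at all - a single user buffer becomes an
 * ITER_DEST iterator and the payload is pushed with copy_to_iter(). The
 * helper name is hypothetical.
 */
static inline size_t uio_example_push_to_user(void __user *buf, size_t len,
					      const void *src)
{
	struct iov_iter iter;

	iov_iter_ubuf(&iter, ITER_DEST, buf, len);
	return copy_to_iter(src, len, &iter);
}
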
/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and return true or false to indicate how, if at all,
 * pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin. This must be done for DMA/async DIO to force fork()
 * to forcibly copy a page for the child (the parent must retain the original
 * page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}
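
/*
 * Example (illustrative sketch, not part of this header): releasing pages
 * after iov_iter_extract_pages(). Whether pins were taken depends on the
 * iterator type, so cleanup must consult iov_iter_extract_will_pin().
 * unpin_user_page() is declared in <linux/mm.h>, which this header does
 * not pull in.
 */
static inline void uio_example_release_pages(const struct iov_iter *iter,
					     struct page **pages,
					     unsigned int npages)
{
	unsigned int n;

	if (!iov_iter_extract_will_pin(iter))
		return;	/* no pins taken; caller retains by other means */
	for (n = 0; n < npages; n++)
		unpin_user_page(pages[n]);
}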

struct sg_table;
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
			   struct sg_table *sgtable, unsigned int sg_max,
			   iov_iter_extraction_t extraction_flags);

#endif