Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
lib/iov_iter.c at v6.15-rc2 (1933 lines, 50 kB)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
#include <linux/iov_iter.h>

static __always_inline
size_t copy_to_user_iter(void __user *iter_to, size_t progress,
			 size_t len, void *from, void *priv2)
{
	if (should_fail_usercopy())
		return len;
	if (access_ok(iter_to, len)) {
		from += progress;
		instrument_copy_to_user(iter_to, from, len);
		len = raw_copy_to_user(iter_to, from, len);
	}
	return len;
}

static __always_inline
size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
				 size_t len, void *from, void *priv2)
{
	ssize_t res;

	if (should_fail_usercopy())
		return len;

	from += progress;
	res = copy_to_user_nofault(iter_to, from, len);
	return res < 0 ? len : res;
}

static __always_inline
size_t copy_from_user_iter(void __user *iter_from, size_t progress,
			   size_t len, void *to, void *priv2)
{
	size_t res = len;

	if (should_fail_usercopy())
		return len;
	if (access_ok(iter_from, len)) {
		to += progress;
		instrument_copy_from_user_before(to, iter_from, len);
		res = raw_copy_from_user(to, iter_from, len);
		instrument_copy_from_user_after(to, iter_from, len, res);
	}
	return res;
}

static __always_inline
size_t memcpy_to_iter(void *iter_to, size_t progress,
		      size_t len, void *from, void *priv2)
{
	memcpy(iter_to, from + progress, len);
	return 0;
}

static __always_inline
size_t memcpy_from_iter(void *iter_from, size_t progress,
			size_t len, void *to, void *priv2)
{
	memcpy(to + progress, iter_from, len);
	return 0;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size. For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults. This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
127 * 128 * Returns the number of bytes not faulted in, like copy_to_user() and 129 * copy_from_user(). 130 * 131 * Always returns 0 for non-user-space iterators. 132 */ 133size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) 134{ 135 if (iter_is_ubuf(i)) { 136 size_t n = min(size, iov_iter_count(i)); 137 n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n); 138 return size - n; 139 } else if (iter_is_iovec(i)) { 140 size_t count = min(size, iov_iter_count(i)); 141 const struct iovec *p; 142 size_t skip; 143 144 size -= count; 145 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { 146 size_t len = min(count, p->iov_len - skip); 147 size_t ret; 148 149 if (unlikely(!len)) 150 continue; 151 ret = fault_in_safe_writeable(p->iov_base + skip, len); 152 count -= len - ret; 153 if (ret) 154 break; 155 } 156 return count + size; 157 } 158 return 0; 159} 160EXPORT_SYMBOL(fault_in_iov_iter_writeable); 161 162void iov_iter_init(struct iov_iter *i, unsigned int direction, 163 const struct iovec *iov, unsigned long nr_segs, 164 size_t count) 165{ 166 WARN_ON(direction & ~(READ | WRITE)); 167 *i = (struct iov_iter) { 168 .iter_type = ITER_IOVEC, 169 .nofault = false, 170 .data_source = direction, 171 .__iov = iov, 172 .nr_segs = nr_segs, 173 .iov_offset = 0, 174 .count = count 175 }; 176} 177EXPORT_SYMBOL(iov_iter_init); 178 179size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 180{ 181 if (WARN_ON_ONCE(i->data_source)) 182 return 0; 183 if (user_backed_iter(i)) 184 might_fault(); 185 return iterate_and_advance(i, bytes, (void *)addr, 186 copy_to_user_iter, memcpy_to_iter); 187} 188EXPORT_SYMBOL(_copy_to_iter); 189 190#ifdef CONFIG_ARCH_HAS_COPY_MC 191static __always_inline 192size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress, 193 size_t len, void *from, void *priv2) 194{ 195 if (access_ok(iter_to, len)) { 196 from += progress; 197 instrument_copy_to_user(iter_to, from, len); 198 len = copy_mc_to_user(iter_to, from, len); 199 } 200 return len; 201} 202 203static __always_inline 204size_t memcpy_to_iter_mc(void *iter_to, size_t progress, 205 size_t len, void *from, void *priv2) 206{ 207 return copy_mc_to_kernel(iter_to, from + progress, len); 208} 209 210/** 211 * _copy_mc_to_iter - copy to iter with source memory error exception handling 212 * @addr: source kernel address 213 * @bytes: total transfer length 214 * @i: destination iterator 215 * 216 * The pmem driver deploys this for the dax operation 217 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the 218 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes 219 * successfully copied. 220 * 221 * The main differences between this and typical _copy_to_iter(). 222 * 223 * * Typical tail/residue handling after a fault retries the copy 224 * byte-by-byte until the fault happens again. Re-triggering machine 225 * checks is potentially fatal so the implementation uses source 226 * alignment and poison alignment assumptions to avoid re-triggering 227 * hardware exceptions. 228 * 229 * * ITER_KVEC and ITER_BVEC can return short copies. Compare to 230 * copy_to_iter() where only ITER_IOVEC attempts might return a short copy. 
231 * 232 * Return: number of bytes copied (may be %0) 233 */ 234size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 235{ 236 if (WARN_ON_ONCE(i->data_source)) 237 return 0; 238 if (user_backed_iter(i)) 239 might_fault(); 240 return iterate_and_advance(i, bytes, (void *)addr, 241 copy_to_user_iter_mc, memcpy_to_iter_mc); 242} 243EXPORT_SYMBOL_GPL(_copy_mc_to_iter); 244#endif /* CONFIG_ARCH_HAS_COPY_MC */ 245 246static __always_inline 247size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) 248{ 249 return iterate_and_advance(i, bytes, addr, 250 copy_from_user_iter, memcpy_from_iter); 251} 252 253size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) 254{ 255 if (WARN_ON_ONCE(!i->data_source)) 256 return 0; 257 258 if (user_backed_iter(i)) 259 might_fault(); 260 return __copy_from_iter(addr, bytes, i); 261} 262EXPORT_SYMBOL(_copy_from_iter); 263 264static __always_inline 265size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress, 266 size_t len, void *to, void *priv2) 267{ 268 return __copy_from_user_inatomic_nocache(to + progress, iter_from, len); 269} 270 271size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) 272{ 273 if (WARN_ON_ONCE(!i->data_source)) 274 return 0; 275 276 return iterate_and_advance(i, bytes, addr, 277 copy_from_user_iter_nocache, 278 memcpy_from_iter); 279} 280EXPORT_SYMBOL(_copy_from_iter_nocache); 281 282#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE 283static __always_inline 284size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress, 285 size_t len, void *to, void *priv2) 286{ 287 return __copy_from_user_flushcache(to + progress, iter_from, len); 288} 289 290static __always_inline 291size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress, 292 size_t len, void *to, void *priv2) 293{ 294 memcpy_flushcache(to + progress, iter_from, len); 295 return 0; 296} 297 298/** 299 * _copy_from_iter_flushcache - write destination through cpu cache 300 * @addr: destination kernel address 301 * @bytes: total transfer length 302 * @i: source iterator 303 * 304 * The pmem driver arranges for filesystem-dax to use this facility via 305 * dax_copy_from_iter() for ensuring that writes to persistent memory 306 * are flushed through the CPU cache. It is differentiated from 307 * _copy_from_iter_nocache() in that guarantees all data is flushed for 308 * all iterator types. The _copy_from_iter_nocache() only attempts to 309 * bypass the cache for the ITER_IOVEC case, and on some archs may use 310 * instructions that strand dirty-data in the cache. 311 * 312 * Return: number of bytes copied (may be %0) 313 */ 314size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) 315{ 316 if (WARN_ON_ONCE(!i->data_source)) 317 return 0; 318 319 return iterate_and_advance(i, bytes, addr, 320 copy_from_user_iter_flushcache, 321 memcpy_from_iter_flushcache); 322} 323EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); 324#endif 325 326static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) 327{ 328 struct page *head; 329 size_t v = n + offset; 330 331 /* 332 * The general case needs to access the page order in order 333 * to compute the page size. 334 * However, we mostly deal with order-0 pages and thus can 335 * avoid a possible cache line miss for requests that fit all 336 * page orders. 
337 */ 338 if (n <= v && v <= PAGE_SIZE) 339 return true; 340 341 head = compound_head(page); 342 v += (page - head) << PAGE_SHIFT; 343 344 if (WARN_ON(n > v || v > page_size(head))) 345 return false; 346 return true; 347} 348 349size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, 350 struct iov_iter *i) 351{ 352 size_t res = 0; 353 if (!page_copy_sane(page, offset, bytes)) 354 return 0; 355 if (WARN_ON_ONCE(i->data_source)) 356 return 0; 357 page += offset / PAGE_SIZE; // first subpage 358 offset %= PAGE_SIZE; 359 while (1) { 360 void *kaddr = kmap_local_page(page); 361 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); 362 n = _copy_to_iter(kaddr + offset, n, i); 363 kunmap_local(kaddr); 364 res += n; 365 bytes -= n; 366 if (!bytes || !n) 367 break; 368 offset += n; 369 if (offset == PAGE_SIZE) { 370 page++; 371 offset = 0; 372 } 373 } 374 return res; 375} 376EXPORT_SYMBOL(copy_page_to_iter); 377 378size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes, 379 struct iov_iter *i) 380{ 381 size_t res = 0; 382 383 if (!page_copy_sane(page, offset, bytes)) 384 return 0; 385 if (WARN_ON_ONCE(i->data_source)) 386 return 0; 387 page += offset / PAGE_SIZE; // first subpage 388 offset %= PAGE_SIZE; 389 while (1) { 390 void *kaddr = kmap_local_page(page); 391 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); 392 393 n = iterate_and_advance(i, n, kaddr + offset, 394 copy_to_user_iter_nofault, 395 memcpy_to_iter); 396 kunmap_local(kaddr); 397 res += n; 398 bytes -= n; 399 if (!bytes || !n) 400 break; 401 offset += n; 402 if (offset == PAGE_SIZE) { 403 page++; 404 offset = 0; 405 } 406 } 407 return res; 408} 409EXPORT_SYMBOL(copy_page_to_iter_nofault); 410 411size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, 412 struct iov_iter *i) 413{ 414 size_t res = 0; 415 if (!page_copy_sane(page, offset, bytes)) 416 return 0; 417 page += offset / PAGE_SIZE; // first subpage 418 offset %= PAGE_SIZE; 419 while (1) { 420 void *kaddr = kmap_local_page(page); 421 size_t n = min(bytes, (size_t)PAGE_SIZE - offset); 422 n = _copy_from_iter(kaddr + offset, n, i); 423 kunmap_local(kaddr); 424 res += n; 425 bytes -= n; 426 if (!bytes || !n) 427 break; 428 offset += n; 429 if (offset == PAGE_SIZE) { 430 page++; 431 offset = 0; 432 } 433 } 434 return res; 435} 436EXPORT_SYMBOL(copy_page_from_iter); 437 438static __always_inline 439size_t zero_to_user_iter(void __user *iter_to, size_t progress, 440 size_t len, void *priv, void *priv2) 441{ 442 return clear_user(iter_to, len); 443} 444 445static __always_inline 446size_t zero_to_iter(void *iter_to, size_t progress, 447 size_t len, void *priv, void *priv2) 448{ 449 memset(iter_to, 0, len); 450 return 0; 451} 452 453size_t iov_iter_zero(size_t bytes, struct iov_iter *i) 454{ 455 return iterate_and_advance(i, bytes, NULL, 456 zero_to_user_iter, zero_to_iter); 457} 458EXPORT_SYMBOL(iov_iter_zero); 459 460size_t copy_page_from_iter_atomic(struct page *page, size_t offset, 461 size_t bytes, struct iov_iter *i) 462{ 463 size_t n, copied = 0; 464 bool uses_kmap = IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || 465 PageHighMem(page); 466 467 if (!page_copy_sane(page, offset, bytes)) 468 return 0; 469 if (WARN_ON_ONCE(!i->data_source)) 470 return 0; 471 472 do { 473 char *p; 474 475 n = bytes - copied; 476 if (uses_kmap) { 477 page += offset / PAGE_SIZE; 478 offset %= PAGE_SIZE; 479 n = min_t(size_t, n, PAGE_SIZE - offset); 480 } 481 482 p = kmap_atomic(page) + offset; 483 n = __copy_from_iter(p, n, i); 484 
kunmap_atomic(p); 485 copied += n; 486 offset += n; 487 } while (uses_kmap && copied != bytes && n > 0); 488 489 return copied; 490} 491EXPORT_SYMBOL(copy_page_from_iter_atomic); 492 493static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) 494{ 495 const struct bio_vec *bvec, *end; 496 497 if (!i->count) 498 return; 499 i->count -= size; 500 501 size += i->iov_offset; 502 503 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { 504 if (likely(size < bvec->bv_len)) 505 break; 506 size -= bvec->bv_len; 507 } 508 i->iov_offset = size; 509 i->nr_segs -= bvec - i->bvec; 510 i->bvec = bvec; 511} 512 513static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) 514{ 515 const struct iovec *iov, *end; 516 517 if (!i->count) 518 return; 519 i->count -= size; 520 521 size += i->iov_offset; // from beginning of current segment 522 for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) { 523 if (likely(size < iov->iov_len)) 524 break; 525 size -= iov->iov_len; 526 } 527 i->iov_offset = size; 528 i->nr_segs -= iov - iter_iov(i); 529 i->__iov = iov; 530} 531 532static void iov_iter_folioq_advance(struct iov_iter *i, size_t size) 533{ 534 const struct folio_queue *folioq = i->folioq; 535 unsigned int slot = i->folioq_slot; 536 537 if (!i->count) 538 return; 539 i->count -= size; 540 541 if (slot >= folioq_nr_slots(folioq)) { 542 folioq = folioq->next; 543 slot = 0; 544 } 545 546 size += i->iov_offset; /* From beginning of current segment. */ 547 do { 548 size_t fsize = folioq_folio_size(folioq, slot); 549 550 if (likely(size < fsize)) 551 break; 552 size -= fsize; 553 slot++; 554 if (slot >= folioq_nr_slots(folioq) && folioq->next) { 555 folioq = folioq->next; 556 slot = 0; 557 } 558 } while (size); 559 560 i->iov_offset = size; 561 i->folioq_slot = slot; 562 i->folioq = folioq; 563} 564 565void iov_iter_advance(struct iov_iter *i, size_t size) 566{ 567 if (unlikely(i->count < size)) 568 size = i->count; 569 if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) { 570 i->iov_offset += size; 571 i->count -= size; 572 } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { 573 /* iovec and kvec have identical layouts */ 574 iov_iter_iovec_advance(i, size); 575 } else if (iov_iter_is_bvec(i)) { 576 iov_iter_bvec_advance(i, size); 577 } else if (iov_iter_is_folioq(i)) { 578 iov_iter_folioq_advance(i, size); 579 } else if (iov_iter_is_discard(i)) { 580 i->count -= size; 581 } 582} 583EXPORT_SYMBOL(iov_iter_advance); 584 585static void iov_iter_folioq_revert(struct iov_iter *i, size_t unroll) 586{ 587 const struct folio_queue *folioq = i->folioq; 588 unsigned int slot = i->folioq_slot; 589 590 for (;;) { 591 size_t fsize; 592 593 if (slot == 0) { 594 folioq = folioq->prev; 595 slot = folioq_nr_slots(folioq); 596 } 597 slot--; 598 599 fsize = folioq_folio_size(folioq, slot); 600 if (unroll <= fsize) { 601 i->iov_offset = fsize - unroll; 602 break; 603 } 604 unroll -= fsize; 605 } 606 607 i->folioq_slot = slot; 608 i->folioq = folioq; 609} 610 611void iov_iter_revert(struct iov_iter *i, size_t unroll) 612{ 613 if (!unroll) 614 return; 615 if (WARN_ON(unroll > MAX_RW_COUNT)) 616 return; 617 i->count += unroll; 618 if (unlikely(iov_iter_is_discard(i))) 619 return; 620 if (unroll <= i->iov_offset) { 621 i->iov_offset -= unroll; 622 return; 623 } 624 unroll -= i->iov_offset; 625 if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) { 626 BUG(); /* We should never go beyond the start of the specified 627 * range since we might then be straying into pages that 628 
* aren't pinned. 629 */ 630 } else if (iov_iter_is_bvec(i)) { 631 const struct bio_vec *bvec = i->bvec; 632 while (1) { 633 size_t n = (--bvec)->bv_len; 634 i->nr_segs++; 635 if (unroll <= n) { 636 i->bvec = bvec; 637 i->iov_offset = n - unroll; 638 return; 639 } 640 unroll -= n; 641 } 642 } else if (iov_iter_is_folioq(i)) { 643 i->iov_offset = 0; 644 iov_iter_folioq_revert(i, unroll); 645 } else { /* same logics for iovec and kvec */ 646 const struct iovec *iov = iter_iov(i); 647 while (1) { 648 size_t n = (--iov)->iov_len; 649 i->nr_segs++; 650 if (unroll <= n) { 651 i->__iov = iov; 652 i->iov_offset = n - unroll; 653 return; 654 } 655 unroll -= n; 656 } 657 } 658} 659EXPORT_SYMBOL(iov_iter_revert); 660 661/* 662 * Return the count of just the current iov_iter segment. 663 */ 664size_t iov_iter_single_seg_count(const struct iov_iter *i) 665{ 666 if (i->nr_segs > 1) { 667 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 668 return min(i->count, iter_iov(i)->iov_len - i->iov_offset); 669 if (iov_iter_is_bvec(i)) 670 return min(i->count, i->bvec->bv_len - i->iov_offset); 671 } 672 if (unlikely(iov_iter_is_folioq(i))) 673 return !i->count ? 0 : 674 umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count); 675 return i->count; 676} 677EXPORT_SYMBOL(iov_iter_single_seg_count); 678 679void iov_iter_kvec(struct iov_iter *i, unsigned int direction, 680 const struct kvec *kvec, unsigned long nr_segs, 681 size_t count) 682{ 683 WARN_ON(direction & ~(READ | WRITE)); 684 *i = (struct iov_iter){ 685 .iter_type = ITER_KVEC, 686 .data_source = direction, 687 .kvec = kvec, 688 .nr_segs = nr_segs, 689 .iov_offset = 0, 690 .count = count 691 }; 692} 693EXPORT_SYMBOL(iov_iter_kvec); 694 695void iov_iter_bvec(struct iov_iter *i, unsigned int direction, 696 const struct bio_vec *bvec, unsigned long nr_segs, 697 size_t count) 698{ 699 WARN_ON(direction & ~(READ | WRITE)); 700 *i = (struct iov_iter){ 701 .iter_type = ITER_BVEC, 702 .data_source = direction, 703 .bvec = bvec, 704 .nr_segs = nr_segs, 705 .iov_offset = 0, 706 .count = count 707 }; 708} 709EXPORT_SYMBOL(iov_iter_bvec); 710 711/** 712 * iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue 713 * @i: The iterator to initialise. 714 * @direction: The direction of the transfer. 715 * @folioq: The starting point in the folio queue. 716 * @first_slot: The first slot in the folio queue to use 717 * @offset: The offset into the folio in the first slot to start at 718 * @count: The size of the I/O buffer in bytes. 719 * 720 * Set up an I/O iterator to either draw data out of the pages attached to an 721 * inode or to inject data into those pages. The pages *must* be prevented 722 * from evaporation, either by taking a ref on them or locking them by the 723 * caller. 724 */ 725void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction, 726 const struct folio_queue *folioq, unsigned int first_slot, 727 unsigned int offset, size_t count) 728{ 729 BUG_ON(direction & ~1); 730 *i = (struct iov_iter) { 731 .iter_type = ITER_FOLIOQ, 732 .data_source = direction, 733 .folioq = folioq, 734 .folioq_slot = first_slot, 735 .count = count, 736 .iov_offset = offset, 737 }; 738} 739EXPORT_SYMBOL(iov_iter_folio_queue); 740 741/** 742 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray 743 * @i: The iterator to initialise. 744 * @direction: The direction of the transfer. 745 * @xarray: The xarray to access. 746 * @start: The start file position. 747 * @count: The size of the I/O buffer in bytes. 
748 * 749 * Set up an I/O iterator to either draw data out of the pages attached to an 750 * inode or to inject data into those pages. The pages *must* be prevented 751 * from evaporation, either by taking a ref on them or locking them by the 752 * caller. 753 */ 754void iov_iter_xarray(struct iov_iter *i, unsigned int direction, 755 struct xarray *xarray, loff_t start, size_t count) 756{ 757 BUG_ON(direction & ~1); 758 *i = (struct iov_iter) { 759 .iter_type = ITER_XARRAY, 760 .data_source = direction, 761 .xarray = xarray, 762 .xarray_start = start, 763 .count = count, 764 .iov_offset = 0 765 }; 766} 767EXPORT_SYMBOL(iov_iter_xarray); 768 769/** 770 * iov_iter_discard - Initialise an I/O iterator that discards data 771 * @i: The iterator to initialise. 772 * @direction: The direction of the transfer. 773 * @count: The size of the I/O buffer in bytes. 774 * 775 * Set up an I/O iterator that just discards everything that's written to it. 776 * It's only available as a READ iterator. 777 */ 778void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) 779{ 780 BUG_ON(direction != READ); 781 *i = (struct iov_iter){ 782 .iter_type = ITER_DISCARD, 783 .data_source = false, 784 .count = count, 785 .iov_offset = 0 786 }; 787} 788EXPORT_SYMBOL(iov_iter_discard); 789 790static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, 791 unsigned len_mask) 792{ 793 const struct iovec *iov = iter_iov(i); 794 size_t size = i->count; 795 size_t skip = i->iov_offset; 796 797 do { 798 size_t len = iov->iov_len - skip; 799 800 if (len > size) 801 len = size; 802 if (len & len_mask) 803 return false; 804 if ((unsigned long)(iov->iov_base + skip) & addr_mask) 805 return false; 806 807 iov++; 808 size -= len; 809 skip = 0; 810 } while (size); 811 812 return true; 813} 814 815static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, 816 unsigned len_mask) 817{ 818 const struct bio_vec *bvec = i->bvec; 819 unsigned skip = i->iov_offset; 820 size_t size = i->count; 821 822 do { 823 size_t len = bvec->bv_len; 824 825 if (len > size) 826 len = size; 827 if (len & len_mask) 828 return false; 829 if ((unsigned long)(bvec->bv_offset + skip) & addr_mask) 830 return false; 831 832 bvec++; 833 size -= len; 834 skip = 0; 835 } while (size); 836 837 return true; 838} 839 840/** 841 * iov_iter_is_aligned() - Check if the addresses and lengths of each segments 842 * are aligned to the parameters. 843 * 844 * @i: &struct iov_iter to restore 845 * @addr_mask: bit mask to check against the iov element's addresses 846 * @len_mask: bit mask to check against the iov element's lengths 847 * 848 * Return: false if any addresses or lengths intersect with the provided masks 849 */ 850bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, 851 unsigned len_mask) 852{ 853 if (likely(iter_is_ubuf(i))) { 854 if (i->count & len_mask) 855 return false; 856 if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask) 857 return false; 858 return true; 859 } 860 861 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 862 return iov_iter_aligned_iovec(i, addr_mask, len_mask); 863 864 if (iov_iter_is_bvec(i)) 865 return iov_iter_aligned_bvec(i, addr_mask, len_mask); 866 867 /* With both xarray and folioq types, we're dealing with whole folios. 
*/ 868 if (iov_iter_is_xarray(i)) { 869 if (i->count & len_mask) 870 return false; 871 if ((i->xarray_start + i->iov_offset) & addr_mask) 872 return false; 873 } 874 if (iov_iter_is_folioq(i)) { 875 if (i->count & len_mask) 876 return false; 877 if (i->iov_offset & addr_mask) 878 return false; 879 } 880 881 return true; 882} 883EXPORT_SYMBOL_GPL(iov_iter_is_aligned); 884 885static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) 886{ 887 const struct iovec *iov = iter_iov(i); 888 unsigned long res = 0; 889 size_t size = i->count; 890 size_t skip = i->iov_offset; 891 892 do { 893 size_t len = iov->iov_len - skip; 894 if (len) { 895 res |= (unsigned long)iov->iov_base + skip; 896 if (len > size) 897 len = size; 898 res |= len; 899 size -= len; 900 } 901 iov++; 902 skip = 0; 903 } while (size); 904 return res; 905} 906 907static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) 908{ 909 const struct bio_vec *bvec = i->bvec; 910 unsigned res = 0; 911 size_t size = i->count; 912 unsigned skip = i->iov_offset; 913 914 do { 915 size_t len = bvec->bv_len - skip; 916 res |= (unsigned long)bvec->bv_offset + skip; 917 if (len > size) 918 len = size; 919 res |= len; 920 bvec++; 921 size -= len; 922 skip = 0; 923 } while (size); 924 925 return res; 926} 927 928unsigned long iov_iter_alignment(const struct iov_iter *i) 929{ 930 if (likely(iter_is_ubuf(i))) { 931 size_t size = i->count; 932 if (size) 933 return ((unsigned long)i->ubuf + i->iov_offset) | size; 934 return 0; 935 } 936 937 /* iovec and kvec have identical layouts */ 938 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 939 return iov_iter_alignment_iovec(i); 940 941 if (iov_iter_is_bvec(i)) 942 return iov_iter_alignment_bvec(i); 943 944 /* With both xarray and folioq types, we're dealing with whole folios. 
*/ 945 if (iov_iter_is_folioq(i)) 946 return i->iov_offset | i->count; 947 if (iov_iter_is_xarray(i)) 948 return (i->xarray_start + i->iov_offset) | i->count; 949 950 return 0; 951} 952EXPORT_SYMBOL(iov_iter_alignment); 953 954unsigned long iov_iter_gap_alignment(const struct iov_iter *i) 955{ 956 unsigned long res = 0; 957 unsigned long v = 0; 958 size_t size = i->count; 959 unsigned k; 960 961 if (iter_is_ubuf(i)) 962 return 0; 963 964 if (WARN_ON(!iter_is_iovec(i))) 965 return ~0U; 966 967 for (k = 0; k < i->nr_segs; k++) { 968 const struct iovec *iov = iter_iov(i) + k; 969 if (iov->iov_len) { 970 unsigned long base = (unsigned long)iov->iov_base; 971 if (v) // if not the first one 972 res |= base | v; // this start | previous end 973 v = base + iov->iov_len; 974 if (size <= iov->iov_len) 975 break; 976 size -= iov->iov_len; 977 } 978 } 979 return res; 980} 981EXPORT_SYMBOL(iov_iter_gap_alignment); 982 983static int want_pages_array(struct page ***res, size_t size, 984 size_t start, unsigned int maxpages) 985{ 986 unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE); 987 988 if (count > maxpages) 989 count = maxpages; 990 WARN_ON(!count); // caller should've prevented that 991 if (!*res) { 992 *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); 993 if (!*res) 994 return 0; 995 } 996 return count; 997} 998 999static ssize_t iter_folioq_get_pages(struct iov_iter *iter, 1000 struct page ***ppages, size_t maxsize, 1001 unsigned maxpages, size_t *_start_offset) 1002{ 1003 const struct folio_queue *folioq = iter->folioq; 1004 struct page **pages; 1005 unsigned int slot = iter->folioq_slot; 1006 size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset; 1007 1008 if (slot >= folioq_nr_slots(folioq)) { 1009 folioq = folioq->next; 1010 slot = 0; 1011 if (WARN_ON(iov_offset != 0)) 1012 return -EIO; 1013 } 1014 1015 maxpages = want_pages_array(ppages, maxsize, iov_offset & ~PAGE_MASK, maxpages); 1016 if (!maxpages) 1017 return -ENOMEM; 1018 *_start_offset = iov_offset & ~PAGE_MASK; 1019 pages = *ppages; 1020 1021 for (;;) { 1022 struct folio *folio = folioq_folio(folioq, slot); 1023 size_t offset = iov_offset, fsize = folioq_folio_size(folioq, slot); 1024 size_t part = PAGE_SIZE - offset % PAGE_SIZE; 1025 1026 if (offset < fsize) { 1027 part = umin(part, umin(maxsize - extracted, fsize - offset)); 1028 count -= part; 1029 iov_offset += part; 1030 extracted += part; 1031 1032 *pages = folio_page(folio, offset / PAGE_SIZE); 1033 get_page(*pages); 1034 pages++; 1035 maxpages--; 1036 } 1037 1038 if (maxpages == 0 || extracted >= maxsize) 1039 break; 1040 1041 if (iov_offset >= fsize) { 1042 iov_offset = 0; 1043 slot++; 1044 if (slot == folioq_nr_slots(folioq) && folioq->next) { 1045 folioq = folioq->next; 1046 slot = 0; 1047 } 1048 } 1049 } 1050 1051 iter->count = count; 1052 iter->iov_offset = iov_offset; 1053 iter->folioq = folioq; 1054 iter->folioq_slot = slot; 1055 return extracted; 1056} 1057 1058static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, 1059 pgoff_t index, unsigned int nr_pages) 1060{ 1061 XA_STATE(xas, xa, index); 1062 struct page *page; 1063 unsigned int ret = 0; 1064 1065 rcu_read_lock(); 1066 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 1067 if (xas_retry(&xas, page)) 1068 continue; 1069 1070 /* Has the page moved or been split? 
*/ 1071 if (unlikely(page != xas_reload(&xas))) { 1072 xas_reset(&xas); 1073 continue; 1074 } 1075 1076 pages[ret] = find_subpage(page, xas.xa_index); 1077 get_page(pages[ret]); 1078 if (++ret == nr_pages) 1079 break; 1080 } 1081 rcu_read_unlock(); 1082 return ret; 1083} 1084 1085static ssize_t iter_xarray_get_pages(struct iov_iter *i, 1086 struct page ***pages, size_t maxsize, 1087 unsigned maxpages, size_t *_start_offset) 1088{ 1089 unsigned nr, offset, count; 1090 pgoff_t index; 1091 loff_t pos; 1092 1093 pos = i->xarray_start + i->iov_offset; 1094 index = pos >> PAGE_SHIFT; 1095 offset = pos & ~PAGE_MASK; 1096 *_start_offset = offset; 1097 1098 count = want_pages_array(pages, maxsize, offset, maxpages); 1099 if (!count) 1100 return -ENOMEM; 1101 nr = iter_xarray_populate_pages(*pages, i->xarray, index, count); 1102 if (nr == 0) 1103 return 0; 1104 1105 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); 1106 i->iov_offset += maxsize; 1107 i->count -= maxsize; 1108 return maxsize; 1109} 1110 1111/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */ 1112static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) 1113{ 1114 size_t skip; 1115 long k; 1116 1117 if (iter_is_ubuf(i)) 1118 return (unsigned long)i->ubuf + i->iov_offset; 1119 1120 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { 1121 const struct iovec *iov = iter_iov(i) + k; 1122 size_t len = iov->iov_len - skip; 1123 1124 if (unlikely(!len)) 1125 continue; 1126 if (*size > len) 1127 *size = len; 1128 return (unsigned long)iov->iov_base + skip; 1129 } 1130 BUG(); // if it had been empty, we wouldn't get called 1131} 1132 1133/* must be done on non-empty ITER_BVEC one */ 1134static struct page *first_bvec_segment(const struct iov_iter *i, 1135 size_t *size, size_t *start) 1136{ 1137 struct page *page; 1138 size_t skip = i->iov_offset, len; 1139 1140 len = i->bvec->bv_len - skip; 1141 if (*size > len) 1142 *size = len; 1143 skip += i->bvec->bv_offset; 1144 page = i->bvec->bv_page + skip / PAGE_SIZE; 1145 *start = skip % PAGE_SIZE; 1146 return page; 1147} 1148 1149static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, 1150 struct page ***pages, size_t maxsize, 1151 unsigned int maxpages, size_t *start) 1152{ 1153 unsigned int n, gup_flags = 0; 1154 1155 if (maxsize > i->count) 1156 maxsize = i->count; 1157 if (!maxsize) 1158 return 0; 1159 if (maxsize > MAX_RW_COUNT) 1160 maxsize = MAX_RW_COUNT; 1161 1162 if (likely(user_backed_iter(i))) { 1163 unsigned long addr; 1164 int res; 1165 1166 if (iov_iter_rw(i) != WRITE) 1167 gup_flags |= FOLL_WRITE; 1168 if (i->nofault) 1169 gup_flags |= FOLL_NOFAULT; 1170 1171 addr = first_iovec_segment(i, &maxsize); 1172 *start = addr % PAGE_SIZE; 1173 addr &= PAGE_MASK; 1174 n = want_pages_array(pages, maxsize, *start, maxpages); 1175 if (!n) 1176 return -ENOMEM; 1177 res = get_user_pages_fast(addr, n, gup_flags, *pages); 1178 if (unlikely(res <= 0)) 1179 return res; 1180 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start); 1181 iov_iter_advance(i, maxsize); 1182 return maxsize; 1183 } 1184 if (iov_iter_is_bvec(i)) { 1185 struct page **p; 1186 struct page *page; 1187 1188 page = first_bvec_segment(i, &maxsize, start); 1189 n = want_pages_array(pages, maxsize, *start, maxpages); 1190 if (!n) 1191 return -ENOMEM; 1192 p = *pages; 1193 for (int k = 0; k < n; k++) { 1194 struct folio *folio = page_folio(page); 1195 p[k] = page + k; 1196 if (!folio_test_slab(folio)) 1197 folio_get(folio); 1198 } 1199 maxsize = min_t(size_t, maxsize, n * 
PAGE_SIZE - *start); 1200 i->count -= maxsize; 1201 i->iov_offset += maxsize; 1202 if (i->iov_offset == i->bvec->bv_len) { 1203 i->iov_offset = 0; 1204 i->bvec++; 1205 i->nr_segs--; 1206 } 1207 return maxsize; 1208 } 1209 if (iov_iter_is_folioq(i)) 1210 return iter_folioq_get_pages(i, pages, maxsize, maxpages, start); 1211 if (iov_iter_is_xarray(i)) 1212 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); 1213 return -EFAULT; 1214} 1215 1216ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, 1217 size_t maxsize, unsigned maxpages, size_t *start) 1218{ 1219 if (!maxpages) 1220 return 0; 1221 BUG_ON(!pages); 1222 1223 return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start); 1224} 1225EXPORT_SYMBOL(iov_iter_get_pages2); 1226 1227ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, 1228 struct page ***pages, size_t maxsize, size_t *start) 1229{ 1230 ssize_t len; 1231 1232 *pages = NULL; 1233 1234 len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start); 1235 if (len <= 0) { 1236 kvfree(*pages); 1237 *pages = NULL; 1238 } 1239 return len; 1240} 1241EXPORT_SYMBOL(iov_iter_get_pages_alloc2); 1242 1243static int iov_npages(const struct iov_iter *i, int maxpages) 1244{ 1245 size_t skip = i->iov_offset, size = i->count; 1246 const struct iovec *p; 1247 int npages = 0; 1248 1249 for (p = iter_iov(i); size; skip = 0, p++) { 1250 unsigned offs = offset_in_page(p->iov_base + skip); 1251 size_t len = min(p->iov_len - skip, size); 1252 1253 if (len) { 1254 size -= len; 1255 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); 1256 if (unlikely(npages > maxpages)) 1257 return maxpages; 1258 } 1259 } 1260 return npages; 1261} 1262 1263static int bvec_npages(const struct iov_iter *i, int maxpages) 1264{ 1265 size_t skip = i->iov_offset, size = i->count; 1266 const struct bio_vec *p; 1267 int npages = 0; 1268 1269 for (p = i->bvec; size; skip = 0, p++) { 1270 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; 1271 size_t len = min(p->bv_len - skip, size); 1272 1273 size -= len; 1274 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); 1275 if (unlikely(npages > maxpages)) 1276 return maxpages; 1277 } 1278 return npages; 1279} 1280 1281int iov_iter_npages(const struct iov_iter *i, int maxpages) 1282{ 1283 if (unlikely(!i->count)) 1284 return 0; 1285 if (likely(iter_is_ubuf(i))) { 1286 unsigned offs = offset_in_page(i->ubuf + i->iov_offset); 1287 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE); 1288 return min(npages, maxpages); 1289 } 1290 /* iovec and kvec have identical layouts */ 1291 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1292 return iov_npages(i, maxpages); 1293 if (iov_iter_is_bvec(i)) 1294 return bvec_npages(i, maxpages); 1295 if (iov_iter_is_folioq(i)) { 1296 unsigned offset = i->iov_offset % PAGE_SIZE; 1297 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); 1298 return min(npages, maxpages); 1299 } 1300 if (iov_iter_is_xarray(i)) { 1301 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; 1302 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); 1303 return min(npages, maxpages); 1304 } 1305 return 0; 1306} 1307EXPORT_SYMBOL(iov_iter_npages); 1308 1309const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) 1310{ 1311 *new = *old; 1312 if (iov_iter_is_bvec(new)) 1313 return new->bvec = kmemdup(new->bvec, 1314 new->nr_segs * sizeof(struct bio_vec), 1315 flags); 1316 else if (iov_iter_is_kvec(new) || iter_is_iovec(new)) 1317 /* iovec and kvec have identical layout */ 1318 return new->__iov = 
kmemdup(new->__iov, 1319 new->nr_segs * sizeof(struct iovec), 1320 flags); 1321 return NULL; 1322} 1323EXPORT_SYMBOL(dup_iter); 1324 1325static __noclone int copy_compat_iovec_from_user(struct iovec *iov, 1326 const struct iovec __user *uvec, u32 nr_segs) 1327{ 1328 const struct compat_iovec __user *uiov = 1329 (const struct compat_iovec __user *)uvec; 1330 int ret = -EFAULT; 1331 u32 i; 1332 1333 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) 1334 return -EFAULT; 1335 1336 for (i = 0; i < nr_segs; i++) { 1337 compat_uptr_t buf; 1338 compat_ssize_t len; 1339 1340 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); 1341 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); 1342 1343 /* check for compat_size_t not fitting in compat_ssize_t .. */ 1344 if (len < 0) { 1345 ret = -EINVAL; 1346 goto uaccess_end; 1347 } 1348 iov[i].iov_base = compat_ptr(buf); 1349 iov[i].iov_len = len; 1350 } 1351 1352 ret = 0; 1353uaccess_end: 1354 user_access_end(); 1355 return ret; 1356} 1357 1358static __noclone int copy_iovec_from_user(struct iovec *iov, 1359 const struct iovec __user *uiov, unsigned long nr_segs) 1360{ 1361 int ret = -EFAULT; 1362 1363 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) 1364 return -EFAULT; 1365 1366 do { 1367 void __user *buf; 1368 ssize_t len; 1369 1370 unsafe_get_user(len, &uiov->iov_len, uaccess_end); 1371 unsafe_get_user(buf, &uiov->iov_base, uaccess_end); 1372 1373 /* check for size_t not fitting in ssize_t .. */ 1374 if (unlikely(len < 0)) { 1375 ret = -EINVAL; 1376 goto uaccess_end; 1377 } 1378 iov->iov_base = buf; 1379 iov->iov_len = len; 1380 1381 uiov++; iov++; 1382 } while (--nr_segs); 1383 1384 ret = 0; 1385uaccess_end: 1386 user_access_end(); 1387 return ret; 1388} 1389 1390struct iovec *iovec_from_user(const struct iovec __user *uvec, 1391 unsigned long nr_segs, unsigned long fast_segs, 1392 struct iovec *fast_iov, bool compat) 1393{ 1394 struct iovec *iov = fast_iov; 1395 int ret; 1396 1397 /* 1398 * SuS says "The readv() function *may* fail if the iovcnt argument was 1399 * less than or equal to 0, or greater than {IOV_MAX}. Linux has 1400 * traditionally returned zero for zero segments, so... 1401 */ 1402 if (nr_segs == 0) 1403 return iov; 1404 if (nr_segs > UIO_MAXIOV) 1405 return ERR_PTR(-EINVAL); 1406 if (nr_segs > fast_segs) { 1407 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); 1408 if (!iov) 1409 return ERR_PTR(-ENOMEM); 1410 } 1411 1412 if (unlikely(compat)) 1413 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); 1414 else 1415 ret = copy_iovec_from_user(iov, uvec, nr_segs); 1416 if (ret) { 1417 if (iov != fast_iov) 1418 kfree(iov); 1419 return ERR_PTR(ret); 1420 } 1421 1422 return iov; 1423} 1424 1425/* 1426 * Single segment iovec supplied by the user, import it as ITER_UBUF. 
1427 */ 1428static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec, 1429 struct iovec **iovp, struct iov_iter *i, 1430 bool compat) 1431{ 1432 struct iovec *iov = *iovp; 1433 ssize_t ret; 1434 1435 *iovp = NULL; 1436 1437 if (compat) 1438 ret = copy_compat_iovec_from_user(iov, uvec, 1); 1439 else 1440 ret = copy_iovec_from_user(iov, uvec, 1); 1441 if (unlikely(ret)) 1442 return ret; 1443 1444 ret = import_ubuf(type, iov->iov_base, iov->iov_len, i); 1445 if (unlikely(ret)) 1446 return ret; 1447 return i->count; 1448} 1449 1450ssize_t __import_iovec(int type, const struct iovec __user *uvec, 1451 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, 1452 struct iov_iter *i, bool compat) 1453{ 1454 ssize_t total_len = 0; 1455 unsigned long seg; 1456 struct iovec *iov; 1457 1458 if (nr_segs == 1) 1459 return __import_iovec_ubuf(type, uvec, iovp, i, compat); 1460 1461 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); 1462 if (IS_ERR(iov)) { 1463 *iovp = NULL; 1464 return PTR_ERR(iov); 1465 } 1466 1467 /* 1468 * According to the Single Unix Specification we should return EINVAL if 1469 * an element length is < 0 when cast to ssize_t or if the total length 1470 * would overflow the ssize_t return value of the system call. 1471 * 1472 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the 1473 * overflow case. 1474 */ 1475 for (seg = 0; seg < nr_segs; seg++) { 1476 ssize_t len = (ssize_t)iov[seg].iov_len; 1477 1478 if (!access_ok(iov[seg].iov_base, len)) { 1479 if (iov != *iovp) 1480 kfree(iov); 1481 *iovp = NULL; 1482 return -EFAULT; 1483 } 1484 1485 if (len > MAX_RW_COUNT - total_len) { 1486 len = MAX_RW_COUNT - total_len; 1487 iov[seg].iov_len = len; 1488 } 1489 total_len += len; 1490 } 1491 1492 iov_iter_init(i, type, iov, nr_segs, total_len); 1493 if (iov == *iovp) 1494 *iovp = NULL; 1495 else 1496 *iovp = iov; 1497 return total_len; 1498} 1499 1500/** 1501 * import_iovec() - Copy an array of &struct iovec from userspace 1502 * into the kernel, check that it is valid, and initialize a new 1503 * &struct iov_iter iterator to access it. 1504 * 1505 * @type: One of %READ or %WRITE. 1506 * @uvec: Pointer to the userspace array. 1507 * @nr_segs: Number of elements in userspace array. 1508 * @fast_segs: Number of elements in @iov. 1509 * @iovp: (input and output parameter) Pointer to pointer to (usually small 1510 * on-stack) kernel array. 1511 * @i: Pointer to iterator that will be initialized on success. 1512 * 1513 * If the array pointed to by *@iov is large enough to hold all @nr_segs, 1514 * then this function places %NULL in *@iov on return. Otherwise, a new 1515 * array will be allocated and the result placed in *@iov. This means that 1516 * the caller may call kfree() on *@iov regardless of whether the small 1517 * on-stack array was used or not (and regardless of whether this function 1518 * returns an error or not). 
1519 * 1520 * Return: Negative error code on error, bytes imported on success 1521 */ 1522ssize_t import_iovec(int type, const struct iovec __user *uvec, 1523 unsigned nr_segs, unsigned fast_segs, 1524 struct iovec **iovp, struct iov_iter *i) 1525{ 1526 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i, 1527 in_compat_syscall()); 1528} 1529EXPORT_SYMBOL(import_iovec); 1530 1531int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i) 1532{ 1533 if (len > MAX_RW_COUNT) 1534 len = MAX_RW_COUNT; 1535 if (unlikely(!access_ok(buf, len))) 1536 return -EFAULT; 1537 1538 iov_iter_ubuf(i, rw, buf, len); 1539 return 0; 1540} 1541EXPORT_SYMBOL_GPL(import_ubuf); 1542 1543/** 1544 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when 1545 * iov_iter_save_state() was called. 1546 * 1547 * @i: &struct iov_iter to restore 1548 * @state: state to restore from 1549 * 1550 * Used after iov_iter_save_state() to bring restore @i, if operations may 1551 * have advanced it. 1552 * 1553 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC 1554 */ 1555void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) 1556{ 1557 if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) && 1558 !iter_is_ubuf(i)) && !iov_iter_is_kvec(i)) 1559 return; 1560 i->iov_offset = state->iov_offset; 1561 i->count = state->count; 1562 if (iter_is_ubuf(i)) 1563 return; 1564 /* 1565 * For the *vec iters, nr_segs + iov is constant - if we increment 1566 * the vec, then we also decrement the nr_segs count. Hence we don't 1567 * need to track both of these, just one is enough and we can deduct 1568 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct 1569 * size, so we can just increment the iov pointer as they are unionzed. 1570 * ITER_BVEC _may_ be the same size on some archs, but on others it is 1571 * not. Be safe and handle it separately. 1572 */ 1573 BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); 1574 if (iov_iter_is_bvec(i)) 1575 i->bvec -= state->nr_segs - i->nr_segs; 1576 else 1577 i->__iov -= state->nr_segs - i->nr_segs; 1578 i->nr_segs = state->nr_segs; 1579} 1580 1581/* 1582 * Extract a list of contiguous pages from an ITER_FOLIOQ iterator. This does 1583 * not get references on the pages, nor does it get a pin on them. 
1584 */ 1585static ssize_t iov_iter_extract_folioq_pages(struct iov_iter *i, 1586 struct page ***pages, size_t maxsize, 1587 unsigned int maxpages, 1588 iov_iter_extraction_t extraction_flags, 1589 size_t *offset0) 1590{ 1591 const struct folio_queue *folioq = i->folioq; 1592 struct page **p; 1593 unsigned int nr = 0; 1594 size_t extracted = 0, offset, slot = i->folioq_slot; 1595 1596 if (slot >= folioq_nr_slots(folioq)) { 1597 folioq = folioq->next; 1598 slot = 0; 1599 if (WARN_ON(i->iov_offset != 0)) 1600 return -EIO; 1601 } 1602 1603 offset = i->iov_offset & ~PAGE_MASK; 1604 *offset0 = offset; 1605 1606 maxpages = want_pages_array(pages, maxsize, offset, maxpages); 1607 if (!maxpages) 1608 return -ENOMEM; 1609 p = *pages; 1610 1611 for (;;) { 1612 struct folio *folio = folioq_folio(folioq, slot); 1613 size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot); 1614 size_t part = PAGE_SIZE - offset % PAGE_SIZE; 1615 1616 if (offset < fsize) { 1617 part = umin(part, umin(maxsize - extracted, fsize - offset)); 1618 i->count -= part; 1619 i->iov_offset += part; 1620 extracted += part; 1621 1622 p[nr++] = folio_page(folio, offset / PAGE_SIZE); 1623 } 1624 1625 if (nr >= maxpages || extracted >= maxsize) 1626 break; 1627 1628 if (i->iov_offset >= fsize) { 1629 i->iov_offset = 0; 1630 slot++; 1631 if (slot == folioq_nr_slots(folioq) && folioq->next) { 1632 folioq = folioq->next; 1633 slot = 0; 1634 } 1635 } 1636 } 1637 1638 i->folioq = folioq; 1639 i->folioq_slot = slot; 1640 return extracted; 1641} 1642 1643/* 1644 * Extract a list of contiguous pages from an ITER_XARRAY iterator. This does not 1645 * get references on the pages, nor does it get a pin on them. 1646 */ 1647static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i, 1648 struct page ***pages, size_t maxsize, 1649 unsigned int maxpages, 1650 iov_iter_extraction_t extraction_flags, 1651 size_t *offset0) 1652{ 1653 struct page *page, **p; 1654 unsigned int nr = 0, offset; 1655 loff_t pos = i->xarray_start + i->iov_offset; 1656 pgoff_t index = pos >> PAGE_SHIFT; 1657 XA_STATE(xas, i->xarray, index); 1658 1659 offset = pos & ~PAGE_MASK; 1660 *offset0 = offset; 1661 1662 maxpages = want_pages_array(pages, maxsize, offset, maxpages); 1663 if (!maxpages) 1664 return -ENOMEM; 1665 p = *pages; 1666 1667 rcu_read_lock(); 1668 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 1669 if (xas_retry(&xas, page)) 1670 continue; 1671 1672 /* Has the page moved or been split? */ 1673 if (unlikely(page != xas_reload(&xas))) { 1674 xas_reset(&xas); 1675 continue; 1676 } 1677 1678 p[nr++] = find_subpage(page, xas.xa_index); 1679 if (nr == maxpages) 1680 break; 1681 } 1682 rcu_read_unlock(); 1683 1684 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); 1685 iov_iter_advance(i, maxsize); 1686 return maxsize; 1687} 1688 1689/* 1690 * Extract a list of virtually contiguous pages from an ITER_BVEC iterator. 1691 * This does not get references on the pages, nor does it get a pin on them. 
1692 */ 1693static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i, 1694 struct page ***pages, size_t maxsize, 1695 unsigned int maxpages, 1696 iov_iter_extraction_t extraction_flags, 1697 size_t *offset0) 1698{ 1699 size_t skip = i->iov_offset, size = 0; 1700 struct bvec_iter bi; 1701 int k = 0; 1702 1703 if (i->nr_segs == 0) 1704 return 0; 1705 1706 if (i->iov_offset == i->bvec->bv_len) { 1707 i->iov_offset = 0; 1708 i->nr_segs--; 1709 i->bvec++; 1710 skip = 0; 1711 } 1712 bi.bi_idx = 0; 1713 bi.bi_size = maxsize; 1714 bi.bi_bvec_done = skip; 1715 1716 maxpages = want_pages_array(pages, maxsize, skip, maxpages); 1717 1718 while (bi.bi_size && bi.bi_idx < i->nr_segs) { 1719 struct bio_vec bv = bvec_iter_bvec(i->bvec, bi); 1720 1721 /* 1722 * The iov_iter_extract_pages interface only allows an offset 1723 * into the first page. Break out of the loop if we see an 1724 * offset into subsequent pages, the caller will have to call 1725 * iov_iter_extract_pages again for the reminder. 1726 */ 1727 if (k) { 1728 if (bv.bv_offset) 1729 break; 1730 } else { 1731 *offset0 = bv.bv_offset; 1732 } 1733 1734 (*pages)[k++] = bv.bv_page; 1735 size += bv.bv_len; 1736 1737 if (k >= maxpages) 1738 break; 1739 1740 /* 1741 * We are done when the end of the bvec doesn't align to a page 1742 * boundary as that would create a hole in the returned space. 1743 * The caller will handle this with another call to 1744 * iov_iter_extract_pages. 1745 */ 1746 if (bv.bv_offset + bv.bv_len != PAGE_SIZE) 1747 break; 1748 1749 bvec_iter_advance_single(i->bvec, &bi, bv.bv_len); 1750 } 1751 1752 iov_iter_advance(i, size); 1753 return size; 1754} 1755 1756/* 1757 * Extract a list of virtually contiguous pages from an ITER_KVEC iterator. 1758 * This does not get references on the pages, nor does it get a pin on them. 1759 */ 1760static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i, 1761 struct page ***pages, size_t maxsize, 1762 unsigned int maxpages, 1763 iov_iter_extraction_t extraction_flags, 1764 size_t *offset0) 1765{ 1766 struct page **p, *page; 1767 const void *kaddr; 1768 size_t skip = i->iov_offset, offset, len, size; 1769 int k; 1770 1771 for (;;) { 1772 if (i->nr_segs == 0) 1773 return 0; 1774 size = min(maxsize, i->kvec->iov_len - skip); 1775 if (size) 1776 break; 1777 i->iov_offset = 0; 1778 i->nr_segs--; 1779 i->kvec++; 1780 skip = 0; 1781 } 1782 1783 kaddr = i->kvec->iov_base + skip; 1784 offset = (unsigned long)kaddr & ~PAGE_MASK; 1785 *offset0 = offset; 1786 1787 maxpages = want_pages_array(pages, size, offset, maxpages); 1788 if (!maxpages) 1789 return -ENOMEM; 1790 p = *pages; 1791 1792 kaddr -= offset; 1793 len = offset + size; 1794 for (k = 0; k < maxpages; k++) { 1795 size_t seg = min_t(size_t, len, PAGE_SIZE); 1796 1797 if (is_vmalloc_or_module_addr(kaddr)) 1798 page = vmalloc_to_page(kaddr); 1799 else 1800 page = virt_to_page(kaddr); 1801 1802 p[k] = page; 1803 len -= seg; 1804 kaddr += PAGE_SIZE; 1805 } 1806 1807 size = min_t(size_t, size, maxpages * PAGE_SIZE - offset); 1808 iov_iter_advance(i, size); 1809 return size; 1810} 1811 1812/* 1813 * Extract a list of contiguous pages from a user iterator and get a pin on 1814 * each of them. This should only be used if the iterator is user-backed 1815 * (IOBUF/UBUF). 1816 * 1817 * It does not get refs on the pages, but the pages must be unpinned by the 1818 * caller once the transfer is complete. 
1819 * 1820 * This is safe to be used where background IO/DMA *is* going to be modifying 1821 * the buffer; using a pin rather than a ref makes forces fork() to give the 1822 * child a copy of the page. 1823 */ 1824static ssize_t iov_iter_extract_user_pages(struct iov_iter *i, 1825 struct page ***pages, 1826 size_t maxsize, 1827 unsigned int maxpages, 1828 iov_iter_extraction_t extraction_flags, 1829 size_t *offset0) 1830{ 1831 unsigned long addr; 1832 unsigned int gup_flags = 0; 1833 size_t offset; 1834 int res; 1835 1836 if (i->data_source == ITER_DEST) 1837 gup_flags |= FOLL_WRITE; 1838 if (extraction_flags & ITER_ALLOW_P2PDMA) 1839 gup_flags |= FOLL_PCI_P2PDMA; 1840 if (i->nofault) 1841 gup_flags |= FOLL_NOFAULT; 1842 1843 addr = first_iovec_segment(i, &maxsize); 1844 *offset0 = offset = addr % PAGE_SIZE; 1845 addr &= PAGE_MASK; 1846 maxpages = want_pages_array(pages, maxsize, offset, maxpages); 1847 if (!maxpages) 1848 return -ENOMEM; 1849 res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages); 1850 if (unlikely(res <= 0)) 1851 return res; 1852 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset); 1853 iov_iter_advance(i, maxsize); 1854 return maxsize; 1855} 1856 1857/** 1858 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator 1859 * @i: The iterator to extract from 1860 * @pages: Where to return the list of pages 1861 * @maxsize: The maximum amount of iterator to extract 1862 * @maxpages: The maximum size of the list of pages 1863 * @extraction_flags: Flags to qualify request 1864 * @offset0: Where to return the starting offset into (*@pages)[0] 1865 * 1866 * Extract a list of contiguous pages from the current point of the iterator, 1867 * advancing the iterator. The maximum number of pages and the maximum amount 1868 * of page contents can be set. 1869 * 1870 * If *@pages is NULL, a page list will be allocated to the required size and 1871 * *@pages will be set to its base. If *@pages is not NULL, it will be assumed 1872 * that the caller allocated a page list at least @maxpages in size and this 1873 * will be filled in. 1874 * 1875 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA 1876 * be allowed on the pages extracted. 1877 * 1878 * The iov_iter_extract_will_pin() function can be used to query how cleanup 1879 * should be performed. 1880 * 1881 * Extra refs or pins on the pages may be obtained as follows: 1882 * 1883 * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be 1884 * added to the pages, but refs will not be taken. 1885 * iov_iter_extract_will_pin() will return true. 1886 * 1887 * (*) If the iterator is ITER_KVEC, ITER_BVEC, ITER_FOLIOQ or ITER_XARRAY, the 1888 * pages are merely listed; no extra refs or pins are obtained. 1889 * iov_iter_extract_will_pin() will return 0. 1890 * 1891 * Note also: 1892 * 1893 * (*) Use with ITER_DISCARD is not supported as that has no content. 1894 * 1895 * On success, the function sets *@pages to the new pagelist, if allocated, and 1896 * sets *offset0 to the offset into the first page. 1897 * 1898 * It may also return -ENOMEM and -EFAULT. 
 */
ssize_t iov_iter_extract_pages(struct iov_iter *i,
			       struct page ***pages,
			       size_t maxsize,
			       unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0)
{
	maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
	if (!maxsize)
		return 0;

	if (likely(user_backed_iter(i)))
		return iov_iter_extract_user_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_kvec(i))
		return iov_iter_extract_kvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_bvec(i))
		return iov_iter_extract_bvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_folioq(i))
		return iov_iter_extract_folioq_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	if (iov_iter_is_xarray(i))
		return iov_iter_extract_xarray_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
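The file above provides the iov_iter primitives the rest of the kernel uses to move data between a single flat kernel buffer and a scatter/gather description (user iovecs, kvecs, bio_vecs, folio queues, xarrays, or a discard sink). As a rough illustration of how a caller drives this API, here is a minimal sketch; it is not part of lib/iov_iter.c, and the helper name fill_buf_example and the message string are invented for the example:

/*
 * Sketch only: wrap a kernel buffer in an ITER_DEST kvec iterator and
 * copy a message into it with copy_to_iter().  copy_to_iter() returns
 * the number of bytes actually copied, which may be short.
 */
#include <linux/uio.h>

static size_t fill_buf_example(void *dst, size_t dst_len)
{
	struct kvec vec = { .iov_base = dst, .iov_len = dst_len };
	struct iov_iter iter;
	static const char msg[] = "hello, iov_iter";

	/* ITER_DEST: the iterator is the destination of the copy. */
	iov_iter_kvec(&iter, ITER_DEST, &vec, 1, dst_len);

	return copy_to_iter(msg, sizeof(msg), &iter);
}

A user-backed destination (ITER_UBUF/ITER_IOVEC) would instead be set up with import_ubuf() or import_iovec(), and the same copy_to_iter() call would then take the access_ok()/raw_copy_to_user() path shown in copy_to_user_iter() near the top of the file.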