
iov_iter: Use accessor function

Use accessor functions to access an iterator's type and direction. This
allows for the possibility of using some other method of determining the
type of iterator than if-chains with bitwise-AND conditions.

Signed-off-by: David Howells <dhowells@redhat.com>
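
The point of the conversion is easiest to see outside the kernel. Below is a minimal userspace sketch of the before/after pattern, assuming simplified stand-ins for the kernel's definitions (direction in the low bit, iterator type in the higher bits; the struct here is not the real iov_iter):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins mirroring the kernel's encoding: READ/WRITE in
 * bit 0, the iterator type in higher bits, with ITER_IOVEC being zero. */
#define READ  0
#define WRITE 1
enum iter_type { ITER_IOVEC = 0, ITER_KVEC = 2, ITER_BVEC = 4, ITER_PIPE = 8 };

struct iov_iter { unsigned int type; };

/* After the patch: the encoding is decomposed in one place. */
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

int main(void)
{
	struct iov_iter i = { .type = ITER_KVEC | WRITE };

	/* Before: open-coded masks leak the encoding to every caller. */
	bool old_is_kvec  = i.type & ITER_KVEC;
	bool old_is_write = (i.type & WRITE) == WRITE;

	/* After: callers say what they mean, not how it is encoded. */
	bool new_is_kvec  = iov_iter_type(&i) == ITER_KVEC;
	bool new_is_write = iov_iter_rw(&i) == WRITE;

	printf("kvec %d/%d write %d/%d pipe %d\n", old_is_kvec, new_is_kvec,
	       old_is_write, new_is_write, iov_iter_is_pipe(&i));
	return 0;
}

Both styles agree today; the accessors simply leave room to change the representation later without touching every caller.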

+87 -60
+1 -1
block/bio.c
@@ -1255,7 +1255,7 @@
 	/*
 	 * success
 	 */
-	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
+	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
 	    (map_data && map_data->from_user)) {
 		ret = bio_copy_from_iter(bio, iter);
 		if (ret)
+1 -1
fs/block_dev.c
@@ -349,7 +349,7 @@
 
 	dio->size = 0;
 	dio->multi_bio = false;
-	dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
+	dio->should_dirty = is_read && iter_is_iovec(iter);
 
 	blk_start_plug(&plug);
 	for (;;) {
+1 -1
fs/ceph/file.c
@@ -658,7 +658,7 @@
 	if (ret < 0)
 		return ret;
 
-	if (unlikely(to->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(to))) {
 		size_t page_off;
 		ret = iov_iter_get_pages_alloc(to, &pages, len,
 					       &page_off);
+2 -2
fs/cifs/file.c
@@ -2990,7 +2990,7 @@
 		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
 		size_t written;
 
-		if (unlikely(iter->type & ITER_PIPE)) {
+		if (unlikely(iov_iter_is_pipe(iter))) {
 			void *addr = kmap_atomic(page);
 
 			written = copy_to_iter(addr, copy, iter);
@@ -3302,7 +3302,7 @@
 	if (!is_sync_kiocb(iocb))
 		ctx->iocb = iocb;
 
-	if (to->type == ITER_IOVEC)
+	if (iter_is_iovec(to))
 		ctx->should_dirty = true;
 
 	rc = setup_aio_ctx_iter(ctx, to, READ);
+1 -1
fs/cifs/misc.c
@@ -786,7 +786,7 @@
 	struct page **pages = NULL;
 	struct bio_vec *bv = NULL;
 
-	if (iter->type & ITER_KVEC) {
+	if (iov_iter_is_kvec(iter)) {
 		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
 		ctx->len = count;
 		iov_iter_advance(iter, count);
+13 -4
fs/cifs/smbdirect.c
@@ -2054,14 +2054,22 @@
 
 	info->smbd_recv_pending++;
 
-	switch (msg->msg_iter.type) {
-	case READ | ITER_KVEC:
+	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
+		/* It's a bug in upper layer to get there */
+		cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
+			 iov_iter_rw(&msg->msg_iter));
+		rc = -EINVAL;
+		goto out;
+	}
+
+	switch (iov_iter_type(&msg->msg_iter)) {
+	case ITER_KVEC:
 		buf = msg->msg_iter.kvec->iov_base;
 		to_read = msg->msg_iter.kvec->iov_len;
 		rc = smbd_recv_buf(info, buf, to_read);
 		break;
 
-	case READ | ITER_BVEC:
+	case ITER_BVEC:
 		page = msg->msg_iter.bvec->bv_page;
 		page_offset = msg->msg_iter.bvec->bv_offset;
 		to_read = msg->msg_iter.bvec->bv_len;
@@ -2071,10 +2079,11 @@
 	default:
 		/* It's a bug in upper layer to get there */
 		cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
-			msg->msg_iter.type);
+			 iov_iter_type(&msg->msg_iter));
 		rc = -EINVAL;
 	}
 
+out:
 	info->smbd_recv_pending--;
 	wake_up(&info->wait_smbd_recv_pending);
 
+1 -1
fs/direct-io.c
@@ -1313,7 +1313,7 @@
 	spin_lock_init(&dio->bio_lock);
 	dio->refcount = 1;
 
-	dio->should_dirty = (iter->type == ITER_IOVEC);
+	dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
 	sdio.iter = iter;
 	sdio.final_block_in_request = end >> blkbits;
 
+1 -1
fs/fuse/file.c
@@ -1271,7 +1271,7 @@
 	ssize_t ret = 0;
 
 	/* Special case for kernel I/O: can copy directly into the buffer */
-	if (ii->type & ITER_KVEC) {
+	if (iov_iter_is_kvec(ii)) {
 		unsigned long user_addr = fuse_get_user_addr(ii);
 		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
 
+1 -1
fs/iomap.c
@@ -1795,7 +1795,7 @@
 		if (pos >= dio->i_size)
 			goto out_free_dio;
 
-		if (iter->type == ITER_IOVEC)
+		if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
 			dio->flags |= IOMAP_DIO_DIRTY;
 	} else {
 		flags |= IOMAP_WRITE;
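
A subtlety in the two conversions above (fs/direct-io.c and fs/iomap.c): ITER_IOVEC and READ are both defined as 0, so the old equality iter->type == ITER_IOVEC implicitly required the direction to be READ as well. iter_is_iovec() masks the direction bits off, hence the explicit iov_iter_rw(iter) == READ that keeps the behaviour unchanged. A toy check, reusing the stand-in constants from the sketch above:

#include <assert.h>

#define READ  0
#define WRITE 1
enum iter_type { ITER_IOVEC = 0, ITER_KVEC = 2 };

int main(void)
{
	unsigned int iovec_read  = ITER_IOVEC | READ;	/* == 0 */
	unsigned int iovec_write = ITER_IOVEC | WRITE;	/* == 1 */

	/* The old test compared the whole field against 0, so it was
	 * only true for iovec iterators in the READ direction. */
	assert(iovec_read == ITER_IOVEC);
	assert(iovec_write != ITER_IOVEC);
	return 0;
}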
+33 -15
include/linux/uio.h
@@ -21,7 +21,7 @@
 	size_t iov_len;
 };
 
-enum {
+enum iter_type {
 	ITER_IOVEC = 0,
 	ITER_KVEC = 2,
 	ITER_BVEC = 4,
@@ -46,6 +46,36 @@
 		};
 	};
 };
+
+static inline enum iter_type iov_iter_type(const struct iov_iter *i)
+{
+	return i->type & ~(READ | WRITE);
+}
+
+static inline bool iter_is_iovec(const struct iov_iter *i)
+{
+	return iov_iter_type(i) == ITER_IOVEC;
+}
+
+static inline bool iov_iter_is_kvec(const struct iov_iter *i)
+{
+	return iov_iter_type(i) == ITER_KVEC;
+}
+
+static inline bool iov_iter_is_bvec(const struct iov_iter *i)
+{
+	return iov_iter_type(i) == ITER_BVEC;
+}
+
+static inline bool iov_iter_is_pipe(const struct iov_iter *i)
+{
+	return iov_iter_type(i) == ITER_PIPE;
+}
+
+static inline unsigned char iov_iter_rw(const struct iov_iter *i)
+{
+	return i->type & (READ | WRITE);
+}
 
 /*
  * Total number of bytes covered by an iovec.
@@ -74,7 +104,8 @@
 }
 
 #define iov_for_each(iov, iter, start)				\
-	if (!((start).type & (ITER_BVEC | ITER_PIPE)))		\
+	if (iov_iter_type(start) == ITER_IOVEC ||		\
+	    iov_iter_type(start) == ITER_KVEC)			\
 	for (iter = (start);					\
 	     (iter).count &&					\
 	     ((iov = iov_iter_iovec(&(iter))), 1);		\
@@ -201,19 +232,6 @@
 {
 	return i->count;
 }
-
-static inline bool iter_is_iovec(const struct iov_iter *i)
-{
-	return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
-}
-
-/*
- * Get one of READ or WRITE out of iter->type without any other flags OR'd in
- * with it.
- *
- * The ?: is just for type safety.
- */
-#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
 
 /*
  * Cap the iov_iter by given limit; note that the second argument is
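
Making iter_type a named enum also opens the door to switch-based dispatch, the alternative to if-chains that the commit message alludes to (and that fs/cifs/smbdirect.c above now uses). A hypothetical userspace caller, reusing the stand-in definitions from the first sketch; describe() is invented for illustration:

#include <stdio.h>

#define READ  0
#define WRITE 1
enum iter_type { ITER_IOVEC = 0, ITER_KVEC = 2, ITER_BVEC = 4, ITER_PIPE = 8 };

struct iov_iter { unsigned int type; };

static enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

/* Hypothetical helper: switching on an enum lets the compiler
 * (-Wswitch) flag any iterator type the caller forgot to handle. */
static const char *describe(const struct iov_iter *i)
{
	switch (iov_iter_type(i)) {
	case ITER_IOVEC: return "user iovec";
	case ITER_KVEC:  return "kernel kvec";
	case ITER_BVEC:  return "bio_vec array";
	case ITER_PIPE:  return "pipe buffer";
	}
	return "unknown";
}

int main(void)
{
	struct iov_iter i = { .type = ITER_BVEC | READ };
	puts(describe(&i));	/* prints "bio_vec array" */
	return 0;
}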
+28 -28
lib/iov_iter.c
@@ -558,7 +558,7 @@
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
 	const char *from = addr;
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_pipe_to_iter(addr, bytes, i);
 	if (iter_is_iovec(i))
 		might_fault();
@@ -658,7 +658,7 @@
 	const char *from = addr;
 	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
 
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
 	if (iter_is_iovec(i))
 		might_fault();
@@ -692,7 +692,7 @@
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -712,7 +712,7 @@
 bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return false;
 	}
@@ -739,7 +739,7 @@
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -773,7 +773,7 @@
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -794,7 +794,7 @@
 bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return false;
 	}
@@ -836,7 +836,7 @@
 		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
 		kunmap_atomic(kaddr);
 		return wanted;
-	} else if (likely(!(i->type & ITER_PIPE)))
+	} else if (likely(!iov_iter_is_pipe(i)))
 		return copy_page_to_iter_iovec(page, offset, bytes, i);
 	else
 		return copy_page_to_iter_pipe(page, offset, bytes, i);
@@ -848,7 +848,7 @@
 {
 	if (unlikely(!page_copy_sane(page, offset, bytes)))
 		return 0;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -888,7 +888,7 @@
 
 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return pipe_zero(bytes, i);
 	iterate_and_advance(i, bytes, v,
 		clear_user(v.iov_base, v.iov_len),
@@ -908,7 +908,7 @@
 		kunmap_atomic(kaddr);
 		return 0;
 	}
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		kunmap_atomic(kaddr);
 		WARN_ON(1);
 		return 0;
@@ -972,7 +972,7 @@
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
 {
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		pipe_advance(i, size);
 		return;
 	}
@@ -987,7 +987,7 @@
 	if (WARN_ON(unroll > MAX_RW_COUNT))
 		return;
 	i->count += unroll;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		struct pipe_inode_info *pipe = i->pipe;
 		int idx = i->idx;
 		size_t off = i->iov_offset;
@@ -1016,7 +1016,7 @@
 		return;
 	}
 	unroll -= i->iov_offset;
-	if (i->type & ITER_BVEC) {
+	if (iov_iter_is_bvec(i)) {
 		const struct bio_vec *bvec = i->bvec;
 		while (1) {
 			size_t n = (--bvec)->bv_len;
@@ -1049,11 +1049,11 @@
  */
 size_t iov_iter_single_seg_count(const struct iov_iter *i)
 {
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return i->count;	// it is a silly place, anyway
 	if (i->nr_segs == 1)
 		return i->count;
-	else if (i->type & ITER_BVEC)
+	else if (iov_iter_is_bvec(i))
 		return min(i->count, i->bvec->bv_len - i->iov_offset);
 	else
 		return min(i->count, i->iov->iov_len - i->iov_offset);
@@ -1106,7 +1106,7 @@
 	unsigned long res = 0;
 	size_t size = i->count;
 
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
 			return size | i->iov_offset;
 		return size;
@@ -1125,7 +1125,7 @@
 	unsigned long res = 0;
 	size_t size = i->count;
 
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return ~0U;
 	}
@@ -1193,7 +1193,7 @@
 	if (maxsize > i->count)
 		maxsize = i->count;
 
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return pipe_get_pages(i, pages, maxsize, maxpages, start);
 	iterate_all_kinds(i, maxsize, v, ({
 		unsigned long addr = (unsigned long)v.iov_base;
@@ -1205,7 +1205,7 @@
 		len = maxpages * PAGE_SIZE;
 		addr &= ~(PAGE_SIZE - 1);
 		n = DIV_ROUND_UP(len, PAGE_SIZE);
-		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
 		if (unlikely(res < 0))
 			return res;
 		return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1270,7 +1270,7 @@
 	if (maxsize > i->count)
 		maxsize = i->count;
 
-	if (unlikely(i->type & ITER_PIPE))
+	if (unlikely(iov_iter_is_pipe(i)))
 		return pipe_get_pages_alloc(i, pages, maxsize, start);
 	iterate_all_kinds(i, maxsize, v, ({
 		unsigned long addr = (unsigned long)v.iov_base;
@@ -1283,7 +1283,7 @@
 		p = get_pages_array(n);
 		if (!p)
 			return -ENOMEM;
-		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
 		if (unlikely(res < 0)) {
 			kvfree(p);
 			return res;
@@ -1313,7 +1313,7 @@
 	__wsum sum, next;
 	size_t off = 0;
 	sum = *csum;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
@@ -1355,7 +1355,7 @@
 	__wsum sum, next;
 	size_t off = 0;
 	sum = *csum;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return false;
 	}
@@ -1400,7 +1400,7 @@
 	__wsum sum, next;
 	size_t off = 0;
 	sum = *csum;
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);	/* for now */
 		return 0;
 	}
@@ -1443,7 +1443,7 @@
 	if (!size)
 		return 0;
 
-	if (unlikely(i->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
 		struct pipe_inode_info *pipe = i->pipe;
 		size_t off;
 		int idx;
@@ -1481,11 +1481,11 @@
 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
 {
 	*new = *old;
-	if (unlikely(new->type & ITER_PIPE)) {
+	if (unlikely(iov_iter_is_pipe(new))) {
 		WARN_ON(1);
 		return NULL;
 	}
-	if (new->type & ITER_BVEC)
+	if (iov_iter_is_bvec(new))
 		return new->bvec = kmemdup(new->bvec,
 				new->nr_segs * sizeof(struct bio_vec),
 				flags);
+1 -1
mm/filemap.c
@@ -2122,7 +2122,7 @@
 		    !mapping->a_ops->is_partially_uptodate)
 			goto page_not_up_to_date;
 		/* pipes can't handle partially uptodate pages */
-		if (unlikely(iter->type & ITER_PIPE))
+		if (unlikely(iov_iter_is_pipe(iter)))
 			goto page_not_up_to_date;
 		if (!trylock_page(page))
 			goto page_not_up_to_date;
+1 -1
net/9p/trans_virtio.c
@@ -322,7 +322,7 @@
 	if (!iov_iter_count(data))
 		return 0;
 
-	if (!(data->type & ITER_KVEC)) {
+	if (!iov_iter_is_kvec(data)) {
 		int n;
 		/*
 		 * We allow only p9_max_pages pinned. We wait for the
+2 -2
net/tls/tls_sw.c
@@ -799,7 +799,7 @@
 	struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
 	bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
-	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
 	bool eor = !(msg->msg_flags & MSG_MORE);
 	size_t try_to_copy, copied = 0;
 	struct sk_msg *msg_pl, *msg_en;
@@ -1457,7 +1457,7 @@
 	bool cmsg = false;
 	int target, err = 0;
 	long timeo;
-	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
 	int num_async = 0;
 
 	flags |= nonblock;