Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

use less confusing names for iov_iter direction initializers

READ/WRITE proved to be actively confusing - the meanings are
"data destination, as used with read(2)" and "data source, as
used with write(2)", but people keep interpreting those as
"we read data from it" and "we write data to it", i.e. exactly
the wrong way.

Call them ITER_DEST and ITER_SOURCE - at least that is harder
to misinterpret...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Al Viro, commit de4eda9d (parent a41dad90)
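
To make the new convention concrete, here is a minimal sketch (not part of the patch) of how the renamed initializers are meant to read, modeled on the s390 crash-dump hunk below; fill_buffer() and drain_buffer() are hypothetical helpers used only for illustration, not anything in the tree:

    #include <linux/errno.h>
    #include <linux/uio.h>

    /* The caller's buffer is the data destination, as with read(2), so the
     * iterator over it is set up with ITER_DEST and filled via copy_to_iter(). */
    static int fill_buffer(void *dst, const void *src, size_t count)
    {
            struct kvec kvec = { .iov_base = dst, .iov_len = count };
            struct iov_iter iter;

            iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
            if (copy_to_iter(src, count, &iter) < count)
                    return -EFAULT;
            return 0;
    }

    /* The mirror case: the buffer is the data source, as with write(2), so the
     * iterator is set up with ITER_SOURCE and drained via copy_from_iter(). */
    static int drain_buffer(void *dst, const void *src, size_t count)
    {
            struct kvec kvec = { .iov_base = (void *)src, .iov_len = count };
            struct iov_iter iter;

            iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, count);
            if (copy_from_iter(dst, count, &iter) < count)
                    return -EFAULT;
            return 0;
    }

The same reading applies to the iov_iter_init, iov_iter_bvec, iov_iter_xarray and iov_iter_discard call sites changed below.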

195 insertions(+), 191 deletions(-)
+1 -1
arch/s390/kernel/crash_dump.c
··· 153 153 154 154 kvec.iov_base = dst; 155 155 kvec.iov_len = count; 156 - iov_iter_kvec(&iter, READ, &kvec, 1, count); 156 + iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count); 157 157 if (copy_oldmem_iter(&iter, src, count) < count) 158 158 return -EFAULT; 159 159 return 0;
+1 -1
arch/s390/mm/maccess.c
··· 128 128 129 129 kvec.iov_base = dest; 130 130 kvec.iov_len = count; 131 - iov_iter_kvec(&iter, READ, &kvec, 1, count); 131 + iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count); 132 132 if (memcpy_real_iter(&iter, src, count) < count) 133 133 return -EFAULT; 134 134 return 0;
+1 -1
arch/x86/kernel/cpu/microcode/intel.c
··· 908 908 909 909 kvec.iov_base = (void *)firmware->data; 910 910 kvec.iov_len = firmware->size; 911 - iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size); 911 + iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size); 912 912 ret = generic_load_microcode(cpu, &iter); 913 913 914 914 release_firmware(firmware);
+1 -1
arch/x86/kernel/crash_dump_64.c
··· 57 57 struct kvec kvec = { .iov_base = buf, .iov_len = count }; 58 58 struct iov_iter iter; 59 59 60 - iov_iter_kvec(&iter, READ, &kvec, 1, count); 60 + iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count); 61 61 62 62 return read_from_oldmem(&iter, count, ppos, 63 63 cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT));
+2 -2
crypto/testmgr.c
··· 766 766 struct iov_iter input; 767 767 int err; 768 768 769 - iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len); 769 + iov_iter_kvec(&input, ITER_SOURCE, inputs, nr_inputs, src_total_len); 770 770 err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask, 771 771 cfg->inplace_mode != OUT_OF_PLACE ? 772 772 max(dst_total_len, src_total_len) : ··· 1180 1180 1181 1181 kv.iov_base = (void *)vec->plaintext; 1182 1182 kv.iov_len = vec->psize; 1183 - iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize); 1183 + iov_iter_kvec(&input, ITER_SOURCE, &kv, 1, vec->psize); 1184 1184 return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize, 1185 1185 &input, divs); 1186 1186 }
+1 -1
drivers/acpi/pfr_update.c
··· 455 455 456 456 iov.iov_base = (void __user *)buf; 457 457 iov.iov_len = len; 458 - iov_iter_init(&iter, WRITE, &iov, 1, len); 458 + iov_iter_init(&iter, ITER_SOURCE, &iov, 1, len); 459 459 460 460 /* map the communication buffer */ 461 461 phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
+1 -1
drivers/block/drbd/drbd_main.c
··· 1816 1816 1817 1817 /* THINK if (signal_pending) return ... ? */ 1818 1818 1819 - iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size); 1819 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size); 1820 1820 1821 1821 if (sock == connection->data.socket) { 1822 1822 rcu_read_lock();
+1 -1
drivers/block/drbd/drbd_receiver.c
··· 507 507 struct msghdr msg = { 508 508 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL) 509 509 }; 510 - iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size); 510 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size); 511 511 return sock_recvmsg(sock, &msg, msg.msg_flags); 512 512 } 513 513
+6 -6
drivers/block/loop.c
··· 243 243 struct iov_iter i; 244 244 ssize_t bw; 245 245 246 - iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len); 246 + iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len); 247 247 248 248 file_start_write(file); 249 249 bw = vfs_iter_write(file, &i, ppos, 0); ··· 286 286 ssize_t len; 287 287 288 288 rq_for_each_segment(bvec, rq, iter) { 289 - iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len); 289 + iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len); 290 290 len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); 291 291 if (len < 0) 292 292 return len; ··· 392 392 } 393 393 394 394 static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, 395 - loff_t pos, bool rw) 395 + loff_t pos, int rw) 396 396 { 397 397 struct iov_iter iter; 398 398 struct req_iterator rq_iter; ··· 448 448 cmd->iocb.ki_flags = IOCB_DIRECT; 449 449 cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); 450 450 451 - if (rw == WRITE) 451 + if (rw == ITER_SOURCE) 452 452 ret = call_write_iter(file, &cmd->iocb, &iter); 453 453 else 454 454 ret = call_read_iter(file, &cmd->iocb, &iter); ··· 490 490 return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE); 491 491 case REQ_OP_WRITE: 492 492 if (cmd->use_aio) 493 - return lo_rw_aio(lo, cmd, pos, WRITE); 493 + return lo_rw_aio(lo, cmd, pos, ITER_SOURCE); 494 494 else 495 495 return lo_write_simple(lo, rq, pos); 496 496 case REQ_OP_READ: 497 497 if (cmd->use_aio) 498 - return lo_rw_aio(lo, cmd, pos, READ); 498 + return lo_rw_aio(lo, cmd, pos, ITER_DEST); 499 499 else 500 500 return lo_read_simple(lo, rq, pos); 501 501 default:
+5 -5
drivers/block/nbd.c
··· 563 563 u32 nbd_cmd_flags = 0; 564 564 int sent = nsock->sent, skip = 0; 565 565 566 - iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); 566 + iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request)); 567 567 568 568 type = req_to_nbd_cmd_type(req); 569 569 if (type == U32_MAX) ··· 649 649 650 650 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", 651 651 req, bvec.bv_len); 652 - iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len); 652 + iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len); 653 653 if (skip) { 654 654 if (skip >= iov_iter_count(&from)) { 655 655 skip -= iov_iter_count(&from); ··· 701 701 int result; 702 702 703 703 reply->magic = 0; 704 - iov_iter_kvec(&to, READ, &iov, 1, sizeof(*reply)); 704 + iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply)); 705 705 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); 706 706 if (result < 0) { 707 707 if (!nbd_disconnected(nbd->config)) ··· 790 790 struct iov_iter to; 791 791 792 792 rq_for_each_segment(bvec, req, iter) { 793 - iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len); 793 + iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len); 794 794 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); 795 795 if (result < 0) { 796 796 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", ··· 1267 1267 for (i = 0; i < config->num_connections; i++) { 1268 1268 struct nbd_sock *nsock = config->socks[i]; 1269 1269 1270 - iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); 1270 + iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request)); 1271 1271 mutex_lock(&nsock->tx_lock); 1272 1272 ret = sock_xmit(nbd, i, 1, &from, 0, NULL); 1273 1273 if (ret < 0)
+2 -2
drivers/char/random.c
··· 1291 1291 return ret; 1292 1292 } 1293 1293 1294 - ret = import_single_range(READ, ubuf, len, &iov, &iter); 1294 + ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter); 1295 1295 if (unlikely(ret)) 1296 1296 return ret; 1297 1297 return get_random_bytes_user(&iter); ··· 1409 1409 return -EINVAL; 1410 1410 if (get_user(len, p++)) 1411 1411 return -EFAULT; 1412 - ret = import_single_range(WRITE, p, len, &iov, &iter); 1412 + ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter); 1413 1413 if (unlikely(ret)) 1414 1414 return ret; 1415 1415 ret = write_pool_user(&iter);
+3 -3
drivers/fsi/fsi-sbefifo.c
··· 659 659 } 660 660 ffdc_iov.iov_base = ffdc; 661 661 ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE; 662 - iov_iter_kvec(&ffdc_iter, READ, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); 662 + iov_iter_kvec(&ffdc_iter, ITER_DEST, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE); 663 663 cmd[0] = cpu_to_be32(2); 664 664 cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC); 665 665 rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter); ··· 756 756 rbytes = (*resp_len) * sizeof(__be32); 757 757 resp_iov.iov_base = response; 758 758 resp_iov.iov_len = rbytes; 759 - iov_iter_kvec(&resp_iter, READ, &resp_iov, 1, rbytes); 759 + iov_iter_kvec(&resp_iter, ITER_DEST, &resp_iov, 1, rbytes); 760 760 761 761 /* Perform the command */ 762 762 rc = mutex_lock_interruptible(&sbefifo->lock); ··· 839 839 /* Prepare iov iterator */ 840 840 resp_iov.iov_base = buf; 841 841 resp_iov.iov_len = len; 842 - iov_iter_init(&resp_iter, READ, &resp_iov, 1, len); 842 + iov_iter_init(&resp_iter, ITER_DEST, &resp_iov, 1, len); 843 843 844 844 /* Perform the command */ 845 845 rc = mutex_lock_interruptible(&sbefifo->lock);
+1 -1
drivers/infiniband/ulp/rtrs/rtrs-clt.c
··· 966 966 refcount_set(&req->ref, 1); 967 967 req->mp_policy = clt_path->clt->mp_policy; 968 968 969 - iov_iter_kvec(&iter, WRITE, vec, 1, usr_len); 969 + iov_iter_kvec(&iter, ITER_SOURCE, vec, 1, usr_len); 970 970 len = _copy_from_iter(req->iu->buf, usr_len, &iter); 971 971 WARN_ON(len != usr_len); 972 972
+1 -1
drivers/isdn/mISDN/l1oip_core.c
··· 706 706 printk(KERN_DEBUG "%s: socket created and open\n", 707 707 __func__); 708 708 while (!signal_pending(current)) { 709 - iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, recvbuf_size); 709 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, recvbuf_size); 710 710 recvlen = sock_recvmsg(socket, &msg, 0); 711 711 if (recvlen > 0) { 712 712 l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
+3 -3
drivers/misc/vmw_vmci/vmci_queue_pair.c
··· 3044 3044 if (!qpair || !buf) 3045 3045 return VMCI_ERROR_INVALID_ARGS; 3046 3046 3047 - iov_iter_kvec(&from, WRITE, &v, 1, buf_size); 3047 + iov_iter_kvec(&from, ITER_SOURCE, &v, 1, buf_size); 3048 3048 3049 3049 qp_lock(qpair); 3050 3050 ··· 3088 3088 if (!qpair || !buf) 3089 3089 return VMCI_ERROR_INVALID_ARGS; 3090 3090 3091 - iov_iter_kvec(&to, READ, &v, 1, buf_size); 3091 + iov_iter_kvec(&to, ITER_DEST, &v, 1, buf_size); 3092 3092 3093 3093 qp_lock(qpair); 3094 3094 ··· 3133 3133 if (!qpair || !buf) 3134 3134 return VMCI_ERROR_INVALID_ARGS; 3135 3135 3136 - iov_iter_kvec(&to, READ, &v, 1, buf_size); 3136 + iov_iter_kvec(&to, ITER_DEST, &v, 1, buf_size); 3137 3137 3138 3138 qp_lock(qpair); 3139 3139
+1 -1
drivers/net/ppp/ppp_generic.c
··· 480 480 ret = -EFAULT; 481 481 iov.iov_base = buf; 482 482 iov.iov_len = count; 483 - iov_iter_init(&to, READ, &iov, 1, count); 483 + iov_iter_init(&to, ITER_DEST, &iov, 1, count); 484 484 if (skb_copy_datagram_iter(skb, 0, &to, skb->len)) 485 485 goto outf; 486 486 ret = skb->len;
+2 -2
drivers/nvme/host/tcp.c
··· 301 301 if (!iov_iter_count(&req->iter) && 302 302 req->data_sent < req->data_len) { 303 303 req->curr_bio = req->curr_bio->bi_next; 304 - nvme_tcp_init_iter(req, WRITE); 304 + nvme_tcp_init_iter(req, ITER_SOURCE); 305 305 } 306 306 } 307 307 ··· 781 781 nvme_tcp_init_recv_ctx(queue); 782 782 return -EIO; 783 783 } 784 - nvme_tcp_init_iter(req, READ); 784 + nvme_tcp_init_iter(req, ITER_DEST); 785 785 } 786 786 787 787 /* we can read only from what is left in this bio */
+2 -2
drivers/nvme/target/io-cmd-file.c
··· 102 102 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) 103 103 ki_flags |= IOCB_DSYNC; 104 104 call_iter = req->ns->file->f_op->write_iter; 105 - rw = WRITE; 105 + rw = ITER_SOURCE; 106 106 } else { 107 107 call_iter = req->ns->file->f_op->read_iter; 108 - rw = READ; 108 + rw = ITER_DEST; 109 109 } 110 110 111 111 iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
+1 -1
drivers/nvme/target/tcp.c
··· 331 331 sg_offset = 0; 332 332 } 333 333 334 - iov_iter_bvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, 334 + iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov, 335 335 nr_pages, cmd->pdu_len); 336 336 } 337 337
+1 -1
drivers/s390/char/zcore.c
··· 103 103 104 104 kvec.iov_base = dst; 105 105 kvec.iov_len = count; 106 - iov_iter_kvec(&iter, READ, &kvec, 1, count); 106 + iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count); 107 107 if (memcpy_hsa_iter(&iter, src, count) < count) 108 108 return -EIO; 109 109 return 0;
+1 -1
drivers/scsi/sg.c
··· 1726 1726 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1727 1727 struct request_queue *q = sfp->parentdp->device->request_queue; 1728 1728 struct rq_map_data *md, map_data; 1729 - int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ; 1729 + int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? ITER_SOURCE : ITER_DEST; 1730 1730 struct scsi_cmnd *scmd; 1731 1731 1732 1732 SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+2 -2
drivers/target/iscsi/iscsi_target_util.c
··· 1225 1225 return -1; 1226 1226 1227 1227 memset(&msg, 0, sizeof(struct msghdr)); 1228 - iov_iter_kvec(&msg.msg_iter, READ, iov, iov_count, data); 1228 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, iov, iov_count, data); 1229 1229 1230 1230 while (msg_data_left(&msg)) { 1231 1231 rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL); ··· 1261 1261 1262 1262 memset(&msg, 0, sizeof(struct msghdr)); 1263 1263 1264 - iov_iter_kvec(&msg.msg_iter, WRITE, iov, iov_count, data); 1264 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, iov_count, data); 1265 1265 1266 1266 while (msg_data_left(&msg)) { 1267 1267 int tx_loop = sock_sendmsg(conn->sock, &msg);
+1 -1
drivers/target/target_core_file.c
··· 473 473 len += se_dev->dev_attrib.block_size; 474 474 } 475 475 476 - iov_iter_bvec(&iter, WRITE, bvec, nolb, len); 476 + iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len); 477 477 ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0); 478 478 479 479 kfree(bvec);
+1 -1
drivers/usb/usbip/usbip_common.c
··· 309 309 if (!sock || !buf || !size) 310 310 return -EINVAL; 311 311 312 - iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size); 312 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size); 313 313 314 314 usbip_dbg_xmit("enter\n"); 315 315
+3 -3
drivers/vhost/net.c
··· 611 611 /* Skip header. TODO: support TSO. */ 612 612 size_t len = iov_length(vq->iov, out); 613 613 614 - iov_iter_init(iter, WRITE, vq->iov, out, len); 614 + iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len); 615 615 iov_iter_advance(iter, hdr_size); 616 616 617 617 return iov_iter_count(iter); ··· 1184 1184 msg.msg_control = vhost_net_buf_consume(&nvq->rxq); 1185 1185 /* On overrun, truncate and discard */ 1186 1186 if (unlikely(headcount > UIO_MAXIOV)) { 1187 - iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); 1187 + iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1); 1188 1188 err = sock->ops->recvmsg(sock, &msg, 1189 1189 1, MSG_DONTWAIT | MSG_TRUNC); 1190 1190 pr_debug("Discarded rx packet: len %zd\n", sock_len); 1191 1191 continue; 1192 1192 } 1193 1193 /* We don't need to be notified again. */ 1194 - iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); 1194 + iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len); 1195 1195 fixup = msg.msg_iter; 1196 1196 if (unlikely((vhost_hlen))) { 1197 1197 /* We will supply the header ourselves
+5 -5
drivers/vhost/scsi.c
··· 563 563 memcpy(v_rsp.sense, cmd->tvc_sense_buf, 564 564 se_cmd->scsi_sense_length); 565 565 566 - iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov, 566 + iov_iter_init(&iov_iter, ITER_DEST, &cmd->tvc_resp_iov, 567 567 cmd->tvc_in_iovs, sizeof(v_rsp)); 568 568 ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); 569 569 if (likely(ret == sizeof(v_rsp))) { ··· 864 864 * point at the start of the outgoing WRITE payload, if 865 865 * DMA_TO_DEVICE is set. 866 866 */ 867 - iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size); 867 + iov_iter_init(&vc->out_iter, ITER_SOURCE, vq->iov, vc->out, vc->out_size); 868 868 ret = 0; 869 869 870 870 done: ··· 1016 1016 data_direction = DMA_FROM_DEVICE; 1017 1017 exp_data_len = vc.in_size - vc.rsp_size; 1018 1018 1019 - iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in, 1019 + iov_iter_init(&in_iter, ITER_DEST, &vq->iov[vc.out], vc.in, 1020 1020 vc.rsp_size + exp_data_len); 1021 1021 iov_iter_advance(&in_iter, vc.rsp_size); 1022 1022 data_iter = in_iter; ··· 1146 1146 memset(&rsp, 0, sizeof(rsp)); 1147 1147 rsp.response = tmf_resp_code; 1148 1148 1149 - iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp)); 1149 + iov_iter_init(&iov_iter, ITER_DEST, resp_iov, in_iovs, sizeof(rsp)); 1150 1150 1151 1151 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); 1152 1152 if (likely(ret == sizeof(rsp))) ··· 1238 1238 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ 1239 1239 rsp.response = VIRTIO_SCSI_S_OK; 1240 1240 1241 - iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp)); 1241 + iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in, sizeof(rsp)); 1242 1242 1243 1243 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter); 1244 1244 if (likely(ret == sizeof(rsp)))
+3 -3
drivers/vhost/vhost.c
··· 832 832 VHOST_ACCESS_WO); 833 833 if (ret < 0) 834 834 goto out; 835 - iov_iter_init(&t, READ, vq->iotlb_iov, ret, size); 835 + iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size); 836 836 ret = copy_to_iter(from, size, &t); 837 837 if (ret == size) 838 838 ret = 0; ··· 871 871 (unsigned long long) size); 872 872 goto out; 873 873 } 874 - iov_iter_init(&f, WRITE, vq->iotlb_iov, ret, size); 874 + iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size); 875 875 ret = copy_from_iter(to, size, &f); 876 876 if (ret == size) 877 877 ret = 0; ··· 2135 2135 vq_err(vq, "Translation failure %d in indirect.\n", ret); 2136 2136 return ret; 2137 2137 } 2138 - iov_iter_init(&from, WRITE, vq->indirect, ret, len); 2138 + iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len); 2139 2139 count = len / sizeof desc; 2140 2140 /* Buffers are chained via a 16 bit next field, so 2141 2141 * we can have at most 2^16 of these. */
+2 -2
drivers/vhost/vringh.c
··· 1162 1162 else if (ret < 0) 1163 1163 return ret; 1164 1164 1165 - iov_iter_bvec(&iter, WRITE, iov, ret, translated); 1165 + iov_iter_bvec(&iter, ITER_SOURCE, iov, ret, translated); 1166 1166 1167 1167 ret = copy_from_iter(dst, translated, &iter); 1168 1168 if (ret < 0) ··· 1195 1195 else if (ret < 0) 1196 1196 return ret; 1197 1197 1198 - iov_iter_bvec(&iter, READ, iov, ret, translated); 1198 + iov_iter_bvec(&iter, ITER_DEST, iov, ret, translated); 1199 1199 1200 1200 ret = copy_to_iter(src, translated, &iter); 1201 1201 if (ret < 0)
+2 -2
drivers/vhost/vsock.c
··· 165 165 break; 166 166 } 167 167 168 - iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len); 168 + iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len); 169 169 payload_len = pkt->len - pkt->off; 170 170 171 171 /* If the packet is greater than the space available in the ··· 371 371 return NULL; 372 372 373 373 len = iov_length(vq->iov, out); 374 - iov_iter_init(&iov_iter, WRITE, vq->iov, out, len); 374 + iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len); 375 375 376 376 nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter); 377 377 if (nbytes != sizeof(pkt->hdr)) {
+4 -4
drivers/xen/pvcalls-back.c
··· 129 129 if (masked_prod < masked_cons) { 130 130 vec[0].iov_base = data->in + masked_prod; 131 131 vec[0].iov_len = wanted; 132 - iov_iter_kvec(&msg.msg_iter, READ, vec, 1, wanted); 132 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 1, wanted); 133 133 } else { 134 134 vec[0].iov_base = data->in + masked_prod; 135 135 vec[0].iov_len = array_size - masked_prod; 136 136 vec[1].iov_base = data->in; 137 137 vec[1].iov_len = wanted - vec[0].iov_len; 138 - iov_iter_kvec(&msg.msg_iter, READ, vec, 2, wanted); 138 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 2, wanted); 139 139 } 140 140 141 141 atomic_set(&map->read, 0); ··· 188 188 if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) { 189 189 vec[0].iov_base = data->out + pvcalls_mask(cons, array_size); 190 190 vec[0].iov_len = size; 191 - iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, size); 191 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, size); 192 192 } else { 193 193 vec[0].iov_base = data->out + pvcalls_mask(cons, array_size); 194 194 vec[0].iov_len = array_size - pvcalls_mask(cons, array_size); 195 195 vec[1].iov_base = data->out; 196 196 vec[1].iov_len = size - vec[0].iov_len; 197 - iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, size); 197 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 2, size); 198 198 } 199 199 200 200 atomic_set(&map->write, 0);
+2 -2
fs/9p/vfs_addr.c
··· 40 40 size_t len = subreq->len - subreq->transferred; 41 41 int total, err; 42 42 43 - iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len); 43 + iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len); 44 44 45 45 total = p9_client_read(fid, pos, &to, &err); 46 46 ··· 172 172 173 173 len = min_t(loff_t, i_size - start, len); 174 174 175 - iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len); 175 + iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len); 176 176 177 177 /* We should have writeback_fid always set */ 178 178 BUG_ON(!v9inode->writeback_fid);
+1 -1
fs/9p/vfs_dir.c
··· 109 109 struct iov_iter to; 110 110 int n; 111 111 112 - iov_iter_kvec(&to, READ, &kvec, 1, buflen); 112 + iov_iter_kvec(&to, ITER_DEST, &kvec, 1, buflen); 113 113 n = p9_client_read(file->private_data, ctx->pos, &to, 114 114 &err); 115 115 if (err)
+2 -2
fs/9p/xattr.c
··· 24 24 struct iov_iter to; 25 25 int err; 26 26 27 - iov_iter_kvec(&to, READ, &kvec, 1, buffer_size); 27 + iov_iter_kvec(&to, ITER_DEST, &kvec, 1, buffer_size); 28 28 29 29 attr_fid = p9_client_xattrwalk(fid, name, &attr_size); 30 30 if (IS_ERR(attr_fid)) { ··· 109 109 struct iov_iter from; 110 110 int retval, err; 111 111 112 - iov_iter_kvec(&from, WRITE, &kvec, 1, value_len); 112 + iov_iter_kvec(&from, ITER_SOURCE, &kvec, 1, value_len); 113 113 114 114 p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n", 115 115 name, value_len, flags);
+1 -1
fs/afs/cmservice.c
··· 298 298 if (call->count2 != call->count && call->count2 != 0) 299 299 return afs_protocol_error(call, afs_eproto_cb_count); 300 300 call->iter = &call->def_iter; 301 - iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4); 301 + iov_iter_discard(&call->def_iter, ITER_DEST, call->count2 * 3 * 4); 302 302 call->unmarshall++; 303 303 304 304 fallthrough;
+1 -1
fs/afs/dir.c
··· 305 305 req->actual_len = i_size; /* May change */ 306 306 req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */ 307 307 req->data_version = dvnode->status.data_version; /* May change */ 308 - iov_iter_xarray(&req->def_iter, READ, &dvnode->netfs.inode.i_mapping->i_pages, 308 + iov_iter_xarray(&req->def_iter, ITER_DEST, &dvnode->netfs.inode.i_mapping->i_pages, 309 309 0, i_size); 310 310 req->iter = &req->def_iter; 311 311
+2 -2
fs/afs/file.c
··· 324 324 fsreq->vnode = vnode; 325 325 fsreq->iter = &fsreq->def_iter; 326 326 327 - iov_iter_xarray(&fsreq->def_iter, READ, 327 + iov_iter_xarray(&fsreq->def_iter, ITER_DEST, 328 328 &fsreq->vnode->netfs.inode.i_mapping->i_pages, 329 329 fsreq->pos, fsreq->len); 330 330 ··· 346 346 fsreq->len = folio_size(folio); 347 347 fsreq->vnode = vnode; 348 348 fsreq->iter = &fsreq->def_iter; 349 - iov_iter_xarray(&fsreq->def_iter, READ, &folio->mapping->i_pages, 349 + iov_iter_xarray(&fsreq->def_iter, ITER_DEST, &folio->mapping->i_pages, 350 350 fsreq->pos, fsreq->len); 351 351 352 352 ret = afs_fetch_data(fsreq->vnode, fsreq);
+2 -2
fs/afs/internal.h
··· 1301 1301 call->iov_len = size; 1302 1302 call->kvec[0].iov_base = buf; 1303 1303 call->kvec[0].iov_len = size; 1304 - iov_iter_kvec(&call->def_iter, READ, call->kvec, 1, size); 1304 + iov_iter_kvec(&call->def_iter, ITER_DEST, call->kvec, 1, size); 1305 1305 } 1306 1306 1307 1307 static inline void afs_extract_to_tmp(struct afs_call *call) ··· 1319 1319 static inline void afs_extract_discard(struct afs_call *call, size_t size) 1320 1320 { 1321 1321 call->iov_len = size; 1322 - iov_iter_discard(&call->def_iter, READ, size); 1322 + iov_iter_discard(&call->def_iter, ITER_DEST, size); 1323 1323 } 1324 1324 1325 1325 static inline void afs_extract_to_buf(struct afs_call *call, size_t size)
+5 -5
fs/afs/rxrpc.c
··· 359 359 360 360 msg.msg_name = NULL; 361 361 msg.msg_namelen = 0; 362 - iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size); 362 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, 1, call->request_size); 363 363 msg.msg_control = NULL; 364 364 msg.msg_controllen = 0; 365 365 msg.msg_flags = MSG_WAITALL | (call->write_iter ? MSG_MORE : 0); ··· 400 400 RX_USER_ABORT, ret, "KSD"); 401 401 } else { 402 402 len = 0; 403 - iov_iter_kvec(&msg.msg_iter, READ, NULL, 0, 0); 403 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0); 404 404 rxrpc_kernel_recv_data(call->net->socket, rxcall, 405 405 &msg.msg_iter, &len, false, 406 406 &call->abort_code, &call->service_id); ··· 485 485 ) { 486 486 if (state == AFS_CALL_SV_AWAIT_ACK) { 487 487 len = 0; 488 - iov_iter_kvec(&call->def_iter, READ, NULL, 0, 0); 488 + iov_iter_kvec(&call->def_iter, ITER_DEST, NULL, 0, 0); 489 489 ret = rxrpc_kernel_recv_data(call->net->socket, 490 490 call->rxcall, &call->def_iter, 491 491 &len, false, &remote_abort, ··· 822 822 823 823 msg.msg_name = NULL; 824 824 msg.msg_namelen = 0; 825 - iov_iter_kvec(&msg.msg_iter, WRITE, NULL, 0, 0); 825 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, NULL, 0, 0); 826 826 msg.msg_control = NULL; 827 827 msg.msg_controllen = 0; 828 828 msg.msg_flags = 0; ··· 862 862 iov[0].iov_len = len; 863 863 msg.msg_name = NULL; 864 864 msg.msg_namelen = 0; 865 - iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len); 865 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, 1, len); 866 866 msg.msg_control = NULL; 867 867 msg.msg_controllen = 0; 868 868 msg.msg_flags = 0;
+2 -2
fs/afs/write.c
··· 609 609 */ 610 610 afs_write_to_cache(vnode, start, len, i_size, caching); 611 611 612 - iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len); 612 + iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len); 613 613 ret = afs_store_data(vnode, &iter, start, false); 614 614 } else { 615 615 _debug("write discard %x @%llx [%llx]", len, start, i_size); ··· 1000 1000 bv[0].bv_page = &folio->page; 1001 1001 bv[0].bv_offset = f; 1002 1002 bv[0].bv_len = t - f; 1003 - iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len); 1003 + iov_iter_bvec(&iter, ITER_SOURCE, bv, 1, bv[0].bv_len); 1004 1004 1005 1005 trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio); 1006 1006 ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
+2 -2
fs/aio.c
··· 1552 1552 if (unlikely(!file->f_op->read_iter)) 1553 1553 return -EINVAL; 1554 1554 1555 - ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter); 1555 + ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter); 1556 1556 if (ret < 0) 1557 1557 return ret; 1558 1558 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); ··· 1580 1580 if (unlikely(!file->f_op->write_iter)) 1581 1581 return -EINVAL; 1582 1582 1583 - ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter); 1583 + ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter); 1584 1584 if (ret < 0) 1585 1585 return ret; 1586 1586 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
+2 -2
fs/btrfs/ioctl.c
··· 5283 5283 goto out_acct; 5284 5284 } 5285 5285 5286 - ret = import_iovec(READ, args.iov, args.iovcnt, ARRAY_SIZE(iovstack), 5286 + ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack), 5287 5287 &iov, &iter); 5288 5288 if (ret < 0) 5289 5289 goto out_acct; ··· 5382 5382 if (args.len > args.unencoded_len - args.unencoded_offset) 5383 5383 goto out_acct; 5384 5384 5385 - ret = import_iovec(WRITE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack), 5385 + ret = import_iovec(ITER_SOURCE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack), 5386 5386 &iov, &iter); 5387 5387 if (ret < 0) 5388 5388 goto out_acct;
+2 -2
fs/ceph/addr.c
··· 288 288 } 289 289 290 290 len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len); 291 - iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len); 291 + iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len); 292 292 err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter); 293 293 if (err == 0) 294 294 err = -EFAULT; ··· 327 327 } 328 328 329 329 dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len); 330 - iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, subreq->start, len); 330 + iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len); 331 331 err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off); 332 332 if (err < 0) { 333 333 dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err);
+2 -2
fs/ceph/file.c
··· 1161 1161 aio_req->total_len = rc + zlen; 1162 1162 } 1163 1163 1164 - iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs, 1164 + iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs, 1165 1165 osd_data->num_bvecs, len); 1166 1166 iov_iter_advance(&i, rc); 1167 1167 iov_iter_zero(zlen, &i); ··· 1400 1400 int zlen = min_t(size_t, len - ret, 1401 1401 size - pos - ret); 1402 1402 1403 - iov_iter_bvec(&i, READ, bvecs, num_pages, len); 1403 + iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len); 1404 1404 iov_iter_advance(&i, ret); 1405 1405 iov_iter_zero(zlen, &i); 1406 1406 ret += zlen;
+3 -3
fs/cifs/connect.c
··· 759 759 { 760 760 struct msghdr smb_msg = {}; 761 761 struct kvec iov = {.iov_base = buf, .iov_len = to_read}; 762 - iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read); 762 + iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read); 763 763 764 764 return cifs_readv_from_socket(server, &smb_msg); 765 765 } ··· 774 774 * and cifs_readv_from_socket sets msg_control and msg_controllen 775 775 * so little to initialize in struct msghdr 776 776 */ 777 - iov_iter_discard(&smb_msg.msg_iter, READ, to_read); 777 + iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read); 778 778 779 779 return cifs_readv_from_socket(server, &smb_msg); 780 780 } ··· 786 786 struct msghdr smb_msg = {}; 787 787 struct bio_vec bv = { 788 788 .bv_page = page, .bv_len = to_read, .bv_offset = page_offset}; 789 - iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read); 789 + iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read); 790 790 return cifs_readv_from_socket(server, &smb_msg); 791 791 } 792 792
+2 -2
fs/cifs/file.c
··· 3532 3532 ctx->iter = *from; 3533 3533 ctx->len = len; 3534 3534 } else { 3535 - rc = setup_aio_ctx_iter(ctx, from, WRITE); 3535 + rc = setup_aio_ctx_iter(ctx, from, ITER_SOURCE); 3536 3536 if (rc) { 3537 3537 kref_put(&ctx->refcount, cifs_aio_ctx_release); 3538 3538 return rc; ··· 4276 4276 ctx->iter = *to; 4277 4277 ctx->len = len; 4278 4278 } else { 4279 - rc = setup_aio_ctx_iter(ctx, to, READ); 4279 + rc = setup_aio_ctx_iter(ctx, to, ITER_DEST); 4280 4280 if (rc) { 4281 4281 kref_put(&ctx->refcount, cifs_aio_ctx_release); 4282 4282 return rc;
+2 -2
fs/cifs/fscache.c
··· 150 150 bvec[0].bv_page = page; 151 151 bvec[0].bv_offset = 0; 152 152 bvec[0].bv_len = PAGE_SIZE; 153 - iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); 153 + iov_iter_bvec(&iter, ITER_DEST, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); 154 154 155 155 ret = fscache_begin_read_operation(&cres, cookie); 156 156 if (ret < 0) ··· 180 180 bvec[0].bv_page = page; 181 181 bvec[0].bv_offset = 0; 182 182 bvec[0].bv_len = PAGE_SIZE; 183 - iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); 183 + iov_iter_bvec(&iter, ITER_SOURCE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); 184 184 185 185 ret = fscache_begin_write_operation(&cres, cookie); 186 186 if (ret < 0)
+2 -2
fs/cifs/smb2ops.c
··· 4723 4723 return 0; 4724 4724 } 4725 4725 4726 - iov_iter_bvec(&iter, WRITE, bvec, npages, data_len); 4726 + iov_iter_bvec(&iter, ITER_SOURCE, bvec, npages, data_len); 4727 4727 } else if (buf_len >= data_offset + data_len) { 4728 4728 /* read response payload is in buf */ 4729 4729 WARN_ONCE(npages > 0, "read data can be either in buf or in pages"); 4730 4730 iov.iov_base = buf + data_offset; 4731 4731 iov.iov_len = data_len; 4732 - iov_iter_kvec(&iter, WRITE, &iov, 1, data_len); 4732 + iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, data_len); 4733 4733 } else { 4734 4734 /* read response payload cannot be in both buf and pages */ 4735 4735 WARN_ONCE(1, "buf can not contain only a part of read data");
+3 -3
fs/cifs/transport.c
··· 347 347 .iov_base = &rfc1002_marker, 348 348 .iov_len = 4 349 349 }; 350 - iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4); 350 + iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4); 351 351 rc = smb_send_kvec(server, &smb_msg, &sent); 352 352 if (rc < 0) 353 353 goto unmask; ··· 368 368 size += iov[i].iov_len; 369 369 } 370 370 371 - iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size); 371 + iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size); 372 372 373 373 rc = smb_send_kvec(server, &smb_msg, &sent); 374 374 if (rc < 0) ··· 384 384 rqst_page_get_length(&rqst[j], i, &bvec.bv_len, 385 385 &bvec.bv_offset); 386 386 387 - iov_iter_bvec(&smb_msg.msg_iter, WRITE, 387 + iov_iter_bvec(&smb_msg.msg_iter, ITER_SOURCE, 388 388 &bvec, 1, bvec.bv_len); 389 389 rc = smb_send_kvec(server, &smb_msg, &sent); 390 390 if (rc < 0)
+1 -1
fs/coredump.c
··· 853 853 if (dump_interrupted()) 854 854 return 0; 855 855 pos = file->f_pos; 856 - iov_iter_bvec(&iter, WRITE, &bvec, 1, PAGE_SIZE); 856 + iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE); 857 857 n = __kernel_write_iter(cprm->file, &iter, &pos); 858 858 if (n != PAGE_SIZE) 859 859 return 0;
+3 -3
fs/erofs/fscache.c
··· 194 194 195 195 atomic_inc(&rreq->nr_outstanding); 196 196 197 - iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, 197 + iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, 198 198 start + done, subreq->len); 199 199 200 200 ret = fscache_read(cres, subreq->start, &iter, ··· 290 290 if (IS_ERR(src)) 291 291 return PTR_ERR(src); 292 292 293 - iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, PAGE_SIZE); 293 + iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE); 294 294 if (copy_to_iter(src + offset, size, &iter) != size) { 295 295 erofs_put_metabuf(&buf); 296 296 return -EFAULT; ··· 302 302 303 303 if (!(map.m_flags & EROFS_MAP_MAPPED)) { 304 304 count = len; 305 - iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, count); 305 + iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count); 306 306 iov_iter_zero(count, &iter); 307 307 return count; 308 308 }
+1 -1
fs/fscache/io.c
··· 286 286 * taken into account. 287 287 */ 288 288 289 - iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len); 289 + iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len); 290 290 fscache_write(cres, start, &iter, fscache_wreq_done, wreq); 291 291 return; 292 292
+2 -2
fs/fuse/ioctl.c
··· 255 255 ap.args.in_pages = true; 256 256 257 257 err = -EFAULT; 258 - iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size); 258 + iov_iter_init(&ii, ITER_SOURCE, in_iov, in_iovs, in_size); 259 259 for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) { 260 260 c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii); 261 261 if (c != PAGE_SIZE && iov_iter_count(&ii)) ··· 324 324 goto out; 325 325 326 326 err = -EFAULT; 327 - iov_iter_init(&ii, READ, out_iov, out_iovs, transferred); 327 + iov_iter_init(&ii, ITER_DEST, out_iov, out_iovs, transferred); 328 328 for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) { 329 329 c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii); 330 330 if (c != PAGE_SIZE && iov_iter_count(&ii))
+3 -3
fs/netfs/io.c
··· 23 23 { 24 24 struct iov_iter iter; 25 25 26 - iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages, 26 + iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages, 27 27 subreq->start + subreq->transferred, 28 28 subreq->len - subreq->transferred); 29 29 iov_iter_zero(iov_iter_count(&iter), &iter); ··· 49 49 struct iov_iter iter; 50 50 51 51 netfs_stat(&netfs_n_rh_read); 52 - iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, 52 + iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, 53 53 subreq->start + subreq->transferred, 54 54 subreq->len - subreq->transferred); 55 55 ··· 208 208 continue; 209 209 } 210 210 211 - iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages, 211 + iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages, 212 212 subreq->start, subreq->len); 213 213 214 214 atomic_inc(&rreq->nr_copy_ops);
+2 -2
fs/nfs/fscache.c
··· 252 252 bvec[0].bv_page = page; 253 253 bvec[0].bv_offset = 0; 254 254 bvec[0].bv_len = PAGE_SIZE; 255 - iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); 255 + iov_iter_bvec(&iter, ITER_DEST, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); 256 256 257 257 ret = fscache_begin_read_operation(&cres, cookie); 258 258 if (ret < 0) ··· 282 282 bvec[0].bv_page = page; 283 283 bvec[0].bv_offset = 0; 284 284 bvec[0].bv_len = PAGE_SIZE; 285 - iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); 285 + iov_iter_bvec(&iter, ITER_SOURCE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); 286 286 287 287 ret = fscache_begin_write_operation(&cres, cookie); 288 288 if (ret < 0)
+2 -2
fs/nfsd/vfs.c
··· 942 942 ssize_t host_err; 943 943 944 944 trace_nfsd_read_vector(rqstp, fhp, offset, *count); 945 - iov_iter_kvec(&iter, READ, vec, vlen, *count); 945 + iov_iter_kvec(&iter, ITER_DEST, vec, vlen, *count); 946 946 host_err = vfs_iter_read(file, &iter, &ppos, 0); 947 947 return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err); 948 948 } ··· 1032 1032 if (stable && !use_wgather) 1033 1033 flags |= RWF_SYNC; 1034 1034 1035 - iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt); 1035 + iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt); 1036 1036 since = READ_ONCE(file->f_wb_err); 1037 1037 if (verf) 1038 1038 nfsd_copy_write_verifier(verf, nn);
+1 -1
fs/ocfs2/cluster/tcp.c
··· 900 900 { 901 901 struct kvec vec = { .iov_len = len, .iov_base = data, }; 902 902 struct msghdr msg = { .msg_flags = MSG_DONTWAIT, }; 903 - iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, len); 903 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, len); 904 904 return sock_recvmsg(sock, &msg, MSG_DONTWAIT); 905 905 } 906 906
+4 -4
fs/orangefs/inode.c
··· 53 53 bv.bv_len = wlen; 54 54 bv.bv_offset = off % PAGE_SIZE; 55 55 WARN_ON(wlen == 0); 56 - iov_iter_bvec(&iter, WRITE, &bv, 1, wlen); 56 + iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, wlen); 57 57 58 58 ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen, 59 59 len, wr, NULL, NULL); ··· 112 112 else 113 113 ow->bv[i].bv_offset = 0; 114 114 } 115 - iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len); 115 + iov_iter_bvec(&iter, ITER_SOURCE, ow->bv, ow->npages, ow->len); 116 116 117 117 WARN_ON(ow->off >= len); 118 118 if (ow->off + ow->len > len) ··· 270 270 offset = readahead_pos(rac); 271 271 i_pages = &rac->mapping->i_pages; 272 272 273 - iov_iter_xarray(&iter, READ, i_pages, offset, readahead_length(rac)); 273 + iov_iter_xarray(&iter, ITER_DEST, i_pages, offset, readahead_length(rac)); 274 274 275 275 /* read in the pages. */ 276 276 if ((ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, ··· 303 303 bv.bv_page = &folio->page; 304 304 bv.bv_len = folio_size(folio); 305 305 bv.bv_offset = 0; 306 - iov_iter_bvec(&iter, READ, &bv, 1, folio_size(folio)); 306 + iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio)); 307 307 308 308 ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter, 309 309 folio_size(folio), inode->i_size, NULL, NULL, file);
+3 -3
fs/proc/vmcore.c
··· 199 199 struct kvec kvec = { .iov_base = buf, .iov_len = count }; 200 200 struct iov_iter iter; 201 201 202 - iov_iter_kvec(&iter, READ, &kvec, 1, count); 202 + iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count); 203 203 204 204 return read_from_oldmem(&iter, count, ppos, false); 205 205 } ··· 212 212 struct kvec kvec = { .iov_base = buf, .iov_len = count }; 213 213 struct iov_iter iter; 214 214 215 - iov_iter_kvec(&iter, READ, &kvec, 1, count); 215 + iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count); 216 216 217 217 return read_from_oldmem(&iter, count, ppos, 218 218 cc_platform_has(CC_ATTR_MEM_ENCRYPT)); ··· 437 437 offset = (loff_t) index << PAGE_SHIFT; 438 438 kvec.iov_base = page_address(page); 439 439 kvec.iov_len = PAGE_SIZE; 440 - iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE); 440 + iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE); 441 441 442 442 rc = __read_vmcore(&iter, &offset); 443 443 if (rc < 0) {
+6 -6
fs/read_write.c
··· 384 384 385 385 init_sync_kiocb(&kiocb, filp); 386 386 kiocb.ki_pos = (ppos ? *ppos : 0); 387 - iov_iter_ubuf(&iter, READ, buf, len); 387 + iov_iter_ubuf(&iter, ITER_DEST, buf, len); 388 388 389 389 ret = call_read_iter(filp, &kiocb, &iter); 390 390 BUG_ON(ret == -EIOCBQUEUED); ··· 424 424 425 425 init_sync_kiocb(&kiocb, file); 426 426 kiocb.ki_pos = pos ? *pos : 0; 427 - iov_iter_kvec(&iter, READ, &iov, 1, iov.iov_len); 427 + iov_iter_kvec(&iter, ITER_DEST, &iov, 1, iov.iov_len); 428 428 ret = file->f_op->read_iter(&kiocb, &iter); 429 429 if (ret > 0) { 430 430 if (pos) ··· 486 486 487 487 init_sync_kiocb(&kiocb, filp); 488 488 kiocb.ki_pos = (ppos ? *ppos : 0); 489 - iov_iter_ubuf(&iter, WRITE, (void __user *)buf, len); 489 + iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, len); 490 490 491 491 ret = call_write_iter(filp, &kiocb, &iter); 492 492 BUG_ON(ret == -EIOCBQUEUED); ··· 533 533 .iov_len = min_t(size_t, count, MAX_RW_COUNT), 534 534 }; 535 535 struct iov_iter iter; 536 - iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len); 536 + iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, iov.iov_len); 537 537 return __kernel_write_iter(file, &iter, pos); 538 538 } 539 539 /* ··· 911 911 struct iov_iter iter; 912 912 ssize_t ret; 913 913 914 - ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); 914 + ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); 915 915 if (ret >= 0) { 916 916 ret = do_iter_read(file, &iter, pos, flags); 917 917 kfree(iov); ··· 928 928 struct iov_iter iter; 929 929 ssize_t ret; 930 930 931 - ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); 931 + ret = import_iovec(ITER_SOURCE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); 932 932 if (ret >= 0) { 933 933 file_start_write(file); 934 934 ret = do_iter_write(file, &iter, pos, flags);
+1 -1
fs/seq_file.c
··· 156 156 ssize_t ret; 157 157 158 158 init_sync_kiocb(&kiocb, file); 159 - iov_iter_init(&iter, READ, &iov, 1, size); 159 + iov_iter_init(&iter, ITER_DEST, &iov, 1, size); 160 160 161 161 kiocb.ki_pos = *ppos; 162 162 ret = seq_read_iter(&kiocb, &iter);
+5 -5
fs/splice.c
··· 303 303 struct kiocb kiocb; 304 304 int ret; 305 305 306 - iov_iter_pipe(&to, READ, pipe, len); 306 + iov_iter_pipe(&to, ITER_DEST, pipe, len); 307 307 init_sync_kiocb(&kiocb, in); 308 308 kiocb.ki_pos = *ppos; 309 309 ret = call_read_iter(in, &kiocb, &to); ··· 682 682 n++; 683 683 } 684 684 685 - iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left); 685 + iov_iter_bvec(&from, ITER_SOURCE, array, n, sd.total_len - left); 686 686 ret = vfs_iter_write(out, &from, &sd.pos, 0); 687 687 if (ret <= 0) 688 688 break; ··· 1263 1263 if (!f.file) 1264 1264 return -EBADF; 1265 1265 if (f.file->f_mode & FMODE_WRITE) { 1266 - *type = WRITE; 1266 + *type = ITER_SOURCE; 1267 1267 } else if (f.file->f_mode & FMODE_READ) { 1268 - *type = READ; 1268 + *type = ITER_DEST; 1269 1269 } else { 1270 1270 fdput(f); 1271 1271 return -EBADF; ··· 1314 1314 1315 1315 if (!iov_iter_count(&iter)) 1316 1316 error = 0; 1317 - else if (iov_iter_rw(&iter) == WRITE) 1317 + else if (type == ITER_SOURCE) 1318 1318 error = vmsplice_to_pipe(f.file, &iter, flags); 1319 1319 else 1320 1320 error = vmsplice_to_user(f.file, &iter, flags);
+3
include/linux/uio.h
··· 29 29 ITER_UBUF, 30 30 }; 31 31 32 + #define ITER_SOURCE 1 // == WRITE 33 + #define ITER_DEST 0 // == READ 34 + 32 35 struct iov_iter_state { 33 36 size_t iov_offset; 34 37 size_t count;
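
The include/linux/uio.h hunk above is the heart of the change: the new names are plain defines with the same values the old flags had (READ is 0 and WRITE is 1 in the kernel), so callers that stash the direction in an ordinary int - see the drivers/scsi/sg.c and mm/process_vm_access.c hunks - behave exactly as before. A small compile-time sanity check, shown only as an illustration and assuming the usual READ/WRITE defines from <linux/kernel.h>:

    #include <linux/build_bug.h>
    #include <linux/kernel.h>
    #include <linux/uio.h>

    /* The rename is purely cosmetic: the numeric values are unchanged, so
     * old-style and new-style direction arguments remain interchangeable. */
    static_assert(ITER_DEST == READ);
    static_assert(ITER_SOURCE == WRITE);
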
+7 -7
io_uring/net.c
··· 363 363 if (unlikely(!sock)) 364 364 return -ENOTSOCK; 365 365 366 - ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter); 366 + ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter); 367 367 if (unlikely(ret)) 368 368 return ret; 369 369 ··· 449 449 } 450 450 } else { 451 451 iomsg->free_iov = iomsg->fast_iov; 452 - ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV, 452 + ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV, 453 453 &iomsg->free_iov, &iomsg->msg.msg_iter, 454 454 false); 455 455 if (ret > 0) ··· 501 501 } 502 502 } else { 503 503 iomsg->free_iov = iomsg->fast_iov; 504 - ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen, 504 + ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen, 505 505 UIO_FASTIOV, &iomsg->free_iov, 506 506 &iomsg->msg.msg_iter, true); 507 507 if (ret < 0) ··· 751 751 752 752 kmsg->fast_iov[0].iov_base = buf; 753 753 kmsg->fast_iov[0].iov_len = len; 754 - iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1, 754 + iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1, 755 755 len); 756 756 } 757 757 ··· 845 845 sr->buf = buf; 846 846 } 847 847 848 - ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter); 848 + ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter); 849 849 if (unlikely(ret)) 850 850 goto out_free; 851 851 ··· 1081 1081 return io_setup_async_addr(req, &__address, issue_flags); 1082 1082 1083 1083 if (zc->flags & IORING_RECVSEND_FIXED_BUF) { 1084 - ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu, 1084 + ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu, 1085 1085 (u64)(uintptr_t)zc->buf, zc->len); 1086 1086 if (unlikely(ret)) 1087 1087 return ret; 1088 1088 msg.sg_from_iter = io_sg_from_iter; 1089 1089 } else { 1090 - ret = import_single_range(WRITE, zc->buf, zc->len, &iov, 1090 + ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov, 1091 1091 &msg.msg_iter); 1092 1092 if (unlikely(ret)) 1093 1093 return ret;
+5 -5
io_uring/rw.c
··· 548 548 549 549 int io_readv_prep_async(struct io_kiocb *req) 550 550 { 551 - return io_rw_prep_async(req, READ); 551 + return io_rw_prep_async(req, ITER_DEST); 552 552 } 553 553 554 554 int io_writev_prep_async(struct io_kiocb *req) 555 555 { 556 - return io_rw_prep_async(req, WRITE); 556 + return io_rw_prep_async(req, ITER_SOURCE); 557 557 } 558 558 559 559 /* ··· 704 704 loff_t *ppos; 705 705 706 706 if (!req_has_async_data(req)) { 707 - ret = io_import_iovec(READ, req, &iovec, s, issue_flags); 707 + ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags); 708 708 if (unlikely(ret < 0)) 709 709 return ret; 710 710 } else { ··· 716 716 * buffers, as we dropped the selected one before retry. 717 717 */ 718 718 if (io_do_buffer_select(req)) { 719 - ret = io_import_iovec(READ, req, &iovec, s, issue_flags); 719 + ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags); 720 720 if (unlikely(ret < 0)) 721 721 return ret; 722 722 } ··· 851 851 loff_t *ppos; 852 852 853 853 if (!req_has_async_data(req)) { 854 - ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags); 854 + ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags); 855 855 if (unlikely(ret < 0)) 856 856 return ret; 857 857 } else {
+1 -1
kernel/trace/trace_events_user.c
··· 1486 1486 if (unlikely(*ppos != 0)) 1487 1487 return -EFAULT; 1488 1488 1489 - if (unlikely(import_single_range(WRITE, (char __user *)ubuf, 1489 + if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf, 1490 1490 count, &iov, &i))) 1491 1491 return -EFAULT; 1492 1492
+1 -1
mm/madvise.c
··· 1459 1459 goto out; 1460 1460 } 1461 1461 1462 - ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); 1462 + ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); 1463 1463 if (ret < 0) 1464 1464 goto out; 1465 1465
+2 -2
mm/page_io.c
··· 376 376 struct address_space *mapping = sio->iocb.ki_filp->f_mapping; 377 377 int ret; 378 378 379 - iov_iter_bvec(&from, WRITE, sio->bvec, sio->pages, sio->len); 379 + iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len); 380 380 ret = mapping->a_ops->swap_rw(&sio->iocb, &from); 381 381 if (ret != -EIOCBQUEUED) 382 382 sio_write_complete(&sio->iocb, ret); ··· 530 530 struct address_space *mapping = sio->iocb.ki_filp->f_mapping; 531 531 int ret; 532 532 533 - iov_iter_bvec(&from, READ, sio->bvec, sio->pages, sio->len); 533 + iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len); 534 534 ret = mapping->a_ops->swap_rw(&sio->iocb, &from); 535 535 if (ret != -EIOCBQUEUED) 536 536 sio_read_complete(&sio->iocb, ret);
+1 -1
mm/process_vm_access.c
··· 263 263 struct iovec *iov_r; 264 264 struct iov_iter iter; 265 265 ssize_t rc; 266 - int dir = vm_write ? WRITE : READ; 266 + int dir = vm_write ? ITER_SOURCE : ITER_DEST; 267 267 268 268 if (flags != 0) 269 269 return -EINVAL;
+1 -1
net/9p/client.c
··· 2043 2043 struct kvec kv = {.iov_base = data, .iov_len = count}; 2044 2044 struct iov_iter to; 2045 2045 2046 - iov_iter_kvec(&to, READ, &kv, 1, count); 2046 + iov_iter_kvec(&to, ITER_DEST, &kv, 1, count); 2047 2047 2048 2048 p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", 2049 2049 fid->fid, offset, count);
+1 -1
net/bluetooth/6lowpan.c
··· 441 441 iv.iov_len = skb->len; 442 442 443 443 memset(&msg, 0, sizeof(msg)); 444 - iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len); 444 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len); 445 445 446 446 err = l2cap_chan_send(chan, &msg, skb->len); 447 447 if (err > 0) {
+1 -1
net/bluetooth/a2mp.c
··· 56 56 57 57 memset(&msg, 0, sizeof(msg)); 58 58 59 - iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, total_len); 59 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len); 60 60 61 61 l2cap_chan_send(chan, &msg, total_len); 62 62
+1 -1
net/bluetooth/smp.c
··· 605 605 606 606 memset(&msg, 0, sizeof(msg)); 607 607 608 - iov_iter_kvec(&msg.msg_iter, WRITE, iv, 2, 1 + len); 608 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iv, 2, 1 + len); 609 609 610 610 l2cap_chan_send(chan, &msg, 1 + len); 611 611
+2 -2
net/ceph/messenger_v1.c
··· 30 30 if (!buf) 31 31 msg.msg_flags |= MSG_TRUNC; 32 32 33 - iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len); 33 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len); 34 34 r = sock_recvmsg(sock, &msg, msg.msg_flags); 35 35 if (r == -EAGAIN) 36 36 r = 0; ··· 49 49 int r; 50 50 51 51 BUG_ON(page_offset + length > PAGE_SIZE); 52 - iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length); 52 + iov_iter_bvec(&msg.msg_iter, ITER_DEST, &bvec, 1, length); 53 53 r = sock_recvmsg(sock, &msg, msg.msg_flags); 54 54 if (r == -EAGAIN) 55 55 r = 0;
+7 -7
net/ceph/messenger_v2.c
··· 168 168 bv.bv_offset, bv.bv_len, 169 169 CEPH_MSG_FLAGS); 170 170 } else { 171 - iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len); 171 + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len); 172 172 ret = sock_sendmsg(sock, &msg); 173 173 } 174 174 if (ret <= 0) { ··· 225 225 WARN_ON(iov_iter_count(&con->v2.in_iter)); 226 226 227 227 con->v2.in_kvec_cnt = 0; 228 - iov_iter_kvec(&con->v2.in_iter, READ, con->v2.in_kvecs, 0, 0); 228 + iov_iter_kvec(&con->v2.in_iter, ITER_DEST, con->v2.in_kvecs, 0, 0); 229 229 } 230 230 231 231 static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv) ··· 233 233 WARN_ON(iov_iter_count(&con->v2.in_iter)); 234 234 235 235 con->v2.in_bvec = *bv; 236 - iov_iter_bvec(&con->v2.in_iter, READ, &con->v2.in_bvec, 1, bv->bv_len); 236 + iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len); 237 237 } 238 238 239 239 static void set_in_skip(struct ceph_connection *con, int len) ··· 241 241 WARN_ON(iov_iter_count(&con->v2.in_iter)); 242 242 243 243 dout("%s con %p len %d\n", __func__, con, len); 244 - iov_iter_discard(&con->v2.in_iter, READ, len); 244 + iov_iter_discard(&con->v2.in_iter, ITER_DEST, len); 245 245 } 246 246 247 247 static void add_out_kvec(struct ceph_connection *con, void *buf, int len) ··· 265 265 266 266 con->v2.out_kvec_cnt = 0; 267 267 268 - iov_iter_kvec(&con->v2.out_iter, WRITE, con->v2.out_kvecs, 0, 0); 268 + iov_iter_kvec(&con->v2.out_iter, ITER_SOURCE, con->v2.out_kvecs, 0, 0); 269 269 con->v2.out_iter_sendpage = false; 270 270 } 271 271 ··· 277 277 278 278 con->v2.out_bvec = *bv; 279 279 con->v2.out_iter_sendpage = zerocopy; 280 - iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1, 280 + iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1, 281 281 con->v2.out_bvec.bv_len); 282 282 } 283 283 ··· 290 290 con->v2.out_bvec.bv_offset = 0; 291 291 con->v2.out_bvec.bv_len = min(con->v2.out_zero, (int)PAGE_SIZE); 292 292 con->v2.out_iter_sendpage = true; 293 - iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1, 293 + iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1, 294 294 con->v2.out_bvec.bv_len); 295 295 } 296 296
+2 -1
net/compat.c
··· 95 95 if (err) 96 96 return err; 97 97 98 - err = import_iovec(save_addr ? READ : WRITE, compat_ptr(msg.msg_iov), msg.msg_iovlen, 98 + err = import_iovec(save_addr ? ITER_DEST : ITER_SOURCE, 99 + compat_ptr(msg.msg_iov), msg.msg_iovlen, 99 100 UIO_FASTIOV, iov, &kmsg->msg_iter); 100 101 return err < 0 ? err : 0; 101 102 }
+2 -2
net/ipv4/tcp.c
··· 2000 2000 if (copy_address != zc->copybuf_address) 2001 2001 return -EINVAL; 2002 2002 2003 - err = import_single_range(READ, (void __user *)copy_address, 2003 + err = import_single_range(ITER_DEST, (void __user *)copy_address, 2004 2004 inq, &iov, &msg.msg_iter); 2005 2005 if (err) 2006 2006 return err; ··· 2034 2034 if (copy_address != zc->copybuf_address) 2035 2035 return -EINVAL; 2036 2036 2037 - err = import_single_range(READ, (void __user *)copy_address, 2037 + err = import_single_range(ITER_DEST, (void __user *)copy_address, 2038 2038 copylen, &iov, &msg.msg_iter); 2039 2039 if (err) 2040 2040 return err;
+1 -1
net/netfilter/ipvs/ip_vs_sync.c
··· 1617 1617 EnterFunction(7); 1618 1618 1619 1619 /* Receive a packet */ 1620 - iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, buflen); 1620 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, buflen); 1621 1621 len = sock_recvmsg(sock, &msg, MSG_DONTWAIT); 1622 1622 if (len < 0) 1623 1623 return len;
+3 -3
net/smc/smc_clc.c
··· 673 673 */ 674 674 krflags = MSG_PEEK | MSG_WAITALL; 675 675 clc_sk->sk_rcvtimeo = timeout; 676 - iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, 676 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, 677 677 sizeof(struct smc_clc_msg_hdr)); 678 678 len = sock_recvmsg(smc->clcsock, &msg, krflags); 679 679 if (signal_pending(current)) { ··· 720 720 } else { 721 721 recvlen = datlen; 722 722 } 723 - iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen); 723 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen); 724 724 krflags = MSG_WAITALL; 725 725 len = sock_recvmsg(smc->clcsock, &msg, krflags); 726 726 if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) { ··· 737 737 /* receive remaining proposal message */ 738 738 recvlen = datlen > SMC_CLC_RECV_BUF_LEN ? 739 739 SMC_CLC_RECV_BUF_LEN : datlen; 740 - iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen); 740 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen); 741 741 len = sock_recvmsg(smc->clcsock, &msg, krflags); 742 742 datlen -= len; 743 743 }
+1 -1
net/smc/smc_tx.c
··· 308 308 309 309 iov.iov_base = kaddr + offset; 310 310 iov.iov_len = size; 311 - iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size); 311 + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size); 312 312 rc = smc_tx_sendmsg(smc, &msg, size); 313 313 kunmap(page); 314 314 return rc;
+6 -6
net/socket.c
··· 750 750 int kernel_sendmsg(struct socket *sock, struct msghdr *msg, 751 751 struct kvec *vec, size_t num, size_t size) 752 752 { 753 - iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size); 753 + iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, num, size); 754 754 return sock_sendmsg(sock, msg); 755 755 } 756 756 EXPORT_SYMBOL(kernel_sendmsg); ··· 776 776 if (!sock->ops->sendmsg_locked) 777 777 return sock_no_sendmsg_locked(sk, msg, size); 778 778 779 - iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size); 779 + iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, num, size); 780 780 781 781 return sock->ops->sendmsg_locked(sk, msg, msg_data_left(msg)); 782 782 } ··· 1034 1034 struct kvec *vec, size_t num, size_t size, int flags) 1035 1035 { 1036 1036 msg->msg_control_is_user = false; 1037 - iov_iter_kvec(&msg->msg_iter, READ, vec, num, size); 1037 + iov_iter_kvec(&msg->msg_iter, ITER_DEST, vec, num, size); 1038 1038 return sock_recvmsg(sock, msg, flags); 1039 1039 } 1040 1040 EXPORT_SYMBOL(kernel_recvmsg); ··· 2092 2092 struct iovec iov; 2093 2093 int fput_needed; 2094 2094 2095 - err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter); 2095 + err = import_single_range(ITER_SOURCE, buff, len, &iov, &msg.msg_iter); 2096 2096 if (unlikely(err)) 2097 2097 return err; 2098 2098 sock = sockfd_lookup_light(fd, &err, &fput_needed); ··· 2157 2157 int err, err2; 2158 2158 int fput_needed; 2159 2159 2160 - err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter); 2160 + err = import_single_range(ITER_DEST, ubuf, size, &iov, &msg.msg_iter); 2161 2161 if (unlikely(err)) 2162 2162 return err; 2163 2163 sock = sockfd_lookup_light(fd, &err, &fput_needed); ··· 2417 2417 if (err) 2418 2418 return err; 2419 2419 2420 - err = import_iovec(save_addr ? READ : WRITE, 2420 + err = import_iovec(save_addr ? ITER_DEST : ITER_SOURCE, 2421 2421 msg.msg_iov, msg.msg_iovlen, 2422 2422 UIO_FASTIOV, iov, &kmsg->msg_iter); 2423 2423 return err < 0 ? err : 0;
+3 -3
net/sunrpc/socklib.c
··· 214 214 static int xprt_send_kvec(struct socket *sock, struct msghdr *msg, 215 215 struct kvec *vec, size_t seek) 216 216 { 217 - iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len); 217 + iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, vec, 1, vec->iov_len); 218 218 return xprt_sendmsg(sock, msg, seek); 219 219 } 220 220 221 221 static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg, 222 222 struct xdr_buf *xdr, size_t base) 223 223 { 224 - iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr), 224 + iov_iter_bvec(&msg->msg_iter, ITER_SOURCE, xdr->bvec, xdr_buf_pagecount(xdr), 225 225 xdr->page_len + xdr->page_base); 226 226 return xprt_sendmsg(sock, msg, base + xdr->page_base); 227 227 } ··· 244 244 }; 245 245 size_t len = iov[0].iov_len + iov[1].iov_len; 246 246 247 - iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len); 247 + iov_iter_kvec(&msg->msg_iter, ITER_SOURCE, iov, 2, len); 248 248 return xprt_sendmsg(sock, msg, base); 249 249 } 250 250
+2 -2
net/sunrpc/svcsock.c
··· 260 260 rqstp->rq_respages = &rqstp->rq_pages[i]; 261 261 rqstp->rq_next_page = rqstp->rq_respages + 1; 262 262 263 - iov_iter_bvec(&msg.msg_iter, READ, bvec, i, buflen); 263 + iov_iter_bvec(&msg.msg_iter, ITER_DEST, bvec, i, buflen); 264 264 if (seek) { 265 265 iov_iter_advance(&msg.msg_iter, seek); 266 266 buflen -= seek; ··· 874 874 want = sizeof(rpc_fraghdr) - svsk->sk_tcplen; 875 875 iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen; 876 876 iov.iov_len = want; 877 - iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, want); 877 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want); 878 878 len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT); 879 879 if (len < 0) 880 880 return len;
+3 -3
net/sunrpc/xprtsock.c
··· 364 364 xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags, 365 365 struct kvec *kvec, size_t count, size_t seek) 366 366 { 367 - iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count); 367 + iov_iter_kvec(&msg->msg_iter, ITER_DEST, kvec, 1, count); 368 368 return xs_sock_recvmsg(sock, msg, flags, seek); 369 369 } 370 370 ··· 373 373 struct bio_vec *bvec, unsigned long nr, size_t count, 374 374 size_t seek) 375 375 { 376 - iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count); 376 + iov_iter_bvec(&msg->msg_iter, ITER_DEST, bvec, nr, count); 377 377 return xs_sock_recvmsg(sock, msg, flags, seek); 378 378 } 379 379 ··· 381 381 xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, 382 382 size_t count) 383 383 { 384 - iov_iter_discard(&msg->msg_iter, READ, count); 384 + iov_iter_discard(&msg->msg_iter, ITER_DEST, count); 385 385 return sock_recvmsg(sock, msg, flags); 386 386 } 387 387
+1 -1
net/tipc/topsrv.c
··· 394 394 iov.iov_base = &s; 395 395 iov.iov_len = sizeof(s); 396 396 msg.msg_name = NULL; 397 - iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len); 397 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, iov.iov_len); 398 398 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); 399 399 if (ret == -EWOULDBLOCK) 400 400 return -EWOULDBLOCK;
+2 -2
net/tls/tls_device.c
··· 620 620 kaddr = kmap(page); 621 621 iov.iov_base = kaddr + offset; 622 622 iov.iov_len = size; 623 - iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size); 623 + iov_iter_kvec(&msg_iter, ITER_SOURCE, &iov, 1, size); 624 624 iter_offset.msg_iter = &msg_iter; 625 625 rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA, 626 626 NULL); ··· 697 697 union tls_iter_offset iter; 698 698 struct iov_iter msg_iter; 699 699 700 - iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0); 700 + iov_iter_kvec(&msg_iter, ITER_SOURCE, NULL, 0, 0); 701 701 iter.msg_iter = &msg_iter; 702 702 return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL); 703 703 }
+1 -1
net/xfrm/espintcp.c
··· 354 354 *((__be16 *)buf) = cpu_to_be16(msglen); 355 355 pfx_iov.iov_base = buf; 356 356 pfx_iov.iov_len = sizeof(buf); 357 - iov_iter_kvec(&pfx_iter, WRITE, &pfx_iov, 1, pfx_iov.iov_len); 357 + iov_iter_kvec(&pfx_iter, ITER_SOURCE, &pfx_iov, 1, pfx_iov.iov_len); 358 358 359 359 err = sk_msg_memcopy_from_iter(sk, &pfx_iter, &emsg->skmsg, 360 360 pfx_iov.iov_len);
+2 -2
security/keys/keyctl.c
··· 1251 1251 struct iov_iter from; 1252 1252 int ret; 1253 1253 1254 - ret = import_single_range(WRITE, (void __user *)_payload, plen, 1254 + ret = import_single_range(ITER_SOURCE, (void __user *)_payload, plen, 1255 1255 &iov, &from); 1256 1256 if (unlikely(ret)) 1257 1257 return ret; ··· 1283 1283 if (!_payload_iov) 1284 1284 ioc = 0; 1285 1285 1286 - ret = import_iovec(WRITE, _payload_iov, ioc, 1286 + ret = import_iovec(ITER_SOURCE, _payload_iov, ioc, 1287 1287 ARRAY_SIZE(iovstack), &iov, &from); 1288 1288 if (ret < 0) 1289 1289 return ret;