Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rds: switch ->inc_copy_to_user() to passing iov_iter

instances get considerably simpler from that...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Al Viro c310e72c 7424ce65

+47 -120
+1 -2
net/rds/ib.h
··· 316 316 void rds_ib_recv_free_caches(struct rds_ib_connection *ic); 317 317 void rds_ib_recv_refill(struct rds_connection *conn, int prefill); 318 318 void rds_ib_inc_free(struct rds_incoming *inc); 319 - int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 320 - size_t size); 319 + int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); 321 320 void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context); 322 321 void rds_ib_recv_tasklet_fn(unsigned long data); 323 322 void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
+11 -26
net/rds/ib_recv.c
··· 472 472 return head; 473 473 } 474 474 475 - int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, 476 - size_t size) 475 + int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to) 477 476 { 478 477 struct rds_ib_incoming *ibinc; 479 478 struct rds_page_frag *frag; 480 - struct iovec *iov = first_iov; 481 479 unsigned long to_copy; 482 480 unsigned long frag_off = 0; 483 - unsigned long iov_off = 0; 484 481 int copied = 0; 485 482 int ret; 486 483 u32 len; ··· 486 489 frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item); 487 490 len = be32_to_cpu(inc->i_hdr.h_len); 488 491 489 - while (copied < size && copied < len) { 492 + while (iov_iter_count(to) && copied < len) { 490 493 if (frag_off == RDS_FRAG_SIZE) { 491 494 frag = list_entry(frag->f_item.next, 492 495 struct rds_page_frag, f_item); 493 496 frag_off = 0; 494 497 } 495 - while (iov_off == iov->iov_len) { 496 - iov_off = 0; 497 - iov++; 498 - } 499 - 500 - to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off); 501 - to_copy = min_t(size_t, to_copy, size - copied); 498 + to_copy = min_t(unsigned long, iov_iter_count(to), 499 + RDS_FRAG_SIZE - frag_off); 502 500 to_copy = min_t(unsigned long, to_copy, len - copied); 503 501 504 - rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag " 505 - "[%p, %u] + %lu\n", 506 - to_copy, iov->iov_base, iov->iov_len, iov_off, 507 - sg_page(&frag->f_sg), frag->f_sg.offset, frag_off); 508 - 509 502 /* XXX needs + offset for multiple recvs per page */ 510 - ret = rds_page_copy_to_user(sg_page(&frag->f_sg), 511 - frag->f_sg.offset + frag_off, 512 - iov->iov_base + iov_off, 513 - to_copy); 514 - if (ret) { 515 - copied = ret; 516 - break; 517 - } 503 + rds_stats_add(s_copy_to_user, to_copy); 504 + ret = copy_page_to_iter(sg_page(&frag->f_sg), 505 + frag->f_sg.offset + frag_off, 506 + to_copy, 507 + to); 508 + if (ret != to_copy) 509 + return -EFAULT; 518 510 519 - iov_off += to_copy; 520 511 frag_off += to_copy; 521 512 copied += to_copy; 522 513 }
+1 -2
net/rds/iw.h
··· 325 325 int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, 326 326 gfp_t page_gfp, int prefill); 327 327 void rds_iw_inc_free(struct rds_incoming *inc); 328 - int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 329 - size_t size); 328 + int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); 330 329 void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context); 331 330 void rds_iw_recv_tasklet_fn(unsigned long data); 332 331 void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
+11 -26
net/rds/iw_recv.c
··· 303 303 BUG_ON(atomic_read(&rds_iw_allocation) < 0); 304 304 } 305 305 306 - int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, 307 - size_t size) 306 + int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to) 308 307 { 309 308 struct rds_iw_incoming *iwinc; 310 309 struct rds_page_frag *frag; 311 - struct iovec *iov = first_iov; 312 310 unsigned long to_copy; 313 311 unsigned long frag_off = 0; 314 - unsigned long iov_off = 0; 315 312 int copied = 0; 316 313 int ret; 317 314 u32 len; ··· 317 320 frag = list_entry(iwinc->ii_frags.next, struct rds_page_frag, f_item); 318 321 len = be32_to_cpu(inc->i_hdr.h_len); 319 322 320 - while (copied < size && copied < len) { 323 + while (iov_iter_count(to) && copied < len) { 321 324 if (frag_off == RDS_FRAG_SIZE) { 322 325 frag = list_entry(frag->f_item.next, 323 326 struct rds_page_frag, f_item); 324 327 frag_off = 0; 325 328 } 326 - while (iov_off == iov->iov_len) { 327 - iov_off = 0; 328 - iov++; 329 - } 330 - 331 - to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off); 332 - to_copy = min_t(size_t, to_copy, size - copied); 329 + to_copy = min_t(unsigned long, iov_iter_count(to), 330 + RDS_FRAG_SIZE - frag_off); 333 331 to_copy = min_t(unsigned long, to_copy, len - copied); 334 332 335 - rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag " 336 - "[%p, %lu] + %lu\n", 337 - to_copy, iov->iov_base, iov->iov_len, iov_off, 338 - frag->f_page, frag->f_offset, frag_off); 339 - 340 333 /* XXX needs + offset for multiple recvs per page */ 341 - ret = rds_page_copy_to_user(frag->f_page, 342 - frag->f_offset + frag_off, 343 - iov->iov_base + iov_off, 344 - to_copy); 345 - if (ret) { 346 - copied = ret; 347 - break; 348 - } 334 + rds_stats_add(s_copy_to_user, to_copy); 335 + ret = copy_page_to_iter(frag->f_page, 336 + frag->f_offset + frag_off, 337 + to_copy, 338 + to); 339 + if (ret != to_copy) 340 + return -EFAULT; 349 341 350 - iov_off += to_copy; 351 342 frag_off += to_copy; 352 343 copied += to_copy; 353 344 }
+8 -27
net/rds/message.c
··· 325 325 return ret; 326 326 } 327 327 328 - int rds_message_inc_copy_to_user(struct rds_incoming *inc, 329 - struct iovec *first_iov, size_t size) 328 + int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to) 330 329 { 331 330 struct rds_message *rm; 332 - struct iovec *iov; 333 331 struct scatterlist *sg; 334 332 unsigned long to_copy; 335 - unsigned long iov_off; 336 333 unsigned long vec_off; 337 334 int copied; 338 335 int ret; ··· 338 341 rm = container_of(inc, struct rds_message, m_inc); 339 342 len = be32_to_cpu(rm->m_inc.i_hdr.h_len); 340 343 341 - iov = first_iov; 342 - iov_off = 0; 343 344 sg = rm->data.op_sg; 344 345 vec_off = 0; 345 346 copied = 0; 346 347 347 - while (copied < size && copied < len) { 348 - while (iov_off == iov->iov_len) { 349 - iov_off = 0; 350 - iov++; 351 - } 352 - 353 - to_copy = min(iov->iov_len - iov_off, sg->length - vec_off); 354 - to_copy = min_t(size_t, to_copy, size - copied); 348 + while (iov_iter_count(to) && copied < len) { 349 + to_copy = min(iov_iter_count(to), sg->length - vec_off); 355 350 to_copy = min_t(unsigned long, to_copy, len - copied); 356 351 357 - rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu to " 358 - "sg [%p, %u, %u] + %lu\n", 359 - to_copy, iov->iov_base, iov->iov_len, iov_off, 360 - sg_page(sg), sg->offset, sg->length, vec_off); 352 + rds_stats_add(s_copy_to_user, to_copy); 353 + ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off, 354 + to_copy, to); 355 + if (ret != to_copy) 356 + return -EFAULT; 361 357 362 - ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off, 363 - iov->iov_base + iov_off, 364 - to_copy); 365 - if (ret) { 366 - copied = ret; 367 - break; 368 - } 369 - 370 - iov_off += to_copy; 371 358 vec_off += to_copy; 372 359 copied += to_copy; 373 360
+2 -4
net/rds/rds.h
··· 431 431 int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op); 432 432 int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op); 433 433 int (*recv)(struct rds_connection *conn); 434 - int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov, 435 - size_t size); 434 + int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to); 436 435 void (*inc_free)(struct rds_incoming *inc); 437 436 438 437 int (*cm_handle_connect)(struct rdma_cm_id *cm_id, ··· 666 667 int rds_message_next_extension(struct rds_header *hdr, 667 668 unsigned int *pos, void *buf, unsigned int *buflen); 668 669 int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset); 669 - int rds_message_inc_copy_to_user(struct rds_incoming *inc, 670 - struct iovec *first_iov, size_t size); 670 + int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); 671 671 void rds_message_inc_free(struct rds_incoming *inc); 672 672 void rds_message_addref(struct rds_message *rm); 673 673 void rds_message_put(struct rds_message *rm);
+3 -2
net/rds/recv.c
··· 404 404 int ret = 0, nonblock = msg_flags & MSG_DONTWAIT; 405 405 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); 406 406 struct rds_incoming *inc = NULL; 407 + struct iov_iter to; 407 408 408 409 /* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */ 409 410 timeo = sock_rcvtimeo(sk, nonblock); ··· 450 449 rdsdebug("copying inc %p from %pI4:%u to user\n", inc, 451 450 &inc->i_conn->c_faddr, 452 451 ntohs(inc->i_hdr.h_sport)); 453 - ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov, 454 - size); 452 + iov_iter_init(&to, READ, msg->msg_iov, msg->msg_iovlen, size); 453 + ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &to); 455 454 if (ret < 0) 456 455 break; 457 456
+1 -2
net/rds/tcp.h
··· 69 69 void rds_tcp_data_ready(struct sock *sk); 70 70 int rds_tcp_recv(struct rds_connection *conn); 71 71 void rds_tcp_inc_free(struct rds_incoming *inc); 72 - int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 73 - size_t size); 72 + int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to); 74 73 75 74 /* tcp_send.c */ 76 75 void rds_tcp_xmit_prepare(struct rds_connection *conn);
+9 -29
net/rds/tcp_recv.c
··· 59 59 /* 60 60 * this is pretty lame, but, whatever. 61 61 */ 62 - int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, 63 - size_t size) 62 + int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to) 64 63 { 65 64 struct rds_tcp_incoming *tinc; 66 - struct iovec *iov, tmp; 67 65 struct sk_buff *skb; 68 - unsigned long to_copy, skb_off; 69 66 int ret = 0; 70 67 71 - if (size == 0) 68 + if (!iov_iter_count(to)) 72 69 goto out; 73 70 74 71 tinc = container_of(inc, struct rds_tcp_incoming, ti_inc); 75 - iov = first_iov; 76 - tmp = *iov; 77 72 78 73 skb_queue_walk(&tinc->ti_skb_list, skb) { 79 - skb_off = 0; 80 - while (skb_off < skb->len) { 81 - while (tmp.iov_len == 0) { 82 - iov++; 83 - tmp = *iov; 84 - } 85 - 86 - to_copy = min(tmp.iov_len, size); 74 + unsigned long to_copy, skb_off; 75 + for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) { 76 + to_copy = iov_iter_count(to); 87 77 to_copy = min(to_copy, skb->len - skb_off); 88 78 89 - rdsdebug("ret %d size %zu skb %p skb_off %lu " 90 - "skblen %d iov_base %p iov_len %zu cpy %lu\n", 91 - ret, size, skb, skb_off, skb->len, 92 - tmp.iov_base, tmp.iov_len, to_copy); 93 - 94 - /* modifies tmp as it copies */ 95 - if (skb_copy_datagram_iovec(skb, skb_off, &tmp, 96 - to_copy)) { 97 - ret = -EFAULT; 98 - goto out; 99 - } 79 + if (skb_copy_datagram_iter(skb, skb_off, to, to_copy)) 80 + return -EFAULT; 100 81 101 82 rds_stats_add(s_copy_to_user, to_copy); 102 - size -= to_copy; 103 83 ret += to_copy; 104 - skb_off += to_copy; 105 - if (size == 0) 84 + 85 + if (!iov_iter_count(to)) 106 86 goto out; 107 87 } 108 88 }