Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs: get rid of the res2 iocb->ki_complete argument

The second argument was only used by the USB gadget code, yet everyone
pays the overhead of passing a zero that is then propagated into aio,
where it ends up as part of the aio res2 value.

Now that everybody is passing in zero, kill off the extra argument.

Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

+33 -34
+1 -1
block/fops.c
··· 164 164 ret = blk_status_to_errno(dio->bio.bi_status); 165 165 } 166 166 167 - dio->iocb->ki_complete(iocb, ret, 0); 167 + dio->iocb->ki_complete(iocb, ret); 168 168 if (dio->flags & DIO_MULTI_BIO) 169 169 bio_put(&dio->bio); 170 170 } else {
+1 -1
crypto/af_alg.c
··· 1076 1076 af_alg_free_resources(areq); 1077 1077 sock_put(sk); 1078 1078 1079 - iocb->ki_complete(iocb, err ? err : (int)resultlen, 0); 1079 + iocb->ki_complete(iocb, err ? err : (int)resultlen); 1080 1080 } 1081 1081 EXPORT_SYMBOL_GPL(af_alg_async_cb); 1082 1082
+2 -2
drivers/block/loop.c
··· 554 554 blk_mq_complete_request(rq); 555 555 } 556 556 557 - static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) 557 + static void lo_rw_aio_complete(struct kiocb *iocb, long ret) 558 558 { 559 559 struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb); 560 560 ··· 627 627 lo_rw_aio_do_completion(cmd); 628 628 629 629 if (ret != -EIOCBQUEUED) 630 - cmd->iocb.ki_complete(&cmd->iocb, ret, 0); 630 + lo_rw_aio_complete(&cmd->iocb, ret); 631 631 return 0; 632 632 } 633 633
+2 -2
drivers/nvme/target/io-cmd-file.c
··· 125 125 return call_iter(iocb, &iter); 126 126 } 127 127 128 - static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2) 128 + static void nvmet_file_io_done(struct kiocb *iocb, long ret) 129 129 { 130 130 struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb); 131 131 u16 status = NVME_SC_SUCCESS; ··· 222 222 } 223 223 224 224 complete: 225 - nvmet_file_io_done(&req->f.iocb, ret, 0); 225 + nvmet_file_io_done(&req->f.iocb, ret); 226 226 return true; 227 227 } 228 228
+2 -2
drivers/target/target_core_file.c
··· 245 245 struct bio_vec bvecs[]; 246 246 }; 247 247 248 - static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) 248 + static void cmd_rw_aio_complete(struct kiocb *iocb, long ret) 249 249 { 250 250 struct target_core_file_cmd *cmd; 251 251 ··· 303 303 ret = call_read_iter(file, &aio_cmd->iocb, &iter); 304 304 305 305 if (ret != -EIOCBQUEUED) 306 - cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0); 306 + cmd_rw_aio_complete(&aio_cmd->iocb, ret); 307 307 308 308 return 0; 309 309 }
+1 -1
drivers/usb/gadget/function/f_fs.c
··· 831 831 kthread_unuse_mm(io_data->mm); 832 832 } 833 833 834 - io_data->kiocb->ki_complete(io_data->kiocb, ret, 0); 834 + io_data->kiocb->ki_complete(io_data->kiocb, ret); 835 835 836 836 if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd) 837 837 eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+2 -3
drivers/usb/gadget/legacy/inode.c
··· 469 469 ret = -EFAULT; 470 470 471 471 /* completing the iocb can drop the ctx and mm, don't touch mm after */ 472 - iocb->ki_complete(iocb, ret, 0); 472 + iocb->ki_complete(iocb, ret); 473 473 474 474 kfree(priv->buf); 475 475 kfree(priv->to_free); ··· 497 497 kfree(priv); 498 498 iocb->private = NULL; 499 499 iocb->ki_complete(iocb, 500 - req->actual ? req->actual : (long)req->status, 501 - 0); 500 + req->actual ? req->actual : (long)req->status); 502 501 } else { 503 502 /* ep_copy_to_user() won't report both; we hide some faults */ 504 503 if (unlikely(0 != req->status))
+3 -3
fs/aio.c
··· 1417 1417 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 1418 1418 } 1419 1419 1420 - static void aio_complete_rw(struct kiocb *kiocb, long res, long res2) 1420 + static void aio_complete_rw(struct kiocb *kiocb, long res) 1421 1421 { 1422 1422 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw); 1423 1423 ··· 1437 1437 } 1438 1438 1439 1439 iocb->ki_res.res = res; 1440 - iocb->ki_res.res2 = res2; 1440 + iocb->ki_res.res2 = 0; 1441 1441 iocb_put(iocb); 1442 1442 } 1443 1443 ··· 1508 1508 ret = -EINTR; 1509 1509 fallthrough; 1510 1510 default: 1511 - req->ki_complete(req, ret, 0); 1511 + req->ki_complete(req, ret); 1512 1512 } 1513 1513 } 1514 1514
+6 -6
fs/cachefiles/io.c
··· 37 37 /* 38 38 * Handle completion of a read from the cache. 39 39 */ 40 - static void cachefiles_read_complete(struct kiocb *iocb, long ret, long ret2) 40 + static void cachefiles_read_complete(struct kiocb *iocb, long ret) 41 41 { 42 42 struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb); 43 43 44 - _enter("%ld,%ld", ret, ret2); 44 + _enter("%ld", ret); 45 45 46 46 if (ki->term_func) { 47 47 if (ret >= 0) ··· 139 139 fallthrough; 140 140 default: 141 141 ki->was_async = false; 142 - cachefiles_read_complete(&ki->iocb, ret, 0); 142 + cachefiles_read_complete(&ki->iocb, ret); 143 143 if (ret > 0) 144 144 ret = 0; 145 145 break; ··· 159 159 /* 160 160 * Handle completion of a write to the cache. 161 161 */ 162 - static void cachefiles_write_complete(struct kiocb *iocb, long ret, long ret2) 162 + static void cachefiles_write_complete(struct kiocb *iocb, long ret) 163 163 { 164 164 struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb); 165 165 struct inode *inode = file_inode(ki->iocb.ki_filp); 166 166 167 - _enter("%ld,%ld", ret, ret2); 167 + _enter("%ld", ret); 168 168 169 169 /* Tell lockdep we inherited freeze protection from submission thread */ 170 170 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); ··· 244 244 fallthrough; 245 245 default: 246 246 ki->was_async = false; 247 - cachefiles_write_complete(&ki->iocb, ret, 0); 247 + cachefiles_write_complete(&ki->iocb, ret); 248 248 if (ret > 0) 249 249 ret = 0; 250 250 break;
+1 -1
fs/ceph/file.c
··· 1023 1023 ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR : 1024 1024 CEPH_CAP_FILE_RD)); 1025 1025 1026 - aio_req->iocb->ki_complete(aio_req->iocb, ret, 0); 1026 + aio_req->iocb->ki_complete(aio_req->iocb, ret); 1027 1027 1028 1028 ceph_free_cap_flush(aio_req->prealloc_cf); 1029 1029 kfree(aio_req);
+2 -2
fs/cifs/file.c
··· 3184 3184 mutex_unlock(&ctx->aio_mutex); 3185 3185 3186 3186 if (ctx->iocb && ctx->iocb->ki_complete) 3187 - ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0); 3187 + ctx->iocb->ki_complete(ctx->iocb, ctx->rc); 3188 3188 else 3189 3189 complete(&ctx->done); 3190 3190 } ··· 3917 3917 mutex_unlock(&ctx->aio_mutex); 3918 3918 3919 3919 if (ctx->iocb && ctx->iocb->ki_complete) 3920 - ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0); 3920 + ctx->iocb->ki_complete(ctx->iocb, ctx->rc); 3921 3921 else 3922 3922 complete(&ctx->done); 3923 3923 }
+1 -1
fs/direct-io.c
··· 307 307 308 308 if (ret > 0 && dio->op == REQ_OP_WRITE) 309 309 ret = generic_write_sync(dio->iocb, ret); 310 - dio->iocb->ki_complete(dio->iocb, ret, 0); 310 + dio->iocb->ki_complete(dio->iocb, ret); 311 311 } 312 312 313 313 kmem_cache_free(dio_cache, dio);
+1 -1
fs/fuse/file.c
··· 687 687 spin_unlock(&fi->lock); 688 688 } 689 689 690 - io->iocb->ki_complete(io->iocb, res, 0); 690 + io->iocb->ki_complete(io->iocb, res); 691 691 } 692 692 693 693 kref_put(&io->refcnt, fuse_io_release);
+3 -3
fs/io_uring.c
··· 2689 2689 __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req)); 2690 2690 } 2691 2691 2692 - static void io_complete_rw(struct kiocb *kiocb, long res, long res2) 2692 + static void io_complete_rw(struct kiocb *kiocb, long res) 2693 2693 { 2694 2694 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); 2695 2695 ··· 2700 2700 io_req_task_work_add(req); 2701 2701 } 2702 2702 2703 - static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) 2703 + static void io_complete_rw_iopoll(struct kiocb *kiocb, long res) 2704 2704 { 2705 2705 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); 2706 2706 ··· 2913 2913 ret = -EINTR; 2914 2914 fallthrough; 2915 2915 default: 2916 - kiocb->ki_complete(kiocb, ret, 0); 2916 + kiocb->ki_complete(kiocb, ret); 2917 2917 } 2918 2918 } 2919 2919
+1 -1
fs/iomap/direct-io.c
··· 125 125 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work); 126 126 struct kiocb *iocb = dio->iocb; 127 127 128 - iocb->ki_complete(iocb, iomap_dio_complete(dio), 0); 128 + iocb->ki_complete(iocb, iomap_dio_complete(dio)); 129 129 } 130 130 131 131 /*
+1 -1
fs/nfs/direct.c
··· 275 275 res = (long) dreq->count; 276 276 WARN_ON_ONCE(dreq->count < 0); 277 277 } 278 - dreq->iocb->ki_complete(dreq->iocb, res, 0); 278 + dreq->iocb->ki_complete(dreq->iocb, res); 279 279 } 280 280 281 281 complete(&dreq->completion);
+2 -2
fs/overlayfs/file.c
··· 272 272 kmem_cache_free(ovl_aio_request_cachep, aio_req); 273 273 } 274 274 275 - static void ovl_aio_rw_complete(struct kiocb *iocb, long res, long res2) 275 + static void ovl_aio_rw_complete(struct kiocb *iocb, long res) 276 276 { 277 277 struct ovl_aio_req *aio_req = container_of(iocb, 278 278 struct ovl_aio_req, iocb); 279 279 struct kiocb *orig_iocb = aio_req->orig_iocb; 280 280 281 281 ovl_aio_cleanup_handler(aio_req); 282 - orig_iocb->ki_complete(orig_iocb, res, res2); 282 + orig_iocb->ki_complete(orig_iocb, res); 283 283 } 284 284 285 285 static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+1 -1
include/linux/fs.h
··· 330 330 randomized_struct_fields_start 331 331 332 332 loff_t ki_pos; 333 - void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); 333 + void (*ki_complete)(struct kiocb *iocb, long ret); 334 334 void *private; 335 335 int ki_flags; 336 336 u16 ki_hint;