Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: switch bios to blk_status_t

Replace bi_error with a new bi_status to allow for a clear conversion.
Note that device mapper overloaded bi_error with a private value, which
we'll have to keep around at least for now and thus propagate to a
proper blk_status_t value.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>

authored by

Christoph Hellwig and committed by
Jens Axboe
4e4cbee9 fc17b653

+625 -603
+4 -4
block/bio-integrity.c
··· 221 221 * @bio: bio to generate/verify integrity metadata for 222 222 * @proc_fn: Pointer to the relevant processing function 223 223 */ 224 - static int bio_integrity_process(struct bio *bio, 224 + static blk_status_t bio_integrity_process(struct bio *bio, 225 225 integrity_processing_fn *proc_fn) 226 226 { 227 227 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); ··· 229 229 struct bvec_iter bviter; 230 230 struct bio_vec bv; 231 231 struct bio_integrity_payload *bip = bio_integrity(bio); 232 - unsigned int ret = 0; 232 + blk_status_t ret = BLK_STS_OK; 233 233 void *prot_buf = page_address(bip->bip_vec->bv_page) + 234 234 bip->bip_vec->bv_offset; 235 235 ··· 366 366 struct bio *bio = bip->bip_bio; 367 367 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 368 368 369 - bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn); 369 + bio->bi_status = bio_integrity_process(bio, bi->profile->verify_fn); 370 370 371 371 /* Restore original bio completion handler */ 372 372 bio->bi_end_io = bip->bip_end_io; ··· 395 395 * integrity metadata. Restore original bio end_io handler 396 396 * and run it. 397 397 */ 398 - if (bio->bi_error) { 398 + if (bio->bi_status) { 399 399 bio->bi_end_io = bip->bip_end_io; 400 400 bio_endio(bio); 401 401
+4 -4
block/bio.c
··· 309 309 { 310 310 struct bio *parent = bio->bi_private; 311 311 312 - if (!parent->bi_error) 313 - parent->bi_error = bio->bi_error; 312 + if (!parent->bi_status) 313 + parent->bi_status = bio->bi_status; 314 314 bio_put(bio); 315 315 return parent; 316 316 } ··· 918 918 { 919 919 struct submit_bio_ret *ret = bio->bi_private; 920 920 921 - ret->error = bio->bi_error; 921 + ret->error = blk_status_to_errno(bio->bi_status); 922 922 complete(&ret->event); 923 923 } 924 924 ··· 1818 1818 1819 1819 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { 1820 1820 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), 1821 - bio, bio->bi_error); 1821 + bio, bio->bi_status); 1822 1822 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 1823 1823 } 1824 1824
+13 -7
block/blk-core.c
··· 144 144 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" }, 145 145 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" }, 146 146 147 + /* device mapper special case, should not leak out: */ 148 + [BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" }, 149 + 147 150 /* everything else not covered above: */ 148 151 [BLK_STS_IOERR] = { -EIO, "I/O" }, 149 152 }; ··· 191 188 unsigned int nbytes, blk_status_t error) 192 189 { 193 190 if (error) 194 - bio->bi_error = blk_status_to_errno(error); 191 + bio->bi_status = error; 195 192 196 193 if (unlikely(rq->rq_flags & RQF_QUIET)) 197 194 bio_set_flag(bio, BIO_QUIET); ··· 1720 1717 blk_queue_split(q, &bio, q->bio_split); 1721 1718 1722 1719 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1723 - bio->bi_error = -EIO; 1720 + bio->bi_status = BLK_STS_IOERR; 1724 1721 bio_endio(bio); 1725 1722 return BLK_QC_T_NONE; 1726 1723 } ··· 1778 1775 req = get_request(q, bio->bi_opf, bio, GFP_NOIO); 1779 1776 if (IS_ERR(req)) { 1780 1777 __wbt_done(q->rq_wb, wb_acct); 1781 - bio->bi_error = PTR_ERR(req); 1778 + if (PTR_ERR(req) == -ENOMEM) 1779 + bio->bi_status = BLK_STS_RESOURCE; 1780 + else 1781 + bio->bi_status = BLK_STS_IOERR; 1782 1782 bio_endio(bio); 1783 1783 goto out_unlock; 1784 1784 } ··· 1936 1930 { 1937 1931 struct request_queue *q; 1938 1932 int nr_sectors = bio_sectors(bio); 1939 - int err = -EIO; 1933 + blk_status_t status = BLK_STS_IOERR; 1940 1934 char b[BDEVNAME_SIZE]; 1941 1935 struct hd_struct *part; 1942 1936 ··· 1979 1973 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) { 1980 1974 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); 1981 1975 if (!nr_sectors) { 1982 - err = 0; 1976 + status = BLK_STS_OK; 1983 1977 goto end_io; 1984 1978 } 1985 1979 } ··· 2031 2025 return true; 2032 2026 2033 2027 not_supported: 2034 - err = -EOPNOTSUPP; 2028 + status = BLK_STS_NOTSUPP; 2035 2029 end_io: 2036 - bio->bi_error = err; 2030 + bio->bi_status = status; 2037 2031 bio_endio(bio); 2038 2032 return false; 2039 2033 }
+2 -2
block/blk-integrity.c
··· 384 384 .sysfs_ops = &integrity_ops, 385 385 }; 386 386 387 - static int blk_integrity_nop_fn(struct blk_integrity_iter *iter) 387 + static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter) 388 388 { 389 - return 0; 389 + return BLK_STS_OK; 390 390 } 391 391 392 392 static const struct blk_integrity_profile nop_profile = {
+2 -2
block/bounce.c
··· 143 143 mempool_free(bvec->bv_page, pool); 144 144 } 145 145 146 - bio_orig->bi_error = bio->bi_error; 146 + bio_orig->bi_status = bio->bi_status; 147 147 bio_endio(bio_orig); 148 148 bio_put(bio); 149 149 } ··· 163 163 { 164 164 struct bio *bio_orig = bio->bi_private; 165 165 166 - if (!bio->bi_error) 166 + if (!bio->bi_status) 167 167 copy_to_high_bio_irq(bio_orig, bio); 168 168 169 169 bounce_end_io(bio, pool);
+15 -15
block/t10-pi.c
··· 46 46 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref 47 47 * tag. 48 48 */ 49 - static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn, 50 - unsigned int type) 49 + static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter, 50 + csum_fn *fn, unsigned int type) 51 51 { 52 52 unsigned int i; 53 53 ··· 67 67 iter->seed++; 68 68 } 69 69 70 - return 0; 70 + return BLK_STS_OK; 71 71 } 72 72 73 - static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn, 74 - unsigned int type) 73 + static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter, 74 + csum_fn *fn, unsigned int type) 75 75 { 76 76 unsigned int i; 77 77 ··· 108 108 "(rcvd %04x, want %04x)\n", iter->disk_name, 109 109 (unsigned long long)iter->seed, 110 110 be16_to_cpu(pi->guard_tag), be16_to_cpu(csum)); 111 - return -EILSEQ; 111 + return BLK_STS_PROTECTION; 112 112 } 113 113 114 114 next: ··· 117 117 iter->seed++; 118 118 } 119 119 120 - return 0; 120 + return BLK_STS_OK; 121 121 } 122 122 123 - static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter) 123 + static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter) 124 124 { 125 125 return t10_pi_generate(iter, t10_pi_crc_fn, 1); 126 126 } 127 127 128 - static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter) 128 + static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter) 129 129 { 130 130 return t10_pi_generate(iter, t10_pi_ip_fn, 1); 131 131 } 132 132 133 - static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter) 133 + static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter) 134 134 { 135 135 return t10_pi_verify(iter, t10_pi_crc_fn, 1); 136 136 } 137 137 138 - static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter) 138 + static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter) 139 139 { 140 140 return t10_pi_verify(iter, t10_pi_ip_fn, 1); 141 141 } 142 142 143 - static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter) 143 + static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter) 144 144 { 145 145 return t10_pi_generate(iter, t10_pi_crc_fn, 3); 146 146 } 147 147 148 - static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter) 148 + static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter) 149 149 { 150 150 return t10_pi_generate(iter, t10_pi_ip_fn, 3); 151 151 } 152 152 153 - static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter) 153 + static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter) 154 154 { 155 155 return t10_pi_verify(iter, t10_pi_crc_fn, 3); 156 156 } 157 157 158 - static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter) 158 + static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter) 159 159 { 160 160 return t10_pi_verify(iter, t10_pi_ip_fn, 3); 161 161 }
+5 -5
drivers/block/aoe/aoecmd.c
··· 1070 1070 d->ip.rq = NULL; 1071 1071 do { 1072 1072 bio = rq->bio; 1073 - bok = !fastfail && !bio->bi_error; 1073 + bok = !fastfail && !bio->bi_status; 1074 1074 } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size)); 1075 1075 1076 1076 /* cf. http://lkml.org/lkml/2006/10/31/28 */ ··· 1131 1131 ahout->cmdstat, ahin->cmdstat, 1132 1132 d->aoemajor, d->aoeminor); 1133 1133 noskb: if (buf) 1134 - buf->bio->bi_error = -EIO; 1134 + buf->bio->bi_status = BLK_STS_IOERR; 1135 1135 goto out; 1136 1136 } 1137 1137 ··· 1144 1144 "aoe: runt data size in read from", 1145 1145 (long) d->aoemajor, d->aoeminor, 1146 1146 skb->len, n); 1147 - buf->bio->bi_error = -EIO; 1147 + buf->bio->bi_status = BLK_STS_IOERR; 1148 1148 break; 1149 1149 } 1150 1150 if (n > f->iter.bi_size) { ··· 1152 1152 "aoe: too-large data size in read from", 1153 1153 (long) d->aoemajor, d->aoeminor, 1154 1154 n, f->iter.bi_size); 1155 - buf->bio->bi_error = -EIO; 1155 + buf->bio->bi_status = BLK_STS_IOERR; 1156 1156 break; 1157 1157 } 1158 1158 bvcpy(skb, f->buf->bio, f->iter, n); ··· 1654 1654 if (buf == NULL) 1655 1655 return; 1656 1656 buf->iter.bi_size = 0; 1657 - buf->bio->bi_error = -EIO; 1657 + buf->bio->bi_status = BLK_STS_IOERR; 1658 1658 if (buf->nframesout == 0) 1659 1659 aoe_end_buf(d, buf); 1660 1660 }
+1 -1
drivers/block/aoe/aoedev.c
··· 170 170 if (rq == NULL) 171 171 return; 172 172 while ((bio = d->ip.nxbio)) { 173 - bio->bi_error = -EIO; 173 + bio->bi_status = BLK_STS_IOERR; 174 174 d->ip.nxbio = bio->bi_next; 175 175 n = (unsigned long) rq->special; 176 176 rq->special = (void *) --n;
+1 -1
drivers/block/drbd/drbd_actlog.c
··· 178 178 else 179 179 submit_bio(bio); 180 180 wait_until_done_or_force_detached(device, bdev, &device->md_io.done); 181 - if (!bio->bi_error) 181 + if (!bio->bi_status) 182 182 err = device->md_io.error; 183 183 184 184 out:
+3 -3
drivers/block/drbd/drbd_bitmap.c
··· 959 959 !bm_test_page_unchanged(b->bm_pages[idx])) 960 960 drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx); 961 961 962 - if (bio->bi_error) { 962 + if (bio->bi_status) { 963 963 /* ctx error will hold the completed-last non-zero error code, 964 964 * in case error codes differ. */ 965 - ctx->error = bio->bi_error; 965 + ctx->error = blk_status_to_errno(bio->bi_status); 966 966 bm_set_page_io_err(b->bm_pages[idx]); 967 967 /* Not identical to on disk version of it. 968 968 * Is BM_PAGE_IO_ERROR enough? */ 969 969 if (__ratelimit(&drbd_ratelimit_state)) 970 970 drbd_err(device, "IO ERROR %d on bitmap page idx %u\n", 971 - bio->bi_error, idx); 971 + bio->bi_status, idx); 972 972 } else { 973 973 bm_clear_page_io_err(b->bm_pages[idx]); 974 974 dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
+1 -1
drivers/block/drbd/drbd_int.h
··· 1627 1627 __release(local); 1628 1628 if (!bio->bi_bdev) { 1629 1629 drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); 1630 - bio->bi_error = -ENODEV; 1630 + bio->bi_status = BLK_STS_IOERR; 1631 1631 bio_endio(bio); 1632 1632 return; 1633 1633 }
+3 -3
drivers/block/drbd/drbd_receiver.c
··· 1229 1229 struct drbd_device *device = octx->device; 1230 1230 struct issue_flush_context *ctx = octx->ctx; 1231 1231 1232 - if (bio->bi_error) { 1233 - ctx->error = bio->bi_error; 1234 - drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error); 1232 + if (bio->bi_status) { 1233 + ctx->error = blk_status_to_errno(bio->bi_status); 1234 + drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status); 1235 1235 } 1236 1236 kfree(octx); 1237 1237 bio_put(bio);
+3 -3
drivers/block/drbd/drbd_req.c
··· 203 203 void complete_master_bio(struct drbd_device *device, 204 204 struct bio_and_error *m) 205 205 { 206 - m->bio->bi_error = m->error; 206 + m->bio->bi_status = errno_to_blk_status(m->error); 207 207 bio_endio(m->bio); 208 208 dec_ap_bio(device); 209 209 } ··· 1157 1157 1158 1158 if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9, 1159 1159 GFP_NOIO, 0)) 1160 - req->private_bio->bi_error = -EIO; 1160 + req->private_bio->bi_status = BLK_STS_IOERR; 1161 1161 bio_endio(req->private_bio); 1162 1162 } 1163 1163 ··· 1225 1225 /* only pass the error to the upper layers. 1226 1226 * if user cannot handle io errors, that's not our business. */ 1227 1227 drbd_err(device, "could not kmalloc() req\n"); 1228 - bio->bi_error = -ENOMEM; 1228 + bio->bi_status = BLK_STS_RESOURCE; 1229 1229 bio_endio(bio); 1230 1230 return ERR_PTR(-ENOMEM); 1231 1231 }
+8 -8
drivers/block/drbd/drbd_worker.c
··· 63 63 struct drbd_device *device; 64 64 65 65 device = bio->bi_private; 66 - device->md_io.error = bio->bi_error; 66 + device->md_io.error = blk_status_to_errno(bio->bi_status); 67 67 68 68 /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able 69 69 * to timeout on the lower level device, and eventually detach from it. ··· 177 177 bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES || 178 178 bio_op(bio) == REQ_OP_DISCARD; 179 179 180 - if (bio->bi_error && __ratelimit(&drbd_ratelimit_state)) 180 + if (bio->bi_status && __ratelimit(&drbd_ratelimit_state)) 181 181 drbd_warn(device, "%s: error=%d s=%llus\n", 182 182 is_write ? (is_discard ? "discard" : "write") 183 - : "read", bio->bi_error, 183 + : "read", bio->bi_status, 184 184 (unsigned long long)peer_req->i.sector); 185 185 186 - if (bio->bi_error) 186 + if (bio->bi_status) 187 187 set_bit(__EE_WAS_ERROR, &peer_req->flags); 188 188 189 189 bio_put(bio); /* no need for the bio anymore */ ··· 243 243 if (__ratelimit(&drbd_ratelimit_state)) 244 244 drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); 245 245 246 - if (!bio->bi_error) 246 + if (!bio->bi_status) 247 247 drbd_panic_after_delayed_completion_of_aborted_request(device); 248 248 } 249 249 250 250 /* to avoid recursion in __req_mod */ 251 - if (unlikely(bio->bi_error)) { 251 + if (unlikely(bio->bi_status)) { 252 252 switch (bio_op(bio)) { 253 253 case REQ_OP_WRITE_ZEROES: 254 254 case REQ_OP_DISCARD: 255 - if (bio->bi_error == -EOPNOTSUPP) 255 + if (bio->bi_status == BLK_STS_NOTSUPP) 256 256 what = DISCARD_COMPLETED_NOTSUPP; 257 257 else 258 258 what = DISCARD_COMPLETED_WITH_ERROR; ··· 272 272 } 273 273 274 274 bio_put(req->private_bio); 275 - req->private_bio = ERR_PTR(bio->bi_error); 275 + req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status)); 276 276 277 277 /* not req_mod(), we need irqsave here! */ 278 278 spin_lock_irqsave(&device->resource->req_lock, flags);
+2 -2
drivers/block/floppy.c
··· 3780 3780 struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private; 3781 3781 int drive = cbdata->drive; 3782 3782 3783 - if (bio->bi_error) { 3783 + if (bio->bi_status) { 3784 3784 pr_info("floppy: error %d while reading block 0\n", 3785 - bio->bi_error); 3785 + bio->bi_status); 3786 3786 set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); 3787 3787 } 3788 3788 complete(&cbdata->complete);
+9 -9
drivers/block/pktcdvd.c
··· 952 952 953 953 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", 954 954 bio, (unsigned long long)pkt->sector, 955 - (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error); 955 + (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status); 956 956 957 - if (bio->bi_error) 957 + if (bio->bi_status) 958 958 atomic_inc(&pkt->io_errors); 959 959 if (atomic_dec_and_test(&pkt->io_wait)) { 960 960 atomic_inc(&pkt->run_sm); ··· 969 969 struct pktcdvd_device *pd = pkt->pd; 970 970 BUG_ON(!pd); 971 971 972 - pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error); 972 + pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status); 973 973 974 974 pd->stats.pkt_ended++; 975 975 ··· 1305 1305 pkt_queue_bio(pd, pkt->w_bio); 1306 1306 } 1307 1307 1308 - static void pkt_finish_packet(struct packet_data *pkt, int error) 1308 + static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status) 1309 1309 { 1310 1310 struct bio *bio; 1311 1311 1312 - if (error) 1312 + if (status) 1313 1313 pkt->cache_valid = 0; 1314 1314 1315 1315 /* Finish all bios corresponding to this packet */ 1316 1316 while ((bio = bio_list_pop(&pkt->orig_bios))) { 1317 - bio->bi_error = error; 1317 + bio->bi_status = status; 1318 1318 bio_endio(bio); 1319 1319 } 1320 1320 } ··· 1349 1349 if (atomic_read(&pkt->io_wait) > 0) 1350 1350 return; 1351 1351 1352 - if (!pkt->w_bio->bi_error) { 1352 + if (!pkt->w_bio->bi_status) { 1353 1353 pkt_set_state(pkt, PACKET_FINISHED_STATE); 1354 1354 } else { 1355 1355 pkt_set_state(pkt, PACKET_RECOVERY_STATE); ··· 1366 1366 break; 1367 1367 1368 1368 case PACKET_FINISHED_STATE: 1369 - pkt_finish_packet(pkt, pkt->w_bio->bi_error); 1369 + pkt_finish_packet(pkt, pkt->w_bio->bi_status); 1370 1370 return; 1371 1371 1372 1372 default: ··· 2301 2301 struct packet_stacked_data *psd = bio->bi_private; 2302 2302 struct pktcdvd_device *pd = psd->pd; 2303 2303 2304 - psd->bio->bi_error = bio->bi_error; 2304 + psd->bio->bi_status = bio->bi_status; 2305 2305 bio_put(bio); 2306 2306 bio_endio(psd->bio); 2307 2307 mempool_free(psd, psd_pool);
+7 -7
drivers/block/ps3vram.c
··· 428 428 kfree(priv->cache.tags); 429 429 } 430 430 431 - static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from, 431 + static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from, 432 432 size_t len, size_t *retlen, u_char *buf) 433 433 { 434 434 struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); ··· 438 438 (unsigned int)from, len); 439 439 440 440 if (from >= priv->size) 441 - return -EIO; 441 + return BLK_STS_IOERR; 442 442 443 443 if (len > priv->size - from) 444 444 len = priv->size - from; ··· 472 472 return 0; 473 473 } 474 474 475 - static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to, 475 + static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to, 476 476 size_t len, size_t *retlen, const u_char *buf) 477 477 { 478 478 struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); 479 479 unsigned int cached, count; 480 480 481 481 if (to >= priv->size) 482 - return -EIO; 482 + return BLK_STS_IOERR; 483 483 484 484 if (len > priv->size - to) 485 485 len = priv->size - to; ··· 554 554 int write = bio_data_dir(bio) == WRITE; 555 555 const char *op = write ? "write" : "read"; 556 556 loff_t offset = bio->bi_iter.bi_sector << 9; 557 - int error = 0; 557 + blk_status_t error = 0; 558 558 struct bio_vec bvec; 559 559 struct bvec_iter iter; 560 560 struct bio *next; ··· 578 578 579 579 if (retlen != len) { 580 580 dev_err(&dev->core, "Short %s\n", op); 581 - error = -EIO; 581 + error = BLK_STS_IOERR; 582 582 goto out; 583 583 } 584 584 ··· 593 593 next = bio_list_peek(&priv->list); 594 594 spin_unlock_irq(&priv->lock); 595 595 596 - bio->bi_error = error; 596 + bio->bi_status = error; 597 597 bio_endio(bio); 598 598 return next; 599 599 }
+5 -9
drivers/block/rsxx/dev.c
··· 149 149 { 150 150 struct rsxx_cardinfo *card = q->queuedata; 151 151 struct rsxx_bio_meta *bio_meta; 152 - int st = -EINVAL; 152 + blk_status_t st = BLK_STS_IOERR; 153 153 154 154 blk_queue_split(q, &bio, q->bio_split); 155 155 ··· 161 161 if (bio_end_sector(bio) > get_capacity(card->gendisk)) 162 162 goto req_err; 163 163 164 - if (unlikely(card->halt)) { 165 - st = -EFAULT; 164 + if (unlikely(card->halt)) 166 165 goto req_err; 167 - } 168 166 169 - if (unlikely(card->dma_fault)) { 170 - st = (-EFAULT); 167 + if (unlikely(card->dma_fault)) 171 168 goto req_err; 172 - } 173 169 174 170 if (bio->bi_iter.bi_size == 0) { 175 171 dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); ··· 174 178 175 179 bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL); 176 180 if (!bio_meta) { 177 - st = -ENOMEM; 181 + st = BLK_STS_RESOURCE; 178 182 goto req_err; 179 183 } 180 184 ··· 201 205 kmem_cache_free(bio_meta_pool, bio_meta); 202 206 req_err: 203 207 if (st) 204 - bio->bi_error = st; 208 + bio->bi_status = st; 205 209 bio_endio(bio); 206 210 return BLK_QC_T_NONE; 207 211 }
+6 -7
drivers/block/rsxx/dma.c
··· 611 611 mutex_unlock(&ctrl->work_lock); 612 612 } 613 613 614 - static int rsxx_queue_discard(struct rsxx_cardinfo *card, 614 + static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card, 615 615 struct list_head *q, 616 616 unsigned int laddr, 617 617 rsxx_dma_cb cb, ··· 621 621 622 622 dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL); 623 623 if (!dma) 624 - return -ENOMEM; 624 + return BLK_STS_RESOURCE; 625 625 626 626 dma->cmd = HW_CMD_BLK_DISCARD; 627 627 dma->laddr = laddr; ··· 640 640 return 0; 641 641 } 642 642 643 - static int rsxx_queue_dma(struct rsxx_cardinfo *card, 643 + static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card, 644 644 struct list_head *q, 645 645 int dir, 646 646 unsigned int dma_off, ··· 655 655 656 656 dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL); 657 657 if (!dma) 658 - return -ENOMEM; 658 + return BLK_STS_RESOURCE; 659 659 660 660 dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ; 661 661 dma->laddr = laddr; ··· 677 677 return 0; 678 678 } 679 679 680 - int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, 680 + blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card, 681 681 struct bio *bio, 682 682 atomic_t *n_dmas, 683 683 rsxx_dma_cb cb, ··· 694 694 unsigned int dma_len; 695 695 int dma_cnt[RSXX_MAX_TARGETS]; 696 696 int tgt; 697 - int st; 697 + blk_status_t st; 698 698 int i; 699 699 700 700 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ ··· 769 769 for (i = 0; i < card->n_targets; i++) 770 770 rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], 771 771 FREE_DMA); 772 - 773 772 return st; 774 773 } 775 774
+1 -1
drivers/block/rsxx/rsxx_priv.h
··· 391 391 void rsxx_dma_cleanup(void); 392 392 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); 393 393 int rsxx_dma_configure(struct rsxx_cardinfo *card); 394 - int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, 394 + blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card, 395 395 struct bio *bio, 396 396 atomic_t *n_dmas, 397 397 rsxx_dma_cb cb,
+1 -1
drivers/block/umem.c
··· 454 454 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 455 455 if (control & DMASCR_HARD_ERROR) { 456 456 /* error */ 457 - bio->bi_error = -EIO; 457 + bio->bi_status = BLK_STS_IOERR; 458 458 dev_printk(KERN_WARNING, &card->dev->dev, 459 459 "I/O error on sector %d/%d\n", 460 460 le32_to_cpu(desc->local_addr)>>9,
+8 -11
drivers/block/xen-blkback/blkback.c
··· 1069 1069 atomic_set(&blkif->drain, 0); 1070 1070 } 1071 1071 1072 - /* 1073 - * Completion callback on the bio's. Called as bh->b_end_io() 1074 - */ 1075 - 1076 - static void __end_block_io_op(struct pending_req *pending_req, int error) 1072 + static void __end_block_io_op(struct pending_req *pending_req, 1073 + blk_status_t error) 1077 1074 { 1078 1075 /* An error fails the entire request. */ 1079 - if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) && 1080 - (error == -EOPNOTSUPP)) { 1076 + if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE && 1077 + error == BLK_STS_NOTSUPP) { 1081 1078 pr_debug("flush diskcache op failed, not supported\n"); 1082 1079 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0); 1083 1080 pending_req->status = BLKIF_RSP_EOPNOTSUPP; 1084 - } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) && 1085 - (error == -EOPNOTSUPP)) { 1081 + } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER && 1082 + error == BLK_STS_NOTSUPP) { 1086 1083 pr_debug("write barrier op failed, not supported\n"); 1087 1084 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0); 1088 1085 pending_req->status = BLKIF_RSP_EOPNOTSUPP; ··· 1103 1106 */ 1104 1107 static void end_block_io_op(struct bio *bio) 1105 1108 { 1106 - __end_block_io_op(bio->bi_private, bio->bi_error); 1109 + __end_block_io_op(bio->bi_private, bio->bi_status); 1107 1110 bio_put(bio); 1108 1111 } 1109 1112 ··· 1420 1423 for (i = 0; i < nbio; i++) 1421 1424 bio_put(biolist[i]); 1422 1425 atomic_set(&pending_req->pendcnt, 1); 1423 - __end_block_io_op(pending_req, -EINVAL); 1426 + __end_block_io_op(pending_req, BLK_STS_RESOURCE); 1424 1427 msleep(1); /* back off a bit */ 1425 1428 return -EIO; 1426 1429 }
+1 -1
drivers/block/xen-blkfront.c
··· 2006 2006 2007 2007 if (atomic_dec_and_test(&split_bio->pending)) { 2008 2008 split_bio->bio->bi_phys_segments = 0; 2009 - split_bio->bio->bi_error = bio->bi_error; 2009 + split_bio->bio->bi_status = bio->bi_status; 2010 2010 bio_endio(split_bio->bio); 2011 2011 kfree(split_bio); 2012 2012 }
+2 -2
drivers/lightnvm/pblk-core.c
··· 296 296 pr_err("pblk: tear down bio failed\n"); 297 297 } 298 298 299 - if (bio->bi_error) 300 - pr_err("pblk: flush sync write failed (%u)\n", bio->bi_error); 299 + if (bio->bi_status) 300 + pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status); 301 301 302 302 bio_put(bio); 303 303 }
+2 -2
drivers/lightnvm/pblk-read.c
··· 114 114 pblk_log_read_err(pblk, rqd); 115 115 #ifdef CONFIG_NVM_DEBUG 116 116 else 117 - WARN_ONCE(bio->bi_error, "pblk: corrupted read error\n"); 117 + WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n"); 118 118 #endif 119 119 120 120 if (rqd->nr_ppas > 1) ··· 123 123 bio_put(bio); 124 124 if (r_ctx->orig_bio) { 125 125 #ifdef CONFIG_NVM_DEBUG 126 - WARN_ONCE(r_ctx->orig_bio->bi_error, 126 + WARN_ONCE(r_ctx->orig_bio->bi_status, 127 127 "pblk: corrupted read bio\n"); 128 128 #endif 129 129 bio_endio(r_ctx->orig_bio);
+1 -1
drivers/lightnvm/pblk-write.c
··· 186 186 } 187 187 #ifdef CONFIG_NVM_DEBUG 188 188 else 189 - WARN_ONCE(rqd->bio->bi_error, "pblk: corrupted write error\n"); 189 + WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n"); 190 190 #endif 191 191 192 192 pblk_complete_write(pblk, rqd, c_ctx);
+4 -4
drivers/lightnvm/rrpc.c
··· 279 279 { 280 280 struct completion *waiting = bio->bi_private; 281 281 282 - if (bio->bi_error) 283 - pr_err("nvm: gc request failed (%u).\n", bio->bi_error); 282 + if (bio->bi_status) 283 + pr_err("nvm: gc request failed (%u).\n", bio->bi_status); 284 284 285 285 complete(waiting); 286 286 } ··· 359 359 goto finished; 360 360 } 361 361 wait_for_completion_io(&wait); 362 - if (bio->bi_error) { 362 + if (bio->bi_status) { 363 363 rrpc_inflight_laddr_release(rrpc, rqd); 364 364 goto finished; 365 365 } ··· 385 385 wait_for_completion_io(&wait); 386 386 387 387 rrpc_inflight_laddr_release(rrpc, rqd); 388 - if (bio->bi_error) 388 + if (bio->bi_status) 389 389 goto finished; 390 390 391 391 bio_reset(bio);
+4 -3
drivers/md/bcache/bcache.h
··· 849 849 850 850 /* Forward declarations */ 851 851 852 - void bch_count_io_errors(struct cache *, int, const char *); 852 + void bch_count_io_errors(struct cache *, blk_status_t, const char *); 853 853 void bch_bbio_count_io_errors(struct cache_set *, struct bio *, 854 - int, const char *); 855 - void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *); 854 + blk_status_t, const char *); 855 + void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t, 856 + const char *); 856 857 void bch_bbio_free(struct bio *, struct cache_set *); 857 858 struct bio *bch_bbio_alloc(struct cache_set *); 858 859
+3 -3
drivers/md/bcache/btree.c
··· 307 307 bch_submit_bbio(bio, b->c, &b->key, 0); 308 308 closure_sync(&cl); 309 309 310 - if (bio->bi_error) 310 + if (bio->bi_status) 311 311 set_btree_node_io_error(b); 312 312 313 313 bch_bbio_free(bio, b->c); ··· 374 374 struct closure *cl = bio->bi_private; 375 375 struct btree *b = container_of(cl, struct btree, io); 376 376 377 - if (bio->bi_error) 377 + if (bio->bi_status) 378 378 set_btree_node_io_error(b); 379 379 380 - bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree"); 380 + bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); 381 381 closure_put(cl); 382 382 } 383 383
+3 -3
drivers/md/bcache/io.c
··· 50 50 51 51 /* IO errors */ 52 52 53 - void bch_count_io_errors(struct cache *ca, int error, const char *m) 53 + void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m) 54 54 { 55 55 /* 56 56 * The halflife of an error is: ··· 103 103 } 104 104 105 105 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, 106 - int error, const char *m) 106 + blk_status_t error, const char *m) 107 107 { 108 108 struct bbio *b = container_of(bio, struct bbio, bio); 109 109 struct cache *ca = PTR_CACHE(c, &b->key, 0); ··· 132 132 } 133 133 134 134 void bch_bbio_endio(struct cache_set *c, struct bio *bio, 135 - int error, const char *m) 135 + blk_status_t error, const char *m) 136 136 { 137 137 struct closure *cl = bio->bi_private; 138 138
+1 -1
drivers/md/bcache/journal.c
··· 549 549 { 550 550 struct journal_write *w = bio->bi_private; 551 551 552 - cache_set_err_on(bio->bi_error, w->c, "journal io error"); 552 + cache_set_err_on(bio->bi_status, w->c, "journal io error"); 553 553 closure_put(&w->c->journal.io); 554 554 } 555 555
+5 -5
drivers/md/bcache/movinggc.c
··· 63 63 struct moving_io *io = container_of(bio->bi_private, 64 64 struct moving_io, cl); 65 65 66 - if (bio->bi_error) 67 - io->op.error = bio->bi_error; 66 + if (bio->bi_status) 67 + io->op.status = bio->bi_status; 68 68 else if (!KEY_DIRTY(&b->key) && 69 69 ptr_stale(io->op.c, &b->key, 0)) { 70 - io->op.error = -EINTR; 70 + io->op.status = BLK_STS_IOERR; 71 71 } 72 72 73 - bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move"); 73 + bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move"); 74 74 } 75 75 76 76 static void moving_init(struct moving_io *io) ··· 92 92 struct moving_io *io = container_of(cl, struct moving_io, cl); 93 93 struct data_insert_op *op = &io->op; 94 94 95 - if (!op->error) { 95 + if (!op->status) { 96 96 moving_init(io); 97 97 98 98 io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
+14 -14
drivers/md/bcache/request.c
··· 81 81 if (ret == -ESRCH) { 82 82 op->replace_collision = true; 83 83 } else if (ret) { 84 - op->error = -ENOMEM; 84 + op->status = BLK_STS_RESOURCE; 85 85 op->insert_data_done = true; 86 86 } 87 87 ··· 178 178 struct closure *cl = bio->bi_private; 179 179 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); 180 180 181 - if (bio->bi_error) { 181 + if (bio->bi_status) { 182 182 /* TODO: We could try to recover from this. */ 183 183 if (op->writeback) 184 - op->error = bio->bi_error; 184 + op->status = bio->bi_status; 185 185 else if (!op->replace) 186 186 set_closure_fn(cl, bch_data_insert_error, op->wq); 187 187 else 188 188 set_closure_fn(cl, NULL, NULL); 189 189 } 190 190 191 - bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache"); 191 + bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache"); 192 192 } 193 193 194 194 static void bch_data_insert_start(struct closure *cl) ··· 488 488 * from the backing device. 489 489 */ 490 490 491 - if (bio->bi_error) 492 - s->iop.error = bio->bi_error; 491 + if (bio->bi_status) 492 + s->iop.status = bio->bi_status; 493 493 else if (!KEY_DIRTY(&b->key) && 494 494 ptr_stale(s->iop.c, &b->key, 0)) { 495 495 atomic_long_inc(&s->iop.c->cache_read_races); 496 - s->iop.error = -EINTR; 496 + s->iop.status = BLK_STS_IOERR; 497 497 } 498 498 499 - bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache"); 499 + bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache"); 500 500 } 501 501 502 502 /* ··· 593 593 { 594 594 struct closure *cl = bio->bi_private; 595 595 596 - if (bio->bi_error) { 596 + if (bio->bi_status) { 597 597 struct search *s = container_of(cl, struct search, cl); 598 - s->iop.error = bio->bi_error; 598 + s->iop.status = bio->bi_status; 599 599 /* Only cache read errors are recoverable */ 600 600 s->recoverable = false; 601 601 } ··· 611 611 &s->d->disk->part0, s->start_time); 612 612 613 613 trace_bcache_request_end(s->d, s->orig_bio); 614 - s->orig_bio->bi_error = s->iop.error; 614 + s->orig_bio->bi_status = s->iop.status; 615 615 bio_endio(s->orig_bio); 616 616 s->orig_bio = NULL; 617 617 } ··· 664 664 s->iop.inode = d->id; 665 665 s->iop.write_point = hash_long((unsigned long) current, 16); 666 666 s->iop.write_prio = 0; 667 - s->iop.error = 0; 667 + s->iop.status = 0; 668 668 s->iop.flags = 0; 669 669 s->iop.flush_journal = op_is_flush(bio->bi_opf); 670 670 s->iop.wq = bcache_wq; ··· 707 707 /* Retry from the backing device: */ 708 708 trace_bcache_read_retry(s->orig_bio); 709 709 710 - s->iop.error = 0; 710 + s->iop.status = 0; 711 711 do_bio_hook(s, s->orig_bio); 712 712 713 713 /* XXX: invalidate cache */ ··· 767 767 !s->cache_miss, s->iop.bypass); 768 768 trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass); 769 769 770 - if (s->iop.error) 770 + if (s->iop.status) 771 771 continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq); 772 772 else if (s->iop.bio || verify(dc, &s->bio.bio)) 773 773 continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
+1 -1
drivers/md/bcache/request.h
··· 10 10 unsigned inode; 11 11 uint16_t write_point; 12 12 uint16_t write_prio; 13 - short error; 13 + blk_status_t status; 14 14 15 15 union { 16 16 uint16_t flags;
+3 -3
drivers/md/bcache/super.c
··· 271 271 { 272 272 struct cache *ca = bio->bi_private; 273 273 274 - bch_count_io_errors(ca, bio->bi_error, "writing superblock"); 274 + bch_count_io_errors(ca, bio->bi_status, "writing superblock"); 275 275 closure_put(&ca->set->sb_write); 276 276 } 277 277 ··· 321 321 struct closure *cl = bio->bi_private; 322 322 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 323 323 324 - cache_set_err_on(bio->bi_error, c, "accessing uuids"); 324 + cache_set_err_on(bio->bi_status, c, "accessing uuids"); 325 325 bch_bbio_free(bio, c); 326 326 closure_put(cl); 327 327 } ··· 494 494 { 495 495 struct cache *ca = bio->bi_private; 496 496 497 - cache_set_err_on(bio->bi_error, ca->set, "accessing priorities"); 497 + cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); 498 498 bch_bbio_free(bio, ca->set); 499 499 closure_put(&ca->prio); 500 500 }
+2 -2
drivers/md/bcache/writeback.c
··· 167 167 struct keybuf_key *w = bio->bi_private; 168 168 struct dirty_io *io = w->private; 169 169 170 - if (bio->bi_error) 170 + if (bio->bi_status) 171 171 SET_KEY_DIRTY(&w->key, false); 172 172 173 173 closure_put(&io->cl); ··· 195 195 struct dirty_io *io = w->private; 196 196 197 197 bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), 198 - bio->bi_error, "reading dirty data from cache"); 198 + bio->bi_status, "reading dirty data from cache"); 199 199 200 200 dirty_endio(bio); 201 201 }
+2 -2
drivers/md/dm-bio-prison-v1.c
··· 229 229 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder); 230 230 231 231 void dm_cell_error(struct dm_bio_prison *prison, 232 - struct dm_bio_prison_cell *cell, int error) 232 + struct dm_bio_prison_cell *cell, blk_status_t error) 233 233 { 234 234 struct bio_list bios; 235 235 struct bio *bio; ··· 238 238 dm_cell_release(prison, cell, &bios); 239 239 240 240 while ((bio = bio_list_pop(&bios))) { 241 - bio->bi_error = error; 241 + bio->bi_status = error; 242 242 bio_endio(bio); 243 243 } 244 244 }
+1 -1
drivers/md/dm-bio-prison-v1.h
··· 91 91 struct dm_bio_prison_cell *cell, 92 92 struct bio_list *inmates); 93 93 void dm_cell_error(struct dm_bio_prison *prison, 94 - struct dm_bio_prison_cell *cell, int error); 94 + struct dm_bio_prison_cell *cell, blk_status_t error); 95 95 96 96 /* 97 97 * Visits the cell and then releases. Guarantees no new inmates are
+15 -13
drivers/md/dm-bufio.c
··· 145 145 enum data_mode data_mode; 146 146 unsigned char list_mode; /* LIST_* */ 147 147 unsigned hold_count; 148 - int read_error; 149 - int write_error; 148 + blk_status_t read_error; 149 + blk_status_t write_error; 150 150 unsigned long state; 151 151 unsigned long last_accessed; 152 152 struct dm_bufio_client *c; ··· 555 555 { 556 556 struct dm_buffer *b = context; 557 557 558 - b->bio.bi_error = error ? -EIO : 0; 558 + b->bio.bi_status = error ? BLK_STS_IOERR : 0; 559 559 b->bio.bi_end_io(&b->bio); 560 560 } 561 561 ··· 588 588 589 589 r = dm_io(&io_req, 1, &region, NULL); 590 590 if (r) { 591 - b->bio.bi_error = r; 591 + b->bio.bi_status = errno_to_blk_status(r); 592 592 end_io(&b->bio); 593 593 } 594 594 } ··· 596 596 static void inline_endio(struct bio *bio) 597 597 { 598 598 bio_end_io_t *end_fn = bio->bi_private; 599 - int error = bio->bi_error; 599 + blk_status_t status = bio->bi_status; 600 600 601 601 /* 602 602 * Reset the bio to free any attached resources ··· 604 604 */ 605 605 bio_reset(bio); 606 606 607 - bio->bi_error = error; 607 + bio->bi_status = status; 608 608 end_fn(bio); 609 609 } 610 610 ··· 685 685 { 686 686 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); 687 687 688 - b->write_error = bio->bi_error; 689 - if (unlikely(bio->bi_error)) { 688 + b->write_error = bio->bi_status; 689 + if (unlikely(bio->bi_status)) { 690 690 struct dm_bufio_client *c = b->c; 691 - int error = bio->bi_error; 692 - (void)cmpxchg(&c->async_write_error, 0, error); 691 + 692 + (void)cmpxchg(&c->async_write_error, 0, 693 + blk_status_to_errno(bio->bi_status)); 693 694 } 694 695 695 696 BUG_ON(!test_bit(B_WRITING, &b->state)); ··· 1064 1063 { 1065 1064 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio); 1066 1065 1067 - b->read_error = bio->bi_error; 1066 + b->read_error = bio->bi_status; 1068 1067 1069 1068 BUG_ON(!test_bit(B_READING, &b->state)); 1070 1069 ··· 1108 1107 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); 1109 
1108 1110 1109 if (b->read_error) { 1111 - int error = b->read_error; 1110 + int error = blk_status_to_errno(b->read_error); 1112 1111 1113 1112 dm_bufio_release(b); 1114 1113 ··· 1258 1257 */ 1259 1258 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) 1260 1259 { 1261 - int a, f; 1260 + blk_status_t a; 1261 + int f; 1262 1262 unsigned long buffers_processed = 0; 1263 1263 struct dm_buffer *b, *tmp; 1264 1264
+18 -16
drivers/md/dm-cache-target.c
··· 119 119 */ 120 120 struct continuation { 121 121 struct work_struct ws; 122 - int input; 122 + blk_status_t input; 123 123 }; 124 124 125 125 static inline void init_continuation(struct continuation *k, ··· 145 145 /* 146 146 * The operation that everyone is waiting for. 147 147 */ 148 - int (*commit_op)(void *context); 148 + blk_status_t (*commit_op)(void *context); 149 149 void *commit_context; 150 150 151 151 /* ··· 171 171 static void __commit(struct work_struct *_ws) 172 172 { 173 173 struct batcher *b = container_of(_ws, struct batcher, commit_work); 174 - 175 - int r; 174 + blk_status_t r; 176 175 unsigned long flags; 177 176 struct list_head work_items; 178 177 struct work_struct *ws, *tmp; ··· 204 205 205 206 while ((bio = bio_list_pop(&bios))) { 206 207 if (r) { 207 - bio->bi_error = r; 208 + bio->bi_status = r; 208 209 bio_endio(bio); 209 210 } else 210 211 b->issue_op(bio, b->issue_context); ··· 212 213 } 213 214 214 215 static void batcher_init(struct batcher *b, 215 - int (*commit_op)(void *), 216 + blk_status_t (*commit_op)(void *), 216 217 void *commit_context, 217 218 void (*issue_op)(struct bio *bio, void *), 218 219 void *issue_context, ··· 954 955 955 956 dm_unhook_bio(&pb->hook_info, bio); 956 957 957 - if (bio->bi_error) { 958 + if (bio->bi_status) { 958 959 bio_endio(bio); 959 960 return; 960 961 } ··· 1219 1220 struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k); 1220 1221 1221 1222 if (read_err || write_err) 1222 - mg->k.input = -EIO; 1223 + mg->k.input = BLK_STS_IOERR; 1223 1224 1224 1225 queue_continuation(mg->cache->wq, &mg->k); 1225 1226 } ··· 1265 1266 1266 1267 dm_unhook_bio(&pb->hook_info, bio); 1267 1268 1268 - if (bio->bi_error) 1269 - mg->k.input = bio->bi_error; 1269 + if (bio->bi_status) 1270 + mg->k.input = bio->bi_status; 1270 1271 1271 1272 queue_continuation(mg->cache->wq, &mg->k); 1272 1273 } ··· 1322 1323 if (mg->overwrite_bio) { 1323 1324 if (success) 1324 1325 force_set_dirty(cache, 
cblock); 1326 + else if (mg->k.input) 1327 + mg->overwrite_bio->bi_status = mg->k.input; 1325 1328 else 1326 - mg->overwrite_bio->bi_error = (mg->k.input ? : -EIO); 1329 + mg->overwrite_bio->bi_status = BLK_STS_IOERR; 1327 1330 bio_endio(mg->overwrite_bio); 1328 1331 } else { 1329 1332 if (success) ··· 1505 1504 r = copy(mg, is_policy_promote); 1506 1505 if (r) { 1507 1506 DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache)); 1508 - mg->k.input = -EIO; 1507 + mg->k.input = BLK_STS_IOERR; 1509 1508 mg_complete(mg, false); 1510 1509 } 1511 1510 } ··· 1908 1907 /* 1909 1908 * Used by the batcher. 1910 1909 */ 1911 - static int commit_op(void *context) 1910 + static blk_status_t commit_op(void *context) 1912 1911 { 1913 1912 struct cache *cache = context; 1914 1913 1915 1914 if (dm_cache_changed_this_transaction(cache->cmd)) 1916 - return commit(cache, false); 1915 + return errno_to_blk_status(commit(cache, false)); 1917 1916 1918 1917 return 0; 1919 1918 } ··· 2019 2018 bio_list_init(&cache->deferred_bios); 2020 2019 2021 2020 while ((bio = bio_list_pop(&bios))) { 2022 - bio->bi_error = DM_ENDIO_REQUEUE; 2021 + bio->bi_status = BLK_STS_DM_REQUEUE; 2023 2022 bio_endio(bio); 2024 2023 } 2025 2024 } ··· 2821 2820 return r; 2822 2821 } 2823 2822 2824 - static int cache_end_io(struct dm_target *ti, struct bio *bio, int *error) 2823 + static int cache_end_io(struct dm_target *ti, struct bio *bio, 2824 + blk_status_t *error) 2825 2825 { 2826 2826 struct cache *cache = ti->private; 2827 2827 unsigned long flags;
+17 -17
drivers/md/dm-crypt.c
··· 71 71 struct convert_context ctx; 72 72 73 73 atomic_t io_pending; 74 - int error; 74 + blk_status_t error; 75 75 sector_t sector; 76 76 77 77 struct rb_node rb_node; ··· 1292 1292 /* 1293 1293 * Encrypt / decrypt data from one bio to another one (can be the same one) 1294 1294 */ 1295 - static int crypt_convert(struct crypt_config *cc, 1295 + static blk_status_t crypt_convert(struct crypt_config *cc, 1296 1296 struct convert_context *ctx) 1297 1297 { 1298 1298 unsigned int tag_offset = 0; ··· 1343 1343 */ 1344 1344 case -EBADMSG: 1345 1345 atomic_dec(&ctx->cc_pending); 1346 - return -EILSEQ; 1346 + return BLK_STS_PROTECTION; 1347 1347 /* 1348 1348 * There was an error while processing the request. 1349 1349 */ 1350 1350 default: 1351 1351 atomic_dec(&ctx->cc_pending); 1352 - return -EIO; 1352 + return BLK_STS_IOERR; 1353 1353 } 1354 1354 } 1355 1355 ··· 1463 1463 { 1464 1464 struct crypt_config *cc = io->cc; 1465 1465 struct bio *base_bio = io->base_bio; 1466 - int error = io->error; 1466 + blk_status_t error = io->error; 1467 1467 1468 1468 if (!atomic_dec_and_test(&io->io_pending)) 1469 1469 return; ··· 1476 1476 else 1477 1477 kfree(io->integrity_metadata); 1478 1478 1479 - base_bio->bi_error = error; 1479 + base_bio->bi_status = error; 1480 1480 bio_endio(base_bio); 1481 1481 } 1482 1482 ··· 1502 1502 struct dm_crypt_io *io = clone->bi_private; 1503 1503 struct crypt_config *cc = io->cc; 1504 1504 unsigned rw = bio_data_dir(clone); 1505 - int error; 1505 + blk_status_t error; 1506 1506 1507 1507 /* 1508 1508 * free the processed pages ··· 1510 1510 if (rw == WRITE) 1511 1511 crypt_free_buffer_pages(cc, clone); 1512 1512 1513 - error = clone->bi_error; 1513 + error = clone->bi_status; 1514 1514 bio_put(clone); 1515 1515 1516 1516 if (rw == READ && !error) { ··· 1570 1570 1571 1571 crypt_inc_pending(io); 1572 1572 if (kcryptd_io_read(io, GFP_NOIO)) 1573 - io->error = -ENOMEM; 1573 + io->error = BLK_STS_RESOURCE; 1574 1574 crypt_dec_pending(io); 1575 1575 } 
1576 1576 ··· 1656 1656 sector_t sector; 1657 1657 struct rb_node **rbp, *parent; 1658 1658 1659 - if (unlikely(io->error < 0)) { 1659 + if (unlikely(io->error)) { 1660 1660 crypt_free_buffer_pages(cc, clone); 1661 1661 bio_put(clone); 1662 1662 crypt_dec_pending(io); ··· 1697 1697 struct bio *clone; 1698 1698 int crypt_finished; 1699 1699 sector_t sector = io->sector; 1700 - int r; 1700 + blk_status_t r; 1701 1701 1702 1702 /* 1703 1703 * Prevent io from disappearing until this function completes. ··· 1707 1707 1708 1708 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); 1709 1709 if (unlikely(!clone)) { 1710 - io->error = -EIO; 1710 + io->error = BLK_STS_IOERR; 1711 1711 goto dec; 1712 1712 } 1713 1713 ··· 1718 1718 1719 1719 crypt_inc_pending(io); 1720 1720 r = crypt_convert(cc, &io->ctx); 1721 - if (r < 0) 1721 + if (r) 1722 1722 io->error = r; 1723 1723 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); 1724 1724 ··· 1740 1740 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) 1741 1741 { 1742 1742 struct crypt_config *cc = io->cc; 1743 - int r = 0; 1743 + blk_status_t r; 1744 1744 1745 1745 crypt_inc_pending(io); 1746 1746 ··· 1748 1748 io->sector); 1749 1749 1750 1750 r = crypt_convert(cc, &io->ctx); 1751 - if (r < 0) 1751 + if (r) 1752 1752 io->error = r; 1753 1753 1754 1754 if (atomic_dec_and_test(&io->ctx.cc_pending)) ··· 1781 1781 if (error == -EBADMSG) { 1782 1782 DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", 1783 1783 (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq))); 1784 - io->error = -EILSEQ; 1784 + io->error = BLK_STS_PROTECTION; 1785 1785 } else if (error < 0) 1786 - io->error = -EIO; 1786 + io->error = BLK_STS_IOERR; 1787 1787 1788 1788 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); 1789 1789
+3 -2
drivers/md/dm-flakey.c
··· 358 358 return DM_MAPIO_REMAPPED; 359 359 } 360 360 361 - static int flakey_end_io(struct dm_target *ti, struct bio *bio, int *error) 361 + static int flakey_end_io(struct dm_target *ti, struct bio *bio, 362 + blk_status_t *error) 362 363 { 363 364 struct flakey_c *fc = ti->private; 364 365 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); ··· 378 377 * Error read during the down_interval if drop_writes 379 378 * and error_writes were not configured. 380 379 */ 381 - *error = -EIO; 380 + *error = BLK_STS_IOERR; 382 381 } 383 382 } 384 383
+9 -9
drivers/md/dm-integrity.c
··· 246 246 unsigned metadata_offset; 247 247 248 248 atomic_t in_flight; 249 - int bi_error; 249 + blk_status_t bi_status; 250 250 251 251 struct completion *completion; 252 252 ··· 1114 1114 static void do_endio(struct dm_integrity_c *ic, struct bio *bio) 1115 1115 { 1116 1116 int r = dm_integrity_failed(ic); 1117 - if (unlikely(r) && !bio->bi_error) 1118 - bio->bi_error = r; 1117 + if (unlikely(r) && !bio->bi_status) 1118 + bio->bi_status = errno_to_blk_status(r); 1119 1119 bio_endio(bio); 1120 1120 } 1121 1121 ··· 1123 1123 { 1124 1124 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1125 1125 1126 - if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic))) 1126 + if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) 1127 1127 submit_flush_bio(ic, dio); 1128 1128 else 1129 1129 do_endio(ic, bio); ··· 1142 1142 1143 1143 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1144 1144 1145 - if (unlikely(dio->bi_error) && !bio->bi_error) 1146 - bio->bi_error = dio->bi_error; 1147 - if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { 1145 + if (unlikely(dio->bi_status) && !bio->bi_status) 1146 + bio->bi_status = dio->bi_status; 1147 + if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { 1148 1148 dio->range.logical_sector += dio->range.n_sectors; 1149 1149 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT); 1150 1150 INIT_WORK(&dio->work, integrity_bio_wait); ··· 1318 1318 dec_in_flight(dio); 1319 1319 return; 1320 1320 error: 1321 - dio->bi_error = r; 1321 + dio->bi_status = errno_to_blk_status(r); 1322 1322 dec_in_flight(dio); 1323 1323 } 1324 1324 ··· 1331 1331 sector_t area, offset; 1332 1332 1333 1333 dio->ic = ic; 1334 - dio->bi_error = 0; 1334 + dio->bi_status = 0; 1335 1335 1336 1336 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { 1337 1337 submit_flush_bio(ic, dio);
+5 -5
drivers/md/dm-io.c
··· 124 124 fn(error_bits, context); 125 125 } 126 126 127 - static void dec_count(struct io *io, unsigned int region, int error) 127 + static void dec_count(struct io *io, unsigned int region, blk_status_t error) 128 128 { 129 129 if (error) 130 130 set_bit(region, &io->error_bits); ··· 137 137 { 138 138 struct io *io; 139 139 unsigned region; 140 - int error; 140 + blk_status_t error; 141 141 142 - if (bio->bi_error && bio_data_dir(bio) == READ) 142 + if (bio->bi_status && bio_data_dir(bio) == READ) 143 143 zero_fill_bio(bio); 144 144 145 145 /* ··· 147 147 */ 148 148 retrieve_io_and_region_from_bio(bio, &io, &region); 149 149 150 - error = bio->bi_error; 150 + error = bio->bi_status; 151 151 bio_put(bio); 152 152 153 153 dec_count(io, region, error); ··· 319 319 if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || 320 320 op == REQ_OP_WRITE_SAME) && 321 321 special_cmd_max_sectors == 0) { 322 - dec_count(io, region, -EOPNOTSUPP); 322 + dec_count(io, region, BLK_STS_NOTSUPP); 323 323 return; 324 324 } 325 325
+4 -3
drivers/md/dm-log-writes.c
··· 150 150 { 151 151 struct log_writes_c *lc = bio->bi_private; 152 152 153 - if (bio->bi_error) { 153 + if (bio->bi_status) { 154 154 unsigned long flags; 155 155 156 - DMERR("Error writing log block, error=%d", bio->bi_error); 156 + DMERR("Error writing log block, error=%d", bio->bi_status); 157 157 spin_lock_irqsave(&lc->blocks_lock, flags); 158 158 lc->logging_enabled = false; 159 159 spin_unlock_irqrestore(&lc->blocks_lock, flags); ··· 664 664 return DM_MAPIO_REMAPPED; 665 665 } 666 666 667 - static int normal_end_io(struct dm_target *ti, struct bio *bio, int *error) 667 + static int normal_end_io(struct dm_target *ti, struct bio *bio, 668 + blk_status_t *error) 668 669 { 669 670 struct log_writes_c *lc = ti->private; 670 671 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+8 -7
drivers/md/dm-mpath.c
··· 565 565 mpio->pgpath = pgpath; 566 566 mpio->nr_bytes = nr_bytes; 567 567 568 - bio->bi_error = 0; 568 + bio->bi_status = 0; 569 569 bio->bi_bdev = pgpath->path.dev->bdev; 570 570 bio->bi_opf |= REQ_FAILFAST_TRANSPORT; 571 571 ··· 623 623 r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio)); 624 624 switch (r) { 625 625 case DM_MAPIO_KILL: 626 - r = -EIO; 627 - /*FALLTHRU*/ 626 + bio->bi_status = BLK_STS_IOERR; 627 + bio_endio(bio); 628 628 case DM_MAPIO_REQUEUE: 629 - bio->bi_error = r; 629 + bio->bi_status = BLK_STS_DM_REQUEUE; 630 630 bio_endio(bio); 631 631 break; 632 632 case DM_MAPIO_REMAPPED: ··· 1510 1510 return r; 1511 1511 } 1512 1512 1513 - static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int *error) 1513 + static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, 1514 + blk_status_t *error) 1514 1515 { 1515 1516 struct multipath *m = ti->private; 1516 1517 struct dm_mpath_io *mpio = get_mpio_from_bio(clone); ··· 1519 1518 unsigned long flags; 1520 1519 int r = DM_ENDIO_DONE; 1521 1520 1522 - if (!*error || noretry_error(errno_to_blk_status(*error))) 1521 + if (!*error || noretry_error(*error)) 1523 1522 goto done; 1524 1523 1525 1524 if (pgpath) ··· 1528 1527 if (atomic_read(&m->nr_valid_paths) == 0 && 1529 1528 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1530 1529 dm_report_EIO(m); 1531 - *error = -EIO; 1530 + *error = BLK_STS_IOERR; 1532 1531 goto done; 1533 1532 } 1534 1533
+7 -6
drivers/md/dm-raid1.c
··· 490 490 * If device is suspended, complete the bio. 491 491 */ 492 492 if (dm_noflush_suspending(ms->ti)) 493 - bio->bi_error = DM_ENDIO_REQUEUE; 493 + bio->bi_status = BLK_STS_DM_REQUEUE; 494 494 else 495 - bio->bi_error = -EIO; 495 + bio->bi_status = BLK_STS_IOERR; 496 496 497 497 bio_endio(bio); 498 498 return; ··· 626 626 * degrade the array. 627 627 */ 628 628 if (bio_op(bio) == REQ_OP_DISCARD) { 629 - bio->bi_error = -EOPNOTSUPP; 629 + bio->bi_status = BLK_STS_NOTSUPP; 630 630 bio_endio(bio); 631 631 return; 632 632 } ··· 1236 1236 return DM_MAPIO_REMAPPED; 1237 1237 } 1238 1238 1239 - static int mirror_end_io(struct dm_target *ti, struct bio *bio, int *error) 1239 + static int mirror_end_io(struct dm_target *ti, struct bio *bio, 1240 + blk_status_t *error) 1240 1241 { 1241 1242 int rw = bio_data_dir(bio); 1242 1243 struct mirror_set *ms = (struct mirror_set *) ti->private; ··· 1256 1255 return DM_ENDIO_DONE; 1257 1256 } 1258 1257 1259 - if (*error == -EOPNOTSUPP) 1258 + if (*error == BLK_STS_NOTSUPP) 1260 1259 return DM_ENDIO_DONE; 1261 1260 1262 1261 if (bio->bi_opf & REQ_RAHEAD) ··· 1278 1277 bd = &bio_record->details; 1279 1278 1280 1279 dm_bio_restore(bd, bio); 1281 - bio->bi_error = 0; 1280 + bio->bi_status = 0; 1282 1281 1283 1282 queue_bio(ms, bio, rw); 1284 1283 return DM_ENDIO_INCOMPLETE;
+1 -1
drivers/md/dm-rq.c
··· 119 119 struct dm_rq_target_io *tio = info->tio; 120 120 struct bio *bio = info->orig; 121 121 unsigned int nr_bytes = info->orig->bi_iter.bi_size; 122 - blk_status_t error = errno_to_blk_status(clone->bi_error); 122 + blk_status_t error = clone->bi_status; 123 123 124 124 bio_put(clone); 125 125
+3 -2
drivers/md/dm-snap.c
··· 1590 1590 { 1591 1591 void *callback_data = bio->bi_private; 1592 1592 1593 - dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0); 1593 + dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0); 1594 1594 } 1595 1595 1596 1596 static void start_full_bio(struct dm_snap_pending_exception *pe, ··· 1851 1851 return r; 1852 1852 } 1853 1853 1854 - static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int *error) 1854 + static int snapshot_end_io(struct dm_target *ti, struct bio *bio, 1855 + blk_status_t *error) 1855 1856 { 1856 1857 struct dm_snapshot *s = ti->private; 1857 1858
+3 -2
drivers/md/dm-stripe.c
··· 375 375 } 376 376 } 377 377 378 - static int stripe_end_io(struct dm_target *ti, struct bio *bio, int *error) 378 + static int stripe_end_io(struct dm_target *ti, struct bio *bio, 379 + blk_status_t *error) 379 380 { 380 381 unsigned i; 381 382 char major_minor[16]; ··· 388 387 if (bio->bi_opf & REQ_RAHEAD) 389 388 return DM_ENDIO_DONE; 390 389 391 - if (*error == -EOPNOTSUPP) 390 + if (*error == BLK_STS_NOTSUPP) 392 391 return DM_ENDIO_DONE; 393 392 394 393 memset(major_minor, 0, sizeof(major_minor));
+32 -33
drivers/md/dm-thin.c
··· 383 383 * Even if r is set, there could be sub discards in flight that we 384 384 * need to wait for. 385 385 */ 386 - if (r && !op->parent_bio->bi_error) 387 - op->parent_bio->bi_error = r; 386 + if (r && !op->parent_bio->bi_status) 387 + op->parent_bio->bi_status = errno_to_blk_status(r); 388 388 bio_endio(op->parent_bio); 389 389 } 390 390 ··· 450 450 } 451 451 452 452 static void cell_error_with_code(struct pool *pool, 453 - struct dm_bio_prison_cell *cell, int error_code) 453 + struct dm_bio_prison_cell *cell, blk_status_t error_code) 454 454 { 455 455 dm_cell_error(pool->prison, cell, error_code); 456 456 dm_bio_prison_free_cell(pool->prison, cell); 457 457 } 458 458 459 - static int get_pool_io_error_code(struct pool *pool) 459 + static blk_status_t get_pool_io_error_code(struct pool *pool) 460 460 { 461 - return pool->out_of_data_space ? -ENOSPC : -EIO; 461 + return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR; 462 462 } 463 463 464 464 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) 465 465 { 466 - int error = get_pool_io_error_code(pool); 467 - 468 - cell_error_with_code(pool, cell, error); 466 + cell_error_with_code(pool, cell, get_pool_io_error_code(pool)); 469 467 } 470 468 471 469 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) ··· 473 475 474 476 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) 475 477 { 476 - cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE); 478 + cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE); 477 479 } 478 480 479 481 /*----------------------------------------------------------------*/ ··· 553 555 bio_list_init(master); 554 556 } 555 557 556 - static void error_bio_list(struct bio_list *bios, int error) 558 + static void error_bio_list(struct bio_list *bios, blk_status_t error) 557 559 { 558 560 struct bio *bio; 559 561 560 562 while ((bio = bio_list_pop(bios))) { 561 - bio->bi_error = error; 563 + bio->bi_status = error; 562 
564 bio_endio(bio); 563 565 } 564 566 } 565 567 566 - static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error) 568 + static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, 569 + blk_status_t error) 567 570 { 568 571 struct bio_list bios; 569 572 unsigned long flags; ··· 607 608 __merge_bio_list(&bios, &tc->retry_on_resume_list); 608 609 spin_unlock_irqrestore(&tc->lock, flags); 609 610 610 - error_bio_list(&bios, DM_ENDIO_REQUEUE); 611 + error_bio_list(&bios, BLK_STS_DM_REQUEUE); 611 612 requeue_deferred_cells(tc); 612 613 } 613 614 614 - static void error_retry_list_with_code(struct pool *pool, int error) 615 + static void error_retry_list_with_code(struct pool *pool, blk_status_t error) 615 616 { 616 617 struct thin_c *tc; 617 618 ··· 623 624 624 625 static void error_retry_list(struct pool *pool) 625 626 { 626 - int error = get_pool_io_error_code(pool); 627 - 628 - error_retry_list_with_code(pool, error); 627 + error_retry_list_with_code(pool, get_pool_io_error_code(pool)); 629 628 } 630 629 631 630 /* ··· 771 774 */ 772 775 atomic_t prepare_actions; 773 776 774 - int err; 777 + blk_status_t status; 775 778 struct thin_c *tc; 776 779 dm_block_t virt_begin, virt_end; 777 780 dm_block_t data_block; ··· 811 814 { 812 815 struct dm_thin_new_mapping *m = context; 813 816 814 - m->err = read_err || write_err ? -EIO : 0; 817 + m->status = read_err || write_err ? 
BLK_STS_IOERR : 0; 815 818 complete_mapping_preparation(m); 816 819 } 817 820 ··· 822 825 823 826 bio->bi_end_io = m->saved_bi_end_io; 824 827 825 - m->err = bio->bi_error; 828 + m->status = bio->bi_status; 826 829 complete_mapping_preparation(m); 827 830 } 828 831 ··· 922 925 struct bio *bio = m->bio; 923 926 int r; 924 927 925 - if (m->err) { 928 + if (m->status) { 926 929 cell_error(pool, m->cell); 927 930 goto out; 928 931 } ··· 1492 1495 spin_unlock_irqrestore(&tc->lock, flags); 1493 1496 } 1494 1497 1495 - static int should_error_unserviceable_bio(struct pool *pool) 1498 + static blk_status_t should_error_unserviceable_bio(struct pool *pool) 1496 1499 { 1497 1500 enum pool_mode m = get_pool_mode(pool); 1498 1501 ··· 1500 1503 case PM_WRITE: 1501 1504 /* Shouldn't get here */ 1502 1505 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); 1503 - return -EIO; 1506 + return BLK_STS_IOERR; 1504 1507 1505 1508 case PM_OUT_OF_DATA_SPACE: 1506 - return pool->pf.error_if_no_space ? -ENOSPC : 0; 1509 + return pool->pf.error_if_no_space ? 
BLK_STS_NOSPC : 0; 1507 1510 1508 1511 case PM_READ_ONLY: 1509 1512 case PM_FAIL: 1510 - return -EIO; 1513 + return BLK_STS_IOERR; 1511 1514 default: 1512 1515 /* Shouldn't get here */ 1513 1516 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); 1514 - return -EIO; 1517 + return BLK_STS_IOERR; 1515 1518 } 1516 1519 } 1517 1520 1518 1521 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) 1519 1522 { 1520 - int error = should_error_unserviceable_bio(pool); 1523 + blk_status_t error = should_error_unserviceable_bio(pool); 1521 1524 1522 1525 if (error) { 1523 - bio->bi_error = error; 1526 + bio->bi_status = error; 1524 1527 bio_endio(bio); 1525 1528 } else 1526 1529 retry_on_resume(bio); ··· 1530 1533 { 1531 1534 struct bio *bio; 1532 1535 struct bio_list bios; 1533 - int error; 1536 + blk_status_t error; 1534 1537 1535 1538 error = should_error_unserviceable_bio(pool); 1536 1539 if (error) { ··· 2068 2071 unsigned count = 0; 2069 2072 2070 2073 if (tc->requeue_mode) { 2071 - error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE); 2074 + error_thin_bio_list(tc, &tc->deferred_bio_list, 2075 + BLK_STS_DM_REQUEUE); 2072 2076 return; 2073 2077 } 2074 2078 ··· 2320 2322 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { 2321 2323 pool->pf.error_if_no_space = true; 2322 2324 notify_of_pool_mode_change_to_oods(pool); 2323 - error_retry_list_with_code(pool, -ENOSPC); 2325 + error_retry_list_with_code(pool, BLK_STS_NOSPC); 2324 2326 } 2325 2327 } 2326 2328 ··· 2622 2624 thin_hook_bio(tc, bio); 2623 2625 2624 2626 if (tc->requeue_mode) { 2625 - bio->bi_error = DM_ENDIO_REQUEUE; 2627 + bio->bi_status = BLK_STS_DM_REQUEUE; 2626 2628 bio_endio(bio); 2627 2629 return DM_MAPIO_SUBMITTED; 2628 2630 } ··· 4175 4177 return thin_bio_map(ti, bio); 4176 4178 } 4177 4179 4178 - static int thin_endio(struct dm_target *ti, struct bio *bio, int *err) 4180 + static int thin_endio(struct dm_target *ti, struct bio *bio, 
4181 + blk_status_t *err) 4179 4182 { 4180 4183 unsigned long flags; 4181 4184 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+5 -5
drivers/md/dm-verity-target.c
··· 538 538 /* 539 539 * End one "io" structure with a given error. 540 540 */ 541 - static void verity_finish_io(struct dm_verity_io *io, int error) 541 + static void verity_finish_io(struct dm_verity_io *io, blk_status_t status) 542 542 { 543 543 struct dm_verity *v = io->v; 544 544 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); 545 545 546 546 bio->bi_end_io = io->orig_bi_end_io; 547 - bio->bi_error = error; 547 + bio->bi_status = status; 548 548 549 549 verity_fec_finish_io(io); 550 550 ··· 555 555 { 556 556 struct dm_verity_io *io = container_of(w, struct dm_verity_io, work); 557 557 558 - verity_finish_io(io, verity_verify_io(io)); 558 + verity_finish_io(io, errno_to_blk_status(verity_verify_io(io))); 559 559 } 560 560 561 561 static void verity_end_io(struct bio *bio) 562 562 { 563 563 struct dm_verity_io *io = bio->bi_private; 564 564 565 - if (bio->bi_error && !verity_fec_is_enabled(io->v)) { 566 - verity_finish_io(io, bio->bi_error); 565 + if (bio->bi_status && !verity_fec_is_enabled(io->v)) { 566 + verity_finish_io(io, bio->bi_status); 567 567 return; 568 568 } 569 569
+20 -20
drivers/md/dm.c
··· 63 63 */ 64 64 struct dm_io { 65 65 struct mapped_device *md; 66 - int error; 66 + blk_status_t status; 67 67 atomic_t io_count; 68 68 struct bio *bio; 69 69 unsigned long start_time; ··· 768 768 * Decrements the number of outstanding ios that a bio has been 769 769 * cloned into, completing the original io if necc. 770 770 */ 771 - static void dec_pending(struct dm_io *io, int error) 771 + static void dec_pending(struct dm_io *io, blk_status_t error) 772 772 { 773 773 unsigned long flags; 774 - int io_error; 774 + blk_status_t io_error; 775 775 struct bio *bio; 776 776 struct mapped_device *md = io->md; 777 777 778 778 /* Push-back supersedes any I/O errors */ 779 779 if (unlikely(error)) { 780 780 spin_lock_irqsave(&io->endio_lock, flags); 781 - if (!(io->error > 0 && __noflush_suspending(md))) 782 - io->error = error; 781 + if (!(io->status == BLK_STS_DM_REQUEUE && 782 + __noflush_suspending(md))) 783 + io->status = error; 783 784 spin_unlock_irqrestore(&io->endio_lock, flags); 784 785 } 785 786 786 787 if (atomic_dec_and_test(&io->io_count)) { 787 - if (io->error == DM_ENDIO_REQUEUE) { 788 + if (io->status == BLK_STS_DM_REQUEUE) { 788 789 /* 789 790 * Target requested pushing back the I/O. 790 791 */ ··· 794 793 bio_list_add_head(&md->deferred, io->bio); 795 794 else 796 795 /* noflush suspend was interrupted. 
*/ 797 - io->error = -EIO; 796 + io->status = BLK_STS_IOERR; 798 797 spin_unlock_irqrestore(&md->deferred_lock, flags); 799 798 } 800 799 801 - io_error = io->error; 800 + io_error = io->status; 802 801 bio = io->bio; 803 802 end_io_acct(io); 804 803 free_io(md, io); 805 804 806 - if (io_error == DM_ENDIO_REQUEUE) 805 + if (io_error == BLK_STS_DM_REQUEUE) 807 806 return; 808 807 809 808 if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { ··· 815 814 queue_io(md, bio); 816 815 } else { 817 816 /* done with normal IO or empty flush */ 818 - bio->bi_error = io_error; 817 + bio->bi_status = io_error; 819 818 bio_endio(bio); 820 819 } 821 820 } ··· 839 838 840 839 static void clone_endio(struct bio *bio) 841 840 { 842 - int error = bio->bi_error; 843 - int r = error; 841 + blk_status_t error = bio->bi_status; 844 842 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 845 843 struct dm_io *io = tio->io; 846 844 struct mapped_device *md = tio->io->md; 847 845 dm_endio_fn endio = tio->ti->type->end_io; 848 846 849 - if (unlikely(error == -EREMOTEIO)) { 847 + if (unlikely(error == BLK_STS_TARGET)) { 850 848 if (bio_op(bio) == REQ_OP_WRITE_SAME && 851 849 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) 852 850 disable_write_same(md); ··· 855 855 } 856 856 857 857 if (endio) { 858 - r = endio(tio->ti, bio, &error); 858 + int r = endio(tio->ti, bio, &error); 859 859 switch (r) { 860 860 case DM_ENDIO_REQUEUE: 861 - error = DM_ENDIO_REQUEUE; 861 + error = BLK_STS_DM_REQUEUE; 862 862 /*FALLTHRU*/ 863 863 case DM_ENDIO_DONE: 864 864 break; ··· 1094 1094 generic_make_request(clone); 1095 1095 break; 1096 1096 case DM_MAPIO_KILL: 1097 - r = -EIO; 1098 - /*FALLTHRU*/ 1097 + dec_pending(tio->io, BLK_STS_IOERR); 1098 + free_tio(tio); 1099 + break; 1099 1100 case DM_MAPIO_REQUEUE: 1100 - /* error the io and bail out, or requeue it if needed */ 1101 - dec_pending(tio->io, r); 1101 + dec_pending(tio->io, BLK_STS_DM_REQUEUE); 1102 1102 
free_tio(tio); 1103 1103 break; 1104 1104 default: ··· 1366 1366 ci.map = map; 1367 1367 ci.md = md; 1368 1368 ci.io = alloc_io(md); 1369 - ci.io->error = 0; 1369 + ci.io->status = 0; 1370 1370 atomic_set(&ci.io->io_count, 1); 1371 1371 ci.io->bio = bio; 1372 1372 ci.io->md = md;
+4 -4
drivers/md/md.c
··· 273 273 } 274 274 if (mddev->ro == 1 && unlikely(rw == WRITE)) { 275 275 if (bio_sectors(bio) != 0) 276 - bio->bi_error = -EROFS; 276 + bio->bi_status = BLK_STS_IOERR; 277 277 bio_endio(bio); 278 278 return BLK_QC_T_NONE; 279 279 } ··· 719 719 struct md_rdev *rdev = bio->bi_private; 720 720 struct mddev *mddev = rdev->mddev; 721 721 722 - if (bio->bi_error) { 723 - pr_err("md: super_written gets error=%d\n", bio->bi_error); 722 + if (bio->bi_status) { 723 + pr_err("md: super_written gets error=%d\n", bio->bi_status); 724 724 md_error(mddev, rdev); 725 725 if (!test_bit(Faulty, &rdev->flags) 726 726 && (bio->bi_opf & MD_FAILFAST)) { ··· 801 801 802 802 submit_bio_wait(bio); 803 803 804 - ret = !bio->bi_error; 804 + ret = !bio->bi_status; 805 805 bio_put(bio); 806 806 return ret; 807 807 }
+5 -5
drivers/md/multipath.c
··· 73 73 * operation and are ready to return a success/failure code to the buffer 74 74 * cache layer. 75 75 */ 76 - static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) 76 + static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status) 77 77 { 78 78 struct bio *bio = mp_bh->master_bio; 79 79 struct mpconf *conf = mp_bh->mddev->private; 80 80 81 - bio->bi_error = err; 81 + bio->bi_status = status; 82 82 bio_endio(bio); 83 83 mempool_free(mp_bh, conf->pool); 84 84 } ··· 89 89 struct mpconf *conf = mp_bh->mddev->private; 90 90 struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev; 91 91 92 - if (!bio->bi_error) 92 + if (!bio->bi_status) 93 93 multipath_end_bh_io(mp_bh, 0); 94 94 else if (!(bio->bi_opf & REQ_RAHEAD)) { 95 95 /* ··· 102 102 (unsigned long long)bio->bi_iter.bi_sector); 103 103 multipath_reschedule_retry(mp_bh); 104 104 } else 105 - multipath_end_bh_io(mp_bh, bio->bi_error); 105 + multipath_end_bh_io(mp_bh, bio->bi_status); 106 106 rdev_dec_pending(rdev, conf->mddev); 107 107 } 108 108 ··· 347 347 pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", 348 348 bdevname(bio->bi_bdev,b), 349 349 (unsigned long long)bio->bi_iter.bi_sector); 350 - multipath_end_bh_io(mp_bh, -EIO); 350 + multipath_end_bh_io(mp_bh, BLK_STS_IOERR); 351 351 } else { 352 352 pr_err("multipath: %s: redirecting sector %llu to another IO path\n", 353 353 bdevname(bio->bi_bdev,b),
+18 -18
drivers/md/raid1.c
··· 277 277 struct r1conf *conf = r1_bio->mddev->private; 278 278 279 279 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) 280 - bio->bi_error = -EIO; 280 + bio->bi_status = BLK_STS_IOERR; 281 281 282 282 bio_endio(bio); 283 283 /* ··· 335 335 336 336 static void raid1_end_read_request(struct bio *bio) 337 337 { 338 - int uptodate = !bio->bi_error; 338 + int uptodate = !bio->bi_status; 339 339 struct r1bio *r1_bio = bio->bi_private; 340 340 struct r1conf *conf = r1_bio->mddev->private; 341 341 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; ··· 426 426 struct md_rdev *rdev = conf->mirrors[mirror].rdev; 427 427 bool discard_error; 428 428 429 - discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; 429 + discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; 430 430 431 431 /* 432 432 * 'one mirror IO has finished' event handler: 433 433 */ 434 - if (bio->bi_error && !discard_error) { 434 + if (bio->bi_status && !discard_error) { 435 435 set_bit(WriteErrorSeen, &rdev->flags); 436 436 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 437 437 set_bit(MD_RECOVERY_NEEDED, & ··· 802 802 bio->bi_next = NULL; 803 803 bio->bi_bdev = rdev->bdev; 804 804 if (test_bit(Faulty, &rdev->flags)) { 805 - bio->bi_error = -EIO; 805 + bio->bi_status = BLK_STS_IOERR; 806 806 bio_endio(bio); 807 807 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 808 808 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) ··· 1856 1856 * or re-read if the read failed. 
1857 1857 * We don't do much here, just schedule handling by raid1d 1858 1858 */ 1859 - if (!bio->bi_error) 1859 + if (!bio->bi_status) 1860 1860 set_bit(R1BIO_Uptodate, &r1_bio->state); 1861 1861 1862 1862 if (atomic_dec_and_test(&r1_bio->remaining)) ··· 1865 1865 1866 1866 static void end_sync_write(struct bio *bio) 1867 1867 { 1868 - int uptodate = !bio->bi_error; 1868 + int uptodate = !bio->bi_status; 1869 1869 struct r1bio *r1_bio = get_resync_r1bio(bio); 1870 1870 struct mddev *mddev = r1_bio->mddev; 1871 1871 struct r1conf *conf = mddev->private; ··· 2058 2058 idx ++; 2059 2059 } 2060 2060 set_bit(R1BIO_Uptodate, &r1_bio->state); 2061 - bio->bi_error = 0; 2061 + bio->bi_status = 0; 2062 2062 return 1; 2063 2063 } 2064 2064 ··· 2082 2082 for (i = 0; i < conf->raid_disks * 2; i++) { 2083 2083 int j; 2084 2084 int size; 2085 - int error; 2085 + blk_status_t status; 2086 2086 struct bio_vec *bi; 2087 2087 struct bio *b = r1_bio->bios[i]; 2088 2088 struct resync_pages *rp = get_resync_pages(b); 2089 2089 if (b->bi_end_io != end_sync_read) 2090 2090 continue; 2091 2091 /* fixup the bio for reuse, but preserve errno */ 2092 - error = b->bi_error; 2092 + status = b->bi_status; 2093 2093 bio_reset(b); 2094 - b->bi_error = error; 2094 + b->bi_status = status; 2095 2095 b->bi_vcnt = vcnt; 2096 2096 b->bi_iter.bi_size = r1_bio->sectors << 9; 2097 2097 b->bi_iter.bi_sector = r1_bio->sector + ··· 2113 2113 } 2114 2114 for (primary = 0; primary < conf->raid_disks * 2; primary++) 2115 2115 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 2116 - !r1_bio->bios[primary]->bi_error) { 2116 + !r1_bio->bios[primary]->bi_status) { 2117 2117 r1_bio->bios[primary]->bi_end_io = NULL; 2118 2118 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 2119 2119 break; ··· 2123 2123 int j; 2124 2124 struct bio *pbio = r1_bio->bios[primary]; 2125 2125 struct bio *sbio = r1_bio->bios[i]; 2126 - int error = sbio->bi_error; 2126 + blk_status_t status = sbio->bi_status; 2127 2127 struct 
page **ppages = get_resync_pages(pbio)->pages; 2128 2128 struct page **spages = get_resync_pages(sbio)->pages; 2129 2129 struct bio_vec *bi; ··· 2132 2132 if (sbio->bi_end_io != end_sync_read) 2133 2133 continue; 2134 2134 /* Now we can 'fixup' the error value */ 2135 - sbio->bi_error = 0; 2135 + sbio->bi_status = 0; 2136 2136 2137 2137 bio_for_each_segment_all(bi, sbio, j) 2138 2138 page_len[j] = bi->bv_len; 2139 2139 2140 - if (!error) { 2140 + if (!status) { 2141 2141 for (j = vcnt; j-- ; ) { 2142 2142 if (memcmp(page_address(ppages[j]), 2143 2143 page_address(spages[j]), ··· 2149 2149 if (j >= 0) 2150 2150 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); 2151 2151 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 2152 - && !error)) { 2152 + && !status)) { 2153 2153 /* No need to write to this device. */ 2154 2154 sbio->bi_end_io = NULL; 2155 2155 rdev_dec_pending(conf->mirrors[i].rdev, mddev); ··· 2400 2400 struct bio *bio = r1_bio->bios[m]; 2401 2401 if (bio->bi_end_io == NULL) 2402 2402 continue; 2403 - if (!bio->bi_error && 2403 + if (!bio->bi_status && 2404 2404 test_bit(R1BIO_MadeGood, &r1_bio->state)) { 2405 2405 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); 2406 2406 } 2407 - if (bio->bi_error && 2407 + if (bio->bi_status && 2408 2408 test_bit(R1BIO_WriteError, &r1_bio->state)) { 2409 2409 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) 2410 2410 md_error(conf->mddev, rdev);
+18 -18
drivers/md/raid10.c
··· 336 336 struct r10conf *conf = r10_bio->mddev->private; 337 337 338 338 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 339 - bio->bi_error = -EIO; 339 + bio->bi_status = BLK_STS_IOERR; 340 340 341 341 bio_endio(bio); 342 342 /* ··· 389 389 390 390 static void raid10_end_read_request(struct bio *bio) 391 391 { 392 - int uptodate = !bio->bi_error; 392 + int uptodate = !bio->bi_status; 393 393 struct r10bio *r10_bio = bio->bi_private; 394 394 int slot, dev; 395 395 struct md_rdev *rdev; ··· 477 477 struct bio *to_put = NULL; 478 478 bool discard_error; 479 479 480 - discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD; 480 + discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; 481 481 482 482 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 483 483 ··· 491 491 /* 492 492 * this branch is our 'one mirror IO has finished' event handler: 493 493 */ 494 - if (bio->bi_error && !discard_error) { 494 + if (bio->bi_status && !discard_error) { 495 495 if (repl) 496 496 /* Never record new bad blocks to replacement, 497 497 * just fail it. 
··· 913 913 bio->bi_next = NULL; 914 914 bio->bi_bdev = rdev->bdev; 915 915 if (test_bit(Faulty, &rdev->flags)) { 916 - bio->bi_error = -EIO; 916 + bio->bi_status = BLK_STS_IOERR; 917 917 bio_endio(bio); 918 918 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 919 919 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) ··· 1098 1098 bio->bi_next = NULL; 1099 1099 bio->bi_bdev = rdev->bdev; 1100 1100 if (test_bit(Faulty, &rdev->flags)) { 1101 - bio->bi_error = -EIO; 1101 + bio->bi_status = BLK_STS_IOERR; 1102 1102 bio_endio(bio); 1103 1103 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1104 1104 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) ··· 1888 1888 { 1889 1889 struct r10conf *conf = r10_bio->mddev->private; 1890 1890 1891 - if (!bio->bi_error) 1891 + if (!bio->bi_status) 1892 1892 set_bit(R10BIO_Uptodate, &r10_bio->state); 1893 1893 else 1894 1894 /* The write handler will notice the lack of ··· 1972 1972 else 1973 1973 rdev = conf->mirrors[d].rdev; 1974 1974 1975 - if (bio->bi_error) { 1975 + if (bio->bi_status) { 1976 1976 if (repl) 1977 1977 md_error(mddev, rdev); 1978 1978 else { ··· 2021 2021 2022 2022 /* find the first device with a block */ 2023 2023 for (i=0; i<conf->copies; i++) 2024 - if (!r10_bio->devs[i].bio->bi_error) 2024 + if (!r10_bio->devs[i].bio->bi_status) 2025 2025 break; 2026 2026 2027 2027 if (i == conf->copies) ··· 2050 2050 tpages = get_resync_pages(tbio)->pages; 2051 2051 d = r10_bio->devs[i].devnum; 2052 2052 rdev = conf->mirrors[d].rdev; 2053 - if (!r10_bio->devs[i].bio->bi_error) { 2053 + if (!r10_bio->devs[i].bio->bi_status) { 2054 2054 /* We know that the bi_io_vec layout is the same for 2055 2055 * both 'first' and 'i', so we just compare them. 
2056 2056 * All vec entries are PAGE_SIZE; ··· 2633 2633 rdev = conf->mirrors[dev].rdev; 2634 2634 if (r10_bio->devs[m].bio == NULL) 2635 2635 continue; 2636 - if (!r10_bio->devs[m].bio->bi_error) { 2636 + if (!r10_bio->devs[m].bio->bi_status) { 2637 2637 rdev_clear_badblocks( 2638 2638 rdev, 2639 2639 r10_bio->devs[m].addr, ··· 2649 2649 if (r10_bio->devs[m].repl_bio == NULL) 2650 2650 continue; 2651 2651 2652 - if (!r10_bio->devs[m].repl_bio->bi_error) { 2652 + if (!r10_bio->devs[m].repl_bio->bi_status) { 2653 2653 rdev_clear_badblocks( 2654 2654 rdev, 2655 2655 r10_bio->devs[m].addr, ··· 2675 2675 r10_bio->devs[m].addr, 2676 2676 r10_bio->sectors, 0); 2677 2677 rdev_dec_pending(rdev, conf->mddev); 2678 - } else if (bio != NULL && bio->bi_error) { 2678 + } else if (bio != NULL && bio->bi_status) { 2679 2679 fail = true; 2680 2680 if (!narrow_write_error(r10_bio, m)) { 2681 2681 md_error(conf->mddev, rdev); ··· 3267 3267 r10_bio->devs[i].repl_bio->bi_end_io = NULL; 3268 3268 3269 3269 bio = r10_bio->devs[i].bio; 3270 - bio->bi_error = -EIO; 3270 + bio->bi_status = BLK_STS_IOERR; 3271 3271 rcu_read_lock(); 3272 3272 rdev = rcu_dereference(conf->mirrors[d].rdev); 3273 3273 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { ··· 3309 3309 3310 3310 /* Need to set up for writing to the replacement */ 3311 3311 bio = r10_bio->devs[i].repl_bio; 3312 - bio->bi_error = -EIO; 3312 + bio->bi_status = BLK_STS_IOERR; 3313 3313 3314 3314 sector = r10_bio->devs[i].addr; 3315 3315 bio->bi_next = biolist; ··· 3375 3375 3376 3376 if (bio->bi_end_io == end_sync_read) { 3377 3377 md_sync_acct(bio->bi_bdev, nr_sectors); 3378 - bio->bi_error = 0; 3378 + bio->bi_status = 0; 3379 3379 generic_make_request(bio); 3380 3380 } 3381 3381 } ··· 4394 4394 read_bio->bi_end_io = end_reshape_read; 4395 4395 bio_set_op_attrs(read_bio, REQ_OP_READ, 0); 4396 4396 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); 4397 - read_bio->bi_error = 0; 4397 + read_bio->bi_status = 0; 4398 4398 
read_bio->bi_vcnt = 0; 4399 4399 read_bio->bi_iter.bi_size = 0; 4400 4400 r10_bio->master_bio = read_bio; ··· 4638 4638 rdev = conf->mirrors[d].rdev; 4639 4639 } 4640 4640 4641 - if (bio->bi_error) { 4641 + if (bio->bi_status) { 4642 4642 /* FIXME should record badblock */ 4643 4643 md_error(mddev, rdev); 4644 4644 }
+2 -2
drivers/md/raid5-cache.c
··· 572 572 struct r5l_log *log = io->log; 573 573 unsigned long flags; 574 574 575 - if (bio->bi_error) 575 + if (bio->bi_status) 576 576 md_error(log->rdev->mddev, log->rdev); 577 577 578 578 bio_put(bio); ··· 1247 1247 unsigned long flags; 1248 1248 struct r5l_io_unit *io; 1249 1249 1250 - if (bio->bi_error) 1250 + if (bio->bi_status) 1251 1251 md_error(log->rdev->mddev, log->rdev); 1252 1252 1253 1253 spin_lock_irqsave(&log->io_list_lock, flags);
+1 -1
drivers/md/raid5-ppl.c
··· 397 397 398 398 pr_debug("%s: seq: %llu\n", __func__, io->seq); 399 399 400 - if (bio->bi_error) 400 + if (bio->bi_status) 401 401 md_error(ppl_conf->mddev, log->rdev); 402 402 403 403 list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
+11 -11
drivers/md/raid5.c
··· 2476 2476 2477 2477 pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", 2478 2478 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2479 - bi->bi_error); 2479 + bi->bi_status); 2480 2480 if (i == disks) { 2481 2481 bio_reset(bi); 2482 2482 BUG(); ··· 2496 2496 s = sh->sector + rdev->new_data_offset; 2497 2497 else 2498 2498 s = sh->sector + rdev->data_offset; 2499 - if (!bi->bi_error) { 2499 + if (!bi->bi_status) { 2500 2500 set_bit(R5_UPTODATE, &sh->dev[i].flags); 2501 2501 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2502 2502 /* Note that this cannot happen on a ··· 2613 2613 } 2614 2614 pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", 2615 2615 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2616 - bi->bi_error); 2616 + bi->bi_status); 2617 2617 if (i == disks) { 2618 2618 bio_reset(bi); 2619 2619 BUG(); ··· 2621 2621 } 2622 2622 2623 2623 if (replacement) { 2624 - if (bi->bi_error) 2624 + if (bi->bi_status) 2625 2625 md_error(conf->mddev, rdev); 2626 2626 else if (is_badblock(rdev, sh->sector, 2627 2627 STRIPE_SECTORS, 2628 2628 &first_bad, &bad_sectors)) 2629 2629 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2630 2630 } else { 2631 - if (bi->bi_error) { 2631 + if (bi->bi_status) { 2632 2632 set_bit(STRIPE_DEGRADED, &sh->state); 2633 2633 set_bit(WriteErrorSeen, &rdev->flags); 2634 2634 set_bit(R5_WriteError, &sh->dev[i].flags); ··· 2649 2649 } 2650 2650 rdev_dec_pending(rdev, conf->mddev); 2651 2651 2652 - if (sh->batch_head && bi->bi_error && !replacement) 2652 + if (sh->batch_head && bi->bi_status && !replacement) 2653 2653 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2654 2654 2655 2655 bio_reset(bi); ··· 3381 3381 sh->dev[i].sector + STRIPE_SECTORS) { 3382 3382 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 3383 3383 3384 - bi->bi_error = -EIO; 3384 + bi->bi_status = BLK_STS_IOERR; 3385 3385 md_write_end(conf->mddev); 3386 3386 bio_endio(bi); 3387 3387 bi = nextbi; ··· 3403 3403 
sh->dev[i].sector + STRIPE_SECTORS) { 3404 3404 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 3405 3405 3406 - bi->bi_error = -EIO; 3406 + bi->bi_status = BLK_STS_IOERR; 3407 3407 md_write_end(conf->mddev); 3408 3408 bio_endio(bi); 3409 3409 bi = bi2; ··· 3429 3429 struct bio *nextbi = 3430 3430 r5_next_bio(bi, sh->dev[i].sector); 3431 3431 3432 - bi->bi_error = -EIO; 3432 + bi->bi_status = BLK_STS_IOERR; 3433 3433 bio_endio(bi); 3434 3434 bi = nextbi; 3435 3435 } ··· 5144 5144 struct mddev *mddev; 5145 5145 struct r5conf *conf; 5146 5146 struct md_rdev *rdev; 5147 - int error = bi->bi_error; 5147 + blk_status_t error = bi->bi_status; 5148 5148 5149 5149 bio_put(bi); 5150 5150 ··· 5721 5721 release_stripe_plug(mddev, sh); 5722 5722 } else { 5723 5723 /* cannot get stripe for read-ahead, just give-up */ 5724 - bi->bi_error = -EIO; 5724 + bi->bi_status = BLK_STS_IOERR; 5725 5725 break; 5726 5726 } 5727 5727 }
+2 -2
drivers/nvdimm/blk.c
··· 186 186 * another kernel subsystem, and we just pass it through. 187 187 */ 188 188 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 189 - bio->bi_error = -EIO; 189 + bio->bi_status = BLK_STS_IOERR; 190 190 goto out; 191 191 } 192 192 ··· 205 205 "io error in %s sector %lld, len %d,\n", 206 206 (rw == READ) ? "READ" : "WRITE", 207 207 (unsigned long long) iter.bi_sector, len); 208 - bio->bi_error = err; 208 + bio->bi_status = errno_to_blk_status(err); 209 209 break; 210 210 } 211 211 }
+2 -2
drivers/nvdimm/btt.c
··· 1210 1210 * another kernel subsystem, and we just pass it through. 1211 1211 */ 1212 1212 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { 1213 - bio->bi_error = -EIO; 1213 + bio->bi_status = BLK_STS_IOERR; 1214 1214 goto out; 1215 1215 } 1216 1216 ··· 1232 1232 (op_is_write(bio_op(bio))) ? "WRITE" : 1233 1233 "READ", 1234 1234 (unsigned long long) iter.bi_sector, len); 1235 - bio->bi_error = err; 1235 + bio->bi_status = errno_to_blk_status(err); 1236 1236 break; 1237 1237 } 1238 1238 }
+14 -14
drivers/nvdimm/pmem.c
··· 49 49 return to_nd_region(to_dev(pmem)->parent); 50 50 } 51 51 52 - static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, 53 - unsigned int len) 52 + static blk_status_t pmem_clear_poison(struct pmem_device *pmem, 53 + phys_addr_t offset, unsigned int len) 54 54 { 55 55 struct device *dev = to_dev(pmem); 56 56 sector_t sector; 57 57 long cleared; 58 - int rc = 0; 58 + blk_status_t rc = BLK_STS_OK; 59 59 60 60 sector = (offset - pmem->data_offset) / 512; 61 61 62 62 cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); 63 63 if (cleared < len) 64 - rc = -EIO; 64 + rc = BLK_STS_IOERR; 65 65 if (cleared > 0 && cleared / 512) { 66 66 cleared /= 512; 67 67 dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__, ··· 84 84 kunmap_atomic(mem); 85 85 } 86 86 87 - static int read_pmem(struct page *page, unsigned int off, 87 + static blk_status_t read_pmem(struct page *page, unsigned int off, 88 88 void *pmem_addr, unsigned int len) 89 89 { 90 90 int rc; ··· 93 93 rc = memcpy_mcsafe(mem + off, pmem_addr, len); 94 94 kunmap_atomic(mem); 95 95 if (rc) 96 - return -EIO; 97 - return 0; 96 + return BLK_STS_IOERR; 97 + return BLK_STS_OK; 98 98 } 99 99 100 - static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, 100 + static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page, 101 101 unsigned int len, unsigned int off, bool is_write, 102 102 sector_t sector) 103 103 { 104 - int rc = 0; 104 + blk_status_t rc = BLK_STS_OK; 105 105 bool bad_pmem = false; 106 106 phys_addr_t pmem_off = sector * 512 + pmem->data_offset; 107 107 void *pmem_addr = pmem->virt_addr + pmem_off; ··· 111 111 112 112 if (!is_write) { 113 113 if (unlikely(bad_pmem)) 114 - rc = -EIO; 114 + rc = BLK_STS_IOERR; 115 115 else { 116 116 rc = read_pmem(page, off, pmem_addr, len); 117 117 flush_dcache_page(page); ··· 149 149 150 150 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) 151 151 { 152 - int rc = 0; 152 + 
blk_status_t rc = 0; 153 153 bool do_acct; 154 154 unsigned long start; 155 155 struct bio_vec bvec; ··· 166 166 bvec.bv_offset, op_is_write(bio_op(bio)), 167 167 iter.bi_sector); 168 168 if (rc) { 169 - bio->bi_error = rc; 169 + bio->bi_status = rc; 170 170 break; 171 171 } 172 172 } ··· 184 184 struct page *page, bool is_write) 185 185 { 186 186 struct pmem_device *pmem = bdev->bd_queue->queuedata; 187 - int rc; 187 + blk_status_t rc; 188 188 189 189 rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector); 190 190 ··· 197 197 if (rc == 0) 198 198 page_endio(page, is_write, 0); 199 199 200 - return rc; 200 + return blk_status_to_errno(rc); 201 201 } 202 202 203 203 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
+2 -2
drivers/nvme/target/io-cmd.c
··· 21 21 struct nvmet_req *req = bio->bi_private; 22 22 23 23 nvmet_req_complete(req, 24 - bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); 24 + bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); 25 25 26 26 if (bio != &req->inline_bio) 27 27 bio_put(bio); ··· 145 145 bio->bi_private = req; 146 146 bio->bi_end_io = nvmet_bio_done; 147 147 if (status) { 148 - bio->bi_error = -EIO; 148 + bio->bi_status = BLK_STS_IOERR; 149 149 bio_endio(bio); 150 150 } else { 151 151 submit_bio(bio);
+5 -5
drivers/target/target_core_iblock.c
··· 296 296 struct se_cmd *cmd = bio->bi_private; 297 297 struct iblock_req *ibr = cmd->priv; 298 298 299 - if (bio->bi_error) { 300 - pr_err("bio error: %p, err: %d\n", bio, bio->bi_error); 299 + if (bio->bi_status) { 300 + pr_err("bio error: %p, err: %d\n", bio, bio->bi_status); 301 301 /* 302 302 * Bump the ib_bio_err_cnt and release bio. 303 303 */ ··· 354 354 { 355 355 struct se_cmd *cmd = bio->bi_private; 356 356 357 - if (bio->bi_error) 358 - pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error); 357 + if (bio->bi_status) 358 + pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status); 359 359 360 360 if (cmd) { 361 - if (bio->bi_error) 361 + if (bio->bi_status) 362 362 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); 363 363 else 364 364 target_complete_cmd(cmd, SAM_STAT_GOOD);
+10 -8
fs/block_dev.c
··· 262 262 if (vecs != inline_vecs) 263 263 kfree(vecs); 264 264 265 - if (unlikely(bio.bi_error)) 266 - return bio.bi_error; 265 + if (unlikely(bio.bi_status)) 266 + return blk_status_to_errno(bio.bi_status); 267 267 return ret; 268 268 } 269 269 ··· 288 288 bool should_dirty = dio->should_dirty; 289 289 290 290 if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) { 291 - if (bio->bi_error && !dio->bio.bi_error) 292 - dio->bio.bi_error = bio->bi_error; 291 + if (bio->bi_status && !dio->bio.bi_status) 292 + dio->bio.bi_status = bio->bi_status; 293 293 } else { 294 294 if (!dio->is_sync) { 295 295 struct kiocb *iocb = dio->iocb; 296 - ssize_t ret = dio->bio.bi_error; 296 + ssize_t ret; 297 297 298 - if (likely(!ret)) { 298 + if (likely(!dio->bio.bi_status)) { 299 299 ret = dio->size; 300 300 iocb->ki_pos += ret; 301 + } else { 302 + ret = blk_status_to_errno(dio->bio.bi_status); 301 303 } 302 304 303 305 dio->iocb->ki_complete(iocb, ret, 0); ··· 365 363 366 364 ret = bio_iov_iter_get_pages(bio, iter); 367 365 if (unlikely(ret)) { 368 - bio->bi_error = -EIO; 366 + bio->bi_status = BLK_STS_IOERR; 369 367 bio_endio(bio); 370 368 break; 371 369 } ··· 415 413 __set_current_state(TASK_RUNNING); 416 414 417 415 if (!ret) 418 - ret = dio->bio.bi_error; 416 + ret = blk_status_to_errno(dio->bio.bi_status); 419 417 if (likely(!ret)) 420 418 ret = dio->size; 421 419
+2 -1
fs/btrfs/btrfs_inode.h
··· 310 310 * The original bio may be split to several sub-bios, this is 311 311 * done during endio of sub-bios 312 312 */ 313 - int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int); 313 + blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *, 314 + blk_status_t); 314 315 }; 315 316 316 317 /*
+2 -2
fs/btrfs/check-integrity.c
··· 2129 2129 /* mutex is not held! This is not save if IO is not yet completed 2130 2130 * on umount */ 2131 2131 iodone_w_error = 0; 2132 - if (bp->bi_error) 2132 + if (bp->bi_status) 2133 2133 iodone_w_error = 1; 2134 2134 2135 2135 BUG_ON(NULL == block); ··· 2143 2143 if ((dev_state->state->print_mask & 2144 2144 BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) 2145 2145 pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n", 2146 - bp->bi_error, 2146 + bp->bi_status, 2147 2147 btrfsic_get_block_type(dev_state->state, block), 2148 2148 block->logical_bytenr, dev_state->name, 2149 2149 block->dev_bytenr, block->mirror_num);
+22 -22
fs/btrfs/compression.c
··· 155 155 unsigned long index; 156 156 int ret; 157 157 158 - if (bio->bi_error) 158 + if (bio->bi_status) 159 159 cb->errors = 1; 160 160 161 161 /* if there are more bios still pending for this compressed ··· 268 268 struct page *page; 269 269 unsigned long index; 270 270 271 - if (bio->bi_error) 271 + if (bio->bi_status) 272 272 cb->errors = 1; 273 273 274 274 /* if there are more bios still pending for this compressed ··· 287 287 cb->start, 288 288 cb->start + cb->len - 1, 289 289 NULL, 290 - bio->bi_error ? 0 : 1); 290 + bio->bi_status ? 0 : 1); 291 291 cb->compressed_pages[0]->mapping = NULL; 292 292 293 293 end_compressed_writeback(inode, cb); ··· 320 320 * This also checksums the file bytes and gets things ready for 321 321 * the end io hooks. 322 322 */ 323 - int btrfs_submit_compressed_write(struct inode *inode, u64 start, 323 + blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, 324 324 unsigned long len, u64 disk_start, 325 325 unsigned long compressed_len, 326 326 struct page **compressed_pages, ··· 335 335 struct page *page; 336 336 u64 first_byte = disk_start; 337 337 struct block_device *bdev; 338 - int ret; 338 + blk_status_t ret; 339 339 int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 340 340 341 341 WARN_ON(start & ((u64)PAGE_SIZE - 1)); 342 342 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); 343 343 if (!cb) 344 - return -ENOMEM; 344 + return BLK_STS_RESOURCE; 345 345 refcount_set(&cb->pending_bios, 0); 346 346 cb->errors = 0; 347 347 cb->inode = inode; ··· 358 358 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); 359 359 if (!bio) { 360 360 kfree(cb); 361 - return -ENOMEM; 361 + return BLK_STS_RESOURCE; 362 362 } 363 363 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 364 364 bio->bi_private = cb; ··· 368 368 /* create and submit bios for the compressed pages */ 369 369 bytes_left = compressed_len; 370 370 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { 371 + int submit = 0; 
372 + 371 373 page = compressed_pages[pg_index]; 372 374 page->mapping = inode->i_mapping; 373 375 if (bio->bi_iter.bi_size) 374 - ret = io_tree->ops->merge_bio_hook(page, 0, 376 + submit = io_tree->ops->merge_bio_hook(page, 0, 375 377 PAGE_SIZE, 376 378 bio, 0); 377 - else 378 - ret = 0; 379 379 380 380 page->mapping = NULL; 381 - if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) < 381 + if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) < 382 382 PAGE_SIZE) { 383 383 bio_get(bio); 384 384 ··· 400 400 401 401 ret = btrfs_map_bio(fs_info, bio, 0, 1); 402 402 if (ret) { 403 - bio->bi_error = ret; 403 + bio->bi_status = ret; 404 404 bio_endio(bio); 405 405 } 406 406 ··· 434 434 435 435 ret = btrfs_map_bio(fs_info, bio, 0, 1); 436 436 if (ret) { 437 - bio->bi_error = ret; 437 + bio->bi_status = ret; 438 438 bio_endio(bio); 439 439 } 440 440 ··· 569 569 * After the compressed pages are read, we copy the bytes into the 570 570 * bio we were passed and then call the bio end_io calls 571 571 */ 572 - int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, 572 + blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, 573 573 int mirror_num, unsigned long bio_flags) 574 574 { 575 575 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ··· 586 586 u64 em_len; 587 587 u64 em_start; 588 588 struct extent_map *em; 589 - int ret = -ENOMEM; 589 + blk_status_t ret = BLK_STS_RESOURCE; 590 590 int faili = 0; 591 591 u32 *sums; 592 592 ··· 600 600 PAGE_SIZE); 601 601 read_unlock(&em_tree->lock); 602 602 if (!em) 603 - return -EIO; 603 + return BLK_STS_IOERR; 604 604 605 605 compressed_len = em->block_len; 606 606 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS); ··· 659 659 refcount_set(&cb->pending_bios, 1); 660 660 661 661 for (pg_index = 0; pg_index < nr_pages; pg_index++) { 662 + int submit = 0; 663 + 662 664 page = cb->compressed_pages[pg_index]; 663 665 page->mapping = inode->i_mapping; 664 666 page->index = em_start 
>> PAGE_SHIFT; 665 667 666 668 if (comp_bio->bi_iter.bi_size) 667 - ret = tree->ops->merge_bio_hook(page, 0, 669 + submit = tree->ops->merge_bio_hook(page, 0, 668 670 PAGE_SIZE, 669 671 comp_bio, 0); 670 - else 671 - ret = 0; 672 672 673 673 page->mapping = NULL; 674 - if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) < 674 + if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) < 675 675 PAGE_SIZE) { 676 676 bio_get(comp_bio); 677 677 ··· 697 697 698 698 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); 699 699 if (ret) { 700 - comp_bio->bi_error = ret; 700 + comp_bio->bi_status = ret; 701 701 bio_endio(comp_bio); 702 702 } 703 703 ··· 726 726 727 727 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0); 728 728 if (ret) { 729 - comp_bio->bi_error = ret; 729 + comp_bio->bi_status = ret; 730 730 bio_endio(comp_bio); 731 731 } 732 732
+2 -2
fs/btrfs/compression.h
··· 48 48 unsigned long total_out, u64 disk_start, 49 49 struct bio *bio); 50 50 51 - int btrfs_submit_compressed_write(struct inode *inode, u64 start, 51 + blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start, 52 52 unsigned long len, u64 disk_start, 53 53 unsigned long compressed_len, 54 54 struct page **compressed_pages, 55 55 unsigned long nr_pages); 56 - int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, 56 + blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, 57 57 int mirror_num, unsigned long bio_flags); 58 58 59 59 enum btrfs_compression_type {
+3 -3
fs/btrfs/ctree.h
··· 3078 3078 struct btrfs_dio_private; 3079 3079 int btrfs_del_csums(struct btrfs_trans_handle *trans, 3080 3080 struct btrfs_fs_info *fs_info, u64 bytenr, u64 len); 3081 - int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst); 3082 - int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, 3081 + blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst); 3082 + blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, 3083 3083 u64 logical_offset); 3084 3084 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, 3085 3085 struct btrfs_root *root, ··· 3094 3094 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, 3095 3095 struct btrfs_root *root, 3096 3096 struct btrfs_ordered_sum *sums); 3097 - int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, 3097 + blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, 3098 3098 u64 file_start, int contig); 3099 3099 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 3100 3100 struct list_head *list, int search_commit);
+37 -38
fs/btrfs/disk-io.c
··· 87 87 bio_end_io_t *end_io; 88 88 void *private; 89 89 struct btrfs_fs_info *info; 90 - int error; 90 + blk_status_t status; 91 91 enum btrfs_wq_endio_type metadata; 92 92 struct list_head list; 93 93 struct btrfs_work work; ··· 131 131 */ 132 132 u64 bio_offset; 133 133 struct btrfs_work work; 134 - int error; 134 + blk_status_t status; 135 135 }; 136 136 137 137 /* ··· 799 799 btrfs_work_func_t func; 800 800 801 801 fs_info = end_io_wq->info; 802 - end_io_wq->error = bio->bi_error; 802 + end_io_wq->status = bio->bi_status; 803 803 804 804 if (bio_op(bio) == REQ_OP_WRITE) { 805 805 if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { ··· 836 836 btrfs_queue_work(wq, &end_io_wq->work); 837 837 } 838 838 839 - int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, 839 + blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, 840 840 enum btrfs_wq_endio_type metadata) 841 841 { 842 842 struct btrfs_end_io_wq *end_io_wq; 843 843 844 844 end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS); 845 845 if (!end_io_wq) 846 - return -ENOMEM; 846 + return BLK_STS_RESOURCE; 847 847 848 848 end_io_wq->private = bio->bi_private; 849 849 end_io_wq->end_io = bio->bi_end_io; 850 850 end_io_wq->info = info; 851 - end_io_wq->error = 0; 851 + end_io_wq->status = 0; 852 852 end_io_wq->bio = bio; 853 853 end_io_wq->metadata = metadata; 854 854 ··· 868 868 static void run_one_async_start(struct btrfs_work *work) 869 869 { 870 870 struct async_submit_bio *async; 871 - int ret; 871 + blk_status_t ret; 872 872 873 873 async = container_of(work, struct async_submit_bio, work); 874 874 ret = async->submit_bio_start(async->inode, async->bio, 875 875 async->mirror_num, async->bio_flags, 876 876 async->bio_offset); 877 877 if (ret) 878 - async->error = ret; 878 + async->status = ret; 879 879 } 880 880 881 881 static void run_one_async_done(struct btrfs_work *work) ··· 898 898 wake_up(&fs_info->async_submit_wait); 899 899 900 900 /* If an error 
occurred we just want to clean up the bio and move on */ 901 - if (async->error) { 902 - async->bio->bi_error = async->error; 901 + if (async->status) { 902 + async->bio->bi_status = async->status; 903 903 bio_endio(async->bio); 904 904 return; 905 905 } ··· 916 916 kfree(async); 917 917 } 918 918 919 - int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, 920 - struct bio *bio, int mirror_num, 921 - unsigned long bio_flags, 922 - u64 bio_offset, 923 - extent_submit_bio_hook_t *submit_bio_start, 924 - extent_submit_bio_hook_t *submit_bio_done) 919 + blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, 920 + struct inode *inode, struct bio *bio, int mirror_num, 921 + unsigned long bio_flags, u64 bio_offset, 922 + extent_submit_bio_hook_t *submit_bio_start, 923 + extent_submit_bio_hook_t *submit_bio_done) 925 924 { 926 925 struct async_submit_bio *async; 927 926 928 927 async = kmalloc(sizeof(*async), GFP_NOFS); 929 928 if (!async) 930 - return -ENOMEM; 929 + return BLK_STS_RESOURCE; 931 930 932 931 async->inode = inode; 933 932 async->bio = bio; ··· 940 941 async->bio_flags = bio_flags; 941 942 async->bio_offset = bio_offset; 942 943 943 - async->error = 0; 944 + async->status = 0; 944 945 945 946 atomic_inc(&fs_info->nr_async_submits); 946 947 ··· 958 959 return 0; 959 960 } 960 961 961 - static int btree_csum_one_bio(struct bio *bio) 962 + static blk_status_t btree_csum_one_bio(struct bio *bio) 962 963 { 963 964 struct bio_vec *bvec; 964 965 struct btrfs_root *root; ··· 971 972 break; 972 973 } 973 974 974 - return ret; 975 + return errno_to_blk_status(ret); 975 976 } 976 977 977 - static int __btree_submit_bio_start(struct inode *inode, struct bio *bio, 978 - int mirror_num, unsigned long bio_flags, 979 - u64 bio_offset) 978 + static blk_status_t __btree_submit_bio_start(struct inode *inode, 979 + struct bio *bio, int mirror_num, unsigned long bio_flags, 980 + u64 bio_offset) 980 981 { 981 982 /* 982 983 * when we're called for a 
write, we're already in the async ··· 985 986 return btree_csum_one_bio(bio); 986 987 } 987 988 988 - static int __btree_submit_bio_done(struct inode *inode, struct bio *bio, 989 - int mirror_num, unsigned long bio_flags, 990 - u64 bio_offset) 989 + static blk_status_t __btree_submit_bio_done(struct inode *inode, 990 + struct bio *bio, int mirror_num, unsigned long bio_flags, 991 + u64 bio_offset) 991 992 { 992 - int ret; 993 + blk_status_t ret; 993 994 994 995 /* 995 996 * when we're called for a write, we're already in the async ··· 997 998 */ 998 999 ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1); 999 1000 if (ret) { 1000 - bio->bi_error = ret; 1001 + bio->bi_status = ret; 1001 1002 bio_endio(bio); 1002 1003 } 1003 1004 return ret; ··· 1014 1015 return 1; 1015 1016 } 1016 1017 1017 - static int btree_submit_bio_hook(struct inode *inode, struct bio *bio, 1018 + static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio, 1018 1019 int mirror_num, unsigned long bio_flags, 1019 1020 u64 bio_offset) 1020 1021 { 1021 1022 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1022 1023 int async = check_async_write(bio_flags); 1023 - int ret; 1024 + blk_status_t ret; 1024 1025 1025 1026 if (bio_op(bio) != REQ_OP_WRITE) { 1026 1027 /* ··· 1053 1054 return 0; 1054 1055 1055 1056 out_w_error: 1056 - bio->bi_error = ret; 1057 + bio->bi_status = ret; 1057 1058 bio_endio(bio); 1058 1059 return ret; 1059 1060 } ··· 1819 1820 end_io_wq = container_of(work, struct btrfs_end_io_wq, work); 1820 1821 bio = end_io_wq->bio; 1821 1822 1822 - bio->bi_error = end_io_wq->error; 1823 + bio->bi_status = end_io_wq->status; 1823 1824 bio->bi_private = end_io_wq->private; 1824 1825 bio->bi_end_io = end_io_wq->end_io; 1825 1826 kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); ··· 3494 3495 * any device where the flush fails with eopnotsupp are flagged as not-barrier 3495 3496 * capable 3496 3497 */ 3497 - static int write_dev_flush(struct btrfs_device 
*device, int wait) 3498 + static blk_status_t write_dev_flush(struct btrfs_device *device, int wait) 3498 3499 { 3499 3500 struct request_queue *q = bdev_get_queue(device->bdev); 3500 3501 struct bio *bio; 3501 - int ret = 0; 3502 + blk_status_t ret = 0; 3502 3503 3503 3504 if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) 3504 3505 return 0; ··· 3510 3511 3511 3512 wait_for_completion(&device->flush_wait); 3512 3513 3513 - if (bio->bi_error) { 3514 - ret = bio->bi_error; 3514 + if (bio->bi_status) { 3515 + ret = bio->bi_status; 3515 3516 btrfs_dev_stat_inc_and_print(device, 3516 3517 BTRFS_DEV_STAT_FLUSH_ERRS); 3517 3518 } ··· 3530 3531 device->flush_bio = NULL; 3531 3532 bio = btrfs_io_bio_alloc(GFP_NOFS, 0); 3532 3533 if (!bio) 3533 - return -ENOMEM; 3534 + return BLK_STS_RESOURCE; 3534 3535 3535 3536 bio->bi_end_io = btrfs_end_empty_barrier; 3536 3537 bio->bi_bdev = device->bdev; ··· 3555 3556 struct btrfs_device *dev; 3556 3557 int errors_send = 0; 3557 3558 int errors_wait = 0; 3558 - int ret; 3559 + blk_status_t ret; 3559 3560 3560 3561 /* send down all the barriers */ 3561 3562 head = &info->fs_devices->devices;
+6 -6
fs/btrfs/disk-io.h
··· 118 118 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); 119 119 u32 btrfs_csum_data(const char *data, u32 seed, size_t len); 120 120 void btrfs_csum_final(u32 crc, u8 *result); 121 - int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, 121 + blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, 122 122 enum btrfs_wq_endio_type metadata); 123 - int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, 124 - struct bio *bio, int mirror_num, 125 - unsigned long bio_flags, u64 bio_offset, 126 - extent_submit_bio_hook_t *submit_bio_start, 127 - extent_submit_bio_hook_t *submit_bio_done); 123 + blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, 124 + struct inode *inode, struct bio *bio, int mirror_num, 125 + unsigned long bio_flags, u64 bio_offset, 126 + extent_submit_bio_hook_t *submit_bio_start, 127 + extent_submit_bio_hook_t *submit_bio_done); 128 128 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); 129 129 int btrfs_write_tree_block(struct extent_buffer *buf); 130 130 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
+13 -10
fs/btrfs/extent_io.c
··· 2399 2399 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 2400 2400 struct bio *bio; 2401 2401 int read_mode = 0; 2402 + blk_status_t status; 2402 2403 int ret; 2403 2404 2404 2405 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); ··· 2432 2431 "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d", 2433 2432 read_mode, failrec->this_mirror, failrec->in_validation); 2434 2433 2435 - ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, 2434 + status = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, 2436 2435 failrec->bio_flags, 0); 2437 - if (ret) { 2436 + if (status) { 2438 2437 free_io_failure(BTRFS_I(inode), failrec); 2439 2438 bio_put(bio); 2439 + ret = blk_status_to_errno(status); 2440 2440 } 2441 2441 2442 2442 return ret; ··· 2476 2474 */ 2477 2475 static void end_bio_extent_writepage(struct bio *bio) 2478 2476 { 2477 + int error = blk_status_to_errno(bio->bi_status); 2479 2478 struct bio_vec *bvec; 2480 2479 u64 start; 2481 2480 u64 end; ··· 2506 2503 start = page_offset(page); 2507 2504 end = start + bvec->bv_offset + bvec->bv_len - 1; 2508 2505 2509 - end_extent_writepage(page, bio->bi_error, start, end); 2506 + end_extent_writepage(page, error, start, end); 2510 2507 end_page_writeback(page); 2511 2508 } 2512 2509 ··· 2539 2536 static void end_bio_extent_readpage(struct bio *bio) 2540 2537 { 2541 2538 struct bio_vec *bvec; 2542 - int uptodate = !bio->bi_error; 2539 + int uptodate = !bio->bi_status; 2543 2540 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 2544 2541 struct extent_io_tree *tree; 2545 2542 u64 offset = 0; ··· 2559 2556 2560 2557 btrfs_debug(fs_info, 2561 2558 "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u", 2562 - (u64)bio->bi_iter.bi_sector, bio->bi_error, 2559 + (u64)bio->bi_iter.bi_sector, bio->bi_status, 2563 2560 io_bio->mirror_num); 2564 2561 tree = &BTRFS_I(inode)->io_tree; 2565 2562 ··· 2618 2615 ret = bio_readpage_error(bio, offset, page, 2619 2616 start, end, 
mirror); 2620 2617 if (ret == 0) { 2621 - uptodate = !bio->bi_error; 2618 + uptodate = !bio->bi_status; 2622 2619 offset += len; 2623 2620 continue; 2624 2621 } ··· 2676 2673 endio_readpage_release_extent(tree, extent_start, extent_len, 2677 2674 uptodate); 2678 2675 if (io_bio->end_io) 2679 - io_bio->end_io(io_bio, bio->bi_error); 2676 + io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status)); 2680 2677 bio_put(bio); 2681 2678 } 2682 2679 ··· 2746 2743 static int __must_check submit_one_bio(struct bio *bio, int mirror_num, 2747 2744 unsigned long bio_flags) 2748 2745 { 2749 - int ret = 0; 2746 + blk_status_t ret = 0; 2750 2747 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 2751 2748 struct page *page = bvec->bv_page; 2752 2749 struct extent_io_tree *tree = bio->bi_private; ··· 2764 2761 btrfsic_submit_bio(bio); 2765 2762 2766 2763 bio_put(bio); 2767 - return ret; 2764 + return blk_status_to_errno(ret); 2768 2765 } 2769 2766 2770 2767 static int merge_bio(struct extent_io_tree *tree, struct page *page, ··· 3710 3707 BUG_ON(!eb); 3711 3708 done = atomic_dec_and_test(&eb->io_pages); 3712 3709 3713 - if (bio->bi_error || 3710 + if (bio->bi_status || 3714 3711 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { 3715 3712 ClearPageUptodate(page); 3716 3713 set_btree_ioerr(page);
+3 -3
fs/btrfs/extent_io.h
··· 92 92 struct btrfs_io_bio; 93 93 struct io_failure_record; 94 94 95 - typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio, 96 - int mirror_num, unsigned long bio_flags, 97 - u64 bio_offset); 95 + typedef blk_status_t (extent_submit_bio_hook_t)(struct inode *inode, 96 + struct bio *bio, int mirror_num, unsigned long bio_flags, 97 + u64 bio_offset); 98 98 struct extent_io_ops { 99 99 /* 100 100 * The following callbacks must be allways defined, the function
+7 -7
fs/btrfs/file-item.c
··· 160 160 kfree(bio->csum_allocated); 161 161 } 162 162 163 - static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, 163 + static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, 164 164 u64 logical_offset, u32 *dst, int dio) 165 165 { 166 166 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ··· 182 182 183 183 path = btrfs_alloc_path(); 184 184 if (!path) 185 - return -ENOMEM; 185 + return BLK_STS_RESOURCE; 186 186 187 187 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; 188 188 if (!dst) { ··· 191 191 csum_size, GFP_NOFS); 192 192 if (!btrfs_bio->csum_allocated) { 193 193 btrfs_free_path(path); 194 - return -ENOMEM; 194 + return BLK_STS_RESOURCE; 195 195 } 196 196 btrfs_bio->csum = btrfs_bio->csum_allocated; 197 197 btrfs_bio->end_io = btrfs_io_bio_endio_readpage; ··· 303 303 return 0; 304 304 } 305 305 306 - int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst) 306 + blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst) 307 307 { 308 308 return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0); 309 309 } 310 310 311 - int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset) 311 + blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset) 312 312 { 313 313 return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1); 314 314 } ··· 433 433 return ret; 434 434 } 435 435 436 - int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, 436 + blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, 437 437 u64 file_start, int contig) 438 438 { 439 439 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); ··· 452 452 sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size), 453 453 GFP_NOFS); 454 454 if (!sums) 455 - return -ENOMEM; 455 + return BLK_STS_RESOURCE; 456 456 457 457 sums->len = bio->bi_iter.bi_size; 458 458 INIT_LIST_HEAD(&sums->list);
+36 -37
fs/btrfs/inode.c
··· 842 842 NULL, EXTENT_LOCKED | EXTENT_DELALLOC, 843 843 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 844 844 PAGE_SET_WRITEBACK); 845 - ret = btrfs_submit_compressed_write(inode, 845 + if (btrfs_submit_compressed_write(inode, 846 846 async_extent->start, 847 847 async_extent->ram_size, 848 848 ins.objectid, 849 849 ins.offset, async_extent->pages, 850 - async_extent->nr_pages); 851 - if (ret) { 850 + async_extent->nr_pages)) { 852 851 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 853 852 struct page *p = async_extent->pages[0]; 854 853 const u64 start = async_extent->start; ··· 1900 1901 * At IO completion time the cums attached on the ordered extent record 1901 1902 * are inserted into the btree 1902 1903 */ 1903 - static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio, 1904 - int mirror_num, unsigned long bio_flags, 1905 - u64 bio_offset) 1904 + static blk_status_t __btrfs_submit_bio_start(struct inode *inode, 1905 + struct bio *bio, int mirror_num, unsigned long bio_flags, 1906 + u64 bio_offset) 1906 1907 { 1907 - int ret = 0; 1908 + blk_status_t ret = 0; 1908 1909 1909 1910 ret = btrfs_csum_one_bio(inode, bio, 0, 0); 1910 1911 BUG_ON(ret); /* -ENOMEM */ ··· 1919 1920 * At IO completion time the cums attached on the ordered extent record 1920 1921 * are inserted into the btree 1921 1922 */ 1922 - static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio, 1923 - int mirror_num, unsigned long bio_flags, 1924 - u64 bio_offset) 1923 + static blk_status_t __btrfs_submit_bio_done(struct inode *inode, 1924 + struct bio *bio, int mirror_num, unsigned long bio_flags, 1925 + u64 bio_offset) 1925 1926 { 1926 1927 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1927 - int ret; 1928 + blk_status_t ret; 1928 1929 1929 1930 ret = btrfs_map_bio(fs_info, bio, mirror_num, 1); 1930 1931 if (ret) { 1931 - bio->bi_error = ret; 1932 + bio->bi_status = ret; 1932 1933 bio_endio(bio); 1933 1934 } 1934 1935 return ret; ··· 1938 1939 * extent_io.c 
submission hook. This does the right thing for csum calculation 1939 1940 * on write, or reading the csums from the tree before a read 1940 1941 */ 1941 - static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, 1942 + static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, 1942 1943 int mirror_num, unsigned long bio_flags, 1943 1944 u64 bio_offset) 1944 1945 { 1945 1946 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 1946 1947 struct btrfs_root *root = BTRFS_I(inode)->root; 1947 1948 enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA; 1948 - int ret = 0; 1949 + blk_status_t ret = 0; 1949 1950 int skip_sum; 1950 1951 int async = !atomic_read(&BTRFS_I(inode)->sync_writers); 1951 1952 ··· 1990 1991 ret = btrfs_map_bio(fs_info, bio, mirror_num, 0); 1991 1992 1992 1993 out: 1993 - if (ret < 0) { 1994 - bio->bi_error = ret; 1994 + if (ret) { 1995 + bio->bi_status = ret; 1995 1996 bio_endio(bio); 1996 1997 } 1997 1998 return ret; ··· 8036 8037 struct bio_vec *bvec; 8037 8038 int i; 8038 8039 8039 - if (bio->bi_error) 8040 + if (bio->bi_status) 8040 8041 goto end; 8041 8042 8042 8043 ASSERT(bio->bi_vcnt == 1); ··· 8115 8116 int ret; 8116 8117 int i; 8117 8118 8118 - if (bio->bi_error) 8119 + if (bio->bi_status) 8119 8120 goto end; 8120 8121 8121 8122 uptodate = 1; ··· 8140 8141 bio_put(bio); 8141 8142 } 8142 8143 8143 - static int __btrfs_subio_endio_read(struct inode *inode, 8144 - struct btrfs_io_bio *io_bio, int err) 8144 + static blk_status_t __btrfs_subio_endio_read(struct inode *inode, 8145 + struct btrfs_io_bio *io_bio, blk_status_t err) 8145 8146 { 8146 8147 struct btrfs_fs_info *fs_info; 8147 8148 struct bio_vec *bvec; ··· 8183 8184 io_bio->mirror_num, 8184 8185 btrfs_retry_endio, &done); 8185 8186 if (ret) { 8186 - err = ret; 8187 + err = errno_to_blk_status(ret); 8187 8188 goto next; 8188 8189 } 8189 8190 ··· 8210 8211 return err; 8211 8212 } 8212 8213 8213 - static int btrfs_subio_endio_read(struct inode 
*inode, 8214 - struct btrfs_io_bio *io_bio, int err) 8214 + static blk_status_t btrfs_subio_endio_read(struct inode *inode, 8215 + struct btrfs_io_bio *io_bio, blk_status_t err) 8215 8216 { 8216 8217 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 8217 8218 ··· 8231 8232 struct inode *inode = dip->inode; 8232 8233 struct bio *dio_bio; 8233 8234 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 8234 - int err = bio->bi_error; 8235 + blk_status_t err = bio->bi_status; 8235 8236 8236 8237 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) 8237 8238 err = btrfs_subio_endio_read(inode, io_bio, err); ··· 8242 8243 8243 8244 kfree(dip); 8244 8245 8245 - dio_bio->bi_error = bio->bi_error; 8246 + dio_bio->bi_status = bio->bi_status; 8246 8247 dio_end_io(dio_bio); 8247 8248 8248 8249 if (io_bio->end_io) 8249 - io_bio->end_io(io_bio, err); 8250 + io_bio->end_io(io_bio, blk_status_to_errno(err)); 8250 8251 bio_put(bio); 8251 8252 } 8252 8253 ··· 8298 8299 struct bio *dio_bio = dip->dio_bio; 8299 8300 8300 8301 __endio_write_update_ordered(dip->inode, dip->logical_offset, 8301 - dip->bytes, !bio->bi_error); 8302 + dip->bytes, !bio->bi_status); 8302 8303 8303 8304 kfree(dip); 8304 8305 8305 - dio_bio->bi_error = bio->bi_error; 8306 + dio_bio->bi_status = bio->bi_status; 8306 8307 dio_end_io(dio_bio); 8307 8308 bio_put(bio); 8308 8309 } 8309 8310 8310 - static int __btrfs_submit_bio_start_direct_io(struct inode *inode, 8311 + static blk_status_t __btrfs_submit_bio_start_direct_io(struct inode *inode, 8311 8312 struct bio *bio, int mirror_num, 8312 8313 unsigned long bio_flags, u64 offset) 8313 8314 { 8314 - int ret; 8315 + blk_status_t ret; 8315 8316 ret = btrfs_csum_one_bio(inode, bio, offset, 1); 8316 8317 BUG_ON(ret); /* -ENOMEM */ 8317 8318 return 0; ··· 8320 8321 static void btrfs_end_dio_bio(struct bio *bio) 8321 8322 { 8322 8323 struct btrfs_dio_private *dip = bio->bi_private; 8323 - int err = bio->bi_error; 8324 + blk_status_t err = bio->bi_status; 8324 8325 
8325 8326 if (err) 8326 8327 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, ··· 8350 8351 if (dip->errors) { 8351 8352 bio_io_error(dip->orig_bio); 8352 8353 } else { 8353 - dip->dio_bio->bi_error = 0; 8354 + dip->dio_bio->bi_status = 0; 8354 8355 bio_endio(dip->orig_bio); 8355 8356 } 8356 8357 out: ··· 8367 8368 return bio; 8368 8369 } 8369 8370 8370 - static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode, 8371 + static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, 8371 8372 struct btrfs_dio_private *dip, 8372 8373 struct bio *bio, 8373 8374 u64 file_offset) 8374 8375 { 8375 8376 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 8376 8377 struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio); 8377 - int ret; 8378 + blk_status_t ret; 8378 8379 8379 8380 /* 8380 8381 * We load all the csum data we need when we submit ··· 8405 8406 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 8406 8407 struct btrfs_dio_private *dip = bio->bi_private; 8407 8408 bool write = bio_op(bio) == REQ_OP_WRITE; 8408 - int ret; 8409 + blk_status_t ret; 8409 8410 8410 8411 if (async_submit) 8411 8412 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); ··· 8648 8649 * callbacks - they require an allocated dip and a clone of dio_bio. 8649 8650 */ 8650 8651 if (io_bio && dip) { 8651 - io_bio->bi_error = -EIO; 8652 + io_bio->bi_status = BLK_STS_IOERR; 8652 8653 bio_endio(io_bio); 8653 8654 /* 8654 8655 * The end io callbacks free our dip, do the final put on io_bio ··· 8667 8668 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, 8668 8669 file_offset + dio_bio->bi_iter.bi_size - 1); 8669 8670 8670 - dio_bio->bi_error = -EIO; 8671 + dio_bio->bi_status = BLK_STS_IOERR; 8671 8672 /* 8672 8673 * Releases and cleans up our dio_bio, no need to bio_put() 8673 8674 * nor bio_endio()/bio_io_error() against dio_bio.
+8 -8
fs/btrfs/raid56.c
··· 871 871 * this frees the rbio and runs through all the bios in the 872 872 * bio_list and calls end_io on them 873 873 */ 874 - static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err) 874 + static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) 875 875 { 876 876 struct bio *cur = bio_list_get(&rbio->bio_list); 877 877 struct bio *next; ··· 884 884 while (cur) { 885 885 next = cur->bi_next; 886 886 cur->bi_next = NULL; 887 - cur->bi_error = err; 887 + cur->bi_status = err; 888 888 bio_endio(cur); 889 889 cur = next; 890 890 } ··· 897 897 static void raid_write_end_io(struct bio *bio) 898 898 { 899 899 struct btrfs_raid_bio *rbio = bio->bi_private; 900 - int err = bio->bi_error; 900 + blk_status_t err = bio->bi_status; 901 901 int max_errors; 902 902 903 903 if (err) ··· 914 914 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? 915 915 0 : rbio->bbio->max_errors; 916 916 if (atomic_read(&rbio->error) > max_errors) 917 - err = -EIO; 917 + err = BLK_STS_IOERR; 918 918 919 919 rbio_orig_end_io(rbio, err); 920 920 } ··· 1092 1092 * devices or if they are not contiguous 1093 1093 */ 1094 1094 if (last_end == disk_start && stripe->dev->bdev && 1095 - !last->bi_error && 1095 + !last->bi_status && 1096 1096 last->bi_bdev == stripe->dev->bdev) { 1097 1097 ret = bio_add_page(last, page, PAGE_SIZE, 0); 1098 1098 if (ret == PAGE_SIZE) ··· 1448 1448 { 1449 1449 struct btrfs_raid_bio *rbio = bio->bi_private; 1450 1450 1451 - if (bio->bi_error) 1451 + if (bio->bi_status) 1452 1452 fail_bio_stripe(rbio, bio); 1453 1453 else 1454 1454 set_bio_pages_uptodate(bio); ··· 1991 1991 * we only read stripe pages off the disk, set them 1992 1992 * up to date if there were no errors 1993 1993 */ 1994 - if (bio->bi_error) 1994 + if (bio->bi_status) 1995 1995 fail_bio_stripe(rbio, bio); 1996 1996 else 1997 1997 set_bio_pages_uptodate(bio); ··· 2530 2530 { 2531 2531 struct btrfs_raid_bio *rbio = bio->bi_private; 2532 2532 2533 - if (bio->bi_error) 
2533 + if (bio->bi_status) 2534 2534 fail_bio_stripe(rbio, bio); 2535 2535 else 2536 2536 set_bio_pages_uptodate(bio);
+13 -13
fs/btrfs/scrub.c
··· 95 95 struct scrub_ctx *sctx; 96 96 struct btrfs_device *dev; 97 97 struct bio *bio; 98 - int err; 98 + blk_status_t status; 99 99 u64 logical; 100 100 u64 physical; 101 101 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO ··· 1668 1668 1669 1669 struct scrub_bio_ret { 1670 1670 struct completion event; 1671 - int error; 1671 + blk_status_t status; 1672 1672 }; 1673 1673 1674 1674 static void scrub_bio_wait_endio(struct bio *bio) 1675 1675 { 1676 1676 struct scrub_bio_ret *ret = bio->bi_private; 1677 1677 1678 - ret->error = bio->bi_error; 1678 + ret->status = bio->bi_status; 1679 1679 complete(&ret->event); 1680 1680 } 1681 1681 ··· 1693 1693 int ret; 1694 1694 1695 1695 init_completion(&done.event); 1696 - done.error = 0; 1696 + done.status = 0; 1697 1697 bio->bi_iter.bi_sector = page->logical >> 9; 1698 1698 bio->bi_private = &done; 1699 1699 bio->bi_end_io = scrub_bio_wait_endio; ··· 1705 1705 return ret; 1706 1706 1707 1707 wait_for_completion(&done.event); 1708 - if (done.error) 1708 + if (done.status) 1709 1709 return -EIO; 1710 1710 1711 1711 return 0; ··· 1937 1937 bio->bi_bdev = sbio->dev->bdev; 1938 1938 bio->bi_iter.bi_sector = sbio->physical >> 9; 1939 1939 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 1940 - sbio->err = 0; 1940 + sbio->status = 0; 1941 1941 } else if (sbio->physical + sbio->page_count * PAGE_SIZE != 1942 1942 spage->physical_for_dev_replace || 1943 1943 sbio->logical + sbio->page_count * PAGE_SIZE != ··· 1992 1992 struct scrub_bio *sbio = bio->bi_private; 1993 1993 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; 1994 1994 1995 - sbio->err = bio->bi_error; 1995 + sbio->status = bio->bi_status; 1996 1996 sbio->bio = bio; 1997 1997 1998 1998 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, ··· 2007 2007 int i; 2008 2008 2009 2009 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO); 2010 - if (sbio->err) { 2010 + if (sbio->status) { 2011 2011 struct btrfs_dev_replace *dev_replace = 2012 2012 &sbio->sctx->fs_info->dev_replace; 2013 
2013 ··· 2341 2341 bio->bi_bdev = sbio->dev->bdev; 2342 2342 bio->bi_iter.bi_sector = sbio->physical >> 9; 2343 2343 bio_set_op_attrs(bio, REQ_OP_READ, 0); 2344 - sbio->err = 0; 2344 + sbio->status = 0; 2345 2345 } else if (sbio->physical + sbio->page_count * PAGE_SIZE != 2346 2346 spage->physical || 2347 2347 sbio->logical + sbio->page_count * PAGE_SIZE != ··· 2377 2377 struct scrub_block *sblock = bio->bi_private; 2378 2378 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; 2379 2379 2380 - if (bio->bi_error) 2380 + if (bio->bi_status) 2381 2381 sblock->no_io_error_seen = 0; 2382 2382 2383 2383 bio_put(bio); ··· 2588 2588 struct scrub_bio *sbio = bio->bi_private; 2589 2589 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; 2590 2590 2591 - sbio->err = bio->bi_error; 2591 + sbio->status = bio->bi_status; 2592 2592 sbio->bio = bio; 2593 2593 2594 2594 btrfs_queue_work(fs_info->scrub_workers, &sbio->work); ··· 2601 2601 int i; 2602 2602 2603 2603 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO); 2604 - if (sbio->err) { 2604 + if (sbio->status) { 2605 2605 for (i = 0; i < sbio->page_count; i++) { 2606 2606 struct scrub_page *spage = sbio->pagev[i]; 2607 2607 ··· 3004 3004 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; 3005 3005 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; 3006 3006 3007 - if (bio->bi_error) 3007 + if (bio->bi_status) 3008 3008 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, 3009 3009 sparity->nsectors); 3010 3010
+6 -5
fs/btrfs/volumes.c
··· 6042 6042 struct btrfs_bio *bbio = bio->bi_private; 6043 6043 int is_orig_bio = 0; 6044 6044 6045 - if (bio->bi_error) { 6045 + if (bio->bi_status) { 6046 6046 atomic_inc(&bbio->error); 6047 - if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) { 6047 + if (bio->bi_status == BLK_STS_IOERR || 6048 + bio->bi_status == BLK_STS_TARGET) { 6048 6049 unsigned int stripe_index = 6049 6050 btrfs_io_bio(bio)->stripe_index; 6050 6051 struct btrfs_device *dev; ··· 6083 6082 * beyond the tolerance of the btrfs bio 6084 6083 */ 6085 6084 if (atomic_read(&bbio->error) > bbio->max_errors) { 6086 - bio->bi_error = -EIO; 6085 + bio->bi_status = BLK_STS_IOERR; 6087 6086 } else { 6088 6087 /* 6089 6088 * this bio is actually up to date, we didn't 6090 6089 * go over the max number of errors 6091 6090 */ 6092 - bio->bi_error = 0; 6091 + bio->bi_status = 0; 6093 6092 } 6094 6093 6095 6094 btrfs_end_bbio(bbio, bio); ··· 6200 6199 6201 6200 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 6202 6201 bio->bi_iter.bi_sector = logical >> 9; 6203 - bio->bi_error = -EIO; 6202 + bio->bi_status = BLK_STS_IOERR; 6204 6203 btrfs_end_bbio(bbio, bio); 6205 6204 } 6206 6205 }
+1 -1
fs/buffer.c
··· 3038 3038 if (unlikely(bio_flagged(bio, BIO_QUIET))) 3039 3039 set_bit(BH_Quiet, &bh->b_state); 3040 3040 3041 - bh->b_end_io(bh, !bio->bi_error); 3041 + bh->b_end_io(bh, !bio->bi_status); 3042 3042 bio_put(bio); 3043 3043 } 3044 3044
+1 -1
fs/crypto/bio.c
··· 129 129 goto errout; 130 130 } 131 131 err = submit_bio_wait(bio); 132 - if ((err == 0) && bio->bi_error) 132 + if (err == 0 && bio->bi_status) 133 133 err = -EIO; 134 134 bio_put(bio); 135 135 if (err)
+4 -4
fs/direct-io.c
··· 294 294 dio_complete(dio, 0, true); 295 295 } 296 296 297 - static int dio_bio_complete(struct dio *dio, struct bio *bio); 297 + static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio); 298 298 299 299 /* 300 300 * Asynchronous IO callback. ··· 473 473 /* 474 474 * Process one completed BIO. No locks are held. 475 475 */ 476 - static int dio_bio_complete(struct dio *dio, struct bio *bio) 476 + static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio) 477 477 { 478 478 struct bio_vec *bvec; 479 479 unsigned i; 480 - int err = bio->bi_error; 480 + blk_status_t err = bio->bi_status; 481 481 482 482 if (err) 483 483 dio->io_error = -EIO; ··· 536 536 bio = dio->bio_list; 537 537 dio->bio_list = bio->bi_private; 538 538 spin_unlock_irqrestore(&dio->bio_lock, flags); 539 - ret2 = dio_bio_complete(dio, bio); 539 + ret2 = blk_status_to_errno(dio_bio_complete(dio, bio)); 540 540 if (ret == 0) 541 541 ret = ret2; 542 542 }
+7 -6
fs/ext4/page-io.c
··· 85 85 } 86 86 #endif 87 87 88 - if (bio->bi_error) { 88 + if (bio->bi_status) { 89 89 SetPageError(page); 90 90 mapping_set_error(page->mapping, -EIO); 91 91 } ··· 104 104 continue; 105 105 } 106 106 clear_buffer_async_write(bh); 107 - if (bio->bi_error) 107 + if (bio->bi_status) 108 108 buffer_io_error(bh); 109 109 } while ((bh = bh->b_this_page) != head); 110 110 bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); ··· 303 303 bdevname(bio->bi_bdev, b), 304 304 (long long) bio->bi_iter.bi_sector, 305 305 (unsigned) bio_sectors(bio), 306 - bio->bi_error)) { 306 + bio->bi_status)) { 307 307 ext4_finish_bio(bio); 308 308 bio_put(bio); 309 309 return; 310 310 } 311 311 bio->bi_end_io = NULL; 312 312 313 - if (bio->bi_error) { 313 + if (bio->bi_status) { 314 314 struct inode *inode = io_end->inode; 315 315 316 316 ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu " 317 317 "(offset %llu size %ld starting block %llu)", 318 - bio->bi_error, inode->i_ino, 318 + bio->bi_status, inode->i_ino, 319 319 (unsigned long long) io_end->offset, 320 320 (long) io_end->size, 321 321 (unsigned long long) 322 322 bi_sector >> (inode->i_blkbits - 9)); 323 - mapping_set_error(inode->i_mapping, bio->bi_error); 323 + mapping_set_error(inode->i_mapping, 324 + blk_status_to_errno(bio->bi_status)); 324 325 } 325 326 326 327 if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+2 -2
fs/ext4/readpage.c
··· 73 73 int i; 74 74 75 75 if (ext4_bio_encrypted(bio)) { 76 - if (bio->bi_error) { 76 + if (bio->bi_status) { 77 77 fscrypt_release_ctx(bio->bi_private); 78 78 } else { 79 79 fscrypt_decrypt_bio_pages(bio->bi_private, bio); ··· 83 83 bio_for_each_segment_all(bv, bio, i) { 84 84 struct page *page = bv->bv_page; 85 85 86 - if (!bio->bi_error) { 86 + if (!bio->bi_status) { 87 87 SetPageUptodate(page); 88 88 } else { 89 89 ClearPageUptodate(page);
+5 -5
fs/f2fs/data.c
··· 58 58 #ifdef CONFIG_F2FS_FAULT_INJECTION 59 59 if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) { 60 60 f2fs_show_injection_info(FAULT_IO); 61 - bio->bi_error = -EIO; 61 + bio->bi_status = BLK_STS_IOERR; 62 62 } 63 63 #endif 64 64 65 65 if (f2fs_bio_encrypted(bio)) { 66 - if (bio->bi_error) { 66 + if (bio->bi_status) { 67 67 fscrypt_release_ctx(bio->bi_private); 68 68 } else { 69 69 fscrypt_decrypt_bio_pages(bio->bi_private, bio); ··· 74 74 bio_for_each_segment_all(bvec, bio, i) { 75 75 struct page *page = bvec->bv_page; 76 76 77 - if (!bio->bi_error) { 77 + if (!bio->bi_status) { 78 78 if (!PageUptodate(page)) 79 79 SetPageUptodate(page); 80 80 } else { ··· 102 102 unlock_page(page); 103 103 mempool_free(page, sbi->write_io_dummy); 104 104 105 - if (unlikely(bio->bi_error)) 105 + if (unlikely(bio->bi_status)) 106 106 f2fs_stop_checkpoint(sbi, true); 107 107 continue; 108 108 } 109 109 110 110 fscrypt_pullback_bio_page(&page, true); 111 111 112 - if (unlikely(bio->bi_error)) { 112 + if (unlikely(bio->bi_status)) { 113 113 mapping_set_error(page->mapping, -EIO); 114 114 f2fs_stop_checkpoint(sbi, true); 115 115 }
+1 -1
fs/f2fs/segment.c
··· 749 749 { 750 750 struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; 751 751 752 - dc->error = bio->bi_error; 752 + dc->error = blk_status_to_errno(bio->bi_status); 753 753 dc->state = D_DONE; 754 754 complete(&dc->wait); 755 755 bio_put(bio);
+4 -4
fs/gfs2/lops.c
··· 170 170 */ 171 171 172 172 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, 173 - int error) 173 + blk_status_t error) 174 174 { 175 175 struct buffer_head *bh, *next; 176 176 struct page *page = bvec->bv_page; ··· 209 209 struct page *page; 210 210 int i; 211 211 212 - if (bio->bi_error) 213 - fs_err(sdp, "Error %d writing to log\n", bio->bi_error); 212 + if (bio->bi_status) 213 + fs_err(sdp, "Error %d writing to log\n", bio->bi_status); 214 214 215 215 bio_for_each_segment_all(bvec, bio, i) { 216 216 page = bvec->bv_page; 217 217 if (page_has_buffers(page)) 218 - gfs2_end_log_write_bh(sdp, bvec, bio->bi_error); 218 + gfs2_end_log_write_bh(sdp, bvec, bio->bi_status); 219 219 else 220 220 mempool_free(page, gfs2_page_pool); 221 221 }
+1 -1
fs/gfs2/meta_io.c
··· 201 201 do { 202 202 struct buffer_head *next = bh->b_this_page; 203 203 len -= bh->b_size; 204 - bh->b_end_io(bh, !bio->bi_error); 204 + bh->b_end_io(bh, !bio->bi_status); 205 205 bh = next; 206 206 } while (bh && len); 207 207 }
+2 -2
fs/gfs2/ops_fstype.c
··· 176 176 { 177 177 struct page *page = bio->bi_private; 178 178 179 - if (!bio->bi_error) 179 + if (!bio->bi_status) 180 180 SetPageUptodate(page); 181 181 else 182 - pr_warn("error %d reading superblock\n", bio->bi_error); 182 + pr_warn("error %d reading superblock\n", bio->bi_status); 183 183 unlock_page(page); 184 184 } 185 185
+2 -2
fs/iomap.c
··· 672 672 struct iomap_dio *dio = bio->bi_private; 673 673 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY); 674 674 675 - if (bio->bi_error) 676 - iomap_dio_set_error(dio, bio->bi_error); 675 + if (bio->bi_status) 676 + iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status)); 677 677 678 678 if (atomic_dec_and_test(&dio->ref)) { 679 679 if (is_sync_kiocb(dio->iocb)) {
+1 -1
fs/jfs/jfs_logmgr.c
··· 2205 2205 2206 2206 bp->l_flag |= lbmDONE; 2207 2207 2208 - if (bio->bi_error) { 2208 + if (bio->bi_status) { 2209 2209 bp->l_flag |= lbmERROR; 2210 2210 2211 2211 jfs_err("lbmIODone: I/O error in JFS log");
+2 -2
fs/jfs/jfs_metapage.c
··· 280 280 { 281 281 struct page *page = bio->bi_private; 282 282 283 - if (bio->bi_error) { 283 + if (bio->bi_status) { 284 284 printk(KERN_ERR "metapage_read_end_io: I/O error\n"); 285 285 SetPageError(page); 286 286 } ··· 337 337 338 338 BUG_ON(!PagePrivate(page)); 339 339 340 - if (bio->bi_error) { 340 + if (bio->bi_status) { 341 341 printk(KERN_ERR "metapage_write_end_io: I/O error\n"); 342 342 SetPageError(page); 343 343 }
+2 -1
fs/mpage.c
··· 50 50 51 51 bio_for_each_segment_all(bv, bio, i) { 52 52 struct page *page = bv->bv_page; 53 - page_endio(page, op_is_write(bio_op(bio)), bio->bi_error); 53 + page_endio(page, op_is_write(bio_op(bio)), 54 + blk_status_to_errno(bio->bi_status)); 54 55 } 55 56 56 57 bio_put(bio);
+2 -2
fs/nfs/blocklayout/blocklayout.c
··· 188 188 { 189 189 struct parallel_io *par = bio->bi_private; 190 190 191 - if (bio->bi_error) { 191 + if (bio->bi_status) { 192 192 struct nfs_pgio_header *header = par->data; 193 193 194 194 if (!header->pnfs_error) ··· 319 319 struct parallel_io *par = bio->bi_private; 320 320 struct nfs_pgio_header *header = par->data; 321 321 322 - if (bio->bi_error) { 322 + if (bio->bi_status) { 323 323 if (!header->pnfs_error) 324 324 header->pnfs_error = -EIO; 325 325 pnfs_set_lo_fail(header->lseg);
+1 -1
fs/nilfs2/segbuf.c
··· 338 338 { 339 339 struct nilfs_segment_buffer *segbuf = bio->bi_private; 340 340 341 - if (bio->bi_error) 341 + if (bio->bi_status) 342 342 atomic_inc(&segbuf->sb_err); 343 343 344 344 bio_put(bio);
+3 -3
fs/ocfs2/cluster/heartbeat.c
··· 516 516 { 517 517 struct o2hb_bio_wait_ctxt *wc = bio->bi_private; 518 518 519 - if (bio->bi_error) { 520 - mlog(ML_ERROR, "IO Error %d\n", bio->bi_error); 521 - wc->wc_error = bio->bi_error; 519 + if (bio->bi_status) { 520 + mlog(ML_ERROR, "IO Error %d\n", bio->bi_status); 521 + wc->wc_error = blk_status_to_errno(bio->bi_status); 522 522 } 523 523 524 524 o2hb_bio_wait_dec(wc, 1);
+4 -3
fs/xfs/xfs_aops.c
··· 276 276 struct xfs_inode *ip = XFS_I(ioend->io_inode); 277 277 xfs_off_t offset = ioend->io_offset; 278 278 size_t size = ioend->io_size; 279 - int error = ioend->io_bio->bi_error; 279 + int error; 280 280 281 281 /* 282 282 * Just clean up the in-memory strutures if the fs has been shut down. ··· 289 289 /* 290 290 * Clean up any COW blocks on an I/O error. 291 291 */ 292 + error = blk_status_to_errno(ioend->io_bio->bi_status); 292 293 if (unlikely(error)) { 293 294 switch (ioend->io_type) { 294 295 case XFS_IO_COW: ··· 333 332 else if (ioend->io_append_trans) 334 333 queue_work(mp->m_data_workqueue, &ioend->io_work); 335 334 else 336 - xfs_destroy_ioend(ioend, bio->bi_error); 335 + xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status)); 337 336 } 338 337 339 338 STATIC int ··· 501 500 * time. 502 501 */ 503 502 if (status) { 504 - ioend->io_bio->bi_error = status; 503 + ioend->io_bio->bi_status = errno_to_blk_status(status); 505 504 bio_endio(ioend->io_bio); 506 505 return status; 507 506 }
+5 -2
fs/xfs/xfs_buf.c
··· 1213 1213 * don't overwrite existing errors - otherwise we can lose errors on 1214 1214 * buffers that require multiple bios to complete. 1215 1215 */ 1216 - if (bio->bi_error) 1217 - cmpxchg(&bp->b_io_error, 0, bio->bi_error); 1216 + if (bio->bi_status) { 1217 + int error = blk_status_to_errno(bio->bi_status); 1218 + 1219 + cmpxchg(&bp->b_io_error, 0, error); 1220 + } 1218 1221 1219 1222 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) 1220 1223 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+1 -1
include/linux/bio.h
··· 414 414 415 415 static inline void bio_io_error(struct bio *bio) 416 416 { 417 - bio->bi_error = -EIO; 417 + bio->bi_status = BLK_STS_IOERR; 418 418 bio_endio(bio); 419 419 } 420 420
+4 -1
include/linux/blk_types.h
··· 33 33 #define BLK_STS_RESOURCE ((__force blk_status_t)9) 34 34 #define BLK_STS_IOERR ((__force blk_status_t)10) 35 35 36 + /* hack for device mapper, don't use elsewhere: */ 37 + #define BLK_STS_DM_REQUEUE ((__force blk_status_t)11) 38 + 36 39 struct blk_issue_stat { 37 40 u64 stat; 38 41 }; ··· 47 44 struct bio { 48 45 struct bio *bi_next; /* request queue link */ 49 46 struct block_device *bi_bdev; 50 - int bi_error; 47 + blk_status_t bi_status; 51 48 unsigned int bi_opf; /* bottom bits req flags, 52 49 * top bits REQ_OP. Use 53 50 * accessors.
+1 -1
include/linux/blkdev.h
··· 1782 1782 const char *disk_name; 1783 1783 }; 1784 1784 1785 - typedef int (integrity_processing_fn) (struct blk_integrity_iter *); 1785 + typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); 1786 1786 1787 1787 struct blk_integrity_profile { 1788 1788 integrity_processing_fn *generate_fn;
+1 -1
include/linux/device-mapper.h
··· 72 72 * 2 : The target wants to push back the io 73 73 */ 74 74 typedef int (*dm_endio_fn) (struct dm_target *ti, 75 - struct bio *bio, int *error); 75 + struct bio *bio, blk_status_t *error); 76 76 typedef int (*dm_request_endio_fn) (struct dm_target *ti, 77 77 struct request *clone, blk_status_t error, 78 78 union map_info *map_context);
+7 -7
kernel/power/swap.c
··· 225 225 struct hib_bio_batch { 226 226 atomic_t count; 227 227 wait_queue_head_t wait; 228 - int error; 228 + blk_status_t error; 229 229 }; 230 230 231 231 static void hib_init_batch(struct hib_bio_batch *hb) 232 232 { 233 233 atomic_set(&hb->count, 0); 234 234 init_waitqueue_head(&hb->wait); 235 - hb->error = 0; 235 + hb->error = BLK_STS_OK; 236 236 } 237 237 238 238 static void hib_end_io(struct bio *bio) ··· 240 240 struct hib_bio_batch *hb = bio->bi_private; 241 241 struct page *page = bio->bi_io_vec[0].bv_page; 242 242 243 - if (bio->bi_error) { 243 + if (bio->bi_status) { 244 244 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", 245 245 imajor(bio->bi_bdev->bd_inode), 246 246 iminor(bio->bi_bdev->bd_inode), ··· 253 253 flush_icache_range((unsigned long)page_address(page), 254 254 (unsigned long)page_address(page) + PAGE_SIZE); 255 255 256 - if (bio->bi_error && !hb->error) 257 - hb->error = bio->bi_error; 256 + if (bio->bi_status && !hb->error) 257 + hb->error = bio->bi_status; 258 258 if (atomic_dec_and_test(&hb->count)) 259 259 wake_up(&hb->wait); 260 260 ··· 293 293 return error; 294 294 } 295 295 296 - static int hib_wait_io(struct hib_bio_batch *hb) 296 + static blk_status_t hib_wait_io(struct hib_bio_batch *hb) 297 297 { 298 298 wait_event(hb->wait, atomic_read(&hb->count) == 0); 299 - return hb->error; 299 + return blk_status_to_errno(hb->error); 300 300 } 301 301 302 302 /*
+2 -2
kernel/trace/blktrace.c
··· 867 867 868 868 __blk_add_trace(bt, bio->bi_iter.bi_sector, 869 869 bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, 870 - BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu), 870 + BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), 871 871 &rpdu); 872 872 } 873 873 } ··· 900 900 r.sector_from = cpu_to_be64(from); 901 901 902 902 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 903 - bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error, 903 + bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, 904 904 sizeof(r), &r); 905 905 } 906 906
+2 -2
mm/page_io.c
··· 45 45 { 46 46 struct page *page = bio->bi_io_vec[0].bv_page; 47 47 48 - if (bio->bi_error) { 48 + if (bio->bi_status) { 49 49 SetPageError(page); 50 50 /* 51 51 * We failed to write the page out to swap-space. ··· 118 118 { 119 119 struct page *page = bio->bi_io_vec[0].bv_page; 120 120 121 - if (bio->bi_error) { 121 + if (bio->bi_status) { 122 122 SetPageError(page); 123 123 ClearPageUptodate(page); 124 124 pr_alert("Read-error on swap-device (%u:%u:%llu)\n",