Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: return blk_status_t from scsi_init_io and ->init_command

Replace the old BLKPREP_* values with the BLK_STS_ ones that they are
converted to later anyway.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
159b2cbf 14784565

+78 -85
+23 -22
drivers/scsi/scsi_lib.c
··· 1005 1005 scsi_io_completion_action(cmd, result); 1006 1006 } 1007 1007 1008 - static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) 1008 + static blk_status_t scsi_init_sgtable(struct request *req, 1009 + struct scsi_data_buffer *sdb) 1009 1010 { 1010 1011 int count; 1011 1012 ··· 1015 1014 */ 1016 1015 if (unlikely(sg_alloc_table_chained(&sdb->table, 1017 1016 blk_rq_nr_phys_segments(req), sdb->table.sgl))) 1018 - return BLKPREP_DEFER; 1017 + return BLK_STS_RESOURCE; 1019 1018 1020 1019 /* 1021 1020 * Next, walk the list, and fill in the addresses and sizes of ··· 1025 1024 BUG_ON(count > sdb->table.nents); 1026 1025 sdb->table.nents = count; 1027 1026 sdb->length = blk_rq_payload_bytes(req); 1028 - return BLKPREP_OK; 1027 + return BLK_STS_OK; 1029 1028 } 1030 1029 1031 1030 /* ··· 1035 1034 * 1036 1035 * Arguments: cmd - Command descriptor we wish to initialize 1037 1036 * 1038 - * Returns: 0 on success 1039 - * BLKPREP_DEFER if the failure is retryable 1040 - * BLKPREP_KILL if the failure is fatal 1037 + * Returns: BLK_STS_OK on success 1038 + * BLK_STS_RESOURCE if the failure is retryable 1039 + * BLK_STS_IOERR if the failure is fatal 1041 1040 */ 1042 - int scsi_init_io(struct scsi_cmnd *cmd) 1041 + blk_status_t scsi_init_io(struct scsi_cmnd *cmd) 1043 1042 { 1044 1043 struct request *rq = cmd->request; 1045 - int error = BLKPREP_KILL; 1044 + blk_status_t ret; 1046 1045 1047 1046 if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq))) 1048 - return BLKPREP_KILL; 1047 + return BLK_STS_IOERR; 1049 1048 1050 - error = scsi_init_sgtable(rq, &cmd->sdb); 1051 - if (error) 1052 - return error; 1049 + ret = scsi_init_sgtable(rq, &cmd->sdb); 1050 + if (ret) 1051 + return ret; 1053 1052 1054 1053 if (blk_bidi_rq(rq)) { 1055 - error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special); 1056 - if (error) 1054 + ret = scsi_init_sgtable(rq->next_rq, rq->next_rq->special); 1055 + if (ret) 1057 1056 goto out_free_sgtables; 1058 1057 } 1059 1058 ··· 
1067 1066 * queues a command to a device on an adapter 1068 1067 * that does not support DIX. 1069 1068 */ 1070 - error = BLKPREP_KILL; 1069 + ret = BLK_STS_IOERR; 1071 1070 goto out_free_sgtables; 1072 1071 } 1073 1072 ··· 1075 1074 1076 1075 if (sg_alloc_table_chained(&prot_sdb->table, ivecs, 1077 1076 prot_sdb->table.sgl)) { 1078 - error = BLKPREP_DEFER; 1077 + ret = BLK_STS_RESOURCE; 1079 1078 goto out_free_sgtables; 1080 1079 } 1081 1080 ··· 1088 1087 cmd->prot_sdb->table.nents = count; 1089 1088 } 1090 1089 1091 - return BLKPREP_OK; 1090 + return BLK_STS_OK; 1092 1091 out_free_sgtables: 1093 1092 scsi_mq_free_sgtables(cmd); 1094 - return error; 1093 + return ret; 1095 1094 } 1096 1095 EXPORT_SYMBOL(scsi_init_io); 1097 1096 ··· 1201 1200 * submit a request without an attached bio. 1202 1201 */ 1203 1202 if (req->bio) { 1204 - int ret = scsi_init_io(cmd); 1205 - if (unlikely(ret)) 1206 - return prep_to_mq(ret); 1203 + blk_status_t ret = scsi_init_io(cmd); 1204 + if (unlikely(ret != BLK_STS_OK)) 1205 + return ret; 1207 1206 } else { 1208 1207 BUG_ON(blk_rq_bytes(req)); 1209 1208 ··· 1234 1233 1235 1234 cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd; 1236 1235 memset(cmd->cmnd, 0, BLK_MAX_CDB); 1237 - return prep_to_mq(scsi_cmd_to_driver(cmd)->init_command(cmd)); 1236 + return scsi_cmd_to_driver(cmd)->init_command(cmd); 1238 1237 } 1239 1238 1240 1239 static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
+38 -47
drivers/scsi/sd.c
··· 114 114 static int sd_suspend_runtime(struct device *); 115 115 static int sd_resume(struct device *); 116 116 static void sd_rescan(struct device *); 117 - static int sd_init_command(struct scsi_cmnd *SCpnt); 117 + static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt); 118 118 static void sd_uninit_command(struct scsi_cmnd *SCpnt); 119 119 static int sd_done(struct scsi_cmnd *); 120 120 static void sd_eh_reset(struct scsi_cmnd *); ··· 750 750 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 751 751 } 752 752 753 - static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) 753 + static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) 754 754 { 755 755 struct scsi_device *sdp = cmd->device; 756 756 struct request *rq = cmd->request; ··· 761 761 762 762 rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); 763 763 if (!rq->special_vec.bv_page) 764 - return BLKPREP_DEFER; 764 + return BLK_STS_RESOURCE; 765 765 rq->special_vec.bv_offset = 0; 766 766 rq->special_vec.bv_len = data_len; 767 767 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; ··· 784 784 return scsi_init_io(cmd); 785 785 } 786 786 787 - static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap) 787 + static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, 788 + bool unmap) 788 789 { 789 790 struct scsi_device *sdp = cmd->device; 790 791 struct request *rq = cmd->request; ··· 795 794 796 795 rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); 797 796 if (!rq->special_vec.bv_page) 798 - return BLKPREP_DEFER; 797 + return BLK_STS_RESOURCE; 799 798 rq->special_vec.bv_offset = 0; 800 799 rq->special_vec.bv_len = data_len; 801 800 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; ··· 815 814 return scsi_init_io(cmd); 816 815 } 817 816 818 - static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap) 817 + static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, 818 + bool unmap) 819 819 { 820 820 struct scsi_device *sdp = cmd->device; 821 821 struct 
request *rq = cmd->request; ··· 826 824 827 825 rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); 828 826 if (!rq->special_vec.bv_page) 829 - return BLKPREP_DEFER; 827 + return BLK_STS_RESOURCE; 830 828 rq->special_vec.bv_offset = 0; 831 829 rq->special_vec.bv_len = data_len; 832 830 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; ··· 846 844 return scsi_init_io(cmd); 847 845 } 848 846 849 - static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) 847 + static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) 850 848 { 851 849 struct request *rq = cmd->request; 852 850 struct scsi_device *sdp = cmd->device; ··· 864 862 } 865 863 866 864 if (sdp->no_write_same) 867 - return BLKPREP_INVALID; 865 + return BLK_STS_TARGET; 868 866 869 867 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) 870 868 return sd_setup_write_same16_cmnd(cmd, false); ··· 941 939 * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on 942 940 * the preference indicated by the target device. 
943 941 **/ 944 - static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) 942 + static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) 945 943 { 946 944 struct request *rq = cmd->request; 947 945 struct scsi_device *sdp = cmd->device; ··· 950 948 sector_t sector = blk_rq_pos(rq); 951 949 unsigned int nr_sectors = blk_rq_sectors(rq); 952 950 unsigned int nr_bytes = blk_rq_bytes(rq); 953 - int ret; 951 + blk_status_t ret; 954 952 955 953 if (sdkp->device->no_write_same) 956 - return BLKPREP_INVALID; 954 + return BLK_STS_TARGET; 957 955 958 956 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); 959 957 ··· 994 992 return ret; 995 993 } 996 994 997 - static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd) 995 + static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) 998 996 { 999 997 struct request *rq = cmd->request; 1000 998 ··· 1007 1005 cmd->allowed = SD_MAX_RETRIES; 1008 1006 1009 1007 rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; 1010 - return BLKPREP_OK; 1008 + return BLK_STS_OK; 1011 1009 } 1012 1010 1013 - static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) 1011 + static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) 1014 1012 { 1015 1013 struct request *rq = SCpnt->request; 1016 1014 struct scsi_device *sdp = SCpnt->device; ··· 1020 1018 sector_t threshold; 1021 1019 unsigned int this_count = blk_rq_sectors(rq); 1022 1020 unsigned int dif, dix; 1023 - int ret; 1024 1021 unsigned char protect; 1022 + blk_status_t ret; 1025 1023 1026 1024 ret = scsi_init_io(SCpnt); 1027 - if (ret != BLKPREP_OK) 1025 + if (ret != BLK_STS_OK) 1028 1026 return ret; 1029 1027 WARN_ON_ONCE(SCpnt != rq->special); 1030 - 1031 - /* from here on until we're complete, any goto out 1032 - * is used for a killable error condition */ 1033 - ret = BLKPREP_KILL; 1034 1028 1035 1029 SCSI_LOG_HLQUEUE(1, 1036 1030 scmd_printk(KERN_INFO, SCpnt, ··· 1040 1042 blk_rq_sectors(rq))); 1041 1043 SCSI_LOG_HLQUEUE(2, 
scmd_printk(KERN_INFO, SCpnt, 1042 1044 "Retry with 0x%p\n", SCpnt)); 1043 - goto out; 1045 + return BLK_STS_IOERR; 1044 1046 } 1045 1047 1046 1048 if (sdp->changed) { ··· 1049 1051 * the changed bit has been reset 1050 1052 */ 1051 1053 /* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */ 1052 - goto out; 1054 + return BLK_STS_IOERR; 1053 1055 } 1054 1056 1055 1057 /* ··· 1087 1089 if ((block & 1) || (blk_rq_sectors(rq) & 1)) { 1088 1090 scmd_printk(KERN_ERR, SCpnt, 1089 1091 "Bad block number requested\n"); 1090 - goto out; 1091 - } else { 1092 - block = block >> 1; 1093 - this_count = this_count >> 1; 1092 + return BLK_STS_IOERR; 1094 1093 } 1094 + block = block >> 1; 1095 + this_count = this_count >> 1; 1095 1096 } 1096 1097 if (sdp->sector_size == 2048) { 1097 1098 if ((block & 3) || (blk_rq_sectors(rq) & 3)) { 1098 1099 scmd_printk(KERN_ERR, SCpnt, 1099 1100 "Bad block number requested\n"); 1100 - goto out; 1101 - } else { 1102 - block = block >> 2; 1103 - this_count = this_count >> 2; 1101 + return BLK_STS_IOERR; 1104 1102 } 1103 + block = block >> 2; 1104 + this_count = this_count >> 2; 1105 1105 } 1106 1106 if (sdp->sector_size == 4096) { 1107 1107 if ((block & 7) || (blk_rq_sectors(rq) & 7)) { 1108 1108 scmd_printk(KERN_ERR, SCpnt, 1109 1109 "Bad block number requested\n"); 1110 - goto out; 1111 - } else { 1112 - block = block >> 3; 1113 - this_count = this_count >> 3; 1110 + return BLK_STS_IOERR; 1114 1111 } 1112 + block = block >> 3; 1113 + this_count = this_count >> 3; 1115 1114 } 1116 1115 if (rq_data_dir(rq) == WRITE) { 1117 1116 SCpnt->cmnd[0] = WRITE_6; ··· 1120 1125 SCpnt->cmnd[0] = READ_6; 1121 1126 } else { 1122 1127 scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq)); 1123 - goto out; 1128 + return BLK_STS_IOERR; 1124 1129 } 1125 1130 1126 1131 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, ··· 1140 1145 if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { 1141 1146 SCpnt->cmnd 
= mempool_alloc(sd_cdb_pool, GFP_ATOMIC); 1142 1147 1143 - if (unlikely(SCpnt->cmnd == NULL)) { 1144 - ret = BLKPREP_DEFER; 1145 - goto out; 1146 - } 1148 + if (unlikely(!SCpnt->cmnd)) 1149 + return BLK_STS_RESOURCE; 1147 1150 1148 1151 SCpnt->cmd_len = SD_EXT_CDB_SIZE; 1149 1152 memset(SCpnt->cmnd, 0, SCpnt->cmd_len); ··· 1209 1216 */ 1210 1217 scmd_printk(KERN_ERR, SCpnt, 1211 1218 "FUA write on READ/WRITE(6) drive\n"); 1212 - goto out; 1219 + return BLK_STS_IOERR; 1213 1220 } 1214 1221 1215 1222 SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f); ··· 1233 1240 * This indicates that the command is ready from our end to be 1234 1241 * queued. 1235 1242 */ 1236 - ret = BLKPREP_OK; 1237 - out: 1238 - return ret; 1243 + return BLK_STS_OK; 1239 1244 } 1240 1245 1241 - static int sd_init_command(struct scsi_cmnd *cmd) 1246 + static blk_status_t sd_init_command(struct scsi_cmnd *cmd) 1242 1247 { 1243 1248 struct request *rq = cmd->request; 1244 1249 ··· 1252 1261 case SD_LBP_ZERO: 1253 1262 return sd_setup_write_same10_cmnd(cmd, false); 1254 1263 default: 1255 - return BLKPREP_INVALID; 1264 + return BLK_STS_TARGET; 1256 1265 } 1257 1266 case REQ_OP_WRITE_ZEROES: 1258 1267 return sd_setup_write_zeroes_cmnd(cmd); ··· 1267 1276 return sd_zbc_setup_reset_cmnd(cmd); 1268 1277 default: 1269 1278 WARN_ON_ONCE(1); 1270 - return BLKPREP_KILL; 1279 + return BLK_STS_NOTSUPP; 1271 1280 } 1272 1281 } 1273 1282
+3 -3
drivers/scsi/sd.h
··· 271 271 272 272 extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer); 273 273 extern void sd_zbc_print_zones(struct scsi_disk *sdkp); 274 - extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd); 274 + extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd); 275 275 extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, 276 276 struct scsi_sense_hdr *sshdr); 277 277 extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, ··· 288 288 289 289 static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {} 290 290 291 - static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) 291 + static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) 292 292 { 293 - return BLKPREP_INVALID; 293 + return BLK_STS_TARGET; 294 294 } 295 295 296 296 static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
+5 -5
drivers/scsi/sd_zbc.c
··· 185 185 * 186 186 * Called from sd_init_command() for a REQ_OP_ZONE_RESET request. 187 187 */ 188 - int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) 188 + blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) 189 189 { 190 190 struct request *rq = cmd->request; 191 191 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); ··· 194 194 195 195 if (!sd_is_zoned(sdkp)) 196 196 /* Not a zoned device */ 197 - return BLKPREP_KILL; 197 + return BLK_STS_IOERR; 198 198 199 199 if (sdkp->device->changed) 200 - return BLKPREP_KILL; 200 + return BLK_STS_IOERR; 201 201 202 202 if (sector & (sd_zbc_zone_sectors(sdkp) - 1)) 203 203 /* Unaligned request */ 204 - return BLKPREP_KILL; 204 + return BLK_STS_IOERR; 205 205 206 206 cmd->cmd_len = 16; 207 207 memset(cmd->cmnd, 0, cmd->cmd_len); ··· 214 214 cmd->transfersize = 0; 215 215 cmd->allowed = 0; 216 216 217 - return BLKPREP_OK; 217 + return BLK_STS_OK; 218 218 } 219 219 220 220 /**
+6 -6
drivers/scsi/sr.c
··· 80 80 static DEFINE_MUTEX(sr_mutex); 81 81 static int sr_probe(struct device *); 82 82 static int sr_remove(struct device *); 83 - static int sr_init_command(struct scsi_cmnd *SCpnt); 83 + static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt); 84 84 static int sr_done(struct scsi_cmnd *); 85 85 static int sr_runtime_suspend(struct device *dev); 86 86 ··· 384 384 return good_bytes; 385 385 } 386 386 387 - static int sr_init_command(struct scsi_cmnd *SCpnt) 387 + static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) 388 388 { 389 389 int block = 0, this_count, s_size; 390 390 struct scsi_cd *cd; 391 391 struct request *rq = SCpnt->request; 392 - int ret; 392 + blk_status_t ret; 393 393 394 394 ret = scsi_init_io(SCpnt); 395 - if (ret != BLKPREP_OK) 395 + if (ret != BLK_STS_OK) 396 396 goto out; 397 397 WARN_ON_ONCE(SCpnt != rq->special); 398 398 cd = scsi_cd(rq->rq_disk); 399 399 400 400 /* from here on until we're complete, any goto out 401 401 * is used for a killable error condition */ 402 - ret = BLKPREP_KILL; 402 + ret = BLK_STS_IOERR; 403 403 404 404 SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, 405 405 "Doing sr request, block = %d\n", block)); ··· 516 516 * This indicates that the command is ready from our end to be 517 517 * queued. 518 518 */ 519 - ret = BLKPREP_OK; 519 + ret = BLK_STS_OK; 520 520 out: 521 521 return ret; 522 522 }
+1 -1
include/scsi/scsi_cmnd.h
··· 171 171 size_t *offset, size_t *len); 172 172 extern void scsi_kunmap_atomic_sg(void *virt); 173 173 174 - extern int scsi_init_io(struct scsi_cmnd *cmd); 174 + extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd); 175 175 176 176 #ifdef CONFIG_SCSI_DMA 177 177 extern int scsi_dma_map(struct scsi_cmnd *cmd);
+2 -1
include/scsi/scsi_driver.h
··· 2 2 #ifndef _SCSI_SCSI_DRIVER_H 3 3 #define _SCSI_SCSI_DRIVER_H 4 4 5 + #include <linux/blk_types.h> 5 6 #include <linux/device.h> 6 7 7 8 struct module; ··· 14 13 struct device_driver gendrv; 15 14 16 15 void (*rescan)(struct device *); 17 - int (*init_command)(struct scsi_cmnd *); 16 + blk_status_t (*init_command)(struct scsi_cmnd *); 18 17 void (*uninit_command)(struct scsi_cmnd *); 19 18 int (*done)(struct scsi_cmnd *); 20 19 int (*eh_action)(struct scsi_cmnd *, int);