Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: cleanup rq->data_len usages

With recent unification of fields, it's now guaranteed that
rq->data_len always equals blk_rq_bytes(). Convert all non-IDE direct
users to accessors. IDE will be converted in a separate patch.

Boaz: spotted incorrect data_len/resid_len conversion in osd.

[ Impact: convert direct rq->data_len usages to blk_rq_bytes() ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: Eric Moore <Eric.Moore@lsi.com>
Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: Darrick J. Wong <djwong@us.ibm.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Eric Moore <Eric.Moore@lsi.com>
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

Authored by Tejun Heo and committed by Jens Axboe
b0790410 2e46e8b2

+49 -49
+1 -1
drivers/ata/libata-scsi.c
··· 1084 1084 if (likely(!blk_pc_request(rq))) 1085 1085 return 0; 1086 1086 1087 - if (!rq->data_len || (rq->cmd_flags & REQ_RW)) 1087 + if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW)) 1088 1088 return 0; 1089 1089 1090 1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+4 -4
drivers/block/ub.c
··· 747 747 { 748 748 struct request *rq = urq->rq; 749 749 750 - if (rq->data_len == 0) { 750 + if (blk_rq_bytes(rq) == 0) { 751 751 cmd->dir = UB_DIR_NONE; 752 752 } else { 753 753 if (rq_data_dir(rq) == WRITE) ··· 762 762 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); 763 763 cmd->cdb_len = rq->cmd_len; 764 764 765 - cmd->len = rq->data_len; 765 + cmd->len = blk_rq_bytes(rq); 766 766 767 767 /* 768 768 * To reapply this to every URB is not as incorrect as it looks. ··· 783 783 784 784 if (cmd->error == 0) { 785 785 if (blk_pc_request(rq)) { 786 - if (cmd->act_len < rq->data_len) 787 - rq->resid_len = rq->data_len - cmd->act_len; 786 + if (cmd->act_len < blk_rq_bytes(rq)) 787 + rq->resid_len = blk_rq_bytes(rq) - cmd->act_len; 788 788 scsi_status = 0; 789 789 } else { 790 790 if (cmd->act_len != cmd->len) {
+10 -10
drivers/message/fusion/mptsas.c
··· 1277 1277 /* do we need to support multiple segments? */ 1278 1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1279 1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 1280 - ioc->name, __func__, req->bio->bi_vcnt, req->data_len, 1281 - rsp->bio->bi_vcnt, rsp->data_len); 1280 + ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req), 1281 + rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); 1282 1282 return -EINVAL; 1283 1283 } 1284 1284 ··· 1295 1295 smpreq = (SmpPassthroughRequest_t *)mf; 1296 1296 memset(smpreq, 0, sizeof(*smpreq)); 1297 1297 1298 - smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4); 1298 + smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); 1299 1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; 1300 1300 1301 1301 if (rphy) ··· 1321 1321 MPI_SGE_FLAGS_END_OF_BUFFER | 1322 1322 MPI_SGE_FLAGS_DIRECTION | 1323 1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; 1324 - flagsLength |= (req->data_len - 4); 1324 + flagsLength |= (blk_rq_bytes(req) - 4); 1325 1325 1326 1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 1327 - req->data_len, PCI_DMA_BIDIRECTIONAL); 1327 + blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 1328 1328 if (!dma_addr_out) 1329 1329 goto put_mf; 1330 1330 mpt_add_sge(psge, flagsLength, dma_addr_out); ··· 1332 1332 1333 1333 /* response */ 1334 1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 1335 - flagsLength |= rsp->data_len + 4; 1335 + flagsLength |= blk_rq_bytes(rsp) + 4; 1336 1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 1337 - rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1337 + blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 1338 1338 if (!dma_addr_in) 1339 1339 goto unmap; 1340 1340 mpt_add_sge(psge, flagsLength, dma_addr_in); ··· 1357 1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 1358 1358 memcpy(req->sense, smprep, sizeof(*smprep)); 1359 1359 req->sense_len = sizeof(*smprep); 1360 - rsp->resid_len = rsp->data_len - smprep->ResponseDataLength; 1360 + rsp->resid_len = blk_rq_bytes(rsp) - smprep->ResponseDataLength; 1361 1361 } else { 1362 1362 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 1363 1363 ioc->name, __func__); ··· 1365 1365 } 1366 1366 unmap: 1367 1367 if (dma_addr_out) 1368 - pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len, 1368 + pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req), 1369 1369 PCI_DMA_BIDIRECTIONAL); 1370 1370 if (dma_addr_in) 1371 - pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len, 1371 + pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp), 1372 1372 PCI_DMA_BIDIRECTIONAL); 1373 1373 put_mf: 1374 1374 if (mf)
+1 -1
drivers/message/i2o/i2o_block.c
··· 430 430 int leftover = (blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT); 431 431 432 432 if (blk_pc_request(req)) 433 - leftover = req->data_len; 433 + leftover = blk_rq_bytes(req); 434 434 435 435 if (error) 436 436 blk_end_request(req, -EIO, leftover);
+4 -4
drivers/scsi/libsas/sas_expander.c
··· 1927 1927 /* do we need to support multiple segments? */ 1928 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1929 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1930 - __func__, req->bio->bi_vcnt, req->data_len, 1931 - rsp->bio->bi_vcnt, rsp->data_len); 1930 + __func__, req->bio->bi_vcnt, blk_rq_bytes(req), 1931 + rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); 1932 1932 return -EINVAL; 1933 1933 } 1934 1934 1935 - ret = smp_execute_task(dev, bio_data(req->bio), req->data_len, 1936 - bio_data(rsp->bio), rsp->data_len); 1935 + ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req), 1936 + bio_data(rsp->bio), blk_rq_bytes(rsp)); 1937 1937 if (ret > 0) { 1938 1938 /* positive number is the untransferred residual */ 1939 1939 rsp->resid_len = ret;
+9 -9
drivers/scsi/libsas/sas_host_smp.c
··· 137 137 int error = -EINVAL; 138 138 139 139 /* eight is the minimum size for request and response frames */ 140 - if (req->data_len < 8 || rsp->data_len < 8) 140 + if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8) 141 141 goto out; 142 142 143 - if (bio_offset(req->bio) + req->data_len > PAGE_SIZE || 144 - bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) { 143 + if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE || 144 + bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) { 145 145 shost_printk(KERN_ERR, shost, 146 146 "SMP request/response frame crosses page boundary"); 147 147 goto out; 148 148 } 149 149 150 - req_data = kzalloc(req->data_len, GFP_KERNEL); 150 + req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL); 151 151 152 152 /* make sure frame can always be built ... we copy 153 153 * back only the requested length */ 154 - resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL); 154 + resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL); 155 155 156 156 if (!req_data || !resp_data) { 157 157 error = -ENOMEM; ··· 160 160 161 161 local_irq_disable(); 162 162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 163 - memcpy(req_data, buf, req->data_len); 163 + memcpy(req_data, buf, blk_rq_bytes(req)); 164 164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 165 165 local_irq_enable(); 166 166 ··· 176 176 resp_data[1] = req_data[1]; 177 177 resp_data[2] = SMP_RESP_FUNC_UNK; 178 178 179 - req->resid_len = req->data_len; 180 - rsp->resid_len = rsp->data_len; 179 + req->resid_len = blk_rq_bytes(req); 180 + rsp->resid_len = blk_rq_bytes(rsp); 181 181 182 182 switch (req_data[1]) { 183 183 case SMP_REPORT_GENERAL: ··· 264 264 265 265 local_irq_disable(); 266 266 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 267 - memcpy(buf, resp_data, rsp->data_len); 267 + memcpy(buf, resp_data, blk_rq_bytes(rsp)); 268 268 flush_kernel_dcache_page(bio_page(rsp->bio)); 269 269 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 270 270 local_irq_enable();
+11 -10
drivers/scsi/mpt2sas/mpt2sas_transport.c
··· 1041 1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1042 1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, " 1043 1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt, 1044 - req->data_len, rsp->bio->bi_vcnt, rsp->data_len); 1044 + blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); 1045 1045 return -EINVAL; 1046 1046 } 1047 1047 ··· 1104 1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1105 1105 cpu_to_le64(rphy->identify.sas_address) : 1106 1106 cpu_to_le64(ioc->sas_hba.sas_address); 1107 - mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4); 1107 + mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); 1108 1108 psge = &mpi_request->SGL; 1109 1109 1110 1110 /* WRITE sgel first */ ··· 1112 1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 1113 1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1114 1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1115 - req->data_len, PCI_DMA_BIDIRECTIONAL); 1115 + blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 1116 1116 if (!dma_addr_out) { 1117 1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1118 1118 goto unmap; 1119 1119 } 1120 1120 1121 - ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4), 1121 + ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4), 1122 1122 dma_addr_out); 1123 1123 1124 1124 /* incr sgel */ ··· 1129 1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 1130 1130 MPI2_SGE_FLAGS_END_OF_LIST); 1131 1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1132 - dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1133 - rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1132 + dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1133 + blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 1134 1134 if (!dma_addr_in) { 1135 1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1136 1136 goto unmap; 1137 1137 } 1138 1138 1139 - ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4), 1139 + ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4), 1140 1140 dma_addr_in); 1141 1141 1142 1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " ··· 1170 1170 1171 1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 1172 1172 req->sense_len = sizeof(*mpi_reply); 1173 - rsp->resid_len = rsp->data_len - mpi_reply->ResponseDataLength; 1173 + rsp->resid_len = blk_rq_bytes(rsp) - 1174 + mpi_reply->ResponseDataLength; 1174 1175 } else { 1175 1176 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1176 1177 "%s - no reply\n", ioc->name, __func__)); ··· 1187 1186 1188 1187 unmap: 1189 1188 if (dma_addr_out) 1190 - pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len, 1189 + pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req), 1191 1190 PCI_DMA_BIDIRECTIONAL); 1192 1191 if (dma_addr_in) 1193 - pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len, 1192 + pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp), 1194 1193 PCI_DMA_BIDIRECTIONAL); 1195 1194 1196 1195 out:
+2 -2
drivers/scsi/osd/osd_initiator.c
··· 1299 1299 return ret; 1300 1300 } 1301 1301 OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n", 1302 - _LLU(or->out.total_bytes), or->out.req->data_len); 1302 + _LLU(or->out.total_bytes), blk_rq_bytes(or->out.req)); 1303 1303 } 1304 1304 if (or->in.bio) { 1305 1305 ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio); ··· 1308 1308 return ret; 1309 1309 } 1310 1310 OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n", 1311 - _LLU(or->in.total_bytes), or->in.req->data_len); 1311 + _LLU(or->in.total_bytes), blk_rq_bytes(or->in.req)); 1312 1312 } 1313 1313 1314 1314 or->out.pad_buff = sg_out_pad_buffer;
+6 -7
drivers/scsi/scsi_lib.c
··· 682 682 static void scsi_end_bidi_request(struct scsi_cmnd *cmd) 683 683 { 684 684 struct request *req = cmd->request; 685 - unsigned int dlen = req->data_len; 686 - unsigned int next_dlen = req->next_rq->data_len; 687 685 688 686 req->resid_len = scsi_out(cmd)->resid; 689 687 req->next_rq->resid_len = scsi_in(cmd)->resid; 690 688 691 689 /* The req and req->next_rq have not been completed */ 692 - BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen)); 690 + BUG_ON(blk_end_bidi_request(req, 0, blk_rq_bytes(req), 691 + blk_rq_bytes(req->next_rq))); 693 692 694 693 scsi_release_buffers(cmd); 695 694 ··· 965 966 BUG_ON(count > sdb->table.nents); 966 967 sdb->table.nents = count; 967 968 if (blk_pc_request(req)) 968 - sdb->length = req->data_len; 969 + sdb->length = blk_rq_bytes(req); 969 970 else 970 971 sdb->length = blk_rq_sectors(req) << 9; 971 972 return BLKPREP_OK; ··· 1086 1087 if (unlikely(ret)) 1087 1088 return ret; 1088 1089 } else { 1089 - BUG_ON(req->data_len); 1090 + BUG_ON(blk_rq_bytes(req)); 1090 1091 1091 1092 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1092 1093 req->buffer = NULL; 1093 1094 } 1094 1095 1095 1096 cmd->cmd_len = req->cmd_len; 1096 - if (!req->data_len) 1097 + if (!blk_rq_bytes(req)) 1097 1098 cmd->sc_data_direction = DMA_NONE; 1098 1099 else if (rq_data_dir(req) == WRITE) 1099 1100 cmd->sc_data_direction = DMA_TO_DEVICE; 1100 1101 else 1101 1102 cmd->sc_data_direction = DMA_FROM_DEVICE; 1102 1103 1103 - cmd->transfersize = req->data_len; 1104 + cmd->transfersize = blk_rq_bytes(req); 1104 1105 cmd->allowed = req->retries; 1105 1106 return BLKPREP_OK; 1106 1107 }
+1 -1
drivers/scsi/scsi_tgt_lib.c
··· 387 387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the 388 388 * length for us. 389 389 */ 390 - cmd->sdb.length = rq->data_len; 390 + cmd->sdb.length = blk_rq_bytes(rq); 391 391 392 392 return 0; 393 393