Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: fnic: Stop using the SCSI pointer

Set .cmd_size in the SCSI host template instead of using the SCSI pointer
from struct scsi_cmnd. This patch prepares for removal of the SCSI pointer
from struct scsi_cmnd.

Link: https://lore.kernel.org/r/20220218195117.25689-23-bvanassche@acm.org
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

Authored by Bart Van Assche; committed by Martin K. Petersen.
924cb24d 3032ed77

+163 -154
+20 -7
drivers/scsi/fnic/fnic.h
··· 89 89 #define FNIC_DEV_RST_ABTS_PENDING BIT(21) 90 90 91 91 /* 92 - * Usage of the scsi_cmnd scratchpad. 92 + * fnic private data per SCSI command. 93 93 * These fields are locked by the hashed io_req_lock. 94 94 */ 95 - #define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) 96 - #define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase) 97 - #define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message) 98 - #define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) 99 - #define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command) 100 - #define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status) 95 + struct fnic_cmd_priv { 96 + struct fnic_io_req *io_req; 97 + enum fnic_ioreq_state state; 98 + u32 flags; 99 + u16 abts_status; 100 + u16 lr_status; 101 + }; 102 + 103 + static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd) 104 + { 105 + return scsi_cmd_priv(cmd); 106 + } 107 + 108 + static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) 109 + { 110 + struct fnic_cmd_priv *fcmd = fnic_priv(cmd); 111 + 112 + return ((u64)fcmd->flags << 32) | fcmd->state; 113 + } 101 114 102 115 #define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */ 103 116
+1
drivers/scsi/fnic/fnic_main.c
··· 124 124 .max_sectors = 0xffff, 125 125 .shost_groups = fnic_host_groups, 126 126 .track_queue_depth = 1, 127 + .cmd_size = sizeof(struct fnic_cmd_priv), 127 128 }; 128 129 129 130 static void
+142 -147
drivers/scsi/fnic/fnic_scsi.c
··· 497 497 * caller disabling them. 498 498 */ 499 499 spin_unlock(lp->host->host_lock); 500 - CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED; 501 - CMD_FLAGS(sc) = FNIC_NO_FLAGS; 500 + fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; 501 + fnic_priv(sc)->flags = FNIC_NO_FLAGS; 502 502 503 503 /* Get a new io_req for this SCSI IO */ 504 504 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); ··· 513 513 sg_count = scsi_dma_map(sc); 514 514 if (sg_count < 0) { 515 515 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, 516 - tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc)); 516 + tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state); 517 517 mempool_free(io_req, fnic->io_req_pool); 518 518 goto out; 519 519 } ··· 558 558 io_lock_acquired = 1; 559 559 io_req->port_id = rport->port_id; 560 560 io_req->start_time = jiffies; 561 - CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; 562 - CMD_SP(sc) = (char *)io_req; 563 - CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED; 561 + fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; 562 + fnic_priv(sc)->io_req = io_req; 563 + fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED; 564 564 565 565 /* create copy wq desc and enqueue it */ 566 566 wq = &fnic->wq_copy[0]; ··· 571 571 * refetch the pointer under the lock. 
572 572 */ 573 573 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, 574 - tag, sc, 0, 0, 0, 575 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 576 - io_req = (struct fnic_io_req *)CMD_SP(sc); 577 - CMD_SP(sc) = NULL; 578 - CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; 574 + tag, sc, 0, 0, 0, fnic_flags_and_state(sc)); 575 + io_req = fnic_priv(sc)->io_req; 576 + fnic_priv(sc)->io_req = NULL; 577 + fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; 579 578 spin_unlock_irqrestore(io_lock, flags); 580 579 if (io_req) { 581 580 fnic_release_ioreq_buf(fnic, io_req, sc); ··· 593 594 atomic64_read(&fnic_stats->io_stats.active_ios)); 594 595 595 596 /* REVISIT: Use per IO lock in the final code */ 596 - CMD_FLAGS(sc) |= FNIC_IO_ISSUED; 597 + fnic_priv(sc)->flags |= FNIC_IO_ISSUED; 597 598 } 598 599 out: 599 600 cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | ··· 602 603 sc->cmnd[5]); 603 604 604 605 FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, 605 - tag, sc, io_req, sg_count, cmd_trace, 606 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 606 + tag, sc, io_req, sg_count, cmd_trace, 607 + fnic_flags_and_state(sc)); 607 608 608 609 /* if only we issued IO, will we have the io lock */ 609 610 if (io_lock_acquired) ··· 866 867 867 868 io_lock = fnic_io_lock_hash(fnic, sc); 868 869 spin_lock_irqsave(io_lock, flags); 869 - io_req = (struct fnic_io_req *)CMD_SP(sc); 870 + io_req = fnic_priv(sc)->io_req; 870 871 WARN_ON_ONCE(!io_req); 871 872 if (!io_req) { 872 873 atomic64_inc(&fnic_stats->io_stats.ioreq_null); 873 - CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL; 874 + fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL; 874 875 spin_unlock_irqrestore(io_lock, flags); 875 876 shost_printk(KERN_ERR, fnic->lport->host, 876 877 "icmnd_cmpl io_req is null - " ··· 887 888 * if SCSI-ML has already issued abort on this command, 888 889 * set completion of the IO. 
The abts path will clean it up 889 890 */ 890 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 891 + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { 891 892 892 893 /* 893 894 * set the FNIC_IO_DONE so that this doesn't get 894 895 * flagged as 'out of order' if it was not aborted 895 896 */ 896 - CMD_FLAGS(sc) |= FNIC_IO_DONE; 897 - CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING; 897 + fnic_priv(sc)->flags |= FNIC_IO_DONE; 898 + fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING; 898 899 spin_unlock_irqrestore(io_lock, flags); 899 900 if(FCPIO_ABORTED == hdr_status) 900 - CMD_FLAGS(sc) |= FNIC_IO_ABORTED; 901 + fnic_priv(sc)->flags |= FNIC_IO_ABORTED; 901 902 902 903 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 903 904 "icmnd_cmpl abts pending " ··· 911 912 } 912 913 913 914 /* Mark the IO as complete */ 914 - CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; 915 + fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; 915 916 916 917 icmnd_cmpl = &desc->u.icmnd_cmpl; 917 918 ··· 982 983 } 983 984 984 985 /* Break link with the SCSI command */ 985 - CMD_SP(sc) = NULL; 986 - CMD_FLAGS(sc) |= FNIC_IO_DONE; 986 + fnic_priv(sc)->io_req = NULL; 987 + fnic_priv(sc)->flags |= FNIC_IO_DONE; 987 988 988 989 spin_unlock_irqrestore(io_lock, flags); 989 990 ··· 1008 1009 ((u64)icmnd_cmpl->_resvd0[1] << 56 | 1009 1010 (u64)icmnd_cmpl->_resvd0[0] << 48 | 1010 1011 jiffies_to_msecs(jiffies - start_time)), 1011 - desc, cmd_trace, 1012 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1012 + desc, cmd_trace, fnic_flags_and_state(sc)); 1013 1013 1014 1014 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 1015 1015 fnic->lport->host_stats.fcp_input_requests++; ··· 1093 1095 } 1094 1096 io_lock = fnic_io_lock_hash(fnic, sc); 1095 1097 spin_lock_irqsave(io_lock, flags); 1096 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1098 + io_req = fnic_priv(sc)->io_req; 1097 1099 WARN_ON_ONCE(!io_req); 1098 1100 if (!io_req) { 1099 1101 atomic64_inc(&fnic_stats->io_stats.ioreq_null); 1100 1102 
spin_unlock_irqrestore(io_lock, flags); 1101 - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; 1103 + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; 1102 1104 shost_printk(KERN_ERR, fnic->lport->host, 1103 1105 "itmf_cmpl io_req is null - " 1104 1106 "hdr status = %s tag = 0x%x sc 0x%p\n", ··· 1113 1115 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1114 1116 "dev reset abts cmpl recd. id %x status %s\n", 1115 1117 id, fnic_fcpio_status_to_str(hdr_status)); 1116 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 1117 - CMD_ABTS_STATUS(sc) = hdr_status; 1118 - CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; 1118 + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; 1119 + fnic_priv(sc)->abts_status = hdr_status; 1120 + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; 1119 1121 if (io_req->abts_done) 1120 1122 complete(io_req->abts_done); 1121 1123 spin_unlock_irqrestore(io_lock, flags); ··· 1125 1127 case FCPIO_SUCCESS: 1126 1128 break; 1127 1129 case FCPIO_TIMEOUT: 1128 - if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) 1130 + if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) 1129 1131 atomic64_inc(&abts_stats->abort_fw_timeouts); 1130 1132 else 1131 1133 atomic64_inc( ··· 1137 1139 (int)(id & FNIC_TAG_MASK)); 1138 1140 break; 1139 1141 case FCPIO_IO_NOT_FOUND: 1140 - if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) 1142 + if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) 1141 1143 atomic64_inc(&abts_stats->abort_io_not_found); 1142 1144 else 1143 1145 atomic64_inc( 1144 1146 &term_stats->terminate_io_not_found); 1145 1147 break; 1146 1148 default: 1147 - if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) 1149 + if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED) 1148 1150 atomic64_inc(&abts_stats->abort_failures); 1149 1151 else 1150 1152 atomic64_inc( 1151 1153 &term_stats->terminate_failures); 1152 1154 break; 1153 1155 } 1154 - if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { 1156 + if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) { 1155 1157 /* This is a late completion. 
Ignore it */ 1156 1158 spin_unlock_irqrestore(io_lock, flags); 1157 1159 return; 1158 1160 } 1159 1161 1160 - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; 1161 - CMD_ABTS_STATUS(sc) = hdr_status; 1162 + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; 1163 + fnic_priv(sc)->abts_status = hdr_status; 1162 1164 1163 1165 /* If the status is IO not found consider it as success */ 1164 1166 if (hdr_status == FCPIO_IO_NOT_FOUND) 1165 - CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS; 1167 + fnic_priv(sc)->abts_status = FCPIO_SUCCESS; 1166 1168 1167 - if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) 1169 + if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) 1168 1170 atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); 1169 1171 1170 1172 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ··· 1183 1185 } else { 1184 1186 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1185 1187 "abts cmpl, completing IO\n"); 1186 - CMD_SP(sc) = NULL; 1188 + fnic_priv(sc)->io_req = NULL; 1187 1189 sc->result = (DID_ERROR << 16); 1188 1190 1189 1191 spin_unlock_irqrestore(io_lock, flags); ··· 1200 1202 (u64)sc->cmnd[2] << 24 | 1201 1203 (u64)sc->cmnd[3] << 16 | 1202 1204 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1203 - (((u64)CMD_FLAGS(sc) << 32) | 1204 - CMD_STATE(sc))); 1205 + fnic_flags_and_state(sc)); 1205 1206 scsi_done(sc); 1206 1207 atomic64_dec(&fnic_stats->io_stats.active_ios); 1207 1208 if (atomic64_read(&fnic->io_cmpl_skip)) ··· 1210 1213 } 1211 1214 } else if (id & FNIC_TAG_DEV_RST) { 1212 1215 /* Completion of device reset */ 1213 - CMD_LR_STATUS(sc) = hdr_status; 1214 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 1216 + fnic_priv(sc)->lr_status = hdr_status; 1217 + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { 1215 1218 spin_unlock_irqrestore(io_lock, flags); 1216 - CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING; 1219 + fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING; 1217 1220 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, 1218 1221 sc->device->host->host_no, id, sc, 1219 1222 
jiffies_to_msecs(jiffies - start_time), 1220 - desc, 0, 1221 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1223 + desc, 0, fnic_flags_and_state(sc)); 1222 1224 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1223 1225 "Terminate pending " 1224 1226 "dev reset cmpl recd. id %d status %s\n", ··· 1225 1229 fnic_fcpio_status_to_str(hdr_status)); 1226 1230 return; 1227 1231 } 1228 - if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) { 1232 + if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) { 1229 1233 /* Need to wait for terminate completion */ 1230 1234 spin_unlock_irqrestore(io_lock, flags); 1231 1235 FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, 1232 1236 sc->device->host->host_no, id, sc, 1233 1237 jiffies_to_msecs(jiffies - start_time), 1234 - desc, 0, 1235 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1238 + desc, 0, fnic_flags_and_state(sc)); 1236 1239 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1237 1240 "dev reset cmpl recd after time out. " 1238 1241 "id %d status %s\n", ··· 1239 1244 fnic_fcpio_status_to_str(hdr_status)); 1240 1245 return; 1241 1246 } 1242 - CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; 1243 - CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; 1247 + fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; 1248 + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; 1244 1249 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1245 1250 "dev reset cmpl recd. 
id %d status %s\n", 1246 1251 (int)(id & FNIC_TAG_MASK), ··· 1252 1257 } else { 1253 1258 shost_printk(KERN_ERR, fnic->lport->host, 1254 1259 "Unexpected itmf io state %s tag %x\n", 1255 - fnic_ioreq_state_to_str(CMD_STATE(sc)), id); 1260 + fnic_ioreq_state_to_str(fnic_priv(sc)->state), id); 1256 1261 spin_unlock_irqrestore(io_lock, flags); 1257 1262 } 1258 1263 ··· 1365 1370 io_lock = fnic_io_lock_tag(fnic, tag); 1366 1371 spin_lock_irqsave(io_lock, flags); 1367 1372 1368 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1369 - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 1370 - !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { 1373 + io_req = fnic_priv(sc)->io_req; 1374 + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && 1375 + !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { 1371 1376 /* 1372 1377 * We will be here only when FW completes reset 1373 1378 * without sending completions for outstanding ios. 1374 1379 */ 1375 - CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; 1380 + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; 1376 1381 if (io_req && io_req->dr_done) 1377 1382 complete(io_req->dr_done); 1378 1383 else if (io_req && io_req->abts_done) 1379 1384 complete(io_req->abts_done); 1380 1385 spin_unlock_irqrestore(io_lock, flags); 1381 1386 return true; 1382 - } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 1387 + } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { 1383 1388 spin_unlock_irqrestore(io_lock, flags); 1384 1389 return true; 1385 1390 } ··· 1388 1393 goto cleanup_scsi_cmd; 1389 1394 } 1390 1395 1391 - CMD_SP(sc) = NULL; 1396 + fnic_priv(sc)->io_req = NULL; 1392 1397 1393 1398 spin_unlock_irqrestore(io_lock, flags); 1394 1399 ··· 1412 1417 atomic64_inc(&fnic_stats->io_stats.io_completions); 1413 1418 1414 1419 /* Complete the command to SCSI */ 1415 - if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) 1420 + if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) 1416 1421 shost_printk(KERN_ERR, fnic->lport->host, 1417 1422 "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", 1418 1423 tag, 
sc); ··· 1424 1429 (u64)sc->cmnd[2] << 24 | 1425 1430 (u64)sc->cmnd[3] << 16 | 1426 1431 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1427 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1432 + fnic_flags_and_state(sc)); 1428 1433 1429 1434 scsi_done(sc); 1430 1435 ··· 1463 1468 spin_lock_irqsave(io_lock, flags); 1464 1469 1465 1470 /* Get the IO context which this desc refers to */ 1466 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1471 + io_req = fnic_priv(sc)->io_req; 1467 1472 1468 1473 /* fnic interrupts are turned off by now */ 1469 1474 ··· 1472 1477 goto wq_copy_cleanup_scsi_cmd; 1473 1478 } 1474 1479 1475 - CMD_SP(sc) = NULL; 1480 + fnic_priv(sc)->io_req = NULL; 1476 1481 1477 1482 spin_unlock_irqrestore(io_lock, flags); 1478 1483 ··· 1491 1496 0, ((u64)sc->cmnd[0] << 32 | 1492 1497 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | 1493 1498 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1494 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1499 + fnic_flags_and_state(sc)); 1495 1500 1496 1501 scsi_done(sc); 1497 1502 } ··· 1566 1571 io_lock = fnic_io_lock_tag(fnic, abt_tag); 1567 1572 spin_lock_irqsave(io_lock, flags); 1568 1573 1569 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1574 + io_req = fnic_priv(sc)->io_req; 1570 1575 1571 1576 if (!io_req || io_req->port_id != iter_data->port_id) { 1572 1577 spin_unlock_irqrestore(io_lock, flags); 1573 1578 return true; 1574 1579 } 1575 1580 1576 - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 1577 - (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { 1581 + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && 1582 + !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { 1578 1583 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1579 1584 "fnic_rport_exch_reset dev rst not pending sc 0x%p\n", 1580 1585 sc); ··· 1586 1591 * Found IO that is still pending with firmware and 1587 1592 * belongs to rport that went away 1588 1593 */ 1589 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 1594 + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { 1590 1595 
spin_unlock_irqrestore(io_lock, flags); 1591 1596 return true; 1592 1597 } ··· 1594 1599 shost_printk(KERN_ERR, fnic->lport->host, 1595 1600 "fnic_rport_exch_reset: io_req->abts_done is set " 1596 1601 "state is %s\n", 1597 - fnic_ioreq_state_to_str(CMD_STATE(sc))); 1602 + fnic_ioreq_state_to_str(fnic_priv(sc)->state)); 1598 1603 } 1599 1604 1600 - if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) { 1605 + if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { 1601 1606 shost_printk(KERN_ERR, fnic->lport->host, 1602 1607 "rport_exch_reset " 1603 1608 "IO not yet issued %p tag 0x%x flags " 1604 1609 "%x state %d\n", 1605 - sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc)); 1610 + sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state); 1606 1611 } 1607 - old_ioreq_state = CMD_STATE(sc); 1608 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1609 - CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 1610 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 1612 + old_ioreq_state = fnic_priv(sc)->state; 1613 + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; 1614 + fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; 1615 + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { 1611 1616 atomic64_inc(&reset_stats->device_reset_terminates); 1612 1617 abt_tag |= FNIC_TAG_DEV_RST; 1613 1618 } ··· 1633 1638 * lun reset 1634 1639 */ 1635 1640 spin_lock_irqsave(io_lock, flags); 1636 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) 1637 - CMD_STATE(sc) = old_ioreq_state; 1641 + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) 1642 + fnic_priv(sc)->state = old_ioreq_state; 1638 1643 spin_unlock_irqrestore(io_lock, flags); 1639 1644 } else { 1640 1645 spin_lock_irqsave(io_lock, flags); 1641 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) 1642 - CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; 1646 + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) 1647 + fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; 1643 1648 else 1644 - CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; 1649 + fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED; 1645 1650 
spin_unlock_irqrestore(io_lock, flags); 1646 1651 atomic64_inc(&term_stats->terminates); 1647 1652 iter_data->term_cnt++; ··· 1749 1754 FNIC_SCSI_DBG(KERN_DEBUG, 1750 1755 fnic->lport->host, 1751 1756 "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n", 1752 - rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc)); 1757 + rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags); 1753 1758 1754 - CMD_FLAGS(sc) = FNIC_NO_FLAGS; 1759 + fnic_priv(sc)->flags = FNIC_NO_FLAGS; 1755 1760 1756 1761 if (lp->state != LPORT_ST_READY || !(lp->link_up)) { 1757 1762 ret = FAILED; ··· 1768 1773 * happened, the completion wont actually complete the command 1769 1774 * and it will be considered as an aborted command 1770 1775 * 1771 - * The CMD_SP will not be cleared except while holding io_req_lock. 1776 + * .io_req will not be cleared except while holding io_req_lock. 1772 1777 */ 1773 1778 io_lock = fnic_io_lock_hash(fnic, sc); 1774 1779 spin_lock_irqsave(io_lock, flags); 1775 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1780 + io_req = fnic_priv(sc)->io_req; 1776 1781 if (!io_req) { 1777 1782 spin_unlock_irqrestore(io_lock, flags); 1778 1783 goto fnic_abort_cmd_end; ··· 1780 1785 1781 1786 io_req->abts_done = &tm_done; 1782 1787 1783 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 1788 + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { 1784 1789 spin_unlock_irqrestore(io_lock, flags); 1785 1790 goto wait_pending; 1786 1791 } ··· 1809 1814 * the completion wont be done till mid-layer, since abort 1810 1815 * has already started. 
1811 1816 */ 1812 - old_ioreq_state = CMD_STATE(sc); 1813 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1814 - CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 1817 + old_ioreq_state = fnic_priv(sc)->state; 1818 + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; 1819 + fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; 1815 1820 1816 1821 spin_unlock_irqrestore(io_lock, flags); 1817 1822 ··· 1833 1838 if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun, 1834 1839 io_req)) { 1835 1840 spin_lock_irqsave(io_lock, flags); 1836 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) 1837 - CMD_STATE(sc) = old_ioreq_state; 1838 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1841 + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) 1842 + fnic_priv(sc)->state = old_ioreq_state; 1843 + io_req = fnic_priv(sc)->io_req; 1839 1844 if (io_req) 1840 1845 io_req->abts_done = NULL; 1841 1846 spin_unlock_irqrestore(io_lock, flags); ··· 1843 1848 goto fnic_abort_cmd_end; 1844 1849 } 1845 1850 if (task_req == FCPIO_ITMF_ABT_TASK) { 1846 - CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED; 1851 + fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED; 1847 1852 atomic64_inc(&fnic_stats->abts_stats.aborts); 1848 1853 } else { 1849 - CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED; 1854 + fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED; 1850 1855 atomic64_inc(&fnic_stats->term_stats.terminates); 1851 1856 } 1852 1857 ··· 1864 1869 /* Check the abort status */ 1865 1870 spin_lock_irqsave(io_lock, flags); 1866 1871 1867 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1872 + io_req = fnic_priv(sc)->io_req; 1868 1873 if (!io_req) { 1869 1874 atomic64_inc(&fnic_stats->io_stats.ioreq_null); 1870 1875 spin_unlock_irqrestore(io_lock, flags); 1871 - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; 1876 + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; 1872 1877 ret = FAILED; 1873 1878 goto fnic_abort_cmd_end; 1874 1879 } 1875 1880 io_req->abts_done = NULL; 1876 1881 1877 1882 /* fw did not complete abort, timed out */ 1878 - if 
(CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { 1883 + if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { 1879 1884 spin_unlock_irqrestore(io_lock, flags); 1880 1885 if (task_req == FCPIO_ITMF_ABT_TASK) { 1881 1886 atomic64_inc(&abts_stats->abort_drv_timeouts); 1882 1887 } else { 1883 1888 atomic64_inc(&term_stats->terminate_drv_timeouts); 1884 1889 } 1885 - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; 1890 + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT; 1886 1891 ret = FAILED; 1887 1892 goto fnic_abort_cmd_end; 1888 1893 } 1889 1894 1890 1895 /* IO out of order */ 1891 1896 1892 - if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { 1897 + if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { 1893 1898 spin_unlock_irqrestore(io_lock, flags); 1894 1899 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1895 1900 "Issuing Host reset due to out of order IO\n"); ··· 1898 1903 goto fnic_abort_cmd_end; 1899 1904 } 1900 1905 1901 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 1906 + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; 1902 1907 1903 1908 start_time = io_req->start_time; 1904 1909 /* ··· 1906 1911 * free the io_req if successful. If abort fails, 1907 1912 * Device reset will clean the I/O. 
1908 1913 */ 1909 - if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS) 1910 - CMD_SP(sc) = NULL; 1911 - else { 1914 + if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) { 1915 + fnic_priv(sc)->io_req = NULL; 1916 + } else { 1912 1917 ret = FAILED; 1913 1918 spin_unlock_irqrestore(io_lock, flags); 1914 1919 goto fnic_abort_cmd_end; ··· 1934 1939 0, ((u64)sc->cmnd[0] << 32 | 1935 1940 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | 1936 1941 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1937 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1942 + fnic_flags_and_state(sc)); 1938 1943 1939 1944 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1940 1945 "Returning from abort cmd type %x %s\n", task_req, ··· 2025 2030 2026 2031 io_lock = fnic_io_lock_tag(fnic, abt_tag); 2027 2032 spin_lock_irqsave(io_lock, flags); 2028 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2033 + io_req = fnic_priv(sc)->io_req; 2029 2034 if (!io_req) { 2030 2035 spin_unlock_irqrestore(io_lock, flags); 2031 2036 return true; ··· 2037 2042 */ 2038 2043 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 2039 2044 "Found IO in %s on lun\n", 2040 - fnic_ioreq_state_to_str(CMD_STATE(sc))); 2045 + fnic_ioreq_state_to_str(fnic_priv(sc)->state)); 2041 2046 2042 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 2047 + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) { 2043 2048 spin_unlock_irqrestore(io_lock, flags); 2044 2049 return true; 2045 2050 } 2046 - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 2047 - (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { 2051 + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && 2052 + (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { 2048 2053 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 2049 2054 "%s dev rst not pending sc 0x%p\n", __func__, 2050 2055 sc); ··· 2055 2060 if (io_req->abts_done) 2056 2061 shost_printk(KERN_ERR, fnic->lport->host, 2057 2062 "%s: io_req->abts_done is set state is %s\n", 2058 - __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); 2059 - old_ioreq_state = CMD_STATE(sc); 2063 
+ __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); 2064 + old_ioreq_state = fnic_priv(sc)->state; 2060 2065 /* 2061 2066 * Any pending IO issued prior to reset is expected to be 2062 2067 * in abts pending state, if not we need to set ··· 2064 2069 * When IO is completed, the IO will be handed over and 2065 2070 * handled in this function. 2066 2071 */ 2067 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 2072 + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; 2068 2073 2069 2074 BUG_ON(io_req->abts_done); 2070 2075 2071 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 2076 + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { 2072 2077 abt_tag |= FNIC_TAG_DEV_RST; 2073 2078 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 2074 2079 "%s: dev rst sc 0x%p\n", __func__, sc); 2075 2080 } 2076 2081 2077 - CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 2082 + fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; 2078 2083 io_req->abts_done = &tm_done; 2079 2084 spin_unlock_irqrestore(io_lock, flags); 2080 2085 ··· 2085 2090 FCPIO_ITMF_ABT_TASK_TERM, 2086 2091 fc_lun.scsi_lun, io_req)) { 2087 2092 spin_lock_irqsave(io_lock, flags); 2088 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2093 + io_req = fnic_priv(sc)->io_req; 2089 2094 if (io_req) 2090 2095 io_req->abts_done = NULL; 2091 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) 2092 - CMD_STATE(sc) = old_ioreq_state; 2096 + if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) 2097 + fnic_priv(sc)->state = old_ioreq_state; 2093 2098 spin_unlock_irqrestore(io_lock, flags); 2094 2099 iter_data->ret = FAILED; 2095 2100 return false; 2096 2101 } else { 2097 2102 spin_lock_irqsave(io_lock, flags); 2098 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) 2099 - CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; 2103 + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) 2104 + fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; 2100 2105 spin_unlock_irqrestore(io_lock, flags); 2101 2106 } 2102 - CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; 2107 + fnic_priv(sc)->flags |= 
FNIC_IO_INTERNAL_TERM_ISSUED; 2103 2108 2104 2109 wait_for_completion_timeout(&tm_done, msecs_to_jiffies 2105 2110 (fnic->config.ed_tov)); 2106 2111 2107 2112 /* Recheck cmd state to check if it is now aborted */ 2108 2113 spin_lock_irqsave(io_lock, flags); 2109 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2114 + io_req = fnic_priv(sc)->io_req; 2110 2115 if (!io_req) { 2111 2116 spin_unlock_irqrestore(io_lock, flags); 2112 - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; 2117 + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; 2113 2118 return true; 2114 2119 } 2115 2120 2116 2121 io_req->abts_done = NULL; 2117 2122 2118 2123 /* if abort is still pending with fw, fail */ 2119 - if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { 2124 + if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) { 2120 2125 spin_unlock_irqrestore(io_lock, flags); 2121 - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; 2126 + fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE; 2122 2127 iter_data->ret = FAILED; 2123 2128 return false; 2124 2129 } 2125 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 2130 + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE; 2126 2131 2127 2132 /* original sc used for lr is handled by dev reset code */ 2128 2133 if (sc != iter_data->lr_sc) 2129 - CMD_SP(sc) = NULL; 2134 + fnic_priv(sc)->io_req = NULL; 2130 2135 spin_unlock_irqrestore(io_lock, flags); 2131 2136 2132 2137 /* original sc used for lr is handled by dev reset code */ ··· 2267 2272 goto fnic_device_reset_end; 2268 2273 } 2269 2274 2270 - CMD_FLAGS(sc) = FNIC_DEVICE_RESET; 2275 + fnic_priv(sc)->flags = FNIC_DEVICE_RESET; 2271 2276 /* Allocate tag if not present */ 2272 2277 2273 2278 if (unlikely(tag < 0)) { ··· 2283 2288 } 2284 2289 io_lock = fnic_io_lock_hash(fnic, sc); 2285 2290 spin_lock_irqsave(io_lock, flags); 2286 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2291 + io_req = fnic_priv(sc)->io_req; 2287 2292 2288 2293 /* 2289 2294 * If there is a io_req attached to this command, then use it, ··· 2297 2302 } 2298 2303 
memset(io_req, 0, sizeof(*io_req)); 2299 2304 io_req->port_id = rport->port_id; 2300 - CMD_SP(sc) = (char *)io_req; 2305 + fnic_priv(sc)->io_req = io_req; 2301 2306 } 2302 2307 io_req->dr_done = &tm_done; 2303 - CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; 2304 - CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE; 2308 + fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING; 2309 + fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE; 2305 2310 spin_unlock_irqrestore(io_lock, flags); 2306 2311 2307 2312 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag); ··· 2312 2317 */ 2313 2318 if (fnic_queue_dr_io_req(fnic, sc, io_req)) { 2314 2319 spin_lock_irqsave(io_lock, flags); 2315 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2320 + io_req = fnic_priv(sc)->io_req; 2316 2321 if (io_req) 2317 2322 io_req->dr_done = NULL; 2318 2323 goto fnic_device_reset_clean; 2319 2324 } 2320 2325 spin_lock_irqsave(io_lock, flags); 2321 - CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED; 2326 + fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED; 2322 2327 spin_unlock_irqrestore(io_lock, flags); 2323 2328 2324 2329 /* ··· 2329 2334 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); 2330 2335 2331 2336 spin_lock_irqsave(io_lock, flags); 2332 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2337 + io_req = fnic_priv(sc)->io_req; 2333 2338 if (!io_req) { 2334 2339 spin_unlock_irqrestore(io_lock, flags); 2335 2340 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ··· 2338 2343 } 2339 2344 io_req->dr_done = NULL; 2340 2345 2341 - status = CMD_LR_STATUS(sc); 2346 + status = fnic_priv(sc)->lr_status; 2342 2347 2343 2348 /* 2344 2349 * If lun reset not completed, bail out with failed. 
io_req ··· 2348 2353 atomic64_inc(&reset_stats->device_reset_timeouts); 2349 2354 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 2350 2355 "Device reset timed out\n"); 2351 - CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; 2356 + fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT; 2352 2357 spin_unlock_irqrestore(io_lock, flags); 2353 2358 int_to_scsilun(sc->device->lun, &fc_lun); 2354 2359 /* ··· 2357 2362 */ 2358 2363 while (1) { 2359 2364 spin_lock_irqsave(io_lock, flags); 2360 - if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) { 2365 + if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) { 2361 2366 spin_unlock_irqrestore(io_lock, flags); 2362 2367 break; 2363 2368 } ··· 2370 2375 msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT)); 2371 2376 } else { 2372 2377 spin_lock_irqsave(io_lock, flags); 2373 - CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; 2374 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 2378 + fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED; 2379 + fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; 2375 2380 io_req->abts_done = &tm_done; 2376 2381 spin_unlock_irqrestore(io_lock, flags); 2377 2382 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, ··· 2382 2387 } 2383 2388 while (1) { 2384 2389 spin_lock_irqsave(io_lock, flags); 2385 - if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { 2390 + if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { 2386 2391 spin_unlock_irqrestore(io_lock, flags); 2387 2392 wait_for_completion_timeout(&tm_done, 2388 2393 msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT)); 2389 2394 break; 2390 2395 } else { 2391 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2396 + io_req = fnic_priv(sc)->io_req; 2392 2397 io_req->abts_done = NULL; 2393 2398 goto fnic_device_reset_clean; 2394 2399 } ··· 2403 2408 FNIC_SCSI_DBG(KERN_DEBUG, 2404 2409 fnic->lport->host, 2405 2410 "Device reset completed - failed\n"); 2406 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2411 + io_req = fnic_priv(sc)->io_req; 2407 2412 goto fnic_device_reset_clean; 2408 2413 } 2409 2414 ··· 2416 2421 */ 2417 2422 if 
(fnic_clean_pending_aborts(fnic, sc, new_sc)) { 2418 2423 spin_lock_irqsave(io_lock, flags); 2419 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2424 + io_req = fnic_priv(sc)->io_req; 2420 2425 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 2421 2426 "Device reset failed" 2422 2427 " since could not abort all IOs\n"); ··· 2425 2430 2426 2431 /* Clean lun reset command */ 2427 2432 spin_lock_irqsave(io_lock, flags); 2428 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2433 + io_req = fnic_priv(sc)->io_req; 2429 2434 if (io_req) 2430 2435 /* Completed, and successful */ 2431 2436 ret = SUCCESS; 2432 2437 2433 2438 fnic_device_reset_clean: 2434 2439 if (io_req) 2435 - CMD_SP(sc) = NULL; 2440 + fnic_priv(sc)->io_req = NULL; 2436 2441 2437 2442 spin_unlock_irqrestore(io_lock, flags); 2438 2443 ··· 2448 2453 0, ((u64)sc->cmnd[0] << 32 | 2449 2454 (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | 2450 2455 (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 2451 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 2456 + fnic_flags_and_state(sc)); 2452 2457 2453 2458 /* free tag if it is allocated */ 2454 2459 if (unlikely(tag_gen_flag)) ··· 2693 2698 io_lock = fnic_io_lock_hash(fnic, sc); 2694 2699 spin_lock_irqsave(io_lock, flags); 2695 2700 2696 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2701 + io_req = fnic_priv(sc)->io_req; 2697 2702 if (!io_req) { 2698 2703 spin_unlock_irqrestore(io_lock, flags); 2699 2704 return true; ··· 2705 2710 */ 2706 2711 FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 2707 2712 "Found IO in %s on lun\n", 2708 - fnic_ioreq_state_to_str(CMD_STATE(sc))); 2709 - cmd_state = CMD_STATE(sc); 2713 + fnic_ioreq_state_to_str(fnic_priv(sc)->state)); 2714 + cmd_state = fnic_priv(sc)->state; 2710 2715 spin_unlock_irqrestore(io_lock, flags); 2711 2716 if (cmd_state == FNIC_IOREQ_ABTS_PENDING) 2712 2717 iter_data->ret = 1;