Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull misc SCSI driver updates from James Bottomley:
"This patch set is a set of driver updates (megaraid_sas, fnic, lpfc,
ufs, hpsa); we also have a couple of bug fixes (sd out-of-bounds and
ibmvfc error handling), the first round of esas2r checker fixes, and
finally the much-anticipated big-endian additions for megaraid_sas"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits)
[SCSI] fnic: fnic Driver Tuneables Exposed through CLI
[SCSI] fnic: Kernel panic while running sh/nosh with max lun cfg
[SCSI] fnic: Hitting BUG_ON(io_req->abts_done) in fnic_rport_exch_reset
[SCSI] fnic: Remove QUEUE_FULL handling code
[SCSI] fnic: On system with >1.1TB RAM, VIC fails multipath after boot up
[SCSI] fnic: FC stat param seconds_since_last_reset not getting updated
[SCSI] sd: Fix potential out-of-bounds access
[SCSI] lpfc 8.3.42: Update lpfc version to driver version 8.3.42
[SCSI] lpfc 8.3.42: Fixed issue of task management commands having a fixed timeout
[SCSI] lpfc 8.3.42: Fixed inconsistent spin lock usage.
[SCSI] lpfc 8.3.42: Fix driver's abort loop functionality to skip IOs already getting aborted
[SCSI] lpfc 8.3.42: Fixed failure to allocate SCSI buffer on PPC64 platform for SLI4 devices
[SCSI] lpfc 8.3.42: Fix WARN_ON when driver unloads
[SCSI] lpfc 8.3.42: Avoided making pci bar ioremap call during dual-chute WQ/RQ pci bar selection
[SCSI] lpfc 8.3.42: Fixed driver iocbq structure's iocb_flag field running out of space
[SCSI] lpfc 8.3.42: Fix crash on driver load due to cpu affinity logic
[SCSI] lpfc 8.3.42: Fixed logging format of setting driver sysfs attributes hard to interpret
[SCSI] lpfc 8.3.42: Fixed back to back RSCNs discovery failure.
[SCSI] lpfc 8.3.42: Fixed race condition between BSG I/O dispatch and timeout handling
[SCSI] lpfc 8.3.42: Fixed function mode field defined too small for not recognizing dual-chute mode
...

+1720 -680
+10
Documentation/scsi/ChangeLog.megaraid_sas
··· 1 + Release Date : Sat. Aug 31, 2013 17:00:00 PST 2013 - 2 + (emaild-id:megaraidlinux@lsi.com) 3 + Adam Radford 4 + Kashyap Desai 5 + Sumit Saxena 6 + Current Version : 06.700.06.00-rc1 7 + Old Version : 06.600.18.00-rc1 8 + 1. Add High Availability clustering support using shared Logical Disks. 9 + 2. Version and Changelog update. 10 + ------------------------------------------------------------------------------- 1 11 Release Date : Wed. May 15, 2013 17:00:00 PST 2013 - 2 12 (emaild-id:megaraidlinux@lsi.com) 3 13 Adam Radford
+1 -1
drivers/scsi/aic7xxx/aic7xxx_pci.c
··· 692 692 * ID as valid. 693 693 */ 694 694 if (ahc_get_pci_function(pci) > 0 695 - && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice) 695 + && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor) 696 696 && SUBID_9005_MFUNCENB(subdevice) == 0) 697 697 return (NULL); 698 698
+8 -3
drivers/scsi/esas2r/esas2r_flash.c
··· 860 860 return false; 861 861 } 862 862 863 + if (fsc->command >= cmdcnt) { 864 + fs->status = ATTO_STS_INV_FUNC; 865 + return false; 866 + } 867 + 863 868 func = cmd_to_fls_func[fsc->command]; 864 - if (fsc->command >= cmdcnt || func == 0xFF) { 869 + if (func == 0xFF) { 865 870 fs->status = ATTO_STS_INV_FUNC; 866 871 return false; 867 872 } ··· 1360 1355 u32 time = jiffies_to_msecs(jiffies); 1361 1356 1362 1357 esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); 1363 - memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram)); 1358 + *n = default_sas_nvram; 1364 1359 n->sas_addr[3] |= 0x0F; 1365 1360 n->sas_addr[4] = HIBYTE(LOWORD(time)); 1366 1361 n->sas_addr[5] = LOBYTE(LOWORD(time)); ··· 1378 1373 * address out first. 1379 1374 */ 1380 1375 memcpy(&sas_addr[0], a->nvram->sas_addr, 8); 1381 - memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram)); 1376 + *nvram = default_sas_nvram; 1382 1377 memcpy(&nvram->sas_addr[0], &sas_addr[0], 8); 1383 1378 } 1384 1379
+4 -4
drivers/scsi/esas2r/esas2r_init.c
··· 665 665 666 666 int esas2r_cleanup(struct Scsi_Host *host) 667 667 { 668 - struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; 668 + struct esas2r_adapter *a; 669 669 int index; 670 670 671 671 if (host == NULL) { ··· 678 678 } 679 679 680 680 esas2r_debug("esas2r_cleanup called for host %p", host); 681 + a = (struct esas2r_adapter *)host->hostdata; 681 682 index = a->index; 682 683 esas2r_kill_adapter(index); 683 684 return index; ··· 809 808 int pcie_cap_reg; 810 809 811 810 pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); 812 - if (0xffff && pcie_cap_reg) { 811 + if (0xffff & pcie_cap_reg) { 813 812 u16 devcontrol; 814 813 815 814 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, ··· 1551 1550 * to not overwrite a previous crash that was saved. 1552 1551 */ 1553 1552 if ((a->flags2 & AF2_COREDUMP_AVAIL) 1554 - && !(a->flags2 & AF2_COREDUMP_SAVED) 1555 - && a->fw_coredump_buff) { 1553 + && !(a->flags2 & AF2_COREDUMP_SAVED)) { 1556 1554 esas2r_read_mem_block(a, 1557 1555 a->fw_coredump_buff, 1558 1556 MW_DATA_ADDR_SRAM + 0x80000,
+1 -1
drivers/scsi/esas2r/esas2r_ioctl.c
··· 415 415 lun = tm->lun; 416 416 } 417 417 418 - if (path > 0 || tid > ESAS2R_MAX_ID) { 418 + if (path > 0) { 419 419 rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32( 420 420 CSMI_STS_INV_PARAM); 421 421 return false;
+5 -2
drivers/scsi/esas2r/esas2r_vda.c
··· 302 302 if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { 303 303 struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg; 304 304 struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp; 305 + char buf[sizeof(cfg->data.init.fw_release) + 1]; 305 306 306 307 cfg->data_length = 307 308 cpu_to_le32(sizeof(struct atto_vda_cfg_init)); ··· 310 309 le32_to_cpu(rsp->vda_version); 311 310 cfg->data.init.fw_build = rsp->fw_build; 312 311 313 - sprintf((char *)&cfg->data.init.fw_release, 314 - "%1d.%02d", 312 + snprintf(buf, sizeof(buf), "%1d.%02d", 315 313 (int)LOBYTE(le16_to_cpu(rsp->fw_release)), 316 314 (int)HIBYTE(le16_to_cpu(rsp->fw_release))); 315 + 316 + memcpy(&cfg->data.init.fw_release, buf, 317 + sizeof(cfg->data.init.fw_release)); 317 318 318 319 if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A') 319 320 cfg->data.init.fw_version =
+8
drivers/scsi/fnic/fnic.h
··· 43 43 #define DFX DRV_NAME "%d: " 44 44 45 45 #define DESC_CLEAN_LOW_WATERMARK 8 46 + #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ 47 + #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ 46 48 #define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ 47 49 #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ 48 50 #define FNIC_DFLT_QUEUE_DEPTH 32 ··· 156 154 FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ 157 155 shost_printk(kern_level, host, fmt, ##args);) 158 156 157 + #define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \ 158 + shost_printk(kern_level, host, fmt, ##args) 159 + 159 160 extern const char *fnic_state_str[]; 160 161 161 162 enum fnic_intx_intr_index { ··· 220 215 221 216 struct vnic_stats *stats; 222 217 unsigned long stats_time; /* time of stats update */ 218 + unsigned long stats_reset_time; /* time of stats reset */ 223 219 struct vnic_nic_cfg *nic_cfg; 224 220 char name[IFNAMSIZ]; 225 221 struct timer_list notify_timer; /* used for MSI interrupts */ 226 222 223 + unsigned int fnic_max_tag_id; 227 224 unsigned int err_intr_offset; 228 225 unsigned int link_intr_offset; 229 226 ··· 366 359 return ((fnic->state_flags & st_flags) == st_flags); 367 360 } 368 361 void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); 362 + void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); 369 363 #endif /* _FNIC_H_ */
+132 -13
drivers/scsi/fnic/fnic_main.c
··· 74 74 MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " 75 75 "for fnic trace buffer"); 76 76 77 + static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; 78 + module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); 79 + MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); 80 + 77 81 static struct libfc_function_template fnic_transport_template = { 78 82 .frame_send = fnic_send, 79 83 .lport_set_port_id = fnic_set_port_id, ··· 95 91 if (!rport || fc_remote_port_chkready(rport)) 96 92 return -ENXIO; 97 93 98 - scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); 94 + scsi_activate_tcq(sdev, fnic_max_qdepth); 99 95 return 0; 100 96 } 101 97 ··· 130 126 static void fnic_get_host_speed(struct Scsi_Host *shost); 131 127 static struct scsi_transport_template *fnic_fc_transport; 132 128 static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); 129 + static void fnic_reset_host_stats(struct Scsi_Host *); 133 130 134 131 static struct fc_function_template fnic_fc_functions = { 135 132 ··· 158 153 .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, 159 154 .issue_fc_host_lip = fnic_reset, 160 155 .get_fc_host_stats = fnic_get_stats, 156 + .reset_fc_host_stats = fnic_reset_host_stats, 161 157 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), 162 158 .terminate_rport_io = fnic_terminate_rport_io, 163 159 .bsg_request = fc_lport_bsg_request, ··· 212 206 stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; 213 207 stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; 214 208 stats->invalid_crc_count = vs->rx.rx_crc_errors; 215 - stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ; 209 + stats->seconds_since_last_reset = 210 + (jiffies - fnic->stats_reset_time) / HZ; 216 211 stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); 217 212 stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); 218 213 219 214 return stats; 215 + } 216 + 217 + /* 218 + * 
fnic_dump_fchost_stats 219 + * note : dumps fc_statistics into system logs 220 + */ 221 + void fnic_dump_fchost_stats(struct Scsi_Host *host, 222 + struct fc_host_statistics *stats) 223 + { 224 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 225 + "fnic: seconds since last reset = %llu\n", 226 + stats->seconds_since_last_reset); 227 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 228 + "fnic: tx frames = %llu\n", 229 + stats->tx_frames); 230 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 231 + "fnic: tx words = %llu\n", 232 + stats->tx_words); 233 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 234 + "fnic: rx frames = %llu\n", 235 + stats->rx_frames); 236 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 237 + "fnic: rx words = %llu\n", 238 + stats->rx_words); 239 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 240 + "fnic: lip count = %llu\n", 241 + stats->lip_count); 242 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 243 + "fnic: nos count = %llu\n", 244 + stats->nos_count); 245 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 246 + "fnic: error frames = %llu\n", 247 + stats->error_frames); 248 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 249 + "fnic: dumped frames = %llu\n", 250 + stats->dumped_frames); 251 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 252 + "fnic: link failure count = %llu\n", 253 + stats->link_failure_count); 254 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 255 + "fnic: loss of sync count = %llu\n", 256 + stats->loss_of_sync_count); 257 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 258 + "fnic: loss of signal count = %llu\n", 259 + stats->loss_of_signal_count); 260 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 261 + "fnic: prim seq protocol err count = %llu\n", 262 + stats->prim_seq_protocol_err_count); 263 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 264 + "fnic: invalid tx word count= %llu\n", 265 + stats->invalid_tx_word_count); 266 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 267 + "fnic: invalid crc count = %llu\n", 268 + stats->invalid_crc_count); 269 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 270 + "fnic: fcp input requests = %llu\n", 271 + stats->fcp_input_requests); 272 + FNIC_MAIN_NOTE(KERN_NOTICE, 
host, 273 + "fnic: fcp output requests = %llu\n", 274 + stats->fcp_output_requests); 275 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 276 + "fnic: fcp control requests = %llu\n", 277 + stats->fcp_control_requests); 278 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 279 + "fnic: fcp input megabytes = %llu\n", 280 + stats->fcp_input_megabytes); 281 + FNIC_MAIN_NOTE(KERN_NOTICE, host, 282 + "fnic: fcp output megabytes = %llu\n", 283 + stats->fcp_output_megabytes); 284 + return; 285 + } 286 + 287 + /* 288 + * fnic_reset_host_stats : clears host stats 289 + * note : called when reset_statistics set under sysfs dir 290 + */ 291 + static void fnic_reset_host_stats(struct Scsi_Host *host) 292 + { 293 + int ret; 294 + struct fc_lport *lp = shost_priv(host); 295 + struct fnic *fnic = lport_priv(lp); 296 + struct fc_host_statistics *stats; 297 + unsigned long flags; 298 + 299 + /* dump current stats, before clearing them */ 300 + stats = fnic_get_stats(host); 301 + fnic_dump_fchost_stats(host, stats); 302 + 303 + spin_lock_irqsave(&fnic->fnic_lock, flags); 304 + ret = vnic_dev_stats_clear(fnic->vdev); 305 + spin_unlock_irqrestore(&fnic->fnic_lock, flags); 306 + 307 + if (ret) { 308 + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, 309 + "fnic: Reset vnic stats failed" 310 + " 0x%x", ret); 311 + return; 312 + } 313 + fnic->stats_reset_time = jiffies; 314 + memset(stats, 0, sizeof(*stats)); 315 + 316 + return; 220 317 } 221 318 222 319 void fnic_log_q_error(struct fnic *fnic) ··· 556 447 557 448 host->transportt = fnic_fc_transport; 558 449 559 - err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ); 560 - if (err) { 561 - shost_printk(KERN_ERR, fnic->lport->host, 562 - "Unable to alloc shared tag map\n"); 563 - goto err_out_free_hba; 564 - } 565 - 566 450 /* Setup PCI resources */ 567 451 pci_set_drvdata(pdev, fnic); 568 452 ··· 578 476 pci_set_master(pdev); 579 477 580 478 /* Query PCI controller on system for DMA addressing 581 - * limitation for the device. 
Try 40-bit first, and 479 + * limitation for the device. Try 64-bit first, and 582 480 * fail to 32-bit. 583 481 */ 584 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); 482 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 585 483 if (err) { 586 484 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 587 485 if (err) { ··· 598 496 goto err_out_release_regions; 599 497 } 600 498 } else { 601 - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); 499 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 602 500 if (err) { 603 501 shost_printk(KERN_ERR, fnic->lport->host, 604 - "Unable to obtain 40-bit DMA " 502 + "Unable to obtain 64-bit DMA " 605 503 "for consistent allocations, aborting.\n"); 606 504 goto err_out_release_regions; 607 505 } ··· 668 566 "aborting.\n"); 669 567 goto err_out_dev_close; 670 568 } 569 + 570 + /* Configure Maximum Outstanding IO reqs*/ 571 + if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) { 572 + host->can_queue = min_t(u32, FNIC_MAX_IO_REQ, 573 + max_t(u32, FNIC_MIN_IO_REQ, 574 + fnic->config.io_throttle_count)); 575 + } 576 + fnic->fnic_max_tag_id = host->can_queue; 577 + 578 + err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id); 579 + if (err) { 580 + shost_printk(KERN_ERR, fnic->lport->host, 581 + "Unable to alloc shared tag map\n"); 582 + goto err_out_dev_close; 583 + } 584 + 671 585 host->max_lun = fnic->config.luns_per_tgt; 672 586 host->max_id = FNIC_MAX_FCP_TARGET; 673 587 host->max_cmd_len = FCOE_MAX_CMD_LEN; ··· 837 719 } 838 720 839 721 fc_lport_init_stats(lp); 722 + fnic->stats_reset_time = jiffies; 840 723 841 724 fc_lport_config(lp); 842 725
+73 -74
drivers/scsi/fnic/fnic_scsi.c
··· 111 111 return &fnic->io_req_lock[hash]; 112 112 } 113 113 114 + static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic, 115 + int tag) 116 + { 117 + return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)]; 118 + } 119 + 114 120 /* 115 121 * Unmap the data buffer and sense buffer for an io_req, 116 122 * also unmap and free the device-private scatter/gather list. ··· 736 730 fcpio_tag_id_dec(&tag, &id); 737 731 icmnd_cmpl = &desc->u.icmnd_cmpl; 738 732 739 - if (id >= FNIC_MAX_IO_REQ) { 733 + if (id >= fnic->fnic_max_tag_id) { 740 734 shost_printk(KERN_ERR, fnic->lport->host, 741 735 "Tag out of range tag %x hdr status = %s\n", 742 736 id, fnic_fcpio_status_to_str(hdr_status)); ··· 824 818 if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) 825 819 xfer_len -= icmnd_cmpl->residual; 826 820 827 - /* 828 - * If queue_full, then try to reduce queue depth for all 829 - * LUNS on the target. Todo: this should be accompanied 830 - * by a periodic queue_depth rampup based on successful 831 - * IO completion. 
832 - */ 833 - if (icmnd_cmpl->scsi_status == QUEUE_FULL) { 834 - struct scsi_device *t_sdev; 835 - int qd = 0; 836 - 837 - shost_for_each_device(t_sdev, sc->device->host) { 838 - if (t_sdev->id != sc->device->id) 839 - continue; 840 - 841 - if (t_sdev->queue_depth > 1) { 842 - qd = scsi_track_queue_full 843 - (t_sdev, 844 - t_sdev->queue_depth - 1); 845 - if (qd == -1) 846 - qd = t_sdev->host->cmd_per_lun; 847 - shost_printk(KERN_INFO, 848 - fnic->lport->host, 849 - "scsi[%d:%d:%d:%d" 850 - "] queue full detected," 851 - "new depth = %d\n", 852 - t_sdev->host->host_no, 853 - t_sdev->channel, 854 - t_sdev->id, t_sdev->lun, 855 - t_sdev->queue_depth); 856 - } 857 - } 858 - } 859 821 break; 860 822 861 823 case FCPIO_TIMEOUT: /* request was timed out */ ··· 913 939 fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); 914 940 fcpio_tag_id_dec(&tag, &id); 915 941 916 - if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) { 942 + if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { 917 943 shost_printk(KERN_ERR, fnic->lport->host, 918 944 "Tag out of range tag %x hdr status = %s\n", 919 945 id, fnic_fcpio_status_to_str(hdr_status)); ··· 962 988 spin_unlock_irqrestore(io_lock, flags); 963 989 return; 964 990 } 965 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 966 991 CMD_ABTS_STATUS(sc) = hdr_status; 967 - 968 992 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; 969 993 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 970 994 "abts cmpl recd. 
id %d status %s\n", ··· 1120 1148 1121 1149 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) 1122 1150 { 1123 - unsigned int i; 1151 + int i; 1124 1152 struct fnic_io_req *io_req; 1125 1153 unsigned long flags = 0; 1126 1154 struct scsi_cmnd *sc; 1127 1155 spinlock_t *io_lock; 1128 1156 unsigned long start_time = 0; 1129 1157 1130 - for (i = 0; i < FNIC_MAX_IO_REQ; i++) { 1158 + for (i = 0; i < fnic->fnic_max_tag_id; i++) { 1131 1159 if (i == exclude_id) 1132 1160 continue; 1133 1161 1134 - sc = scsi_host_find_tag(fnic->lport->host, i); 1135 - if (!sc) 1136 - continue; 1137 - 1138 - io_lock = fnic_io_lock_hash(fnic, sc); 1162 + io_lock = fnic_io_lock_tag(fnic, i); 1139 1163 spin_lock_irqsave(io_lock, flags); 1164 + sc = scsi_host_find_tag(fnic->lport->host, i); 1165 + if (!sc) { 1166 + spin_unlock_irqrestore(io_lock, flags); 1167 + continue; 1168 + } 1169 + 1140 1170 io_req = (struct fnic_io_req *)CMD_SP(sc); 1141 1171 if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 1142 1172 !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { ··· 1210 1236 fcpio_tag_id_dec(&desc->hdr.tag, &id); 1211 1237 id &= FNIC_TAG_MASK; 1212 1238 1213 - if (id >= FNIC_MAX_IO_REQ) 1239 + if (id >= fnic->fnic_max_tag_id) 1214 1240 return; 1215 1241 1216 1242 sc = scsi_host_find_tag(fnic->lport->host, id); ··· 1314 1340 if (fnic->in_remove) 1315 1341 return; 1316 1342 1317 - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { 1343 + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { 1318 1344 abt_tag = tag; 1319 - sc = scsi_host_find_tag(fnic->lport->host, tag); 1320 - if (!sc) 1321 - continue; 1322 - 1323 - io_lock = fnic_io_lock_hash(fnic, sc); 1345 + io_lock = fnic_io_lock_tag(fnic, tag); 1324 1346 spin_lock_irqsave(io_lock, flags); 1347 + sc = scsi_host_find_tag(fnic->lport->host, tag); 1348 + if (!sc) { 1349 + spin_unlock_irqrestore(io_lock, flags); 1350 + continue; 1351 + } 1325 1352 1326 1353 io_req = (struct fnic_io_req *)CMD_SP(sc); 1327 1354 ··· 1416 1441 unsigned long flags; 1417 1442 struct 
scsi_cmnd *sc; 1418 1443 struct scsi_lun fc_lun; 1419 - struct fc_rport_libfc_priv *rdata = rport->dd_data; 1420 - struct fc_lport *lport = rdata->local_port; 1421 - struct fnic *fnic = lport_priv(lport); 1444 + struct fc_rport_libfc_priv *rdata; 1445 + struct fc_lport *lport; 1446 + struct fnic *fnic; 1422 1447 struct fc_rport *cmd_rport; 1423 1448 enum fnic_ioreq_state old_ioreq_state; 1424 1449 1450 + if (!rport) { 1451 + printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); 1452 + return; 1453 + } 1454 + rdata = rport->dd_data; 1455 + 1456 + if (!rdata) { 1457 + printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); 1458 + return; 1459 + } 1460 + lport = rdata->local_port; 1461 + 1462 + if (!lport) { 1463 + printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); 1464 + return; 1465 + } 1466 + fnic = lport_priv(lport); 1425 1467 FNIC_SCSI_DBG(KERN_DEBUG, 1426 1468 fnic->lport->host, "fnic_terminate_rport_io called" 1427 1469 " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", ··· 1448 1456 if (fnic->in_remove) 1449 1457 return; 1450 1458 1451 - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { 1459 + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { 1452 1460 abt_tag = tag; 1461 + io_lock = fnic_io_lock_tag(fnic, tag); 1462 + spin_lock_irqsave(io_lock, flags); 1453 1463 sc = scsi_host_find_tag(fnic->lport->host, tag); 1454 - if (!sc) 1464 + if (!sc) { 1465 + spin_unlock_irqrestore(io_lock, flags); 1455 1466 continue; 1467 + } 1456 1468 1457 1469 cmd_rport = starget_to_rport(scsi_target(sc->device)); 1458 - if (rport != cmd_rport) 1470 + if (rport != cmd_rport) { 1471 + spin_unlock_irqrestore(io_lock, flags); 1459 1472 continue; 1460 - 1461 - io_lock = fnic_io_lock_hash(fnic, sc); 1462 - spin_lock_irqsave(io_lock, flags); 1473 + } 1463 1474 1464 1475 io_req = (struct fnic_io_req *)CMD_SP(sc); 1465 1476 ··· 1675 1680 io_req->abts_done = NULL; 1676 1681 1677 1682 /* fw did not complete abort, timed out */ 1678 - if (CMD_STATE(sc) == 
FNIC_IOREQ_ABTS_PENDING) { 1683 + if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { 1679 1684 spin_unlock_irqrestore(io_lock, flags); 1680 1685 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; 1681 1686 ret = FAILED; 1682 1687 goto fnic_abort_cmd_end; 1683 1688 } 1689 + 1690 + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 1684 1691 1685 1692 /* 1686 1693 * firmware completed the abort, check the status, ··· 1781 1784 DECLARE_COMPLETION_ONSTACK(tm_done); 1782 1785 enum fnic_ioreq_state old_ioreq_state; 1783 1786 1784 - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { 1787 + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { 1788 + io_lock = fnic_io_lock_tag(fnic, tag); 1789 + spin_lock_irqsave(io_lock, flags); 1785 1790 sc = scsi_host_find_tag(fnic->lport->host, tag); 1786 1791 /* 1787 1792 * ignore this lun reset cmd or cmds that do not belong to 1788 1793 * this lun 1789 1794 */ 1790 - if (!sc || sc == lr_sc || sc->device != lun_dev) 1795 + if (!sc || sc == lr_sc || sc->device != lun_dev) { 1796 + spin_unlock_irqrestore(io_lock, flags); 1791 1797 continue; 1792 - 1793 - io_lock = fnic_io_lock_hash(fnic, sc); 1794 - spin_lock_irqsave(io_lock, flags); 1798 + } 1795 1799 1796 1800 io_req = (struct fnic_io_req *)CMD_SP(sc); 1797 1801 ··· 1821 1823 spin_unlock_irqrestore(io_lock, flags); 1822 1824 continue; 1823 1825 } 1826 + 1827 + if (io_req->abts_done) 1828 + shost_printk(KERN_ERR, fnic->lport->host, 1829 + "%s: io_req->abts_done is set state is %s\n", 1830 + __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); 1824 1831 old_ioreq_state = CMD_STATE(sc); 1825 1832 /* 1826 1833 * Any pending IO issued prior to reset is expected to be ··· 1835 1832 * handled in this function. 
1836 1833 */ 1837 1834 CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1838 - 1839 - if (io_req->abts_done) 1840 - shost_printk(KERN_ERR, fnic->lport->host, 1841 - "%s: io_req->abts_done is set state is %s\n", 1842 - __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); 1843 1835 1844 1836 BUG_ON(io_req->abts_done); 1845 1837 ··· 1888 1890 io_req->abts_done = NULL; 1889 1891 1890 1892 /* if abort is still pending with fw, fail */ 1891 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 1893 + if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { 1892 1894 spin_unlock_irqrestore(io_lock, flags); 1893 1895 CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; 1894 1896 ret = 1; 1895 1897 goto clean_pending_aborts_end; 1896 1898 } 1899 + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 1897 1900 CMD_SP(sc) = NULL; 1898 1901 spin_unlock_irqrestore(io_lock, flags); 1899 1902 ··· 2092 2093 spin_unlock_irqrestore(io_lock, flags); 2093 2094 int_to_scsilun(sc->device->lun, &fc_lun); 2094 2095 /* 2095 - * Issue abort and terminate on the device reset request. 2096 - * If q'ing of the abort fails, retry issue it after a delay. 2096 + * Issue abort and terminate on device reset request. 2097 + * If q'ing of terminate fails, retry it after a delay. 2097 2098 */ 2098 2099 while (1) { 2099 2100 spin_lock_irqsave(io_lock, flags); ··· 2404 2405 lun_dev = lr_sc->device; 2405 2406 2406 2407 /* walk again to check, if IOs are still pending in fw */ 2407 - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { 2408 + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { 2408 2409 sc = scsi_host_find_tag(fnic->lport->host, tag); 2409 2410 /* 2410 2411 * ignore this lun reset cmd or cmds that do not belong to
+2 -2
drivers/scsi/fnic/vnic_scsi.h
··· 54 54 #define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000 55 55 #define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000 56 56 57 - #define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256 58 - #define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096 57 + #define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 1 58 + #define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 2048 59 59 60 60 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0 61 61 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
+39 -15
drivers/scsi/hpsa.c
··· 54 54 #include "hpsa.h" 55 55 56 56 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ 57 - #define HPSA_DRIVER_VERSION "2.0.2-1" 57 + #define HPSA_DRIVER_VERSION "3.4.0-1" 58 58 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 59 59 #define HPSA "hpsa" 60 60 ··· 89 89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, 90 90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, 91 91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 92 - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, 93 - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, 92 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, 93 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, 94 94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, 95 95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, 96 96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, 97 97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, 98 98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, 99 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334D}, 99 100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, 100 101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, 101 102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, ··· 108 107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925}, 109 108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, 110 109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, 111 - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d}, 110 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, 111 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, 112 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, 113 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, 114 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, 115 + {PCI_VENDOR_ID_HP, 
PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, 116 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, 117 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, 118 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, 119 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, 120 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, 121 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, 122 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, 112 123 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 113 124 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 114 125 {0,} ··· 138 125 {0x3245103C, "Smart Array P410i", &SA5_access}, 139 126 {0x3247103C, "Smart Array P411", &SA5_access}, 140 127 {0x3249103C, "Smart Array P812", &SA5_access}, 141 - {0x324a103C, "Smart Array P712m", &SA5_access}, 142 - {0x324b103C, "Smart Array P711m", &SA5_access}, 128 + {0x324A103C, "Smart Array P712m", &SA5_access}, 129 + {0x324B103C, "Smart Array P711m", &SA5_access}, 143 130 {0x3350103C, "Smart Array P222", &SA5_access}, 144 131 {0x3351103C, "Smart Array P420", &SA5_access}, 145 132 {0x3352103C, "Smart Array P421", &SA5_access}, 146 133 {0x3353103C, "Smart Array P822", &SA5_access}, 134 + {0x334D103C, "Smart Array P822se", &SA5_access}, 147 135 {0x3354103C, "Smart Array P420i", &SA5_access}, 148 136 {0x3355103C, "Smart Array P220i", &SA5_access}, 149 137 {0x3356103C, "Smart Array P721m", &SA5_access}, 150 - {0x1920103C, "Smart Array", &SA5_access}, 151 - {0x1921103C, "Smart Array", &SA5_access}, 152 - {0x1922103C, "Smart Array", &SA5_access}, 153 - {0x1923103C, "Smart Array", &SA5_access}, 154 - {0x1924103C, "Smart Array", &SA5_access}, 155 - {0x1925103C, "Smart Array", &SA5_access}, 156 - {0x1926103C, "Smart Array", &SA5_access}, 157 - {0x1928103C, "Smart Array", &SA5_access}, 158 - {0x334d103C, "Smart Array P822se", &SA5_access}, 138 + {0x1921103C, "Smart Array P830i", &SA5_access}, 139 + {0x1922103C, "Smart Array P430", 
&SA5_access}, 140 + {0x1923103C, "Smart Array P431", &SA5_access}, 141 + {0x1924103C, "Smart Array P830", &SA5_access}, 142 + {0x1926103C, "Smart Array P731m", &SA5_access}, 143 + {0x1928103C, "Smart Array P230i", &SA5_access}, 144 + {0x1929103C, "Smart Array P530", &SA5_access}, 145 + {0x21BD103C, "Smart Array", &SA5_access}, 146 + {0x21BE103C, "Smart Array", &SA5_access}, 147 + {0x21BF103C, "Smart Array", &SA5_access}, 148 + {0x21C0103C, "Smart Array", &SA5_access}, 149 + {0x21C1103C, "Smart Array", &SA5_access}, 150 + {0x21C2103C, "Smart Array", &SA5_access}, 151 + {0x21C3103C, "Smart Array", &SA5_access}, 152 + {0x21C4103C, "Smart Array", &SA5_access}, 153 + {0x21C5103C, "Smart Array", &SA5_access}, 154 + {0x21C7103C, "Smart Array", &SA5_access}, 155 + {0x21C8103C, "Smart Array", &SA5_access}, 156 + {0x21C9103C, "Smart Array", &SA5_access}, 159 157 {0xFFFF103C, "Unknown Smart Array", &SA5_access}, 160 158 }; 161 159
+13 -2
drivers/scsi/ibmvscsi/ibmvfc.c
··· 2208 2208 2209 2209 if (rsp_rc != 0) { 2210 2210 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc); 2211 - return -EIO; 2211 + /* If failure is received, the host adapter is most likely going 2212 + through reset, return success so the caller will wait for the command 2213 + being cancelled to get returned */ 2214 + return 0; 2212 2215 } 2213 2216 2214 2217 sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); ··· 2224 2221 2225 2222 if (status != IBMVFC_MAD_SUCCESS) { 2226 2223 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); 2227 - return -EIO; 2224 + switch (status) { 2225 + case IBMVFC_MAD_DRIVER_FAILED: 2226 + case IBMVFC_MAD_CRQ_ERROR: 2227 + /* Host adapter most likely going through reset, return success to 2228 + the caller will wait for the command being cancelled to get returned */ 2229 + return 0; 2230 + default: 2231 + return -EIO; 2232 + }; 2228 2233 } 2229 2234 2230 2235 sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
+86 -68
drivers/scsi/ibmvscsi/ibmvscsi.c
··· 241 241 struct device_node *rootdn; 242 242 243 243 const char *ppartition_name; 244 - const unsigned int *p_number_ptr; 244 + const __be32 *p_number_ptr; 245 245 246 246 /* Retrieve information about this partition */ 247 247 rootdn = of_find_node_by_path("/"); ··· 255 255 sizeof(partition_name)); 256 256 p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); 257 257 if (p_number_ptr) 258 - partition_number = *p_number_ptr; 258 + partition_number = of_read_number(p_number_ptr, 1); 259 259 of_node_put(rootdn); 260 260 } 261 261 ··· 270 270 strncpy(hostdata->madapter_info.partition_name, partition_name, 271 271 sizeof(hostdata->madapter_info.partition_name)); 272 272 273 - hostdata->madapter_info.partition_number = partition_number; 273 + hostdata->madapter_info.partition_number = 274 + cpu_to_be32(partition_number); 274 275 275 - hostdata->madapter_info.mad_version = 1; 276 - hostdata->madapter_info.os_type = 2; 276 + hostdata->madapter_info.mad_version = cpu_to_be32(1); 277 + hostdata->madapter_info.os_type = cpu_to_be32(2); 277 278 } 278 279 279 280 /** ··· 465 464 memset(&evt->crq, 0x00, sizeof(evt->crq)); 466 465 atomic_set(&evt->free, 1); 467 466 evt->crq.valid = 0x80; 468 - evt->crq.IU_length = sizeof(*evt->xfer_iu); 469 - evt->crq.IU_data_ptr = pool->iu_token + 470 - sizeof(*evt->xfer_iu) * i; 467 + evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu)); 468 + evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token + 469 + sizeof(*evt->xfer_iu) * i); 471 470 evt->xfer_iu = pool->iu_storage + i; 472 471 evt->hostdata = hostdata; 473 472 evt->ext_list = NULL; ··· 589 588 evt_struct->cmnd_done = NULL; 590 589 evt_struct->sync_srp = NULL; 591 590 evt_struct->crq.format = format; 592 - evt_struct->crq.timeout = timeout; 591 + evt_struct->crq.timeout = cpu_to_be16(timeout); 593 592 evt_struct->done = done; 594 593 } 595 594 ··· 660 659 661 660 scsi_for_each_sg(cmd, sg, nseg, i) { 662 661 struct srp_direct_buf *descr = md + i; 663 - descr->va = 
sg_dma_address(sg); 664 - descr->len = sg_dma_len(sg); 662 + descr->va = cpu_to_be64(sg_dma_address(sg)); 663 + descr->len = cpu_to_be32(sg_dma_len(sg)); 665 664 descr->key = 0; 666 665 total_length += sg_dma_len(sg); 667 666 } ··· 704 703 } 705 704 706 705 indirect->table_desc.va = 0; 707 - indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf); 706 + indirect->table_desc.len = cpu_to_be32(sg_mapped * 707 + sizeof(struct srp_direct_buf)); 708 708 indirect->table_desc.key = 0; 709 709 710 710 if (sg_mapped <= MAX_INDIRECT_BUFS) { 711 711 total_length = map_sg_list(cmd, sg_mapped, 712 712 &indirect->desc_list[0]); 713 - indirect->len = total_length; 713 + indirect->len = cpu_to_be32(total_length); 714 714 return 1; 715 715 } 716 716 ··· 733 731 734 732 total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list); 735 733 736 - indirect->len = total_length; 737 - indirect->table_desc.va = evt_struct->ext_list_token; 738 - indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]); 734 + indirect->len = cpu_to_be32(total_length); 735 + indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token); 736 + indirect->table_desc.len = cpu_to_be32(sg_mapped * 737 + sizeof(indirect->desc_list[0])); 739 738 memcpy(indirect->desc_list, evt_struct->ext_list, 740 739 MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf)); 741 740 return 1; ··· 852 849 struct ibmvscsi_host_data *hostdata, 853 850 unsigned long timeout) 854 851 { 855 - u64 *crq_as_u64 = (u64 *) &evt_struct->crq; 852 + __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq; 856 853 int request_status = 0; 857 854 int rc; 858 855 int srp_req = 0; ··· 923 920 add_timer(&evt_struct->timer); 924 921 } 925 922 926 - if ((rc = 927 - ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) { 923 + rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]), 924 + be64_to_cpu(crq_as_u64[1])); 925 + if (rc != 0) { 928 926 list_del(&evt_struct->list); 929 927 del_timer(&evt_struct->timer); 930 
928 ··· 991 987 if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) 992 988 memcpy(cmnd->sense_buffer, 993 989 rsp->data, 994 - rsp->sense_data_len); 990 + be32_to_cpu(rsp->sense_data_len)); 995 991 unmap_cmd_data(&evt_struct->iu.srp.cmd, 996 992 evt_struct, 997 993 evt_struct->hostdata->dev); 998 994 999 995 if (rsp->flags & SRP_RSP_FLAG_DOOVER) 1000 - scsi_set_resid(cmnd, rsp->data_out_res_cnt); 996 + scsi_set_resid(cmnd, 997 + be32_to_cpu(rsp->data_out_res_cnt)); 1001 998 else if (rsp->flags & SRP_RSP_FLAG_DIOVER) 1002 - scsi_set_resid(cmnd, rsp->data_in_res_cnt); 999 + scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt)); 1003 1000 } 1004 1001 1005 1002 if (evt_struct->cmnd_done) ··· 1042 1037 memset(srp_cmd, 0x00, SRP_MAX_IU_LEN); 1043 1038 srp_cmd->opcode = SRP_CMD; 1044 1039 memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb)); 1045 - srp_cmd->lun = ((u64) lun) << 48; 1040 + srp_cmd->lun = cpu_to_be64(((u64)lun) << 48); 1046 1041 1047 1042 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { 1048 1043 if (!firmware_has_feature(FW_FEATURE_CMO)) ··· 1067 1062 if ((in_fmt == SRP_DATA_DESC_INDIRECT || 1068 1063 out_fmt == SRP_DATA_DESC_INDIRECT) && 1069 1064 indirect->table_desc.va == 0) { 1070 - indirect->table_desc.va = evt_struct->crq.IU_data_ptr + 1065 + indirect->table_desc.va = 1066 + cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) + 1071 1067 offsetof(struct srp_cmd, add_data) + 1072 - offsetof(struct srp_indirect_buf, desc_list); 1068 + offsetof(struct srp_indirect_buf, desc_list)); 1073 1069 } 1074 1070 1075 1071 return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); ··· 1164 1158 * request_limit could have been set to -1 by this client. 
1165 1159 */ 1166 1160 atomic_set(&hostdata->request_limit, 1167 - evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); 1161 + be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta)); 1168 1162 1169 1163 /* If we had any pending I/Os, kick them */ 1170 1164 scsi_unblock_requests(hostdata->host); ··· 1190 1184 login = &evt_struct->iu.srp.login_req; 1191 1185 memset(login, 0, sizeof(*login)); 1192 1186 login->opcode = SRP_LOGIN_REQ; 1193 - login->req_it_iu_len = sizeof(union srp_iu); 1194 - login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; 1187 + login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu)); 1188 + login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | 1189 + SRP_BUF_FORMAT_INDIRECT); 1195 1190 1196 1191 spin_lock_irqsave(hostdata->host->host_lock, flags); 1197 1192 /* Start out with a request limit of 0, since this is negotiated in ··· 1221 1214 dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", 1222 1215 evt_struct->xfer_iu->mad.capabilities.common.status); 1223 1216 } else { 1224 - if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) 1217 + if (hostdata->caps.migration.common.server_support != 1218 + cpu_to_be16(SERVER_SUPPORTS_CAP)) 1225 1219 dev_info(hostdata->dev, "Partition migration not supported\n"); 1226 1220 1227 1221 if (client_reserve) { 1228 1222 if (hostdata->caps.reserve.common.server_support == 1229 - SERVER_SUPPORTS_CAP) 1223 + cpu_to_be16(SERVER_SUPPORTS_CAP)) 1230 1224 dev_info(hostdata->dev, "Client reserve enabled\n"); 1231 1225 else 1232 1226 dev_info(hostdata->dev, "Client reserve not supported\n"); ··· 1259 1251 req = &evt_struct->iu.mad.capabilities; 1260 1252 memset(req, 0, sizeof(*req)); 1261 1253 1262 - hostdata->caps.flags = CAP_LIST_SUPPORTED; 1254 + hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED); 1263 1255 if (hostdata->client_migrated) 1264 - hostdata->caps.flags |= CLIENT_MIGRATED; 1256 + hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED); 1265 1257 
1266 1258 strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), 1267 1259 sizeof(hostdata->caps.name)); ··· 1272 1264 strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); 1273 1265 hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; 1274 1266 1275 - req->common.type = VIOSRP_CAPABILITIES_TYPE; 1276 - req->buffer = hostdata->caps_addr; 1267 + req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); 1268 + req->buffer = cpu_to_be64(hostdata->caps_addr); 1277 1269 1278 - hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; 1279 - hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); 1280 - hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; 1281 - hostdata->caps.migration.ecl = 1; 1270 + hostdata->caps.migration.common.cap_type = 1271 + cpu_to_be32(MIGRATION_CAPABILITIES); 1272 + hostdata->caps.migration.common.length = 1273 + cpu_to_be16(sizeof(hostdata->caps.migration)); 1274 + hostdata->caps.migration.common.server_support = 1275 + cpu_to_be16(SERVER_SUPPORTS_CAP); 1276 + hostdata->caps.migration.ecl = cpu_to_be32(1); 1282 1277 1283 1278 if (client_reserve) { 1284 - hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; 1285 - hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); 1286 - hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; 1287 - hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; 1288 - req->common.length = sizeof(hostdata->caps); 1279 + hostdata->caps.reserve.common.cap_type = 1280 + cpu_to_be32(RESERVATION_CAPABILITIES); 1281 + hostdata->caps.reserve.common.length = 1282 + cpu_to_be16(sizeof(hostdata->caps.reserve)); 1283 + hostdata->caps.reserve.common.server_support = 1284 + cpu_to_be16(SERVER_SUPPORTS_CAP); 1285 + hostdata->caps.reserve.type = 1286 + cpu_to_be32(CLIENT_RESERVE_SCSI_2); 1287 + req->common.length = 1288 + cpu_to_be16(sizeof(hostdata->caps)); 1289 1289 } else 1290 - req->common.length = 
sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); 1290 + req->common.length = cpu_to_be16(sizeof(hostdata->caps) - 1291 + sizeof(hostdata->caps.reserve)); 1291 1292 1292 1293 spin_lock_irqsave(hostdata->host->host_lock, flags); 1293 1294 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) ··· 1314 1297 static void fast_fail_rsp(struct srp_event_struct *evt_struct) 1315 1298 { 1316 1299 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; 1317 - u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; 1300 + u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status); 1318 1301 1319 1302 if (status == VIOSRP_MAD_NOT_SUPPORTED) 1320 1303 dev_err(hostdata->dev, "fast_fail not supported in server\n"); ··· 1351 1334 1352 1335 fast_fail_mad = &evt_struct->iu.mad.fast_fail; 1353 1336 memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); 1354 - fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; 1355 - fast_fail_mad->common.length = sizeof(*fast_fail_mad); 1337 + fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL); 1338 + fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad)); 1356 1339 1357 1340 spin_lock_irqsave(hostdata->host->host_lock, flags); 1358 1341 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); ··· 1379 1362 "host partition %s (%d), OS %d, max io %u\n", 1380 1363 hostdata->madapter_info.srp_version, 1381 1364 hostdata->madapter_info.partition_name, 1382 - hostdata->madapter_info.partition_number, 1383 - hostdata->madapter_info.os_type, 1384 - hostdata->madapter_info.port_max_txu[0]); 1365 + be32_to_cpu(hostdata->madapter_info.partition_number), 1366 + be32_to_cpu(hostdata->madapter_info.os_type), 1367 + be32_to_cpu(hostdata->madapter_info.port_max_txu[0])); 1385 1368 1386 1369 if (hostdata->madapter_info.port_max_txu[0]) 1387 1370 hostdata->host->max_sectors = 1388 - hostdata->madapter_info.port_max_txu[0] >> 9; 1371 + 
be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9; 1389 1372 1390 - if (hostdata->madapter_info.os_type == 3 && 1373 + if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 && 1391 1374 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { 1392 1375 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", 1393 1376 hostdata->madapter_info.srp_version); ··· 1396 1379 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; 1397 1380 } 1398 1381 1399 - if (hostdata->madapter_info.os_type == 3) { 1382 + if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) { 1400 1383 enable_fast_fail(hostdata); 1401 1384 return; 1402 1385 } ··· 1431 1414 req = &evt_struct->iu.mad.adapter_info; 1432 1415 memset(req, 0x00, sizeof(*req)); 1433 1416 1434 - req->common.type = VIOSRP_ADAPTER_INFO_TYPE; 1435 - req->common.length = sizeof(hostdata->madapter_info); 1436 - req->buffer = hostdata->adapter_info_addr; 1417 + req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE); 1418 + req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info)); 1419 + req->buffer = cpu_to_be64(hostdata->adapter_info_addr); 1437 1420 1438 1421 spin_lock_irqsave(hostdata->host->host_lock, flags); 1439 1422 if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) ··· 1518 1501 /* Set up an abort SRP command */ 1519 1502 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); 1520 1503 tsk_mgmt->opcode = SRP_TSK_MGMT; 1521 - tsk_mgmt->lun = ((u64) lun) << 48; 1504 + tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); 1522 1505 tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; 1523 1506 tsk_mgmt->task_tag = (u64) found_evt; 1524 1507 ··· 1641 1624 /* Set up a lun reset SRP command */ 1642 1625 memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); 1643 1626 tsk_mgmt->opcode = SRP_TSK_MGMT; 1644 - tsk_mgmt->lun = ((u64) lun) << 48; 1627 + tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); 1645 1628 tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; 1646 1629 1647 1630 evt->sync_srp = &srp_rsp; ··· 1752 1735 { 
1753 1736 long rc; 1754 1737 unsigned long flags; 1738 + /* The hypervisor copies our tag value here so no byteswapping */ 1755 1739 struct srp_event_struct *evt_struct = 1756 - (struct srp_event_struct *)crq->IU_data_ptr; 1740 + (__force struct srp_event_struct *)crq->IU_data_ptr; 1757 1741 switch (crq->valid) { 1758 1742 case 0xC0: /* initialization */ 1759 1743 switch (crq->format) { ··· 1810 1792 */ 1811 1793 if (!valid_event_struct(&hostdata->pool, evt_struct)) { 1812 1794 dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n", 1813 - (void *)crq->IU_data_ptr); 1795 + evt_struct); 1814 1796 return; 1815 1797 } 1816 1798 1817 1799 if (atomic_read(&evt_struct->free)) { 1818 1800 dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n", 1819 - (void *)crq->IU_data_ptr); 1801 + evt_struct); 1820 1802 return; 1821 1803 } 1822 1804 1823 1805 if (crq->format == VIOSRP_SRP_FORMAT) 1824 - atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta, 1806 + atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta), 1825 1807 &hostdata->request_limit); 1826 1808 1827 1809 del_timer(&evt_struct->timer); ··· 1874 1856 1875 1857 /* Set up a lun reset SRP command */ 1876 1858 memset(host_config, 0x00, sizeof(*host_config)); 1877 - host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; 1878 - host_config->common.length = length; 1879 - host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, 1880 - length, 1881 - DMA_BIDIRECTIONAL); 1859 + host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE); 1860 + host_config->common.length = cpu_to_be16(length); 1861 + addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL); 1882 1862 1883 - if (dma_mapping_error(hostdata->dev, host_config->buffer)) { 1863 + if (dma_mapping_error(hostdata->dev, addr)) { 1884 1864 if (!firmware_has_feature(FW_FEATURE_CMO)) 1885 1865 dev_err(hostdata->dev, 1886 1866 "dma_mapping error getting host config\n"); 1887 1867 
free_event_struct(&hostdata->pool, evt_struct); 1888 1868 return -1; 1889 1869 } 1870 + 1871 + host_config->buffer = cpu_to_be64(addr); 1890 1872 1891 1873 init_completion(&evt_struct->comp); 1892 1874 spin_lock_irqsave(hostdata->host->host_lock, flags);
+23 -23
drivers/scsi/ibmvscsi/viosrp.h
··· 75 75 u8 format; /* SCSI vs out-of-band */ 76 76 u8 reserved; 77 77 u8 status; /* non-scsi failure? (e.g. DMA failure) */ 78 - u16 timeout; /* in seconds */ 79 - u16 IU_length; /* in bytes */ 80 - u64 IU_data_ptr; /* the TCE for transferring data */ 78 + __be16 timeout; /* in seconds */ 79 + __be16 IU_length; /* in bytes */ 80 + __be64 IU_data_ptr; /* the TCE for transferring data */ 81 81 }; 82 82 83 83 /* MADs are Management requests above and beyond the IUs defined in the SRP ··· 124 124 * Common MAD header 125 125 */ 126 126 struct mad_common { 127 - u32 type; 128 - u16 status; 129 - u16 length; 130 - u64 tag; 127 + __be32 type; 128 + __be16 status; 129 + __be16 length; 130 + __be64 tag; 131 131 }; 132 132 133 133 /* ··· 139 139 */ 140 140 struct viosrp_empty_iu { 141 141 struct mad_common common; 142 - u64 buffer; 143 - u32 port; 142 + __be64 buffer; 143 + __be32 port; 144 144 }; 145 145 146 146 struct viosrp_error_log { 147 147 struct mad_common common; 148 - u64 buffer; 148 + __be64 buffer; 149 149 }; 150 150 151 151 struct viosrp_adapter_info { 152 152 struct mad_common common; 153 - u64 buffer; 153 + __be64 buffer; 154 154 }; 155 155 156 156 struct viosrp_host_config { 157 157 struct mad_common common; 158 - u64 buffer; 158 + __be64 buffer; 159 159 }; 160 160 161 161 struct viosrp_fast_fail { ··· 164 164 165 165 struct viosrp_capabilities { 166 166 struct mad_common common; 167 - u64 buffer; 167 + __be64 buffer; 168 168 }; 169 169 170 170 struct mad_capability_common { 171 - u32 cap_type; 172 - u16 length; 173 - u16 server_support; 171 + __be32 cap_type; 172 + __be16 length; 173 + __be16 server_support; 174 174 }; 175 175 176 176 struct mad_reserve_cap { 177 177 struct mad_capability_common common; 178 - u32 type; 178 + __be32 type; 179 179 }; 180 180 181 181 struct mad_migration_cap { 182 182 struct mad_capability_common common; 183 - u32 ecl; 183 + __be32 ecl; 184 184 }; 185 185 186 186 struct capabilities{ 187 - u32 flags; 187 + __be32 flags; 188 
188 char name[SRP_MAX_LOC_LEN]; 189 189 char loc[SRP_MAX_LOC_LEN]; 190 190 struct mad_migration_cap migration; ··· 208 208 struct mad_adapter_info_data { 209 209 char srp_version[8]; 210 210 char partition_name[96]; 211 - u32 partition_number; 212 - u32 mad_version; 213 - u32 os_type; 214 - u32 port_max_txu[8]; /* per-port maximum transfer */ 211 + __be32 partition_number; 212 + __be32 mad_version; 213 + __be32 os_type; 214 + __be32 port_max_txu[8]; /* per-port maximum transfer */ 215 215 }; 216 216 217 217 #endif
+1
drivers/scsi/lpfc/lpfc.h
··· 708 708 uint32_t cfg_multi_ring_type; 709 709 uint32_t cfg_poll; 710 710 uint32_t cfg_poll_tmo; 711 + uint32_t cfg_task_mgmt_tmo; 711 712 uint32_t cfg_use_msi; 712 713 uint32_t cfg_fcp_imax; 713 714 uint32_t cfg_fcp_cpu_map;
+16 -3
drivers/scsi/lpfc/lpfc_attr.c
··· 1865 1865 { \ 1866 1866 if (val >= minval && val <= maxval) {\ 1867 1867 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ 1868 - "3053 lpfc_" #attr " changed from %d to %d\n", \ 1869 - vport->cfg_##attr, val); \ 1868 + "3053 lpfc_" #attr \ 1869 + " changed from %d (x%x) to %d (x%x)\n", \ 1870 + vport->cfg_##attr, vport->cfg_##attr, \ 1871 + val, val); \ 1870 1872 vport->cfg_##attr = val;\ 1871 1873 return 0;\ 1872 1874 }\ ··· 4013 4011 # For [0], FCP commands are issued to Work Queues ina round robin fashion. 4014 4012 # For [1], FCP commands are issued to a Work Queue associated with the 4015 4013 # current CPU. 4014 + # It would be set to 1 by the driver if it's able to set up cpu affinity 4015 + # for FCP I/Os through Work Queue associated with the current CPU. Otherwise, 4016 + # roundrobin scheduling of FCP I/Os through WQs will be used. 4016 4017 */ 4017 - LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for " 4018 + LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for " 4018 4019 "issuing commands [0] - Round Robin, [1] - Current CPU"); 4019 4020 4020 4021 /* ··· 4114 4109 LPFC_ATTR_RW(poll_tmo, 10, 1, 255, 4115 4110 "Milliseconds driver will wait between polling FCP ring"); 4116 4111 4112 + /* 4113 + # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands 4114 + # to complete in seconds. Value range is [5,180], default value is 60. 
4115 + */ 4116 + LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, 4117 + "Maximum time to wait for task management commands to complete"); 4117 4118 /* 4118 4119 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that 4119 4120 # support this feature ··· 4306 4295 &dev_attr_issue_reset, 4307 4296 &dev_attr_lpfc_poll, 4308 4297 &dev_attr_lpfc_poll_tmo, 4298 + &dev_attr_lpfc_task_mgmt_tmo, 4309 4299 &dev_attr_lpfc_use_msi, 4310 4300 &dev_attr_lpfc_fcp_imax, 4311 4301 &dev_attr_lpfc_fcp_cpu_map, ··· 5286 5274 lpfc_topology_init(phba, lpfc_topology); 5287 5275 lpfc_link_speed_init(phba, lpfc_link_speed); 5288 5276 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 5277 + lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo); 5289 5278 lpfc_enable_npiv_init(phba, lpfc_enable_npiv); 5290 5279 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); 5291 5280 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
+72 -18
drivers/scsi/lpfc/lpfc_bsg.c
··· 317 317 } 318 318 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 319 319 320 + /* Close the timeout handler abort window */ 321 + spin_lock_irqsave(&phba->hbalock, flags); 322 + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; 323 + spin_unlock_irqrestore(&phba->hbalock, flags); 324 + 320 325 iocb = &dd_data->context_un.iocb; 321 326 ndlp = iocb->ndlp; 322 327 rmp = iocb->rmp; ··· 392 387 int request_nseg; 393 388 int reply_nseg; 394 389 struct bsg_job_data *dd_data; 390 + unsigned long flags; 395 391 uint32_t creg_val; 396 392 int rc = 0; 397 393 int iocb_stat; ··· 507 501 } 508 502 509 503 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 510 - if (iocb_stat == IOCB_SUCCESS) 504 + 505 + if (iocb_stat == IOCB_SUCCESS) { 506 + spin_lock_irqsave(&phba->hbalock, flags); 507 + /* make sure the I/O had not been completed yet */ 508 + if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { 509 + /* open up abort window to timeout handler */ 510 + cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; 511 + } 512 + spin_unlock_irqrestore(&phba->hbalock, flags); 511 513 return 0; /* done for now */ 512 - else if (iocb_stat == IOCB_BUSY) 514 + } else if (iocb_stat == IOCB_BUSY) { 513 515 rc = -EAGAIN; 514 - else 516 + } else { 515 517 rc = -EIO; 518 + } 516 519 517 520 /* iocb failed so cleanup */ 521 + job->dd_data = NULL; 518 522 519 523 free_rmp: 520 524 lpfc_free_bsg_buffers(phba, rmp); ··· 593 577 } 594 578 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 595 579 580 + /* Close the timeout handler abort window */ 581 + spin_lock_irqsave(&phba->hbalock, flags); 582 + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; 583 + spin_unlock_irqrestore(&phba->hbalock, flags); 584 + 596 585 rsp = &rspiocbq->iocb; 597 586 pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2; 598 587 prsp = (struct lpfc_dmabuf *)pcmd->list.next; ··· 660 639 struct lpfc_iocbq *cmdiocbq; 661 640 uint16_t rpi = 0; 662 641 struct bsg_job_data *dd_data; 642 + unsigned long flags; 663 643 uint32_t 
creg_val; 664 644 int rc = 0; 665 645 ··· 743 721 744 722 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 745 723 746 - if (rc == IOCB_SUCCESS) 724 + if (rc == IOCB_SUCCESS) { 725 + spin_lock_irqsave(&phba->hbalock, flags); 726 + /* make sure the I/O had not been completed/released */ 727 + if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { 728 + /* open up abort window to timeout handler */ 729 + cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; 730 + } 731 + spin_unlock_irqrestore(&phba->hbalock, flags); 747 732 return 0; /* done for now */ 748 - else if (rc == IOCB_BUSY) 733 + } else if (rc == IOCB_BUSY) { 749 734 rc = -EAGAIN; 750 - else 735 + } else { 751 736 rc = -EIO; 737 + } 738 + 739 + /* iocb failed so cleanup */ 740 + job->dd_data = NULL; 752 741 753 742 linkdown_err: 754 - 755 743 cmdiocbq->context1 = ndlp; 756 744 lpfc_els_free_iocb(phba, cmdiocbq); 757 745 ··· 1281 1249 struct lpfc_hba *phba = vport->phba; 1282 1250 struct get_ct_event *event_req; 1283 1251 struct get_ct_event_reply *event_reply; 1284 - struct lpfc_bsg_event *evt; 1252 + struct lpfc_bsg_event *evt, *evt_next; 1285 1253 struct event_data *evt_dat = NULL; 1286 1254 unsigned long flags; 1287 1255 uint32_t rc = 0; ··· 1301 1269 event_reply = (struct get_ct_event_reply *) 1302 1270 job->reply->reply_data.vendor_reply.vendor_rsp; 1303 1271 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1304 - list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1272 + list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) { 1305 1273 if (evt->reg_id == event_req->ev_reg_id) { 1306 1274 if (list_empty(&evt->events_to_get)) 1307 1275 break; ··· 1402 1370 } 1403 1371 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1404 1372 1373 + /* Close the timeout handler abort window */ 1374 + spin_lock_irqsave(&phba->hbalock, flags); 1375 + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; 1376 + spin_unlock_irqrestore(&phba->hbalock, flags); 1377 + 1405 1378 ndlp = dd_data->context_un.iocb.ndlp; 1406 
1379 cmp = cmdiocbq->context2; 1407 1380 bmp = cmdiocbq->context3; ··· 1470 1433 int rc = 0; 1471 1434 struct lpfc_nodelist *ndlp = NULL; 1472 1435 struct bsg_job_data *dd_data; 1436 + unsigned long flags; 1473 1437 uint32_t creg_val; 1474 1438 1475 1439 /* allocate our bsg tracking structure */ ··· 1580 1542 1581 1543 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 1582 1544 1583 - if (rc == IOCB_SUCCESS) 1545 + if (rc == IOCB_SUCCESS) { 1546 + spin_lock_irqsave(&phba->hbalock, flags); 1547 + /* make sure the I/O had not been completed/released */ 1548 + if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) { 1549 + /* open up abort window to timeout handler */ 1550 + ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; 1551 + } 1552 + spin_unlock_irqrestore(&phba->hbalock, flags); 1584 1553 return 0; /* done for now */ 1554 + } 1555 + 1556 + /* iocb failed so cleanup */ 1557 + job->dd_data = NULL; 1585 1558 1586 1559 issue_ct_rsp_exit: 1587 1560 lpfc_sli_release_iocbq(phba, ctiocb); ··· 5333 5284 * remove it from the txq queue and call cancel iocbs. 
5334 5285 * Otherwise, call abort iotag 5335 5286 */ 5336 - 5337 5287 cmdiocb = dd_data->context_un.iocb.cmdiocbq; 5338 - spin_lock_irq(&phba->hbalock); 5288 + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5289 + 5290 + spin_lock_irqsave(&phba->hbalock, flags); 5291 + /* make sure the I/O abort window is still open */ 5292 + if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) { 5293 + spin_unlock_irqrestore(&phba->hbalock, flags); 5294 + return -EAGAIN; 5295 + } 5339 5296 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, 5340 5297 list) { 5341 5298 if (check_iocb == cmdiocb) { ··· 5351 5296 } 5352 5297 if (list_empty(&completions)) 5353 5298 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5354 - spin_unlock_irq(&phba->hbalock); 5355 - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5299 + spin_unlock_irqrestore(&phba->hbalock, flags); 5356 5300 if (!list_empty(&completions)) { 5357 5301 lpfc_sli_cancel_iocbs(phba, &completions, 5358 5302 IOSTAT_LOCAL_REJECT, ··· 5375 5321 * remove it from the txq queue and call cancel iocbs. 5376 5322 * Otherwise, call abort iotag. 5377 5323 */ 5378 - 5379 5324 cmdiocb = dd_data->context_un.menlo.cmdiocbq; 5380 - spin_lock_irq(&phba->hbalock); 5325 + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5326 + 5327 + spin_lock_irqsave(&phba->hbalock, flags); 5381 5328 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, 5382 5329 list) { 5383 5330 if (check_iocb == cmdiocb) { ··· 5388 5333 } 5389 5334 if (list_empty(&completions)) 5390 5335 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5391 - spin_unlock_irq(&phba->hbalock); 5392 - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5336 + spin_unlock_irqrestore(&phba->hbalock, flags); 5393 5337 if (!list_empty(&completions)) { 5394 5338 lpfc_sli_cancel_iocbs(phba, &completions, 5395 5339 IOSTAT_LOCAL_REJECT,
+10 -1
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 4437 4437 if (!ndlp) 4438 4438 return; 4439 4439 lpfc_issue_els_logo(vport, ndlp, 0); 4440 + mempool_free(pmb, phba->mbox_mem_pool); 4440 4441 } 4441 4442 4442 4443 /* ··· 4457 4456 int rc; 4458 4457 uint16_t rpi; 4459 4458 4460 - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 4459 + if (ndlp->nlp_flag & NLP_RPI_REGISTERED || 4460 + ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { 4461 + if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 4462 + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 4463 + "3366 RPI x%x needs to be " 4464 + "unregistered nlp_flag x%x " 4465 + "did x%x\n", 4466 + ndlp->nlp_rpi, ndlp->nlp_flag, 4467 + ndlp->nlp_DID); 4461 4468 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4462 4469 if (mbox) { 4463 4470 /* SLI4 ports require the physical rpi value. */
+50 -42
drivers/scsi/lpfc/lpfc_init.c
··· 3031 3031 phba->sli4_hba.scsi_xri_max); 3032 3032 3033 3033 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3034 - spin_lock_irq(&phba->scsi_buf_list_put_lock); 3034 + spin_lock(&phba->scsi_buf_list_put_lock); 3035 3035 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); 3036 3036 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); 3037 - spin_unlock_irq(&phba->scsi_buf_list_put_lock); 3037 + spin_unlock(&phba->scsi_buf_list_put_lock); 3038 3038 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3039 3039 3040 3040 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { ··· 3070 3070 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3071 3071 } 3072 3072 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3073 - spin_lock_irq(&phba->scsi_buf_list_put_lock); 3073 + spin_lock(&phba->scsi_buf_list_put_lock); 3074 3074 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); 3075 3075 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 3076 - spin_unlock_irq(&phba->scsi_buf_list_put_lock); 3076 + spin_unlock(&phba->scsi_buf_list_put_lock); 3077 3077 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3078 3078 3079 3079 return 0; ··· 4859 4859 struct lpfc_mqe *mqe; 4860 4860 int longs; 4861 4861 4862 + /* Get all the module params for configuring this host */ 4863 + lpfc_get_cfgparam(phba); 4864 + 4862 4865 /* Before proceed, wait for POST done and device ready */ 4863 4866 rc = lpfc_sli4_post_status_check(phba); 4864 4867 if (rc) ··· 4905 4902 sizeof(struct lpfc_mbox_ext_buf_ctx)); 4906 4903 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 4907 4904 4908 - /* 4909 - * We need to do a READ_CONFIG mailbox command here before 4910 - * calling lpfc_get_cfgparam. For VFs this will report the 4911 - * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. 4912 - * All of the resources allocated 4913 - * for this Port are tied to these values. 
4914 - */ 4915 - /* Get all the module params for configuring this host */ 4916 - lpfc_get_cfgparam(phba); 4917 4905 phba->max_vpi = LPFC_MAX_VPI; 4918 4906 4919 4907 /* This will be set to correct value after the read_config mbox */ ··· 7135 7141 phba->sli4_hba.fcp_wq = NULL; 7136 7142 } 7137 7143 7138 - if (phba->pci_bar0_memmap_p) { 7139 - iounmap(phba->pci_bar0_memmap_p); 7140 - phba->pci_bar0_memmap_p = NULL; 7141 - } 7142 - if (phba->pci_bar2_memmap_p) { 7143 - iounmap(phba->pci_bar2_memmap_p); 7144 - phba->pci_bar2_memmap_p = NULL; 7145 - } 7146 - if (phba->pci_bar4_memmap_p) { 7147 - iounmap(phba->pci_bar4_memmap_p); 7148 - phba->pci_bar4_memmap_p = NULL; 7149 - } 7150 - 7151 7144 /* Release FCP CQ mapping array */ 7152 7145 if (phba->sli4_hba.fcp_cq_map != NULL) { 7153 7146 kfree(phba->sli4_hba.fcp_cq_map); ··· 7923 7942 * particular PCI BARs regions is dependent on the type of 7924 7943 * SLI4 device. 7925 7944 */ 7926 - if (pci_resource_start(pdev, 0)) { 7927 - phba->pci_bar0_map = pci_resource_start(pdev, 0); 7928 - bar0map_len = pci_resource_len(pdev, 0); 7945 + if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 7946 + phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 7947 + bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 7929 7948 7930 7949 /* 7931 7950 * Map SLI4 PCI Config Space Register base to a kernel virtual ··· 7939 7958 "registers.\n"); 7940 7959 goto out; 7941 7960 } 7961 + phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 7942 7962 /* Set up BAR0 PCI config space register memory map */ 7943 7963 lpfc_sli4_bar0_register_memmap(phba, if_type); 7944 7964 } else { ··· 7962 7980 } 7963 7981 7964 7982 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7965 - (pci_resource_start(pdev, 2))) { 7983 + (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 7966 7984 /* 7967 7985 * Map SLI4 if type 0 HBA Control Register base to a kernel 7968 7986 * virtual address and setup the registers. 
7969 7987 */ 7970 - phba->pci_bar1_map = pci_resource_start(pdev, 2); 7971 - bar1map_len = pci_resource_len(pdev, 2); 7988 + phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 7989 + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 7972 7990 phba->sli4_hba.ctrl_regs_memmap_p = 7973 7991 ioremap(phba->pci_bar1_map, bar1map_len); 7974 7992 if (!phba->sli4_hba.ctrl_regs_memmap_p) { ··· 7976 7994 "ioremap failed for SLI4 HBA control registers.\n"); 7977 7995 goto out_iounmap_conf; 7978 7996 } 7997 + phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; 7979 7998 lpfc_sli4_bar1_register_memmap(phba); 7980 7999 } 7981 8000 7982 8001 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 7983 - (pci_resource_start(pdev, 4))) { 8002 + (pci_resource_start(pdev, PCI_64BIT_BAR4))) { 7984 8003 /* 7985 8004 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 7986 8005 * virtual address and setup the registers. 7987 8006 */ 7988 - phba->pci_bar2_map = pci_resource_start(pdev, 4); 7989 - bar2map_len = pci_resource_len(pdev, 4); 8007 + phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 8008 + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 7990 8009 phba->sli4_hba.drbl_regs_memmap_p = 7991 8010 ioremap(phba->pci_bar2_map, bar2map_len); 7992 8011 if (!phba->sli4_hba.drbl_regs_memmap_p) { ··· 7995 8012 "ioremap failed for SLI4 HBA doorbell registers.\n"); 7996 8013 goto out_iounmap_ctrl; 7997 8014 } 8015 + phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 7998 8016 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 7999 8017 if (error) 8000 8018 goto out_iounmap_all; ··· 8389 8405 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) 8390 8406 { 8391 8407 int i, idx, saved_chann, used_chann, cpu, phys_id; 8392 - int max_phys_id, num_io_channel, first_cpu; 8408 + int max_phys_id, min_phys_id; 8409 + int num_io_channel, first_cpu, chan; 8393 8410 struct lpfc_vector_map_info *cpup; 8394 8411 #ifdef CONFIG_X86 8395 8412 struct 
cpuinfo_x86 *cpuinfo; ··· 8408 8423 phba->sli4_hba.num_present_cpu)); 8409 8424 8410 8425 max_phys_id = 0; 8426 + min_phys_id = 0xff; 8411 8427 phys_id = 0; 8412 8428 num_io_channel = 0; 8413 8429 first_cpu = LPFC_VECTOR_MAP_EMPTY; ··· 8432 8446 8433 8447 if (cpup->phys_id > max_phys_id) 8434 8448 max_phys_id = cpup->phys_id; 8449 + if (cpup->phys_id < min_phys_id) 8450 + min_phys_id = cpup->phys_id; 8435 8451 cpup++; 8436 8452 } 8437 8453 8454 + phys_id = min_phys_id; 8438 8455 /* Now associate the HBA vectors with specific CPUs */ 8439 8456 for (idx = 0; idx < vectors; idx++) { 8440 8457 cpup = phba->sli4_hba.cpu_map; ··· 8448 8459 for (i = 1; i < max_phys_id; i++) { 8449 8460 phys_id++; 8450 8461 if (phys_id > max_phys_id) 8451 - phys_id = 0; 8462 + phys_id = min_phys_id; 8452 8463 cpu = lpfc_find_next_cpu(phba, phys_id); 8453 8464 if (cpu == LPFC_VECTOR_MAP_EMPTY) 8454 8465 continue; 8455 8466 goto found; 8467 + } 8468 + 8469 + /* Use round robin for scheduling */ 8470 + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; 8471 + chan = 0; 8472 + cpup = phba->sli4_hba.cpu_map; 8473 + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 8474 + cpup->channel_id = chan; 8475 + cpup++; 8476 + chan++; 8477 + if (chan >= phba->cfg_fcp_io_channel) 8478 + chan = 0; 8456 8479 } 8457 8480 8458 8481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, ··· 8504 8503 /* Spread vector mapping across multple physical CPU nodes */ 8505 8504 phys_id++; 8506 8505 if (phys_id > max_phys_id) 8507 - phys_id = 0; 8506 + phys_id = min_phys_id; 8508 8507 } 8509 8508 8510 8509 /* ··· 8514 8513 * Base the remaining IO channel assigned, to IO channels already 8515 8514 * assigned to other CPUs on the same phys_id. 8516 8515 */ 8517 - for (i = 0; i <= max_phys_id; i++) { 8516 + for (i = min_phys_id; i <= max_phys_id; i++) { 8518 8517 /* 8519 8518 * If there are no io channels already mapped to 8520 8519 * this phys_id, just round robin thru the io_channels. 
··· 8596 8595 if (num_io_channel != phba->sli4_hba.num_present_cpu) 8597 8596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8598 8597 "3333 Set affinity mismatch:" 8599 - "%d chann != %d cpus: %d vactors\n", 8598 + "%d chann != %d cpus: %d vectors\n", 8600 8599 num_io_channel, phba->sli4_hba.num_present_cpu, 8601 8600 vectors); 8602 8601 8602 + /* Enable using cpu affinity for scheduling */ 8603 8603 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; 8604 8604 return 1; 8605 8605 } ··· 8691 8689 8692 8690 cfg_fail_out: 8693 8691 /* free the irq already requested */ 8694 - for (--index; index >= 0; index--) 8692 + for (--index; index >= 0; index--) { 8693 + irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 8694 + vector, NULL); 8695 8695 free_irq(phba->sli4_hba.msix_entries[index].vector, 8696 8696 &phba->sli4_hba.fcp_eq_hdl[index]); 8697 + } 8697 8698 8698 8699 msi_fail_out: 8699 8700 /* Unconfigure MSI-X capability structure */ ··· 8717 8712 int index; 8718 8713 8719 8714 /* Free up MSI-X multi-message vectors */ 8720 - for (index = 0; index < phba->cfg_fcp_io_channel; index++) 8715 + for (index = 0; index < phba->cfg_fcp_io_channel; index++) { 8716 + irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 8717 + vector, NULL); 8721 8718 free_irq(phba->sli4_hba.msix_entries[index].vector, 8722 8719 &phba->sli4_hba.fcp_eq_hdl[index]); 8720 + } 8723 8721 8724 8722 /* Disable MSI-X */ 8725 8723 pci_disable_msix(phba->pcidev);
+32 -23
drivers/scsi/lpfc/lpfc_scsi.c
··· 926 926 927 927 /* get all SCSI buffers need to repost to a local list */ 928 928 spin_lock_irq(&phba->scsi_buf_list_get_lock); 929 - spin_lock_irq(&phba->scsi_buf_list_put_lock); 929 + spin_lock(&phba->scsi_buf_list_put_lock); 930 930 list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); 931 931 list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); 932 - spin_unlock_irq(&phba->scsi_buf_list_put_lock); 932 + spin_unlock(&phba->scsi_buf_list_put_lock); 933 933 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 934 934 935 935 /* post the list of scsi buffer sgls to port if available */ ··· 1000 1000 } 1001 1001 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 1002 1002 1003 - /* Page alignment is CRITICAL, double check to be sure */ 1004 - if (((unsigned long)(psb->data) & 1005 - (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) { 1003 + /* 1004 + * 4K Page alignment is CRITICAL to BlockGuard, double check 1005 + * to be sure. 1006 + */ 1007 + if (phba->cfg_enable_bg && (((unsigned long)(psb->data) & 1008 + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { 1006 1009 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 1007 1010 psb->data, psb->dma_handle); 1008 1011 kfree(psb); ··· 1137 1134 { 1138 1135 struct lpfc_scsi_buf * lpfc_cmd = NULL; 1139 1136 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; 1140 - unsigned long gflag = 0; 1141 - unsigned long pflag = 0; 1137 + unsigned long iflag = 0; 1142 1138 1143 - spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); 1139 + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); 1144 1140 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, 1145 1141 list); 1146 1142 if (!lpfc_cmd) { 1147 - spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); 1143 + spin_lock(&phba->scsi_buf_list_put_lock); 1148 1144 list_splice(&phba->lpfc_scsi_buf_list_put, 1149 1145 &phba->lpfc_scsi_buf_list_get); 1150 1146 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 1151 1147 
list_remove_head(scsi_buf_list_get, lpfc_cmd, 1152 1148 struct lpfc_scsi_buf, list); 1153 - spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); 1149 + spin_unlock(&phba->scsi_buf_list_put_lock); 1154 1150 } 1155 - spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); 1151 + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); 1156 1152 return lpfc_cmd; 1157 1153 } 1158 1154 /** ··· 1169 1167 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1170 1168 { 1171 1169 struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; 1172 - unsigned long gflag = 0; 1173 - unsigned long pflag = 0; 1170 + unsigned long iflag = 0; 1174 1171 int found = 0; 1175 1172 1176 - spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); 1173 + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); 1177 1174 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, 1178 1175 &phba->lpfc_scsi_buf_list_get, list) { 1179 1176 if (lpfc_test_rrq_active(phba, ndlp, ··· 1183 1182 break; 1184 1183 } 1185 1184 if (!found) { 1186 - spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); 1185 + spin_lock(&phba->scsi_buf_list_put_lock); 1187 1186 list_splice(&phba->lpfc_scsi_buf_list_put, 1188 1187 &phba->lpfc_scsi_buf_list_get); 1189 1188 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 1190 - spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); 1189 + spin_unlock(&phba->scsi_buf_list_put_lock); 1191 1190 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, 1192 1191 &phba->lpfc_scsi_buf_list_get, list) { 1193 1192 if (lpfc_test_rrq_active( ··· 1198 1197 break; 1199 1198 } 1200 1199 } 1201 - spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); 1200 + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); 1202 1201 if (!found) 1203 1202 return NULL; 1204 1203 return lpfc_cmd; ··· 3967 3966 3968 3967 /* 3969 3968 * Check SLI validation that all the transfer was actually done 3970 - * (fcpi_parm should be zero). 3969 + * (fcpi_parm should be zero). 
Apply check only to reads. 3971 3970 */ 3972 - } else if (fcpi_parm) { 3971 + } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 3973 3972 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 3974 - "9029 FCP Data Transfer Check Error: " 3973 + "9029 FCP Read Check Error Data: " 3975 3974 "x%x x%x x%x x%x x%x\n", 3976 3975 be32_to_cpu(fcpcmd->fcpDl), 3977 3976 be32_to_cpu(fcprsp->rspResId), ··· 4343 4342 char tag[2]; 4344 4343 uint8_t *ptr; 4345 4344 bool sli4; 4345 + uint32_t fcpdl; 4346 4346 4347 4347 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 4348 4348 return; ··· 4391 4389 iocb_cmd->ulpPU = PARM_READ_CHECK; 4392 4390 if (vport->cfg_first_burst_size && 4393 4391 (pnode->nlp_flag & NLP_FIRSTBURST)) { 4394 - piocbq->iocb.un.fcpi.fcpi_XRdy = 4395 - vport->cfg_first_burst_size; 4392 + fcpdl = scsi_bufflen(scsi_cmnd); 4393 + if (fcpdl < vport->cfg_first_burst_size) 4394 + piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl; 4395 + else 4396 + piocbq->iocb.un.fcpi.fcpi_XRdy = 4397 + vport->cfg_first_burst_size; 4396 4398 } 4397 4399 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4398 4400 phba->fc4OutputRequests++; ··· 4884 4878 goto out_unlock; 4885 4879 } 4886 4880 4881 + /* Indicate the IO is being aborted by the driver. */ 4882 + iocb->iocb_flag |= LPFC_DRIVER_ABORTED; 4883 + 4887 4884 /* 4888 4885 * The scsi command can not be in txq and it is in flight because the 4889 4886 * pCmd is still pointig at the SCSI command we have to abort. There ··· 5015 5006 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); 5016 5007 if (lpfc_cmd == NULL) 5017 5008 return FAILED; 5018 - lpfc_cmd->timeout = 60; 5009 + lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; 5019 5010 lpfc_cmd->rdata = rdata; 5020 5011 5021 5012 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+14 -19
drivers/scsi/lpfc/lpfc_sli.c
··· 9831 9831 abort_cmd) != 0) 9832 9832 continue; 9833 9833 9834 + /* 9835 + * If the iocbq is already being aborted, don't take a second 9836 + * action, but do count it. 9837 + */ 9838 + if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 9839 + continue; 9840 + 9834 9841 /* issue ABTS for this IOCB based on iotag */ 9835 9842 abtsiocb = lpfc_sli_get_iocbq(phba); 9836 9843 if (abtsiocb == NULL) { 9837 9844 errcnt++; 9838 9845 continue; 9839 9846 } 9847 + 9848 + /* indicate the IO is being aborted by the driver. */ 9849 + iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 9840 9850 9841 9851 cmd = &iocbq->iocb; 9842 9852 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; ··· 9857 9847 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 9858 9848 abtsiocb->iocb.ulpLe = 1; 9859 9849 abtsiocb->iocb.ulpClass = cmd->ulpClass; 9860 - abtsiocb->vport = phba->pport; 9850 + abtsiocb->vport = vport; 9861 9851 9862 9852 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9863 9853 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; ··· 12243 12233 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 12244 12234 { 12245 12235 struct pci_dev *pdev; 12246 - unsigned long bar_map, bar_map_len; 12247 12236 12248 12237 if (!phba->pcidev) 12249 12238 return NULL; ··· 12251 12242 12252 12243 switch (pci_barset) { 12253 12244 case WQ_PCI_BAR_0_AND_1: 12254 - if (!phba->pci_bar0_memmap_p) { 12255 - bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 12256 - bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 12257 - phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len); 12258 - } 12259 12245 return phba->pci_bar0_memmap_p; 12260 12246 case WQ_PCI_BAR_2_AND_3: 12261 - if (!phba->pci_bar2_memmap_p) { 12262 - bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 12263 - bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 12264 - phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len); 12265 - } 12266 12247 return phba->pci_bar2_memmap_p; 12267 12248 case WQ_PCI_BAR_4_AND_5: 12268 
- if (!phba->pci_bar4_memmap_p) { 12269 - bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 12270 - bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 12271 - phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len); 12272 - } 12273 12249 return phba->pci_bar4_memmap_p; 12274 12250 default: 12275 12251 break; ··· 15802 15808 void 15803 15809 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 15804 15810 { 15805 - struct lpfc_fcf_pri *fcf_pri; 15811 + struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 15806 15812 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 15807 15813 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 15808 15814 "2762 FCF (x%x) reached driver's book " ··· 15812 15818 } 15813 15819 /* Clear the eligible FCF record index bmask */ 15814 15820 spin_lock_irq(&phba->hbalock); 15815 - list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 15821 + list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 15822 + list) { 15816 15823 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 15817 15824 list_del_init(&fcf_pri->list); 15818 15825 break;
+2 -2
drivers/scsi/lpfc/lpfc_sli.h
··· 58 58 59 59 IOCB_t iocb; /* IOCB cmd */ 60 60 uint8_t retry; /* retry counter for IOCB cmd - if needed */ 61 - uint16_t iocb_flag; 61 + uint32_t iocb_flag; 62 62 #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ 63 63 #define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ 64 64 #define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ ··· 73 73 #define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */ 74 74 #define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */ 75 75 #define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */ 76 + #define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */ 76 77 77 78 #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ 78 79 #define LPFC_FIP_ELS_ID_SHIFT 14 79 80 80 - uint8_t rsvd2; 81 81 uint32_t drvrTimeout; /* driver timeout in seconds */ 82 82 uint32_t fcp_wqidx; /* index to FCP work queue */ 83 83 struct lpfc_vport *vport;/* virtual port pointer */
+1 -1
drivers/scsi/lpfc/lpfc_sli4.h
··· 523 523 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ 524 524 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ 525 525 526 - uint8_t fw_func_mode; /* FW function protocol mode */ 526 + uint32_t fw_func_mode; /* FW function protocol mode */ 527 527 uint32_t ulp0_mode; /* ULP0 protocol mode */ 528 528 uint32_t ulp1_mode; /* ULP1 protocol mode */ 529 529
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 18 18 * included with this package. * 19 19 *******************************************************************/ 20 20 21 - #define LPFC_DRIVER_VERSION "8.3.41" 21 + #define LPFC_DRIVER_VERSION "8.3.42" 22 22 #define LPFC_DRIVER_NAME "lpfc" 23 23 24 24 /* Used for SLI 2/3 */
+98 -19
drivers/scsi/megaraid/megaraid_sas.h
··· 33 33 /* 34 34 * MegaRAID SAS Driver meta data 35 35 */ 36 - #define MEGASAS_VERSION "06.600.18.00-rc1" 37 - #define MEGASAS_RELDATE "May. 15, 2013" 38 - #define MEGASAS_EXT_VERSION "Wed. May. 15 17:00:00 PDT 2013" 36 + #define MEGASAS_VERSION "06.700.06.00-rc1" 37 + #define MEGASAS_RELDATE "Aug. 31, 2013" 38 + #define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013" 39 39 40 40 /* 41 41 * Device IDs ··· 170 170 171 171 #define MR_DCMD_CTRL_GET_INFO 0x01010000 172 172 #define MR_DCMD_LD_GET_LIST 0x03010000 173 + #define MR_DCMD_LD_LIST_QUERY 0x03010100 173 174 174 175 #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 175 176 #define MR_FLUSH_CTRL_CACHE 0x01 ··· 346 345 MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, 347 346 }; 348 347 348 + enum MR_LD_QUERY_TYPE { 349 + MR_LD_QUERY_TYPE_ALL = 0, 350 + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1, 351 + MR_LD_QUERY_TYPE_USED_TGT_IDS = 2, 352 + MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3, 353 + MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4, 354 + }; 355 + 356 + 349 357 #define MR_EVT_CFG_CLEARED 0x0004 350 358 #define MR_EVT_LD_STATE_CHANGE 0x0051 351 359 #define MR_EVT_PD_INSERTED 0x005b ··· 445 435 } ldList[MAX_LOGICAL_DRIVES]; 446 436 } __packed; 447 437 438 + struct MR_LD_TARGETID_LIST { 439 + u32 size; 440 + u32 count; 441 + u8 pad[3]; 442 + u8 targetId[MAX_LOGICAL_DRIVES]; 443 + }; 444 + 445 + 448 446 /* 449 447 * SAS controller properties 450 448 */ ··· 492 474 * a bit in the following structure. 
493 475 */ 494 476 struct { 495 - u32 copyBackDisabled : 1; 496 - u32 SMARTerEnabled : 1; 497 - u32 prCorrectUnconfiguredAreas : 1; 498 - u32 useFdeOnly : 1; 499 - u32 disableNCQ : 1; 500 - u32 SSDSMARTerEnabled : 1; 501 - u32 SSDPatrolReadEnabled : 1; 502 - u32 enableSpinDownUnconfigured : 1; 503 - u32 autoEnhancedImport : 1; 504 - u32 enableSecretKeyControl : 1; 505 - u32 disableOnlineCtrlReset : 1; 506 - u32 allowBootWithPinnedCache : 1; 507 - u32 disableSpinDownHS : 1; 508 - u32 enableJBOD : 1; 509 - u32 reserved :18; 477 + #if defined(__BIG_ENDIAN_BITFIELD) 478 + u32 reserved:18; 479 + u32 enableJBOD:1; 480 + u32 disableSpinDownHS:1; 481 + u32 allowBootWithPinnedCache:1; 482 + u32 disableOnlineCtrlReset:1; 483 + u32 enableSecretKeyControl:1; 484 + u32 autoEnhancedImport:1; 485 + u32 enableSpinDownUnconfigured:1; 486 + u32 SSDPatrolReadEnabled:1; 487 + u32 SSDSMARTerEnabled:1; 488 + u32 disableNCQ:1; 489 + u32 useFdeOnly:1; 490 + u32 prCorrectUnconfiguredAreas:1; 491 + u32 SMARTerEnabled:1; 492 + u32 copyBackDisabled:1; 493 + #else 494 + u32 copyBackDisabled:1; 495 + u32 SMARTerEnabled:1; 496 + u32 prCorrectUnconfiguredAreas:1; 497 + u32 useFdeOnly:1; 498 + u32 disableNCQ:1; 499 + u32 SSDSMARTerEnabled:1; 500 + u32 SSDPatrolReadEnabled:1; 501 + u32 enableSpinDownUnconfigured:1; 502 + u32 autoEnhancedImport:1; 503 + u32 enableSecretKeyControl:1; 504 + u32 disableOnlineCtrlReset:1; 505 + u32 allowBootWithPinnedCache:1; 506 + u32 disableSpinDownHS:1; 507 + u32 enableJBOD:1; 508 + u32 reserved:18; 509 + #endif 510 510 } OnOffProperties; 511 511 u8 autoSnapVDSpace; 512 512 u8 viewSpace; ··· 838 802 u16 cacheMemorySize; /*7A2h */ 839 803 840 804 struct { /*7A4h */ 805 + #if defined(__BIG_ENDIAN_BITFIELD) 806 + u32 reserved:11; 807 + u32 supportUnevenSpans:1; 808 + u32 dedicatedHotSparesLimited:1; 809 + u32 headlessMode:1; 810 + u32 supportEmulatedDrives:1; 811 + u32 supportResetNow:1; 812 + u32 realTimeScheduler:1; 813 + u32 supportSSDPatrolRead:1; 814 + u32 
supportPerfTuning:1; 815 + u32 disableOnlinePFKChange:1; 816 + u32 supportJBOD:1; 817 + u32 supportBootTimePFKChange:1; 818 + u32 supportSetLinkSpeed:1; 819 + u32 supportEmergencySpares:1; 820 + u32 supportSuspendResumeBGops:1; 821 + u32 blockSSDWriteCacheChange:1; 822 + u32 supportShieldState:1; 823 + u32 supportLdBBMInfo:1; 824 + u32 supportLdPIType3:1; 825 + u32 supportLdPIType2:1; 826 + u32 supportLdPIType1:1; 827 + u32 supportPIcontroller:1; 828 + #else 841 829 u32 supportPIcontroller:1; 842 830 u32 supportLdPIType1:1; 843 831 u32 supportLdPIType2:1; ··· 887 827 888 828 u32 supportUnevenSpans:1; 889 829 u32 reserved:11; 830 + #endif 890 831 } adapterOperations2; 891 832 892 833 u8 driverVersion[32]; /*7A8h */ ··· 924 863 * =============================== 925 864 */ 926 865 #define MEGASAS_MAX_PD_CHANNELS 2 927 - #define MEGASAS_MAX_LD_CHANNELS 2 866 + #define MEGASAS_MAX_LD_CHANNELS 1 928 867 #define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ 929 868 MEGASAS_MAX_LD_CHANNELS) 930 869 #define MEGASAS_MAX_DEV_PER_CHANNEL 128 ··· 1112 1051 1113 1052 typedef union _MFI_CAPABILITIES { 1114 1053 struct { 1054 + #if defined(__BIG_ENDIAN_BITFIELD) 1055 + u32 reserved:30; 1056 + u32 support_additional_msix:1; 1057 + u32 support_fp_remote_lun:1; 1058 + #else 1115 1059 u32 support_fp_remote_lun:1; 1116 1060 u32 support_additional_msix:1; 1117 1061 u32 reserved:30; 1062 + #endif 1118 1063 } mfi_capabilities; 1119 1064 u32 reg; 1120 1065 } MFI_CAPABILITIES; ··· 1722 1655 struct megasas_instance *instance[MAX_MGMT_ADAPTERS]; 1723 1656 int max_index; 1724 1657 }; 1658 + 1659 + u8 1660 + MR_BuildRaidContext(struct megasas_instance *instance, 1661 + struct IO_REQUEST_INFO *io_info, 1662 + struct RAID_CONTEXT *pRAID_Context, 1663 + struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN); 1664 + u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); 1665 + struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); 1666 + u16 MR_ArPdGet(u32 ar, u32 arm, 
struct MR_FW_RAID_MAP_ALL *map); 1667 + u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map); 1668 + u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map); 1669 + u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); 1725 1670 1726 1671 #endif /*LSI_MEGARAID_SAS_H */
+275 -142
drivers/scsi/megaraid/megaraid_sas_base.c
··· 18 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 19 * 20 20 * FILE: megaraid_sas_base.c 21 - * Version : 06.600.18.00-rc1 21 + * Version : 06.700.06.00-rc1 22 22 * 23 23 * Authors: LSI Corporation 24 24 * Sreenivas Bagalkote ··· 92 92 93 93 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 94 94 static int megasas_get_pd_list(struct megasas_instance *instance); 95 + static int megasas_ld_list_query(struct megasas_instance *instance, 96 + u8 query_type); 95 97 static int megasas_issue_init_mfi(struct megasas_instance *instance); 96 98 static int megasas_register_aen(struct megasas_instance *instance, 97 99 u32 seq_num, u32 class_locale_word); ··· 376 374 megasas_check_reset_xscale(struct megasas_instance *instance, 377 375 struct megasas_register_set __iomem *regs) 378 376 { 379 - u32 consumer; 380 - consumer = *instance->consumer; 381 377 382 378 if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && 383 - (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) { 379 + (le32_to_cpu(*instance->consumer) == 380 + MEGASAS_ADPRESET_INPROG_SIGN)) 384 381 return 1; 385 - } 386 382 return 0; 387 383 } 388 384 ··· 629 629 { 630 630 unsigned long flags; 631 631 spin_lock_irqsave(&instance->hba_lock, flags); 632 - writel(0, &(regs)->inbound_high_queue_port); 633 - writel((frame_phys_addr | (frame_count<<1))|1, 634 - &(regs)->inbound_low_queue_port); 632 + writel(upper_32_bits(frame_phys_addr), 633 + &(regs)->inbound_high_queue_port); 634 + writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, 635 + &(regs)->inbound_low_queue_port); 635 636 spin_unlock_irqrestore(&instance->hba_lock, flags); 636 637 } 637 638 ··· 880 879 881 880 struct megasas_header *frame_hdr = &cmd->frame->hdr; 882 881 883 - frame_hdr->cmd_status = 0xFF; 884 - frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 882 + frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE; 883 + frame_hdr->flags |= 
cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 885 884 886 885 /* 887 886 * Issue the frame using inbound queue port ··· 945 944 */ 946 945 abort_fr->cmd = MFI_CMD_ABORT; 947 946 abort_fr->cmd_status = 0xFF; 948 - abort_fr->flags = 0; 949 - abort_fr->abort_context = cmd_to_abort->index; 950 - abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; 951 - abort_fr->abort_mfi_phys_addr_hi = 0; 947 + abort_fr->flags = cpu_to_le16(0); 948 + abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); 949 + abort_fr->abort_mfi_phys_addr_lo = 950 + cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr)); 951 + abort_fr->abort_mfi_phys_addr_hi = 952 + cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); 952 953 953 954 cmd->sync_cmd = 1; 954 955 cmd->cmd_status = 0xFF; ··· 989 986 990 987 if (sge_count) { 991 988 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 992 - mfi_sgl->sge32[i].length = sg_dma_len(os_sgl); 993 - mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl); 989 + mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl)); 990 + mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl)); 994 991 } 995 992 } 996 993 return sge_count; ··· 1018 1015 1019 1016 if (sge_count) { 1020 1017 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1021 - mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); 1022 - mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); 1018 + mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl)); 1019 + mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl)); 1023 1020 } 1024 1021 } 1025 1022 return sge_count; ··· 1046 1043 1047 1044 if (sge_count) { 1048 1045 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1049 - mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); 1046 + mfi_sgl->sge_skinny[i].length = 1047 + cpu_to_le32(sg_dma_len(os_sgl)); 1050 1048 mfi_sgl->sge_skinny[i].phys_addr = 1051 - sg_dma_address(os_sgl); 1052 - mfi_sgl->sge_skinny[i].flag = 0; 1049 + cpu_to_le64(sg_dma_address(os_sgl)); 1050 + 
mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0); 1053 1051 } 1054 1052 } 1055 1053 return sge_count; ··· 1159 1155 pthru->cdb_len = scp->cmd_len; 1160 1156 pthru->timeout = 0; 1161 1157 pthru->pad_0 = 0; 1162 - pthru->flags = flags; 1163 - pthru->data_xfer_len = scsi_bufflen(scp); 1158 + pthru->flags = cpu_to_le16(flags); 1159 + pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); 1164 1160 1165 1161 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1166 1162 ··· 1172 1168 if ((scp->request->timeout / HZ) > 0xFFFF) 1173 1169 pthru->timeout = 0xFFFF; 1174 1170 else 1175 - pthru->timeout = scp->request->timeout / HZ; 1171 + pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); 1176 1172 } 1177 1173 1178 1174 /* 1179 1175 * Construct SGL 1180 1176 */ 1181 1177 if (instance->flag_ieee == 1) { 1182 - pthru->flags |= MFI_FRAME_SGL64; 1178 + pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1183 1179 pthru->sge_count = megasas_make_sgl_skinny(instance, scp, 1184 1180 &pthru->sgl); 1185 1181 } else if (IS_DMA64) { 1186 - pthru->flags |= MFI_FRAME_SGL64; 1182 + pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1187 1183 pthru->sge_count = megasas_make_sgl64(instance, scp, 1188 1184 &pthru->sgl); 1189 1185 } else ··· 1200 1196 * Sense info specific 1201 1197 */ 1202 1198 pthru->sense_len = SCSI_SENSE_BUFFERSIZE; 1203 - pthru->sense_buf_phys_addr_hi = 0; 1204 - pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 1199 + pthru->sense_buf_phys_addr_hi = 1200 + cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); 1201 + pthru->sense_buf_phys_addr_lo = 1202 + cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); 1205 1203 1206 1204 /* 1207 1205 * Compute the total number of frames this command consumes. FW uses ··· 1254 1248 ldio->timeout = 0; 1255 1249 ldio->reserved_0 = 0; 1256 1250 ldio->pad_0 = 0; 1257 - ldio->flags = flags; 1251 + ldio->flags = cpu_to_le16(flags); 1258 1252 ldio->start_lba_hi = 0; 1259 1253 ldio->access_byte = (scp->cmd_len != 6) ? 
scp->cmnd[1] : 0; 1260 1254 ··· 1262 1256 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1263 1257 */ 1264 1258 if (scp->cmd_len == 6) { 1265 - ldio->lba_count = (u32) scp->cmnd[4]; 1266 - ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) | 1267 - ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; 1259 + ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); 1260 + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | 1261 + ((u32) scp->cmnd[2] << 8) | 1262 + (u32) scp->cmnd[3]); 1268 1263 1269 - ldio->start_lba_lo &= 0x1FFFFF; 1264 + ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); 1270 1265 } 1271 1266 1272 1267 /* 1273 1268 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1274 1269 */ 1275 1270 else if (scp->cmd_len == 10) { 1276 - ldio->lba_count = (u32) scp->cmnd[8] | 1277 - ((u32) scp->cmnd[7] << 8); 1278 - ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1279 - ((u32) scp->cmnd[3] << 16) | 1280 - ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1271 + ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | 1272 + ((u32) scp->cmnd[7] << 8)); 1273 + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1274 + ((u32) scp->cmnd[3] << 16) | 1275 + ((u32) scp->cmnd[4] << 8) | 1276 + (u32) scp->cmnd[5]); 1281 1277 } 1282 1278 1283 1279 /* 1284 1280 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1285 1281 */ 1286 1282 else if (scp->cmd_len == 12) { 1287 - ldio->lba_count = ((u32) scp->cmnd[6] << 24) | 1288 - ((u32) scp->cmnd[7] << 16) | 1289 - ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1283 + ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1284 + ((u32) scp->cmnd[7] << 16) | 1285 + ((u32) scp->cmnd[8] << 8) | 1286 + (u32) scp->cmnd[9]); 1290 1287 1291 - ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1292 - ((u32) scp->cmnd[3] << 16) | 1293 - ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1288 + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1289 + ((u32) scp->cmnd[3] << 16) | 1290 + ((u32) scp->cmnd[4] << 8) | 1291 + (u32) scp->cmnd[5]); 1294 1292 } 1295 
1293 1296 1294 /* 1297 1295 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1298 1296 */ 1299 1297 else if (scp->cmd_len == 16) { 1300 - ldio->lba_count = ((u32) scp->cmnd[10] << 24) | 1301 - ((u32) scp->cmnd[11] << 16) | 1302 - ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; 1298 + ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | 1299 + ((u32) scp->cmnd[11] << 16) | 1300 + ((u32) scp->cmnd[12] << 8) | 1301 + (u32) scp->cmnd[13]); 1303 1302 1304 - ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) | 1305 - ((u32) scp->cmnd[7] << 16) | 1306 - ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1303 + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | 1304 + ((u32) scp->cmnd[7] << 16) | 1305 + ((u32) scp->cmnd[8] << 8) | 1306 + (u32) scp->cmnd[9]); 1307 1307 1308 - ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) | 1309 - ((u32) scp->cmnd[3] << 16) | 1310 - ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1308 + ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | 1309 + ((u32) scp->cmnd[3] << 16) | 1310 + ((u32) scp->cmnd[4] << 8) | 1311 + (u32) scp->cmnd[5]); 1311 1312 1312 1313 } 1313 1314 ··· 1322 1309 * Construct SGL 1323 1310 */ 1324 1311 if (instance->flag_ieee) { 1325 - ldio->flags |= MFI_FRAME_SGL64; 1312 + ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1326 1313 ldio->sge_count = megasas_make_sgl_skinny(instance, scp, 1327 1314 &ldio->sgl); 1328 1315 } else if (IS_DMA64) { 1329 - ldio->flags |= MFI_FRAME_SGL64; 1316 + ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); 1330 1317 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); 1331 1318 } else 1332 1319 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); ··· 1342 1329 */ 1343 1330 ldio->sense_len = SCSI_SENSE_BUFFERSIZE; 1344 1331 ldio->sense_buf_phys_addr_hi = 0; 1345 - ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; 1332 + ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); 1346 1333 1347 1334 /* 1348 1335 * Compute the total number of frames this 
command consumes. FW uses ··· 1413 1400 ldio = (struct megasas_io_frame *)cmd->frame; 1414 1401 mfi_sgl = &ldio->sgl; 1415 1402 sgcount = ldio->sge_count; 1416 - printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount); 1403 + printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," 1404 + " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1405 + instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, 1406 + le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), 1407 + le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); 1417 1408 } 1418 1409 else { 1419 1410 pthru = (struct megasas_pthru_frame *) cmd->frame; 1420 1411 mfi_sgl = &pthru->sgl; 1421 1412 sgcount = pthru->sge_count; 1422 - printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount); 1413 + printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " 1414 + "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", 1415 + instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, 1416 + pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), 1417 + le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); 1423 1418 } 1424 1419 if(megasas_dbg_lvl & MEGASAS_DBG_LVL){ 1425 1420 for (n = 0; n < sgcount; n++){ 1426 1421 if (IS_DMA64) 1427 - printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ; 
1422 + printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ", 1423 + le32_to_cpu(mfi_sgl->sge64[n].length), 1424 + le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); 1428 1425 else 1429 - printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ; 1426 + printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ", 1427 + le32_to_cpu(mfi_sgl->sge32[n].length), 1428 + le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); 1430 1429 } 1431 1430 } 1432 1431 printk(KERN_ERR "\n"); ··· 1699 1674 1700 1675 spin_lock_irqsave(&instance->completion_lock, flags); 1701 1676 1702 - producer = *instance->producer; 1703 - consumer = *instance->consumer; 1677 + producer = le32_to_cpu(*instance->producer); 1678 + consumer = le32_to_cpu(*instance->consumer); 1704 1679 1705 1680 while (consumer != producer) { 1706 - context = instance->reply_queue[consumer]; 1681 + context = le32_to_cpu(instance->reply_queue[consumer]); 1707 1682 if (context >= instance->max_fw_cmds) { 1708 1683 printk(KERN_ERR "Unexpected context value %x\n", 1709 1684 context); ··· 1720 1695 } 1721 1696 } 1722 1697 1723 - *instance->consumer = producer; 1698 + *instance->consumer = cpu_to_le32(producer); 1724 1699 1725 1700 spin_unlock_irqrestore(&instance->completion_lock, flags); 1726 1701 ··· 1741 1716 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || 1742 1717 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || 1743 1718 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 1744 - *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN; 1719 + *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 1745 1720 } 1746 1721 instance->instancet->disable_intr(instance); 1747 1722 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; ··· 2211 2186 struct megasas_header *hdr = &cmd->frame->hdr; 2212 2187 unsigned long flags; 2213 2188 struct fusion_context *fusion = instance->ctrl_context; 2189 + u32 opcode; 2214 2190 2215 2191 /* flag for the retry 
reset */ 2216 2192 cmd->retry_for_fw_reset = 0; ··· 2313 2287 case MFI_CMD_SMP: 2314 2288 case MFI_CMD_STP: 2315 2289 case MFI_CMD_DCMD: 2290 + opcode = le32_to_cpu(cmd->frame->dcmd.opcode); 2316 2291 /* Check for LD map update */ 2317 - if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && 2318 - (cmd->frame->dcmd.mbox.b[1] == 1)) { 2292 + if ((opcode == MR_DCMD_LD_MAP_GET_INFO) 2293 + && (cmd->frame->dcmd.mbox.b[1] == 1)) { 2319 2294 fusion->fast_path_io = 0; 2320 2295 spin_lock_irqsave(instance->host->host_lock, flags); 2321 2296 if (cmd->frame->hdr.cmd_status != 0) { ··· 2350 2323 flags); 2351 2324 break; 2352 2325 } 2353 - if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 2354 - cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { 2326 + if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || 2327 + opcode == MR_DCMD_CTRL_EVENT_GET) { 2355 2328 spin_lock_irqsave(&poll_aen_lock, flags); 2356 2329 megasas_poll_wait_aen = 0; 2357 2330 spin_unlock_irqrestore(&poll_aen_lock, flags); ··· 2360 2333 /* 2361 2334 * See if got an event notification 2362 2335 */ 2363 - if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) 2336 + if (opcode == MR_DCMD_CTRL_EVENT_WAIT) 2364 2337 megasas_service_aen(instance, cmd); 2365 2338 else 2366 2339 megasas_complete_int_cmd(instance, cmd); ··· 2633 2606 PCI_DEVICE_ID_LSI_VERDE_ZCR)) { 2634 2607 2635 2608 *instance->consumer = 2636 - MEGASAS_ADPRESET_INPROG_SIGN; 2609 + cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); 2637 2610 } 2638 2611 2639 2612 ··· 3010 2983 } 3011 2984 3012 2985 memset(cmd->frame, 0, total_sz); 3013 - cmd->frame->io.context = cmd->index; 2986 + cmd->frame->io.context = cpu_to_le32(cmd->index); 3014 2987 cmd->frame->io.pad_0 = 0; 3015 2988 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && 3016 2989 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && ··· 3170 3143 dcmd->cmd = MFI_CMD_DCMD; 3171 3144 dcmd->cmd_status = 0xFF; 3172 3145 dcmd->sge_count = 1; 3173 - dcmd->flags = MFI_FRAME_DIR_READ; 
3146 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 3174 3147 dcmd->timeout = 0; 3175 3148 dcmd->pad_0 = 0; 3176 - dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); 3177 - dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 3178 - dcmd->sgl.sge32[0].phys_addr = ci_h; 3179 - dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); 3149 + dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 3150 + dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); 3151 + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 3152 + dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 3180 3153 3181 3154 if (!megasas_issue_polled(instance, cmd)) { 3182 3155 ret = 0; ··· 3191 3164 pd_addr = ci->addr; 3192 3165 3193 3166 if ( ret == 0 && 3194 - (ci->count < 3167 + (le32_to_cpu(ci->count) < 3195 3168 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { 3196 3169 3197 3170 memset(instance->pd_list, 0, 3198 3171 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); 3199 3172 3200 - for (pd_index = 0; pd_index < ci->count; pd_index++) { 3173 + for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { 3201 3174 3202 3175 instance->pd_list[pd_addr->deviceId].tid = 3203 - pd_addr->deviceId; 3176 + le16_to_cpu(pd_addr->deviceId); 3204 3177 instance->pd_list[pd_addr->deviceId].driveType = 3205 3178 pd_addr->scsiDevType; 3206 3179 instance->pd_list[pd_addr->deviceId].driveState = ··· 3234 3207 struct megasas_dcmd_frame *dcmd; 3235 3208 struct MR_LD_LIST *ci; 3236 3209 dma_addr_t ci_h = 0; 3210 + u32 ld_count; 3237 3211 3238 3212 cmd = megasas_get_cmd(instance); 3239 3213 ··· 3261 3233 dcmd->cmd = MFI_CMD_DCMD; 3262 3234 dcmd->cmd_status = 0xFF; 3263 3235 dcmd->sge_count = 1; 3264 - dcmd->flags = MFI_FRAME_DIR_READ; 3236 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 3265 3237 dcmd->timeout = 0; 3266 - dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 3267 - dcmd->opcode = MR_DCMD_LD_GET_LIST; 3268 - 
dcmd->sgl.sge32[0].phys_addr = ci_h; 3269 - dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 3238 + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); 3239 + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); 3240 + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 3241 + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); 3270 3242 dcmd->pad_0 = 0; 3271 3243 3272 3244 if (!megasas_issue_polled(instance, cmd)) { ··· 3275 3247 ret = -1; 3276 3248 } 3277 3249 3250 + ld_count = le32_to_cpu(ci->ldCount); 3251 + 3278 3252 /* the following function will get the instance PD LIST */ 3279 3253 3280 - if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) { 3254 + if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) { 3281 3255 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 3282 3256 3283 - for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { 3257 + for (ld_index = 0; ld_index < ld_count; ld_index++) { 3284 3258 if (ci->ldList[ld_index].state != 0) { 3285 3259 ids = ci->ldList[ld_index].ref.targetId; 3286 3260 instance->ld_ids[ids] = ··· 3297 3267 ci_h); 3298 3268 3299 3269 megasas_return_cmd(instance, cmd); 3270 + return ret; 3271 + } 3272 + 3273 + /** 3274 + * megasas_ld_list_query - Returns FW's ld_list structure 3275 + * @instance: Adapter soft state 3276 + * @ld_list: ld_list structure 3277 + * 3278 + * Issues an internal command (DCMD) to get the FW's controller PD 3279 + * list structure. This information is mainly used to find out SYSTEM 3280 + * supported by the FW. 
3281 + */ 3282 + static int 3283 + megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) 3284 + { 3285 + int ret = 0, ld_index = 0, ids = 0; 3286 + struct megasas_cmd *cmd; 3287 + struct megasas_dcmd_frame *dcmd; 3288 + struct MR_LD_TARGETID_LIST *ci; 3289 + dma_addr_t ci_h = 0; 3290 + u32 tgtid_count; 3291 + 3292 + cmd = megasas_get_cmd(instance); 3293 + 3294 + if (!cmd) { 3295 + printk(KERN_WARNING 3296 + "megasas:(megasas_ld_list_query): Failed to get cmd\n"); 3297 + return -ENOMEM; 3298 + } 3299 + 3300 + dcmd = &cmd->frame->dcmd; 3301 + 3302 + ci = pci_alloc_consistent(instance->pdev, 3303 + sizeof(struct MR_LD_TARGETID_LIST), &ci_h); 3304 + 3305 + if (!ci) { 3306 + printk(KERN_WARNING 3307 + "megasas: Failed to alloc mem for ld_list_query\n"); 3308 + megasas_return_cmd(instance, cmd); 3309 + return -ENOMEM; 3310 + } 3311 + 3312 + memset(ci, 0, sizeof(*ci)); 3313 + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3314 + 3315 + dcmd->mbox.b[0] = query_type; 3316 + 3317 + dcmd->cmd = MFI_CMD_DCMD; 3318 + dcmd->cmd_status = 0xFF; 3319 + dcmd->sge_count = 1; 3320 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 3321 + dcmd->timeout = 0; 3322 + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 3323 + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); 3324 + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 3325 + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 3326 + dcmd->pad_0 = 0; 3327 + 3328 + if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) { 3329 + ret = 0; 3330 + } else { 3331 + /* On failure, call older LD list DCMD */ 3332 + ret = 1; 3333 + } 3334 + 3335 + tgtid_count = le32_to_cpu(ci->count); 3336 + 3337 + if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) { 3338 + memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 3339 + for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 3340 + ids = ci->targetId[ld_index]; 3341 + instance->ld_ids[ids] = ci->targetId[ld_index]; 3342 + } 
3343 + 3344 + } 3345 + 3346 + pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), 3347 + ci, ci_h); 3348 + 3349 + megasas_return_cmd(instance, cmd); 3350 + 3300 3351 return ret; 3301 3352 } 3302 3353 ··· 3424 3313 dcmd->cmd = MFI_CMD_DCMD; 3425 3314 dcmd->cmd_status = 0xFF; 3426 3315 dcmd->sge_count = 1; 3427 - dcmd->flags = MFI_FRAME_DIR_READ; 3316 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 3428 3317 dcmd->timeout = 0; 3429 3318 dcmd->pad_0 = 0; 3430 - dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); 3431 - dcmd->opcode = MR_DCMD_CTRL_GET_INFO; 3432 - dcmd->sgl.sge32[0].phys_addr = ci_h; 3433 - dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info); 3319 + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 3320 + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 3321 + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 3322 + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 3434 3323 3435 3324 if (!megasas_issue_polled(instance, cmd)) { 3436 3325 ret = 0; ··· 3486 3375 memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); 3487 3376 init_frame->context = context; 3488 3377 3489 - initq_info->reply_queue_entries = instance->max_fw_cmds + 1; 3490 - initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h; 3378 + initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); 3379 + initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); 3491 3380 3492 - initq_info->producer_index_phys_addr_lo = instance->producer_h; 3493 - initq_info->consumer_index_phys_addr_lo = instance->consumer_h; 3381 + initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); 3382 + initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); 3494 3383 3495 3384 init_frame->cmd = MFI_CMD_INIT; 3496 3385 init_frame->cmd_status = 0xFF; 3497 - init_frame->queue_info_new_phys_addr_lo = initq_info_h; 3386 + 
init_frame->queue_info_new_phys_addr_lo = 3387 + cpu_to_le32(lower_32_bits(initq_info_h)); 3388 + init_frame->queue_info_new_phys_addr_hi = 3389 + cpu_to_le32(upper_32_bits(initq_info_h)); 3498 3390 3499 - init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); 3391 + init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); 3500 3392 3501 3393 /* 3502 3394 * disable the intr before firing the init frame to FW ··· 3762 3648 megasas_get_pd_list(instance); 3763 3649 3764 3650 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 3765 - megasas_get_ld_list(instance); 3651 + if (megasas_ld_list_query(instance, 3652 + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 3653 + megasas_get_ld_list(instance); 3766 3654 3767 3655 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); 3768 3656 ··· 3781 3665 if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { 3782 3666 3783 3667 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 3784 - ctrl_info->max_strips_per_io; 3785 - max_sectors_2 = ctrl_info->max_request_size; 3668 + le16_to_cpu(ctrl_info->max_strips_per_io); 3669 + max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 3786 3670 3787 3671 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); 3788 3672 ··· 3791 3675 instance->is_imr = 0; 3792 3676 dev_info(&instance->pdev->dev, "Controller type: MR," 3793 3677 "Memory size is: %dMB\n", 3794 - ctrl_info->memory_size); 3678 + le16_to_cpu(ctrl_info->memory_size)); 3795 3679 } else { 3796 3680 instance->is_imr = 1; 3797 3681 dev_info(&instance->pdev->dev, 3798 3682 "Controller type: iMR\n"); 3799 3683 } 3684 + /* OnOffProperties are converted into CPU arch*/ 3685 + le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); 3800 3686 instance->disableOnlineCtrlReset = 3801 3687 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 3688 + /* adapterOperations2 are converted into CPU arch*/ 3689 + le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); 3802 3690 
instance->UnevenSpanSupport = 3803 3691 ctrl_info->adapterOperations2.supportUnevenSpans; 3804 3692 if (instance->UnevenSpanSupport) { ··· 3816 3696 3817 3697 } 3818 3698 } 3819 - 3820 3699 instance->max_sectors_per_req = instance->max_num_sge * 3821 3700 PAGE_SIZE / 512; 3822 3701 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) ··· 3921 3802 dcmd->cmd = MFI_CMD_DCMD; 3922 3803 dcmd->cmd_status = 0x0; 3923 3804 dcmd->sge_count = 1; 3924 - dcmd->flags = MFI_FRAME_DIR_READ; 3805 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 3925 3806 dcmd->timeout = 0; 3926 3807 dcmd->pad_0 = 0; 3927 - dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); 3928 - dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; 3929 - dcmd->sgl.sge32[0].phys_addr = el_info_h; 3930 - dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info); 3808 + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 3809 + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); 3810 + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); 3811 + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); 3931 3812 3932 3813 megasas_issue_blocked_cmd(instance, cmd); 3933 3814 3934 3815 /* 3935 3816 * Copy the data back into callers buffer 3936 3817 */ 3937 - memcpy(eli, el_info, sizeof(struct megasas_evt_log_info)); 3818 + eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); 3819 + eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num); 3820 + eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); 3821 + eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); 3822 + eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); 3938 3823 3939 3824 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 3940 3825 el_info, el_info_h); ··· 3985 3862 if (instance->aen_cmd) { 3986 3863 3987 3864 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; 3865 + prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale); 3988 
3866 3989 3867 /* 3990 3868 * A class whose enum value is smaller is inclusive of all ··· 3998 3874 * values 3999 3875 */ 4000 3876 if ((prev_aen.members.class <= curr_aen.members.class) && 4001 - !((prev_aen.members.locale & curr_aen.members.locale) ^ 3877 + !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^ 4002 3878 curr_aen.members.locale)) { 4003 3879 /* 4004 3880 * Previously issued event registration includes ··· 4006 3882 */ 4007 3883 return 0; 4008 3884 } else { 4009 - curr_aen.members.locale |= prev_aen.members.locale; 3885 + curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale); 4010 3886 4011 3887 if (prev_aen.members.class < curr_aen.members.class) 4012 3888 curr_aen.members.class = prev_aen.members.class; ··· 4041 3917 dcmd->cmd = MFI_CMD_DCMD; 4042 3918 dcmd->cmd_status = 0x0; 4043 3919 dcmd->sge_count = 1; 4044 - dcmd->flags = MFI_FRAME_DIR_READ; 3920 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 4045 3921 dcmd->timeout = 0; 4046 3922 dcmd->pad_0 = 0; 3923 + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); 3924 + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); 3925 + dcmd->mbox.w[0] = cpu_to_le32(seq_num); 4047 3926 instance->last_seq_num = seq_num; 4048 - dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); 4049 - dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; 4050 - dcmd->mbox.w[0] = seq_num; 4051 - dcmd->mbox.w[1] = curr_aen.word; 4052 - dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; 4053 - dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); 3927 + dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); 3928 + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); 3929 + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); 4054 3930 4055 3931 if (instance->aen_cmd != NULL) { 4056 3932 megasas_return_cmd(instance, cmd); ··· 4096 3972 class_locale.members.locale = MR_EVT_LOCALE_ALL; 4097 3973 class_locale.members.class = MR_EVT_CLASS_DEBUG; 4098 
3974 4099 - return megasas_register_aen(instance, eli.newest_seq_num + 1, 4100 - class_locale.word); 3975 + return megasas_register_aen(instance, 3976 + le32_to_cpu(eli.newest_seq_num) + 1, 3977 + class_locale.word); 4101 3978 } 4102 3979 4103 3980 /** ··· 4193 4068 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 4194 4069 goto fail_set_dma_mask; 4195 4070 } 4071 + 4196 4072 return 0; 4197 4073 4198 4074 fail_set_dma_mask: ··· 4512 4386 dcmd->cmd = MFI_CMD_DCMD; 4513 4387 dcmd->cmd_status = 0x0; 4514 4388 dcmd->sge_count = 0; 4515 - dcmd->flags = MFI_FRAME_DIR_NONE; 4389 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 4516 4390 dcmd->timeout = 0; 4517 4391 dcmd->pad_0 = 0; 4518 4392 dcmd->data_xfer_len = 0; 4519 - dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; 4393 + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); 4520 4394 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 4521 4395 4522 4396 megasas_issue_blocked_cmd(instance, cmd); ··· 4557 4431 dcmd->cmd = MFI_CMD_DCMD; 4558 4432 dcmd->cmd_status = 0x0; 4559 4433 dcmd->sge_count = 0; 4560 - dcmd->flags = MFI_FRAME_DIR_NONE; 4434 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); 4561 4435 dcmd->timeout = 0; 4562 4436 dcmd->pad_0 = 0; 4563 4437 dcmd->data_xfer_len = 0; 4564 - dcmd->opcode = opcode; 4438 + dcmd->opcode = cpu_to_le32(opcode); 4565 4439 4566 4440 megasas_issue_blocked_cmd(instance, cmd); 4567 4441 ··· 4976 4850 * alone separately 4977 4851 */ 4978 4852 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); 4979 - cmd->frame->hdr.context = cmd->index; 4853 + cmd->frame->hdr.context = cpu_to_le32(cmd->index); 4980 4854 cmd->frame->hdr.pad_0 = 0; 4981 - cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | 4982 - MFI_FRAME_SENSE64); 4855 + cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | 4856 + MFI_FRAME_SGL64 | 4857 + MFI_FRAME_SENSE64)); 4983 4858 4984 4859 /* 4985 4860 * The management interface between applications and the fw uses ··· 5014 4887 * We don't change the 
dma_coherent_mask, so 5015 4888 * pci_alloc_consistent only returns 32bit addresses 5016 4889 */ 5017 - kern_sge32[i].phys_addr = (u32) buf_handle; 5018 - kern_sge32[i].length = ioc->sgl[i].iov_len; 4890 + kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); 4891 + kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); 5019 4892 5020 4893 /* 5021 4894 * We created a kernel buffer corresponding to the ··· 5038 4911 5039 4912 sense_ptr = 5040 4913 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); 5041 - *sense_ptr = sense_handle; 4914 + *sense_ptr = cpu_to_le32(sense_handle); 5042 4915 } 5043 4916 5044 4917 /* ··· 5098 4971 for (i = 0; i < ioc->sge_count; i++) { 5099 4972 if (kbuff_arr[i]) 5100 4973 dma_free_coherent(&instance->pdev->dev, 5101 - kern_sge32[i].length, 4974 + le32_to_cpu(kern_sge32[i].length), 5102 4975 kbuff_arr[i], 5103 - kern_sge32[i].phys_addr); 4976 + le32_to_cpu(kern_sge32[i].phys_addr)); 5104 4977 } 5105 4978 5106 4979 megasas_return_cmd(instance, cmd); ··· 5454 5327 host = instance->host; 5455 5328 if (instance->evt_detail) { 5456 5329 5457 - switch (instance->evt_detail->code) { 5330 + switch (le32_to_cpu(instance->evt_detail->code)) { 5458 5331 case MR_EVT_PD_INSERTED: 5459 5332 if (megasas_get_pd_list(instance) == 0) { 5460 5333 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { ··· 5516 5389 case MR_EVT_LD_OFFLINE: 5517 5390 case MR_EVT_CFG_CLEARED: 5518 5391 case MR_EVT_LD_DELETED: 5519 - megasas_get_ld_list(instance); 5392 + if (megasas_ld_list_query(instance, 5393 + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 5394 + megasas_get_ld_list(instance); 5520 5395 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 5521 5396 for (j = 0; 5522 5397 j < MEGASAS_MAX_DEV_PER_CHANNEL; ··· 5528 5399 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 5529 5400 5530 5401 sdev1 = scsi_device_lookup(host, 5531 - i + MEGASAS_MAX_LD_CHANNELS, 5402 + MEGASAS_MAX_PD_CHANNELS + i, 5532 5403 j, 5533 5404 0); 5534 5405 ··· 5547 5418 doscan = 0; 5548 5419 break; 5549 5420 case 
MR_EVT_LD_CREATED: 5550 - megasas_get_ld_list(instance); 5421 + if (megasas_ld_list_query(instance, 5422 + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 5423 + megasas_get_ld_list(instance); 5551 5424 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 5552 5425 for (j = 0; 5553 5426 j < MEGASAS_MAX_DEV_PER_CHANNEL; ··· 5558 5427 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 5559 5428 5560 5429 sdev1 = scsi_device_lookup(host, 5561 - i+MEGASAS_MAX_LD_CHANNELS, 5430 + MEGASAS_MAX_PD_CHANNELS + i, 5562 5431 j, 0); 5563 5432 5564 5433 if (instance->ld_ids[ld_index] != 5565 5434 0xff) { 5566 5435 if (!sdev1) { 5567 5436 scsi_add_device(host, 5568 - i + 2, 5437 + MEGASAS_MAX_PD_CHANNELS + i, 5569 5438 j, 0); 5570 5439 } 5571 5440 } ··· 5614 5483 } 5615 5484 } 5616 5485 5617 - megasas_get_ld_list(instance); 5486 + if (megasas_ld_list_query(instance, 5487 + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 5488 + megasas_get_ld_list(instance); 5618 5489 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { 5619 5490 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { 5620 5491 ld_index = 5621 5492 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 5622 5493 5623 5494 sdev1 = scsi_device_lookup(host, 5624 - i+MEGASAS_MAX_LD_CHANNELS, j, 0); 5495 + MEGASAS_MAX_PD_CHANNELS + i, j, 0); 5625 5496 if (instance->ld_ids[ld_index] != 0xff) { 5626 5497 if (!sdev1) { 5627 5498 scsi_add_device(host, 5628 - i+2, 5499 + MEGASAS_MAX_PD_CHANNELS + i, 5629 5500 j, 0); 5630 5501 } else { 5631 5502 scsi_device_put(sdev1); ··· 5647 5514 return ; 5648 5515 } 5649 5516 5650 - seq_num = instance->evt_detail->seq_num + 1; 5517 + seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; 5651 5518 5652 5519 /* Register AEN with FW for latest sequence number plus 1 */ 5653 5520 class_locale.members.reserved = 0;
+79 -66
drivers/scsi/megaraid/megaraid_sas_fp.c
··· 126 126 return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; 127 127 } 128 128 129 - static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) 129 + u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) 130 130 { 131 - return map->raidMap.arMapInfo[ar].pd[arm]; 131 + return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); 132 132 } 133 133 134 - static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) 134 + u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) 135 135 { 136 - return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef; 136 + return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); 137 137 } 138 138 139 - static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) 139 + u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) 140 140 { 141 141 return map->raidMap.devHndlInfo[pd].curDevHdl; 142 142 } ··· 148 148 149 149 u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) 150 150 { 151 - return map->raidMap.ldTgtIdToLd[ldTgtId]; 151 + return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]); 152 152 } 153 153 154 154 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, ··· 167 167 struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info; 168 168 PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; 169 169 struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; 170 + struct MR_LD_RAID *raid; 171 + int ldCount, num_lds; 172 + u16 ld; 170 173 171 - if (pFwRaidMap->totalSize != 174 + 175 + if (le32_to_cpu(pFwRaidMap->totalSize) != 172 176 (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + 173 - (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) { 177 + (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) { 174 178 printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n", 175 179 (unsigned int)((sizeof(struct MR_FW_RAID_MAP) - 176 180 sizeof(struct MR_LD_SPAN_MAP)) + 177 
181 (sizeof(struct MR_LD_SPAN_MAP) * 178 - pFwRaidMap->ldCount))); 182 + le32_to_cpu(pFwRaidMap->ldCount)))); 179 183 printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " 180 184 ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), 181 - pFwRaidMap->totalSize); 185 + le32_to_cpu(pFwRaidMap->totalSize)); 182 186 return 0; 183 187 } 184 188 ··· 190 186 mr_update_span_set(map, ldSpanInfo); 191 187 192 188 mr_update_load_balance_params(map, lbInfo); 189 + 190 + num_lds = le32_to_cpu(map->raidMap.ldCount); 191 + 192 + /*Convert Raid capability values to CPU arch */ 193 + for (ldCount = 0; ldCount < num_lds; ldCount++) { 194 + ld = MR_TargetIdToLdGet(ldCount, map); 195 + raid = MR_LdRaidGet(ld, map); 196 + le32_to_cpus((u32 *)&raid->capability); 197 + } 193 198 194 199 return 1; 195 200 } ··· 213 200 214 201 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { 215 202 216 - for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) { 203 + for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) { 217 204 quad = &pSpanBlock->block_span_info.quad[j]; 218 205 219 - if (quad->diff == 0) 206 + if (le32_to_cpu(quad->diff) == 0) 220 207 return SPAN_INVALID; 221 - if (quad->logStart <= row && row <= quad->logEnd && 222 - (mega_mod64(row-quad->logStart, quad->diff)) == 0) { 208 + if (le64_to_cpu(quad->logStart) <= row && row <= 209 + le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart), 210 + le32_to_cpu(quad->diff))) == 0) { 223 211 if (span_blk != NULL) { 224 212 u64 blk, debugBlk; 225 - blk = 226 - mega_div64_32( 227 - (row-quad->logStart), 228 - quad->diff); 213 + blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)); 229 214 debugBlk = blk; 230 215 231 - blk = (blk + quad->offsetInSpan) << 232 - raid->stripeShift; 216 + blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift; 233 217 *span_blk = blk; 234 218 } 235 219 return span; ··· 267 257 for (span = 0; span < 
raid->spanDepth; span++) 268 258 dev_dbg(&instance->pdev->dev, "Span=%x," 269 259 " number of quads=%x\n", span, 270 - map->raidMap.ldSpanMap[ld].spanBlock[span]. 271 - block_span_info.noElements); 260 + le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 261 + block_span_info.noElements)); 272 262 for (element = 0; element < MAX_QUAD_DEPTH; element++) { 273 263 span_set = &(ldSpanInfo[ld].span_set[element]); 274 264 if (span_set->span_row_data_width == 0) ··· 296 286 (long unsigned int)span_set->data_strip_end); 297 287 298 288 for (span = 0; span < raid->spanDepth; span++) { 299 - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 300 - block_span_info.noElements >= 289 + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 290 + block_span_info.noElements) >= 301 291 element + 1) { 302 292 quad = &map->raidMap.ldSpanMap[ld]. 303 293 spanBlock[span].block_span_info. 304 294 quad[element]; 305 295 dev_dbg(&instance->pdev->dev, "Span=%x," 306 296 "Quad=%x, diff=%x\n", span, 307 - element, quad->diff); 297 + element, le32_to_cpu(quad->diff)); 308 298 dev_dbg(&instance->pdev->dev, 309 299 "offset_in_span=0x%08lx\n", 310 - (long unsigned int)quad->offsetInSpan); 300 + (long unsigned int)le64_to_cpu(quad->offsetInSpan)); 311 301 dev_dbg(&instance->pdev->dev, 312 302 "logical start=0x%08lx, end=0x%08lx\n", 313 - (long unsigned int)quad->logStart, 314 - (long unsigned int)quad->logEnd); 303 + (long unsigned int)le64_to_cpu(quad->logStart), 304 + (long unsigned int)le64_to_cpu(quad->logEnd)); 315 305 } 316 306 } 317 307 } ··· 358 348 continue; 359 349 360 350 for (span = 0; span < raid->spanDepth; span++) 361 - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 362 - block_span_info.noElements >= info+1) { 351 + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 352 + block_span_info.noElements) >= info+1) { 363 353 quad = &map->raidMap.ldSpanMap[ld]. 364 354 spanBlock[span]. 
365 355 block_span_info.quad[info]; 366 - if (quad->diff == 0) 356 + if (le32_to_cpu(quad->diff == 0)) 367 357 return SPAN_INVALID; 368 - if (quad->logStart <= row && 369 - row <= quad->logEnd && 370 - (mega_mod64(row - quad->logStart, 371 - quad->diff)) == 0) { 358 + if (le64_to_cpu(quad->logStart) <= row && 359 + row <= le64_to_cpu(quad->logEnd) && 360 + (mega_mod64(row - le64_to_cpu(quad->logStart), 361 + le32_to_cpu(quad->diff))) == 0) { 372 362 if (span_blk != NULL) { 373 363 u64 blk; 374 364 blk = mega_div64_32 375 - ((row - quad->logStart), 376 - quad->diff); 377 - blk = (blk + quad->offsetInSpan) 365 + ((row - le64_to_cpu(quad->logStart)), 366 + le32_to_cpu(quad->diff)); 367 + blk = (blk + le64_to_cpu(quad->offsetInSpan)) 378 368 << raid->stripeShift; 379 369 *span_blk = blk; 380 370 } ··· 425 415 span_set_Row = mega_div64_32(span_set_Strip, 426 416 span_set->span_row_data_width) * span_set->diff; 427 417 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 428 - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 429 - block_span_info.noElements >= info+1) { 418 + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 419 + block_span_info.noElements >= info+1)) { 430 420 if (strip_offset >= 431 421 span_set->strip_offset[span]) 432 422 span_offset++; ··· 490 480 continue; 491 481 492 482 for (span = 0; span < raid->spanDepth; span++) 493 - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 494 - block_span_info.noElements >= info+1) { 483 + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 484 + block_span_info.noElements) >= info+1) { 495 485 quad = &map->raidMap.ldSpanMap[ld]. 
496 486 spanBlock[span].block_span_info.quad[info]; 497 - if (quad->logStart <= row && 498 - row <= quad->logEnd && 499 - mega_mod64((row - quad->logStart), 500 - quad->diff) == 0) { 487 + if (le64_to_cpu(quad->logStart) <= row && 488 + row <= le64_to_cpu(quad->logEnd) && 489 + mega_mod64((row - le64_to_cpu(quad->logStart)), 490 + le32_to_cpu(quad->diff)) == 0) { 501 491 strip = mega_div64_32 502 492 (((row - span_set->data_row_start) 503 - - quad->logStart), 504 - quad->diff); 493 + - le64_to_cpu(quad->logStart)), 494 + le32_to_cpu(quad->diff)); 505 495 strip *= span_set->span_row_data_width; 506 496 strip += span_set->data_strip_start; 507 497 strip += span_set->strip_offset[span]; ··· 553 543 span_set->span_row_data_width); 554 544 555 545 for (span = 0, span_offset = 0; span < raid->spanDepth; span++) 556 - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 557 - block_span_info.noElements >= info+1) { 546 + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 547 + block_span_info.noElements) >= info+1) { 558 548 if (strip_offset >= 559 549 span_set->strip_offset[span]) 560 550 span_offset = ··· 679 669 } 680 670 } 681 671 682 - *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; 672 + *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 683 673 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | 684 674 physArm; 685 675 return retval; ··· 775 765 } 776 766 } 777 767 778 - *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; 768 + *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 779 769 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | 780 770 physArm; 781 771 return retval; ··· 794 784 MR_BuildRaidContext(struct megasas_instance *instance, 795 785 struct IO_REQUEST_INFO *io_info, 796 786 struct RAID_CONTEXT *pRAID_Context, 797 - struct MR_FW_RAID_MAP_ALL *map) 787 + struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN) 798 788 { 799 789 struct MR_LD_RAID 
*raid; 800 790 u32 ld, stripSize, stripe_mask; ··· 975 965 regSize += stripSize; 976 966 } 977 967 978 - pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; 968 + pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec); 979 969 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 980 970 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 981 971 pRAID_Context->regLockFlags = (isRead) ? ··· 984 974 pRAID_Context->regLockFlags = (isRead) ? 985 975 REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; 986 976 pRAID_Context->VirtualDiskTgtId = raid->targetId; 987 - pRAID_Context->regLockRowLBA = regStart; 988 - pRAID_Context->regLockLength = regSize; 977 + pRAID_Context->regLockRowLBA = cpu_to_le64(regStart); 978 + pRAID_Context->regLockLength = cpu_to_le32(regSize); 989 979 pRAID_Context->configSeqNum = raid->seqNum; 980 + /* save pointer to raid->LUN array */ 981 + *raidLUN = raid->LUN; 982 + 990 983 991 984 /*Get Phy Params only if FP capable, or else leave it to MR firmware 992 985 to do the calculation.*/ ··· 1060 1047 raid = MR_LdRaidGet(ld, map); 1061 1048 for (element = 0; element < MAX_QUAD_DEPTH; element++) { 1062 1049 for (span = 0; span < raid->spanDepth; span++) { 1063 - if (map->raidMap.ldSpanMap[ld].spanBlock[span]. 1064 - block_span_info.noElements < 1050 + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 1051 + block_span_info.noElements) < 1065 1052 element + 1) 1066 1053 continue; 1067 1054 span_set = &(ldSpanInfo[ld].span_set[element]); ··· 1069 1056 spanBlock[span].block_span_info. 1070 1057 quad[element]; 1071 1058 1072 - span_set->diff = quad->diff; 1059 + span_set->diff = le32_to_cpu(quad->diff); 1073 1060 1074 1061 for (count = 0, span_row_width = 0; 1075 1062 count < raid->spanDepth; count++) { 1076 - if (map->raidMap.ldSpanMap[ld]. 1063 + if (le32_to_cpu(map->raidMap.ldSpanMap[ld]. 1077 1064 spanBlock[count]. 1078 1065 block_span_info. 
1079 - noElements >= element + 1) { 1066 + noElements) >= element + 1) { 1080 1067 span_set->strip_offset[count] = 1081 1068 span_row_width; 1082 1069 span_row_width += ··· 1090 1077 } 1091 1078 1092 1079 span_set->span_row_data_width = span_row_width; 1093 - span_row = mega_div64_32(((quad->logEnd - 1094 - quad->logStart) + quad->diff), 1095 - quad->diff); 1080 + span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) - 1081 + le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)), 1082 + le32_to_cpu(quad->diff)); 1096 1083 1097 1084 if (element == 0) { 1098 1085 span_set->log_start_lba = 0; ··· 1109 1096 1110 1097 span_set->data_row_start = 0; 1111 1098 span_set->data_row_end = 1112 - (span_row * quad->diff) - 1; 1099 + (span_row * le32_to_cpu(quad->diff)) - 1; 1113 1100 } else { 1114 1101 span_set_prev = &(ldSpanInfo[ld]. 1115 1102 span_set[element - 1]); ··· 1135 1122 span_set_prev->data_row_end + 1; 1136 1123 span_set->data_row_end = 1137 1124 span_set->data_row_start + 1138 - (span_row * quad->diff) - 1; 1125 + (span_row * le32_to_cpu(quad->diff)) - 1; 1139 1126 } 1140 1127 break; 1141 1128 }
+123 -75
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 72 72 int 73 73 megasas_issue_polled(struct megasas_instance *instance, 74 74 struct megasas_cmd *cmd); 75 - 76 - u8 77 - MR_BuildRaidContext(struct megasas_instance *instance, 78 - struct IO_REQUEST_INFO *io_info, 79 - struct RAID_CONTEXT *pRAID_Context, 80 - struct MR_FW_RAID_MAP_ALL *map); 81 - u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); 82 - struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); 83 - 84 - u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); 85 - 86 75 void 87 76 megasas_check_and_restore_queue_depth(struct megasas_instance *instance); 88 77 ··· 615 626 616 627 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; 617 628 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 618 - IOCInitMessage->MsgVersion = MPI2_VERSION; 619 - IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION; 620 - IOCInitMessage->SystemRequestFrameSize = 621 - MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; 629 + IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); 630 + IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 631 + IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); 622 632 623 - IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth; 624 - IOCInitMessage->ReplyDescriptorPostQueueAddress = 625 - fusion->reply_frames_desc_phys; 626 - IOCInitMessage->SystemRequestFrameBaseAddress = 627 - fusion->io_request_frames_phys; 633 + IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); 634 + IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys); 635 + IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); 628 636 IOCInitMessage->HostMSIxVectors = instance->msix_vectors; 629 637 init_frame = (struct megasas_init_frame *)cmd->frame; 630 638 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 631 639 632 640 frame_hdr = &cmd->frame->hdr; 633 641 
frame_hdr->cmd_status = 0xFF; 634 - frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 642 + frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 635 643 636 644 init_frame->cmd = MFI_CMD_INIT; 637 645 init_frame->cmd_status = 0xFF; ··· 638 652 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 639 653 init_frame->driver_operations. 640 654 mfi_capabilities.support_additional_msix = 1; 655 + /* driver supports HA / Remote LUN over Fast Path interface */ 656 + init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun 657 + = 1; 658 + /* Convert capability to LE32 */ 659 + cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 641 660 642 - init_frame->queue_info_new_phys_addr_lo = ioc_init_handle; 643 - init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST); 661 + init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle); 662 + init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); 644 663 645 664 req_desc = 646 665 (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; 647 666 648 - req_desc->Words = cmd->frame_phys_addr; 667 + req_desc->Words = 0; 649 668 req_desc->MFAIo.RequestFlags = 650 669 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << 651 670 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 671 + cpu_to_le32s((u32 *)&req_desc->MFAIo); 672 + req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr); 652 673 653 674 /* 654 675 * disable the intr before firing the init frame ··· 746 753 dcmd->cmd = MFI_CMD_DCMD; 747 754 dcmd->cmd_status = 0xFF; 748 755 dcmd->sge_count = 1; 749 - dcmd->flags = MFI_FRAME_DIR_READ; 756 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 750 757 dcmd->timeout = 0; 751 758 dcmd->pad_0 = 0; 752 - dcmd->data_xfer_len = size_map_info; 753 - dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 754 - dcmd->sgl.sge32[0].phys_addr = ci_h; 755 - dcmd->sgl.sge32[0].length = size_map_info; 759 + dcmd->data_xfer_len = cpu_to_le32(size_map_info); 760 + dcmd->opcode = 
cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); 761 + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 762 + dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); 756 763 757 764 if (!megasas_issue_polled(instance, cmd)) 758 765 ret = 0; ··· 821 828 822 829 map = fusion->ld_map[instance->map_id & 1]; 823 830 824 - num_lds = map->raidMap.ldCount; 831 + num_lds = le32_to_cpu(map->raidMap.ldCount); 825 832 826 833 dcmd = &cmd->frame->dcmd; 827 834 ··· 849 856 dcmd->cmd = MFI_CMD_DCMD; 850 857 dcmd->cmd_status = 0xFF; 851 858 dcmd->sge_count = 1; 852 - dcmd->flags = MFI_FRAME_DIR_WRITE; 859 + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE); 853 860 dcmd->timeout = 0; 854 861 dcmd->pad_0 = 0; 855 - dcmd->data_xfer_len = size_map_info; 862 + dcmd->data_xfer_len = cpu_to_le32(size_map_info); 856 863 dcmd->mbox.b[0] = num_lds; 857 864 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; 858 - dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 859 - dcmd->sgl.sge32[0].phys_addr = ci_h; 860 - dcmd->sgl.sge32[0].length = size_map_info; 865 + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); 866 + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 867 + dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); 861 868 862 869 instance->map_update_cmd = cmd; 863 870 ··· 1060 1067 1061 1068 spin_lock_irqsave(&instance->hba_lock, flags); 1062 1069 1063 - writel(req_desc_lo, 1064 - &(regs)->inbound_low_queue_port); 1065 - writel(req_desc_hi, &(regs)->inbound_high_queue_port); 1070 + writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port); 1071 + writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port); 1066 1072 spin_unlock_irqrestore(&instance->hba_lock, flags); 1067 1073 } 1068 1074 ··· 1149 1157 return sge_count; 1150 1158 1151 1159 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1152 - sgl_ptr->Length = sg_dma_len(os_sgl); 1153 - sgl_ptr->Address = sg_dma_address(os_sgl); 1160 + sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); 1161 + sgl_ptr->Address = 
cpu_to_le64(sg_dma_address(os_sgl)); 1154 1162 sgl_ptr->Flags = 0; 1155 1163 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1156 1164 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { ··· 1169 1177 PCI_DEVICE_ID_LSI_INVADER) || 1170 1178 (instance->pdev->device == 1171 1179 PCI_DEVICE_ID_LSI_FURY)) { 1172 - if ((cmd->io_request->IoFlags & 1173 - MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != 1174 - MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 1180 + if ((le16_to_cpu(cmd->io_request->IoFlags) & 1181 + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != 1182 + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 1175 1183 cmd->io_request->ChainOffset = 1176 1184 fusion-> 1177 1185 chain_offset_io_request; ··· 1193 1201 sg_chain->Flags = 1194 1202 (IEEE_SGE_FLAGS_CHAIN_ELEMENT | 1195 1203 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); 1196 - sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION) 1197 - *(sge_count - sg_processed)); 1198 - sg_chain->Address = cmd->sg_frame_phys_addr; 1204 + sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed))); 1205 + sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr); 1199 1206 1200 1207 sgl_ptr = 1201 1208 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; ··· 1252 1261 io_request->CDB.EEDP32.PrimaryReferenceTag = 1253 1262 cpu_to_be32(ref_tag); 1254 1263 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; 1255 - io_request->IoFlags = 32; /* Specify 32-byte cdb */ 1264 + io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ 1256 1265 1257 1266 /* Transfer length */ 1258 1267 cdb[28] = (u8)((num_blocks >> 24) & 0xff); ··· 1262 1271 1263 1272 /* set SCSI IO EEDPFlags */ 1264 1273 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { 1265 - io_request->EEDPFlags = 1274 + io_request->EEDPFlags = cpu_to_le16( 1266 1275 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1267 1276 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1268 1277 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 1269 1278 
MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 1270 - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 1279 + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 1271 1280 } else { 1272 - io_request->EEDPFlags = 1281 + io_request->EEDPFlags = cpu_to_le16( 1273 1282 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1274 - MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; 1283 + MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); 1275 1284 } 1276 - io_request->Control |= (0x4 << 26); 1277 - io_request->EEDPBlockSize = scp->device->sector_size; 1285 + io_request->Control |= cpu_to_le32((0x4 << 26)); 1286 + io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size); 1278 1287 } else { 1279 1288 /* Some drives don't support 16/12 byte CDB's, convert to 10 */ 1280 1289 if (((cdb_len == 12) || (cdb_len == 16)) && ··· 1302 1311 cdb[8] = (u8)(num_blocks & 0xff); 1303 1312 cdb[7] = (u8)((num_blocks >> 8) & 0xff); 1304 1313 1305 - io_request->IoFlags = 10; /* Specify 10-byte cdb */ 1314 + io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */ 1306 1315 cdb_len = 10; 1307 1316 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { 1308 1317 /* Convert to 16 byte CDB for large LBA's */ ··· 1340 1349 cdb[11] = (u8)((num_blocks >> 16) & 0xff); 1341 1350 cdb[10] = (u8)((num_blocks >> 24) & 0xff); 1342 1351 1343 - io_request->IoFlags = 16; /* Specify 16-byte cdb */ 1352 + io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */ 1344 1353 cdb_len = 16; 1345 1354 } 1346 1355 ··· 1401 1410 struct IO_REQUEST_INFO io_info; 1402 1411 struct fusion_context *fusion; 1403 1412 struct MR_FW_RAID_MAP_ALL *local_map_ptr; 1413 + u8 *raidLUN; 1404 1414 1405 1415 device_id = MEGASAS_DEV_INDEX(instance, scp); 1406 1416 1407 1417 fusion = instance->ctrl_context; 1408 1418 1409 1419 io_request = cmd->io_request; 1410 - io_request->RaidContext.VirtualDiskTgtId = device_id; 1420 + io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); 1411 1421 io_request->RaidContext.status = 0; 1412 1422 io_request->RaidContext.exStatus = 0; 1413 1423 ··· 1472 
1480 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 1473 1481 io_info.numBlocks = datalength; 1474 1482 io_info.ldTgtId = device_id; 1475 - io_request->DataLength = scsi_bufflen(scp); 1483 + io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); 1476 1484 1477 1485 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1478 1486 io_info.isRead = 1; ··· 1486 1494 } else { 1487 1495 if (MR_BuildRaidContext(instance, &io_info, 1488 1496 &io_request->RaidContext, 1489 - local_map_ptr)) 1497 + local_map_ptr, &raidLUN)) 1490 1498 fp_possible = io_info.fpOkForIo; 1491 1499 } 1492 1500 ··· 1512 1520 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1513 1521 io_request->RaidContext.Type = MPI2_TYPE_CUDA; 1514 1522 io_request->RaidContext.nseg = 0x1; 1515 - io_request->IoFlags |= 1516 - MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; 1523 + io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 1517 1524 io_request->RaidContext.regLockFlags |= 1518 1525 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 1519 1526 MR_RL_FLAGS_SEQ_NUM_ENABLE); ··· 1528 1537 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 1529 1538 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 1530 1539 io_request->DevHandle = io_info.devHandle; 1540 + /* populate the LUN field */ 1541 + memcpy(io_request->LUN, raidLUN, 8); 1531 1542 } else { 1532 1543 io_request->RaidContext.timeoutValue = 1533 - local_map_ptr->raidMap.fpPdIoTimeoutSec; 1544 + cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); 1534 1545 cmd->request_desc->SCSIIO.RequestFlags = 1535 1546 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 1536 1547 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); ··· 1550 1557 io_request->RaidContext.nseg = 0x1; 1551 1558 } 1552 1559 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1553 - io_request->DevHandle = device_id; 1560 + io_request->DevHandle = cpu_to_le16(device_id); 1554 1561 } /* Not FP */ 1555 1562 } 1556 1563 ··· 1572 1579 u16 pd_index = 0; 1573 1580 struct MR_FW_RAID_MAP_ALL 
*local_map_ptr; 1574 1581 struct fusion_context *fusion = instance->ctrl_context; 1582 + u8 span, physArm; 1583 + u16 devHandle; 1584 + u32 ld, arRef, pd; 1585 + struct MR_LD_RAID *raid; 1586 + struct RAID_CONTEXT *pRAID_Context; 1575 1587 1576 1588 io_request = cmd->io_request; 1577 1589 device_id = MEGASAS_DEV_INDEX(instance, scmd); 1578 1590 pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) 1579 1591 +scmd->device->id; 1580 1592 local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; 1593 + 1594 + io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 1595 + 1581 1596 1582 1597 /* Check if this is a system PD I/O */ 1583 1598 if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && ··· 1624 1623 scmd->request->timeout / HZ; 1625 1624 } 1626 1625 } else { 1626 + if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) 1627 + goto NonFastPath; 1628 + 1629 + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1630 + if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) 1631 + goto NonFastPath; 1632 + 1633 + raid = MR_LdRaidGet(ld, local_map_ptr); 1634 + 1635 + /* check if this LD is FP capable */ 1636 + if (!(raid->capability.fpNonRWCapable)) 1637 + /* not FP capable, send as non-FP */ 1638 + goto NonFastPath; 1639 + 1640 + /* get RAID_Context pointer */ 1641 + pRAID_Context = &io_request->RaidContext; 1642 + 1643 + /* set RAID context values */ 1644 + pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; 1645 + pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd; 1646 + pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 1647 + pRAID_Context->regLockRowLBA = 0; 1648 + pRAID_Context->regLockLength = 0; 1649 + pRAID_Context->configSeqNum = raid->seqNum; 1650 + 1651 + /* get the DevHandle for the PD (since this is 1652 + fpNonRWCapable, this is a single disk RAID0) */ 1653 + span = physArm = 0; 1654 + arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); 1655 + pd = MR_ArPdGet(arRef, physArm, local_map_ptr); 1656 + devHandle = 
MR_PdDevHandleGet(pd, local_map_ptr); 1657 + 1658 + /* build request descriptor */ 1659 + cmd->request_desc->SCSIIO.RequestFlags = 1660 + (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 1661 + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1662 + cmd->request_desc->SCSIIO.DevHandle = devHandle; 1663 + 1664 + /* populate the LUN field */ 1665 + memcpy(io_request->LUN, raid->LUN, 8); 1666 + 1667 + /* build the raidScsiIO structure */ 1668 + io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1669 + io_request->DevHandle = devHandle; 1670 + 1671 + return; 1672 + 1673 + NonFastPath: 1627 1674 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1628 - io_request->DevHandle = device_id; 1675 + io_request->DevHandle = cpu_to_le16(device_id); 1629 1676 cmd->request_desc->SCSIIO.RequestFlags = 1630 1677 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 1631 1678 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1632 1679 } 1633 - io_request->RaidContext.VirtualDiskTgtId = device_id; 1680 + io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); 1634 1681 io_request->LUN[1] = scmd->device->lun; 1635 - io_request->DataLength = scsi_bufflen(scmd); 1636 1682 } 1637 1683 1638 1684 /** ··· 1718 1670 * Just the CDB length,rest of the Flags are zero 1719 1671 * This will be modified for FP in build_ldio_fusion 1720 1672 */ 1721 - io_request->IoFlags = scp->cmd_len; 1673 + io_request->IoFlags = cpu_to_le16(scp->cmd_len); 1722 1674 1723 1675 if (megasas_is_ldio(scp)) 1724 1676 megasas_build_ldio_fusion(instance, scp, cmd); ··· 1743 1695 1744 1696 io_request->RaidContext.numSGE = sge_count; 1745 1697 1746 - io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 1698 + io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); 1747 1699 1748 1700 if (scp->sc_data_direction == PCI_DMA_TODEVICE) 1749 - io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE; 1701 + io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE); 1750 1702 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1751 - 
io_request->Control |= MPI2_SCSIIO_CONTROL_READ; 1703 + io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ); 1752 1704 1753 1705 io_request->SGLOffset0 = 1754 1706 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; 1755 1707 1756 - io_request->SenseBufferLowAddress = cmd->sense_phys_addr; 1708 + io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr); 1757 1709 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 1758 1710 1759 1711 cmd->scmd = scp; ··· 1818 1770 } 1819 1771 1820 1772 req_desc = cmd->request_desc; 1821 - req_desc->SCSIIO.SMID = index; 1773 + req_desc->SCSIIO.SMID = cpu_to_le16(index); 1822 1774 1823 1775 if (cmd->io_request->ChainOffset != 0 && 1824 1776 cmd->io_request->ChainOffset != 0xF) ··· 1880 1832 num_completed = 0; 1881 1833 1882 1834 while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { 1883 - smid = reply_desc->SMID; 1835 + smid = le16_to_cpu(reply_desc->SMID); 1884 1836 1885 1837 cmd_fusion = fusion->cmd_list[smid - 1]; 1886 1838 ··· 2098 2050 SGL) / 4; 2099 2051 io_req->ChainOffset = fusion->chain_offset_mfi_pthru; 2100 2052 2101 - mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; 2053 + mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr); 2102 2054 2103 2055 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2104 2056 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 2105 2057 2106 - mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME; 2058 + mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME); 2107 2059 2108 2060 return 0; 2109 2061 } ··· 2136 2088 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2137 2089 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2138 2090 2139 - req_desc->SCSIIO.SMID = index; 2091 + req_desc->SCSIIO.SMID = cpu_to_le16(index); 2140 2092 2141 2093 return req_desc; 2142 2094 }
+29 -2
drivers/scsi/megaraid/megaraid_sas_fusion.h
··· 93 93 */ 94 94 95 95 struct RAID_CONTEXT { 96 + #if defined(__BIG_ENDIAN_BITFIELD) 97 + u8 nseg:4; 98 + u8 Type:4; 99 + #else 96 100 u8 Type:4; 97 101 u8 nseg:4; 102 + #endif 98 103 u8 resvd0; 99 104 u16 timeoutValue; 100 105 u8 regLockFlags; ··· 303 298 * MPT RAID MFA IO Descriptor. 304 299 */ 305 300 struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { 301 + #if defined(__BIG_ENDIAN_BITFIELD) 302 + u32 MessageAddress1:24; /* bits 31:8*/ 303 + u32 RequestFlags:8; 304 + #else 306 305 u32 RequestFlags:8; 307 306 u32 MessageAddress1:24; /* bits 31:8*/ 307 + #endif 308 308 u32 MessageAddress2; /* bits 61:32 */ 309 309 }; 310 310 ··· 528 518 529 519 struct MR_LD_RAID { 530 520 struct { 521 + #if defined(__BIG_ENDIAN_BITFIELD) 522 + u32 reserved4:7; 523 + u32 fpNonRWCapable:1; 524 + u32 fpReadAcrossStripe:1; 525 + u32 fpWriteAcrossStripe:1; 526 + u32 fpReadCapable:1; 527 + u32 fpWriteCapable:1; 528 + u32 encryptionType:8; 529 + u32 pdPiMode:4; 530 + u32 ldPiMode:4; 531 + u32 reserved5:3; 532 + u32 fpCapable:1; 533 + #else 531 534 u32 fpCapable:1; 532 535 u32 reserved5:3; 533 536 u32 ldPiMode:4; ··· 550 527 u32 fpReadCapable:1; 551 528 u32 fpWriteAcrossStripe:1; 552 529 u32 fpReadAcrossStripe:1; 553 - u32 reserved4:8; 530 + u32 fpNonRWCapable:1; 531 + u32 reserved4:7; 532 + #endif 554 533 } capability; 555 534 u32 reserved6; 556 535 u64 size; ··· 576 551 u32 reserved:31; 577 552 } flags; 578 553 579 - u8 reserved3[0x5C]; 554 + u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ 555 + u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/ 556 + u8 reserved3[0x80-0x2D]; /* 0x2D */ 580 557 }; 581 558 582 559 struct MR_LD_SPAN_MAP {
+1 -1
drivers/scsi/mpt3sas/Makefile
··· 1 1 # mpt3sas makefile 2 - obj-m += mpt3sas.o 2 + obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o 3 3 mpt3sas-y += mpt3sas_base.o \ 4 4 mpt3sas_config.o \ 5 5 mpt3sas_scsih.o \
+3 -8
drivers/scsi/sd.c
··· 2420 2420 } 2421 2421 } 2422 2422 2423 - if (modepage == 0x3F) { 2424 - sd_printk(KERN_ERR, sdkp, "No Caching mode page " 2425 - "present\n"); 2426 - goto defaults; 2427 - } else if ((buffer[offset] & 0x3f) != modepage) { 2428 - sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); 2429 - goto defaults; 2430 - } 2423 + sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); 2424 + goto defaults; 2425 + 2431 2426 Page_found: 2432 2427 if (modepage == 8) { 2433 2428 sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
+1
drivers/scsi/ufs/ufs.h
··· 177 177 MASK_TASK_RESPONSE = 0xFF00, 178 178 MASK_RSP_UPIU_RESULT = 0xFFFF, 179 179 MASK_QUERY_DATA_SEG_LEN = 0xFFFF, 180 + MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF, 180 181 MASK_RSP_EXCEPTION_EVENT = 0x10000, 181 182 }; 182 183
+281 -47
drivers/scsi/ufs/ufshcd.c
··· 36 36 #include <linux/async.h> 37 37 38 38 #include "ufshcd.h" 39 + #include "unipro.h" 39 40 40 41 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ 41 42 UTP_TASK_REQ_COMPL |\ 43 + UIC_POWER_MODE |\ 42 44 UFSHCD_ERROR_MASK) 43 45 /* UIC command timeout, unit: ms */ 44 46 #define UIC_CMD_TIMEOUT 500 ··· 57 55 58 56 /* Expose the flag value from utp_upiu_query.value */ 59 57 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF 58 + 59 + /* Interrupt aggregation default timeout, unit: 40us */ 60 + #define INT_AGGR_DEF_TO 0x02 60 61 61 62 enum { 62 63 UFSHCD_MAX_CHANNEL = 0, ··· 81 76 UFSHCD_INT_DISABLE, 82 77 UFSHCD_INT_ENABLE, 83 78 UFSHCD_INT_CLEAR, 84 - }; 85 - 86 - /* Interrupt aggregation options */ 87 - enum { 88 - INT_AGGR_RESET, 89 - INT_AGGR_CONFIG, 90 79 }; 91 80 92 81 /* ··· 237 238 } 238 239 239 240 /** 241 + * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command 242 + * @hba: Pointer to adapter instance 243 + * 244 + * This function gets UIC command argument3 245 + * Returns 0 on success, non zero value on error 246 + */ 247 + static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) 248 + { 249 + return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); 250 + } 251 + 252 + /** 240 253 * ufshcd_get_req_rsp - returns the TR response transaction type 241 254 * @ucd_rsp_ptr: pointer to response UPIU 242 255 */ ··· 271 260 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; 272 261 } 273 262 263 + /* 264 + * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length 265 + * from response UPIU 266 + * @ucd_rsp_ptr: pointer to response UPIU 267 + * 268 + * Return the data segment length. 
269 + */ 270 + static inline unsigned int 271 + ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr) 272 + { 273 + return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & 274 + MASK_RSP_UPIU_DATA_SEG_LEN; 275 + } 276 + 274 277 /** 275 278 * ufshcd_is_exception_event - Check if the device raised an exception event 276 279 * @ucd_rsp_ptr: pointer to response UPIU ··· 301 276 } 302 277 303 278 /** 304 - * ufshcd_config_int_aggr - Configure interrupt aggregation values. 305 - * Currently there is no use case where we want to configure 306 - * interrupt aggregation dynamically. So to configure interrupt 307 - * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and 308 - * INT_AGGR_TIMEOUT_VALUE are used. 279 + * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. 309 280 * @hba: per adapter instance 310 - * @option: Interrupt aggregation option 311 281 */ 312 282 static inline void 313 - ufshcd_config_int_aggr(struct ufs_hba *hba, int option) 283 + ufshcd_reset_intr_aggr(struct ufs_hba *hba) 314 284 { 315 - switch (option) { 316 - case INT_AGGR_RESET: 317 - ufshcd_writel(hba, INT_AGGR_ENABLE | 318 - INT_AGGR_COUNTER_AND_TIMER_RESET, 319 - REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 320 - break; 321 - case INT_AGGR_CONFIG: 322 - ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | 323 - INT_AGGR_COUNTER_THRESHOLD_VALUE | 324 - INT_AGGR_TIMEOUT_VALUE, 325 - REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 326 - break; 327 - } 285 + ufshcd_writel(hba, INT_AGGR_ENABLE | 286 + INT_AGGR_COUNTER_AND_TIMER_RESET, 287 + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 288 + } 289 + 290 + /** 291 + * ufshcd_config_intr_aggr - Configure interrupt aggregation values. 
292 + * @hba: per adapter instance 293 + * @cnt: Interrupt aggregation counter threshold 294 + * @tmout: Interrupt aggregation timeout value 295 + */ 296 + static inline void 297 + ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) 298 + { 299 + ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | 300 + INT_AGGR_COUNTER_THLD_VAL(cnt) | 301 + INT_AGGR_TIMEOUT_VAL(tmout), 302 + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); 328 303 } 329 304 330 305 /** ··· 380 355 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) 381 356 { 382 357 int len; 383 - if (lrbp->sense_buffer) { 358 + if (lrbp->sense_buffer && 359 + ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { 384 360 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); 385 361 memcpy(lrbp->sense_buffer, 386 362 lrbp->ucd_rsp_ptr->sr.sense_data, ··· 469 443 return true; 470 444 else 471 445 return false; 446 + } 447 + 448 + /** 449 + * ufshcd_get_upmcrs - Get the power mode change request status 450 + * @hba: Pointer to adapter instance 451 + * 452 + * This function gets the UPMCRS field of HCS register 453 + * Returns value of UPMCRS field 454 + */ 455 + static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) 456 + { 457 + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; 472 458 } 473 459 474 460 /** ··· 1400 1362 } 1401 1363 1402 1364 /** 1365 + * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET 1366 + * @hba: per adapter instance 1367 + * @attr_sel: uic command argument1 1368 + * @attr_set: attribute set type as uic command argument2 1369 + * @mib_val: setting value as uic command argument3 1370 + * @peer: indicate whether peer or local 1371 + * 1372 + * Returns 0 on success, non-zero value on failure 1373 + */ 1374 + int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, 1375 + u8 attr_set, u32 mib_val, u8 peer) 1376 + { 1377 + struct uic_command uic_cmd = {0}; 1378 + static const char *const action[] = { 1379 + "dme-set", 1380 + "dme-peer-set" 1381 + 
}; 1382 + const char *set = action[!!peer]; 1383 + int ret; 1384 + 1385 + uic_cmd.command = peer ? 1386 + UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; 1387 + uic_cmd.argument1 = attr_sel; 1388 + uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); 1389 + uic_cmd.argument3 = mib_val; 1390 + 1391 + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 1392 + if (ret) 1393 + dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", 1394 + set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); 1395 + 1396 + return ret; 1397 + } 1398 + EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); 1399 + 1400 + /** 1401 + * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET 1402 + * @hba: per adapter instance 1403 + * @attr_sel: uic command argument1 1404 + * @mib_val: the value of the attribute as returned by the UIC command 1405 + * @peer: indicate whether peer or local 1406 + * 1407 + * Returns 0 on success, non-zero value on failure 1408 + */ 1409 + int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, 1410 + u32 *mib_val, u8 peer) 1411 + { 1412 + struct uic_command uic_cmd = {0}; 1413 + static const char *const action[] = { 1414 + "dme-get", 1415 + "dme-peer-get" 1416 + }; 1417 + const char *get = action[!!peer]; 1418 + int ret; 1419 + 1420 + uic_cmd.command = peer ? 1421 + UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; 1422 + uic_cmd.argument1 = attr_sel; 1423 + 1424 + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); 1425 + if (ret) { 1426 + dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n", 1427 + get, UIC_GET_ATTR_ID(attr_sel), ret); 1428 + goto out; 1429 + } 1430 + 1431 + if (mib_val) 1432 + *mib_val = uic_cmd.argument3; 1433 + out: 1434 + return ret; 1435 + } 1436 + EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); 1437 + 1438 + /** 1439 + * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage 1440 + * using DME_SET primitives. 
1441 + * @hba: per adapter instance 1442 + * @mode: powr mode value 1443 + * 1444 + * Returns 0 on success, non-zero value on failure 1445 + */ 1446 + int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) 1447 + { 1448 + struct uic_command uic_cmd = {0}; 1449 + struct completion pwr_done; 1450 + unsigned long flags; 1451 + u8 status; 1452 + int ret; 1453 + 1454 + uic_cmd.command = UIC_CMD_DME_SET; 1455 + uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); 1456 + uic_cmd.argument3 = mode; 1457 + init_completion(&pwr_done); 1458 + 1459 + mutex_lock(&hba->uic_cmd_mutex); 1460 + 1461 + spin_lock_irqsave(hba->host->host_lock, flags); 1462 + hba->pwr_done = &pwr_done; 1463 + spin_unlock_irqrestore(hba->host->host_lock, flags); 1464 + ret = __ufshcd_send_uic_cmd(hba, &uic_cmd); 1465 + if (ret) { 1466 + dev_err(hba->dev, 1467 + "pwr mode change with mode 0x%x uic error %d\n", 1468 + mode, ret); 1469 + goto out; 1470 + } 1471 + 1472 + if (!wait_for_completion_timeout(hba->pwr_done, 1473 + msecs_to_jiffies(UIC_CMD_TIMEOUT))) { 1474 + dev_err(hba->dev, 1475 + "pwr mode change with mode 0x%x completion timeout\n", 1476 + mode); 1477 + ret = -ETIMEDOUT; 1478 + goto out; 1479 + } 1480 + 1481 + status = ufshcd_get_upmcrs(hba); 1482 + if (status != PWR_LOCAL) { 1483 + dev_err(hba->dev, 1484 + "pwr mode change failed, host umpcrs:0x%x\n", 1485 + status); 1486 + ret = (status != PWR_OK) ? status : -1; 1487 + } 1488 + out: 1489 + spin_lock_irqsave(hba->host->host_lock, flags); 1490 + hba->pwr_done = NULL; 1491 + spin_unlock_irqrestore(hba->host->host_lock, flags); 1492 + mutex_unlock(&hba->uic_cmd_mutex); 1493 + return ret; 1494 + } 1495 + 1496 + /** 1497 + * ufshcd_config_max_pwr_mode - Set & Change power mode with 1498 + * maximum capability attribute information. 
1499 + * @hba: per adapter instance 1500 + * 1501 + * Returns 0 on success, non-zero value on failure 1502 + */ 1503 + static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba) 1504 + { 1505 + enum {RX = 0, TX = 1}; 1506 + u32 lanes[] = {1, 1}; 1507 + u32 gear[] = {1, 1}; 1508 + u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE}; 1509 + int ret; 1510 + 1511 + /* Get the connected lane count */ 1512 + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]); 1513 + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]); 1514 + 1515 + /* 1516 + * First, get the maximum gears of HS speed. 1517 + * If a zero value, it means there is no HSGEAR capability. 1518 + * Then, get the maximum gears of PWM speed. 1519 + */ 1520 + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]); 1521 + if (!gear[RX]) { 1522 + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]); 1523 + pwr[RX] = SLOWAUTO_MODE; 1524 + } 1525 + 1526 + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]); 1527 + if (!gear[TX]) { 1528 + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), 1529 + &gear[TX]); 1530 + pwr[TX] = SLOWAUTO_MODE; 1531 + } 1532 + 1533 + /* 1534 + * Configure attributes for power mode change with below. 
1535 + * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, 1536 + * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, 1537 + * - PA_HSSERIES 1538 + */ 1539 + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]); 1540 + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]); 1541 + if (pwr[RX] == FASTAUTO_MODE) 1542 + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); 1543 + 1544 + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]); 1545 + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]); 1546 + if (pwr[TX] == FASTAUTO_MODE) 1547 + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); 1548 + 1549 + if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE) 1550 + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B); 1551 + 1552 + ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]); 1553 + if (ret) 1554 + dev_err(hba->dev, 1555 + "pwr_mode: power mode change failed %d\n", ret); 1556 + 1557 + return ret; 1558 + } 1559 + 1560 + /** 1403 1561 * ufshcd_complete_dev_init() - checks device readiness 1404 1562 * hba: per-adapter instance 1405 1563 * ··· 1676 1442 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); 1677 1443 1678 1444 /* Configure interrupt aggregation */ 1679 - ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG); 1445 + ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); 1680 1446 1681 1447 /* Configure UTRL and UTMRL base address registers */ 1682 1448 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), ··· 2022 1788 int result = 0; 2023 1789 2024 1790 switch (scsi_status) { 1791 + case SAM_STAT_CHECK_CONDITION: 1792 + ufshcd_copy_sense_data(lrbp); 2025 1793 case SAM_STAT_GOOD: 2026 1794 result |= DID_OK << 16 | 2027 1795 COMMAND_COMPLETE << 8 | 2028 - SAM_STAT_GOOD; 2029 - break; 2030 - case SAM_STAT_CHECK_CONDITION: 2031 - result |= DID_OK << 16 | 2032 - COMMAND_COMPLETE << 8 | 2033 - SAM_STAT_CHECK_CONDITION; 2034 - ufshcd_copy_sense_data(lrbp); 2035 - break; 2036 - case SAM_STAT_BUSY: 
2037 - result |= SAM_STAT_BUSY; 1796 + scsi_status; 2038 1797 break; 2039 1798 case SAM_STAT_TASK_SET_FULL: 2040 - 2041 1799 /* 2042 1800 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue 2043 1801 * depth needs to be adjusted to the exact number of 2044 1802 * outstanding commands the LUN can handle at any given time. 2045 1803 */ 2046 1804 ufshcd_adjust_lun_qdepth(lrbp->cmd); 2047 - result |= SAM_STAT_TASK_SET_FULL; 2048 - break; 1805 + case SAM_STAT_BUSY: 2049 1806 case SAM_STAT_TASK_ABORTED: 2050 - result |= SAM_STAT_TASK_ABORTED; 1807 + ufshcd_copy_sense_data(lrbp); 1808 + result |= scsi_status; 2051 1809 break; 2052 1810 default: 2053 1811 result |= DID_ERROR << 16; ··· 2124 1898 /** 2125 1899 * ufshcd_uic_cmd_compl - handle completion of uic command 2126 1900 * @hba: per adapter instance 1901 + * @intr_status: interrupt status generated by the controller 2127 1902 */ 2128 - static void ufshcd_uic_cmd_compl(struct ufs_hba *hba) 1903 + static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) 2129 1904 { 2130 - if (hba->active_uic_cmd) { 1905 + if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { 2131 1906 hba->active_uic_cmd->argument2 |= 2132 1907 ufshcd_get_uic_cmd_result(hba); 1908 + hba->active_uic_cmd->argument3 = 1909 + ufshcd_get_dme_attr_val(hba); 2133 1910 complete(&hba->active_uic_cmd->done); 2134 1911 } 1912 + 1913 + if ((intr_status & UIC_POWER_MODE) && hba->pwr_done) 1914 + complete(hba->pwr_done); 2135 1915 } 2136 1916 2137 1917 /** ··· 2192 1960 2193 1961 /* Reset interrupt aggregation counters */ 2194 1962 if (int_aggr_reset) 2195 - ufshcd_config_int_aggr(hba, INT_AGGR_RESET); 1963 + ufshcd_reset_intr_aggr(hba); 2196 1964 } 2197 1965 2198 1966 /** ··· 2483 2251 if (hba->errors) 2484 2252 ufshcd_err_handler(hba); 2485 2253 2486 - if (intr_status & UIC_COMMAND_COMPL) 2487 - ufshcd_uic_cmd_compl(hba); 2254 + if (intr_status & UFSHCD_UIC_MASK) 2255 + ufshcd_uic_cmd_compl(hba, intr_status); 2488 2256 2489 2257 if 
(intr_status & UTP_TASK_REQ_COMPL) 2490 2258 ufshcd_tmc_handler(hba); ··· 2725 2493 ret = ufshcd_link_startup(hba); 2726 2494 if (ret) 2727 2495 goto out; 2496 + 2497 + ufshcd_config_max_pwr_mode(hba); 2728 2498 2729 2499 ret = ufshcd_verify_dev_init(hba); 2730 2500 if (ret)
+54
drivers/scsi/ufs/ufshcd.h
··· 175 175 * @active_uic_cmd: handle of active UIC command 176 176 * @uic_cmd_mutex: mutex for uic command 177 177 * @ufshcd_tm_wait_queue: wait queue for task management 178 + * @pwr_done: completion for power mode change 178 179 * @tm_condition: condition variable for task management 179 180 * @ufshcd_state: UFSHCD states 180 181 * @intr_mask: Interrupt Mask Bits ··· 219 218 220 219 wait_queue_head_t ufshcd_tm_wait_queue; 221 220 unsigned long tm_condition; 221 + 222 + struct completion *pwr_done; 222 223 223 224 u32 ufshcd_state; 224 225 u32 intr_mask; ··· 266 263 extern int ufshcd_runtime_suspend(struct ufs_hba *hba); 267 264 extern int ufshcd_runtime_resume(struct ufs_hba *hba); 268 265 extern int ufshcd_runtime_idle(struct ufs_hba *hba); 266 + extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, 267 + u8 attr_set, u32 mib_val, u8 peer); 268 + extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, 269 + u32 *mib_val, u8 peer); 270 + 271 + /* UIC command interfaces for DME primitives */ 272 + #define DME_LOCAL 0 273 + #define DME_PEER 1 274 + #define ATTR_SET_NOR 0 /* NORMAL */ 275 + #define ATTR_SET_ST 1 /* STATIC */ 276 + 277 + static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, 278 + u32 mib_val) 279 + { 280 + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, 281 + mib_val, DME_LOCAL); 282 + } 283 + 284 + static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel, 285 + u32 mib_val) 286 + { 287 + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, 288 + mib_val, DME_LOCAL); 289 + } 290 + 291 + static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel, 292 + u32 mib_val) 293 + { 294 + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, 295 + mib_val, DME_PEER); 296 + } 297 + 298 + static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel, 299 + u32 mib_val) 300 + { 301 + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, 302 + mib_val, DME_PEER); 303 + } 304 + 
305 + static inline int ufshcd_dme_get(struct ufs_hba *hba, 306 + u32 attr_sel, u32 *mib_val) 307 + { 308 + return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL); 309 + } 310 + 311 + static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, 312 + u32 attr_sel, u32 *mib_val) 313 + { 314 + return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); 315 + } 316 + 269 317 #endif /* End of Header */
+20 -2
drivers/scsi/ufs/ufshci.h
··· 124 124 #define CONTROLLER_FATAL_ERROR UFS_BIT(16) 125 125 #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) 126 126 127 + #define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\ 128 + UIC_POWER_MODE) 129 + 127 130 #define UFSHCD_ERROR_MASK (UIC_ERROR |\ 128 131 DEVICE_FATAL_ERROR |\ 129 132 CONTROLLER_FATAL_ERROR |\ ··· 144 141 #define HOST_ERROR_INDICATOR UFS_BIT(4) 145 142 #define DEVICE_ERROR_INDICATOR UFS_BIT(5) 146 143 #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) 144 + 145 + enum { 146 + PWR_OK = 0x0, 147 + PWR_LOCAL = 0x01, 148 + PWR_REMOTE = 0x02, 149 + PWR_BUSY = 0x03, 150 + PWR_ERROR_CAP = 0x04, 151 + PWR_FATAL_ERROR = 0x05, 152 + }; 147 153 148 154 /* HCE - Host Controller Enable 34h */ 149 155 #define CONTROLLER_ENABLE UFS_BIT(0) ··· 203 191 #define CONFIG_RESULT_CODE_MASK 0xFF 204 192 #define GENERIC_ERROR_CODE_MASK 0xFF 205 193 194 + #define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ 195 + ((sel) & 0xFFFF)) 196 + #define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0) 197 + #define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16) 198 + #define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) 199 + 206 200 /* UIC Commands */ 207 201 enum { 208 202 UIC_CMD_DME_GET = 0x01, ··· 244 226 245 227 #define MASK_UIC_COMMAND_RESULT 0xFF 246 228 247 - #define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8) 248 - #define INT_AGGR_TIMEOUT_VALUE (0x02) 229 + #define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8) 230 + #define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0) 249 231 250 232 /* Interrupt disable masks */ 251 233 enum {
+151
drivers/scsi/ufs/unipro.h
··· 1 + /* 2 + * drivers/scsi/ufs/unipro.h 3 + * 4 + * Copyright (C) 2013 Samsung Electronics Co., Ltd. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + */ 11 + 12 + #ifndef _UNIPRO_H_ 13 + #define _UNIPRO_H_ 14 + 15 + /* 16 + * PHY Adpater attributes 17 + */ 18 + #define PA_ACTIVETXDATALANES 0x1560 19 + #define PA_ACTIVERXDATALANES 0x1580 20 + #define PA_TXTRAILINGCLOCKS 0x1564 21 + #define PA_PHY_TYPE 0x1500 22 + #define PA_AVAILTXDATALANES 0x1520 23 + #define PA_AVAILRXDATALANES 0x1540 24 + #define PA_MINRXTRAILINGCLOCKS 0x1543 25 + #define PA_TXPWRSTATUS 0x1567 26 + #define PA_RXPWRSTATUS 0x1582 27 + #define PA_TXFORCECLOCK 0x1562 28 + #define PA_TXPWRMODE 0x1563 29 + #define PA_LEGACYDPHYESCDL 0x1570 30 + #define PA_MAXTXSPEEDFAST 0x1521 31 + #define PA_MAXTXSPEEDSLOW 0x1522 32 + #define PA_MAXRXSPEEDFAST 0x1541 33 + #define PA_MAXRXSPEEDSLOW 0x1542 34 + #define PA_TXLINKSTARTUPHS 0x1544 35 + #define PA_TXSPEEDFAST 0x1565 36 + #define PA_TXSPEEDSLOW 0x1566 37 + #define PA_REMOTEVERINFO 0x15A0 38 + #define PA_TXGEAR 0x1568 39 + #define PA_TXTERMINATION 0x1569 40 + #define PA_HSSERIES 0x156A 41 + #define PA_PWRMODE 0x1571 42 + #define PA_RXGEAR 0x1583 43 + #define PA_RXTERMINATION 0x1584 44 + #define PA_MAXRXPWMGEAR 0x1586 45 + #define PA_MAXRXHSGEAR 0x1587 46 + #define PA_RXHSUNTERMCAP 0x15A5 47 + #define PA_RXLSTERMCAP 0x15A6 48 + #define PA_PACPREQTIMEOUT 0x1590 49 + #define PA_PACPREQEOBTIMEOUT 0x1591 50 + #define PA_HIBERN8TIME 0x15A7 51 + #define PA_LOCALVERINFO 0x15A9 52 + #define PA_TACTIVATE 0x15A8 53 + #define PA_PACPFRAMECOUNT 0x15C0 54 + #define PA_PACPERRORCOUNT 0x15C1 55 + #define PA_PHYTESTCONTROL 0x15C2 56 + #define PA_PWRMODEUSERDATA0 0x15B0 57 + #define PA_PWRMODEUSERDATA1 0x15B1 58 + #define PA_PWRMODEUSERDATA2 0x15B2 59 
+ #define PA_PWRMODEUSERDATA3 0x15B3 60 + #define PA_PWRMODEUSERDATA4 0x15B4 61 + #define PA_PWRMODEUSERDATA5 0x15B5 62 + #define PA_PWRMODEUSERDATA6 0x15B6 63 + #define PA_PWRMODEUSERDATA7 0x15B7 64 + #define PA_PWRMODEUSERDATA8 0x15B8 65 + #define PA_PWRMODEUSERDATA9 0x15B9 66 + #define PA_PWRMODEUSERDATA10 0x15BA 67 + #define PA_PWRMODEUSERDATA11 0x15BB 68 + #define PA_CONNECTEDTXDATALANES 0x1561 69 + #define PA_CONNECTEDRXDATALANES 0x1581 70 + #define PA_LOGICALLANEMAP 0x15A1 71 + #define PA_SLEEPNOCONFIGTIME 0x15A2 72 + #define PA_STALLNOCONFIGTIME 0x15A3 73 + #define PA_SAVECONFIGTIME 0x15A4 74 + 75 + /* PA power modes */ 76 + enum { 77 + FAST_MODE = 1, 78 + SLOW_MODE = 2, 79 + FASTAUTO_MODE = 4, 80 + SLOWAUTO_MODE = 5, 81 + UNCHANGED = 7, 82 + }; 83 + 84 + /* PA TX/RX Frequency Series */ 85 + enum { 86 + PA_HS_MODE_A = 1, 87 + PA_HS_MODE_B = 2, 88 + }; 89 + 90 + /* 91 + * Data Link Layer Attributes 92 + */ 93 + #define DL_TC0TXFCTHRESHOLD 0x2040 94 + #define DL_FC0PROTTIMEOUTVAL 0x2041 95 + #define DL_TC0REPLAYTIMEOUTVAL 0x2042 96 + #define DL_AFC0REQTIMEOUTVAL 0x2043 97 + #define DL_AFC0CREDITTHRESHOLD 0x2044 98 + #define DL_TC0OUTACKTHRESHOLD 0x2045 99 + #define DL_TC1TXFCTHRESHOLD 0x2060 100 + #define DL_FC1PROTTIMEOUTVAL 0x2061 101 + #define DL_TC1REPLAYTIMEOUTVAL 0x2062 102 + #define DL_AFC1REQTIMEOUTVAL 0x2063 103 + #define DL_AFC1CREDITTHRESHOLD 0x2064 104 + #define DL_TC1OUTACKTHRESHOLD 0x2065 105 + #define DL_TXPREEMPTIONCAP 0x2000 106 + #define DL_TC0TXMAXSDUSIZE 0x2001 107 + #define DL_TC0RXINITCREDITVAL 0x2002 108 + #define DL_TC0TXBUFFERSIZE 0x2005 109 + #define DL_PEERTC0PRESENT 0x2046 110 + #define DL_PEERTC0RXINITCREVAL 0x2047 111 + #define DL_TC1TXMAXSDUSIZE 0x2003 112 + #define DL_TC1RXINITCREDITVAL 0x2004 113 + #define DL_TC1TXBUFFERSIZE 0x2006 114 + #define DL_PEERTC1PRESENT 0x2066 115 + #define DL_PEERTC1RXINITCREVAL 0x2067 116 + 117 + /* 118 + * Network Layer Attributes 119 + */ 120 + #define N_DEVICEID 0x3000 121 + #define 
N_DEVICEID_VALID 0x3001 122 + #define N_TC0TXMAXSDUSIZE 0x3020 123 + #define N_TC1TXMAXSDUSIZE 0x3021 124 + 125 + /* 126 + * Transport Layer Attributes 127 + */ 128 + #define T_NUMCPORTS 0x4000 129 + #define T_NUMTESTFEATURES 0x4001 130 + #define T_CONNECTIONSTATE 0x4020 131 + #define T_PEERDEVICEID 0x4021 132 + #define T_PEERCPORTID 0x4022 133 + #define T_TRAFFICCLASS 0x4023 134 + #define T_PROTOCOLID 0x4024 135 + #define T_CPORTFLAGS 0x4025 136 + #define T_TXTOKENVALUE 0x4026 137 + #define T_RXTOKENVALUE 0x4027 138 + #define T_LOCALBUFFERSPACE 0x4028 139 + #define T_PEERBUFFERSPACE 0x4029 140 + #define T_CREDITSTOSEND 0x402A 141 + #define T_CPORTMODE 0x402B 142 + #define T_TC0TXMAXSDUSIZE 0x4060 143 + #define T_TC1TXMAXSDUSIZE 0x4061 144 + 145 + /* Boolean attribute values */ 146 + enum { 147 + FALSE = 0, 148 + TRUE, 149 + }; 150 + 151 + #endif /* _UNIPRO_H_ */
+1
include/linux/pci_ids.h
··· 758 758 #define PCI_DEVICE_ID_HP_CISSE 0x323a 759 759 #define PCI_DEVICE_ID_HP_CISSF 0x323b 760 760 #define PCI_DEVICE_ID_HP_CISSH 0x323c 761 + #define PCI_DEVICE_ID_HP_CISSI 0x3239 761 762 #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 762 763 763 764 #define PCI_VENDOR_ID_PCTECH 0x1042