Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: stex: Support Pegasus 3 product

The Pegasus series is a RAID product line built on Thunderbolt technology.
The newest product, Pegasus 3 (P3), supports Thunderbolt 3 and uses a
different controller chip.

1. Change driver version.

2. Add the P3 vendor ID and device ID, and define its device addresses.

3. P3 uses MSI interrupts, so stex_request_irq enables MSI for the P3 card type.

4. For hibernation, use msi_lock in stex_ss_handshake to prevent the MSI
register from being written a second time during handshake.

5. P3 does not need a readl() after register writes to flush them.

6. In stex_ss_intr and stex_abort, P3 clears the interrupt register only
when a vendor-defined interrupt is received.

Signed-off-by: Charles.Chiou <charles.chiou@tw.promise.com>
Signed-off-by: Paul.Lyu <paul.lyu@tw.promise.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Charles and committed by
Martin K. Petersen
d6570227 6dc618cd

+196 -68
+196 -68
drivers/scsi/stex.c
··· 38 38 #include <scsi/scsi_eh.h> 39 39 40 40 #define DRV_NAME "stex" 41 - #define ST_DRIVER_VERSION "5.00.0000.01" 42 - #define ST_VER_MAJOR 5 43 - #define ST_VER_MINOR 00 41 + #define ST_DRIVER_VERSION "6.02.0000.01" 42 + #define ST_VER_MAJOR 6 43 + #define ST_VER_MINOR 02 44 44 #define ST_OEM 0000 45 45 #define ST_BUILD_VER 01 46 46 ··· 64 64 YI2H_INT_C = 0xa0, 65 65 YH2I_REQ = 0xc0, 66 66 YH2I_REQ_HI = 0xc4, 67 + PSCRATCH0 = 0xb0, 68 + PSCRATCH1 = 0xb4, 69 + PSCRATCH2 = 0xb8, 70 + PSCRATCH3 = 0xbc, 71 + PSCRATCH4 = 0xc8, 72 + MAILBOX_BASE = 0x1000, 73 + MAILBOX_HNDSHK_STS = 0x0, 67 74 68 75 /* MU register value */ 69 76 MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0), ··· 94 87 MU_STATE_STOP = 5, 95 88 MU_STATE_NOCONNECT = 6, 96 89 97 - MU_MAX_DELAY = 120, 90 + MU_MAX_DELAY = 50, 98 91 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, 99 92 MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000, 100 93 MU_HARD_RESET_WAIT = 30000, ··· 142 135 st_yosemite = 2, 143 136 st_seq = 3, 144 137 st_yel = 4, 138 + st_P3 = 5, 145 139 146 140 PASSTHRU_REQ_TYPE = 0x00000001, 147 141 PASSTHRU_REQ_NO_WAKEUP = 0x00000100, ··· 347 339 u16 rq_size; 348 340 u16 sts_count; 349 341 u8 supports_pm; 342 + int msi_lock; 350 343 }; 351 344 352 345 struct st_card_info { ··· 549 540 550 541 ++hba->req_head; 551 542 hba->req_head %= hba->rq_count+1; 552 - 553 - writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); 554 - readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ 555 - writel(addr, hba->mmio_base + YH2I_REQ); 556 - readl(hba->mmio_base + YH2I_REQ); /* flush */ 543 + if (hba->cardtype == st_P3) { 544 + writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); 545 + writel(addr, hba->mmio_base + YH2I_REQ); 546 + } else { 547 + writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); 548 + readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ 549 + writel(addr, hba->mmio_base + YH2I_REQ); 550 + readl(hba->mmio_base + YH2I_REQ); /* flush */ 551 + } 557 552 } 558 553 559 554 static void return_abnormal_state(struct 
st_hba *hba, int status) ··· 987 974 988 975 spin_lock_irqsave(hba->host->host_lock, flags); 989 976 990 - data = readl(base + YI2H_INT); 991 - if (data && data != 0xffffffff) { 992 - /* clear the interrupt */ 993 - writel(data, base + YI2H_INT_C); 994 - stex_ss_mu_intr(hba); 995 - spin_unlock_irqrestore(hba->host->host_lock, flags); 996 - if (unlikely(data & SS_I2H_REQUEST_RESET)) 997 - queue_work(hba->work_q, &hba->reset_work); 998 - return IRQ_HANDLED; 977 + if (hba->cardtype == st_yel) { 978 + data = readl(base + YI2H_INT); 979 + if (data && data != 0xffffffff) { 980 + /* clear the interrupt */ 981 + writel(data, base + YI2H_INT_C); 982 + stex_ss_mu_intr(hba); 983 + spin_unlock_irqrestore(hba->host->host_lock, flags); 984 + if (unlikely(data & SS_I2H_REQUEST_RESET)) 985 + queue_work(hba->work_q, &hba->reset_work); 986 + return IRQ_HANDLED; 987 + } 988 + } else { 989 + data = readl(base + PSCRATCH4); 990 + if (data != 0xffffffff) { 991 + if (data != 0) { 992 + /* clear the interrupt */ 993 + writel(data, base + PSCRATCH1); 994 + writel((1 << 22), base + YH2I_INT); 995 + } 996 + stex_ss_mu_intr(hba); 997 + spin_unlock_irqrestore(hba->host->host_lock, flags); 998 + if (unlikely(data & SS_I2H_REQUEST_RESET)) 999 + queue_work(hba->work_q, &hba->reset_work); 1000 + return IRQ_HANDLED; 1001 + } 999 1002 } 1000 1003 1001 1004 spin_unlock_irqrestore(hba->host->host_lock, flags); ··· 1109 1080 struct st_msg_header *msg_h; 1110 1081 struct handshake_frame *h; 1111 1082 __le32 *scratch; 1112 - u32 data, scratch_size; 1083 + u32 data, scratch_size, mailboxdata, operationaldata; 1113 1084 unsigned long before; 1114 1085 int ret = 0; 1115 1086 1116 1087 before = jiffies; 1117 - while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) { 1118 - if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { 1119 - printk(KERN_ERR DRV_NAME 1120 - "(%s): firmware not operational\n", 1121 - pci_name(hba->pdev)); 1122 - return -1; 1088 + 1089 + if (hba->cardtype == st_yel) { 1090 + 
operationaldata = readl(base + YIOA_STATUS); 1091 + while (operationaldata != SS_MU_OPERATIONAL) { 1092 + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { 1093 + printk(KERN_ERR DRV_NAME 1094 + "(%s): firmware not operational\n", 1095 + pci_name(hba->pdev)); 1096 + return -1; 1097 + } 1098 + msleep(1); 1099 + operationaldata = readl(base + YIOA_STATUS); 1123 1100 } 1124 - msleep(1); 1101 + } else { 1102 + operationaldata = readl(base + PSCRATCH3); 1103 + while (operationaldata != SS_MU_OPERATIONAL) { 1104 + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { 1105 + printk(KERN_ERR DRV_NAME 1106 + "(%s): firmware not operational\n", 1107 + pci_name(hba->pdev)); 1108 + return -1; 1109 + } 1110 + msleep(1); 1111 + operationaldata = readl(base + PSCRATCH3); 1112 + } 1125 1113 } 1126 1114 1127 1115 msg_h = (struct st_msg_header *)hba->dma_mem; ··· 1157 1111 scratch_size = (hba->sts_count+1)*sizeof(u32); 1158 1112 h->scratch_size = cpu_to_le32(scratch_size); 1159 1113 1160 - data = readl(base + YINT_EN); 1161 - data &= ~4; 1162 - writel(data, base + YINT_EN); 1163 - writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); 1164 - readl(base + YH2I_REQ_HI); 1165 - writel(hba->dma_handle, base + YH2I_REQ); 1166 - readl(base + YH2I_REQ); /* flush */ 1167 - 1168 - scratch = hba->scratch; 1169 - before = jiffies; 1170 - while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) { 1171 - if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { 1172 - printk(KERN_ERR DRV_NAME 1173 - "(%s): no signature after handshake frame\n", 1174 - pci_name(hba->pdev)); 1175 - ret = -1; 1176 - break; 1114 + if (hba->cardtype == st_yel) { 1115 + data = readl(base + YINT_EN); 1116 + data &= ~4; 1117 + writel(data, base + YINT_EN); 1118 + writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); 1119 + readl(base + YH2I_REQ_HI); 1120 + writel(hba->dma_handle, base + YH2I_REQ); 1121 + readl(base + YH2I_REQ); /* flush */ 1122 + } else { 1123 + data = readl(base + YINT_EN); 1124 + data &= ~(1 << 
0); 1125 + data &= ~(1 << 2); 1126 + writel(data, base + YINT_EN); 1127 + if (hba->msi_lock == 0) { 1128 + /* P3 MSI Register cannot access twice */ 1129 + writel((1 << 6), base + YH2I_INT); 1130 + hba->msi_lock = 1; 1177 1131 } 1178 - rmb(); 1179 - msleep(1); 1132 + writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); 1133 + writel(hba->dma_handle, base + YH2I_REQ); 1180 1134 } 1181 1135 1136 + before = jiffies; 1137 + scratch = hba->scratch; 1138 + if (hba->cardtype == st_yel) { 1139 + while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) { 1140 + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { 1141 + printk(KERN_ERR DRV_NAME 1142 + "(%s): no signature after handshake frame\n", 1143 + pci_name(hba->pdev)); 1144 + ret = -1; 1145 + break; 1146 + } 1147 + rmb(); 1148 + msleep(1); 1149 + } 1150 + } else { 1151 + mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS); 1152 + while (mailboxdata != SS_STS_HANDSHAKE) { 1153 + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { 1154 + printk(KERN_ERR DRV_NAME 1155 + "(%s): no signature after handshake frame\n", 1156 + pci_name(hba->pdev)); 1157 + ret = -1; 1158 + break; 1159 + } 1160 + rmb(); 1161 + msleep(1); 1162 + mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS); 1163 + } 1164 + } 1182 1165 memset(scratch, 0, scratch_size); 1183 1166 msg_h->flag = 0; 1167 + 1184 1168 return ret; 1185 1169 } 1186 1170 ··· 1220 1144 unsigned long flags; 1221 1145 unsigned int mu_status; 1222 1146 1223 - err = (hba->cardtype == st_yel) ? 
1224 - stex_ss_handshake(hba) : stex_common_handshake(hba); 1147 + if (hba->cardtype == st_yel || hba->cardtype == st_P3) 1148 + err = stex_ss_handshake(hba); 1149 + else 1150 + err = stex_common_handshake(hba); 1225 1151 spin_lock_irqsave(hba->host->host_lock, flags); 1226 1152 mu_status = hba->mu_status; 1227 1153 if (err == 0) { ··· 1268 1190 1269 1191 writel(data, base + YI2H_INT_C); 1270 1192 stex_ss_mu_intr(hba); 1193 + } else if (hba->cardtype == st_P3) { 1194 + data = readl(base + PSCRATCH4); 1195 + if (data == 0xffffffff) 1196 + goto fail_out; 1197 + if (data != 0) { 1198 + writel(data, base + PSCRATCH1); 1199 + writel((1 << 22), base + YH2I_INT); 1200 + } 1201 + stex_ss_mu_intr(hba); 1271 1202 } else { 1272 1203 data = readl(base + ODBL); 1273 1204 if (data == 0 || data == 0xffffffff) ··· 1284 1197 1285 1198 writel(data, base + ODBL); 1286 1199 readl(base + ODBL); /* flush */ 1287 - 1288 1200 stex_mu_intr(hba, data); 1289 1201 } 1290 1202 if (hba->wait_ccb == NULL) { ··· 1379 1293 ssleep(5); 1380 1294 } 1381 1295 1296 + static void stex_p3_reset(struct st_hba *hba) 1297 + { 1298 + writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); 1299 + ssleep(5); 1300 + } 1301 + 1382 1302 static int stex_do_reset(struct st_hba *hba) 1383 1303 { 1384 1304 unsigned long flags; ··· 1421 1329 stex_hard_reset(hba); 1422 1330 else if (hba->cardtype == st_yel) 1423 1331 stex_ss_reset(hba); 1424 - 1332 + else if (hba->cardtype == st_P3) 1333 + stex_p3_reset(hba); 1425 1334 1426 1335 return_abnormal_state(hba, DID_RESET); 1427 1336 ··· 1507 1414 /* st_yel */ 1508 1415 { 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel }, 1509 1416 { 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel }, 1417 + 1418 + /* st_P3, pluto */ 1419 + { PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE, 1420 + 0x8870, 0, 0, st_P3 }, 1421 + /* st_P3, p3 */ 1422 + { PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE, 1423 + 0x4300, 0, 0, st_P3 }, 1424 + 1425 + /* st_P3, SymplyStor4E */ 1426 + { 
PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, 1427 + 0x4311, 0, 0, st_P3 }, 1428 + /* st_P3, SymplyStor8E */ 1429 + { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, 1430 + 0x4312, 0, 0, st_P3 }, 1431 + /* st_P3, SymplyStor4 */ 1432 + { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, 1433 + 0x4321, 0, 0, st_P3 }, 1434 + /* st_P3, SymplyStor8 */ 1435 + { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE, 1436 + 0x4322, 0, 0, st_P3 }, 1510 1437 { } /* terminate list */ 1511 1438 }; 1512 1439 ··· 1595 1482 .map_sg = stex_ss_map_sg, 1596 1483 .send = stex_ss_send_cmd, 1597 1484 }, 1485 + 1486 + /* st_P3 */ 1487 + { 1488 + .max_id = 129, 1489 + .max_lun = 256, 1490 + .max_channel = 0, 1491 + .rq_count = 801, 1492 + .rq_size = 512, 1493 + .sts_count = 801, 1494 + .alloc_rq = stex_ss_alloc_req, 1495 + .map_sg = stex_ss_map_sg, 1496 + .send = stex_ss_send_cmd, 1497 + }, 1598 1498 }; 1599 1499 1600 1500 static int stex_set_dma_mask(struct pci_dev * pdev) ··· 1628 1502 struct pci_dev *pdev = hba->pdev; 1629 1503 int status; 1630 1504 1631 - if (msi) { 1505 + if (msi || hba->cardtype == st_P3) { 1632 1506 status = pci_enable_msi(pdev); 1633 1507 if (status != 0) 1634 1508 printk(KERN_ERR DRV_NAME ··· 1639 1513 } else 1640 1514 hba->msi_enabled = 0; 1641 1515 1642 - status = request_irq(pdev->irq, hba->cardtype == st_yel ? 1516 + status = request_irq(pdev->irq, 1517 + (hba->cardtype == st_yel || hba->cardtype == st_P3) ? 
1643 1518 stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba); 1644 1519 1645 1520 if (status != 0) { ··· 1724 1597 case 0x4265: 1725 1598 break; 1726 1599 default: 1727 - if (hba->cardtype == st_yel) 1600 + if (hba->cardtype == st_yel || hba->cardtype == st_P3) 1728 1601 hba->supports_pm = 1; 1729 1602 } 1730 1603 1731 1604 sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size; 1732 - if (hba->cardtype == st_yel) 1605 + if (hba->cardtype == st_yel || hba->cardtype == st_P3) 1733 1606 sts_offset += (ci->sts_count+1) * sizeof(u32); 1734 1607 cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg); 1735 1608 hba->dma_size = cp_offset + sizeof(struct st_frame); ··· 1769 1642 goto out_pci_free; 1770 1643 } 1771 1644 1772 - if (hba->cardtype == st_yel) 1645 + if (hba->cardtype == st_yel || hba->cardtype == st_P3) 1773 1646 hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset); 1774 1647 hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset); 1775 1648 hba->copy_buffer = hba->dma_mem + cp_offset; ··· 1780 1653 hba->map_sg = ci->map_sg; 1781 1654 hba->send = ci->send; 1782 1655 hba->mu_status = MU_STATE_STARTING; 1656 + hba->msi_lock = 0; 1783 1657 1784 - if (hba->cardtype == st_yel) 1658 + if (hba->cardtype == st_yel || hba->cardtype == st_P3) 1785 1659 host->sg_tablesize = 38; 1786 1660 else 1787 1661 host->sg_tablesize = 32; ··· 1864 1736 1865 1737 spin_lock_irqsave(hba->host->host_lock, flags); 1866 1738 1867 - if (hba->cardtype == st_yel && hba->supports_pm == 1) 1868 - { 1869 - if(st_sleep_mic == ST_NOTHANDLED) 1870 - { 1739 + if ((hba->cardtype == st_yel || hba->cardtype == st_P3) && 1740 + hba->supports_pm == 1) { 1741 + if (st_sleep_mic == ST_NOTHANDLED) { 1871 1742 spin_unlock_irqrestore(hba->host->host_lock, flags); 1872 1743 return; 1873 1744 } 1874 1745 } 1875 1746 req = hba->alloc_rq(hba); 1876 - if (hba->cardtype == st_yel) { 1747 + if (hba->cardtype == st_yel || hba->cardtype == st_P3) { 1877 1748 msg_h = (struct 
st_msg_header *)req - 1; 1878 1749 memset(msg_h, 0, hba->rq_size); 1879 1750 } else 1880 1751 memset(req, 0, hba->rq_size); 1881 1752 1882 - if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel) 1753 + if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel 1754 + || hba->cardtype == st_P3) 1883 1755 && st_sleep_mic == ST_IGNORED) { 1884 1756 req->cdb[0] = MGT_CMD; 1885 1757 req->cdb[1] = MGT_CMD_SIGNATURE; 1886 1758 req->cdb[2] = CTLR_CONFIG_CMD; 1887 1759 req->cdb[3] = CTLR_SHUTDOWN; 1888 - } else if (hba->cardtype == st_yel && st_sleep_mic != ST_IGNORED) { 1760 + } else if ((hba->cardtype == st_yel || hba->cardtype == st_P3) 1761 + && st_sleep_mic != ST_IGNORED) { 1889 1762 req->cdb[0] = MGT_CMD; 1890 1763 req->cdb[1] = MGT_CMD_SIGNATURE; 1891 1764 req->cdb[2] = CTLR_CONFIG_CMD; ··· 1897 1768 req->cdb[1] = CTLR_POWER_STATE_CHANGE; 1898 1769 req->cdb[2] = CTLR_POWER_SAVING; 1899 1770 } 1900 - 1901 1771 hba->ccb[tag].cmd = NULL; 1902 1772 hba->ccb[tag].sg_count = 0; 1903 1773 hba->ccb[tag].sense_bufflen = 0; 1904 1774 hba->ccb[tag].sense_buffer = NULL; 1905 1775 hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE; 1906 - 1907 1776 hba->send(hba, req, tag); 1908 1777 spin_unlock_irqrestore(hba->host->host_lock, flags); 1909 - 1910 1778 before = jiffies; 1911 1779 while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) { 1912 1780 if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) { ··· 1959 1833 stex_hba_stop(hba, ST_S5); 1960 1834 } 1961 1835 1962 - static int stex_choice_sleep_mic(pm_message_t state) 1836 + static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state) 1963 1837 { 1964 1838 switch (state.event) { 1965 1839 case PM_EVENT_SUSPEND: 1966 1840 return ST_S3; 1967 1841 case PM_EVENT_HIBERNATE: 1842 + hba->msi_lock = 0; 1968 1843 return ST_S4; 1969 1844 default: 1970 1845 return ST_NOTHANDLED; ··· 1976 1849 { 1977 1850 struct st_hba *hba = pci_get_drvdata(pdev); 1978 1851 1979 - if (hba->cardtype == st_yel && hba->supports_pm == 1) 1980 
- stex_hba_stop(hba, stex_choice_sleep_mic(state)); 1852 + if ((hba->cardtype == st_yel || hba->cardtype == st_P3) 1853 + && hba->supports_pm == 1) 1854 + stex_hba_stop(hba, stex_choice_sleep_mic(hba, state)); 1981 1855 else 1982 1856 stex_hba_stop(hba, ST_IGNORED); 1983 1857 return 0;