Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
"This is a set of minor fixes in various drivers (qla2xxx, ufs,
scsi_debug, lpfc), one doc fix, and a fairly large update to the fnic
driver to remove the open-coded iteration functions in favour of the
SCSI-provided ones"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
scsi: fnic: Use scsi_host_busy_iter() to traverse commands
scsi: fnic: Kill 'exclude_id' argument to fnic_cleanup_io()
scsi: scsi_debug: Fix cmd_per_lun, set to max_queue
scsi: ufs: core: Narrow down fast path in system suspend path
scsi: ufs: core: Cancel rpm_dev_flush_recheck_work during system suspend
scsi: ufs: core: Do not put UFS power into LPM if link is broken
scsi: qla2xxx: Prevent PRLI in target mode
scsi: qla2xxx: Add marginal path handling support
scsi: target: tcmu: Return from tcmu_handle_completions() if cmd_id not found
scsi: ufs: core: Fix a typo in ufs-sysfs.c
scsi: lpfc: Fix bad memory access during VPD DUMP mailbox command
scsi: lpfc: Fix DMA virtual address ptr assignment in bsg
scsi: lpfc: Fix illegal memory access on Abort IOCBs
scsi: blk-mq: Fix build warning when making htmldocs

+467 -518
+406 -480
drivers/scsi/fnic/fnic_scsi.c
··· 102 102 return fcpio_status_str[status]; 103 103 } 104 104 105 - static void fnic_cleanup_io(struct fnic *fnic, int exclude_id); 105 + static void fnic_cleanup_io(struct fnic *fnic); 106 106 107 107 static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, 108 108 struct scsi_cmnd *sc) ··· 638 638 atomic64_inc(&reset_stats->fw_reset_completions); 639 639 640 640 /* Clean up all outstanding io requests */ 641 - fnic_cleanup_io(fnic, SCSI_NO_TAG); 641 + fnic_cleanup_io(fnic); 642 642 643 643 atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); 644 644 atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); ··· 1361 1361 return wq_work_done; 1362 1362 } 1363 1363 1364 - static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) 1364 + static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data, 1365 + bool reserved) 1365 1366 { 1366 - int i; 1367 + struct fnic *fnic = data; 1367 1368 struct fnic_io_req *io_req; 1368 1369 unsigned long flags = 0; 1369 - struct scsi_cmnd *sc; 1370 1370 spinlock_t *io_lock; 1371 1371 unsigned long start_time = 0; 1372 1372 struct fnic_stats *fnic_stats = &fnic->fnic_stats; 1373 1373 1374 - for (i = 0; i < fnic->fnic_max_tag_id; i++) { 1375 - if (i == exclude_id) 1376 - continue; 1374 + io_lock = fnic_io_lock_tag(fnic, sc->request->tag); 1375 + spin_lock_irqsave(io_lock, flags); 1377 1376 1378 - io_lock = fnic_io_lock_tag(fnic, i); 1379 - spin_lock_irqsave(io_lock, flags); 1380 - sc = scsi_host_find_tag(fnic->lport->host, i); 1381 - if (!sc) { 1382 - spin_unlock_irqrestore(io_lock, flags); 1383 - continue; 1384 - } 1385 - 1386 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1387 - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 1388 - !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { 1389 - /* 1390 - * We will be here only when FW completes reset 1391 - * without sending completions for outstanding ios. 
1392 - */ 1393 - CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; 1394 - if (io_req && io_req->dr_done) 1395 - complete(io_req->dr_done); 1396 - else if (io_req && io_req->abts_done) 1397 - complete(io_req->abts_done); 1398 - spin_unlock_irqrestore(io_lock, flags); 1399 - continue; 1400 - } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 1401 - spin_unlock_irqrestore(io_lock, flags); 1402 - continue; 1403 - } 1404 - if (!io_req) { 1405 - spin_unlock_irqrestore(io_lock, flags); 1406 - continue; 1407 - } 1408 - 1409 - CMD_SP(sc) = NULL; 1410 - 1411 - spin_unlock_irqrestore(io_lock, flags); 1412 - 1377 + io_req = (struct fnic_io_req *)CMD_SP(sc); 1378 + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 1379 + !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { 1413 1380 /* 1414 - * If there is a scsi_cmnd associated with this io_req, then 1415 - * free the corresponding state 1381 + * We will be here only when FW completes reset 1382 + * without sending completions for outstanding ios. 1416 1383 */ 1417 - start_time = io_req->start_time; 1418 - fnic_release_ioreq_buf(fnic, io_req, sc); 1419 - mempool_free(io_req, fnic->io_req_pool); 1420 - 1421 - sc->result = DID_TRANSPORT_DISRUPTED << 16; 1422 - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1423 - "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", 1424 - __func__, sc->request->tag, sc, 1425 - (jiffies - start_time)); 1426 - 1427 - if (atomic64_read(&fnic->io_cmpl_skip)) 1428 - atomic64_dec(&fnic->io_cmpl_skip); 1429 - else 1430 - atomic64_inc(&fnic_stats->io_stats.io_completions); 1431 - 1432 - /* Complete the command to SCSI */ 1433 - if (sc->scsi_done) { 1434 - if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) 1435 - shost_printk(KERN_ERR, fnic->lport->host, 1436 - "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", 1437 - sc->request->tag, sc); 1438 - 1439 - FNIC_TRACE(fnic_cleanup_io, 1440 - sc->device->host->host_no, i, sc, 1441 - jiffies_to_msecs(jiffies - start_time), 1442 - 0, ((u64)sc->cmnd[0] << 32 | 1443 - (u64)sc->cmnd[2] << 
24 | 1444 - (u64)sc->cmnd[3] << 16 | 1445 - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1446 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1447 - 1448 - sc->scsi_done(sc); 1449 - } 1384 + CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE; 1385 + if (io_req && io_req->dr_done) 1386 + complete(io_req->dr_done); 1387 + else if (io_req && io_req->abts_done) 1388 + complete(io_req->abts_done); 1389 + spin_unlock_irqrestore(io_lock, flags); 1390 + return true; 1391 + } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 1392 + spin_unlock_irqrestore(io_lock, flags); 1393 + return true; 1450 1394 } 1395 + if (!io_req) { 1396 + spin_unlock_irqrestore(io_lock, flags); 1397 + goto cleanup_scsi_cmd; 1398 + } 1399 + 1400 + CMD_SP(sc) = NULL; 1401 + 1402 + spin_unlock_irqrestore(io_lock, flags); 1403 + 1404 + /* 1405 + * If there is a scsi_cmnd associated with this io_req, then 1406 + * free the corresponding state 1407 + */ 1408 + start_time = io_req->start_time; 1409 + fnic_release_ioreq_buf(fnic, io_req, sc); 1410 + mempool_free(io_req, fnic->io_req_pool); 1411 + 1412 + cleanup_scsi_cmd: 1413 + sc->result = DID_TRANSPORT_DISRUPTED << 16; 1414 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1415 + "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", 1416 + sc->request->tag, sc, (jiffies - start_time)); 1417 + 1418 + if (atomic64_read(&fnic->io_cmpl_skip)) 1419 + atomic64_dec(&fnic->io_cmpl_skip); 1420 + else 1421 + atomic64_inc(&fnic_stats->io_stats.io_completions); 1422 + 1423 + /* Complete the command to SCSI */ 1424 + if (sc->scsi_done) { 1425 + if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) 1426 + shost_printk(KERN_ERR, fnic->lport->host, 1427 + "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", 1428 + sc->request->tag, sc); 1429 + 1430 + FNIC_TRACE(fnic_cleanup_io, 1431 + sc->device->host->host_no, sc->request->tag, sc, 1432 + jiffies_to_msecs(jiffies - start_time), 1433 + 0, ((u64)sc->cmnd[0] << 32 | 1434 + (u64)sc->cmnd[2] << 24 | 1435 + (u64)sc->cmnd[3] << 16 | 
1436 + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1437 + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1438 + 1439 + sc->scsi_done(sc); 1440 + } 1441 + return true; 1442 + } 1443 + 1444 + static void fnic_cleanup_io(struct fnic *fnic) 1445 + { 1446 + scsi_host_busy_iter(fnic->lport->host, 1447 + fnic_cleanup_io_iter, fnic); 1451 1448 } 1452 1449 1453 1450 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, ··· 1555 1558 return 0; 1556 1559 } 1557 1560 1558 - static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) 1561 + struct fnic_rport_abort_io_iter_data { 1562 + struct fnic *fnic; 1563 + u32 port_id; 1564 + int term_cnt; 1565 + }; 1566 + 1567 + static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data, 1568 + bool reserved) 1559 1569 { 1560 - int tag; 1561 - int abt_tag; 1562 - int term_cnt = 0; 1570 + struct fnic_rport_abort_io_iter_data *iter_data = data; 1571 + struct fnic *fnic = iter_data->fnic; 1572 + int abt_tag = sc->request->tag; 1563 1573 struct fnic_io_req *io_req; 1564 1574 spinlock_t *io_lock; 1565 1575 unsigned long flags; 1566 - struct scsi_cmnd *sc; 1567 1576 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; 1568 1577 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; 1569 1578 struct scsi_lun fc_lun; 1570 1579 enum fnic_ioreq_state old_ioreq_state; 1580 + 1581 + io_lock = fnic_io_lock_tag(fnic, abt_tag); 1582 + spin_lock_irqsave(io_lock, flags); 1583 + 1584 + io_req = (struct fnic_io_req *)CMD_SP(sc); 1585 + 1586 + if (!io_req || io_req->port_id != iter_data->port_id) { 1587 + spin_unlock_irqrestore(io_lock, flags); 1588 + return true; 1589 + } 1590 + 1591 + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 1592 + (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { 1593 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1594 + "fnic_rport_exch_reset dev rst not pending sc 0x%p\n", 1595 + sc); 1596 + spin_unlock_irqrestore(io_lock, flags); 1597 + return true; 1598 + } 1599 + 1600 + /* 1601 + * Found IO that is 
still pending with firmware and 1602 + * belongs to rport that went away 1603 + */ 1604 + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 1605 + spin_unlock_irqrestore(io_lock, flags); 1606 + return true; 1607 + } 1608 + if (io_req->abts_done) { 1609 + shost_printk(KERN_ERR, fnic->lport->host, 1610 + "fnic_rport_exch_reset: io_req->abts_done is set " 1611 + "state is %s\n", 1612 + fnic_ioreq_state_to_str(CMD_STATE(sc))); 1613 + } 1614 + 1615 + if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) { 1616 + shost_printk(KERN_ERR, fnic->lport->host, 1617 + "rport_exch_reset " 1618 + "IO not yet issued %p tag 0x%x flags " 1619 + "%x state %d\n", 1620 + sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc)); 1621 + } 1622 + old_ioreq_state = CMD_STATE(sc); 1623 + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1624 + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 1625 + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 1626 + atomic64_inc(&reset_stats->device_reset_terminates); 1627 + abt_tag |= FNIC_TAG_DEV_RST; 1628 + } 1629 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1630 + "fnic_rport_exch_reset dev rst sc 0x%p\n", sc); 1631 + BUG_ON(io_req->abts_done); 1632 + 1633 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1634 + "fnic_rport_reset_exch: Issuing abts\n"); 1635 + 1636 + spin_unlock_irqrestore(io_lock, flags); 1637 + 1638 + /* Now queue the abort command to firmware */ 1639 + int_to_scsilun(sc->device->lun, &fc_lun); 1640 + 1641 + if (fnic_queue_abort_io_req(fnic, abt_tag, 1642 + FCPIO_ITMF_ABT_TASK_TERM, 1643 + fc_lun.scsi_lun, io_req)) { 1644 + /* 1645 + * Revert the cmd state back to old state, if 1646 + * it hasn't changed in between. 
This cmd will get 1647 + * aborted later by scsi_eh, or cleaned up during 1648 + * lun reset 1649 + */ 1650 + spin_lock_irqsave(io_lock, flags); 1651 + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) 1652 + CMD_STATE(sc) = old_ioreq_state; 1653 + spin_unlock_irqrestore(io_lock, flags); 1654 + } else { 1655 + spin_lock_irqsave(io_lock, flags); 1656 + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) 1657 + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; 1658 + else 1659 + CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; 1660 + spin_unlock_irqrestore(io_lock, flags); 1661 + atomic64_inc(&term_stats->terminates); 1662 + iter_data->term_cnt++; 1663 + } 1664 + return true; 1665 + } 1666 + 1667 + static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) 1668 + { 1669 + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; 1670 + struct fnic_rport_abort_io_iter_data iter_data = { 1671 + .fnic = fnic, 1672 + .port_id = port_id, 1673 + .term_cnt = 0, 1674 + }; 1571 1675 1572 1676 FNIC_SCSI_DBG(KERN_DEBUG, 1573 1677 fnic->lport->host, ··· 1678 1580 if (fnic->in_remove) 1679 1581 return; 1680 1582 1681 - for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { 1682 - abt_tag = tag; 1683 - io_lock = fnic_io_lock_tag(fnic, tag); 1684 - spin_lock_irqsave(io_lock, flags); 1685 - sc = scsi_host_find_tag(fnic->lport->host, tag); 1686 - if (!sc) { 1687 - spin_unlock_irqrestore(io_lock, flags); 1688 - continue; 1689 - } 1690 - 1691 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1692 - 1693 - if (!io_req || io_req->port_id != port_id) { 1694 - spin_unlock_irqrestore(io_lock, flags); 1695 - continue; 1696 - } 1697 - 1698 - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 1699 - (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { 1700 - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1701 - "fnic_rport_exch_reset dev rst not pending sc 0x%p\n", 1702 - sc); 1703 - spin_unlock_irqrestore(io_lock, flags); 1704 - continue; 1705 - } 1706 - 1707 - /* 1708 - * Found IO that is still pending with firmware and 1709 - 
* belongs to rport that went away 1710 - */ 1711 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 1712 - spin_unlock_irqrestore(io_lock, flags); 1713 - continue; 1714 - } 1715 - if (io_req->abts_done) { 1716 - shost_printk(KERN_ERR, fnic->lport->host, 1717 - "fnic_rport_exch_reset: io_req->abts_done is set " 1718 - "state is %s\n", 1719 - fnic_ioreq_state_to_str(CMD_STATE(sc))); 1720 - } 1721 - 1722 - if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) { 1723 - shost_printk(KERN_ERR, fnic->lport->host, 1724 - "rport_exch_reset " 1725 - "IO not yet issued %p tag 0x%x flags " 1726 - "%x state %d\n", 1727 - sc, tag, CMD_FLAGS(sc), CMD_STATE(sc)); 1728 - } 1729 - old_ioreq_state = CMD_STATE(sc); 1730 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1731 - CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 1732 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 1733 - atomic64_inc(&reset_stats->device_reset_terminates); 1734 - abt_tag = (tag | FNIC_TAG_DEV_RST); 1735 - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1736 - "fnic_rport_exch_reset dev rst sc 0x%p\n", 1737 - sc); 1738 - } 1739 - 1740 - BUG_ON(io_req->abts_done); 1741 - 1742 - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1743 - "fnic_rport_reset_exch: Issuing abts\n"); 1744 - 1745 - spin_unlock_irqrestore(io_lock, flags); 1746 - 1747 - /* Now queue the abort command to firmware */ 1748 - int_to_scsilun(sc->device->lun, &fc_lun); 1749 - 1750 - if (fnic_queue_abort_io_req(fnic, abt_tag, 1751 - FCPIO_ITMF_ABT_TASK_TERM, 1752 - fc_lun.scsi_lun, io_req)) { 1753 - /* 1754 - * Revert the cmd state back to old state, if 1755 - * it hasn't changed in between. 
This cmd will get 1756 - * aborted later by scsi_eh, or cleaned up during 1757 - * lun reset 1758 - */ 1759 - spin_lock_irqsave(io_lock, flags); 1760 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) 1761 - CMD_STATE(sc) = old_ioreq_state; 1762 - spin_unlock_irqrestore(io_lock, flags); 1763 - } else { 1764 - spin_lock_irqsave(io_lock, flags); 1765 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) 1766 - CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; 1767 - else 1768 - CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; 1769 - spin_unlock_irqrestore(io_lock, flags); 1770 - atomic64_inc(&term_stats->terminates); 1771 - term_cnt++; 1772 - } 1773 - } 1774 - if (term_cnt > atomic64_read(&term_stats->max_terminates)) 1775 - atomic64_set(&term_stats->max_terminates, term_cnt); 1583 + scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter, 1584 + &iter_data); 1585 + if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates)) 1586 + atomic64_set(&term_stats->max_terminates, iter_data.term_cnt); 1776 1587 1777 1588 } 1778 1589 1779 1590 void fnic_terminate_rport_io(struct fc_rport *rport) 1780 1591 { 1781 - int tag; 1782 - int abt_tag; 1783 - int term_cnt = 0; 1784 - struct fnic_io_req *io_req; 1785 - spinlock_t *io_lock; 1786 - unsigned long flags; 1787 - struct scsi_cmnd *sc; 1788 - struct scsi_lun fc_lun; 1789 1592 struct fc_rport_libfc_priv *rdata; 1790 1593 struct fc_lport *lport; 1791 1594 struct fnic *fnic; 1792 - struct fc_rport *cmd_rport; 1793 - struct reset_stats *reset_stats; 1794 - struct terminate_stats *term_stats; 1795 - enum fnic_ioreq_state old_ioreq_state; 1796 1595 1797 1596 if (!rport) { 1798 1597 printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); ··· 1717 1722 if (fnic->in_remove) 1718 1723 return; 1719 1724 1720 - reset_stats = &fnic->fnic_stats.reset_stats; 1721 - term_stats = &fnic->fnic_stats.term_stats; 1722 - 1723 - for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { 1724 - abt_tag = tag; 1725 - io_lock = fnic_io_lock_tag(fnic, tag); 1726 
- spin_lock_irqsave(io_lock, flags); 1727 - sc = scsi_host_find_tag(fnic->lport->host, tag); 1728 - if (!sc) { 1729 - spin_unlock_irqrestore(io_lock, flags); 1730 - continue; 1731 - } 1732 - 1733 - io_req = (struct fnic_io_req *)CMD_SP(sc); 1734 - if (!io_req) { 1735 - spin_unlock_irqrestore(io_lock, flags); 1736 - continue; 1737 - } 1738 - 1739 - cmd_rport = starget_to_rport(scsi_target(sc->device)); 1740 - if (rport != cmd_rport) { 1741 - spin_unlock_irqrestore(io_lock, flags); 1742 - continue; 1743 - } 1744 - 1745 - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 1746 - (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { 1747 - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1748 - "fnic_terminate_rport_io dev rst not pending sc 0x%p\n", 1749 - sc); 1750 - spin_unlock_irqrestore(io_lock, flags); 1751 - continue; 1752 - } 1753 - /* 1754 - * Found IO that is still pending with firmware and 1755 - * belongs to rport that went away 1756 - */ 1757 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 1758 - spin_unlock_irqrestore(io_lock, flags); 1759 - continue; 1760 - } 1761 - if (io_req->abts_done) { 1762 - shost_printk(KERN_ERR, fnic->lport->host, 1763 - "fnic_terminate_rport_io: io_req->abts_done is set " 1764 - "state is %s\n", 1765 - fnic_ioreq_state_to_str(CMD_STATE(sc))); 1766 - } 1767 - if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) { 1768 - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 1769 - "fnic_terminate_rport_io " 1770 - "IO not yet issued %p tag 0x%x flags " 1771 - "%x state %d\n", 1772 - sc, tag, CMD_FLAGS(sc), CMD_STATE(sc)); 1773 - } 1774 - old_ioreq_state = CMD_STATE(sc); 1775 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 1776 - CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 1777 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 1778 - atomic64_inc(&reset_stats->device_reset_terminates); 1779 - abt_tag = (tag | FNIC_TAG_DEV_RST); 1780 - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1781 - "fnic_terminate_rport_io dev rst sc 0x%p\n", sc); 1782 - } 1783 - 1784 - BUG_ON(io_req->abts_done); 1785 
- 1786 - FNIC_SCSI_DBG(KERN_DEBUG, 1787 - fnic->lport->host, 1788 - "fnic_terminate_rport_io: Issuing abts\n"); 1789 - 1790 - spin_unlock_irqrestore(io_lock, flags); 1791 - 1792 - /* Now queue the abort command to firmware */ 1793 - int_to_scsilun(sc->device->lun, &fc_lun); 1794 - 1795 - if (fnic_queue_abort_io_req(fnic, abt_tag, 1796 - FCPIO_ITMF_ABT_TASK_TERM, 1797 - fc_lun.scsi_lun, io_req)) { 1798 - /* 1799 - * Revert the cmd state back to old state, if 1800 - * it hasn't changed in between. This cmd will get 1801 - * aborted later by scsi_eh, or cleaned up during 1802 - * lun reset 1803 - */ 1804 - spin_lock_irqsave(io_lock, flags); 1805 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) 1806 - CMD_STATE(sc) = old_ioreq_state; 1807 - spin_unlock_irqrestore(io_lock, flags); 1808 - } else { 1809 - spin_lock_irqsave(io_lock, flags); 1810 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) 1811 - CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; 1812 - else 1813 - CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; 1814 - spin_unlock_irqrestore(io_lock, flags); 1815 - atomic64_inc(&term_stats->terminates); 1816 - term_cnt++; 1817 - } 1818 - } 1819 - if (term_cnt > atomic64_read(&term_stats->max_terminates)) 1820 - atomic64_set(&term_stats->max_terminates, term_cnt); 1821 - 1725 + fnic_rport_exch_reset(fnic, rport->port_id); 1822 1726 } 1823 1727 1824 1728 /* ··· 2012 2118 return ret; 2013 2119 } 2014 2120 2121 + struct fnic_pending_aborts_iter_data { 2122 + struct fnic *fnic; 2123 + struct scsi_cmnd *lr_sc; 2124 + struct scsi_device *lun_dev; 2125 + int ret; 2126 + }; 2127 + 2128 + static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, 2129 + void *data, bool reserved) 2130 + { 2131 + struct fnic_pending_aborts_iter_data *iter_data = data; 2132 + struct fnic *fnic = iter_data->fnic; 2133 + struct scsi_device *lun_dev = iter_data->lun_dev; 2134 + int abt_tag = sc->request->tag; 2135 + struct fnic_io_req *io_req; 2136 + spinlock_t *io_lock; 2137 + unsigned long flags; 2138 + struct 
scsi_lun fc_lun; 2139 + DECLARE_COMPLETION_ONSTACK(tm_done); 2140 + enum fnic_ioreq_state old_ioreq_state; 2141 + 2142 + if (sc == iter_data->lr_sc || sc->device != lun_dev) 2143 + return true; 2144 + if (reserved) 2145 + return true; 2146 + 2147 + io_lock = fnic_io_lock_tag(fnic, abt_tag); 2148 + spin_lock_irqsave(io_lock, flags); 2149 + io_req = (struct fnic_io_req *)CMD_SP(sc); 2150 + if (!io_req) { 2151 + spin_unlock_irqrestore(io_lock, flags); 2152 + return true; 2153 + } 2154 + 2155 + /* 2156 + * Found IO that is still pending with firmware and 2157 + * belongs to the LUN that we are resetting 2158 + */ 2159 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 2160 + "Found IO in %s on lun\n", 2161 + fnic_ioreq_state_to_str(CMD_STATE(sc))); 2162 + 2163 + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 2164 + spin_unlock_irqrestore(io_lock, flags); 2165 + return true; 2166 + } 2167 + if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 2168 + (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { 2169 + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 2170 + "%s dev rst not pending sc 0x%p\n", __func__, 2171 + sc); 2172 + spin_unlock_irqrestore(io_lock, flags); 2173 + return true; 2174 + } 2175 + 2176 + if (io_req->abts_done) 2177 + shost_printk(KERN_ERR, fnic->lport->host, 2178 + "%s: io_req->abts_done is set state is %s\n", 2179 + __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); 2180 + old_ioreq_state = CMD_STATE(sc); 2181 + /* 2182 + * Any pending IO issued prior to reset is expected to be 2183 + * in abts pending state, if not we need to set 2184 + * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending. 2185 + * When IO is completed, the IO will be handed over and 2186 + * handled in this function. 
2187 + */ 2188 + CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 2189 + 2190 + BUG_ON(io_req->abts_done); 2191 + 2192 + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 2193 + abt_tag |= FNIC_TAG_DEV_RST; 2194 + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 2195 + "%s: dev rst sc 0x%p\n", __func__, sc); 2196 + } 2197 + 2198 + CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 2199 + io_req->abts_done = &tm_done; 2200 + spin_unlock_irqrestore(io_lock, flags); 2201 + 2202 + /* Now queue the abort command to firmware */ 2203 + int_to_scsilun(sc->device->lun, &fc_lun); 2204 + 2205 + if (fnic_queue_abort_io_req(fnic, abt_tag, 2206 + FCPIO_ITMF_ABT_TASK_TERM, 2207 + fc_lun.scsi_lun, io_req)) { 2208 + spin_lock_irqsave(io_lock, flags); 2209 + io_req = (struct fnic_io_req *)CMD_SP(sc); 2210 + if (io_req) 2211 + io_req->abts_done = NULL; 2212 + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) 2213 + CMD_STATE(sc) = old_ioreq_state; 2214 + spin_unlock_irqrestore(io_lock, flags); 2215 + iter_data->ret = FAILED; 2216 + return false; 2217 + } else { 2218 + spin_lock_irqsave(io_lock, flags); 2219 + if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) 2220 + CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; 2221 + spin_unlock_irqrestore(io_lock, flags); 2222 + } 2223 + CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; 2224 + 2225 + wait_for_completion_timeout(&tm_done, msecs_to_jiffies 2226 + (fnic->config.ed_tov)); 2227 + 2228 + /* Recheck cmd state to check if it is now aborted */ 2229 + spin_lock_irqsave(io_lock, flags); 2230 + io_req = (struct fnic_io_req *)CMD_SP(sc); 2231 + if (!io_req) { 2232 + spin_unlock_irqrestore(io_lock, flags); 2233 + CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; 2234 + return true; 2235 + } 2236 + 2237 + io_req->abts_done = NULL; 2238 + 2239 + /* if abort is still pending with fw, fail */ 2240 + if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { 2241 + spin_unlock_irqrestore(io_lock, flags); 2242 + CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; 2243 + iter_data->ret = FAILED; 2244 + return false; 2245 + } 2246 + 
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 2247 + 2248 + /* original sc used for lr is handled by dev reset code */ 2249 + if (sc != iter_data->lr_sc) 2250 + CMD_SP(sc) = NULL; 2251 + spin_unlock_irqrestore(io_lock, flags); 2252 + 2253 + /* original sc used for lr is handled by dev reset code */ 2254 + if (sc != iter_data->lr_sc) { 2255 + fnic_release_ioreq_buf(fnic, io_req, sc); 2256 + mempool_free(io_req, fnic->io_req_pool); 2257 + } 2258 + 2259 + /* 2260 + * Any IO is returned during reset, it needs to call scsi_done 2261 + * to return the scsi_cmnd to upper layer. 2262 + */ 2263 + if (sc->scsi_done) { 2264 + /* Set result to let upper SCSI layer retry */ 2265 + sc->result = DID_RESET << 16; 2266 + sc->scsi_done(sc); 2267 + } 2268 + return true; 2269 + } 2270 + 2015 2271 /* 2016 2272 * Clean up any pending aborts on the lun 2017 2273 * For each outstanding IO on this lun, whose abort is not completed by fw, ··· 2170 2126 */ 2171 2127 static int fnic_clean_pending_aborts(struct fnic *fnic, 2172 2128 struct scsi_cmnd *lr_sc, 2173 - bool new_sc) 2129 + bool new_sc) 2174 2130 2175 2131 { 2176 - int tag, abt_tag; 2177 - struct fnic_io_req *io_req; 2178 - spinlock_t *io_lock; 2179 - unsigned long flags; 2180 - int ret = 0; 2181 - struct scsi_cmnd *sc; 2182 - struct scsi_lun fc_lun; 2183 - struct scsi_device *lun_dev = lr_sc->device; 2184 - DECLARE_COMPLETION_ONSTACK(tm_done); 2185 - enum fnic_ioreq_state old_ioreq_state; 2132 + int ret = SUCCESS; 2133 + struct fnic_pending_aborts_iter_data iter_data = { 2134 + .fnic = fnic, 2135 + .lun_dev = lr_sc->device, 2136 + .ret = SUCCESS, 2137 + }; 2186 2138 2187 - for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { 2188 - io_lock = fnic_io_lock_tag(fnic, tag); 2189 - spin_lock_irqsave(io_lock, flags); 2190 - sc = scsi_host_find_tag(fnic->lport->host, tag); 2191 - /* 2192 - * ignore this lun reset cmd if issued using new SC 2193 - * or cmds that do not belong to this lun 2194 - */ 2195 - if (!sc || ((sc == lr_sc) && new_sc) || 
sc->device != lun_dev) { 2196 - spin_unlock_irqrestore(io_lock, flags); 2197 - continue; 2198 - } 2139 + if (new_sc) 2140 + iter_data.lr_sc = lr_sc; 2199 2141 2200 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2201 - 2202 - if (!io_req || sc->device != lun_dev) { 2203 - spin_unlock_irqrestore(io_lock, flags); 2204 - continue; 2205 - } 2206 - 2207 - /* 2208 - * Found IO that is still pending with firmware and 2209 - * belongs to the LUN that we are resetting 2210 - */ 2211 - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 2212 - "Found IO in %s on lun\n", 2213 - fnic_ioreq_state_to_str(CMD_STATE(sc))); 2214 - 2215 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { 2216 - spin_unlock_irqrestore(io_lock, flags); 2217 - continue; 2218 - } 2219 - if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && 2220 - (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) { 2221 - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 2222 - "%s dev rst not pending sc 0x%p\n", __func__, 2223 - sc); 2224 - spin_unlock_irqrestore(io_lock, flags); 2225 - continue; 2226 - } 2227 - 2228 - if (io_req->abts_done) 2229 - shost_printk(KERN_ERR, fnic->lport->host, 2230 - "%s: io_req->abts_done is set state is %s\n", 2231 - __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); 2232 - old_ioreq_state = CMD_STATE(sc); 2233 - /* 2234 - * Any pending IO issued prior to reset is expected to be 2235 - * in abts pending state, if not we need to set 2236 - * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending. 2237 - * When IO is completed, the IO will be handed over and 2238 - * handled in this function. 
2239 - */ 2240 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; 2241 - 2242 - BUG_ON(io_req->abts_done); 2243 - 2244 - abt_tag = tag; 2245 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { 2246 - abt_tag |= FNIC_TAG_DEV_RST; 2247 - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 2248 - "%s: dev rst sc 0x%p\n", __func__, sc); 2249 - } 2250 - 2251 - CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; 2252 - io_req->abts_done = &tm_done; 2253 - spin_unlock_irqrestore(io_lock, flags); 2254 - 2255 - /* Now queue the abort command to firmware */ 2256 - int_to_scsilun(sc->device->lun, &fc_lun); 2257 - 2258 - if (fnic_queue_abort_io_req(fnic, abt_tag, 2259 - FCPIO_ITMF_ABT_TASK_TERM, 2260 - fc_lun.scsi_lun, io_req)) { 2261 - spin_lock_irqsave(io_lock, flags); 2262 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2263 - if (io_req) 2264 - io_req->abts_done = NULL; 2265 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) 2266 - CMD_STATE(sc) = old_ioreq_state; 2267 - spin_unlock_irqrestore(io_lock, flags); 2268 - ret = 1; 2269 - goto clean_pending_aborts_end; 2270 - } else { 2271 - spin_lock_irqsave(io_lock, flags); 2272 - if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) 2273 - CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED; 2274 - spin_unlock_irqrestore(io_lock, flags); 2275 - } 2276 - CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; 2277 - 2278 - wait_for_completion_timeout(&tm_done, 2279 - msecs_to_jiffies 2280 - (fnic->config.ed_tov)); 2281 - 2282 - /* Recheck cmd state to check if it is now aborted */ 2283 - spin_lock_irqsave(io_lock, flags); 2284 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2285 - if (!io_req) { 2286 - spin_unlock_irqrestore(io_lock, flags); 2287 - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; 2288 - continue; 2289 - } 2290 - 2291 - io_req->abts_done = NULL; 2292 - 2293 - /* if abort is still pending with fw, fail */ 2294 - if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { 2295 - spin_unlock_irqrestore(io_lock, flags); 2296 - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; 2297 - ret = 1; 2298 - goto 
clean_pending_aborts_end; 2299 - } 2300 - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 2301 - 2302 - /* original sc used for lr is handled by dev reset code */ 2303 - if (sc != lr_sc) 2304 - CMD_SP(sc) = NULL; 2305 - spin_unlock_irqrestore(io_lock, flags); 2306 - 2307 - /* original sc used for lr is handled by dev reset code */ 2308 - if (sc != lr_sc) { 2309 - fnic_release_ioreq_buf(fnic, io_req, sc); 2310 - mempool_free(io_req, fnic->io_req_pool); 2311 - } 2312 - 2313 - /* 2314 - * Any IO is returned during reset, it needs to call scsi_done 2315 - * to return the scsi_cmnd to upper layer. 2316 - */ 2317 - if (sc->scsi_done) { 2318 - /* Set result to let upper SCSI layer retry */ 2319 - sc->result = DID_RESET << 16; 2320 - sc->scsi_done(sc); 2321 - } 2142 + scsi_host_busy_iter(fnic->lport->host, 2143 + fnic_pending_aborts_iter, &iter_data); 2144 + if (iter_data.ret == FAILED) { 2145 + ret = iter_data.ret; 2146 + goto clean_pending_aborts_end; 2322 2147 } 2323 - 2324 2148 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); 2325 2149 2326 2150 /* walk again to check, if IOs are still pending in fw */ ··· 2687 2775 2688 2776 } 2689 2777 2778 + static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data, 2779 + bool reserved) 2780 + { 2781 + struct fnic_pending_aborts_iter_data *iter_data = data; 2782 + struct fnic *fnic = iter_data->fnic; 2783 + int cmd_state; 2784 + struct fnic_io_req *io_req; 2785 + spinlock_t *io_lock; 2786 + unsigned long flags; 2787 + 2788 + /* 2789 + * ignore this lun reset cmd or cmds that do not belong to 2790 + * this lun 2791 + */ 2792 + if (iter_data->lr_sc && sc == iter_data->lr_sc) 2793 + return true; 2794 + if (iter_data->lun_dev && sc->device != iter_data->lun_dev) 2795 + return true; 2796 + 2797 + io_lock = fnic_io_lock_hash(fnic, sc); 2798 + spin_lock_irqsave(io_lock, flags); 2799 + 2800 + io_req = (struct fnic_io_req *)CMD_SP(sc); 2801 + if (!io_req) { 2802 + spin_unlock_irqrestore(io_lock, flags); 2803 + return true; 
2804 + } 2805 + 2806 + /* 2807 + * Found IO that is still pending with firmware and 2808 + * belongs to the LUN that we are resetting 2809 + */ 2810 + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, 2811 + "Found IO in %s on lun\n", 2812 + fnic_ioreq_state_to_str(CMD_STATE(sc))); 2813 + cmd_state = CMD_STATE(sc); 2814 + spin_unlock_irqrestore(io_lock, flags); 2815 + if (cmd_state == FNIC_IOREQ_ABTS_PENDING) 2816 + iter_data->ret = 1; 2817 + 2818 + return iter_data->ret ? false : true; 2819 + } 2820 + 2690 2821 /* 2691 2822 * fnic_is_abts_pending() is a helper function that 2692 2823 * walks through tag map to check if there is any IOs pending,if there is one, ··· 2739 2784 */ 2740 2785 int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) 2741 2786 { 2742 - int tag; 2743 - struct fnic_io_req *io_req; 2744 - spinlock_t *io_lock; 2745 - unsigned long flags; 2746 - int ret = 0; 2747 - struct scsi_cmnd *sc; 2748 - struct scsi_device *lun_dev = NULL; 2787 + struct fnic_pending_aborts_iter_data iter_data = { 2788 + .fnic = fnic, 2789 + .lun_dev = NULL, 2790 + .ret = 0, 2791 + }; 2749 2792 2750 - if (lr_sc) 2751 - lun_dev = lr_sc->device; 2752 - 2753 - /* walk again to check, if IOs are still pending in fw */ 2754 - for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { 2755 - sc = scsi_host_find_tag(fnic->lport->host, tag); 2756 - /* 2757 - * ignore this lun reset cmd or cmds that do not belong to 2758 - * this lun 2759 - */ 2760 - if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc))) 2761 - continue; 2762 - 2763 - io_lock = fnic_io_lock_hash(fnic, sc); 2764 - spin_lock_irqsave(io_lock, flags); 2765 - 2766 - io_req = (struct fnic_io_req *)CMD_SP(sc); 2767 - 2768 - if (!io_req || sc->device != lun_dev) { 2769 - spin_unlock_irqrestore(io_lock, flags); 2770 - continue; 2771 - } 2772 - 2773 - /* 2774 - * Found IO that is still pending with firmware and 2775 - * belongs to the LUN that we are resetting 2776 - */ 2777 - FNIC_SCSI_DBG(KERN_INFO, 
fnic->lport->host, 2778 - "Found IO in %s on lun\n", 2779 - fnic_ioreq_state_to_str(CMD_STATE(sc))); 2780 - 2781 - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) 2782 - ret = 1; 2783 - spin_unlock_irqrestore(io_lock, flags); 2793 + if (lr_sc) { 2794 + iter_data.lun_dev = lr_sc->device; 2795 + iter_data.lr_sc = lr_sc; 2784 2796 } 2785 2797 2786 - return ret; 2798 + /* walk again to check, if IOs are still pending in fw */ 2799 + scsi_host_busy_iter(fnic->lport->host, 2800 + fnic_abts_pending_iter, &iter_data); 2801 + 2802 + return iter_data.ret; 2787 2803 }
+1 -1
drivers/scsi/lpfc/lpfc_bsg.c
··· 934 934 INIT_LIST_HEAD(&head); 935 935 list_add_tail(&head, &piocbq->list); 936 936 937 - ct_req = (struct lpfc_sli_ct_request *)bdeBuf1; 937 + ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt; 938 938 evt_req_id = ct_req->FsType; 939 939 cmd = ct_req->CommandResponse.bits.CmdRsp; 940 940
+6 -6
drivers/scsi/lpfc/lpfc_init.c
··· 254 254 if (mb->un.varDmp.word_cnt == 0) 255 255 break; 256 256 257 - i = mb->un.varDmp.word_cnt * sizeof(uint32_t); 258 - if (offset + i > DMP_VPD_SIZE) 259 - i = DMP_VPD_SIZE - offset; 257 + if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 258 + mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 260 259 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 261 - lpfc_vpd_data + offset, i); 262 - offset += i; 263 - } while (offset < DMP_VPD_SIZE); 260 + lpfc_vpd_data + offset, 261 + mb->un.varDmp.word_cnt); 262 + offset += mb->un.varDmp.word_cnt; 263 + } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 264 264 265 265 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 266 266
+17 -9
drivers/scsi/lpfc/lpfc_sli.c
··· 11804 11804 lpfc_ctx_cmd ctx_cmd) 11805 11805 { 11806 11806 struct lpfc_io_buf *lpfc_cmd; 11807 + IOCB_t *icmd = NULL; 11807 11808 int rc = 1; 11808 11809 11809 11810 if (!iocbq || iocbq->vport != vport) 11810 11811 return rc; 11811 11812 11812 - if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 11813 - !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) 11813 + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 11814 + !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) || 11815 + iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11816 + return rc; 11817 + 11818 + icmd = &iocbq->iocb; 11819 + if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11820 + icmd->ulpCommand == CMD_CLOSE_XRI_CN) 11814 11821 return rc; 11815 11822 11816 11823 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); ··· 19777 19770 LPFC_MBOXQ_t *pmb = NULL; 19778 19771 MAILBOX_t *mb; 19779 19772 uint32_t offset = 0; 19780 - int i, rc; 19773 + int rc; 19781 19774 19782 19775 if (!rgn23_data) 19783 19776 return 0; ··· 19808 19801 if (mb->un.varDmp.word_cnt == 0) 19809 19802 break; 19810 19803 19811 - i = mb->un.varDmp.word_cnt * sizeof(uint32_t); 19812 - if (offset + i > DMP_RGN23_SIZE) 19813 - i = DMP_RGN23_SIZE - offset; 19804 + if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 19805 + mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 19806 + 19814 19807 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 19815 - rgn23_data + offset, i); 19816 - offset += i; 19817 - } while (offset < DMP_RGN23_SIZE); 19808 + rgn23_data + offset, 19809 + mb->un.varDmp.word_cnt); 19810 + offset += mb->un.varDmp.word_cnt; 19811 + } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 19818 19812 19819 19813 mempool_free(pmb, phba->mbox_mem_pool); 19820 19814 return offset;
+3
drivers/scsi/qla2xxx/qla_init.c
··· 1195 1195 { 1196 1196 struct qla_work_evt *e; 1197 1197 1198 + if (vha->host->active_mode == MODE_TARGET) 1199 + return QLA_FUNCTION_FAILED; 1200 + 1198 1201 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI); 1199 1202 if (!e) 1200 1203 return QLA_FUNCTION_FAILED;
+1
drivers/scsi/qla2xxx/qla_os.c
··· 7707 7707 7708 7708 .eh_timed_out = fc_eh_timed_out, 7709 7709 .eh_abort_handler = qla2xxx_eh_abort, 7710 + .eh_should_retry_cmd = fc_eh_should_retry_cmd, 7710 7711 .eh_device_reset_handler = qla2xxx_eh_device_reset, 7711 7712 .eh_target_reset_handler = qla2xxx_eh_target_reset, 7712 7713 .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
+16 -8
drivers/scsi/scsi_debug.c
··· 218 218 */ 219 219 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */ 220 220 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG) 221 - #define DEF_CMD_PER_LUN 255 221 + #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE 222 222 223 223 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */ 224 224 #define F_D_IN 1 /* Data-in command (e.g. READ) */ ··· 5695 5695 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); 5696 5696 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); 5697 5697 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); 5698 - MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); 5699 5698 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method"); 5699 + MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); 5700 5700 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))"); 5701 5701 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error"); 5702 5702 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error"); ··· 5710 5710 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... 
(def=0)"); 5711 5711 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)"); 5712 5712 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); 5713 - MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1)"); 5713 + MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))"); 5714 5714 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 5715 5715 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns"); 5716 5716 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); ··· 7165 7165 } 7166 7166 num_in_q = atomic_read(&devip->num_in_q); 7167 7167 7168 + if (qdepth > SDEBUG_CANQUEUE) { 7169 + qdepth = SDEBUG_CANQUEUE; 7170 + pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__, 7171 + qdepth, SDEBUG_CANQUEUE); 7172 + } 7168 7173 if (qdepth < 1) 7169 7174 qdepth = 1; 7170 - /* allow to exceed max host qc_arr elements for testing */ 7171 - if (qdepth > SDEBUG_CANQUEUE + 10) 7172 - qdepth = SDEBUG_CANQUEUE + 10; 7173 - scsi_change_queue_depth(sdev, qdepth); 7175 + if (qdepth != sdev->queue_depth) 7176 + scsi_change_queue_depth(sdev, qdepth); 7174 7177 7175 7178 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) { 7176 7179 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n", ··· 7561 7558 sdbg_host = to_sdebug_host(dev); 7562 7559 7563 7560 sdebug_driver_template.can_queue = sdebug_max_queue; 7561 + sdebug_driver_template.cmd_per_lun = sdebug_max_queue; 7564 7562 if (!sdebug_clustering) 7565 7563 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1; 7566 7564 ··· 7597 7593 * If condition not met, trim poll_queues to 1 (just for simplicity). 
7598 7594 */ 7599 7595 if (poll_queues >= submit_queues) { 7600 - pr_warn("%s: trim poll_queues to 1\n", my_name); 7596 + if (submit_queues < 3) 7597 + pr_warn("%s: trim poll_queues to 1\n", my_name); 7598 + else 7599 + pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n", 7600 + my_name, submit_queues - 1); 7601 7601 poll_queues = 1; 7602 7602 } 7603 7603 if (poll_queues)
+6 -6
drivers/scsi/ufs/ufs-sysfs.c
··· 9 9 #include "ufs.h" 10 10 #include "ufs-sysfs.h" 11 11 12 - static const char *ufschd_uic_link_state_to_string( 12 + static const char *ufshcd_uic_link_state_to_string( 13 13 enum uic_link_state state) 14 14 { 15 15 switch (state) { ··· 21 21 } 22 22 } 23 23 24 - static const char *ufschd_ufs_dev_pwr_mode_to_string( 24 + static const char *ufshcd_ufs_dev_pwr_mode_to_string( 25 25 enum ufs_dev_pwr_mode state) 26 26 { 27 27 switch (state) { ··· 81 81 { 82 82 struct ufs_hba *hba = dev_get_drvdata(dev); 83 83 84 - return sysfs_emit(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string( 84 + return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string( 85 85 ufs_pm_lvl_states[hba->rpm_lvl].dev_state)); 86 86 } 87 87 ··· 90 90 { 91 91 struct ufs_hba *hba = dev_get_drvdata(dev); 92 92 93 - return sysfs_emit(buf, "%s\n", ufschd_uic_link_state_to_string( 93 + return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string( 94 94 ufs_pm_lvl_states[hba->rpm_lvl].link_state)); 95 95 } 96 96 ··· 113 113 { 114 114 struct ufs_hba *hba = dev_get_drvdata(dev); 115 115 116 - return sysfs_emit(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string( 116 + return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string( 117 117 ufs_pm_lvl_states[hba->spm_lvl].dev_state)); 118 118 } 119 119 ··· 122 122 { 123 123 struct ufs_hba *hba = dev_get_drvdata(dev); 124 124 125 - return sysfs_emit(buf, "%s\n", ufschd_uic_link_state_to_string( 125 + return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string( 126 126 ufs_pm_lvl_states[hba->spm_lvl].link_state)); 127 127 } 128 128
+5 -2
drivers/scsi/ufs/ufshcd.c
··· 8593 8593 } else if (!ufshcd_is_ufs_dev_active(hba)) { 8594 8594 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); 8595 8595 vcc_off = true; 8596 - if (!ufshcd_is_link_active(hba)) { 8596 + if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) { 8597 8597 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); 8598 8598 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); 8599 8599 } ··· 8615 8615 !hba->dev_info.is_lu_power_on_wp) { 8616 8616 ret = ufshcd_setup_vreg(hba, true); 8617 8617 } else if (!ufshcd_is_ufs_dev_active(hba)) { 8618 - if (!ret && !ufshcd_is_link_active(hba)) { 8618 + if (!ufshcd_is_link_active(hba)) { 8619 8619 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); 8620 8620 if (ret) 8621 8621 goto vcc_disable; ··· 8975 8975 if (!hba->is_powered) 8976 8976 return 0; 8977 8977 8978 + cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work); 8979 + 8978 8980 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == 8979 8981 hba->curr_dev_pwr_mode) && 8980 8982 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) == 8981 8983 hba->uic_link_state) && 8984 + pm_runtime_suspended(hba->dev) && 8982 8985 !hba->dev_info.b_rpm_dev_flush_capable) 8983 8986 goto out; 8984 8987
+2 -2
drivers/target/target_core_user.c
··· 1413 1413 return 1; 1414 1414 } 1415 1415 1416 - static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 1416 + static bool tcmu_handle_completions(struct tcmu_dev *udev) 1417 1417 { 1418 1418 struct tcmu_mailbox *mb; 1419 1419 struct tcmu_cmd *cmd; ··· 1456 1456 pr_err("cmd_id %u not found, ring is broken\n", 1457 1457 entry->hdr.cmd_id); 1458 1458 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 1459 - break; 1459 + return false; 1460 1460 } 1461 1461 1462 1462 tcmu_handle_completion(cmd, entry);
+4 -4
include/linux/blk-mq.h
··· 313 313 */ 314 314 void (*put_budget)(struct request_queue *, int); 315 315 316 - /* 317 - * @set_rq_budget_toekn: store rq's budget token 316 + /** 317 + * @set_rq_budget_token: store rq's budget token 318 318 */ 319 319 void (*set_rq_budget_token)(struct request *, int); 320 - /* 321 - * @get_rq_budget_toekn: retrieve rq's budget token 320 + /** 321 + * @get_rq_budget_token: retrieve rq's budget token 322 322 */ 323 323 int (*get_rq_budget_token)(struct request *); 324 324