Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
"This consists of the usual driver updates (ufs, smartpqi, lpfc,
target, megaraid_sas, hisi_sas, qla2xxx) and minor updates and bug
fixes.

Notable core changes are the removal of scsi->tag which caused some
churn in obsolete drivers and a sweep through all drivers to call
scsi_done() directly instead of scsi->done() which removes a pointer
indirection from the hot path and a move to register core sysfs files
earlier, which means they're available to KOBJ_ADD processing, which
necessitates switching all drivers to using attribute groups"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (279 commits)
scsi: lpfc: Update lpfc version to 14.0.0.3
scsi: lpfc: Allow fabric node recovery if recovery is in progress before devloss
scsi: lpfc: Fix link down processing to address NULL pointer dereference
scsi: lpfc: Allow PLOGI retry if previous PLOGI was aborted
scsi: lpfc: Fix use-after-free in lpfc_unreg_rpi() routine
scsi: lpfc: Correct sysfs reporting of loop support after SFP status change
scsi: lpfc: Wait for successful restart of SLI3 adapter during host sg_reset
scsi: lpfc: Revert LOG_TRACE_EVENT back to LOG_INIT prior to driver_resource_setup()
scsi: ufs: ufshcd-pltfrm: Fix memory leak due to probe defer
scsi: ufs: mediatek: Avoid sched_clock() misuse
scsi: mpt3sas: Make mpt3sas_dev_attrs static
scsi: scsi_transport_sas: Add 22.5 Gbps link rate definitions
scsi: target: core: Stop using bdevname()
scsi: aha1542: Use memcpy_{from,to}_bvec()
scsi: sr: Add error handling support for add_disk()
scsi: sd: Add error handling support for add_disk()
scsi: target: Perform ALUA group changes in one step
scsi: target: Replace lun_tg_pt_gp_lock with rcu in I/O path
scsi: target: Fix alua_tg_pt_gps_count tracking
scsi: target: Fix ordered tag handling
...

+4171 -2787
+27
Documentation/ABI/testing/sysfs-class-fc
··· 1 + What: /sys/class/fc/fc_udev_device/appid_store 2 + Date: Aug 2021 3 + Contact: Muneendra Kumar <muneendra.kumar@broadcom.com> 4 + Description: 5 + This interface allows an admin to set an FC application 6 + identifier in the blkcg associated with a cgroup id. The 7 + identifier is typically a UUID that is associated with 8 + an application or logical entity such as a virtual 9 + machine or container group. The application or logical 10 + entity utilizes a block device via the cgroup id. 11 + FC adapter drivers may query the identifier and tag FC 12 + traffic based on the identifier. FC host and FC fabric 13 + entities can utilize the application id and FC traffic 14 + tag to identify traffic sources. 15 + 16 + The interface expects a string "<cgroupid>:<appid>" where: 17 + <cgroupid> is inode of the cgroup in hexadecimal 18 + <appid> is user provided string up to 128 characters 19 + in length. 20 + 21 + If an appid_store is done for a cgroup id that already 22 + has an appid set, the new value will override the 23 + previous value. 24 + 25 + If an admin wants to remove an FC application identifier 26 + from a cgroup, an appid_store should be done with the 27 + following string: "<cgroupid>:"
+1 -1
Documentation/ABI/testing/sysfs-driver-ufs
··· 983 983 What: /sys/class/scsi_device/*/device/dyn_cap_needed 984 984 Date: February 2018 985 985 Contact: Stanislav Nijnikov <stanislav.nijnikov@wdc.com> 986 - Description: This file shows the The amount of physical memory needed 986 + Description: This file shows the amount of physical memory needed 987 987 to be removed from the physical memory resources pool of 988 988 the particular logical unit. The full information about 989 989 the attribute could be found at UFS specifications 2.1.
+4 -4
drivers/ata/ahci.h
··· 376 376 377 377 extern int ahci_ignore_sss; 378 378 379 - extern struct device_attribute *ahci_shost_attrs[]; 380 - extern struct device_attribute *ahci_sdev_attrs[]; 379 + extern const struct attribute_group *ahci_shost_groups[]; 380 + extern const struct attribute_group *ahci_sdev_groups[]; 381 381 382 382 /* 383 383 * This must be instantiated by the edge drivers. Read the comments ··· 388 388 .can_queue = AHCI_MAX_CMDS, \ 389 389 .sg_tablesize = AHCI_MAX_SG, \ 390 390 .dma_boundary = AHCI_DMA_BOUNDARY, \ 391 - .shost_attrs = ahci_shost_attrs, \ 392 - .sdev_attrs = ahci_sdev_attrs, \ 391 + .shost_groups = ahci_shost_groups, \ 392 + .sdev_groups = ahci_sdev_groups, \ 393 393 .change_queue_depth = ata_scsi_change_queue_depth, \ 394 394 .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ 395 395 .slave_configure = ata_scsi_slave_config
+5 -3
drivers/ata/ata_piix.c
··· 1085 1085 .set_dmamode = ich_set_dmamode, 1086 1086 }; 1087 1087 1088 - static struct device_attribute *piix_sidpr_shost_attrs[] = { 1089 - &dev_attr_link_power_management_policy, 1088 + static struct attribute *piix_sidpr_shost_attrs[] = { 1089 + &dev_attr_link_power_management_policy.attr, 1090 1090 NULL 1091 1091 }; 1092 1092 1093 + ATTRIBUTE_GROUPS(piix_sidpr_shost); 1094 + 1093 1095 static struct scsi_host_template piix_sidpr_sht = { 1094 1096 ATA_BMDMA_SHT(DRV_NAME), 1095 - .shost_attrs = piix_sidpr_shost_attrs, 1097 + .shost_groups = piix_sidpr_shost_groups, 1096 1098 }; 1097 1099 1098 1100 static struct ata_port_operations piix_sidpr_sata_ops = {
+35 -17
drivers/ata/libahci.c
··· 108 108 ahci_read_em_buffer, ahci_store_em_buffer); 109 109 static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL); 110 110 111 - struct device_attribute *ahci_shost_attrs[] = { 112 - &dev_attr_link_power_management_policy, 113 - &dev_attr_em_message_type, 114 - &dev_attr_em_message, 115 - &dev_attr_ahci_host_caps, 116 - &dev_attr_ahci_host_cap2, 117 - &dev_attr_ahci_host_version, 118 - &dev_attr_ahci_port_cmd, 119 - &dev_attr_em_buffer, 120 - &dev_attr_em_message_supported, 111 + static struct attribute *ahci_shost_attrs[] = { 112 + &dev_attr_link_power_management_policy.attr, 113 + &dev_attr_em_message_type.attr, 114 + &dev_attr_em_message.attr, 115 + &dev_attr_ahci_host_caps.attr, 116 + &dev_attr_ahci_host_cap2.attr, 117 + &dev_attr_ahci_host_version.attr, 118 + &dev_attr_ahci_port_cmd.attr, 119 + &dev_attr_em_buffer.attr, 120 + &dev_attr_em_message_supported.attr, 121 121 NULL 122 122 }; 123 - EXPORT_SYMBOL_GPL(ahci_shost_attrs); 124 123 125 - struct device_attribute *ahci_sdev_attrs[] = { 126 - &dev_attr_sw_activity, 127 - &dev_attr_unload_heads, 128 - &dev_attr_ncq_prio_supported, 129 - &dev_attr_ncq_prio_enable, 124 + static const struct attribute_group ahci_shost_attr_group = { 125 + .attrs = ahci_shost_attrs 126 + }; 127 + 128 + const struct attribute_group *ahci_shost_groups[] = { 129 + &ahci_shost_attr_group, 130 130 NULL 131 131 }; 132 - EXPORT_SYMBOL_GPL(ahci_sdev_attrs); 132 + EXPORT_SYMBOL_GPL(ahci_shost_groups); 133 + 134 + struct attribute *ahci_sdev_attrs[] = { 135 + &dev_attr_sw_activity.attr, 136 + &dev_attr_unload_heads.attr, 137 + &dev_attr_ncq_prio_supported.attr, 138 + &dev_attr_ncq_prio_enable.attr, 139 + NULL 140 + }; 141 + 142 + static const struct attribute_group ahci_sdev_attr_group = { 143 + .attrs = ahci_sdev_attrs 144 + }; 145 + 146 + const struct attribute_group *ahci_sdev_groups[] = { 147 + &ahci_sdev_attr_group, 148 + NULL 149 + }; 150 + EXPORT_SYMBOL_GPL(ahci_sdev_groups); 133 151 134 152 struct 
ata_port_operations ahci_ops = { 135 153 .inherits = &sata_pmp_port_ops,
+15 -6
drivers/ata/libata-sata.c
··· 922 922 ata_ncq_prio_enable_show, ata_ncq_prio_enable_store); 923 923 EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable); 924 924 925 - struct device_attribute *ata_ncq_sdev_attrs[] = { 926 - &dev_attr_unload_heads, 927 - &dev_attr_ncq_prio_enable, 928 - &dev_attr_ncq_prio_supported, 925 + struct attribute *ata_ncq_sdev_attrs[] = { 926 + &dev_attr_unload_heads.attr, 927 + &dev_attr_ncq_prio_enable.attr, 928 + &dev_attr_ncq_prio_supported.attr, 929 929 NULL 930 930 }; 931 - EXPORT_SYMBOL_GPL(ata_ncq_sdev_attrs); 931 + 932 + static const struct attribute_group ata_ncq_sdev_attr_group = { 933 + .attrs = ata_ncq_sdev_attrs 934 + }; 935 + 936 + const struct attribute_group *ata_ncq_sdev_groups[] = { 937 + &ata_ncq_sdev_attr_group, 938 + NULL 939 + }; 940 + EXPORT_SYMBOL_GPL(ata_ncq_sdev_groups); 932 941 933 942 static ssize_t 934 943 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, ··· 1267 1258 rc = __ata_scsi_queuecmd(cmd, ap->link.device); 1268 1259 else { 1269 1260 cmd->result = (DID_BAD_TARGET << 16); 1270 - cmd->scsi_done(cmd); 1261 + scsi_done(cmd); 1271 1262 } 1272 1263 return rc; 1273 1264 }
+19 -10
drivers/ata/libata-scsi.c
··· 234 234 field, 0xff, 0); 235 235 } 236 236 237 - struct device_attribute *ata_common_sdev_attrs[] = { 238 - &dev_attr_unload_heads, 237 + static struct attribute *ata_common_sdev_attrs[] = { 238 + &dev_attr_unload_heads.attr, 239 239 NULL 240 240 }; 241 - EXPORT_SYMBOL_GPL(ata_common_sdev_attrs); 241 + 242 + static const struct attribute_group ata_common_sdev_attr_group = { 243 + .attrs = ata_common_sdev_attrs 244 + }; 245 + 246 + const struct attribute_group *ata_common_sdev_groups[] = { 247 + &ata_common_sdev_attr_group, 248 + NULL 249 + }; 250 + EXPORT_SYMBOL_GPL(ata_common_sdev_groups); 242 251 243 252 /** 244 253 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd. ··· 643 634 qc = ata_qc_new_init(dev, scsi_cmd_to_rq(cmd)->tag); 644 635 if (qc) { 645 636 qc->scsicmd = cmd; 646 - qc->scsidone = cmd->scsi_done; 637 + qc->scsidone = scsi_done; 647 638 648 639 qc->sg = scsi_sglist(cmd); 649 640 qc->n_elem = scsi_sg_count(cmd); ··· 652 643 qc->flags |= ATA_QCFLAG_QUIET; 653 644 } else { 654 645 cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL; 655 - cmd->scsi_done(cmd); 646 + scsi_done(cmd); 656 647 } 657 648 658 649 return qc; ··· 1747 1738 1748 1739 early_finish: 1749 1740 ata_qc_free(qc); 1750 - cmd->scsi_done(cmd); 1741 + scsi_done(cmd); 1751 1742 DPRINTK("EXIT - early finish (good or error)\n"); 1752 1743 return 0; 1753 1744 1754 1745 err_did: 1755 1746 ata_qc_free(qc); 1756 1747 cmd->result = (DID_ERROR << 16); 1757 - cmd->scsi_done(cmd); 1748 + scsi_done(cmd); 1758 1749 err_mem: 1759 1750 DPRINTK("EXIT - internal\n"); 1760 1751 return 0; ··· 4051 4042 DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", 4052 4043 scmd->cmd_len, scsi_op, dev->cdb_len); 4053 4044 scmd->result = DID_ERROR << 16; 4054 - scmd->scsi_done(scmd); 4045 + scsi_done(scmd); 4055 4046 return 0; 4056 4047 } 4057 4048 ··· 4093 4084 rc = __ata_scsi_queuecmd(cmd, dev); 4094 4085 else { 4095 4086 cmd->result = (DID_BAD_TARGET << 16); 4096 - 
cmd->scsi_done(cmd); 4087 + scsi_done(cmd); 4097 4088 } 4098 4089 4099 4090 spin_unlock_irqrestore(ap->lock, irq_flags); ··· 4227 4218 break; 4228 4219 } 4229 4220 4230 - cmd->scsi_done(cmd); 4221 + scsi_done(cmd); 4231 4222 } 4232 4223 4233 4224 int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
+1 -1
drivers/ata/pata_macio.c
··· 923 923 */ 924 924 .max_segment_size = MAX_DBDMA_SEG, 925 925 .slave_configure = pata_macio_slave_config, 926 - .sdev_attrs = ata_common_sdev_attrs, 926 + .sdev_groups = ata_common_sdev_groups, 927 927 .can_queue = ATA_DEF_QUEUE, 928 928 .tag_alloc_policy = BLK_TAG_ALLOC_RR, 929 929 };
+1 -1
drivers/ata/sata_mv.c
··· 670 670 .can_queue = MV_MAX_Q_DEPTH - 1, 671 671 .sg_tablesize = MV_MAX_SG_CT / 2, 672 672 .dma_boundary = MV_DMA_BOUNDARY, 673 - .sdev_attrs = ata_ncq_sdev_attrs, 673 + .sdev_groups = ata_ncq_sdev_groups, 674 674 .change_queue_depth = ata_scsi_change_queue_depth, 675 675 .tag_alloc_policy = BLK_TAG_ALLOC_RR, 676 676 .slave_configure = ata_scsi_slave_config
+2 -2
drivers/ata/sata_nv.c
··· 380 380 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN, 381 381 .dma_boundary = NV_ADMA_DMA_BOUNDARY, 382 382 .slave_configure = nv_adma_slave_config, 383 - .sdev_attrs = ata_ncq_sdev_attrs, 383 + .sdev_groups = ata_ncq_sdev_groups, 384 384 .change_queue_depth = ata_scsi_change_queue_depth, 385 385 .tag_alloc_policy = BLK_TAG_ALLOC_RR, 386 386 }; ··· 391 391 .sg_tablesize = LIBATA_MAX_PRD, 392 392 .dma_boundary = ATA_DMA_BOUNDARY, 393 393 .slave_configure = nv_swncq_slave_config, 394 - .sdev_attrs = ata_ncq_sdev_attrs, 394 + .sdev_groups = ata_ncq_sdev_groups, 395 395 .change_queue_depth = ata_scsi_change_queue_depth, 396 396 .tag_alloc_policy = BLK_TAG_ALLOC_RR, 397 397 };
+1 -1
drivers/ata/sata_sil24.c
··· 379 379 .sg_tablesize = SIL24_MAX_SGE, 380 380 .dma_boundary = ATA_DMA_BOUNDARY, 381 381 .tag_alloc_policy = BLK_TAG_ALLOC_FIFO, 382 - .sdev_attrs = ata_ncq_sdev_attrs, 382 + .sdev_groups = ata_ncq_sdev_groups, 383 383 .change_queue_depth = ata_scsi_change_queue_depth, 384 384 .slave_configure = ata_scsi_slave_config 385 385 };
+6 -4
drivers/firewire/sbp2.c
··· 1375 1375 sbp2_unmap_scatterlist(device->card->device, orb); 1376 1376 1377 1377 orb->cmd->result = result; 1378 - orb->cmd->scsi_done(orb->cmd); 1378 + scsi_done(orb->cmd); 1379 1379 } 1380 1380 1381 1381 static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, ··· 1578 1578 1579 1579 static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL); 1580 1580 1581 - static struct device_attribute *sbp2_scsi_sysfs_attrs[] = { 1582 - &dev_attr_ieee1394_id, 1581 + static struct attribute *sbp2_scsi_sysfs_attrs[] = { 1582 + &dev_attr_ieee1394_id.attr, 1583 1583 NULL 1584 1584 }; 1585 + 1586 + ATTRIBUTE_GROUPS(sbp2_scsi_sysfs); 1585 1587 1586 1588 static struct scsi_host_template scsi_driver_template = { 1587 1589 .module = THIS_MODULE, ··· 1597 1595 .sg_tablesize = SG_ALL, 1598 1596 .max_segment_size = SBP2_MAX_SEG_SIZE, 1599 1597 .can_queue = 1, 1600 - .sdev_attrs = sbp2_scsi_sysfs_attrs, 1598 + .sdev_groups = sbp2_scsi_sysfs_groups, 1601 1599 }; 1602 1600 1603 1601 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+34 -25
drivers/infiniband/ulp/srp/ib_srp.c
··· 1026 1026 */ 1027 1027 static void srp_del_scsi_host_attr(struct Scsi_Host *shost) 1028 1028 { 1029 - struct device_attribute **attr; 1029 + const struct attribute_group **g; 1030 + struct attribute **attr; 1030 1031 1031 - for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr) 1032 - device_remove_file(&shost->shost_dev, *attr); 1032 + for (g = shost->hostt->shost_groups; *g; ++g) { 1033 + for (attr = (*g)->attrs; *attr; ++attr) { 1034 + struct device_attribute *dev_attr = 1035 + container_of(*attr, typeof(*dev_attr), attr); 1036 + 1037 + device_remove_file(&shost->shost_dev, dev_attr); 1038 + } 1039 + } 1033 1040 } 1034 1041 1035 1042 static void srp_remove_target(struct srp_target_port *target) ··· 1273 1266 if (scmnd) { 1274 1267 srp_free_req(ch, req, scmnd, 0); 1275 1268 scmnd->result = result; 1276 - scmnd->scsi_done(scmnd); 1269 + scsi_done(scmnd); 1277 1270 } 1278 1271 } 1279 1272 ··· 1994 1987 srp_free_req(ch, req, scmnd, 1995 1988 be32_to_cpu(rsp->req_lim_delta)); 1996 1989 1997 - scmnd->scsi_done(scmnd); 1990 + scsi_done(scmnd); 1998 1991 } 1999 1992 } 2000 1993 ··· 2246 2239 2247 2240 err: 2248 2241 if (scmnd->result) { 2249 - scmnd->scsi_done(scmnd); 2242 + scsi_done(scmnd); 2250 2243 ret = 0; 2251 2244 } else { 2252 2245 ret = SCSI_MLQUEUE_HOST_BUSY; ··· 2818 2811 if (ret == SUCCESS) { 2819 2812 srp_free_req(ch, req, scmnd, 0); 2820 2813 scmnd->result = DID_ABORT << 16; 2821 - scmnd->scsi_done(scmnd); 2814 + scsi_done(scmnd); 2822 2815 } 2823 2816 2824 2817 return ret; ··· 3057 3050 3058 3051 static DEVICE_ATTR_RO(allow_ext_sg); 3059 3052 3060 - static struct device_attribute *srp_host_attrs[] = { 3061 - &dev_attr_id_ext, 3062 - &dev_attr_ioc_guid, 3063 - &dev_attr_service_id, 3064 - &dev_attr_pkey, 3065 - &dev_attr_sgid, 3066 - &dev_attr_dgid, 3067 - &dev_attr_orig_dgid, 3068 - &dev_attr_req_lim, 3069 - &dev_attr_zero_req_lim, 3070 - &dev_attr_local_ib_port, 3071 - &dev_attr_local_ib_device, 3072 - &dev_attr_ch_count, 3073 - 
&dev_attr_comp_vector, 3074 - &dev_attr_tl_retry_count, 3075 - &dev_attr_cmd_sg_entries, 3076 - &dev_attr_allow_ext_sg, 3053 + static struct attribute *srp_host_attrs[] = { 3054 + &dev_attr_id_ext.attr, 3055 + &dev_attr_ioc_guid.attr, 3056 + &dev_attr_service_id.attr, 3057 + &dev_attr_pkey.attr, 3058 + &dev_attr_sgid.attr, 3059 + &dev_attr_dgid.attr, 3060 + &dev_attr_orig_dgid.attr, 3061 + &dev_attr_req_lim.attr, 3062 + &dev_attr_zero_req_lim.attr, 3063 + &dev_attr_local_ib_port.attr, 3064 + &dev_attr_local_ib_device.attr, 3065 + &dev_attr_ch_count.attr, 3066 + &dev_attr_comp_vector.attr, 3067 + &dev_attr_tl_retry_count.attr, 3068 + &dev_attr_cmd_sg_entries.attr, 3069 + &dev_attr_allow_ext_sg.attr, 3077 3070 NULL 3078 3071 }; 3072 + 3073 + ATTRIBUTE_GROUPS(srp_host); 3079 3074 3080 3075 static struct scsi_host_template srp_template = { 3081 3076 .module = THIS_MODULE, ··· 3099 3090 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, 3100 3091 .this_id = -1, 3101 3092 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, 3102 - .shost_attrs = srp_host_attrs, 3093 + .shost_groups = srp_host_groups, 3103 3094 .track_queue_depth = 1, 3104 3095 .cmd_size = sizeof(struct srp_request), 3105 3096 };
+4 -34
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 3705 3705 NULL, 3706 3706 }; 3707 3707 3708 - static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page) 3708 + static int srpt_enable_tpg(struct se_portal_group *se_tpg, bool enable) 3709 3709 { 3710 - struct se_portal_group *se_tpg = to_tpg(item); 3711 3710 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); 3712 - 3713 - return sysfs_emit(page, "%d\n", sport->enabled); 3714 - } 3715 - 3716 - static ssize_t srpt_tpg_enable_store(struct config_item *item, 3717 - const char *page, size_t count) 3718 - { 3719 - struct se_portal_group *se_tpg = to_tpg(item); 3720 - struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); 3721 - unsigned long tmp; 3722 - int ret; 3723 - 3724 - ret = kstrtoul(page, 0, &tmp); 3725 - if (ret < 0) { 3726 - pr_err("Unable to extract srpt_tpg_store_enable\n"); 3727 - return -EINVAL; 3728 - } 3729 - 3730 - if ((tmp != 0) && (tmp != 1)) { 3731 - pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp); 3732 - return -EINVAL; 3733 - } 3734 3711 3735 3712 mutex_lock(&sport->mutex); 3736 - srpt_set_enabled(sport, tmp); 3713 + srpt_set_enabled(sport, enable); 3737 3714 mutex_unlock(&sport->mutex); 3738 3715 3739 - return count; 3716 + return 0; 3740 3717 } 3741 - 3742 - CONFIGFS_ATTR(srpt_tpg_, enable); 3743 - 3744 - static struct configfs_attribute *srpt_tpg_attrs[] = { 3745 - &srpt_tpg_attr_enable, 3746 - NULL, 3747 - }; 3748 3718 3749 3719 /** 3750 3720 * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg ··· 3826 3856 .fabric_make_wwn = srpt_make_tport, 3827 3857 .fabric_drop_wwn = srpt_drop_tport, 3828 3858 .fabric_make_tpg = srpt_make_tpg, 3859 + .fabric_enable_tpg = srpt_enable_tpg, 3829 3860 .fabric_drop_tpg = srpt_drop_tpg, 3830 3861 .fabric_init_nodeacl = srpt_init_nodeacl, 3831 3862 3832 3863 .tfc_discovery_attrs = srpt_da_attrs, 3833 3864 .tfc_wwn_attrs = srpt_wwn_attrs, 3834 - .tfc_tpg_base_attrs = srpt_tpg_attrs, 3835 3865 .tfc_tpg_attrib_attrs = 
srpt_tpg_attrib_attrs, 3836 3866 }; 3837 3867
+4 -4
drivers/message/fusion/mptfc.c
··· 129 129 .sg_tablesize = MPT_SCSI_SG_DEPTH, 130 130 .max_sectors = 8192, 131 131 .cmd_per_lun = 7, 132 - .shost_attrs = mptscsih_host_attrs, 132 + .shost_groups = mptscsih_host_attr_groups, 133 133 }; 134 134 135 135 /**************************************************************************** ··· 649 649 650 650 if (!vdevice || !vdevice->vtarget) { 651 651 SCpnt->result = DID_NO_CONNECT << 16; 652 - SCpnt->scsi_done(SCpnt); 652 + scsi_done(SCpnt); 653 653 return 0; 654 654 } 655 655 656 656 err = fc_remote_port_chkready(rport); 657 657 if (unlikely(err)) { 658 658 SCpnt->result = err; 659 - SCpnt->scsi_done(SCpnt); 659 + scsi_done(SCpnt); 660 660 return 0; 661 661 } 662 662 ··· 664 664 ri = *((struct mptfc_rport_info **)rport->dd_data); 665 665 if (unlikely(!ri)) { 666 666 SCpnt->result = DID_IMM_RETRY << 16; 667 - SCpnt->scsi_done(SCpnt); 667 + scsi_done(SCpnt); 668 668 return 0; 669 669 } 670 670
+2 -2
drivers/message/fusion/mptsas.c
··· 1929 1929 1930 1930 if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) { 1931 1931 SCpnt->result = DID_NO_CONNECT << 16; 1932 - SCpnt->scsi_done(SCpnt); 1932 + scsi_done(SCpnt); 1933 1933 return 0; 1934 1934 } 1935 1935 ··· 2020 2020 .sg_tablesize = MPT_SCSI_SG_DEPTH, 2021 2021 .max_sectors = 8192, 2022 2022 .cmd_per_lun = 7, 2023 - .shost_attrs = mptscsih_host_attrs, 2023 + .shost_groups = mptscsih_host_attr_groups, 2024 2024 .no_write_same = 1, 2025 2025 }; 2026 2026
+27 -19
drivers/message/fusion/mptscsih.c
··· 1009 1009 /* Unmap the DMA buffers, if any. */ 1010 1010 scsi_dma_unmap(sc); 1011 1011 1012 - sc->scsi_done(sc); /* Issue the command callback */ 1012 + scsi_done(sc); /* Issue the command callback */ 1013 1013 1014 1014 /* Free Chain buffers */ 1015 1015 mptscsih_freeChainBuffers(ioc, req_idx); ··· 1054 1054 dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT 1055 1055 "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, " 1056 1056 "idx=%x\n", ioc->name, channel, id, sc, mf, ii)); 1057 - sc->scsi_done(sc); 1057 + scsi_done(sc); 1058 1058 } 1059 1059 } 1060 1060 EXPORT_SYMBOL(mptscsih_flush_running_cmds); ··· 1118 1118 "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, 1119 1119 vdevice->vtarget->channel, vdevice->vtarget->id, 1120 1120 sc, mf, ii)); 1121 - sc->scsi_done(sc); 1121 + scsi_done(sc); 1122 1122 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 1123 1123 } 1124 1124 } ··· 1693 1693 */ 1694 1694 if ((hd = shost_priv(SCpnt->device->host)) == NULL) { 1695 1695 SCpnt->result = DID_RESET << 16; 1696 - SCpnt->scsi_done(SCpnt); 1696 + scsi_done(SCpnt); 1697 1697 printk(KERN_ERR MYNAM ": task abort: " 1698 1698 "can't locate host! 
(sc=%p)\n", SCpnt); 1699 1699 return FAILED; ··· 1710 1710 "task abort: device has been deleted (sc=%p)\n", 1711 1711 ioc->name, SCpnt)); 1712 1712 SCpnt->result = DID_NO_CONNECT << 16; 1713 - SCpnt->scsi_done(SCpnt); 1713 + scsi_done(SCpnt); 1714 1714 retval = SUCCESS; 1715 1715 goto out; 1716 1716 } ··· 3218 3218 static DEVICE_ATTR(debug_level, S_IRUGO | S_IWUSR, 3219 3219 mptscsih_debug_level_show, mptscsih_debug_level_store); 3220 3220 3221 - struct device_attribute *mptscsih_host_attrs[] = { 3222 - &dev_attr_version_fw, 3223 - &dev_attr_version_bios, 3224 - &dev_attr_version_mpi, 3225 - &dev_attr_version_product, 3226 - &dev_attr_version_nvdata_persistent, 3227 - &dev_attr_version_nvdata_default, 3228 - &dev_attr_board_name, 3229 - &dev_attr_board_assembly, 3230 - &dev_attr_board_tracer, 3231 - &dev_attr_io_delay, 3232 - &dev_attr_device_delay, 3233 - &dev_attr_debug_level, 3221 + static struct attribute *mptscsih_host_attrs[] = { 3222 + &dev_attr_version_fw.attr, 3223 + &dev_attr_version_bios.attr, 3224 + &dev_attr_version_mpi.attr, 3225 + &dev_attr_version_product.attr, 3226 + &dev_attr_version_nvdata_persistent.attr, 3227 + &dev_attr_version_nvdata_default.attr, 3228 + &dev_attr_board_name.attr, 3229 + &dev_attr_board_assembly.attr, 3230 + &dev_attr_board_tracer.attr, 3231 + &dev_attr_io_delay.attr, 3232 + &dev_attr_device_delay.attr, 3233 + &dev_attr_debug_level.attr, 3234 3234 NULL, 3235 3235 }; 3236 3236 3237 - EXPORT_SYMBOL(mptscsih_host_attrs); 3237 + static const struct attribute_group mptscsih_host_attr_group = { 3238 + .attrs = mptscsih_host_attrs 3239 + }; 3240 + 3241 + const struct attribute_group *mptscsih_host_attr_groups[] = { 3242 + &mptscsih_host_attr_group, 3243 + NULL 3244 + }; 3245 + EXPORT_SYMBOL(mptscsih_host_attr_groups); 3238 3246 3239 3247 EXPORT_SYMBOL(mptscsih_remove); 3240 3248 EXPORT_SYMBOL(mptscsih_shutdown);
+1 -1
drivers/message/fusion/mptscsih.h
··· 131 131 extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth); 132 132 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); 133 133 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); 134 - extern struct device_attribute *mptscsih_host_attrs[]; 134 + extern const struct attribute_group *mptscsih_host_attr_groups[]; 135 135 extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); 136 136 extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); 137 137 extern void mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd);
+3 -3
drivers/message/fusion/mptspi.c
··· 782 782 783 783 if (!vdevice || !vdevice->vtarget) { 784 784 SCpnt->result = DID_NO_CONNECT << 16; 785 - SCpnt->scsi_done(SCpnt); 785 + scsi_done(SCpnt); 786 786 return 0; 787 787 } 788 788 789 789 if (SCpnt->device->channel == 1 && 790 790 mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) { 791 791 SCpnt->result = DID_NO_CONNECT << 16; 792 - SCpnt->scsi_done(SCpnt); 792 + scsi_done(SCpnt); 793 793 return 0; 794 794 } 795 795 ··· 843 843 .sg_tablesize = MPT_SCSI_SG_DEPTH, 844 844 .max_sectors = 8192, 845 845 .cmd_per_lun = 7, 846 - .shost_attrs = mptscsih_host_attrs, 846 + .shost_groups = mptscsih_host_attr_groups, 847 847 }; 848 848 849 849 static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
+2 -2
drivers/s390/scsi/zfcp_ext.h
··· 184 184 extern const struct attribute_group *zfcp_unit_attr_groups[]; 185 185 extern const struct attribute_group *zfcp_port_attr_groups[]; 186 186 extern struct mutex zfcp_sysfs_port_units_mutex; 187 - extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; 188 - extern struct device_attribute *zfcp_sysfs_shost_attrs[]; 187 + extern const struct attribute_group *zfcp_sysfs_sdev_attr_groups[]; 188 + extern const struct attribute_group *zfcp_sysfs_shost_attr_groups[]; 189 189 bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port); 190 190 191 191 /* zfcp_unit.c */
+1 -1
drivers/s390/scsi/zfcp_fsf.c
··· 2501 2501 zfcp_dbf_scsi_result(scpnt, req); 2502 2502 2503 2503 scpnt->host_scribble = NULL; 2504 - (scpnt->scsi_done) (scpnt); 2504 + scsi_done(scpnt); 2505 2505 /* 2506 2506 * We must hold this lock until scsi_done has been called. 2507 2507 * Otherwise we may call scsi_done after abort regarding this
+4 -4
drivers/s390/scsi/zfcp_scsi.c
··· 60 60 { 61 61 set_host_byte(scpnt, result); 62 62 zfcp_dbf_scsi_fail_send(scpnt); 63 - scpnt->scsi_done(scpnt); 63 + scsi_done(scpnt); 64 64 } 65 65 66 66 static ··· 78 78 if (unlikely(scsi_result)) { 79 79 scpnt->result = scsi_result; 80 80 zfcp_dbf_scsi_fail_send(scpnt); 81 - scpnt->scsi_done(scpnt); 81 + scsi_done(scpnt); 82 82 return 0; 83 83 } 84 84 ··· 444 444 /* report size limit per scatter-gather segment */ 445 445 .max_segment_size = ZFCP_QDIO_SBALE_LEN, 446 446 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, 447 - .shost_attrs = zfcp_sysfs_shost_attrs, 448 - .sdev_attrs = zfcp_sysfs_sdev_attrs, 447 + .shost_groups = zfcp_sysfs_shost_attr_groups, 448 + .sdev_groups = zfcp_sysfs_sdev_attr_groups, 449 449 .track_queue_depth = 1, 450 450 .supported_mode = MODE_INITIATOR, 451 451 };
+35 -17
drivers/s390/scsi/zfcp_sysfs.c
··· 672 672 ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n", 673 673 atomic_read(&zfcp_sdev->status)); 674 674 675 - struct device_attribute *zfcp_sysfs_sdev_attrs[] = { 676 - &dev_attr_fcp_lun, 677 - &dev_attr_wwpn, 678 - &dev_attr_hba_id, 679 - &dev_attr_read_latency, 680 - &dev_attr_write_latency, 681 - &dev_attr_cmd_latency, 682 - &dev_attr_zfcp_access_denied, 683 - &dev_attr_zfcp_failed, 684 - &dev_attr_zfcp_in_recovery, 685 - &dev_attr_zfcp_status, 675 + struct attribute *zfcp_sdev_attrs[] = { 676 + &dev_attr_fcp_lun.attr, 677 + &dev_attr_wwpn.attr, 678 + &dev_attr_hba_id.attr, 679 + &dev_attr_read_latency.attr, 680 + &dev_attr_write_latency.attr, 681 + &dev_attr_cmd_latency.attr, 682 + &dev_attr_zfcp_access_denied.attr, 683 + &dev_attr_zfcp_failed.attr, 684 + &dev_attr_zfcp_in_recovery.attr, 685 + &dev_attr_zfcp_status.attr, 686 + NULL 687 + }; 688 + 689 + static const struct attribute_group zfcp_sysfs_sdev_attr_group = { 690 + .attrs = zfcp_sdev_attrs 691 + }; 692 + 693 + const struct attribute_group *zfcp_sysfs_sdev_attr_groups[] = { 694 + &zfcp_sysfs_sdev_attr_group, 686 695 NULL 687 696 }; 688 697 ··· 792 783 } 793 784 static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); 794 785 795 - struct device_attribute *zfcp_sysfs_shost_attrs[] = { 796 - &dev_attr_utilization, 797 - &dev_attr_requests, 798 - &dev_attr_megabytes, 799 - &dev_attr_seconds_active, 800 - &dev_attr_queue_full, 786 + static struct attribute *zfcp_sysfs_shost_attrs[] = { 787 + &dev_attr_utilization.attr, 788 + &dev_attr_requests.attr, 789 + &dev_attr_megabytes.attr, 790 + &dev_attr_seconds_active.attr, 791 + &dev_attr_queue_full.attr, 792 + NULL 793 + }; 794 + 795 + static const struct attribute_group zfcp_sysfs_shost_attr_group = { 796 + .attrs = zfcp_sysfs_shost_attrs 797 + }; 798 + 799 + const struct attribute_group *zfcp_sysfs_shost_attr_groups[] = { 800 + &zfcp_sysfs_shost_attr_group, 801 801 NULL 802 802 }; 803 803
+9 -9
drivers/scsi/3w-9xxx.c
··· 197 197 }; 198 198 199 199 /* Host attributes initializer */ 200 - static struct device_attribute *twa_host_attrs[] = { 201 - &twa_host_stats_attr, 200 + static struct attribute *twa_host_attrs[] = { 201 + &twa_host_stats_attr.attr, 202 202 NULL, 203 203 }; 204 + 205 + ATTRIBUTE_GROUPS(twa_host); 204 206 205 207 /* File operations struct for character device */ 206 208 static const struct file_operations twa_fops = { ··· 1354 1352 /* Now complete the io */ 1355 1353 if (twa_command_mapped(cmd)) 1356 1354 scsi_dma_unmap(cmd); 1357 - cmd->scsi_done(cmd); 1355 + scsi_done(cmd); 1358 1356 tw_dev->state[request_id] = TW_S_COMPLETED; 1359 1357 twa_free_request_id(tw_dev, request_id); 1360 1358 tw_dev->posted_request_count--; ··· 1598 1596 cmd->result = (DID_RESET << 16); 1599 1597 if (twa_command_mapped(cmd)) 1600 1598 scsi_dma_unmap(cmd); 1601 - cmd->scsi_done(cmd); 1599 + scsi_done(cmd); 1602 1600 } 1603 1601 } 1604 1602 } ··· 1746 1744 } /* End twa_scsi_eh_reset() */ 1747 1745 1748 1746 /* This is the main scsi queue function to handle scsi opcodes */ 1749 - static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) 1747 + static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt) 1750 1748 { 1749 + void (*done)(struct scsi_cmnd *) = scsi_done; 1751 1750 int request_id, retval; 1752 1751 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; 1753 1752 ··· 1765 1762 retval = 0; 1766 1763 goto out; 1767 1764 } 1768 - 1769 - /* Save done function into scsi_cmnd struct */ 1770 - SCpnt->scsi_done = done; 1771 1765 1772 1766 /* Get a free request id */ 1773 1767 twa_get_request_id(tw_dev, &request_id); ··· 1990 1990 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH, 1991 1991 .max_sectors = TW_MAX_SECTORS, 1992 1992 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 1993 - .shost_attrs = twa_host_attrs, 1993 + .shost_groups = twa_host_groups, 1994 1994 .emulated = 1, 1995 1995 .no_write_same = 1, 1996 1996 };
+9 -9
drivers/scsi/3w-sas.c
··· 198 198 }; 199 199 200 200 /* Host attributes initializer */ 201 - static struct device_attribute *twl_host_attrs[] = { 202 - &twl_host_stats_attr, 201 + static struct attribute *twl_host_attrs[] = { 202 + &twl_host_stats_attr.attr, 203 203 NULL, 204 204 }; 205 + 206 + ATTRIBUTE_GROUPS(twl_host); 205 207 206 208 /* This function will look up an AEN severity string */ 207 209 static char *twl_aen_severity_lookup(unsigned char severity_code) ··· 1218 1216 1219 1217 /* Now complete the io */ 1220 1218 scsi_dma_unmap(cmd); 1221 - cmd->scsi_done(cmd); 1219 + scsi_done(cmd); 1222 1220 tw_dev->state[request_id] = TW_S_COMPLETED; 1223 1221 twl_free_request_id(tw_dev, request_id); 1224 1222 tw_dev->posted_request_count--; ··· 1371 1369 if (cmd) { 1372 1370 cmd->result = (DID_RESET << 16); 1373 1371 scsi_dma_unmap(cmd); 1374 - cmd->scsi_done(cmd); 1372 + scsi_done(cmd); 1375 1373 } 1376 1374 } 1377 1375 } ··· 1452 1450 } /* End twl_scsi_eh_reset() */ 1453 1451 1454 1452 /* This is the main scsi queue function to handle scsi opcodes */ 1455 - static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) 1453 + static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt) 1456 1454 { 1455 + void (*done)(struct scsi_cmnd *) = scsi_done; 1457 1456 int request_id, retval; 1458 1457 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; 1459 1458 ··· 1463 1460 retval = SCSI_MLQUEUE_HOST_BUSY; 1464 1461 goto out; 1465 1462 } 1466 - 1467 - /* Save done function into scsi_cmnd struct */ 1468 - SCpnt->scsi_done = done; 1469 1463 1470 1464 /* Get a free request id */ 1471 1465 twl_get_request_id(tw_dev, &request_id); ··· 1544 1544 .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH, 1545 1545 .max_sectors = TW_MAX_SECTORS, 1546 1546 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 1547 - .shost_attrs = twl_host_attrs, 1547 + .shost_groups = twl_host_groups, 1548 1548 .emulated = 1, 1549 1549 .no_write_same = 1, 1550 1550 };
+13 -13
drivers/scsi/3w-xxxx.c
··· 532 532 }; 533 533 534 534 /* Host attributes initializer */ 535 - static struct device_attribute *tw_host_attrs[] = { 536 - &tw_host_stats_attr, 535 + static struct attribute *tw_host_attrs[] = { 536 + &tw_host_stats_attr.attr, 537 537 NULL, 538 538 }; 539 + 540 + ATTRIBUTE_GROUPS(tw_host); 539 541 540 542 /* This function will read the aen queue from the isr */ 541 543 static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) ··· 1162 1160 tw_dev->state[request_id] = TW_S_COMPLETED; 1163 1161 tw_state_request_finish(tw_dev, request_id); 1164 1162 tw_dev->srb[request_id]->result = (DID_OK << 16); 1165 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); 1163 + scsi_done(tw_dev->srb[request_id]); 1166 1164 } 1167 1165 command_packet->byte8.param.sgl[0].address = param_value; 1168 1166 command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector); ··· 1307 1305 if (srb != NULL) { 1308 1306 srb->result = (DID_RESET << 16); 1309 1307 scsi_dma_unmap(srb); 1310 - srb->scsi_done(srb); 1308 + scsi_done(srb); 1311 1309 } 1312 1310 } 1313 1311 } ··· 1507 1505 tw_dev->state[request_id] = TW_S_COMPLETED; 1508 1506 tw_state_request_finish(tw_dev, request_id); 1509 1507 tw_dev->srb[request_id]->result = (DID_OK << 16); 1510 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); 1508 + scsi_done(tw_dev->srb[request_id]); 1511 1509 return 0; 1512 1510 } 1513 1511 ··· 1798 1796 1799 1797 /* If we got a request_sense, we probably want a reset, return error */ 1800 1798 tw_dev->srb[request_id]->result = (DID_ERROR << 16); 1801 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); 1799 + scsi_done(tw_dev->srb[request_id]); 1802 1800 1803 1801 return 0; 1804 1802 } /* End tw_scsiop_request_sense() */ ··· 1920 1918 } /* End tw_scsiop_test_unit_ready_complete() */ 1921 1919 1922 1920 /* This is the main scsi queue function to handle scsi opcodes */ 1923 - static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd 
*)) 1921 + static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt) 1924 1922 { 1923 + void (*done)(struct scsi_cmnd *) = scsi_done; 1925 1924 unsigned char *command = SCpnt->cmnd; 1926 1925 int request_id = 0; 1927 1926 int retval = 1; ··· 1931 1928 /* If we are resetting due to timed out ioctl, report as busy */ 1932 1929 if (test_bit(TW_IN_RESET, &tw_dev->flags)) 1933 1930 return SCSI_MLQUEUE_HOST_BUSY; 1934 - 1935 - /* Save done function into struct scsi_cmnd */ 1936 - SCpnt->scsi_done = done; 1937 1931 1938 1932 /* Queue the command and get a request id */ 1939 1933 tw_state_request_start(tw_dev, &request_id); ··· 2165 2165 /* Now complete the io */ 2166 2166 if ((error != TW_ISR_DONT_COMPLETE)) { 2167 2167 scsi_dma_unmap(tw_dev->srb[request_id]); 2168 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); 2168 + scsi_done(tw_dev->srb[request_id]); 2169 2169 tw_dev->state[request_id] = TW_S_COMPLETED; 2170 2170 tw_state_request_finish(tw_dev, request_id); 2171 2171 tw_dev->posted_request_count--; ··· 2242 2242 .sg_tablesize = TW_MAX_SGL_LENGTH, 2243 2243 .max_sectors = TW_MAX_SECTORS, 2244 2244 .cmd_per_lun = TW_MAX_CMDS_PER_LUN, 2245 - .shost_attrs = tw_host_attrs, 2245 + .shost_groups = tw_host_groups, 2246 2246 .emulated = 1, 2247 2247 .no_write_same = 1, 2248 2248 }; ··· 2252 2252 { 2253 2253 struct Scsi_Host *host = NULL; 2254 2254 TW_Device_Extension *tw_dev; 2255 - int retval = -ENODEV; 2255 + int retval; 2256 2256 2257 2257 retval = pci_enable_device(pdev); 2258 2258 if (retval) {
+10 -10
drivers/scsi/53c700.c
··· 163 163 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); 164 164 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); 165 165 166 - STATIC struct device_attribute *NCR_700_dev_attrs[]; 166 + STATIC const struct attribute_group *NCR_700_dev_groups[]; 167 167 168 168 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL; 169 169 ··· 300 300 static int banner = 0; 301 301 int j; 302 302 303 - if(tpnt->sdev_attrs == NULL) 304 - tpnt->sdev_attrs = NCR_700_dev_attrs; 303 + if (tpnt->sdev_groups == NULL) 304 + tpnt->sdev_groups = NCR_700_dev_groups; 305 305 306 306 memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL); 307 307 if (!memory) { ··· 634 634 635 635 SCp->host_scribble = NULL; 636 636 SCp->result = result; 637 - SCp->scsi_done(SCp); 637 + scsi_done(SCp); 638 638 } else { 639 639 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n"); 640 640 } ··· 1571 1571 * deadlock on the 1572 1572 * hostdata->state_lock */ 1573 1573 SCp->result = DID_RESET << 16; 1574 - SCp->scsi_done(SCp); 1574 + scsi_done(SCp); 1575 1575 } 1576 1576 mdelay(25); 1577 1577 NCR_700_chip_setup(host); ··· 1751 1751 return IRQ_RETVAL(handled); 1752 1752 } 1753 1753 1754 - static int 1755 - NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) 1754 + static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp) 1756 1755 { 1757 1756 struct NCR_700_Host_Parameters *hostdata = 1758 1757 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; ··· 1791 1792 1792 1793 slot->cmnd = SCp; 1793 1794 1794 - SCp->scsi_done = done; 1795 1795 SCp->host_scribble = (unsigned char *)slot; 1796 1796 SCp->SCp.ptr = NULL; 1797 1797 SCp->SCp.buffer = NULL; ··· 2085 2087 .show = NCR_700_show_active_tags, 2086 2088 }; 2087 2089 2088 - STATIC struct device_attribute *NCR_700_dev_attrs[] = { 2089 - &NCR_700_active_tags_attr, 2090 + STATIC struct attribute *NCR_700_dev_attrs[] = { 2091 + 
&NCR_700_active_tags_attr.attr, 2090 2092 NULL, 2091 2093 }; 2094 + 2095 + ATTRIBUTE_GROUPS(NCR_700_dev); 2092 2096 2093 2097 EXPORT_SYMBOL(NCR_700_detect); 2094 2098 EXPORT_SYMBOL(NCR_700_release);
+6 -7
drivers/scsi/BusLogic.c
··· 2624 2624 command->reset_chain; 2625 2625 command->reset_chain = NULL; 2626 2626 command->result = DID_RESET << 16; 2627 - command->scsi_done(command); 2627 + scsi_done(command); 2628 2628 command = nxt_cmd; 2629 2629 } 2630 2630 #endif ··· 2641 2641 blogic_dealloc_ccb(ccb, 1); 2642 2642 adapter->active_cmds[tgt_id]--; 2643 2643 command->result = DID_RESET << 16; 2644 - command->scsi_done(command); 2644 + scsi_done(command); 2645 2645 } 2646 2646 adapter->bdr_pend[tgt_id] = NULL; 2647 2647 } else { ··· 2713 2713 /* 2714 2714 Call the SCSI Command Completion Routine. 2715 2715 */ 2716 - command->scsi_done(command); 2716 + scsi_done(command); 2717 2717 } 2718 2718 } 2719 2719 adapter->processing_ccbs = false; ··· 2866 2866 Outgoing Mailbox for execution by the associated Host Adapter. 2867 2867 */ 2868 2868 2869 - static int blogic_qcmd_lck(struct scsi_cmnd *command, 2870 - void (*comp_cb) (struct scsi_cmnd *)) 2869 + static int blogic_qcmd_lck(struct scsi_cmnd *command) 2871 2870 { 2871 + void (*comp_cb)(struct scsi_cmnd *) = scsi_done; 2872 2872 struct blogic_adapter *adapter = 2873 2873 (struct blogic_adapter *) command->device->host->hostdata; 2874 2874 struct blogic_tgt_flags *tgt_flags = ··· 3038 3038 return SCSI_MLQUEUE_HOST_BUSY; 3039 3039 } 3040 3040 ccb->sensedata = sense_buf; 3041 - command->scsi_done = comp_cb; 3042 3041 if (blogic_multimaster_type(adapter)) { 3043 3042 /* 3044 3043 Place the CCB in an Outgoing Mailbox. The higher levels ··· 3059 3060 blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter); 3060 3061 blogic_dealloc_ccb(ccb, 1); 3061 3062 command->result = DID_ERROR << 16; 3062 - command->scsi_done(command); 3063 + scsi_done(command); 3063 3064 } 3064 3065 } 3065 3066 } else {
+6 -6
drivers/scsi/NCR5380.c
··· 547 547 hostdata->sensing = NULL; 548 548 } 549 549 550 - cmd->scsi_done(cmd); 550 + scsi_done(cmd); 551 551 } 552 552 553 553 /** ··· 573 573 case WRITE_10: 574 574 shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n"); 575 575 cmd->result = (DID_ERROR << 16); 576 - cmd->scsi_done(cmd); 576 + scsi_done(cmd); 577 577 return 0; 578 578 } 579 579 #endif /* (NDEBUG & NDEBUG_NO_WRITE) */ ··· 960 960 * hostdata->connected will be set to cmd. 961 961 * SELECT interrupt will be disabled. 962 962 * 963 - * If failed (no target) : cmd->scsi_done() will be called, and the 963 + * If failed (no target) : scsi_done() will be called, and the 964 964 * cmd->result host byte set to DID_BAD_TARGET. 965 965 */ 966 966 ··· 2262 2262 dsprintk(NDEBUG_ABORT, instance, 2263 2263 "abort: removed %p from issue queue\n", cmd); 2264 2264 cmd->result = DID_ABORT << 16; 2265 - cmd->scsi_done(cmd); /* No tag or busy flag to worry about */ 2265 + scsi_done(cmd); /* No tag or busy flag to worry about */ 2266 2266 goto out; 2267 2267 } 2268 2268 ··· 2357 2357 list_for_each_entry(ncmd, &hostdata->autosense, list) { 2358 2358 struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd); 2359 2359 2360 - cmd->scsi_done(cmd); 2360 + scsi_done(cmd); 2361 2361 } 2362 2362 INIT_LIST_HEAD(&hostdata->autosense); 2363 2363 ··· 2400 2400 struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd); 2401 2401 2402 2402 scmd->result = DID_RESET << 16; 2403 - scmd->scsi_done(scmd); 2403 + scsi_done(scmd); 2404 2404 } 2405 2405 INIT_LIST_HEAD(&hostdata->unissued); 2406 2406
+2 -3
drivers/scsi/a100u2w.c
··· 911 911 * queue the command down to the controller 912 912 */ 913 913 914 - static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) 914 + static int inia100_queue_lck(struct scsi_cmnd *cmd) 915 915 { 916 916 struct orc_scb *scb; 917 917 struct orc_host *host; /* Point to Host adapter control block */ 918 918 919 919 host = (struct orc_host *) cmd->device->host->hostdata; 920 - cmd->scsi_done = done; 921 920 /* Get free SCSI control block */ 922 921 if ((scb = orc_alloc_scb(host)) == NULL) 923 922 return SCSI_MLQUEUE_HOST_BUSY; ··· 1041 1042 } 1042 1043 cmd->result = scb->tastat | (scb->hastat << 16); 1043 1044 scsi_dma_unmap(cmd); 1044 - cmd->scsi_done(cmd); /* Notify system DONE */ 1045 + scsi_done(cmd); /* Notify system DONE */ 1045 1046 orc_release_scb(host, scb); /* Release SCB for current channel */ 1046 1047 } 1047 1048
+32 -21
drivers/scsi/aacraid/aachba.c
··· 223 223 int sg_max, u64 sg_address); 224 224 static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, 225 225 int pages, int nseg, int nseg_new); 226 + static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd); 226 227 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd); 227 228 static int aac_send_hba_fib(struct scsi_cmnd *scsicmd); 228 229 #ifdef AAC_DETAILED_STATUS_INFO ··· 333 332 struct fib *fibptr) { 334 333 struct scsi_device *device; 335 334 336 - if (unlikely(!scsicmd || !scsicmd->scsi_done)) { 335 + if (unlikely(!scsicmd)) { 337 336 dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n")); 338 337 aac_fib_complete(fibptr); 339 338 return 0; ··· 518 517 return status; 519 518 } 520 519 520 + static void aac_scsi_done(struct scsi_cmnd *scmd) 521 + { 522 + if (scmd->device->request_queue) { 523 + /* SCSI command has been submitted by the SCSI mid-layer. */ 524 + scsi_done(scmd); 525 + } else { 526 + /* SCSI command has been submitted by aac_probe_container(). 
*/ 527 + aac_probe_container_scsi_done(scmd); 528 + } 529 + } 530 + 521 531 static void get_container_name_callback(void *context, struct fib * fibptr) 522 532 { 523 533 struct aac_get_name_resp * get_name_reply; ··· 570 558 scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; 571 559 572 560 aac_fib_complete(fibptr); 573 - scsicmd->scsi_done(scsicmd); 561 + aac_scsi_done(scsicmd); 574 562 } 575 563 576 564 /* ··· 626 614 return aac_scsi_cmd(scsicmd); 627 615 628 616 scsicmd->result = DID_NO_CONNECT << 16; 629 - scsicmd->scsi_done(scsicmd); 617 + aac_scsi_done(scsicmd); 630 618 return 0; 631 619 } 632 620 ··· 816 804 817 805 int aac_probe_container(struct aac_dev *dev, int cid) 818 806 { 819 - struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL); 820 - struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL); 807 + struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL); 808 + struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL); 821 809 int status; 822 810 823 811 if (!scsicmd || !scsidev) { ··· 825 813 kfree(scsidev); 826 814 return -ENOMEM; 827 815 } 828 - scsicmd->scsi_done = aac_probe_container_scsi_done; 829 816 830 817 scsicmd->device = scsidev; 831 818 scsidev->sdev_state = 0; ··· 1105 1094 scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; 1106 1095 1107 1096 aac_fib_complete(fibptr); 1108 - scsicmd->scsi_done(scsicmd); 1097 + aac_scsi_done(scsicmd); 1109 1098 } 1110 1099 1111 1100 /* ··· 1208 1197 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1209 1198 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1210 1199 SCSI_SENSE_BUFFERSIZE)); 1211 - cmd->scsi_done(cmd); 1200 + aac_scsi_done(cmd); 1212 1201 return 1; 1213 1202 } 1214 1203 return 0; ··· 2403 2392 } 2404 2393 aac_fib_complete(fibptr); 2405 2394 2406 - scsicmd->scsi_done(scsicmd); 2395 + aac_scsi_done(scsicmd); 2407 2396 } 2408 2397 2409 2398 static int aac_read(struct scsi_cmnd * scsicmd) ··· 2474 2463 memcpy(scsicmd->sense_buffer, 
&dev->fsa_dev[cid].sense_data, 2475 2464 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2476 2465 SCSI_SENSE_BUFFERSIZE)); 2477 - scsicmd->scsi_done(scsicmd); 2466 + aac_scsi_done(scsicmd); 2478 2467 return 0; 2479 2468 } 2480 2469 ··· 2500 2489 * For some reason, the Fib didn't queue, return QUEUE_FULL 2501 2490 */ 2502 2491 scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL; 2503 - scsicmd->scsi_done(scsicmd); 2492 + aac_scsi_done(scsicmd); 2504 2493 aac_fib_complete(cmd_fibcontext); 2505 2494 aac_fib_free(cmd_fibcontext); 2506 2495 return 0; ··· 2565 2554 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2566 2555 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2567 2556 SCSI_SENSE_BUFFERSIZE)); 2568 - scsicmd->scsi_done(scsicmd); 2557 + aac_scsi_done(scsicmd); 2569 2558 return 0; 2570 2559 } 2571 2560 ··· 2591 2580 * For some reason, the Fib didn't queue, return QUEUE_FULL 2592 2581 */ 2593 2582 scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL; 2594 - scsicmd->scsi_done(scsicmd); 2583 + aac_scsi_done(scsicmd); 2595 2584 2596 2585 aac_fib_complete(cmd_fibcontext); 2597 2586 aac_fib_free(cmd_fibcontext); ··· 2632 2621 2633 2622 aac_fib_complete(fibptr); 2634 2623 aac_fib_free(fibptr); 2635 - cmd->scsi_done(cmd); 2624 + aac_scsi_done(cmd); 2636 2625 } 2637 2626 2638 2627 static int aac_synchronize(struct scsi_cmnd *scsicmd) ··· 2699 2688 2700 2689 aac_fib_complete(fibptr); 2701 2690 aac_fib_free(fibptr); 2702 - scsicmd->scsi_done(scsicmd); 2691 + aac_scsi_done(scsicmd); 2703 2692 } 2704 2693 2705 2694 static int aac_start_stop(struct scsi_cmnd *scsicmd) ··· 2713 2702 if (!(aac->supplement_adapter_info.supported_options2 & 2714 2703 AAC_OPTION_POWER_MANAGEMENT)) { 2715 2704 scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD; 2716 - scsicmd->scsi_done(scsicmd); 2705 + aac_scsi_done(scsicmd); 2717 2706 return 0; 2718 2707 } 2719 2708 ··· 3248 3237 3249 3238 scsi_done_ret: 3250 3239 3251 - scsicmd->scsi_done(scsicmd); 3240 + 
aac_scsi_done(scsicmd); 3252 3241 return 0; 3253 3242 } 3254 3243 ··· 3557 3546 scsicmd->result |= le32_to_cpu(srbreply->scsi_status); 3558 3547 3559 3548 aac_fib_complete(fibptr); 3560 - scsicmd->scsi_done(scsicmd); 3549 + aac_scsi_done(scsicmd); 3561 3550 } 3562 3551 3563 3552 static void hba_resp_task_complete(struct aac_dev *dev, ··· 3697 3686 if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) 3698 3687 scsicmd->SCp.sent_command = 1; 3699 3688 else 3700 - scsicmd->scsi_done(scsicmd); 3689 + aac_scsi_done(scsicmd); 3701 3690 } 3702 3691 3703 3692 /** ··· 3717 3706 if (scmd_id(scsicmd) >= dev->maximum_num_physicals || 3718 3707 scsicmd->device->lun > 7) { 3719 3708 scsicmd->result = DID_NO_CONNECT << 16; 3720 - scsicmd->scsi_done(scsicmd); 3709 + aac_scsi_done(scsicmd); 3721 3710 return 0; 3722 3711 } 3723 3712 ··· 3758 3747 if (scmd_id(scsicmd) >= dev->maximum_num_physicals || 3759 3748 scsicmd->device->lun > AAC_MAX_LUN - 1) { 3760 3749 scsicmd->result = DID_NO_CONNECT << 16; 3761 - scsicmd->scsi_done(scsicmd); 3750 + aac_scsi_done(scsicmd); 3762 3751 return 0; 3763 3752 } 3764 3753
+21 -17
drivers/scsi/aacraid/linit.c
··· 605 605 606 606 607 607 608 - static struct device_attribute *aac_dev_attrs[] = { 609 - &aac_raid_level_attr, 610 - &aac_unique_id_attr, 608 + static struct attribute *aac_dev_attrs[] = { 609 + &aac_raid_level_attr.attr, 610 + &aac_unique_id_attr.attr, 611 611 NULL, 612 612 }; 613 + 614 + ATTRIBUTE_GROUPS(aac_dev); 613 615 614 616 static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd, 615 617 void __user *arg) ··· 1444 1442 .show = aac_show_reset_adapter, 1445 1443 }; 1446 1444 1447 - static struct device_attribute *aac_attrs[] = { 1448 - &aac_model, 1449 - &aac_vendor, 1450 - &aac_flags, 1451 - &aac_kernel_version, 1452 - &aac_monitor_version, 1453 - &aac_bios_version, 1454 - &aac_lld_version, 1455 - &aac_serial_number, 1456 - &aac_max_channel, 1457 - &aac_max_id, 1458 - &aac_reset, 1445 + static struct attribute *aac_host_attrs[] = { 1446 + &aac_model.attr, 1447 + &aac_vendor.attr, 1448 + &aac_flags.attr, 1449 + &aac_kernel_version.attr, 1450 + &aac_monitor_version.attr, 1451 + &aac_bios_version.attr, 1452 + &aac_lld_version.attr, 1453 + &aac_serial_number.attr, 1454 + &aac_max_channel.attr, 1455 + &aac_max_id.attr, 1456 + &aac_reset.attr, 1459 1457 NULL 1460 1458 }; 1459 + 1460 + ATTRIBUTE_GROUPS(aac_host); 1461 1461 1462 1462 ssize_t aac_get_serial_number(struct device *device, char *buf) 1463 1463 { ··· 1487 1483 #endif 1488 1484 .queuecommand = aac_queuecommand, 1489 1485 .bios_param = aac_biosparm, 1490 - .shost_attrs = aac_attrs, 1486 + .shost_groups = aac_host_groups, 1491 1487 .slave_configure = aac_slave_configure, 1492 1488 .change_queue_depth = aac_change_queue_depth, 1493 - .sdev_attrs = aac_dev_attrs, 1489 + .sdev_groups = aac_dev_groups, 1494 1490 .eh_abort_handler = aac_eh_abort, 1495 1491 .eh_device_reset_handler = aac_eh_dev_reset, 1496 1492 .eh_target_reset_handler = aac_eh_target_reset,
+6 -8
drivers/scsi/advansys.c
··· 3308 3308 shost->host_no); 3309 3309 3310 3310 seq_printf(m, 3311 - " iop_base 0x%lx, cable_detect: %X, err_code %u\n", 3312 - (unsigned long)v->iop_base, 3311 + " iop_base 0x%p, cable_detect: %X, err_code %u\n", 3312 + v->iop_base, 3313 3313 AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT, 3314 3314 v->err_code); 3315 3315 ··· 3592 3592 { 3593 3593 scsi_dma_unmap(scp); 3594 3594 ASC_STATS(scp->device->host, done); 3595 - scp->scsi_done(scp); 3595 + scsi_done(scp); 3596 3596 } 3597 3597 3598 3598 static void AscSetBank(PortAddr iop_base, uchar bank) ··· 7477 7477 return ASC_ERROR; 7478 7478 } 7479 7479 7480 - asc_sg_head = kzalloc(sizeof(asc_scsi_q->sg_head) + 7481 - use_sg * sizeof(struct asc_sg_list), GFP_ATOMIC); 7480 + asc_sg_head = kzalloc(struct_size(asc_sg_head, sg_list, use_sg), 7481 + GFP_ATOMIC); 7482 7482 if (!asc_sg_head) { 7483 7483 scsi_dma_unmap(scp); 7484 7484 set_host_byte(scp, DID_SOFT_ERROR); ··· 8453 8453 * This function always returns 0. Command return status is saved 8454 8454 * in the 'scp' result field. 8455 8455 */ 8456 - static int 8457 - advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) 8456 + static int advansys_queuecommand_lck(struct scsi_cmnd *scp) 8458 8457 { 8459 8458 struct Scsi_Host *shost = scp->device->host; 8460 8459 int asc_res, result = 0; 8461 8460 8462 8461 ASC_STATS(shost, queuecommand); 8463 - scp->scsi_done = done; 8464 8462 8465 8463 asc_res = asc_execute_scsi_cmnd(scp); 8466 8464
+17 -12
drivers/scsi/aha152x.c
··· 905 905 * Queue a command and setup interrupts for a free bus. 906 906 */ 907 907 static int aha152x_internal_queue(struct scsi_cmnd *SCpnt, 908 - struct completion *complete, 909 - int phase, void (*done)(struct scsi_cmnd *)) 908 + struct completion *complete, int phase) 910 909 { 911 910 struct Scsi_Host *shpnt = SCpnt->device->host; 912 911 unsigned long flags; 913 912 914 - SCpnt->scsi_done = done; 915 913 SCpnt->SCp.phase = not_issued | phase; 916 914 SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */ 917 915 SCpnt->SCp.Message = 0; ··· 975 977 * queue a command 976 978 * 977 979 */ 978 - static int aha152x_queue_lck(struct scsi_cmnd *SCpnt, 979 - void (*done)(struct scsi_cmnd *)) 980 + static int aha152x_queue_lck(struct scsi_cmnd *SCpnt) 980 981 { 981 - return aha152x_internal_queue(SCpnt, NULL, 0, done); 982 + return aha152x_internal_queue(SCpnt, NULL, 0); 982 983 } 983 984 984 985 static DEF_SCSI_QCMD(aha152x_queue) ··· 993 996 } else { 994 997 printk(KERN_ERR "aha152x: reset_done w/o completion\n"); 995 998 } 999 + } 1000 + 1001 + static void aha152x_scsi_done(struct scsi_cmnd *SCpnt) 1002 + { 1003 + if (SCpnt->SCp.phase & resetting) 1004 + reset_done(SCpnt); 1005 + else 1006 + scsi_done(SCpnt); 996 1007 } 997 1008 998 1009 /* ··· 1069 1064 1070 1065 SCpnt->cmd_len = 0; 1071 1066 1072 - aha152x_internal_queue(SCpnt, &done, resetting, reset_done); 1067 + aha152x_internal_queue(SCpnt, &done, resetting); 1073 1068 1074 1069 timeleft = wait_for_completion_timeout(&done, 100*HZ); 1075 1070 if (!timeleft) { ··· 1444 1439 scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0); 1445 1440 1446 1441 DO_UNLOCK(flags); 1447 - aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done); 1442 + aha152x_internal_queue(ptr, NULL, check_condition); 1448 1443 DO_LOCK(flags); 1449 1444 } 1450 1445 } 1451 1446 1452 - if(DONE_SC && DONE_SC->scsi_done) { 1447 + if (DONE_SC) { 1453 1448 struct scsi_cmnd *ptr = DONE_SC; 1454 1449 DONE_SC=NULL; 1455 1450 ··· 1458 
1453 if (!HOSTDATA(shpnt)->commands) 1459 1454 SETPORT(PORTA, 0); /* turn led off */ 1460 1455 1461 - if(ptr->scsi_done != reset_done) { 1456 + if (!(ptr->SCp.phase & resetting)) { 1462 1457 kfree(ptr->host_scribble); 1463 1458 ptr->host_scribble=NULL; 1464 1459 } 1465 1460 1466 1461 DO_UNLOCK(flags); 1467 - ptr->scsi_done(ptr); 1462 + aha152x_scsi_done(ptr); 1468 1463 DO_LOCK(flags); 1469 1464 } 1470 1465 ··· 2263 2258 ptr->host_scribble=NULL; 2264 2259 2265 2260 set_host_byte(ptr, DID_RESET); 2266 - ptr->scsi_done(ptr); 2261 + aha152x_scsi_done(ptr); 2267 2262 } 2268 2263 2269 2264 ptr = next;
+6 -10
drivers/scsi/aha1542.c
··· 268 268 struct bio_vec bv; 269 269 270 270 rq_for_each_segment(bv, rq, iter) { 271 - memcpy_to_page(bv.bv_page, bv.bv_offset, buf, 272 - bv.bv_len); 271 + memcpy_to_bvec(&bv, buf); 273 272 buf += bv.bv_len; 274 273 } 275 274 } ··· 280 281 { 281 282 struct Scsi_Host *sh = dev_id; 282 283 struct aha1542_hostdata *aha1542 = shost_priv(sh); 283 - void (*my_done)(struct scsi_cmnd *) = NULL; 284 284 int errstatus, mbi, mbo, mbistatus; 285 285 int number_serviced; 286 286 unsigned long flags; ··· 367 369 368 370 tmp_cmd = aha1542->int_cmds[mbo]; 369 371 370 - if (!tmp_cmd || !tmp_cmd->scsi_done) { 372 + if (!tmp_cmd) { 371 373 spin_unlock_irqrestore(sh->host_lock, flags); 372 374 shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n"); 373 375 shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat, 374 376 ccb[mbo].hastat, ccb[mbo].idlun, mbo); 375 377 return IRQ_HANDLED; 376 378 } 377 - my_done = tmp_cmd->scsi_done; 378 379 aha1542_free_cmd(tmp_cmd); 379 380 /* 380 381 * Fetch the sense data, and tuck it away, in the required slot. 
The ··· 407 410 aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as 408 411 * far as queuecommand is concerned 409 412 */ 410 - my_done(tmp_cmd); 413 + scsi_done(tmp_cmd); 411 414 number_serviced++; 412 415 }; 413 416 } ··· 428 431 if (*cmd->cmnd == REQUEST_SENSE) { 429 432 /* Don't do the command - we have the sense data already */ 430 433 cmd->result = 0; 431 - cmd->scsi_done(cmd); 434 + scsi_done(cmd); 432 435 return 0; 433 436 } 434 437 #ifdef DEBUG ··· 451 454 struct bio_vec bv; 452 455 453 456 rq_for_each_segment(bv, rq, iter) { 454 - memcpy_from_page(buf, bv.bv_page, bv.bv_offset, 455 - bv.bv_len); 457 + memcpy_from_bvec(buf, &bv); 456 458 buf += bv.bv_len; 457 459 } 458 460 } ··· 484 488 aha1542->aha1542_last_mbo_used = mbo; 485 489 486 490 #ifdef DEBUG 487 - shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done); 491 + shost_printk(KERN_DEBUG, sh, "Sending command (%d)...", mbo); 488 492 #endif 489 493 490 494 /* This gets trashed for some reason */
+2 -2
drivers/scsi/aha1740.c
··· 315 315 return IRQ_RETVAL(handled); 316 316 } 317 317 318 - static int aha1740_queuecommand_lck(struct scsi_cmnd * SCpnt, 319 - void (*done)(struct scsi_cmnd *)) 318 + static int aha1740_queuecommand_lck(struct scsi_cmnd *SCpnt) 320 319 { 320 + void (*done)(struct scsi_cmnd *) = scsi_done; 321 321 unchar direction; 322 322 unchar *cmd = (unchar *) SCpnt->cmnd; 323 323 unchar target = scmd_id(SCpnt);
+2 -4
drivers/scsi/aic7xxx/aic79xx_osm.c
··· 572 572 /* 573 573 * Queue an SCB to the controller. 574 574 */ 575 - static int 576 - ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) 575 + static int ahd_linux_queue_lck(struct scsi_cmnd *cmd) 577 576 { 578 577 struct ahd_softc *ahd; 579 578 struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device); ··· 580 581 581 582 ahd = *(struct ahd_softc **)cmd->device->host->hostdata; 582 583 583 - cmd->scsi_done = scsi_done; 584 584 cmd->result = CAM_REQ_INPROG << 16; 585 585 rtn = ahd_linux_run_command(ahd, dev, cmd); 586 586 ··· 2109 2111 2110 2112 ahd_cmd_set_transaction_status(cmd, new_status); 2111 2113 2112 - cmd->scsi_done(cmd); 2114 + scsi_done(cmd); 2113 2115 } 2114 2116 2115 2117 static void
+1 -1
drivers/scsi/aic7xxx/aic79xx_osm.h
··· 196 196 /* 197 197 * XXX 198 198 * ahd_dmamap_sync is only used on buffers allocated with 199 - * the pci_alloc_consistent() API. Although I'm not sure how 199 + * the dma_alloc_coherent() API. Although I'm not sure how 200 200 * this works on architectures with a write buffer, Linux does 201 201 * not have an API to sync "coherent" memory. Perhaps we need 202 202 * to do an mb()?
+2 -4
drivers/scsi/aic7xxx/aic7xxx_osm.c
··· 518 518 /* 519 519 * Queue an SCB to the controller. 520 520 */ 521 - static int 522 - ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) 521 + static int ahc_linux_queue_lck(struct scsi_cmnd *cmd) 523 522 { 524 523 struct ahc_softc *ahc; 525 524 struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device); ··· 529 530 530 531 ahc_lock(ahc, &flags); 531 532 if (ahc->platform_data->qfrozen == 0) { 532 - cmd->scsi_done = scsi_done; 533 533 cmd->result = CAM_REQ_INPROG << 16; 534 534 rtn = ahc_linux_run_command(ahc, dev, cmd); 535 535 } ··· 1984 1986 ahc_cmd_set_transaction_status(cmd, new_status); 1985 1987 } 1986 1988 1987 - cmd->scsi_done(cmd); 1989 + scsi_done(cmd); 1988 1990 } 1989 1991 1990 1992 static void
+1 -1
drivers/scsi/aic7xxx/aic7xxx_osm.h
··· 209 209 /* 210 210 * XXX 211 211 * ahc_dmamap_sync is only used on buffers allocated with 212 - * the pci_alloc_consistent() API. Although I'm not sure how 212 + * the dma_alloc_coherent() API. Although I'm not sure how 213 213 * this works on architectures with a write buffer, Linux does 214 214 * not have an API to sync "coherent" memory. Perhaps we need 215 215 * to do an mb()?
+1 -1
drivers/scsi/arcmsr/arcmsr.h
··· 1041 1041 struct QBUFFER __iomem *); 1042 1042 extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *); 1043 1043 extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *); 1044 - extern struct device_attribute *arcmsr_host_attrs[]; 1044 + extern const struct attribute_group *arcmsr_host_groups[]; 1045 1045 extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *); 1046 1046 void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
+20 -13
drivers/scsi/arcmsr/arcmsr_attr.c
··· 58 58 #include <scsi/scsi_transport.h> 59 59 #include "arcmsr.h" 60 60 61 - struct device_attribute *arcmsr_host_attrs[]; 62 - 63 61 static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp, 64 62 struct kobject *kobj, 65 63 struct bin_attribute *bin, ··· 387 389 static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL); 388 390 static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL); 389 391 390 - struct device_attribute *arcmsr_host_attrs[] = { 391 - &dev_attr_host_driver_version, 392 - &dev_attr_host_driver_posted_cmd, 393 - &dev_attr_host_driver_reset, 394 - &dev_attr_host_driver_abort, 395 - &dev_attr_host_fw_model, 396 - &dev_attr_host_fw_version, 397 - &dev_attr_host_fw_request_len, 398 - &dev_attr_host_fw_numbers_queue, 399 - &dev_attr_host_fw_sdram_size, 400 - &dev_attr_host_fw_hd_channels, 392 + static struct attribute *arcmsr_host_attrs[] = { 393 + &dev_attr_host_driver_version.attr, 394 + &dev_attr_host_driver_posted_cmd.attr, 395 + &dev_attr_host_driver_reset.attr, 396 + &dev_attr_host_driver_abort.attr, 397 + &dev_attr_host_fw_model.attr, 398 + &dev_attr_host_fw_version.attr, 399 + &dev_attr_host_fw_request_len.attr, 400 + &dev_attr_host_fw_numbers_queue.attr, 401 + &dev_attr_host_fw_sdram_size.attr, 402 + &dev_attr_host_fw_hd_channels.attr, 401 403 NULL, 404 + }; 405 + 406 + static const struct attribute_group arcmsr_host_attr_group = { 407 + .attrs = arcmsr_host_attrs, 408 + }; 409 + 410 + const struct attribute_group *arcmsr_host_groups[] = { 411 + &arcmsr_host_attr_group, 412 + NULL 402 413 };
+10 -12
drivers/scsi/arcmsr/arcmsr_hba.c
··· 167 167 .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES, 168 168 .max_sectors = ARCMSR_MAX_XFER_SECTORS_C, 169 169 .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN, 170 - .shost_attrs = arcmsr_host_attrs, 170 + .shost_groups = arcmsr_host_groups, 171 171 .no_write_same = 1, 172 172 }; 173 173 ··· 1318 1318 spin_lock_irqsave(&acb->ccblist_lock, flags); 1319 1319 list_add_tail(&ccb->list, &acb->ccb_free_list); 1320 1320 spin_unlock_irqrestore(&acb->ccblist_lock, flags); 1321 - pcmd->scsi_done(pcmd); 1321 + scsi_done(pcmd); 1322 1322 } 1323 1323 1324 1324 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb) ··· 1598 1598 if (ccb->startdone == ARCMSR_CCB_START) { 1599 1599 ccb->pcmd->result = DID_NO_CONNECT << 16; 1600 1600 arcmsr_pci_unmap_dma(ccb); 1601 - ccb->pcmd->scsi_done(ccb->pcmd); 1601 + scsi_done(ccb->pcmd); 1602 1602 } 1603 1603 } 1604 1604 for (target = 0; target < ARCMSR_MAX_TARGETID; target++) { ··· 3192 3192 3193 3193 if (cmd->device->lun) { 3194 3194 cmd->result = (DID_TIME_OUT << 16); 3195 - cmd->scsi_done(cmd); 3195 + scsi_done(cmd); 3196 3196 return; 3197 3197 } 3198 3198 inqdata[0] = TYPE_PROCESSOR; ··· 3216 3216 sg = scsi_sglist(cmd); 3217 3217 kunmap_atomic(buffer - sg->offset); 3218 3218 3219 - cmd->scsi_done(cmd); 3219 + scsi_done(cmd); 3220 3220 } 3221 3221 break; 3222 3222 case WRITE_BUFFER: 3223 3223 case READ_BUFFER: { 3224 3224 if (arcmsr_iop_message_xfer(acb, cmd)) 3225 3225 cmd->result = (DID_ERROR << 16); 3226 - cmd->scsi_done(cmd); 3226 + scsi_done(cmd); 3227 3227 } 3228 3228 break; 3229 3229 default: 3230 - cmd->scsi_done(cmd); 3230 + scsi_done(cmd); 3231 3231 } 3232 3232 } 3233 3233 3234 - static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, 3235 - void (* done)(struct scsi_cmnd *)) 3234 + static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd) 3236 3235 { 3237 3236 struct Scsi_Host *host = cmd->device->host; 3238 3237 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; ··· 3240 3241 3241 
3242 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) { 3242 3243 cmd->result = (DID_NO_CONNECT << 16); 3243 - cmd->scsi_done(cmd); 3244 + scsi_done(cmd); 3244 3245 return 0; 3245 3246 } 3246 - cmd->scsi_done = done; 3247 3247 cmd->host_scribble = NULL; 3248 3248 cmd->result = 0; 3249 3249 if (target == 16) { ··· 3255 3257 return SCSI_MLQUEUE_HOST_BUSY; 3256 3258 if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) { 3257 3259 cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT; 3258 - cmd->scsi_done(cmd); 3260 + scsi_done(cmd); 3259 3261 return 0; 3260 3262 } 3261 3263 arcmsr_post_ccb(acb, ccb);
+4 -16
drivers/scsi/arm/acornscsi.c
··· 841 841 } 842 842 } 843 843 844 - if (!SCpnt->scsi_done) 845 - panic("scsi%d.H: null scsi_done function in acornscsi_done", host->host->host_no); 846 - 847 844 clear_bit(SCpnt->device->id * 8 + 848 845 (u8)(SCpnt->device->lun & 0x7), host->busyluns); 849 846 850 - SCpnt->scsi_done(SCpnt); 847 + scsi_done(SCpnt); 851 848 } else 852 849 printk("scsi%d: null command in acornscsi_done", host->host->host_no); 853 850 ··· 2397 2400 */ 2398 2401 2399 2402 /* 2400 - * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 2403 + * Function : acornscsi_queuecmd(struct scsi_cmnd *cmd) 2401 2404 * Purpose : queues a SCSI command 2402 2405 * Params : cmd - SCSI command 2403 - * done - function called on completion, with pointer to command descriptor 2404 2406 * Returns : 0, or < 0 on error. 2405 2407 */ 2406 - static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt, 2407 - void (*done)(struct scsi_cmnd *)) 2408 + static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt) 2408 2409 { 2410 + void (*done)(struct scsi_cmnd *) = scsi_done; 2409 2411 AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; 2410 - 2411 - if (!done) { 2412 - /* there should be some way of rejecting errors like this without panicing... */ 2413 - panic("scsi%d: queuecommand called with NULL done function [cmd=%p]", 2414 - host->host->host_no, SCpnt); 2415 - return -EINVAL; 2416 - } 2417 2412 2418 2413 #if (DEBUG & DEBUG_NO_WRITE) 2419 2414 if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) { ··· 2417 2428 } 2418 2429 #endif 2419 2430 2420 - SCpnt->scsi_done = done; 2421 2431 SCpnt->host_scribble = NULL; 2422 2432 SCpnt->result = 0; 2423 2433 SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
+1
drivers/scsi/arm/arxescsi.c
··· 243 243 .eh_bus_reset_handler = fas216_eh_bus_reset, 244 244 .eh_device_reset_handler = fas216_eh_device_reset, 245 245 .eh_abort_handler = fas216_eh_abort, 246 + .cmd_size = sizeof(struct fas216_cmd_priv), 246 247 .can_queue = 0, 247 248 .this_id = 7, 248 249 .sg_tablesize = SG_ALL,
+1
drivers/scsi/arm/cumana_2.c
··· 363 363 .eh_bus_reset_handler = fas216_eh_bus_reset, 364 364 .eh_device_reset_handler = fas216_eh_device_reset, 365 365 .eh_abort_handler = fas216_eh_abort, 366 + .cmd_size = sizeof(struct fas216_cmd_priv), 366 367 .can_queue = 1, 367 368 .this_id = 7, 368 369 .sg_tablesize = SG_MAX_SEGMENTS,
+1
drivers/scsi/arm/eesox.c
··· 480 480 .eh_bus_reset_handler = fas216_eh_bus_reset, 481 481 .eh_device_reset_handler = fas216_eh_device_reset, 482 482 .eh_abort_handler = fas216_eh_abort, 483 + .cmd_size = sizeof(struct fas216_cmd_priv), 483 484 .can_queue = 1, 484 485 .this_id = 7, 485 486 .sg_tablesize = SG_MAX_SEGMENTS,
+15 -11
drivers/scsi/arm/fas216.c
··· 2015 2015 * correctly by fas216_std_done. 2016 2016 */ 2017 2017 scsi_eh_restore_cmnd(SCpnt, &info->ses); 2018 - SCpnt->scsi_done(SCpnt); 2018 + fas216_cmd_priv(SCpnt)->scsi_done(SCpnt); 2019 2019 } 2020 2020 2021 2021 /** ··· 2086 2086 } 2087 2087 2088 2088 done: 2089 - if (SCpnt->scsi_done) { 2090 - SCpnt->scsi_done(SCpnt); 2089 + if (fas216_cmd_priv(SCpnt)->scsi_done) { 2090 + fas216_cmd_priv(SCpnt)->scsi_done(SCpnt); 2091 2091 return; 2092 2092 } 2093 2093 ··· 2184 2184 } 2185 2185 2186 2186 /** 2187 - * fas216_queue_command - queue a command for adapter to process. 2187 + * fas216_queue_command_internal - queue a command for the adapter to process 2188 2188 * @SCpnt: Command to queue 2189 2189 * @done: done function to call once command is complete 2190 2190 * ··· 2192 2192 * Returns: 0 on success, else error. 2193 2193 * Notes: io_request_lock is held, interrupts are disabled. 2194 2194 */ 2195 - static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt, 2196 - void (*done)(struct scsi_cmnd *)) 2195 + static int fas216_queue_command_internal(struct scsi_cmnd *SCpnt, 2196 + void (*done)(struct scsi_cmnd *)) 2197 2197 { 2198 2198 FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; 2199 2199 int result; ··· 2203 2203 fas216_log_command(info, LOG_CONNECT, SCpnt, 2204 2204 "received command (%p)", SCpnt); 2205 2205 2206 - SCpnt->scsi_done = done; 2206 + fas216_cmd_priv(SCpnt)->scsi_done = done; 2207 2207 SCpnt->host_scribble = (void *)fas216_std_done; 2208 2208 SCpnt->result = 0; 2209 2209 ··· 2233 2233 return result; 2234 2234 } 2235 2235 2236 + static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt) 2237 + { 2238 + return fas216_queue_command_internal(SCpnt, scsi_done); 2239 + } 2240 + 2236 2241 DEF_SCSI_QCMD(fas216_queue_command) 2237 2242 2238 2243 /** ··· 2263 2258 * Returns: scsi result code. 2264 2259 * Notes: io_request_lock is held, interrupts are disabled. 
2265 2260 */ 2266 - static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt, 2267 - void (*done)(struct scsi_cmnd *)) 2261 + static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt) 2268 2262 { 2269 2263 FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; 2270 2264 ··· 2276 2272 BUG_ON(info->scsi.irq); 2277 2273 2278 2274 info->internal_done = 0; 2279 - fas216_queue_command_lck(SCpnt, fas216_internal_done); 2275 + fas216_queue_command_internal(SCpnt, fas216_internal_done); 2280 2276 2281 2277 /* 2282 2278 * This wastes time, since we can't return until the command is ··· 2304 2300 2305 2301 spin_lock_irq(info->host->host_lock); 2306 2302 2307 - done(SCpnt); 2303 + scsi_done(SCpnt); 2308 2304 2309 2305 return 0; 2310 2306 }
+10
drivers/scsi/arm/fas216.h
··· 310 310 unsigned long magic_end; 311 311 } FAS216_Info; 312 312 313 + /* driver-private data per SCSI command. */ 314 + struct fas216_cmd_priv { 315 + void (*scsi_done)(struct scsi_cmnd *cmd); 316 + }; 317 + 318 + static inline struct fas216_cmd_priv *fas216_cmd_priv(struct scsi_cmnd *cmd) 319 + { 320 + return scsi_cmd_priv(cmd); 321 + } 322 + 313 323 /* Function: int fas216_init (struct Scsi_Host *instance) 314 324 * Purpose : initialise FAS/NCR/AMD SCSI structures. 315 325 * Params : instance - a driver-specific filled-out structure
+1 -1
drivers/scsi/arm/powertec.c
··· 286 286 .eh_bus_reset_handler = fas216_eh_bus_reset, 287 287 .eh_device_reset_handler = fas216_eh_device_reset, 288 288 .eh_abort_handler = fas216_eh_abort, 289 - 289 + .cmd_size = sizeof(struct fas216_cmd_priv), 290 290 .can_queue = 8, 291 291 .this_id = 7, 292 292 .sg_tablesize = SG_MAX_SEGMENTS,
+3 -14
drivers/scsi/atp870u.c
··· 512 512 scsi_dma_unmap(workreq); 513 513 514 514 spin_lock_irqsave(dev->host->host_lock, flags); 515 - (*workreq->scsi_done) (workreq); 515 + scsi_done(workreq); 516 516 #ifdef ED_DBGP 517 517 printk("workreq->scsi_done\n"); 518 518 #endif ··· 618 618 * 619 619 * Queue a command to the ATP queue. Called with the host lock held. 620 620 */ 621 - static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p, 622 - void (*done) (struct scsi_cmnd *)) 621 + static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p) 623 622 { 623 + void (*done)(struct scsi_cmnd *) = scsi_done; 624 624 unsigned char c; 625 625 unsigned int m; 626 626 struct atp_unit *dev; ··· 650 650 651 651 if ((m & dev->active_id[c]) == 0) { 652 652 req_p->result = DID_BAD_TARGET << 16; 653 - done(req_p); 654 - return 0; 655 - } 656 - 657 - if (done) { 658 - req_p->scsi_done = done; 659 - } else { 660 - #ifdef ED_DBGP 661 - printk( "atp870u_queuecommand: done can't be NULL\n"); 662 - #endif 663 - req_p->result = 0; 664 653 done(req_p); 665 654 return 0; 666 655 }
+12 -9
drivers/scsi/be2iscsi/be_main.c
··· 163 163 beiscsi_active_session_disp, NULL); 164 164 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, 165 165 beiscsi_free_session_disp, NULL); 166 - static struct device_attribute *beiscsi_attrs[] = { 167 - &dev_attr_beiscsi_log_enable, 168 - &dev_attr_beiscsi_drvr_ver, 169 - &dev_attr_beiscsi_adapter_family, 170 - &dev_attr_beiscsi_fw_ver, 171 - &dev_attr_beiscsi_active_session_count, 172 - &dev_attr_beiscsi_free_session_count, 173 - &dev_attr_beiscsi_phys_port, 166 + 167 + static struct attribute *beiscsi_attrs[] = { 168 + &dev_attr_beiscsi_log_enable.attr, 169 + &dev_attr_beiscsi_drvr_ver.attr, 170 + &dev_attr_beiscsi_adapter_family.attr, 171 + &dev_attr_beiscsi_fw_ver.attr, 172 + &dev_attr_beiscsi_active_session_count.attr, 173 + &dev_attr_beiscsi_free_session_count.attr, 174 + &dev_attr_beiscsi_phys_port.attr, 174 175 NULL, 175 176 }; 177 + 178 + ATTRIBUTE_GROUPS(beiscsi); 176 179 177 180 static char const *cqe_desc[] = { 178 181 "RESERVED_DESC", ··· 394 391 .eh_abort_handler = beiscsi_eh_abort, 395 392 .eh_device_reset_handler = beiscsi_eh_device_reset, 396 393 .eh_target_reset_handler = iscsi_eh_session_reset, 397 - .shost_attrs = beiscsi_attrs, 394 + .shost_groups = beiscsi_groups, 398 395 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 399 396 .can_queue = BE2_IO_DEPTH, 400 397 .this_id = -1,
+42 -26
drivers/scsi/bfa/bfad_attr.c
··· 956 956 static DEVICE_ATTR(number_of_discovered_ports, S_IRUGO, 957 957 bfad_im_num_of_discovered_ports_show, NULL); 958 958 959 - struct device_attribute *bfad_im_host_attrs[] = { 960 - &dev_attr_serial_number, 961 - &dev_attr_model, 962 - &dev_attr_model_description, 963 - &dev_attr_node_name, 964 - &dev_attr_symbolic_name, 965 - &dev_attr_hardware_version, 966 - &dev_attr_driver_version, 967 - &dev_attr_option_rom_version, 968 - &dev_attr_firmware_version, 969 - &dev_attr_number_of_ports, 970 - &dev_attr_driver_name, 971 - &dev_attr_number_of_discovered_ports, 959 + static struct attribute *bfad_im_host_attrs[] = { 960 + &dev_attr_serial_number.attr, 961 + &dev_attr_model.attr, 962 + &dev_attr_model_description.attr, 963 + &dev_attr_node_name.attr, 964 + &dev_attr_symbolic_name.attr, 965 + &dev_attr_hardware_version.attr, 966 + &dev_attr_driver_version.attr, 967 + &dev_attr_option_rom_version.attr, 968 + &dev_attr_firmware_version.attr, 969 + &dev_attr_number_of_ports.attr, 970 + &dev_attr_driver_name.attr, 971 + &dev_attr_number_of_discovered_ports.attr, 972 972 NULL, 973 973 }; 974 974 975 - struct device_attribute *bfad_im_vport_attrs[] = { 976 - &dev_attr_serial_number, 977 - &dev_attr_model, 978 - &dev_attr_model_description, 979 - &dev_attr_node_name, 980 - &dev_attr_symbolic_name, 981 - &dev_attr_hardware_version, 982 - &dev_attr_driver_version, 983 - &dev_attr_option_rom_version, 984 - &dev_attr_firmware_version, 985 - &dev_attr_number_of_ports, 986 - &dev_attr_driver_name, 987 - &dev_attr_number_of_discovered_ports, 975 + static const struct attribute_group bfad_im_host_attr_group = { 976 + .attrs = bfad_im_host_attrs 977 + }; 978 + 979 + const struct attribute_group *bfad_im_host_groups[] = { 980 + &bfad_im_host_attr_group, 981 + NULL 982 + }; 983 + 984 + struct attribute *bfad_im_vport_attrs[] = { 985 + &dev_attr_serial_number.attr, 986 + &dev_attr_model.attr, 987 + &dev_attr_model_description.attr, 988 + &dev_attr_node_name.attr, 989 + 
&dev_attr_symbolic_name.attr, 990 + &dev_attr_hardware_version.attr, 991 + &dev_attr_driver_version.attr, 992 + &dev_attr_option_rom_version.attr, 993 + &dev_attr_firmware_version.attr, 994 + &dev_attr_number_of_ports.attr, 995 + &dev_attr_driver_name.attr, 996 + &dev_attr_number_of_discovered_ports.attr, 988 997 NULL, 989 998 }; 990 999 1000 + static const struct attribute_group bfad_im_vport_attr_group = { 1001 + .attrs = bfad_im_vport_attrs 1002 + }; 991 1003 1004 + const struct attribute_group *bfad_im_vport_groups[] = { 1005 + &bfad_im_vport_attr_group, 1006 + NULL 1007 + };
+7 -9
drivers/scsi/bfa/bfad_im.c
··· 96 96 } 97 97 } 98 98 99 - cmnd->scsi_done(cmnd); 99 + scsi_done(cmnd); 100 100 } 101 101 102 102 void ··· 124 124 } 125 125 } 126 126 127 - cmnd->scsi_done(cmnd); 127 + scsi_done(cmnd); 128 128 } 129 129 130 130 void ··· 226 226 timeout *= 2; 227 227 } 228 228 229 - cmnd->scsi_done(cmnd); 229 + scsi_done(cmnd); 230 230 bfa_trc(bfad, hal_io->iotag); 231 231 BFA_LOG(KERN_INFO, bfad, bfa_log_level, 232 232 "scsi%d: complete abort 0x%p iotag 0x%x\n", ··· 809 809 .this_id = -1, 810 810 .sg_tablesize = BFAD_IO_MAX_SGE, 811 811 .cmd_per_lun = 3, 812 - .shost_attrs = bfad_im_host_attrs, 812 + .shost_groups = bfad_im_host_groups, 813 813 .max_sectors = BFAD_MAX_SECTORS, 814 814 .vendor_id = BFA_PCI_VENDOR_ID_BROCADE, 815 815 }; ··· 831 831 .this_id = -1, 832 832 .sg_tablesize = BFAD_IO_MAX_SGE, 833 833 .cmd_per_lun = 3, 834 - .shost_attrs = bfad_im_vport_attrs, 834 + .shost_groups = bfad_im_vport_groups, 835 835 .max_sectors = BFAD_MAX_SECTORS, 836 836 }; 837 837 ··· 1199 1199 /* 1200 1200 * Scsi_Host template entry, queue a SCSI command to the BFAD. 1201 1201 */ 1202 - static int 1203 - bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 1202 + static int bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd) 1204 1203 { 1204 + void (*done)(struct scsi_cmnd *) = scsi_done; 1205 1205 struct bfad_im_port_s *im_port = 1206 1206 (struct bfad_im_port_s *) cmnd->device->host->hostdata[0]; 1207 1207 struct bfad_s *bfad = im_port->bfad; ··· 1232 1232 sg_cnt = scsi_dma_map(cmnd); 1233 1233 if (sg_cnt < 0) 1234 1234 return SCSI_MLQUEUE_HOST_BUSY; 1235 - 1236 - cmnd->scsi_done = done; 1237 1235 1238 1236 spin_lock_irqsave(&bfad->bfad_lock, flags); 1239 1237 if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
+2 -2
drivers/scsi/bfa/bfad_im.h
··· 174 174 extern struct scsi_transport_template *bfad_im_scsi_transport_template; 175 175 extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template; 176 176 177 - extern struct device_attribute *bfad_im_host_attrs[]; 178 - extern struct device_attribute *bfad_im_vport_attrs[]; 177 + extern const struct attribute_group *bfad_im_host_groups[]; 178 + extern const struct attribute_group *bfad_im_vport_groups[]; 179 179 180 180 irqreturn_t bfad_intx(int irq, void *dev_id); 181 181
+5 -3
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 2951 2951 static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show, 2952 2952 bnx2fc_tm_timeout_store); 2953 2953 2954 - static struct device_attribute *bnx2fc_host_attrs[] = { 2955 - &dev_attr_tm_timeout, 2954 + static struct attribute *bnx2fc_host_attrs[] = { 2955 + &dev_attr_tm_timeout.attr, 2956 2956 NULL, 2957 2957 }; 2958 + 2959 + ATTRIBUTE_GROUPS(bnx2fc_host); 2958 2960 2959 2961 /* 2960 2962 * scsi_host_template structure used while registering with SCSI-ml ··· 2979 2977 .max_sectors = 0x3fbf, 2980 2978 .track_queue_depth = 1, 2981 2979 .slave_configure = bnx2fc_slave_configure, 2982 - .shost_attrs = bnx2fc_host_attrs, 2980 + .shost_groups = bnx2fc_host_groups, 2983 2981 }; 2984 2982 2985 2983 static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
+4 -4
drivers/scsi/bnx2fc/bnx2fc_io.c
··· 205 205 sc_cmd->allowed); 206 206 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); 207 207 sc_cmd->SCp.ptr = NULL; 208 - sc_cmd->scsi_done(sc_cmd); 208 + scsi_done(sc_cmd); 209 209 } 210 210 211 211 struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) ··· 1610 1610 } 1611 1611 1612 1612 sc_cmd->SCp.ptr = NULL; 1613 - sc_cmd->scsi_done(sc_cmd); 1613 + scsi_done(sc_cmd); 1614 1614 1615 1615 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1616 1616 if (io_req->wait_for_abts_comp) { ··· 1853 1853 rval = fc_remote_port_chkready(rport); 1854 1854 if (rval) { 1855 1855 sc_cmd->result = rval; 1856 - sc_cmd->scsi_done(sc_cmd); 1856 + scsi_done(sc_cmd); 1857 1857 return 0; 1858 1858 } 1859 1859 ··· 2019 2019 break; 2020 2020 } 2021 2021 sc_cmd->SCp.ptr = NULL; 2022 - sc_cmd->scsi_done(sc_cmd); 2022 + scsi_done(sc_cmd); 2023 2023 kref_put(&io_req->refcount, bnx2fc_cmd_release); 2024 2024 } 2025 2025
+1 -1
drivers/scsi/bnx2i/bnx2i.h
··· 795 795 extern unsigned int sq_size; 796 796 extern unsigned int rq_size; 797 797 798 - extern struct device_attribute *bnx2i_dev_attributes[]; 798 + extern const struct attribute_group *bnx2i_dev_groups[]; 799 799 800 800 801 801
+1 -1
drivers/scsi/bnx2i/bnx2i_iscsi.c
··· 2266 2266 .cmd_per_lun = 128, 2267 2267 .this_id = -1, 2268 2268 .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, 2269 - .shost_attrs = bnx2i_dev_attributes, 2269 + .shost_groups = bnx2i_dev_groups, 2270 2270 .track_queue_depth = 1, 2271 2271 }; 2272 2272
+12 -3
drivers/scsi/bnx2i/bnx2i_sysfs.c
··· 142 142 static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR, 143 143 bnx2i_show_ccell_info, bnx2i_set_ccell_info); 144 144 145 - struct device_attribute *bnx2i_dev_attributes[] = { 146 - &dev_attr_sq_size, 147 - &dev_attr_num_ccell, 145 + static struct attribute *bnx2i_dev_attributes[] = { 146 + &dev_attr_sq_size.attr, 147 + &dev_attr_num_ccell.attr, 148 + NULL 149 + }; 150 + 151 + static const struct attribute_group bnx2i_dev_attr_group = { 152 + .attrs = bnx2i_dev_attributes 153 + }; 154 + 155 + const struct attribute_group *bnx2i_dev_groups[] = { 156 + &bnx2i_dev_attr_group, 148 157 NULL 149 158 };
+1 -1
drivers/scsi/csiostor/csio_lnode.c
··· 619 619 struct fc_els_csp *csp; 620 620 struct fc_els_cssp *clsp; 621 621 enum fw_retval retval; 622 - __be32 nport_id; 622 + __be32 nport_id = 0; 623 623 624 624 retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); 625 625 if (retval != FW_SUCCESS) {
+18 -14
drivers/scsi/csiostor/csio_scsi.c
··· 1460 1460 static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level, 1461 1461 csio_store_dbg_level); 1462 1462 1463 - static struct device_attribute *csio_fcoe_lport_attrs[] = { 1464 - &dev_attr_hw_state, 1465 - &dev_attr_device_reset, 1466 - &dev_attr_disable_port, 1467 - &dev_attr_dbg_level, 1463 + static struct attribute *csio_fcoe_lport_attrs[] = { 1464 + &dev_attr_hw_state.attr, 1465 + &dev_attr_device_reset.attr, 1466 + &dev_attr_disable_port.attr, 1467 + &dev_attr_dbg_level.attr, 1468 1468 NULL, 1469 1469 }; 1470 + 1471 + ATTRIBUTE_GROUPS(csio_fcoe_lport); 1470 1472 1471 1473 static ssize_t 1472 1474 csio_show_num_reg_rnodes(struct device *dev, ··· 1481 1479 1482 1480 static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL); 1483 1481 1484 - static struct device_attribute *csio_fcoe_vport_attrs[] = { 1485 - &dev_attr_num_reg_rnodes, 1486 - &dev_attr_dbg_level, 1482 + static struct attribute *csio_fcoe_vport_attrs[] = { 1483 + &dev_attr_num_reg_rnodes.attr, 1484 + &dev_attr_dbg_level.attr, 1487 1485 NULL, 1488 1486 }; 1487 + 1488 + ATTRIBUTE_GROUPS(csio_fcoe_vport); 1489 1489 1490 1490 static inline uint32_t 1491 1491 csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req) ··· 1724 1720 } 1725 1721 1726 1722 cmnd->result = (((host_status) << 16) | scsi_status); 1727 - cmnd->scsi_done(cmnd); 1723 + scsi_done(cmnd); 1728 1724 1729 1725 /* Wake up waiting threads */ 1730 1726 csio_scsi_cmnd(req) = NULL; ··· 1752 1748 } 1753 1749 1754 1750 cmnd->result = (((host_status) << 16) | scsi_status); 1755 - cmnd->scsi_done(cmnd); 1751 + scsi_done(cmnd); 1756 1752 csio_scsi_cmnd(req) = NULL; 1757 1753 CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success); 1758 1754 } else { ··· 1880 1876 return rv; 1881 1877 1882 1878 err_done: 1883 - cmnd->scsi_done(cmnd); 1879 + scsi_done(cmnd); 1884 1880 return 0; 1885 1881 } 1886 1882 ··· 1983 1979 spin_unlock_irq(&hw->lock); 1984 1980 1985 1981 cmnd->result = (DID_ERROR << 16); 1986 - 
cmnd->scsi_done(cmnd); 1982 + scsi_done(cmnd); 1987 1983 1988 1984 return FAILED; 1989 1985 } ··· 2281 2277 .this_id = -1, 2282 2278 .sg_tablesize = CSIO_SCSI_MAX_SGE, 2283 2279 .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, 2284 - .shost_attrs = csio_fcoe_lport_attrs, 2280 + .shost_groups = csio_fcoe_lport_groups, 2285 2281 .max_sectors = CSIO_MAX_SECTOR_SIZE, 2286 2282 }; 2287 2283 ··· 2300 2296 .this_id = -1, 2301 2297 .sg_tablesize = CSIO_SCSI_MAX_SGE, 2302 2298 .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, 2303 - .shost_attrs = csio_fcoe_vport_attrs, 2299 + .shost_groups = csio_fcoe_vport_groups, 2304 2300 .max_sectors = CSIO_MAX_SECTOR_SIZE, 2305 2301 }; 2306 2302
+25 -21
drivers/scsi/cxlflash/main.c
··· 171 171 172 172 dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n", 173 173 __func__, scp, scp->result, cmd->sa.ioasc); 174 - scp->scsi_done(scp); 174 + scsi_done(scp); 175 175 } else if (cmd->cmd_tmf) { 176 176 spin_lock_irqsave(&cfg->tmf_slock, lock_flags); 177 177 cfg->tmf_active = false; ··· 205 205 if (cmd->scp) { 206 206 scp = cmd->scp; 207 207 scp->result = (DID_IMM_RETRY << 16); 208 - scp->scsi_done(scp); 208 + scsi_done(scp); 209 209 } else { 210 210 cmd->cmd_aborted = true; 211 211 ··· 601 601 case STATE_FAILTERM: 602 602 dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__); 603 603 scp->result = (DID_NO_CONNECT << 16); 604 - scp->scsi_done(scp); 604 + scsi_done(scp); 605 605 rc = 0; 606 606 goto out; 607 607 default: ··· 3103 3103 static DEVICE_ATTR_RW(num_hwqs); 3104 3104 static DEVICE_ATTR_RW(hwq_mode); 3105 3105 3106 - static struct device_attribute *cxlflash_host_attrs[] = { 3107 - &dev_attr_port0, 3108 - &dev_attr_port1, 3109 - &dev_attr_port2, 3110 - &dev_attr_port3, 3111 - &dev_attr_lun_mode, 3112 - &dev_attr_ioctl_version, 3113 - &dev_attr_port0_lun_table, 3114 - &dev_attr_port1_lun_table, 3115 - &dev_attr_port2_lun_table, 3116 - &dev_attr_port3_lun_table, 3117 - &dev_attr_irqpoll_weight, 3118 - &dev_attr_num_hwqs, 3119 - &dev_attr_hwq_mode, 3106 + static struct attribute *cxlflash_host_attrs[] = { 3107 + &dev_attr_port0.attr, 3108 + &dev_attr_port1.attr, 3109 + &dev_attr_port2.attr, 3110 + &dev_attr_port3.attr, 3111 + &dev_attr_lun_mode.attr, 3112 + &dev_attr_ioctl_version.attr, 3113 + &dev_attr_port0_lun_table.attr, 3114 + &dev_attr_port1_lun_table.attr, 3115 + &dev_attr_port2_lun_table.attr, 3116 + &dev_attr_port3_lun_table.attr, 3117 + &dev_attr_irqpoll_weight.attr, 3118 + &dev_attr_num_hwqs.attr, 3119 + &dev_attr_hwq_mode.attr, 3120 3120 NULL 3121 3121 }; 3122 + 3123 + ATTRIBUTE_GROUPS(cxlflash_host); 3122 3124 3123 3125 /* 3124 3126 * Device attributes 3125 3127 */ 3126 3128 static DEVICE_ATTR_RO(mode); 3127 3129 
3128 - static struct device_attribute *cxlflash_dev_attrs[] = { 3129 - &dev_attr_mode, 3130 + static struct attribute *cxlflash_dev_attrs[] = { 3131 + &dev_attr_mode.attr, 3130 3132 NULL 3131 3133 }; 3134 + 3135 + ATTRIBUTE_GROUPS(cxlflash_dev); 3132 3136 3133 3137 /* 3134 3138 * Host template ··· 3154 3150 .this_id = -1, 3155 3151 .sg_tablesize = 1, /* No scatter gather support */ 3156 3152 .max_sectors = CXLFLASH_MAX_SECTORS, 3157 - .shost_attrs = cxlflash_host_attrs, 3158 - .sdev_attrs = cxlflash_dev_attrs, 3153 + .shost_groups = cxlflash_host_groups, 3154 + .sdev_groups = cxlflash_dev_groups, 3159 3155 }; 3160 3156 3161 3157 /*
+6 -6
drivers/scsi/dc395x.c
··· 960 960 * and is expected to be held on return. 961 961 * 962 962 **/ 963 - static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 963 + static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) 964 964 { 965 + void (*done)(struct scsi_cmnd *) = scsi_done; 965 966 struct DeviceCtlBlk *dcb; 966 967 struct ScsiReqBlk *srb; 967 968 struct AdapterCtlBlk *acb = ··· 996 995 goto complete; 997 996 } 998 997 999 - /* set callback and clear result in the command */ 1000 - cmd->scsi_done = done; 1001 998 set_host_byte(cmd, DID_OK); 1002 999 set_status_byte(cmd, SAM_STAT_GOOD); 1003 1000 ··· 3335 3336 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n"); 3336 3337 } 3337 3338 3338 - cmd->scsi_done(cmd); 3339 + scsi_done(cmd); 3339 3340 waiting_process_next(acb); 3340 3341 } 3341 3342 ··· 3366 3367 if (force) { 3367 3368 /* For new EH, we normally don't need to give commands back, 3368 3369 * as they all complete or all time out */ 3369 - p->scsi_done(p); 3370 + scsi_done(p); 3370 3371 } 3371 3372 } 3372 3373 if (!list_empty(&dcb->srb_going_list)) ··· 3393 3394 if (force) { 3394 3395 /* For new EH, we normally don't need to give commands back, 3395 3396 * as they all complete or all time out */ 3396 - cmd->scsi_done(cmd); 3397 + scsi_done(cmd); 3397 3398 } 3398 3399 } 3399 3400 if (!list_empty(&dcb->srb_waiting_list)) ··· 4617 4618 /* initialise the adapter and everything we need */ 4618 4619 if (adapter_init(acb, io_port_base, io_port_len, irq)) { 4619 4620 dprintkl(KERN_INFO, "adapter init failed\n"); 4621 + acb = NULL; 4620 4622 goto fail; 4621 4623 } 4622 4624
+5 -8
drivers/scsi/dpt_i2o.c
··· 416 416 return 0; 417 417 } 418 418 419 - static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) 419 + static int adpt_queue_lck(struct scsi_cmnd *cmd) 420 420 { 421 421 adpt_hba* pHba = NULL; 422 422 struct adpt_device* pDev = NULL; /* dpt per device information */ 423 423 424 - cmd->scsi_done = done; 425 424 /* 426 425 * SCSI REQUEST_SENSE commands will be executed automatically by the 427 426 * Host Adapter for any errors, so they should not be executed ··· 430 431 431 432 if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) { 432 433 cmd->result = (DID_OK << 16); 433 - cmd->scsi_done(cmd); 434 + scsi_done(cmd); 434 435 return 0; 435 436 } 436 437 ··· 455 456 // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response 456 457 // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue. 457 458 cmd->result = (DID_NO_CONNECT << 16); 458 - cmd->scsi_done(cmd); 459 + scsi_done(cmd); 459 460 return 0; 460 461 } 461 462 cmd->device->hostdata = pDev; ··· 2226 2227 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n", 2227 2228 pHba->name, cmd->cmnd[0]); 2228 2229 cmd->result = (DID_ERROR <<16); 2229 - cmd->scsi_done(cmd); 2230 + scsi_done(cmd); 2230 2231 return 0; 2231 2232 } 2232 2233 } ··· 2450 2451 2451 2452 cmd->result |= (dev_status); 2452 2453 2453 - if(cmd->scsi_done != NULL){ 2454 - cmd->scsi_done(cmd); 2455 - } 2454 + scsi_done(cmd); 2456 2455 } 2457 2456 2458 2457
+2 -4
drivers/scsi/elx/efct/efct_driver.c
··· 541 541 542 542 pci_set_drvdata(pdev, efct); 543 543 544 - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 || 545 - pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 544 + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) { 546 545 dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n"); 547 - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 || 548 - pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 546 + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 549 547 dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n"); 550 548 rc = -1; 551 549 goto dma_mask_out;
+2 -2
drivers/scsi/elx/efct/efct_lio.c
··· 382 382 struct efct_scsi_tgt_io *ocp = &io->tgt_io; 383 383 struct se_cmd *cmd = &ocp->cmd; 384 384 385 - ocp->seg_map_cnt = pci_map_sg(io->efct->pci, cmd->t_data_sg, 385 + ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg, 386 386 cmd->t_data_nents, cmd->data_direction); 387 387 if (ocp->seg_map_cnt == 0) 388 388 return -EFAULT; ··· 398 398 if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg)) 399 399 return; 400 400 401 - pci_unmap_sg(io->efct->pci, cmd->t_data_sg, 401 + dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg, 402 402 ocp->seg_map_cnt, cmd->data_direction); 403 403 ocp->seg_map_cnt = 0; 404 404 }
+1 -2
drivers/scsi/elx/efct/efct_scsi.c
··· 38 38 39 39 xport = efct->xport; 40 40 41 - spin_lock_irqsave(&node->active_ios_lock, flags); 42 - 43 41 io = efct_io_pool_io_alloc(efct->xport->io_pool); 44 42 if (!io) { 45 43 efc_log_err(efct, "IO alloc Failed\n"); ··· 63 65 64 66 /* Add to node's active_ios list */ 65 67 INIT_LIST_HEAD(&io->list_entry); 68 + spin_lock_irqsave(&node->active_ios_lock, flags); 66 69 list_add(&io->list_entry, &node->active_ios); 67 70 68 71 spin_unlock_irqrestore(&node->active_ios_lock, flags);
+1 -1
drivers/scsi/elx/libefc/efc.h
··· 47 47 48 48 #define nport_sm_trace(nport) \ 49 49 efc_log_debug(nport->efc, \ 50 - "[%s] %-20s\n", nport->display_name, efc_sm_event_name(evt)) \ 50 + "[%s] %-20s %-20s\n", nport->display_name, __func__, efc_sm_event_name(evt)) \ 51 51 52 52 #endif /* __EFC_H__ */
+6 -1
drivers/scsi/elx/libefc/efc_cmds.c
··· 249 249 { 250 250 struct efc_nport *nport = arg; 251 251 252 + nport->attaching = false; 252 253 if (efc_nport_get_mbox_status(nport, mqe, status)) { 253 254 efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe); 254 255 return -EIO; ··· 287 286 if (rc) { 288 287 efc_log_err(efc, "REG_VPI command failure\n"); 289 288 efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf); 289 + } else { 290 + nport->attaching = true; 290 291 } 291 292 292 293 return rc; ··· 305 302 /* Issue the UNREG_VPI command to free the assigned VPI context */ 306 303 if (nport->attached) 307 304 efc_nport_free_unreg_vpi(nport); 308 - else 305 + else if (nport->attaching) 309 306 nport->free_req_pending = true; 307 + else 308 + efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL); 310 309 311 310 return 0; 312 311 }
+1 -1
drivers/scsi/elx/libefc/efc_fabric.c
··· 685 685 } 686 686 687 687 /* Allocate a buffer for all nodes */ 688 - active_nodes = kzalloc(port_count * sizeof(*active_nodes), GFP_ATOMIC); 688 + active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC); 689 689 if (!active_nodes) { 690 690 node_printf(node, "efc_malloc failed\n"); 691 691 return -EIO;
+1
drivers/scsi/elx/libefc/efclib.h
··· 142 142 bool is_vport; 143 143 bool free_req_pending; 144 144 bool attached; 145 + bool attaching; 145 146 bool p2p_winner; 146 147 struct efc_domain *domain; 147 148 u64 wwpn;
+4 -4
drivers/scsi/esas2r/esas2r_main.c
··· 828 828 829 829 if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) { 830 830 cmd->result = DID_NO_CONNECT << 16; 831 - cmd->scsi_done(cmd); 831 + scsi_done(cmd); 832 832 return 0; 833 833 } 834 834 ··· 988 988 989 989 scsi_set_resid(cmd, 0); 990 990 991 - cmd->scsi_done(cmd); 991 + scsi_done(cmd); 992 992 993 993 return SUCCESS; 994 994 } ··· 1054 1054 1055 1055 scsi_set_resid(cmd, 0); 1056 1056 1057 - cmd->scsi_done(cmd); 1057 + scsi_done(cmd); 1058 1058 1059 1059 return SUCCESS; 1060 1060 } ··· 1535 1535 scsi_set_resid(rq->cmd, 0); 1536 1536 } 1537 1537 1538 - rq->cmd->scsi_done(rq->cmd); 1538 + scsi_done(rq->cmd); 1539 1539 1540 1540 esas2r_free_request(a, rq); 1541 1541 }
+5 -7
drivers/scsi/esp_scsi.c
··· 936 936 } 937 937 } 938 938 939 - cmd->scsi_done(cmd); 939 + scsi_done(cmd); 940 940 941 941 list_del(&ent->list); 942 942 esp_put_ent(esp, ent); ··· 952 952 scsi_track_queue_full(dev, lp->num_tagged - 1); 953 953 } 954 954 955 - static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 955 + static int esp_queuecommand_lck(struct scsi_cmnd *cmd) 956 956 { 957 957 struct scsi_device *dev = cmd->device; 958 958 struct esp *esp = shost_priv(dev->host); ··· 964 964 return SCSI_MLQUEUE_HOST_BUSY; 965 965 966 966 ent->cmd = cmd; 967 - 968 - cmd->scsi_done = done; 969 967 970 968 spriv = ESP_CMD_PRIV(cmd); 971 969 spriv->num_sg = 0; ··· 2036 2038 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) 2037 2039 esp_unmap_sense(esp, ent); 2038 2040 2039 - cmd->scsi_done(cmd); 2041 + scsi_done(cmd); 2040 2042 list_del(&ent->list); 2041 2043 esp_put_ent(esp, ent); 2042 2044 } ··· 2059 2061 2060 2062 list_del(&ent->list); 2061 2063 cmd->result = DID_RESET << 16; 2062 - cmd->scsi_done(cmd); 2064 + scsi_done(cmd); 2063 2065 esp_put_ent(esp, ent); 2064 2066 } 2065 2067 ··· 2533 2535 list_del(&ent->list); 2534 2536 2535 2537 cmd->result = DID_ABORT << 16; 2536 - cmd->scsi_done(cmd); 2538 + scsi_done(cmd); 2537 2539 2538 2540 esp_put_ent(esp, ent); 2539 2541
+1 -1
drivers/scsi/fcoe/fcoe.c
··· 307 307 } 308 308 309 309 /* Do not support for bonding device */ 310 - if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) { 310 + if (netif_is_bond_master(netdev)) { 311 311 FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n"); 312 312 return -EOPNOTSUPP; 313 313 }
+1 -1
drivers/scsi/fdomain.c
··· 206 206 { 207 207 outb(0, fd->base + REG_ICTL); 208 208 fdomain_make_bus_idle(fd); 209 - fd->cur_cmd->scsi_done(fd->cur_cmd); 209 + scsi_done(fd->cur_cmd); 210 210 fd->cur_cmd = NULL; 211 211 } 212 212
+1 -1
drivers/scsi/fnic/fnic.h
··· 322 322 323 323 extern struct workqueue_struct *fnic_event_queue; 324 324 extern struct workqueue_struct *fnic_fip_queue; 325 - extern struct device_attribute *fnic_attrs[]; 325 + extern const struct attribute_group *fnic_host_groups[]; 326 326 327 327 void fnic_clear_intr_mode(struct fnic *fnic); 328 328 int fnic_set_intr_mode(struct fnic *fnic);
+13 -4
drivers/scsi/fnic/fnic_attrs.c
··· 48 48 static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL); 49 49 static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL); 50 50 51 - struct device_attribute *fnic_attrs[] = { 52 - &dev_attr_fnic_state, 53 - &dev_attr_drv_version, 54 - &dev_attr_link_state, 51 + static struct attribute *fnic_host_attrs[] = { 52 + &dev_attr_fnic_state.attr, 53 + &dev_attr_drv_version.attr, 54 + &dev_attr_link_state.attr, 55 55 NULL, 56 + }; 57 + 58 + static const struct attribute_group fnic_host_attr_group = { 59 + .attrs = fnic_host_attrs 60 + }; 61 + 62 + const struct attribute_group *fnic_host_groups[] = { 63 + &fnic_host_attr_group, 64 + NULL 56 65 };
+1 -1
drivers/scsi/fnic/fnic_main.c
··· 122 122 .can_queue = FNIC_DFLT_IO_REQ, 123 123 .sg_tablesize = FNIC_MAX_SG_DESC_CNT, 124 124 .max_sectors = 0xffff, 125 - .shost_attrs = fnic_attrs, 125 + .shost_groups = fnic_host_groups, 126 126 .track_queue_depth = 1, 127 127 }; 128 128
+54 -64
drivers/scsi/fnic/fnic_scsi.c
··· 420 420 * Routine to send a scsi cdb 421 421 * Called with host_lock held and interrupts disabled. 422 422 */ 423 - static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) 423 + static int fnic_queuecommand_lck(struct scsi_cmnd *sc) 424 424 { 425 + void (*done)(struct scsi_cmnd *) = scsi_done; 425 426 const int tag = scsi_cmd_to_rq(sc)->tag; 426 427 struct fc_lport *lp = shost_priv(sc->device->host); 427 428 struct fc_rport *rport; ··· 561 560 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; 562 561 CMD_SP(sc) = (char *)io_req; 563 562 CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED; 564 - sc->scsi_done = done; 565 563 566 564 /* create copy wq desc and enqueue it */ 567 565 wq = &fnic->wq_copy[0]; ··· 1051 1051 } 1052 1052 1053 1053 /* Call SCSI completion function to complete the IO */ 1054 - if (sc->scsi_done) 1055 - sc->scsi_done(sc); 1054 + scsi_done(sc); 1056 1055 } 1057 1056 1058 1057 /* fnic_fcpio_itmf_cmpl_handler ··· 1192 1193 1193 1194 fnic_release_ioreq_buf(fnic, io_req, sc); 1194 1195 mempool_free(io_req, fnic->io_req_pool); 1195 - if (sc->scsi_done) { 1196 - FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, 1197 - sc->device->host->host_no, id, 1198 - sc, 1199 - jiffies_to_msecs(jiffies - start_time), 1200 - desc, 1201 - (((u64)hdr_status << 40) | 1202 - (u64)sc->cmnd[0] << 32 | 1203 - (u64)sc->cmnd[2] << 24 | 1204 - (u64)sc->cmnd[3] << 16 | 1205 - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1206 - (((u64)CMD_FLAGS(sc) << 32) | 1207 - CMD_STATE(sc))); 1208 - sc->scsi_done(sc); 1209 - atomic64_dec(&fnic_stats->io_stats.active_ios); 1210 - if (atomic64_read(&fnic->io_cmpl_skip)) 1211 - atomic64_dec(&fnic->io_cmpl_skip); 1212 - else 1213 - atomic64_inc(&fnic_stats->io_stats.io_completions); 1214 - } 1196 + FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler, 1197 + sc->device->host->host_no, id, 1198 + sc, 1199 + jiffies_to_msecs(jiffies - start_time), 1200 + desc, 1201 + (((u64)hdr_status << 40) | 1202 + (u64)sc->cmnd[0] << 32 | 1203 + (u64)sc->cmnd[2] << 24 | 
1204 + (u64)sc->cmnd[3] << 16 | 1205 + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1206 + (((u64)CMD_FLAGS(sc) << 32) | 1207 + CMD_STATE(sc))); 1208 + scsi_done(sc); 1209 + atomic64_dec(&fnic_stats->io_stats.active_ios); 1210 + if (atomic64_read(&fnic->io_cmpl_skip)) 1211 + atomic64_dec(&fnic->io_cmpl_skip); 1212 + else 1213 + atomic64_inc(&fnic_stats->io_stats.io_completions); 1215 1214 } 1216 - 1217 1215 } else if (id & FNIC_TAG_DEV_RST) { 1218 1216 /* Completion of device reset */ 1219 1217 CMD_LR_STATUS(sc) = hdr_status; ··· 1417 1421 atomic64_inc(&fnic_stats->io_stats.io_completions); 1418 1422 1419 1423 /* Complete the command to SCSI */ 1420 - if (sc->scsi_done) { 1421 - if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) 1422 - shost_printk(KERN_ERR, fnic->lport->host, 1423 - "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", 1424 - tag, sc); 1424 + if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) 1425 + shost_printk(KERN_ERR, fnic->lport->host, 1426 + "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", 1427 + tag, sc); 1425 1428 1426 - FNIC_TRACE(fnic_cleanup_io, 1427 - sc->device->host->host_no, tag, sc, 1428 - jiffies_to_msecs(jiffies - start_time), 1429 - 0, ((u64)sc->cmnd[0] << 32 | 1430 - (u64)sc->cmnd[2] << 24 | 1431 - (u64)sc->cmnd[3] << 16 | 1432 - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1433 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1429 + FNIC_TRACE(fnic_cleanup_io, 1430 + sc->device->host->host_no, tag, sc, 1431 + jiffies_to_msecs(jiffies - start_time), 1432 + 0, ((u64)sc->cmnd[0] << 32 | 1433 + (u64)sc->cmnd[2] << 24 | 1434 + (u64)sc->cmnd[3] << 16 | 1435 + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1436 + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1434 1437 1435 - sc->scsi_done(sc); 1436 - } 1438 + scsi_done(sc); 1439 + 1437 1440 return true; 1438 1441 } 1439 1442 ··· 1490 1495 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:" 1491 1496 " DID_NO_CONNECT\n"); 1492 1497 1493 - if (sc->scsi_done) { 1494 - 
FNIC_TRACE(fnic_wq_copy_cleanup_handler, 1495 - sc->device->host->host_no, id, sc, 1496 - jiffies_to_msecs(jiffies - start_time), 1497 - 0, ((u64)sc->cmnd[0] << 32 | 1498 - (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | 1499 - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1500 - (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1498 + FNIC_TRACE(fnic_wq_copy_cleanup_handler, 1499 + sc->device->host->host_no, id, sc, 1500 + jiffies_to_msecs(jiffies - start_time), 1501 + 0, ((u64)sc->cmnd[0] << 32 | 1502 + (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | 1503 + (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), 1504 + (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); 1501 1505 1502 - sc->scsi_done(sc); 1503 - } 1506 + scsi_done(sc); 1504 1507 } 1505 1508 1506 1509 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, ··· 1924 1931 fnic_release_ioreq_buf(fnic, io_req, sc); 1925 1932 mempool_free(io_req, fnic->io_req_pool); 1926 1933 1927 - if (sc->scsi_done) { 1928 1934 /* Call SCSI completion function to complete the IO */ 1929 - sc->result = (DID_ABORT << 16); 1930 - sc->scsi_done(sc); 1931 - atomic64_dec(&fnic_stats->io_stats.active_ios); 1932 - if (atomic64_read(&fnic->io_cmpl_skip)) 1933 - atomic64_dec(&fnic->io_cmpl_skip); 1934 - else 1935 - atomic64_inc(&fnic_stats->io_stats.io_completions); 1936 - } 1935 + sc->result = DID_ABORT << 16; 1936 + scsi_done(sc); 1937 + atomic64_dec(&fnic_stats->io_stats.active_ios); 1938 + if (atomic64_read(&fnic->io_cmpl_skip)) 1939 + atomic64_dec(&fnic->io_cmpl_skip); 1940 + else 1941 + atomic64_inc(&fnic_stats->io_stats.io_completions); 1937 1942 1938 1943 fnic_abort_cmd_end: 1939 1944 FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc, ··· 2144 2153 * Any IO is returned during reset, it needs to call scsi_done 2145 2154 * to return the scsi_cmnd to upper layer. 
2146 2155 */ 2147 - if (sc->scsi_done) { 2148 - /* Set result to let upper SCSI layer retry */ 2149 - sc->result = DID_RESET << 16; 2150 - sc->scsi_done(sc); 2151 - } 2156 + /* Set result to let upper SCSI layer retry */ 2157 + sc->result = DID_RESET << 16; 2158 + scsi_done(sc); 2159 + 2152 2160 return true; 2153 2161 } 2154 2162
+2 -1
drivers/scsi/hisi_sas/hisi_sas.h
··· 35 35 #define HISI_SAS_QUEUE_SLOTS 4096 36 36 #define HISI_SAS_MAX_ITCT_ENTRIES 1024 37 37 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES 38 - #define HISI_SAS_RESET_BIT 0 38 + #define HISI_SAS_RESETTING_BIT 0 39 39 #define HISI_SAS_REJECT_CMD_BIT 1 40 40 #define HISI_SAS_PM_BIT 2 41 41 #define HISI_SAS_HW_FAULT_BIT 3 ··· 649 649 extern int hisi_sas_remove(struct platform_device *pdev); 650 650 651 651 extern int hisi_sas_slave_configure(struct scsi_device *sdev); 652 + extern int hisi_sas_slave_alloc(struct scsi_device *sdev); 652 653 extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time); 653 654 extern void hisi_sas_scan_start(struct Scsi_Host *shost); 654 655 extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
+82 -33
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 724 724 */ 725 725 local_phy = sas_get_local_phy(device); 726 726 if (!scsi_is_sas_phy_local(local_phy) && 727 - !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 727 + !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 728 728 unsigned long deadline = ata_deadline(jiffies, 20000); 729 729 struct sata_device *sata_dev = &device->sata_dev; 730 730 struct ata_host *ata_host = sata_dev->ata_host; ··· 755 755 756 756 return rc; 757 757 } 758 + 759 + int hisi_sas_slave_alloc(struct scsi_device *sdev) 760 + { 761 + struct domain_device *ddev; 762 + int rc; 763 + 764 + rc = sas_slave_alloc(sdev); 765 + if (rc) 766 + return rc; 767 + ddev = sdev_to_domain_dev(sdev); 768 + 769 + return hisi_sas_init_device(ddev); 770 + } 771 + EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); 758 772 759 773 static int hisi_sas_dev_found(struct domain_device *device) 760 774 { ··· 816 802 dev_info(dev, "dev[%d:%x] found\n", 817 803 sas_dev->device_id, sas_dev->dev_type); 818 804 819 - rc = hisi_sas_init_device(device); 820 - if (rc) 821 - goto err_out; 822 805 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 823 806 return 0; 824 807 ··· 1083 1072 sas_dev->device_id, sas_dev->dev_type); 1084 1073 1085 1074 down(&hisi_hba->sem); 1086 - if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 1075 + if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1087 1076 hisi_sas_internal_task_abort(hisi_hba, device, 1088 1077 HISI_SAS_INT_ABT_DEV, 0, true); 1089 1078 ··· 1146 1135 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1147 1136 void *funcdata) 1148 1137 { 1138 + struct hisi_sas_phy *phy = container_of(sas_phy, 1139 + struct hisi_sas_phy, sas_phy); 1149 1140 struct sas_ha_struct *sas_ha = sas_phy->ha; 1150 1141 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1142 + struct device *dev = hisi_hba->dev; 1143 + DECLARE_COMPLETION_ONSTACK(completion); 1151 1144 int phy_no = sas_phy->id; 1145 + u8 sts = phy->phy_attached; 1146 + int ret = 0; 1147 + 1148 + 
phy->reset_completion = &completion; 1152 1149 1153 1150 switch (func) { 1154 1151 case PHY_FUNC_HARD_RESET: ··· 1171 1152 1172 1153 case PHY_FUNC_DISABLE: 1173 1154 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1174 - break; 1155 + goto out; 1175 1156 1176 1157 case PHY_FUNC_SET_LINK_RATE: 1177 - return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1158 + ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1159 + break; 1160 + 1178 1161 case PHY_FUNC_GET_EVENTS: 1179 1162 if (hisi_hba->hw->get_events) { 1180 1163 hisi_hba->hw->get_events(hisi_hba, phy_no); 1181 - break; 1164 + goto out; 1182 1165 } 1183 1166 fallthrough; 1184 1167 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1185 1168 default: 1186 - return -EOPNOTSUPP; 1169 + ret = -EOPNOTSUPP; 1170 + goto out; 1187 1171 } 1188 - return 0; 1172 + 1173 + if (sts && !wait_for_completion_timeout(&completion, 2 * HZ)) { 1174 + dev_warn(dev, "phy%d wait phyup timed out for func %d\n", 1175 + phy_no, func); 1176 + if (phy->in_reset) 1177 + ret = -ETIMEDOUT; 1178 + } 1179 + 1180 + out: 1181 + phy->reset_completion = NULL; 1182 + 1183 + return ret; 1189 1184 } 1190 1185 1191 1186 static void hisi_sas_task_done(struct sas_task *task) 1192 1187 { 1193 - del_timer(&task->slow_task->timer); 1188 + del_timer_sync(&task->slow_task->timer); 1194 1189 complete(&task->slow_task->completion); 1195 1190 } 1196 1191 ··· 1262 1229 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); 1263 1230 1264 1231 if (res) { 1265 - del_timer(&task->slow_task->timer); 1232 + del_timer_sync(&task->slow_task->timer); 1266 1233 dev_err(dev, "abort tmf: executing internal task failed: %d\n", 1267 1234 res); 1268 1235 goto ex_err; ··· 1587 1554 scsi_block_requests(shost); 1588 1555 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1589 1556 1590 - if (timer_pending(&hisi_hba->timer)) 1591 - del_timer_sync(&hisi_hba->timer); 1557 + del_timer_sync(&hisi_hba->timer); 1592 1558 1593 1559 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 
1594 1560 } ··· 1608 1576 hisi_sas_reset_init_all_devices(hisi_hba); 1609 1577 up(&hisi_hba->sem); 1610 1578 scsi_unblock_requests(shost); 1611 - clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1579 + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1612 1580 1613 1581 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); 1614 1582 } ··· 1619 1587 if (!hisi_hba->hw->soft_reset) 1620 1588 return -1; 1621 1589 1622 - if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 1590 + if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 1623 1591 return -1; 1624 1592 1625 1593 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) ··· 1643 1611 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1644 1612 up(&hisi_hba->sem); 1645 1613 scsi_unblock_requests(shost); 1646 - clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1614 + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1647 1615 return rc; 1648 1616 } 1649 1617 ··· 1805 1773 struct hisi_sas_device *sas_dev = device->lldd_dev; 1806 1774 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1807 1775 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1808 - DECLARE_COMPLETION_ONSTACK(phyreset); 1809 1776 int rc, reset_type; 1810 1777 1811 1778 if (!local_phy->enabled) { ··· 1817 1786 sas_ha->sas_phy[local_phy->number]; 1818 1787 struct hisi_sas_phy *phy = 1819 1788 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1789 + unsigned long flags; 1790 + 1791 + spin_lock_irqsave(&phy->lock, flags); 1820 1792 phy->in_reset = 1; 1821 - phy->reset_completion = &phyreset; 1793 + spin_unlock_irqrestore(&phy->lock, flags); 1822 1794 } 1823 1795 1824 1796 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || ··· 1835 1801 sas_ha->sas_phy[local_phy->number]; 1836 1802 struct hisi_sas_phy *phy = 1837 1803 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1838 - int ret = wait_for_completion_timeout(&phyreset, 1839 - I_T_NEXUS_RESET_PHYUP_TIMEOUT); 1840 1804 unsigned long flags; 1841 1805 1842 
1806 spin_lock_irqsave(&phy->lock, flags); 1843 - phy->reset_completion = NULL; 1844 1807 phy->in_reset = 0; 1845 1808 spin_unlock_irqrestore(&phy->lock, flags); 1846 1809 1847 1810 /* report PHY down if timed out */ 1848 - if (!ret) 1811 + if (rc == -ETIMEDOUT) 1849 1812 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); 1850 1813 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { 1851 1814 /* ··· 1870 1839 } 1871 1840 hisi_sas_dereg_device(hisi_hba, device); 1872 1841 1873 - if (dev_is_sata(device)) { 1874 - rc = hisi_sas_softreset_ata_disk(device); 1875 - if (rc == TMF_RESP_FUNC_FAILED) 1876 - return TMF_RESP_FUNC_FAILED; 1877 - } 1878 - 1879 1842 rc = hisi_sas_debug_I_T_nexus_reset(device); 1843 + if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { 1844 + struct sas_phy *local_phy; 1845 + 1846 + rc = hisi_sas_softreset_ata_disk(device); 1847 + switch (rc) { 1848 + case -ECOMM: 1849 + rc = -ENODEV; 1850 + break; 1851 + case TMF_RESP_FUNC_FAILED: 1852 + case -EMSGSIZE: 1853 + case -EIO: 1854 + local_phy = sas_get_local_phy(device); 1855 + rc = sas_phy_enable(local_phy, 0); 1856 + if (!rc) { 1857 + local_phy->enabled = 0; 1858 + dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", 1859 + SAS_ADDR(device->sas_addr), rc); 1860 + rc = -ENODEV; 1861 + } 1862 + sas_put_local_phy(local_phy); 1863 + break; 1864 + default: 1865 + break; 1866 + } 1867 + } 1880 1868 1881 1869 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1882 1870 hisi_sas_release_task(hisi_hba, device); ··· 2147 2097 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, 2148 2098 task, abort_flag, tag, dq); 2149 2099 if (res) { 2150 - del_timer(&task->slow_task->timer); 2100 + del_timer_sync(&task->slow_task->timer); 2151 2101 dev_err(dev, "internal task abort: executing internal task failed: %d\n", 2152 2102 res); 2153 2103 goto exit; ··· 2301 2251 } else { 2302 2252 struct hisi_sas_port *port = phy->port; 2303 2253 2304 - if 
(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) || 2254 + if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || 2305 2255 phy->in_reset) { 2306 2256 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 2307 2257 return; ··· 2819 2769 struct hisi_hba *hisi_hba = sha->lldd_ha; 2820 2770 struct Scsi_Host *shost = sha->core.shost; 2821 2771 2822 - if (timer_pending(&hisi_hba->timer)) 2823 - del_timer(&hisi_hba->timer); 2772 + del_timer_sync(&hisi_hba->timer); 2824 2773 2825 2774 sas_unregister_ha(sha); 2826 2775 sas_remove_host(sha->core.shost);
+9 -14
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
··· 1327 1327 u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; 1328 1328 struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; 1329 1329 irqreturn_t res = IRQ_HANDLED; 1330 - unsigned long flags; 1331 1330 1332 1331 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); 1333 1332 if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) { ··· 1379 1380 phy->identify.target_port_protocols = 1380 1381 SAS_PROTOCOL_SMP; 1381 1382 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); 1382 - 1383 - spin_lock_irqsave(&phy->lock, flags); 1384 - if (phy->reset_completion) { 1385 - phy->in_reset = 0; 1386 - complete(phy->reset_completion); 1387 - } 1388 - spin_unlock_irqrestore(&phy->lock, flags); 1389 - 1390 1383 end: 1384 + if (phy->reset_completion) 1385 + complete(phy->reset_completion); 1391 1386 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, 1392 1387 CHL_INT2_SL_PHY_ENA_MSK); 1393 1388 ··· 1415 1422 goto end; 1416 1423 } 1417 1424 1418 - if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 1425 + if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 1419 1426 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, 1420 1427 GFP_ATOMIC); 1421 1428 ··· 1742 1749 return 0; 1743 1750 } 1744 1751 1745 - static struct device_attribute *host_attrs_v1_hw[] = { 1746 - &dev_attr_phy_event_threshold, 1752 + static struct attribute *host_v1_hw_attrs[] = { 1753 + &dev_attr_phy_event_threshold.attr, 1747 1754 NULL 1748 1755 }; 1756 + 1757 + ATTRIBUTE_GROUPS(host_v1_hw); 1749 1758 1750 1759 static struct scsi_host_template sht_v1_hw = { 1751 1760 .name = DRV_NAME, ··· 1766 1771 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 1767 1772 .eh_device_reset_handler = sas_eh_device_reset_handler, 1768 1773 .eh_target_reset_handler = sas_eh_target_reset_handler, 1769 - .slave_alloc = sas_slave_alloc, 1774 + .slave_alloc = hisi_sas_slave_alloc, 1770 1775 .target_destroy = sas_target_destroy, 1771 1776 .ioctl = sas_ioctl, 1772 1777 #ifdef CONFIG_COMPAT 1773 1778 .compat_ioctl = sas_ioctl, 
1774 1779 #endif 1775 - .shost_attrs = host_attrs_v1_hw, 1780 + .shost_groups = host_v1_hw_groups, 1776 1781 .host_reset = hisi_sas_host_reset, 1777 1782 }; 1778 1783
+13 -22
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
··· 2368 2368 case STAT_IO_COMPLETE: 2369 2369 /* internal abort command complete */ 2370 2370 ts->stat = TMF_RESP_FUNC_SUCC; 2371 - del_timer(&slot->internal_abort_timer); 2371 + del_timer_sync(&slot->internal_abort_timer); 2372 2372 goto out; 2373 2373 case STAT_IO_NO_DEVICE: 2374 2374 ts->stat = TMF_RESP_FUNC_COMPLETE; 2375 - del_timer(&slot->internal_abort_timer); 2375 + del_timer_sync(&slot->internal_abort_timer); 2376 2376 goto out; 2377 2377 case STAT_IO_NOT_VALID: 2378 2378 /* abort single io, controller don't find 2379 2379 * the io need to abort 2380 2380 */ 2381 2381 ts->stat = TMF_RESP_FUNC_FAILED; 2382 - del_timer(&slot->internal_abort_timer); 2382 + del_timer_sync(&slot->internal_abort_timer); 2383 2383 goto out; 2384 2384 default: 2385 2385 break; ··· 2641 2641 struct device *dev = hisi_hba->dev; 2642 2642 u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; 2643 2643 struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; 2644 - unsigned long flags; 2645 2644 2646 2645 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); 2647 2646 ··· 2695 2696 set_link_timer_quirk(hisi_hba); 2696 2697 } 2697 2698 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); 2698 - spin_lock_irqsave(&phy->lock, flags); 2699 - if (phy->reset_completion) { 2700 - phy->in_reset = 0; 2701 - complete(phy->reset_completion); 2702 - } 2703 - spin_unlock_irqrestore(&phy->lock, flags); 2704 - 2705 2699 end: 2700 + if (phy->reset_completion) 2701 + complete(phy->reset_completion); 2706 2702 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 2707 2703 CHL_INT0_SL_PHY_ENABLE_MSK); 2708 2704 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); ··· 2818 2824 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); 2819 2825 bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); 2820 2826 if ((bcast_status & RX_BCAST_CHG_MSK) && 2821 - !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 2827 + !test_bit(HISI_SAS_RESETTING_BIT, 
&hisi_hba->flags)) 2822 2828 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, 2823 2829 GFP_ATOMIC); 2824 2830 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, ··· 3198 3204 u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; 3199 3205 irqreturn_t res = IRQ_HANDLED; 3200 3206 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; 3201 - unsigned long flags; 3202 3207 int phy_no, offset; 3203 3208 3204 3209 del_timer(&phy->timer); ··· 3273 3280 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; 3274 3281 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); 3275 3282 3276 - spin_lock_irqsave(&phy->lock, flags); 3277 - if (phy->reset_completion) { 3278 - phy->in_reset = 0; 3283 + if (phy->reset_completion) 3279 3284 complete(phy->reset_completion); 3280 - } 3281 - spin_unlock_irqrestore(&phy->lock, flags); 3282 3285 end: 3283 3286 hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); 3284 3287 hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); ··· 3531 3542 3532 3543 } 3533 3544 3534 - static struct device_attribute *host_attrs_v2_hw[] = { 3535 - &dev_attr_phy_event_threshold, 3545 + static struct attribute *host_v2_hw_attrs[] = { 3546 + &dev_attr_phy_event_threshold.attr, 3536 3547 NULL 3537 3548 }; 3549 + 3550 + ATTRIBUTE_GROUPS(host_v2_hw); 3538 3551 3539 3552 static int map_queues_v2_hw(struct Scsi_Host *shost) 3540 3553 { ··· 3575 3584 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 3576 3585 .eh_device_reset_handler = sas_eh_device_reset_handler, 3577 3586 .eh_target_reset_handler = sas_eh_target_reset_handler, 3578 - .slave_alloc = sas_slave_alloc, 3587 + .slave_alloc = hisi_sas_slave_alloc, 3579 3588 .target_destroy = sas_target_destroy, 3580 3589 .ioctl = sas_ioctl, 3581 3590 #ifdef CONFIG_COMPAT 3582 3591 .compat_ioctl = sas_ioctl, 3583 3592 #endif 3584 - .shost_attrs = host_attrs_v2_hw, 3593 + .shost_groups = host_v2_hw_groups, 3585 3594 .host_reset = hisi_sas_host_reset, 3586 3595 .map_queues = map_queues_v2_hw, 3587 3596 
.host_tagset = 1,
+27 -35
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
··· 519 519 #define CHNL_INT_STS_INT2_MSK BIT(3) 520 520 #define CHNL_WIDTH 4 521 521 522 + #define BAR_NO_V3_HW 5 523 + 522 524 enum { 523 525 DSM_FUNC_ERR_HANDLE_MSI = 0, 524 526 }; ··· 1483 1481 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1484 1482 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1485 1483 struct device *dev = hisi_hba->dev; 1486 - unsigned long flags; 1487 1484 1488 1485 del_timer(&phy->timer); 1489 1486 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); ··· 1564 1563 phy->phy_attached = 1; 1565 1564 hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); 1566 1565 res = IRQ_HANDLED; 1567 - spin_lock_irqsave(&phy->lock, flags); 1568 - if (phy->reset_completion) { 1569 - phy->in_reset = 0; 1570 - complete(phy->reset_completion); 1571 - } 1572 - spin_unlock_irqrestore(&phy->lock, flags); 1573 1566 end: 1567 + if (phy->reset_completion) 1568 + complete(phy->reset_completion); 1574 1569 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, 1575 1570 CHL_INT0_SL_PHY_ENABLE_MSK); 1576 1571 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); ··· 1613 1616 hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); 1614 1617 bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); 1615 1618 if ((bcast_status & RX_BCAST_CHG_MSK) && 1616 - !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 1619 + !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 1617 1620 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, 1618 1621 GFP_ATOMIC); 1619 1622 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, ··· 2767 2770 return 0; 2768 2771 } 2769 2772 2770 - static struct device_attribute *host_attrs_v3_hw[] = { 2771 - &dev_attr_phy_event_threshold, 2772 - &dev_attr_intr_conv_v3_hw, 2773 - &dev_attr_intr_coal_ticks_v3_hw, 2774 - &dev_attr_intr_coal_count_v3_hw, 2773 + static struct attribute *host_v3_hw_attrs[] = { 2774 + &dev_attr_phy_event_threshold.attr, 2775 + &dev_attr_intr_conv_v3_hw.attr, 2776 + 
&dev_attr_intr_coal_ticks_v3_hw.attr, 2777 + &dev_attr_intr_coal_count_v3_hw.attr, 2775 2778 NULL 2776 2779 }; 2780 + 2781 + ATTRIBUTE_GROUPS(host_v3_hw); 2777 2782 2778 2783 #define HISI_SAS_DEBUGFS_REG(x) {#x, x} 2779 2784 ··· 3155 3156 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 3156 3157 .eh_device_reset_handler = sas_eh_device_reset_handler, 3157 3158 .eh_target_reset_handler = sas_eh_target_reset_handler, 3158 - .slave_alloc = sas_slave_alloc, 3159 + .slave_alloc = hisi_sas_slave_alloc, 3159 3160 .target_destroy = sas_target_destroy, 3160 3161 .ioctl = sas_ioctl, 3161 3162 #ifdef CONFIG_COMPAT 3162 3163 .compat_ioctl = sas_ioctl, 3163 3164 #endif 3164 - .shost_attrs = host_attrs_v3_hw, 3165 + .shost_groups = host_v3_hw_groups, 3165 3166 .tag_alloc_policy = BLK_TAG_ALLOC_RR, 3166 3167 .host_reset = hisi_sas_host_reset, 3167 3168 .host_tagset = 1, ··· 3686 3687 3687 3688 do_div(timestamp, NSEC_PER_MSEC); 3688 3689 hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp; 3689 - hisi_hba->debugfs_dump_index++; 3690 3690 3691 3691 debugfs_snapshot_prepare_v3_hw(hisi_hba); 3692 3692 ··· 3701 3703 debugfs_create_files_v3_hw(hisi_hba); 3702 3704 3703 3705 debugfs_snapshot_restore_v3_hw(hisi_hba); 3706 + hisi_hba->debugfs_dump_index++; 3704 3707 } 3705 3708 3706 3709 static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file, ··· 4676 4677 struct sas_ha_struct *sha; 4677 4678 int rc, phy_nr, port_nr, i; 4678 4679 4679 - rc = pci_enable_device(pdev); 4680 + rc = pcim_enable_device(pdev); 4680 4681 if (rc) 4681 4682 goto err_out; 4682 4683 4683 4684 pci_set_master(pdev); 4684 4685 4685 - rc = pci_request_regions(pdev, DRV_NAME); 4686 + rc = pcim_iomap_regions(pdev, 1 << BAR_NO_V3_HW, DRV_NAME); 4686 4687 if (rc) 4687 - goto err_out_disable_device; 4688 + goto err_out; 4688 4689 4689 4690 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 4690 4691 if (rc) ··· 4692 4693 if (rc) { 4693 4694 dev_err(dev, "No usable DMA addressing method\n"); 4694 4695 rc 
= -ENODEV; 4695 - goto err_out_regions; 4696 + goto err_out; 4696 4697 } 4697 4698 4698 4699 shost = hisi_sas_shost_alloc_pci(pdev); 4699 4700 if (!shost) { 4700 4701 rc = -ENOMEM; 4701 - goto err_out_regions; 4702 + goto err_out; 4702 4703 } 4703 4704 4704 4705 sha = SHOST_TO_SAS_HA(shost); 4705 4706 hisi_hba = shost_priv(shost); 4706 4707 dev_set_drvdata(dev, sha); 4707 4708 4708 - hisi_hba->regs = pcim_iomap(pdev, 5, 0); 4709 + hisi_hba->regs = pcim_iomap_table(pdev)[BAR_NO_V3_HW]; 4709 4710 if (!hisi_hba->regs) { 4710 4711 dev_err(dev, "cannot map register\n"); 4711 4712 rc = -ENOMEM; ··· 4760 4761 rc = interrupt_preinit_v3_hw(hisi_hba); 4761 4762 if (rc) 4762 4763 goto err_out_debugfs; 4763 - dev_err(dev, "%d hw queues\n", shost->nr_hw_queues); 4764 + 4764 4765 rc = scsi_add_host(shost, dev); 4765 4766 if (rc) 4766 4767 goto err_out_free_irq_vectors; ··· 4799 4800 err_out_ha: 4800 4801 hisi_sas_free(hisi_hba); 4801 4802 scsi_host_put(shost); 4802 - err_out_regions: 4803 - pci_release_regions(pdev); 4804 - err_out_disable_device: 4805 - pci_disable_device(pdev); 4806 4803 err_out: 4807 4804 return rc; 4808 4805 } ··· 4828 4833 struct Scsi_Host *shost = sha->core.shost; 4829 4834 4830 4835 pm_runtime_get_noresume(dev); 4831 - if (timer_pending(&hisi_hba->timer)) 4832 - del_timer(&hisi_hba->timer); 4836 + del_timer_sync(&hisi_hba->timer); 4833 4837 4834 4838 sas_unregister_ha(sha); 4835 4839 flush_workqueue(hisi_hba->wq); 4836 4840 sas_remove_host(sha->core.shost); 4837 4841 4838 4842 hisi_sas_v3_destroy_irqs(pdev, hisi_hba); 4839 - pci_release_regions(pdev); 4840 - pci_disable_device(pdev); 4841 4843 hisi_sas_free(hisi_hba); 4842 4844 debugfs_exit_v3_hw(hisi_hba); 4843 4845 scsi_host_put(shost); ··· 4848 4856 int rc; 4849 4857 4850 4858 dev_info(dev, "FLR prepare\n"); 4851 - set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 4859 + set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 4852 4860 hisi_sas_controller_reset_prepare(hisi_hba); 4853 4861 4854 4862 rc = 
disable_host_v3_hw(hisi_hba); ··· 4894 4902 return -ENODEV; 4895 4903 } 4896 4904 4897 - if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 4905 + if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 4898 4906 return -1; 4899 4907 4900 4908 scsi_block_requests(shost); ··· 4905 4913 if (rc) { 4906 4914 dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc); 4907 4915 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 4908 - clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 4916 + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 4909 4917 scsi_unblock_requests(shost); 4910 4918 return rc; 4911 4919 } ··· 4944 4952 } 4945 4953 phys_init_v3_hw(hisi_hba); 4946 4954 sas_resume_ha(sha); 4947 - clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 4955 + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 4948 4956 4949 4957 return 0; 4950 4958 }
+14 -3
drivers/scsi/hosts.c
··· 377 377 struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) 378 378 { 379 379 struct Scsi_Host *shost; 380 - int index; 380 + int index, i, j = 0; 381 381 382 382 shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL); 383 383 if (!shost) ··· 476 476 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); 477 477 shost->shost_gendev.bus = &scsi_bus_type; 478 478 shost->shost_gendev.type = &scsi_host_type; 479 + scsi_enable_async_suspend(&shost->shost_gendev); 479 480 480 481 device_initialize(&shost->shost_dev); 481 482 shost->shost_dev.parent = &shost->shost_gendev; 482 483 shost->shost_dev.class = &shost_class; 483 484 dev_set_name(&shost->shost_dev, "host%d", shost->host_no); 484 - shost->shost_dev.groups = scsi_sysfs_shost_attr_groups; 485 + shost->shost_dev.groups = shost->shost_dev_attr_groups; 486 + shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group; 487 + if (sht->shost_groups) { 488 + for (i = 0; sht->shost_groups[i] && 489 + j < ARRAY_SIZE(shost->shost_dev_attr_groups); 490 + i++, j++) { 491 + shost->shost_dev_attr_groups[j] = 492 + sht->shost_groups[i]; 493 + } 494 + } 495 + WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups)); 485 496 486 497 shost->ehandler = kthread_run(scsi_error_handler, shost, 487 498 "scsi_eh_%d", shost->host_no); ··· 678 667 scsi_dma_unmap(scmd); 679 668 scmd->result = 0; 680 669 set_host_byte(scmd, status); 681 - scmd->scsi_done(scmd); 670 + scsi_done(scmd); 682 671 return true; 683 672 } 684 673
+30 -26
drivers/scsi/hpsa.c
··· 936 936 static DEVICE_ATTR(legacy_board, S_IRUGO, 937 937 host_show_legacy_board, NULL); 938 938 939 - static struct device_attribute *hpsa_sdev_attrs[] = { 940 - &dev_attr_raid_level, 941 - &dev_attr_lunid, 942 - &dev_attr_unique_id, 943 - &dev_attr_hp_ssd_smart_path_enabled, 944 - &dev_attr_path_info, 945 - &dev_attr_sas_address, 939 + static struct attribute *hpsa_sdev_attrs[] = { 940 + &dev_attr_raid_level.attr, 941 + &dev_attr_lunid.attr, 942 + &dev_attr_unique_id.attr, 943 + &dev_attr_hp_ssd_smart_path_enabled.attr, 944 + &dev_attr_path_info.attr, 945 + &dev_attr_sas_address.attr, 946 946 NULL, 947 947 }; 948 948 949 - static struct device_attribute *hpsa_shost_attrs[] = { 950 - &dev_attr_rescan, 951 - &dev_attr_firmware_revision, 952 - &dev_attr_commands_outstanding, 953 - &dev_attr_transport_mode, 954 - &dev_attr_resettable, 955 - &dev_attr_hp_ssd_smart_path_status, 956 - &dev_attr_raid_offload_debug, 957 - &dev_attr_lockup_detected, 958 - &dev_attr_ctlr_num, 959 - &dev_attr_legacy_board, 949 + ATTRIBUTE_GROUPS(hpsa_sdev); 950 + 951 + static struct attribute *hpsa_shost_attrs[] = { 952 + &dev_attr_rescan.attr, 953 + &dev_attr_firmware_revision.attr, 954 + &dev_attr_commands_outstanding.attr, 955 + &dev_attr_transport_mode.attr, 956 + &dev_attr_resettable.attr, 957 + &dev_attr_hp_ssd_smart_path_status.attr, 958 + &dev_attr_raid_offload_debug.attr, 959 + &dev_attr_lockup_detected.attr, 960 + &dev_attr_ctlr_num.attr, 961 + &dev_attr_legacy_board.attr, 960 962 NULL, 961 963 }; 964 + 965 + ATTRIBUTE_GROUPS(hpsa_shost); 962 966 963 967 #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\ 964 968 HPSA_MAX_CONCURRENT_PASSTHRUS) ··· 984 980 #ifdef CONFIG_COMPAT 985 981 .compat_ioctl = hpsa_compat_ioctl, 986 982 #endif 987 - .sdev_attrs = hpsa_sdev_attrs, 988 - .shost_attrs = hpsa_shost_attrs, 983 + .sdev_groups = hpsa_sdev_groups, 984 + .shost_groups = hpsa_shost_groups, 989 985 .max_sectors = 2048, 990 986 .no_write_same = 1, 991 987 }; ··· 2486 2482 
struct CommandList *c, struct scsi_cmnd *cmd) 2487 2483 { 2488 2484 hpsa_cmd_resolve_and_free(h, c); 2489 - if (cmd && cmd->scsi_done) 2490 - cmd->scsi_done(cmd); 2485 + if (cmd) 2486 + scsi_done(cmd); 2491 2487 } 2492 2488 2493 2489 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) ··· 5675 5671 * if it encountered a dma mapping failure. 5676 5672 */ 5677 5673 cmd->result = DID_IMM_RETRY << 16; 5678 - cmd->scsi_done(cmd); 5674 + scsi_done(cmd); 5679 5675 } 5680 5676 } 5681 5677 ··· 5695 5691 dev = cmd->device->hostdata; 5696 5692 if (!dev) { 5697 5693 cmd->result = DID_NO_CONNECT << 16; 5698 - cmd->scsi_done(cmd); 5694 + scsi_done(cmd); 5699 5695 return 0; 5700 5696 } 5701 5697 5702 5698 if (dev->removed) { 5703 5699 cmd->result = DID_NO_CONNECT << 16; 5704 - cmd->scsi_done(cmd); 5700 + scsi_done(cmd); 5705 5701 return 0; 5706 5702 } 5707 5703 5708 5704 if (unlikely(lockup_detected(h))) { 5709 5705 cmd->result = DID_NO_CONNECT << 16; 5710 - cmd->scsi_done(cmd); 5706 + scsi_done(cmd); 5711 5707 return 0; 5712 5708 } 5713 5709
+9 -11
drivers/scsi/hptiop.c
··· 769 769 770 770 skip_resid: 771 771 dprintk("scsi_done(%p)\n", scp); 772 - scp->scsi_done(scp); 772 + scsi_done(scp); 773 773 free_req(hba, &hba->reqs[tag]); 774 774 } 775 775 ··· 993 993 return 0; 994 994 } 995 995 996 - static int hptiop_queuecommand_lck(struct scsi_cmnd *scp, 997 - void (*done)(struct scsi_cmnd *)) 996 + static int hptiop_queuecommand_lck(struct scsi_cmnd *scp) 998 997 { 999 998 struct Scsi_Host *host = scp->device->host; 1000 999 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; 1001 1000 struct hpt_iop_request_scsi_command *req; 1002 1001 int sg_count = 0; 1003 1002 struct hptiop_request *_req; 1004 - 1005 - BUG_ON(!done); 1006 - scp->scsi_done = done; 1007 1003 1008 1004 _req = get_req(hba); 1009 1005 if (_req == NULL) { ··· 1055 1059 1056 1060 cmd_done: 1057 1061 dprintk("scsi_done(scp=%p)\n", scp); 1058 - scp->scsi_done(scp); 1062 + scsi_done(scp); 1059 1063 return 0; 1060 1064 } 1061 1065 ··· 1146 1150 .show = hptiop_show_fw_version, 1147 1151 }; 1148 1152 1149 - static struct device_attribute *hptiop_attrs[] = { 1150 - &hptiop_attr_version, 1151 - &hptiop_attr_fw_version, 1153 + static struct attribute *hptiop_host_attrs[] = { 1154 + &hptiop_attr_version.attr, 1155 + &hptiop_attr_fw_version.attr, 1152 1156 NULL 1153 1157 }; 1158 + 1159 + ATTRIBUTE_GROUPS(hptiop_host); 1154 1160 1155 1161 static int hptiop_slave_config(struct scsi_device *sdev) 1156 1162 { ··· 1170 1172 .info = hptiop_info, 1171 1173 .emulated = 0, 1172 1174 .proc_name = driver_name, 1173 - .shost_attrs = hptiop_attrs, 1175 + .shost_groups = hptiop_host_groups, 1174 1176 .slave_configure = hptiop_slave_config, 1175 1177 .this_id = -1, 1176 1178 .change_queue_depth = hptiop_adjust_disk_queue_depth,
+16 -14
drivers/scsi/ibmvscsi/ibmvfc.c
··· 1046 1046 1047 1047 if (cmnd) { 1048 1048 scsi_dma_unmap(cmnd); 1049 - cmnd->scsi_done(cmnd); 1049 + scsi_done(cmnd); 1050 1050 } 1051 1051 1052 1052 ibmvfc_free_event(evt); ··· 1849 1849 cmnd->result = (DID_ERROR << 16); 1850 1850 1851 1851 scsi_dma_unmap(cmnd); 1852 - cmnd->scsi_done(cmnd); 1852 + scsi_done(cmnd); 1853 1853 } 1854 1854 1855 1855 ibmvfc_free_event(evt); ··· 1935 1935 if (unlikely((rc = fc_remote_port_chkready(rport))) || 1936 1936 unlikely((rc = ibmvfc_host_chkready(vhost)))) { 1937 1937 cmnd->result = rc; 1938 - cmnd->scsi_done(cmnd); 1938 + scsi_done(cmnd); 1939 1939 return 0; 1940 1940 } 1941 1941 ··· 1975 1975 "Failed to map DMA buffer for command. rc=%d\n", rc); 1976 1976 1977 1977 cmnd->result = DID_ERROR << 16; 1978 - cmnd->scsi_done(cmnd); 1978 + scsi_done(cmnd); 1979 1979 return 0; 1980 1980 } 1981 1981 ··· 3589 3589 }; 3590 3590 #endif 3591 3591 3592 - static struct device_attribute *ibmvfc_attrs[] = { 3593 - &dev_attr_partition_name, 3594 - &dev_attr_device_name, 3595 - &dev_attr_port_loc_code, 3596 - &dev_attr_drc_name, 3597 - &dev_attr_npiv_version, 3598 - &dev_attr_capabilities, 3599 - &dev_attr_log_level, 3600 - &dev_attr_nr_scsi_channels, 3592 + static struct attribute *ibmvfc_host_attrs[] = { 3593 + &dev_attr_partition_name.attr, 3594 + &dev_attr_device_name.attr, 3595 + &dev_attr_port_loc_code.attr, 3596 + &dev_attr_drc_name.attr, 3597 + &dev_attr_npiv_version.attr, 3598 + &dev_attr_capabilities.attr, 3599 + &dev_attr_log_level.attr, 3600 + &dev_attr_nr_scsi_channels.attr, 3601 3601 NULL 3602 3602 }; 3603 + 3604 + ATTRIBUTE_GROUPS(ibmvfc_host); 3603 3605 3604 3606 static struct scsi_host_template driver_template = { 3605 3607 .module = THIS_MODULE, ··· 3623 3621 .this_id = -1, 3624 3622 .sg_tablesize = SG_ALL, 3625 3623 .max_sectors = IBMVFC_MAX_SECTORS, 3626 - .shost_attrs = ibmvfc_attrs, 3624 + .shost_groups = ibmvfc_host_groups, 3627 3625 .track_queue_depth = 1, 3628 3626 .host_tagset = 1, 3629 3627 };
+15 -13
drivers/scsi/ibmvscsi/ibmvscsi.c
··· 454 454 pool->iu_storage = 455 455 dma_alloc_coherent(hostdata->dev, 456 456 pool->size * sizeof(*pool->iu_storage), 457 - &pool->iu_token, 0); 457 + &pool->iu_token, GFP_KERNEL); 458 458 if (!pool->iu_storage) { 459 459 kfree(pool->events); 460 460 return -ENOMEM; ··· 1039 1039 * @cmnd: struct scsi_cmnd to be executed 1040 1040 * @done: Callback function to be called when cmd is completed 1041 1041 */ 1042 - static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, 1043 - void (*done) (struct scsi_cmnd *)) 1042 + static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd) 1044 1043 { 1044 + void (*done)(struct scsi_cmnd *) = scsi_done; 1045 1045 struct srp_cmd *srp_cmd; 1046 1046 struct srp_event_struct *evt_struct; 1047 1047 struct srp_indirect_buf *indirect; ··· 2065 2065 return 0; 2066 2066 } 2067 2067 2068 - static struct device_attribute *ibmvscsi_attrs[] = { 2069 - &ibmvscsi_host_vhost_loc, 2070 - &ibmvscsi_host_vhost_name, 2071 - &ibmvscsi_host_srp_version, 2072 - &ibmvscsi_host_partition_name, 2073 - &ibmvscsi_host_partition_number, 2074 - &ibmvscsi_host_mad_version, 2075 - &ibmvscsi_host_os_type, 2076 - &ibmvscsi_host_config, 2068 + static struct attribute *ibmvscsi_host_attrs[] = { 2069 + &ibmvscsi_host_vhost_loc.attr, 2070 + &ibmvscsi_host_vhost_name.attr, 2071 + &ibmvscsi_host_srp_version.attr, 2072 + &ibmvscsi_host_partition_name.attr, 2073 + &ibmvscsi_host_partition_number.attr, 2074 + &ibmvscsi_host_mad_version.attr, 2075 + &ibmvscsi_host_os_type.attr, 2076 + &ibmvscsi_host_config.attr, 2077 2077 NULL 2078 2078 }; 2079 + 2080 + ATTRIBUTE_GROUPS(ibmvscsi_host); 2079 2081 2080 2082 /* ------------------------------------------------------------ 2081 2083 * SCSI driver registration ··· 2098 2096 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT, 2099 2097 .this_id = -1, 2100 2098 .sg_tablesize = SG_ALL, 2101 - .shost_attrs = ibmvscsi_attrs, 2099 + .shost_groups = ibmvscsi_host_groups, 2102 2100 }; 2103 2101 2104 2102 /**
+4 -38
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 3948 3948 NULL, 3949 3949 }; 3950 3950 3951 - static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item, 3952 - char *page) 3953 - { 3954 - struct se_portal_group *se_tpg = to_tpg(item); 3955 - struct ibmvscsis_tport *tport = container_of(se_tpg, 3956 - struct ibmvscsis_tport, 3957 - se_tpg); 3958 3951 3959 - return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0); 3960 - } 3961 - 3962 - static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item, 3963 - const char *page, size_t count) 3952 + static int ibmvscsis_enable_tpg(struct se_portal_group *se_tpg, bool enable) 3964 3953 { 3965 - struct se_portal_group *se_tpg = to_tpg(item); 3966 3954 struct ibmvscsis_tport *tport = container_of(se_tpg, 3967 3955 struct ibmvscsis_tport, 3968 3956 se_tpg); 3969 3957 struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport); 3970 - unsigned long tmp; 3971 - int rc; 3972 3958 long lrc; 3973 3959 3974 - rc = kstrtoul(page, 0, &tmp); 3975 - if (rc < 0) { 3976 - dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n"); 3977 - return -EINVAL; 3978 - } 3979 - 3980 - if ((tmp != 0) && (tmp != 1)) { 3981 - dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n"); 3982 - return -EINVAL; 3983 - } 3984 - 3985 - if (tmp) { 3960 + if (enable) { 3986 3961 spin_lock_bh(&vscsi->intr_lock); 3987 3962 tport->enabled = true; 3988 3963 lrc = ibmvscsis_enable_change_state(vscsi); ··· 3973 3998 spin_unlock_bh(&vscsi->intr_lock); 3974 3999 } 3975 4000 3976 - dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp, 3977 - vscsi->state); 3978 - 3979 - return count; 4001 + return 0; 3980 4002 } 3981 - CONFIGFS_ATTR(ibmvscsis_tpg_, enable); 3982 - 3983 - static struct configfs_attribute *ibmvscsis_tpg_attrs[] = { 3984 - &ibmvscsis_tpg_attr_enable, 3985 - NULL, 3986 - }; 3987 4003 3988 4004 static const struct target_core_fabric_ops ibmvscsis_ops = { 3989 4005 .module = THIS_MODULE, ··· 4004 4038 .fabric_make_wwn = 
ibmvscsis_make_tport, 4005 4039 .fabric_drop_wwn = ibmvscsis_drop_tport, 4006 4040 .fabric_make_tpg = ibmvscsis_make_tpg, 4041 + .fabric_enable_tpg = ibmvscsis_enable_tpg, 4007 4042 .fabric_drop_tpg = ibmvscsis_drop_tpg, 4008 4043 4009 4044 .tfc_wwn_attrs = ibmvscsis_wwn_attrs, 4010 - .tfc_tpg_base_attrs = ibmvscsis_tpg_attrs, 4011 4045 }; 4012 4046 4013 4047 static void ibmvscsis_dev_release(struct device *dev) {};
+2 -4
drivers/scsi/imm.c
··· 769 769 770 770 spin_lock_irqsave(host->host_lock, flags); 771 771 dev->cur_cmd = NULL; 772 - cmd->scsi_done(cmd); 772 + scsi_done(cmd); 773 773 spin_unlock_irqrestore(host->host_lock, flags); 774 774 return; 775 775 } ··· 910 910 return 0; 911 911 } 912 912 913 - static int imm_queuecommand_lck(struct scsi_cmnd *cmd, 914 - void (*done)(struct scsi_cmnd *)) 913 + static int imm_queuecommand_lck(struct scsi_cmnd *cmd) 915 914 { 916 915 imm_struct *dev = imm_dev(cmd->device->host); 917 916 ··· 921 922 dev->failed = 0; 922 923 dev->jstart = jiffies; 923 924 dev->cur_cmd = cmd; 924 - cmd->scsi_done = done; 925 925 cmd->result = DID_ERROR << 16; /* default return code */ 926 926 cmd->SCp.phase = 0; /* bus free */ 927 927
+2 -5
drivers/scsi/initio.c
··· 2609 2609 * will cause the mid layer to call us again later with the command) 2610 2610 */ 2611 2611 2612 - static int i91u_queuecommand_lck(struct scsi_cmnd *cmd, 2613 - void (*done)(struct scsi_cmnd *)) 2612 + static int i91u_queuecommand_lck(struct scsi_cmnd *cmd) 2614 2613 { 2615 2614 struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; 2616 2615 struct scsi_ctrl_blk *cmnd; 2617 - 2618 - cmd->scsi_done = done; 2619 2616 2620 2617 cmnd = initio_alloc_scb(host); 2621 2618 if (!cmnd) ··· 2785 2788 2786 2789 cmnd->result = cblk->tastat | (cblk->hastat << 16); 2787 2790 i91u_unmap_scb(host->pci_dev, cmnd); 2788 - cmnd->scsi_done(cmnd); /* Notify system DONE */ 2791 + scsi_done(cmnd); /* Notify system DONE */ 2789 2792 initio_release_scb(host, cblk); /* Release SCB for current channel */ 2790 2793 } 2791 2794
+26 -22
drivers/scsi/ipr.c
··· 866 866 scsi_cmd->result |= (DID_ERROR << 16); 867 867 868 868 scsi_dma_unmap(ipr_cmd->scsi_cmd); 869 - scsi_cmd->scsi_done(scsi_cmd); 869 + scsi_done(scsi_cmd); 870 870 if (ipr_cmd->eh_comp) 871 871 complete(ipr_cmd->eh_comp); 872 872 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); ··· 4236 4236 .write = ipr_next_async_err_log 4237 4237 }; 4238 4238 4239 - static struct device_attribute *ipr_ioa_attrs[] = { 4240 - &ipr_fw_version_attr, 4241 - &ipr_log_level_attr, 4242 - &ipr_diagnostics_attr, 4243 - &ipr_ioa_state_attr, 4244 - &ipr_ioa_reset_attr, 4245 - &ipr_update_fw_attr, 4246 - &ipr_ioa_fw_type_attr, 4247 - &ipr_iopoll_weight_attr, 4239 + static struct attribute *ipr_ioa_attrs[] = { 4240 + &ipr_fw_version_attr.attr, 4241 + &ipr_log_level_attr.attr, 4242 + &ipr_diagnostics_attr.attr, 4243 + &ipr_ioa_state_attr.attr, 4244 + &ipr_ioa_reset_attr.attr, 4245 + &ipr_update_fw_attr.attr, 4246 + &ipr_ioa_fw_type_attr.attr, 4247 + &ipr_iopoll_weight_attr.attr, 4248 4248 NULL, 4249 4249 }; 4250 + 4251 + ATTRIBUTE_GROUPS(ipr_ioa); 4250 4252 4251 4253 #ifdef CONFIG_SCSI_IPR_DUMP 4252 4254 /** ··· 4734 4732 .store = ipr_store_raw_mode 4735 4733 }; 4736 4734 4737 - static struct device_attribute *ipr_dev_attrs[] = { 4738 - &ipr_adapter_handle_attr, 4739 - &ipr_resource_path_attr, 4740 - &ipr_device_id_attr, 4741 - &ipr_resource_type_attr, 4742 - &ipr_raw_mode_attr, 4735 + static struct attribute *ipr_dev_attrs[] = { 4736 + &ipr_adapter_handle_attr.attr, 4737 + &ipr_resource_path_attr.attr, 4738 + &ipr_device_id_attr.attr, 4739 + &ipr_resource_type_attr.attr, 4740 + &ipr_raw_mode_attr.attr, 4743 4741 NULL, 4744 4742 }; 4743 + 4744 + ATTRIBUTE_GROUPS(ipr_dev); 4745 4745 4746 4746 /** 4747 4747 * ipr_biosparam - Return the HSC mapping ··· 6069 6065 res->in_erp = 0; 6070 6066 } 6071 6067 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6072 - scsi_cmd->scsi_done(scsi_cmd); 6068 + scsi_done(scsi_cmd); 6073 6069 if (ipr_cmd->eh_comp) 6074 6070 complete(ipr_cmd->eh_comp); 6075 
6071 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); ··· 6506 6502 } 6507 6503 6508 6504 scsi_dma_unmap(ipr_cmd->scsi_cmd); 6509 - scsi_cmd->scsi_done(scsi_cmd); 6505 + scsi_done(scsi_cmd); 6510 6506 if (ipr_cmd->eh_comp) 6511 6507 complete(ipr_cmd->eh_comp); 6512 6508 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); ··· 6535 6531 scsi_dma_unmap(scsi_cmd); 6536 6532 6537 6533 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); 6538 - scsi_cmd->scsi_done(scsi_cmd); 6534 + scsi_done(scsi_cmd); 6539 6535 if (ipr_cmd->eh_comp) 6540 6536 complete(ipr_cmd->eh_comp); 6541 6537 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); ··· 6689 6685 spin_lock_irqsave(hrrq->lock, hrrq_flags); 6690 6686 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 6691 6687 scsi_cmd->result = (DID_NO_CONNECT << 16); 6692 - scsi_cmd->scsi_done(scsi_cmd); 6688 + scsi_done(scsi_cmd); 6693 6689 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 6694 6690 return 0; 6695 6691 } ··· 6766 6762 .sg_tablesize = IPR_MAX_SGLIST, 6767 6763 .max_sectors = IPR_IOA_MAX_SECTORS, 6768 6764 .cmd_per_lun = IPR_MAX_CMD_PER_LUN, 6769 - .shost_attrs = ipr_ioa_attrs, 6770 - .sdev_attrs = ipr_dev_attrs, 6765 + .shost_groups = ipr_ioa_groups, 6766 + .sdev_groups = ipr_dev_groups, 6771 6767 .proc_name = IPR_NAME, 6772 6768 }; 6773 6769
+15 -16
drivers/scsi/ips.c
··· 936 936 937 937 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { 938 938 scb->scsi_cmd->result = DID_ERROR << 16; 939 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 939 + scsi_done(scb->scsi_cmd); 940 940 ips_freescb(ha, scb); 941 941 } 942 942 ··· 946 946 947 947 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { 948 948 scsi_cmd->result = DID_ERROR; 949 - scsi_cmd->scsi_done(scsi_cmd); 949 + scsi_done(scsi_cmd); 950 950 } 951 951 952 952 ha->active = FALSE; ··· 965 965 966 966 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { 967 967 scb->scsi_cmd->result = DID_ERROR << 16; 968 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 968 + scsi_done(scb->scsi_cmd); 969 969 ips_freescb(ha, scb); 970 970 } 971 971 ··· 975 975 976 976 while ((scsi_cmd = ips_removeq_wait_head(&ha->scb_waitlist))) { 977 977 scsi_cmd->result = DID_ERROR << 16; 978 - scsi_cmd->scsi_done(scsi_cmd); 978 + scsi_done(scsi_cmd); 979 979 } 980 980 981 981 ha->active = FALSE; ··· 994 994 995 995 while ((scb = ips_removeq_scb_head(&ha->scb_activelist))) { 996 996 scb->scsi_cmd->result = DID_RESET << 16; 997 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 997 + scsi_done(scb->scsi_cmd); 998 998 ips_freescb(ha, scb); 999 999 } 1000 1000 ··· 1035 1035 /* Linux obtains io_request_lock before calling this function */ 1036 1036 /* */ 1037 1037 /****************************************************************************/ 1038 - static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) 1038 + static int ips_queue_lck(struct scsi_cmnd *SC) 1039 1039 { 1040 + void (*done)(struct scsi_cmnd *) = scsi_done; 1040 1041 ips_ha_t *ha; 1041 1042 ips_passthru_t *pt; 1042 1043 ··· 1064 1063 1065 1064 return (0); 1066 1065 } 1067 - 1068 - SC->scsi_done = done; 1069 1066 1070 1067 DEBUG_VAR(2, "(%s%d): ips_queue: cmd 0x%X (%d %d %d)", 1071 1068 ips_name, ··· 1098 1099 ha->ioctl_reset = 1; /* This reset request is from an IOCTL */ 1099 1100 __ips_eh_reset(SC); 1100 1101 
SC->result = DID_OK << 16; 1101 - SC->scsi_done(SC); 1102 + scsi_done(SC); 1102 1103 return (0); 1103 1104 } 1104 1105 ··· 2578 2579 case IPS_FAILURE: 2579 2580 if (scb->scsi_cmd) { 2580 2581 scb->scsi_cmd->result = DID_ERROR << 16; 2581 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 2582 + scsi_done(scb->scsi_cmd); 2582 2583 } 2583 2584 2584 2585 ips_freescb(ha, scb); ··· 2586 2587 case IPS_SUCCESS_IMM: 2587 2588 if (scb->scsi_cmd) { 2588 2589 scb->scsi_cmd->result = DID_OK << 16; 2589 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 2590 + scsi_done(scb->scsi_cmd); 2590 2591 } 2591 2592 2592 2593 ips_freescb(ha, scb); ··· 2711 2712 case IPS_FAILURE: 2712 2713 if (scb->scsi_cmd) { 2713 2714 scb->scsi_cmd->result = DID_ERROR << 16; 2714 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 2715 + scsi_done(scb->scsi_cmd); 2715 2716 } 2716 2717 2717 2718 if (scb->bus) ··· 2722 2723 break; 2723 2724 case IPS_SUCCESS_IMM: 2724 2725 if (scb->scsi_cmd) 2725 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 2726 + scsi_done(scb->scsi_cmd); 2726 2727 2727 2728 if (scb->bus) 2728 2729 ha->dcdb_active[scb->bus - 1] &= ··· 3205 3206 case IPS_FAILURE: 3206 3207 if (scb->scsi_cmd) { 3207 3208 scb->scsi_cmd->result = DID_ERROR << 16; 3208 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 3209 + scsi_done(scb->scsi_cmd); 3209 3210 } 3210 3211 3211 3212 ips_freescb(ha, scb); ··· 3213 3214 case IPS_SUCCESS_IMM: 3214 3215 if (scb->scsi_cmd) { 3215 3216 scb->scsi_cmd->result = DID_ERROR << 16; 3216 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 3217 + scsi_done(scb->scsi_cmd); 3217 3218 } 3218 3219 3219 3220 ips_freescb(ha, scb); ··· 3230 3231 ha->dcdb_active[scb->bus - 1] &= ~(1 << scb->target_id); 3231 3232 } 3232 3233 3233 - scb->scsi_cmd->scsi_done(scb->scsi_cmd); 3234 + scsi_done(scb->scsi_cmd); 3234 3235 3235 3236 ips_freescb(ha, scb); 3236 3237 }
+5 -3
drivers/scsi/isci/init.c
··· 142 142 143 143 static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL); 144 144 145 - static struct device_attribute *isci_host_attrs[] = { 146 - &dev_attr_isci_id, 145 + static struct attribute *isci_host_attrs[] = { 146 + &dev_attr_isci_id.attr, 147 147 NULL 148 148 }; 149 + 150 + ATTRIBUTE_GROUPS(isci_host); 149 151 150 152 static struct scsi_host_template isci_sht = { 151 153 ··· 175 173 #ifdef CONFIG_COMPAT 176 174 .compat_ioctl = sas_ioctl, 177 175 #endif 178 - .shost_attrs = isci_host_attrs, 176 + .shost_groups = isci_host_groups, 179 177 .track_queue_depth = 1, 180 178 }; 181 179
-4
drivers/scsi/isci/task.h
··· 182 182 u32 isci_task_ssp_request_get_response_data_length( 183 183 struct isci_request *request); 184 184 185 - int isci_queuecommand( 186 - struct scsi_cmnd *scsi_cmd, 187 - void (*donefunc)(struct scsi_cmnd *)); 188 - 189 185 #endif /* !defined(_SCI_TASK_H_) */
+3 -3
drivers/scsi/libfc/fc_fcp.c
··· 1870 1870 rval = fc_remote_port_chkready(rport); 1871 1871 if (rval) { 1872 1872 sc_cmd->result = rval; 1873 - sc_cmd->scsi_done(sc_cmd); 1873 + scsi_done(sc_cmd); 1874 1874 return 0; 1875 1875 } 1876 1876 ··· 1880 1880 * online 1881 1881 */ 1882 1882 sc_cmd->result = DID_IMM_RETRY << 16; 1883 - sc_cmd->scsi_done(sc_cmd); 1883 + scsi_done(sc_cmd); 1884 1884 goto out; 1885 1885 } 1886 1886 ··· 2087 2087 list_del(&fsp->list); 2088 2088 sc_cmd->SCp.ptr = NULL; 2089 2089 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 2090 - sc_cmd->scsi_done(sc_cmd); 2090 + scsi_done(sc_cmd); 2091 2091 2092 2092 /* release ref from initial allocation in queue command */ 2093 2093 fc_fcp_pkt_release(fsp);
+3 -4
drivers/scsi/libiscsi.c
··· 468 468 * it will decide how to return sc to scsi-ml. 469 469 */ 470 470 if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ) 471 - sc->scsi_done(sc); 471 + scsi_done(sc); 472 472 } 473 473 } 474 474 ··· 1807 1807 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", 1808 1808 sc->cmnd[0], reason); 1809 1809 scsi_set_resid(sc, scsi_bufflen(sc)); 1810 - sc->scsi_done(sc); 1810 + scsi_done(sc); 1811 1811 return 0; 1812 1812 } 1813 1813 EXPORT_SYMBOL_GPL(iscsi_queuecommand); ··· 2950 2950 session->tmf_state = TMF_INITIAL; 2951 2951 timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0); 2952 2952 mutex_init(&session->eh_mutex); 2953 + init_waitqueue_head(&session->ehwait); 2953 2954 2954 2955 spin_lock_init(&session->frwd_lock); 2955 2956 spin_lock_init(&session->back_lock); ··· 3077 3076 if (!data) 3078 3077 goto login_task_data_alloc_fail; 3079 3078 conn->login_task->data = conn->data = data; 3080 - 3081 - init_waitqueue_head(&session->ehwait); 3082 3079 3083 3080 return cls_conn; 3084 3081
+5 -3
drivers/scsi/libsas/sas_init.c
··· 147 147 148 148 return error; 149 149 } 150 + EXPORT_SYMBOL_GPL(sas_register_ha); 150 151 151 152 static void sas_disable_events(struct sas_ha_struct *sas_ha) 152 153 { ··· 177 176 178 177 return 0; 179 178 } 179 + EXPORT_SYMBOL_GPL(sas_unregister_ha); 180 180 181 181 static int sas_get_linkerrors(struct sas_phy *phy) 182 182 { ··· 254 252 } 255 253 } 256 254 257 - static int sas_phy_enable(struct sas_phy *phy, int enable) 255 + int sas_phy_enable(struct sas_phy *phy, int enable) 258 256 { 259 257 int ret; 260 258 enum phy_func cmd; ··· 286 284 } 287 285 return ret; 288 286 } 287 + EXPORT_SYMBOL_GPL(sas_phy_enable); 289 288 290 289 int sas_phy_reset(struct sas_phy *phy, int hard_reset) 291 290 { ··· 316 313 } 317 314 return ret; 318 315 } 316 + EXPORT_SYMBOL_GPL(sas_phy_reset); 319 317 320 318 int sas_set_phy_speed(struct sas_phy *phy, 321 319 struct sas_phy_linkrates *rates) ··· 663 659 module_init(sas_class_init); 664 660 module_exit(sas_class_exit); 665 661 666 - EXPORT_SYMBOL_GPL(sas_register_ha); 667 - EXPORT_SYMBOL_GPL(sas_unregister_ha);
+13 -14
drivers/scsi/libsas/sas_scsi_host.c
··· 125 125 } 126 126 127 127 sas_end_task(sc, task); 128 - sc->scsi_done(sc); 128 + scsi_done(sc); 129 129 } 130 130 131 131 static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, ··· 198 198 else 199 199 cmd->result = DID_ERROR << 16; 200 200 out_done: 201 - cmd->scsi_done(cmd); 201 + scsi_done(cmd); 202 202 return 0; 203 203 } 204 + EXPORT_SYMBOL_GPL(sas_queuecommand); 204 205 205 206 static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) 206 207 { ··· 512 511 513 512 return FAILED; 514 513 } 514 + EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); 515 515 516 516 int sas_eh_target_reset_handler(struct scsi_cmnd *cmd) 517 517 { ··· 534 532 535 533 return FAILED; 536 534 } 535 + EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler); 537 536 538 537 /* Try to reset a device */ 539 538 static int try_to_reset_cmd_device(struct scsi_cmnd *cmd) ··· 793 790 794 791 return -EINVAL; 795 792 } 793 + EXPORT_SYMBOL_GPL(sas_ioctl); 796 794 797 795 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) 798 796 { ··· 836 832 starget->hostdata = found_dev; 837 833 return 0; 838 834 } 835 + EXPORT_SYMBOL_GPL(sas_target_alloc); 839 836 840 837 #define SAS_DEF_QD 256 841 838 ··· 865 860 866 861 return 0; 867 862 } 863 + EXPORT_SYMBOL_GPL(sas_slave_configure); 868 864 869 865 int sas_change_queue_depth(struct scsi_device *sdev, int depth) 870 866 { ··· 878 872 depth = 1; 879 873 return scsi_change_queue_depth(sdev, depth); 880 874 } 875 + EXPORT_SYMBOL_GPL(sas_change_queue_depth); 881 876 882 877 int sas_bios_param(struct scsi_device *scsi_dev, 883 878 struct block_device *bdev, ··· 891 884 892 885 return 0; 893 886 } 887 + EXPORT_SYMBOL_GPL(sas_bios_param); 894 888 895 889 /* 896 890 * Tell an upper layer that it needs to initiate an abort for a given task. 
··· 918 910 else 919 911 blk_abort_request(scsi_cmd_to_rq(sc)); 920 912 } 913 + EXPORT_SYMBOL_GPL(sas_task_abort); 921 914 922 915 int sas_slave_alloc(struct scsi_device *sdev) 923 916 { ··· 927 918 928 919 return 0; 929 920 } 921 + EXPORT_SYMBOL_GPL(sas_slave_alloc); 930 922 931 923 void sas_target_destroy(struct scsi_target *starget) 932 924 { ··· 939 929 starget->hostdata = NULL; 940 930 sas_put_device(found_dev); 941 931 } 932 + EXPORT_SYMBOL_GPL(sas_target_destroy); 942 933 943 934 #define SAS_STRING_ADDR_SIZE 16 944 935 ··· 967 956 } 968 957 EXPORT_SYMBOL_GPL(sas_request_addr); 969 958 970 - EXPORT_SYMBOL_GPL(sas_queuecommand); 971 - EXPORT_SYMBOL_GPL(sas_target_alloc); 972 - EXPORT_SYMBOL_GPL(sas_slave_configure); 973 - EXPORT_SYMBOL_GPL(sas_change_queue_depth); 974 - EXPORT_SYMBOL_GPL(sas_bios_param); 975 - EXPORT_SYMBOL_GPL(sas_task_abort); 976 - EXPORT_SYMBOL_GPL(sas_phy_reset); 977 - EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); 978 - EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler); 979 - EXPORT_SYMBOL_GPL(sas_slave_alloc); 980 - EXPORT_SYMBOL_GPL(sas_target_destroy); 981 - EXPORT_SYMBOL_GPL(sas_ioctl);
+1
drivers/scsi/lpfc/lpfc.h
··· 1029 1029 * Firmware supports Forced Link Speed 1030 1030 * capability 1031 1031 */ 1032 + #define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */ 1032 1033 #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ 1033 1034 #define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */ 1034 1035 #define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
+166 -148
drivers/scsi/lpfc/lpfc_attr.c
··· 6394 6394 LPFC_VMID_PRIO_TAG_ALL_TARGETS, 6395 6395 "Enable Priority Tagging VMID support"); 6396 6396 6397 - struct device_attribute *lpfc_hba_attrs[] = { 6398 - &dev_attr_nvme_info, 6399 - &dev_attr_scsi_stat, 6400 - &dev_attr_bg_info, 6401 - &dev_attr_bg_guard_err, 6402 - &dev_attr_bg_apptag_err, 6403 - &dev_attr_bg_reftag_err, 6404 - &dev_attr_info, 6405 - &dev_attr_serialnum, 6406 - &dev_attr_modeldesc, 6407 - &dev_attr_modelname, 6408 - &dev_attr_programtype, 6409 - &dev_attr_portnum, 6410 - &dev_attr_fwrev, 6411 - &dev_attr_hdw, 6412 - &dev_attr_option_rom_version, 6413 - &dev_attr_link_state, 6414 - &dev_attr_num_discovered_ports, 6415 - &dev_attr_menlo_mgmt_mode, 6416 - &dev_attr_lpfc_drvr_version, 6417 - &dev_attr_lpfc_enable_fip, 6418 - &dev_attr_lpfc_temp_sensor, 6419 - &dev_attr_lpfc_log_verbose, 6420 - &dev_attr_lpfc_lun_queue_depth, 6421 - &dev_attr_lpfc_tgt_queue_depth, 6422 - &dev_attr_lpfc_hba_queue_depth, 6423 - &dev_attr_lpfc_peer_port_login, 6424 - &dev_attr_lpfc_nodev_tmo, 6425 - &dev_attr_lpfc_devloss_tmo, 6426 - &dev_attr_lpfc_enable_fc4_type, 6427 - &dev_attr_lpfc_fcp_class, 6428 - &dev_attr_lpfc_use_adisc, 6429 - &dev_attr_lpfc_first_burst_size, 6430 - &dev_attr_lpfc_ack0, 6431 - &dev_attr_lpfc_xri_rebalancing, 6432 - &dev_attr_lpfc_topology, 6433 - &dev_attr_lpfc_scan_down, 6434 - &dev_attr_lpfc_link_speed, 6435 - &dev_attr_lpfc_fcp_io_sched, 6436 - &dev_attr_lpfc_ns_query, 6437 - &dev_attr_lpfc_fcp2_no_tgt_reset, 6438 - &dev_attr_lpfc_cr_delay, 6439 - &dev_attr_lpfc_cr_count, 6440 - &dev_attr_lpfc_multi_ring_support, 6441 - &dev_attr_lpfc_multi_ring_rctl, 6442 - &dev_attr_lpfc_multi_ring_type, 6443 - &dev_attr_lpfc_fdmi_on, 6444 - &dev_attr_lpfc_enable_SmartSAN, 6445 - &dev_attr_lpfc_max_luns, 6446 - &dev_attr_lpfc_enable_npiv, 6447 - &dev_attr_lpfc_fcf_failover_policy, 6448 - &dev_attr_lpfc_enable_rrq, 6449 - &dev_attr_lpfc_fcp_wait_abts_rsp, 6450 - &dev_attr_nport_evt_cnt, 6451 - &dev_attr_board_mode, 6452 - &dev_attr_max_vpi, 6453 
- &dev_attr_used_vpi, 6454 - &dev_attr_max_rpi, 6455 - &dev_attr_used_rpi, 6456 - &dev_attr_max_xri, 6457 - &dev_attr_used_xri, 6458 - &dev_attr_npiv_info, 6459 - &dev_attr_issue_reset, 6460 - &dev_attr_lpfc_poll, 6461 - &dev_attr_lpfc_poll_tmo, 6462 - &dev_attr_lpfc_task_mgmt_tmo, 6463 - &dev_attr_lpfc_use_msi, 6464 - &dev_attr_lpfc_nvme_oas, 6465 - &dev_attr_lpfc_nvme_embed_cmd, 6466 - &dev_attr_lpfc_fcp_imax, 6467 - &dev_attr_lpfc_force_rscn, 6468 - &dev_attr_lpfc_cq_poll_threshold, 6469 - &dev_attr_lpfc_cq_max_proc_limit, 6470 - &dev_attr_lpfc_fcp_cpu_map, 6471 - &dev_attr_lpfc_fcp_mq_threshold, 6472 - &dev_attr_lpfc_hdw_queue, 6473 - &dev_attr_lpfc_irq_chann, 6474 - &dev_attr_lpfc_suppress_rsp, 6475 - &dev_attr_lpfc_nvmet_mrq, 6476 - &dev_attr_lpfc_nvmet_mrq_post, 6477 - &dev_attr_lpfc_nvme_enable_fb, 6478 - &dev_attr_lpfc_nvmet_fb_size, 6479 - &dev_attr_lpfc_enable_bg, 6480 - &dev_attr_lpfc_soft_wwnn, 6481 - &dev_attr_lpfc_soft_wwpn, 6482 - &dev_attr_lpfc_soft_wwn_enable, 6483 - &dev_attr_lpfc_enable_hba_reset, 6484 - &dev_attr_lpfc_enable_hba_heartbeat, 6485 - &dev_attr_lpfc_EnableXLane, 6486 - &dev_attr_lpfc_XLanePriority, 6487 - &dev_attr_lpfc_xlane_lun, 6488 - &dev_attr_lpfc_xlane_tgt, 6489 - &dev_attr_lpfc_xlane_vpt, 6490 - &dev_attr_lpfc_xlane_lun_state, 6491 - &dev_attr_lpfc_xlane_lun_status, 6492 - &dev_attr_lpfc_xlane_priority, 6493 - &dev_attr_lpfc_sg_seg_cnt, 6494 - &dev_attr_lpfc_max_scsicmpl_time, 6495 - &dev_attr_lpfc_stat_data_ctrl, 6496 - &dev_attr_lpfc_aer_support, 6497 - &dev_attr_lpfc_aer_state_cleanup, 6498 - &dev_attr_lpfc_sriov_nr_virtfn, 6499 - &dev_attr_lpfc_req_fw_upgrade, 6500 - &dev_attr_lpfc_suppress_link_up, 6501 - &dev_attr_iocb_hw, 6502 - &dev_attr_pls, 6503 - &dev_attr_pt, 6504 - &dev_attr_txq_hw, 6505 - &dev_attr_txcmplq_hw, 6506 - &dev_attr_lpfc_sriov_hw_max_virtfn, 6507 - &dev_attr_protocol, 6508 - &dev_attr_lpfc_xlane_supported, 6509 - &dev_attr_lpfc_enable_mds_diags, 6510 - &dev_attr_lpfc_ras_fwlog_buffsize, 6511 - 
&dev_attr_lpfc_ras_fwlog_level, 6512 - &dev_attr_lpfc_ras_fwlog_func, 6513 - &dev_attr_lpfc_enable_bbcr, 6514 - &dev_attr_lpfc_enable_dpp, 6515 - &dev_attr_lpfc_enable_mi, 6516 - &dev_attr_cmf_info, 6517 - &dev_attr_lpfc_max_vmid, 6518 - &dev_attr_lpfc_vmid_inactivity_timeout, 6519 - &dev_attr_lpfc_vmid_app_header, 6520 - &dev_attr_lpfc_vmid_priority_tagging, 6397 + static struct attribute *lpfc_hba_attrs[] = { 6398 + &dev_attr_nvme_info.attr, 6399 + &dev_attr_scsi_stat.attr, 6400 + &dev_attr_bg_info.attr, 6401 + &dev_attr_bg_guard_err.attr, 6402 + &dev_attr_bg_apptag_err.attr, 6403 + &dev_attr_bg_reftag_err.attr, 6404 + &dev_attr_info.attr, 6405 + &dev_attr_serialnum.attr, 6406 + &dev_attr_modeldesc.attr, 6407 + &dev_attr_modelname.attr, 6408 + &dev_attr_programtype.attr, 6409 + &dev_attr_portnum.attr, 6410 + &dev_attr_fwrev.attr, 6411 + &dev_attr_hdw.attr, 6412 + &dev_attr_option_rom_version.attr, 6413 + &dev_attr_link_state.attr, 6414 + &dev_attr_num_discovered_ports.attr, 6415 + &dev_attr_menlo_mgmt_mode.attr, 6416 + &dev_attr_lpfc_drvr_version.attr, 6417 + &dev_attr_lpfc_enable_fip.attr, 6418 + &dev_attr_lpfc_temp_sensor.attr, 6419 + &dev_attr_lpfc_log_verbose.attr, 6420 + &dev_attr_lpfc_lun_queue_depth.attr, 6421 + &dev_attr_lpfc_tgt_queue_depth.attr, 6422 + &dev_attr_lpfc_hba_queue_depth.attr, 6423 + &dev_attr_lpfc_peer_port_login.attr, 6424 + &dev_attr_lpfc_nodev_tmo.attr, 6425 + &dev_attr_lpfc_devloss_tmo.attr, 6426 + &dev_attr_lpfc_enable_fc4_type.attr, 6427 + &dev_attr_lpfc_fcp_class.attr, 6428 + &dev_attr_lpfc_use_adisc.attr, 6429 + &dev_attr_lpfc_first_burst_size.attr, 6430 + &dev_attr_lpfc_ack0.attr, 6431 + &dev_attr_lpfc_xri_rebalancing.attr, 6432 + &dev_attr_lpfc_topology.attr, 6433 + &dev_attr_lpfc_scan_down.attr, 6434 + &dev_attr_lpfc_link_speed.attr, 6435 + &dev_attr_lpfc_fcp_io_sched.attr, 6436 + &dev_attr_lpfc_ns_query.attr, 6437 + &dev_attr_lpfc_fcp2_no_tgt_reset.attr, 6438 + &dev_attr_lpfc_cr_delay.attr, 6439 + &dev_attr_lpfc_cr_count.attr, 
6440 + &dev_attr_lpfc_multi_ring_support.attr, 6441 + &dev_attr_lpfc_multi_ring_rctl.attr, 6442 + &dev_attr_lpfc_multi_ring_type.attr, 6443 + &dev_attr_lpfc_fdmi_on.attr, 6444 + &dev_attr_lpfc_enable_SmartSAN.attr, 6445 + &dev_attr_lpfc_max_luns.attr, 6446 + &dev_attr_lpfc_enable_npiv.attr, 6447 + &dev_attr_lpfc_fcf_failover_policy.attr, 6448 + &dev_attr_lpfc_enable_rrq.attr, 6449 + &dev_attr_lpfc_fcp_wait_abts_rsp.attr, 6450 + &dev_attr_nport_evt_cnt.attr, 6451 + &dev_attr_board_mode.attr, 6452 + &dev_attr_max_vpi.attr, 6453 + &dev_attr_used_vpi.attr, 6454 + &dev_attr_max_rpi.attr, 6455 + &dev_attr_used_rpi.attr, 6456 + &dev_attr_max_xri.attr, 6457 + &dev_attr_used_xri.attr, 6458 + &dev_attr_npiv_info.attr, 6459 + &dev_attr_issue_reset.attr, 6460 + &dev_attr_lpfc_poll.attr, 6461 + &dev_attr_lpfc_poll_tmo.attr, 6462 + &dev_attr_lpfc_task_mgmt_tmo.attr, 6463 + &dev_attr_lpfc_use_msi.attr, 6464 + &dev_attr_lpfc_nvme_oas.attr, 6465 + &dev_attr_lpfc_nvme_embed_cmd.attr, 6466 + &dev_attr_lpfc_fcp_imax.attr, 6467 + &dev_attr_lpfc_force_rscn.attr, 6468 + &dev_attr_lpfc_cq_poll_threshold.attr, 6469 + &dev_attr_lpfc_cq_max_proc_limit.attr, 6470 + &dev_attr_lpfc_fcp_cpu_map.attr, 6471 + &dev_attr_lpfc_fcp_mq_threshold.attr, 6472 + &dev_attr_lpfc_hdw_queue.attr, 6473 + &dev_attr_lpfc_irq_chann.attr, 6474 + &dev_attr_lpfc_suppress_rsp.attr, 6475 + &dev_attr_lpfc_nvmet_mrq.attr, 6476 + &dev_attr_lpfc_nvmet_mrq_post.attr, 6477 + &dev_attr_lpfc_nvme_enable_fb.attr, 6478 + &dev_attr_lpfc_nvmet_fb_size.attr, 6479 + &dev_attr_lpfc_enable_bg.attr, 6480 + &dev_attr_lpfc_soft_wwnn.attr, 6481 + &dev_attr_lpfc_soft_wwpn.attr, 6482 + &dev_attr_lpfc_soft_wwn_enable.attr, 6483 + &dev_attr_lpfc_enable_hba_reset.attr, 6484 + &dev_attr_lpfc_enable_hba_heartbeat.attr, 6485 + &dev_attr_lpfc_EnableXLane.attr, 6486 + &dev_attr_lpfc_XLanePriority.attr, 6487 + &dev_attr_lpfc_xlane_lun.attr, 6488 + &dev_attr_lpfc_xlane_tgt.attr, 6489 + &dev_attr_lpfc_xlane_vpt.attr, 6490 + 
&dev_attr_lpfc_xlane_lun_state.attr, 6491 + &dev_attr_lpfc_xlane_lun_status.attr, 6492 + &dev_attr_lpfc_xlane_priority.attr, 6493 + &dev_attr_lpfc_sg_seg_cnt.attr, 6494 + &dev_attr_lpfc_max_scsicmpl_time.attr, 6495 + &dev_attr_lpfc_stat_data_ctrl.attr, 6496 + &dev_attr_lpfc_aer_support.attr, 6497 + &dev_attr_lpfc_aer_state_cleanup.attr, 6498 + &dev_attr_lpfc_sriov_nr_virtfn.attr, 6499 + &dev_attr_lpfc_req_fw_upgrade.attr, 6500 + &dev_attr_lpfc_suppress_link_up.attr, 6501 + &dev_attr_iocb_hw.attr, 6502 + &dev_attr_pls.attr, 6503 + &dev_attr_pt.attr, 6504 + &dev_attr_txq_hw.attr, 6505 + &dev_attr_txcmplq_hw.attr, 6506 + &dev_attr_lpfc_sriov_hw_max_virtfn.attr, 6507 + &dev_attr_protocol.attr, 6508 + &dev_attr_lpfc_xlane_supported.attr, 6509 + &dev_attr_lpfc_enable_mds_diags.attr, 6510 + &dev_attr_lpfc_ras_fwlog_buffsize.attr, 6511 + &dev_attr_lpfc_ras_fwlog_level.attr, 6512 + &dev_attr_lpfc_ras_fwlog_func.attr, 6513 + &dev_attr_lpfc_enable_bbcr.attr, 6514 + &dev_attr_lpfc_enable_dpp.attr, 6515 + &dev_attr_lpfc_enable_mi.attr, 6516 + &dev_attr_cmf_info.attr, 6517 + &dev_attr_lpfc_max_vmid.attr, 6518 + &dev_attr_lpfc_vmid_inactivity_timeout.attr, 6519 + &dev_attr_lpfc_vmid_app_header.attr, 6520 + &dev_attr_lpfc_vmid_priority_tagging.attr, 6521 6521 NULL, 6522 6522 }; 6523 6523 6524 - struct device_attribute *lpfc_vport_attrs[] = { 6525 - &dev_attr_info, 6526 - &dev_attr_link_state, 6527 - &dev_attr_num_discovered_ports, 6528 - &dev_attr_lpfc_drvr_version, 6529 - &dev_attr_lpfc_log_verbose, 6530 - &dev_attr_lpfc_lun_queue_depth, 6531 - &dev_attr_lpfc_tgt_queue_depth, 6532 - &dev_attr_lpfc_nodev_tmo, 6533 - &dev_attr_lpfc_devloss_tmo, 6534 - &dev_attr_lpfc_hba_queue_depth, 6535 - &dev_attr_lpfc_peer_port_login, 6536 - &dev_attr_lpfc_restrict_login, 6537 - &dev_attr_lpfc_fcp_class, 6538 - &dev_attr_lpfc_use_adisc, 6539 - &dev_attr_lpfc_first_burst_size, 6540 - &dev_attr_lpfc_max_luns, 6541 - &dev_attr_nport_evt_cnt, 6542 - &dev_attr_npiv_info, 6543 - 
&dev_attr_lpfc_enable_da_id, 6544 - &dev_attr_lpfc_max_scsicmpl_time, 6545 - &dev_attr_lpfc_stat_data_ctrl, 6546 - &dev_attr_lpfc_static_vport, 6547 - &dev_attr_cmf_info, 6524 + static const struct attribute_group lpfc_hba_attr_group = { 6525 + .attrs = lpfc_hba_attrs 6526 + }; 6527 + 6528 + const struct attribute_group *lpfc_hba_groups[] = { 6529 + &lpfc_hba_attr_group, 6530 + NULL 6531 + }; 6532 + 6533 + static struct attribute *lpfc_vport_attrs[] = { 6534 + &dev_attr_info.attr, 6535 + &dev_attr_link_state.attr, 6536 + &dev_attr_num_discovered_ports.attr, 6537 + &dev_attr_lpfc_drvr_version.attr, 6538 + &dev_attr_lpfc_log_verbose.attr, 6539 + &dev_attr_lpfc_lun_queue_depth.attr, 6540 + &dev_attr_lpfc_tgt_queue_depth.attr, 6541 + &dev_attr_lpfc_nodev_tmo.attr, 6542 + &dev_attr_lpfc_devloss_tmo.attr, 6543 + &dev_attr_lpfc_hba_queue_depth.attr, 6544 + &dev_attr_lpfc_peer_port_login.attr, 6545 + &dev_attr_lpfc_restrict_login.attr, 6546 + &dev_attr_lpfc_fcp_class.attr, 6547 + &dev_attr_lpfc_use_adisc.attr, 6548 + &dev_attr_lpfc_first_burst_size.attr, 6549 + &dev_attr_lpfc_max_luns.attr, 6550 + &dev_attr_nport_evt_cnt.attr, 6551 + &dev_attr_npiv_info.attr, 6552 + &dev_attr_lpfc_enable_da_id.attr, 6553 + &dev_attr_lpfc_max_scsicmpl_time.attr, 6554 + &dev_attr_lpfc_stat_data_ctrl.attr, 6555 + &dev_attr_lpfc_static_vport.attr, 6556 + &dev_attr_cmf_info.attr, 6548 6557 NULL, 6558 + }; 6559 + 6560 + static const struct attribute_group lpfc_vport_attr_group = { 6561 + .attrs = lpfc_vport_attrs 6562 + }; 6563 + 6564 + const struct attribute_group *lpfc_vport_groups[] = { 6565 + &lpfc_vport_attr_group, 6566 + NULL 6549 6567 }; 6550 6568 6551 6569 /**
+5 -2
drivers/scsi/lpfc/lpfc_crtn.h
··· 119 119 struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did); 120 120 struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); 121 121 int lpfc_nlp_put(struct lpfc_nodelist *); 122 + void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, 123 + struct lpfc_nodelist *ndlp); 122 124 void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 123 125 struct lpfc_iocbq *rspiocb); 124 126 int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp); ··· 207 205 int lpfc_config_port_prep(struct lpfc_hba *); 208 206 void lpfc_update_vport_wwn(struct lpfc_vport *vport); 209 207 int lpfc_config_port_post(struct lpfc_hba *); 208 + int lpfc_sli4_refresh_params(struct lpfc_hba *phba); 210 209 int lpfc_hba_down_prep(struct lpfc_hba *); 211 210 int lpfc_hba_down_post(struct lpfc_hba *); 212 211 void lpfc_hba_init(struct lpfc_hba *, uint32_t *); ··· 431 428 void lpfc_get_vport_cfgparam(struct lpfc_vport *); 432 429 int lpfc_alloc_sysfs_attr(struct lpfc_vport *); 433 430 void lpfc_free_sysfs_attr(struct lpfc_vport *); 434 - extern struct device_attribute *lpfc_hba_attrs[]; 435 - extern struct device_attribute *lpfc_vport_attrs[]; 431 + extern const struct attribute_group *lpfc_hba_groups[]; 432 + extern const struct attribute_group *lpfc_vport_groups[]; 436 433 extern struct scsi_host_template lpfc_template; 437 434 extern struct scsi_host_template lpfc_template_nvme; 438 435 extern struct fc_function_template lpfc_transport_functions;
+10 -2
drivers/scsi/lpfc/lpfc_disc.h
··· 85 85 NLP_XPT_HAS_HH = 0x10 86 86 }; 87 87 88 + enum lpfc_nlp_save_flags { 89 + /* devloss occurred during recovery */ 90 + NLP_IN_RECOV_POST_DEV_LOSS = 0x1, 91 + /* wait for outstanding LOGO to cmpl */ 92 + NLP_WAIT_FOR_LOGO = 0x2, 93 + }; 94 + 88 95 struct lpfc_nodelist { 89 96 struct list_head nlp_listp; 90 97 struct serv_parm fc_sparam; /* buffer for service params */ ··· 151 144 unsigned long *active_rrqs_xri_bitmap; 152 145 struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ 153 146 uint32_t fc4_prli_sent; 154 - u32 upcall_flags; 155 - #define NLP_WAIT_FOR_LOGO 0x2 147 + 148 + /* flags to keep ndlp alive until special conditions are met */ 149 + enum lpfc_nlp_save_flags save_flags; 156 150 157 151 enum lpfc_fc4_xpt_flags fc4_xpt_flags; 158 152
+50 -11
drivers/scsi/lpfc/lpfc_els.c
··· 1059 1059 1060 1060 lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, 1061 1061 "0150 FLOGI failure Status:x%x/x%x " 1062 - "xri x%x TMO:x%x\n", 1062 + "xri x%x TMO:x%x refcnt %d\n", 1063 1063 irsp->ulpStatus, irsp->un.ulpWord[4], 1064 - cmdiocb->sli4_xritag, irsp->ulpTimeout); 1064 + cmdiocb->sli4_xritag, irsp->ulpTimeout, 1065 + kref_read(&ndlp->kref)); 1065 1066 1066 1067 /* If this is not a loop open failure, bail out */ 1067 1068 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && ··· 1123 1122 /* FLOGI completes successfully */ 1124 1123 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1125 1124 "0101 FLOGI completes successfully, I/O tag:x%x, " 1126 - "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x\n", 1125 + "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n", 1127 1126 cmdiocb->iotag, cmdiocb->sli4_xritag, 1128 1127 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 1129 1128 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, 1130 1129 vport->port_state, vport->fc_flag, 1131 - sp->cmn.priority_tagging); 1130 + sp->cmn.priority_tagging, kref_read(&ndlp->kref)); 1132 1131 1133 1132 if (sp->cmn.priority_tagging) 1134 1133 vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA; ··· 1206 1205 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1207 1206 spin_unlock_irq(&phba->hbalock); 1208 1207 1209 - if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 1210 - lpfc_nlp_put(ndlp); 1211 1208 if (!lpfc_error_lost_link(irsp)) { 1212 1209 /* FLOGI failed, so just use loop map to make discovery list */ 1213 1210 lpfc_disc_list_loopmap(vport); ··· 2329 2330 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2330 2331 NLP_EVT_CMPL_PRLI); 2331 2332 2333 + /* 2334 + * For P2P topology, retain the node so that PLOGI can be 2335 + * attempted on it again. 2336 + */ 2337 + if (vport->fc_flag & FC_PT2PT) 2338 + goto out; 2339 + 2332 2340 /* As long as this node is not registered with the SCSI 2333 2341 * or NVMe transport and no other PRLIs are outstanding, 2334 2342 * it is no longer an active node. 
Otherwise devloss ··· 2905 2899 irsp = &(rspiocb->iocb); 2906 2900 spin_lock_irq(&ndlp->lock); 2907 2901 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2908 - if (ndlp->upcall_flags & NLP_WAIT_FOR_LOGO) { 2902 + if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { 2909 2903 wake_up_waiter = 1; 2910 - ndlp->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 2904 + ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; 2911 2905 } 2912 2906 spin_unlock_irq(&ndlp->lock); 2913 2907 ··· 4577 4571 retry = 1; 4578 4572 delay = 100; 4579 4573 break; 4574 + case IOERR_SLI_ABORTED: 4575 + /* Retry ELS PLOGI command? 4576 + * Possibly the rport just wasn't ready. 4577 + */ 4578 + if (cmd == ELS_CMD_PLOGI) { 4579 + /* No retry if state change */ 4580 + if (ndlp && 4581 + ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 4582 + goto out_retry; 4583 + retry = 1; 4584 + maxretry = 2; 4585 + } 4586 + break; 4580 4587 } 4581 4588 break; 4582 4589 ··· 5315 5296 */ 5316 5297 if (phba->sli_rev == LPFC_SLI_REV4 && 5317 5298 (vport && vport->port_type == LPFC_NPIV_PORT) && 5299 + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) && 5318 5300 ndlp->nlp_flag & NLP_RELEASE_RPI) { 5319 5301 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5320 5302 spin_lock_irq(&ndlp->lock); ··· 5619 5599 } 5620 5600 5621 5601 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5622 - * node's assigned RPI needs to be released as this node will get 5623 - * freed. 5602 + * node's assigned RPI gets released provided this node is not already 5603 + * registered with the transport. 
5624 5604 */ 5625 5605 if (phba->sli_rev == LPFC_SLI_REV4 && 5626 - vport->port_type == LPFC_NPIV_PORT) { 5606 + vport->port_type == LPFC_NPIV_PORT && 5607 + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { 5627 5608 spin_lock_irq(&ndlp->lock); 5628 5609 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5629 5610 spin_unlock_irq(&ndlp->lock); ··· 6237 6216 * from backend 6238 6217 */ 6239 6218 lpfc_nlp_unreg_node(vport, ndlp); 6219 + lpfc_unreg_rpi(vport, ndlp); 6240 6220 continue; 6241 6221 } 6242 6222 ··· 10735 10713 irsp->ulpStatus, irsp->un.ulpWord[4]); 10736 10714 goto fdisc_failed; 10737 10715 } 10716 + 10717 + lpfc_check_nlp_post_devloss(vport, ndlp); 10718 + 10738 10719 spin_lock_irq(shost->host_lock); 10739 10720 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 10740 10721 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; ··· 11410 11385 { 11411 11386 struct lpfc_hba *phba = vport->phba; 11412 11387 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11388 + struct lpfc_nodelist *ndlp = NULL; 11413 11389 unsigned long iflag = 0; 11414 11390 11415 11391 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); ··· 11418 11392 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11419 11393 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11420 11394 lpfc_nlp_put(sglq_entry->ndlp); 11395 + ndlp = sglq_entry->ndlp; 11421 11396 sglq_entry->ndlp = NULL; 11397 + 11398 + /* If the xri on the abts_els_sgl list is for the Fport 11399 + * node and the vport is unloading, the xri aborted wcqe 11400 + * likely isn't coming back. Just release the sgl. 11401 + */ 11402 + if ((vport->load_flag & FC_UNLOADING) && 11403 + ndlp->nlp_DID == Fabric_DID) { 11404 + list_del(&sglq_entry->list); 11405 + sglq_entry->state = SGL_FREED; 11406 + list_add_tail(&sglq_entry->list, 11407 + &phba->sli4_hba.lpfc_els_sgl_list); 11408 + } 11422 11409 } 11423 11410 } 11424 11411 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
+135 -9
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 209 209 210 210 spin_lock_irqsave(&ndlp->lock, iflags); 211 211 ndlp->nlp_flag |= NLP_IN_DEV_LOSS; 212 - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 212 + 213 + /* If there is a PLOGI in progress, and we are in a 214 + * NLP_NPR_2B_DISC state, don't turn off the flag. 215 + */ 216 + if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) 217 + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 213 218 214 219 /* 215 220 * The backend does not expect any more calls associated with this ··· 346 341 } 347 342 348 343 /** 344 + * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss 345 + * @vport: Pointer to vport object. 346 + * @ndlp: Pointer to remote node object. 347 + * 348 + * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of 349 + * node during dev_loss_tmo processing, then this function restores the nlp_put 350 + * kref decrement from lpfc_dev_loss_tmo_handler. 351 + **/ 352 + void 353 + lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, 354 + struct lpfc_nodelist *ndlp) 355 + { 356 + unsigned long iflags; 357 + 358 + spin_lock_irqsave(&ndlp->lock, iflags); 359 + if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) { 360 + ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS; 361 + spin_unlock_irqrestore(&ndlp->lock, iflags); 362 + lpfc_nlp_get(ndlp); 363 + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, 364 + "8438 Devloss timeout reversed on DID x%x " 365 + "refcnt %d ndlp %p flag x%x " 366 + "port_state = x%x\n", 367 + ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, 368 + ndlp->nlp_flag, vport->port_state); 369 + spin_lock_irqsave(&ndlp->lock, iflags); 370 + } 371 + spin_unlock_irqrestore(&ndlp->lock, iflags); 372 + } 373 + 374 + /** 349 375 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler 350 376 * @ndlp: Pointer to remote node object. 
351 377 * ··· 394 358 uint8_t *name; 395 359 int warn_on = 0; 396 360 int fcf_inuse = 0; 361 + bool recovering = false; 362 + struct fc_vport *fc_vport = NULL; 397 363 unsigned long iflags; 398 364 399 365 vport = ndlp->vport; ··· 432 394 433 395 /* Fabric nodes are done. */ 434 396 if (ndlp->nlp_type & NLP_FABRIC) { 397 + spin_lock_irqsave(&ndlp->lock, iflags); 398 + /* In massive vport configuration settings, it's possible 399 + * dev_loss_tmo fired during node recovery. So, check if 400 + * fabric nodes are in discovery states outstanding. 401 + */ 402 + switch (ndlp->nlp_DID) { 403 + case Fabric_DID: 404 + fc_vport = vport->fc_vport; 405 + if (fc_vport && 406 + fc_vport->vport_state == FC_VPORT_INITIALIZING) 407 + recovering = true; 408 + break; 409 + case Fabric_Cntl_DID: 410 + if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 411 + recovering = true; 412 + break; 413 + case FDMI_DID: 414 + fallthrough; 415 + case NameServer_DID: 416 + if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && 417 + ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) 418 + recovering = true; 419 + break; 420 + } 421 + spin_unlock_irqrestore(&ndlp->lock, iflags); 422 + 423 + /* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing 424 + * the following lpfc_nlp_put is necessary after fabric node is 425 + * recovered. 426 + */ 427 + if (recovering) { 428 + lpfc_printf_vlog(vport, KERN_INFO, 429 + LOG_DISCOVERY | LOG_NODE, 430 + "8436 Devloss timeout marked on " 431 + "DID x%x refcnt %d ndlp %p " 432 + "flag x%x port_state = x%x\n", 433 + ndlp->nlp_DID, kref_read(&ndlp->kref), 434 + ndlp, ndlp->nlp_flag, 435 + vport->port_state); 436 + spin_lock_irqsave(&ndlp->lock, iflags); 437 + ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS; 438 + spin_unlock_irqrestore(&ndlp->lock, iflags); 439 + } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 440 + /* Fabric node fully recovered before this dev_loss_tmo 441 + * queue work is processed. Thus, ignore the 442 + * dev_loss_tmo event. 
443 + */ 444 + lpfc_printf_vlog(vport, KERN_INFO, 445 + LOG_DISCOVERY | LOG_NODE, 446 + "8437 Devloss timeout ignored on " 447 + "DID x%x refcnt %d ndlp %p " 448 + "flag x%x port_state = x%x\n", 449 + ndlp->nlp_DID, kref_read(&ndlp->kref), 450 + ndlp, ndlp->nlp_flag, 451 + vport->port_state); 452 + return fcf_inuse; 453 + } 454 + 435 455 lpfc_nlp_put(ndlp); 436 456 return fcf_inuse; 437 457 } ··· 517 421 *(name+4), *(name+5), *(name+6), *(name+7), 518 422 ndlp->nlp_DID, ndlp->nlp_flag, 519 423 ndlp->nlp_state, ndlp->nlp_rpi); 424 + } 425 + 426 + /* If we are devloss, but we are in the process of rediscovering the 427 + * ndlp, don't issue a NLP_EVT_DEVICE_RM event. 428 + */ 429 + if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && 430 + ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) { 431 + return fcf_inuse; 520 432 } 521 433 522 434 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) ··· 1070 966 struct lpfc_nodelist *ndlp, *next_ndlp; 1071 967 1072 968 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1073 - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 969 + if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 970 + /* It's possible the FLOGI to the fabric node never 971 + * successfully completed and never registered with the 972 + * transport. In this case there is no way to clean up 973 + * the node. 
974 + */ 975 + if (ndlp->nlp_DID == Fabric_DID) { 976 + if (ndlp->nlp_prev_state == 977 + NLP_STE_UNUSED_NODE && 978 + !ndlp->fc4_xpt_flags) 979 + lpfc_nlp_put(ndlp); 980 + } 1074 981 continue; 982 + } 1075 983 1076 984 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || 1077 985 ((vport->port_type == LPFC_NPIV_PORT) && ··· 4467 4351 goto out; 4468 4352 } 4469 4353 4354 + lpfc_check_nlp_post_devloss(vport, ndlp); 4355 + 4470 4356 if (phba->sli_rev < LPFC_SLI_REV4) 4471 4357 ndlp->nlp_rpi = mb->un.varWords[0]; 4472 4358 ··· 4478 4360 ndlp->nlp_state); 4479 4361 4480 4362 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 4363 + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4481 4364 ndlp->nlp_type |= NLP_FABRIC; 4482 4365 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4483 4366 ··· 4568 4449 fc_remote_port_rolechg(rport, rport_ids.roles); 4569 4450 4570 4451 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 4571 - "3183 %s rport x%px DID x%x, role x%x\n", 4572 - __func__, rport, rport->port_id, rport->roles); 4452 + "3183 %s rport x%px DID x%x, role x%x refcnt %d\n", 4453 + __func__, rport, rport->port_id, rport->roles, 4454 + kref_read(&ndlp->kref)); 4573 4455 4574 4456 if ((rport->scsi_target_id != -1) && 4575 4457 (rport->scsi_target_id < LPFC_MAX_TARGET)) { ··· 4595 4475 4596 4476 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4597 4477 "3184 rport unregister x%06x, rport x%px " 4598 - "xptflg x%x\n", 4599 - ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags); 4478 + "xptflg x%x refcnt %d\n", 4479 + ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags, 4480 + kref_read(&ndlp->kref)); 4600 4481 4601 4482 fc_remote_port_delete(rport); 4602 4483 lpfc_nlp_put(ndlp); ··· 4646 4525 void 4647 4526 lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4648 4527 { 4649 - 4650 4528 unsigned long iflags; 4529 + 4530 + lpfc_check_nlp_post_devloss(vport, ndlp); 4651 4531 4652 4532 spin_lock_irqsave(&ndlp->lock, iflags); 4653 4533 if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { ··· 4801 4679 
/* Reg/Unreg for FCP and NVME Transport interface */ 4802 4680 if ((old_state == NLP_STE_MAPPED_NODE || 4803 4681 old_state == NLP_STE_UNMAPPED_NODE)) { 4804 - /* For nodes marked for ADISC, Handle unreg in ADISC cmpl */ 4805 - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) 4682 + /* For nodes marked for ADISC, Handle unreg in ADISC cmpl 4683 + * if linkup. In linkdown do unreg_node 4684 + */ 4685 + if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || 4686 + !lpfc_is_link_up(vport->phba)) 4806 4687 lpfc_nlp_unreg_node(vport, ndlp); 4807 4688 } 4808 4689 ··· 5358 5233 5359 5234 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5360 5235 if (rc == MBX_NOT_FINISHED) { 5236 + ndlp->nlp_flag &= ~NLP_UNREG_INP; 5361 5237 mempool_free(mbox, phba->mbox_mem_pool); 5362 5238 acc_plogi = 1; 5363 5239 }
+4
drivers/scsi/lpfc/lpfc_hw4.h
··· 673 673 #define lpfc_sliport_status_rdy_SHIFT 23 674 674 #define lpfc_sliport_status_rdy_MASK 0x1 675 675 #define lpfc_sliport_status_rdy_WORD word0 676 + #define lpfc_sliport_status_pldv_SHIFT 0 677 + #define lpfc_sliport_status_pldv_MASK 0x1 678 + #define lpfc_sliport_status_pldv_WORD word0 679 + #define CFG_PLD 0x3C 676 680 #define MAX_IF_TYPE_2_RESETS 6 677 681 678 682 #define LPFC_CTL_PORT_CTL_OFFSET 0x408
+120 -15
drivers/scsi/lpfc/lpfc_init.c
··· 68 68 static enum cpuhp_state lpfc_cpuhp_state; 69 69 /* Used when mapping IRQ vectors in a driver centric manner */ 70 70 static uint32_t lpfc_present_cpu; 71 + static bool lpfc_pldv_detect; 71 72 72 73 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); 73 74 static void lpfc_cpuhp_remove(struct lpfc_hba *phba); ··· 660 659 } 661 660 662 661 return 0; 662 + } 663 + 664 + /** 665 + * lpfc_sli4_refresh_params - update driver copy of params. 666 + * @phba: Pointer to HBA context object. 667 + * 668 + * This is called to refresh driver copy of dynamic fields from the 669 + * common_get_sli4_parameters descriptor. 670 + **/ 671 + int 672 + lpfc_sli4_refresh_params(struct lpfc_hba *phba) 673 + { 674 + LPFC_MBOXQ_t *mboxq; 675 + struct lpfc_mqe *mqe; 676 + struct lpfc_sli4_parameters *mbx_sli4_parameters; 677 + int length, rc; 678 + 679 + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 680 + if (!mboxq) 681 + return -ENOMEM; 682 + 683 + mqe = &mboxq->u.mqe; 684 + /* Read the port's SLI4 Config Parameters */ 685 + length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 686 + sizeof(struct lpfc_sli4_cfg_mhdr)); 687 + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 688 + LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 689 + length, LPFC_SLI4_MBX_EMBED); 690 + 691 + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 692 + if (unlikely(rc)) { 693 + mempool_free(mboxq, phba->mbox_mem_pool); 694 + return rc; 695 + } 696 + mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 697 + phba->sli4_hba.pc_sli4_params.mi_ver = 698 + bf_get(cfg_mi_ver, mbx_sli4_parameters); 699 + phba->sli4_hba.pc_sli4_params.cmf = 700 + bf_get(cfg_cmf, mbx_sli4_parameters); 701 + phba->sli4_hba.pc_sli4_params.pls = 702 + bf_get(cfg_pvl, mbx_sli4_parameters); 703 + 704 + mempool_free(mboxq, phba->mbox_mem_pool); 705 + return rc; 663 706 } 664 707 665 708 /** ··· 1651 1606 lpfc_sli4_offline_eratt(struct lpfc_hba *phba) 1652 1607 { 1653 1608 
spin_lock_irq(&phba->hbalock); 1609 + if (phba->link_state == LPFC_HBA_ERROR && 1610 + phba->hba_flag & HBA_PCI_ERR) { 1611 + spin_unlock_irq(&phba->hbalock); 1612 + return; 1613 + } 1654 1614 phba->link_state = LPFC_HBA_ERROR; 1655 1615 spin_unlock_irq(&phba->hbalock); 1656 1616 ··· 1995 1945 if (pci_channel_offline(phba->pcidev)) { 1996 1946 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1997 1947 "3166 pci channel is offline\n"); 1998 - lpfc_sli4_offline_eratt(phba); 1999 1948 return; 2000 1949 } 2001 1950 ··· 3692 3643 struct lpfc_vport **vports; 3693 3644 struct Scsi_Host *shost; 3694 3645 int i; 3646 + int offline = 0; 3695 3647 3696 3648 if (vport->fc_flag & FC_OFFLINE_MODE) 3697 3649 return; ··· 3700 3650 lpfc_block_mgmt_io(phba, mbx_action); 3701 3651 3702 3652 lpfc_linkdown(phba); 3653 + 3654 + offline = pci_channel_offline(phba->pcidev); 3703 3655 3704 3656 /* Issue an unreg_login to all nodes on all vports */ 3705 3657 vports = lpfc_create_vport_work_array(phba); ··· 3725 3673 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3726 3674 spin_unlock_irq(&ndlp->lock); 3727 3675 3728 - lpfc_unreg_rpi(vports[i], ndlp); 3676 + if (offline) { 3677 + spin_lock_irq(&ndlp->lock); 3678 + ndlp->nlp_flag &= ~(NLP_UNREG_INP | 3679 + NLP_RPI_REGISTERED); 3680 + spin_unlock_irq(&ndlp->lock); 3681 + } else { 3682 + lpfc_unreg_rpi(vports[i], ndlp); 3683 + } 3729 3684 /* 3730 3685 * Whenever an SLI4 port goes offline, free the 3731 3686 * RPI. Get a new RPI when the adapter port ··· 3753 3694 lpfc_disc_state_machine(vports[i], ndlp, 3754 3695 NULL, NLP_EVT_DEVICE_RECOVERY); 3755 3696 3756 - /* Don't remove the node unless the 3697 + /* Don't remove the node unless the node 3757 3698 * has been unregistered with the 3758 - * transport. If so, let dev_loss 3759 - * take care of the node. 3699 + * transport, and we're not in recovery 3700 + * before dev_loss_tmo triggered. 3701 + * Otherwise, let dev_loss take care of 3702 + * the node. 
3760 3703 */ 3761 - if (!(ndlp->fc4_xpt_flags & 3704 + if (!(ndlp->save_flags & 3705 + NLP_IN_RECOV_POST_DEV_LOSS) && 3706 + !(ndlp->fc4_xpt_flags & 3762 3707 (NVME_XPT_REGD | SCSI_XPT_REGD))) 3763 3708 lpfc_disc_state_machine 3764 3709 (vports[i], ndlp, ··· 4622 4559 /* Template for all vports this physical port creates */ 4623 4560 memcpy(&phba->vport_template, &lpfc_template, 4624 4561 sizeof(*template)); 4625 - phba->vport_template.shost_attrs = lpfc_vport_attrs; 4562 + phba->vport_template.shost_groups = lpfc_vport_groups; 4626 4563 phba->vport_template.eh_bus_reset_handler = NULL; 4627 4564 phba->vport_template.eh_host_reset_handler = NULL; 4628 4565 phba->vport_template.vendor_id = 0; ··· 5925 5862 uint32_t io_cnt; 5926 5863 uint32_t head, tail; 5927 5864 uint32_t busy, max_read; 5928 - uint64_t total, rcv, lat, mbpi; 5865 + uint64_t total, rcv, lat, mbpi, extra; 5929 5866 int timer_interval = LPFC_CMF_INTERVAL; 5930 5867 uint32_t ms; 5931 5868 struct lpfc_cgn_stat *cgs; ··· 5992 5929 phba->hba_flag & HBA_SETUP) { 5993 5930 mbpi = phba->cmf_last_sync_bw; 5994 5931 phba->cmf_last_sync_bw = 0; 5995 - lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total); 5932 + extra = 0; 5933 + 5934 + /* Calculate any extra bytes needed to account for the 5935 + * timer accuracy. If we are less than LPFC_CMF_INTERVAL 5936 + * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL 5937 + * add an extra 2%. 
The goal is to equalize total with a 5938 + * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1 5939 + */ 5940 + if (ms == LPFC_CMF_INTERVAL) 5941 + extra = div_u64(total, 50); 5942 + else if (ms < LPFC_CMF_INTERVAL) 5943 + extra = div_u64(total, 33); 5944 + lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); 5996 5945 } else { 5997 5946 /* For Monitor mode or link down we want mbpi 5998 5947 * to be the full link speed ··· 6503 6428 "3194 Unable to retrieve supported " 6504 6429 "speeds, rc = 0x%x\n", rc); 6505 6430 } 6431 + rc = lpfc_sli4_refresh_params(phba); 6432 + if (rc) { 6433 + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6434 + "3174 Unable to update pls support, " 6435 + "rc x%x\n", rc); 6436 + } 6506 6437 vports = lpfc_create_vport_work_array(phba); 6507 6438 if (vports != NULL) { 6508 6439 for (i = 0; i <= phba->max_vports && vports[i] != NULL; ··· 6619 6538 /* Cannot find existing Fabric ndlp, so allocate a new one */ 6620 6539 ndlp = lpfc_nlp_init(vport, Fabric_DID); 6621 6540 if (!ndlp) 6622 - return 0; 6541 + return NULL; 6623 6542 /* Set the node type */ 6624 6543 ndlp->nlp_type |= NLP_FABRIC; 6625 6544 /* Put ndlp onto node list */ ··· 7439 7358 out_disable_device: 7440 7359 pci_disable_device(pdev); 7441 7360 out_error: 7442 - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7361 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7443 7362 "1401 Failed to enable pci device\n"); 7444 7363 return -ENODEV; 7445 7364 } ··· 8482 8401 phba->lpfc_stop_port = lpfc_stop_port_s4; 8483 8402 break; 8484 8403 default: 8485 - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 8404 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8486 8405 "1431 Invalid HBA PCI-device group: 0x%x\n", 8487 8406 dev_grp); 8488 8407 return -ENODEV; ··· 9414 9333 phba->work_status[0], 9415 9334 phba->work_status[1]); 9416 9335 port_error = -ENODEV; 9336 + break; 9417 9337 } 9338 + 9339 + if (lpfc_pldv_detect && 9340 + bf_get(lpfc_sli_intf_sli_family, 9341 + &phba->sli4_hba.sli_intf) 
== 9342 + LPFC_SLI_INTF_FAMILY_G6) 9343 + pci_write_config_byte(phba->pcidev, 9344 + LPFC_SLI_INTF, CFG_PLD); 9418 9345 break; 9419 9346 case LPFC_SLI_INTF_IF_TYPE_1: 9420 9347 default: ··· 11630 11541 goto out; 11631 11542 } 11632 11543 11544 + if (bf_get(lpfc_sliport_status_pldv, &reg_data)) 11545 + lpfc_pldv_detect = true; 11546 + 11633 11547 if (!port_reset) { 11634 11548 /* 11635 11549 * Reset the port now ··· 11715 11623 /* There is no SLI3 failback for SLI4 devices. */ 11716 11624 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 11717 11625 LPFC_SLI_INTF_VALID) { 11718 - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11626 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11719 11627 "2894 SLI_INTF reg contents invalid " 11720 11628 "sli_intf reg 0x%x\n", 11721 11629 phba->sli4_hba.sli_intf.word0); ··· 13460 13368 atomic_set(&phba->cgn_sync_alarm_cnt, 0); 13461 13369 atomic_set(&phba->cgn_sync_warn_cnt, 0); 13462 13370 13463 - atomic64_set(&phba->cgn_acqe_stat.alarm, 0); 13464 - atomic64_set(&phba->cgn_acqe_stat.warn, 0); 13465 13371 atomic_set(&phba->cgn_driver_evt_cnt, 0); 13466 13372 atomic_set(&phba->cgn_latency_evt_cnt, 0); 13467 13373 atomic64_set(&phba->cgn_latency_evt, 0); ··· 14170 14080 return error; 14171 14081 } 14172 14082 14083 + /* Init cpu_map array */ 14084 + lpfc_cpu_map_array_init(phba); 14085 + /* Init hba_eq_hdl array */ 14086 + lpfc_hba_eq_hdl_array_init(phba); 14173 14087 /* Configure and enable interrupt */ 14174 14088 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 14175 14089 if (intr_mode == LPFC_INTR_ERROR) { ··· 15127 15033 lpfc_sli4_prep_dev_for_recover(phba); 15128 15034 return PCI_ERS_RESULT_CAN_RECOVER; 15129 15035 case pci_channel_io_frozen: 15036 + phba->hba_flag |= HBA_PCI_ERR; 15130 15037 /* Fatal error, prepare for slot reset */ 15131 15038 lpfc_sli4_prep_dev_for_reset(phba); 15132 15039 return PCI_ERS_RESULT_NEED_RESET; 15133 15040 case pci_channel_io_perm_failure: 15041 + phba->hba_flag |= HBA_PCI_ERR; 
15134 15042 /* Permanent failure, prepare for device down */ 15135 15043 lpfc_sli4_prep_dev_for_perm_failure(phba); 15136 15044 return PCI_ERS_RESULT_DISCONNECT; 15137 15045 default: 15046 + phba->hba_flag |= HBA_PCI_ERR; 15138 15047 /* Unknown state, prepare and request slot reset */ 15139 15048 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 15140 15049 "2825 Unknown PCI error state: x%x\n", state); ··· 15181 15084 15182 15085 pci_restore_state(pdev); 15183 15086 15087 + phba->hba_flag &= ~HBA_PCI_ERR; 15184 15088 /* 15185 15089 * As the new kernel behavior of pci_restore_state() API call clears 15186 15090 * device saved_state flag, need to save the restored state again. ··· 15204 15106 return PCI_ERS_RESULT_DISCONNECT; 15205 15107 } else 15206 15108 phba->intr_mode = intr_mode; 15109 + lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); 15207 15110 15208 15111 /* Log the current active interrupt mode */ 15209 15112 lpfc_log_intr_mode(phba, phba->intr_mode); ··· 15405 15306 struct Scsi_Host *shost = pci_get_drvdata(pdev); 15406 15307 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 15407 15308 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 15309 + 15310 + if (phba->link_state == LPFC_HBA_ERROR && 15311 + phba->hba_flag & HBA_IOQ_FLUSH) 15312 + return PCI_ERS_RESULT_NEED_RESET; 15408 15313 15409 15314 switch (phba->pci_dev_grp) { 15410 15315 case LPFC_PCI_DEV_LP: ··· 15625 15522 15626 15523 /* Initialize in case vector mapping is needed */ 15627 15524 lpfc_present_cpu = num_present_cpus(); 15525 + 15526 + lpfc_pldv_detect = false; 15628 15527 15629 15528 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 15630 15529 "lpfc/sli4:online",
+60 -10
drivers/scsi/lpfc/lpfc_nvme.c
··· 209 209 * calling state machine to remove the node. 210 210 */ 211 211 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 212 - "6146 remoteport delete of remoteport x%px\n", 213 - remoteport); 212 + "6146 remoteport delete of remoteport x%px, ndlp x%px " 213 + "DID x%x xflags x%x\n", 214 + remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags); 214 215 spin_lock_irq(&ndlp->lock); 215 216 216 217 /* The register rebind might have occurred before the delete ··· 937 936 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 938 937 int cpu; 939 938 #endif 939 + int offline = 0; 940 940 941 941 /* Sanity check on return of outstanding command */ 942 942 if (!lpfc_ncmd) { ··· 1099 1097 nCmd->transferred_length = 0; 1100 1098 nCmd->rcv_rsplen = 0; 1101 1099 nCmd->status = NVME_SC_INTERNAL; 1100 + offline = pci_channel_offline(vport->phba->pcidev); 1102 1101 } 1103 1102 } 1104 1103 1105 1104 /* pick up SLI4 exhange busy condition */ 1106 - if (bf_get(lpfc_wcqe_c_xb, wcqe)) 1105 + if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline) 1107 1106 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; 1108 1107 else 1109 1108 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; ··· 1299 1296 struct sli4_sge *first_data_sgl; 1300 1297 struct ulp_bde64 *bde; 1301 1298 dma_addr_t physaddr = 0; 1302 - uint32_t num_bde = 0; 1303 1299 uint32_t dma_len = 0; 1304 1300 uint32_t dma_offset = 0; 1305 1301 int nseg, i, j; ··· 1352 1350 } 1353 1351 1354 1352 sgl->word2 = 0; 1355 - if ((num_bde + 1) == nseg) { 1353 + if (nseg == 1) { 1356 1354 bf_set(lpfc_sli4_sge_last, sgl, 1); 1357 1355 bf_set(lpfc_sli4_sge_type, sgl, 1358 1356 LPFC_SGE_TYPE_DATA); ··· 1421 1419 1422 1420 j++; 1423 1421 } 1424 - if (phba->cfg_enable_pbde) { 1425 - /* Use PBDE support for first SGL only, offset == 0 */ 1422 + 1423 + /* PBDE support for first data SGE only */ 1424 + if (nseg == 1 && phba->cfg_enable_pbde) { 1426 1425 /* Words 13-15 */ 1427 1426 bde = (struct ulp_bde64 *) 1428 1427 &wqe->words[13]; ··· 1434 1431 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1435 1432 
bde->tus.w = cpu_to_le32(bde->tus.w); 1436 1433 1437 - /* Word 11 */ 1434 + /* Word 11 - set PBDE bit */ 1438 1435 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 1439 1436 } else { 1440 1437 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 1441 - bf_set(wqe_pbde, &wqe->generic.wqe_com, 0); 1438 + /* Word 11 - PBDE bit disabled by default template */ 1442 1439 } 1443 1440 1444 1441 } else { ··· 2169 2166 abts_nvme = 0; 2170 2167 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2171 2168 qp = &phba->sli4_hba.hdwq[i]; 2169 + if (!vport || !vport->localport || 2170 + !qp || !qp->io_wq) 2171 + return; 2172 + 2172 2173 pring = qp->io_wq->pring; 2173 2174 if (!pring) 2174 2175 continue; ··· 2180 2173 abts_scsi += qp->abts_scsi_io_bufs; 2181 2174 abts_nvme += qp->abts_nvme_io_bufs; 2182 2175 } 2176 + if (!vport || !vport->localport || 2177 + vport->phba->hba_flag & HBA_PCI_ERR) 2178 + return; 2179 + 2183 2180 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2184 2181 "6176 Lport x%px Localport x%px wait " 2185 2182 "timed out. Pending %d [%d:%d]. " ··· 2223 2212 return; 2224 2213 2225 2214 localport = vport->localport; 2215 + if (!localport) 2216 + return; 2226 2217 lport = (struct lpfc_nvme_lport *)localport->private; 2227 2218 2228 2219 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, ··· 2541 2528 * return values is ignored. The upcall is a courtesy to the 2542 2529 * transport. 
2543 2530 */ 2544 - if (vport->load_flag & FC_UNLOADING) 2531 + if (vport->load_flag & FC_UNLOADING || 2532 + unlikely(vport->phba->hba_flag & HBA_PCI_ERR)) 2545 2533 (void)nvme_fc_set_remoteport_devloss(remoteport, 0); 2546 2534 2547 2535 ret = nvme_fc_unregister_remoteport(remoteport); ··· 2568 2554 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2569 2555 "6168 State error: lport x%px, rport x%px FCID x%06x\n", 2570 2556 vport->localport, ndlp->rport, ndlp->nlp_DID); 2557 + } 2558 + 2559 + /** 2560 + * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort 2561 + * @phba: pointer to lpfc hba data structure. 2562 + * @lpfc_ncmd: The nvme job structure for the request being aborted. 2563 + * 2564 + * This routine is invoked by the worker thread to process a SLI4 fast-path 2565 + * NVME aborted xri. Aborted NVME IO commands are completed to the transport 2566 + * here. 2567 + **/ 2568 + void 2569 + lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba, 2570 + struct lpfc_io_buf *lpfc_ncmd) 2571 + { 2572 + struct nvmefc_fcp_req *nvme_cmd = NULL; 2573 + 2574 + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2575 + "6533 %s nvme_cmd %p tag x%x abort complete and " 2576 + "xri released\n", __func__, 2577 + lpfc_ncmd->nvmeCmd, 2578 + lpfc_ncmd->cur_iocbq.iotag); 2579 + 2580 + /* Aborted NVME commands are required to not complete 2581 + * before the abort exchange command fully completes. 2582 + * Once completed, it is available via the put list. 2583 + */ 2584 + if (lpfc_ncmd->nvmeCmd) { 2585 + nvme_cmd = lpfc_ncmd->nvmeCmd; 2586 + nvme_cmd->transferred_length = 0; 2587 + nvme_cmd->rcv_rsplen = 0; 2588 + nvme_cmd->status = NVME_SC_INTERNAL; 2589 + nvme_cmd->done(nvme_cmd); 2590 + lpfc_ncmd->nvmeCmd = NULL; 2591 + } 2592 + lpfc_release_nvme_buf(phba, lpfc_ncmd); 2571 2593 } 2572 2594 2573 2595 /**
+21 -23
drivers/scsi/lpfc/lpfc_nvmet.c
··· 2708 2708 struct ulp_bde64 *bde; 2709 2709 dma_addr_t physaddr; 2710 2710 int i, cnt, nsegs; 2711 - int do_pbde; 2711 + bool use_pbde = false; 2712 2712 int xc = 1; 2713 2713 2714 2714 if (!lpfc_is_link_up(phba)) { ··· 2816 2816 if (!xc) 2817 2817 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0); 2818 2818 2819 - /* Word 11 - set sup, irsp, irsplen later */ 2820 - do_pbde = 0; 2821 - 2822 2819 /* Word 12 */ 2823 2820 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 2824 2821 ··· 2893 2896 if (!xc) 2894 2897 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); 2895 2898 2896 - /* Word 11 - set pbde later */ 2897 - if (phba->cfg_enable_pbde) { 2898 - do_pbde = 1; 2899 + /* Word 11 - check for pbde */ 2900 + if (nsegs == 1 && phba->cfg_enable_pbde) { 2901 + use_pbde = true; 2902 + /* Word 11 - PBDE bit already preset by template */ 2899 2903 } else { 2904 + /* Overwrite default template setting */ 2900 2905 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); 2901 - do_pbde = 0; 2902 2906 } 2903 2907 2904 2908 /* Word 12 */ ··· 2970 2972 ((rsp->rsplen >> 2) - 1)); 2971 2973 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); 2972 2974 } 2973 - do_pbde = 0; 2974 2975 2975 2976 /* Word 12 */ 2976 2977 wqe->fcp_trsp.rsvd_12_15[0] = 0; ··· 3004 3007 bf_set(lpfc_sli4_sge_last, sgl, 1); 3005 3008 sgl->word2 = cpu_to_le32(sgl->word2); 3006 3009 sgl->sge_len = cpu_to_le32(cnt); 3007 - if (i == 0) { 3008 - bde = (struct ulp_bde64 *)&wqe->words[13]; 3009 - if (do_pbde) { 3010 - /* Words 13-15 (PBDE) */ 3011 - bde->addrLow = sgl->addr_lo; 3012 - bde->addrHigh = sgl->addr_hi; 3013 - bde->tus.f.bdeSize = 3014 - le32_to_cpu(sgl->sge_len); 3015 - bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3016 - bde->tus.w = cpu_to_le32(bde->tus.w); 3017 - } else { 3018 - memset(bde, 0, sizeof(struct ulp_bde64)); 3019 - } 3020 - } 3021 3010 sgl++; 3022 3011 ctxp->offset += cnt; 3012 + } 3013 + 3014 + bde = (struct ulp_bde64 *)&wqe->words[13]; 3015 + if (use_pbde) { 3016 + /* decrement sgl ptr backwards once to 
first data sge */ 3017 + sgl--; 3018 + 3019 + /* Words 13-15 (PBDE) */ 3020 + bde->addrLow = sgl->addr_lo; 3021 + bde->addrHigh = sgl->addr_hi; 3022 + bde->tus.f.bdeSize = le32_to_cpu(sgl->sge_len); 3023 + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3024 + bde->tus.w = cpu_to_le32(bde->tus.w); 3025 + } else { 3026 + memset(bde, 0, sizeof(struct ulp_bde64)); 3023 3027 } 3024 3028 ctxp->state = LPFC_NVME_STE_DATA; 3025 3029 ctxp->entry_cnt++;
+79 -52
drivers/scsi/lpfc/lpfc_scsi.c
··· 493 493 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, 494 494 struct sli4_wcqe_xri_aborted *axri, int idx) 495 495 { 496 - uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 497 - uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 496 + u16 xri = 0; 497 + u16 rxid = 0; 498 498 struct lpfc_io_buf *psb, *next_psb; 499 499 struct lpfc_sli4_hdw_queue *qp; 500 500 unsigned long iflag = 0; ··· 504 504 int rrq_empty = 0; 505 505 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; 506 506 struct scsi_cmnd *cmd; 507 + int offline = 0; 507 508 508 509 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 509 510 return; 510 - 511 + offline = pci_channel_offline(phba->pcidev); 512 + if (!offline) { 513 + xri = bf_get(lpfc_wcqe_xa_xri, axri); 514 + rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 515 + } 511 516 qp = &phba->sli4_hba.hdwq[idx]; 512 517 spin_lock_irqsave(&phba->hbalock, iflag); 513 518 spin_lock(&qp->abts_io_buf_list_lock); 514 519 list_for_each_entry_safe(psb, next_psb, 515 520 &qp->lpfc_abts_io_buf_list, list) { 521 + if (offline) 522 + xri = psb->cur_iocbq.sli4_xritag; 516 523 if (psb->cur_iocbq.sli4_xritag == xri) { 517 524 list_del_init(&psb->list); 518 525 psb->flags &= ~LPFC_SBUF_XBUSY; ··· 528 521 qp->abts_nvme_io_bufs--; 529 522 spin_unlock(&qp->abts_io_buf_list_lock); 530 523 spin_unlock_irqrestore(&phba->hbalock, iflag); 531 - lpfc_sli4_nvme_xri_aborted(phba, axri, psb); 532 - return; 524 + if (!offline) { 525 + lpfc_sli4_nvme_xri_aborted(phba, axri, 526 + psb); 527 + return; 528 + } 529 + lpfc_sli4_nvme_pci_offline_aborted(phba, psb); 530 + spin_lock_irqsave(&phba->hbalock, iflag); 531 + spin_lock(&qp->abts_io_buf_list_lock); 532 + continue; 533 533 } 534 534 qp->abts_scsi_io_bufs--; 535 535 spin_unlock(&qp->abts_io_buf_list_lock); ··· 548 534 549 535 rrq_empty = list_empty(&phba->active_rrq_list); 550 536 spin_unlock_irqrestore(&phba->hbalock, iflag); 551 - if (ndlp) { 537 + if (ndlp && !offline) { 552 538 lpfc_set_rrq_active(phba, ndlp, 
553 539 psb->cur_iocbq.sli4_lxritag, rxid, 1); 554 540 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 555 541 } 556 542 557 - if (phba->cfg_fcp_wait_abts_rsp) { 543 + if (phba->cfg_fcp_wait_abts_rsp || offline) { 558 544 spin_lock_irqsave(&psb->buf_lock, iflag); 559 545 cmd = psb->pCmd; 560 546 psb->pCmd = NULL; ··· 564 550 * scsi_done upcall. 565 551 */ 566 552 if (cmd) 567 - cmd->scsi_done(cmd); 553 + scsi_done(cmd); 568 554 569 555 /* 570 556 * We expect there is an abort thread waiting ··· 581 567 lpfc_release_scsi_buf_s4(phba, psb); 582 568 if (rrq_empty) 583 569 lpfc_worker_wake_up(phba); 584 - return; 570 + if (!offline) 571 + return; 572 + spin_lock_irqsave(&phba->hbalock, iflag); 573 + spin_lock(&qp->abts_io_buf_list_lock); 574 + continue; 585 575 } 586 576 } 587 577 spin_unlock(&qp->abts_io_buf_list_lock); 588 - for (i = 1; i <= phba->sli.last_iotag; i++) { 589 - iocbq = phba->sli.iocbq_lookup[i]; 578 + if (!offline) { 579 + for (i = 1; i <= phba->sli.last_iotag; i++) { 580 + iocbq = phba->sli.iocbq_lookup[i]; 590 581 591 - if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 592 - (iocbq->iocb_flag & LPFC_IO_LIBDFC)) 593 - continue; 594 - if (iocbq->sli4_xritag != xri) 595 - continue; 596 - psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 597 - psb->flags &= ~LPFC_SBUF_XBUSY; 598 - spin_unlock_irqrestore(&phba->hbalock, iflag); 599 - if (!list_empty(&pring->txq)) 600 - lpfc_worker_wake_up(phba); 601 - return; 602 - 582 + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 583 + (iocbq->iocb_flag & LPFC_IO_LIBDFC)) 584 + continue; 585 + if (iocbq->sli4_xritag != xri) 586 + continue; 587 + psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 588 + psb->flags &= ~LPFC_SBUF_XBUSY; 589 + spin_unlock_irqrestore(&phba->hbalock, iflag); 590 + if (!list_empty(&pring->txq)) 591 + lpfc_worker_wake_up(phba); 592 + return; 593 + } 603 594 } 604 595 spin_unlock_irqrestore(&phba->hbalock, iflag); 605 596 } ··· 894 875 bpl += 2; 895 876 if (scsi_sg_count(scsi_cmnd)) { 896 877 /* 
897 - * The driver stores the segment count returned from pci_map_sg 878 + * The driver stores the segment count returned from dma_map_sg 898 879 * because this a count of dma-mappings used to map the use_sg 899 880 * pages. They are not guaranteed to be the same for those 900 881 * architectures that implement an IOMMU. ··· 2589 2570 bpl += 2; 2590 2571 if (scsi_sg_count(scsi_cmnd)) { 2591 2572 /* 2592 - * The driver stores the segment count returned from pci_map_sg 2573 + * The driver stores the segment count returned from dma_map_sg 2593 2574 * because this a count of dma-mappings used to map the use_sg 2594 2575 * pages. They are not guaranteed to be the same for those 2595 2576 * architectures that implement an IOMMU. ··· 3234 3215 struct lpfc_vport *vport = phba->pport; 3235 3216 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3236 3217 dma_addr_t physaddr; 3237 - uint32_t num_bde = 0; 3238 3218 uint32_t dma_len; 3239 3219 uint32_t dma_offset = 0; 3240 3220 int nseg, i, j; ··· 3249 3231 */ 3250 3232 if (scsi_sg_count(scsi_cmnd)) { 3251 3233 /* 3252 - * The driver stores the segment count returned from pci_map_sg 3234 + * The driver stores the segment count returned from dma_map_sg 3253 3235 * because this a count of dma-mappings used to map the use_sg 3254 3236 * pages. They are not guaranteed to be the same for those 3255 3237 * architectures that implement an IOMMU. ··· 3295 3277 j = 2; 3296 3278 for (i = 0; i < nseg; i++) { 3297 3279 sgl->word2 = 0; 3298 - if ((num_bde + 1) == nseg) { 3280 + if (nseg == 1) { 3299 3281 bf_set(lpfc_sli4_sge_last, sgl, 1); 3300 3282 bf_set(lpfc_sli4_sge_type, sgl, 3301 3283 LPFC_SGE_TYPE_DATA); ··· 3364 3346 3365 3347 j++; 3366 3348 } 3367 - /* 3368 - * Setup the first Payload BDE. For FCoE we just key off 3369 - * Performance Hints, for FC we use lpfc_enable_pbde. 3370 - * We populate words 13-15 of IOCB/WQE. 3349 + 3350 + /* PBDE support for first data SGE only. 3351 + * For FCoE, we key off Performance Hints. 
3352 + * For FC, we key off lpfc_enable_pbde. 3371 3353 */ 3372 - if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3373 - phba->cfg_enable_pbde) { 3354 + if (nseg == 1 && 3355 + ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3356 + phba->cfg_enable_pbde)) { 3357 + /* Words 13-15 */ 3374 3358 bde = (struct ulp_bde64 *) 3375 3359 &wqe->words[13]; 3376 3360 bde->addrLow = first_data_sgl->addr_lo; ··· 3382 3362 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3383 3363 bde->tus.w = cpu_to_le32(bde->tus.w); 3384 3364 3365 + /* Word 11 - set PBDE bit */ 3366 + bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 3385 3367 } else { 3386 3368 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 3369 + /* Word 11 - PBDE bit disabled by default template */ 3387 3370 } 3388 3371 } else { 3389 3372 sgl += 1; 3390 - /* clear the last flag in the fcp_rsp map entry */ 3373 + /* set the last flag in the fcp_rsp map entry */ 3391 3374 sgl->word2 = le32_to_cpu(sgl->word2); 3392 3375 bf_set(lpfc_sli4_sge_last, sgl, 1); 3393 3376 sgl->word2 = cpu_to_le32(sgl->word2); ··· 3402 3379 memset(bde, 0, (sizeof(uint32_t) * 3)); 3403 3380 } 3404 3381 } 3405 - 3406 - /* Word 11 */ 3407 - if (phba->cfg_enable_pbde) 3408 - bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 3409 3382 3410 3383 /* 3411 3384 * Finish initializing those IOCB fields that are dependent on the ··· 3488 3469 */ 3489 3470 if (scsi_sg_count(scsi_cmnd)) { 3490 3471 /* 3491 - * The driver stores the segment count returned from pci_map_sg 3472 + * The driver stores the segment count returned from dma_map_sg 3492 3473 * because this a count of dma-mappings used to map the use_sg 3493 3474 * pages. They are not guaranteed to be the same for those 3494 3475 * architectures that implement an IOMMU. 
··· 3960 3941 int cpu; 3961 3942 3962 3943 /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ 3963 - if (phba->cmf_active_mode == LPFC_CFG_MANAGED) { 3944 + if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 3945 + phba->cmf_max_bytes_per_interval) { 3964 3946 total = 0; 3965 3947 for_each_present_cpu(cpu) { 3966 3948 cgs = per_cpu_ptr(phba->cmf_stat, cpu); ··· 4501 4481 goto out; 4502 4482 4503 4483 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4504 - cmd->scsi_done(cmd); 4484 + scsi_done(cmd); 4505 4485 4506 4486 /* 4507 4487 * If there is an abort thread waiting for command completion ··· 4770 4750 #endif 4771 4751 4772 4752 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4773 - cmd->scsi_done(cmd); 4753 + scsi_done(cmd); 4774 4754 4775 4755 /* 4776 4756 * If there is an abort thread waiting for command completion ··· 5115 5095 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4; 5116 5096 break; 5117 5097 default: 5118 - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5098 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5119 5099 "1418 Invalid HBA PCI-device group: 0x%x\n", 5120 5100 dev_grp); 5121 5101 return -ENODEV; ··· 5842 5822 shost); 5843 5823 5844 5824 out_fail_command: 5845 - cmnd->scsi_done(cmnd); 5825 + scsi_done(cmnd); 5846 5826 return 0; 5847 5827 } 5848 5828 ··· 6475 6455 6476 6456 /* Issue LOGO, if no LOGO is outstanding */ 6477 6457 spin_lock_irqsave(&pnode->lock, flags); 6478 - if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) && 6458 + if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) && 6479 6459 !pnode->logo_waitq) { 6480 6460 pnode->logo_waitq = &waitq; 6481 6461 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6482 6462 pnode->nlp_flag |= NLP_ISSUE_LOGO; 6483 - pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; 6463 + pnode->save_flags |= NLP_WAIT_FOR_LOGO; 6484 6464 spin_unlock_irqrestore(&pnode->lock, flags); 6485 6465 lpfc_unreg_rpi(vport, pnode); 6486 6466 wait_event_timeout(waitq, 6487 - 
(!(pnode->upcall_flags & 6467 + (!(pnode->save_flags & 6488 6468 NLP_WAIT_FOR_LOGO)), 6489 6469 msecs_to_jiffies(dev_loss_tmo * 6490 6470 1000)); 6491 6471 6492 - if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { 6472 + if (pnode->save_flags & NLP_WAIT_FOR_LOGO) { 6493 6473 lpfc_printf_vlog(vport, KERN_ERR, logit, 6494 6474 "0725 SCSI layer TGTRST " 6495 6475 "failed & LOGO TMO (%d, %llu) " 6496 6476 "return x%x\n", 6497 6477 tgt_id, lun_id, status); 6498 6478 spin_lock_irqsave(&pnode->lock, flags); 6499 - pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 6479 + pnode->save_flags &= ~NLP_WAIT_FOR_LOGO; 6500 6480 } else { 6501 6481 spin_lock_irqsave(&pnode->lock, flags); 6502 6482 } ··· 6647 6627 rc = lpfc_sli_brdrestart(phba); 6648 6628 if (rc) 6649 6629 goto error; 6630 + 6631 + /* Wait for successful restart of adapter */ 6632 + if (phba->sli_rev < LPFC_SLI_REV4) { 6633 + rc = lpfc_sli_chipset_init(phba); 6634 + if (rc) 6635 + goto error; 6636 + } 6650 6637 6651 6638 rc = lpfc_online(phba); 6652 6639 if (rc) ··· 7209 7182 .this_id = -1, 7210 7183 .sg_tablesize = 1, 7211 7184 .cmd_per_lun = 1, 7212 - .shost_attrs = lpfc_hba_attrs, 7185 + .shost_groups = lpfc_hba_groups, 7213 7186 .max_sectors = 0xFFFFFFFF, 7214 7187 .vendor_id = LPFC_NL_VENDOR_ID, 7215 7188 .track_queue_depth = 0, ··· 7235 7208 .this_id = -1, 7236 7209 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 7237 7210 .cmd_per_lun = LPFC_CMD_PER_LUN, 7238 - .shost_attrs = lpfc_hba_attrs, 7211 + .shost_groups = lpfc_hba_groups, 7239 7212 .max_sectors = 0xFFFFFFFF, 7240 7213 .vendor_id = LPFC_NL_VENDOR_ID, 7241 7214 .change_queue_depth = scsi_change_queue_depth,
+138 -59
drivers/scsi/lpfc/lpfc_sli.c
··· 1404 1404 } 1405 1405 1406 1406 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1407 - (sglq->state != SGL_XRI_ABORTED)) { 1407 + (!(unlikely(pci_channel_offline(phba->pcidev)))) && 1408 + sglq->state != SGL_XRI_ABORTED) { 1408 1409 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1409 1410 iflag); 1410 1411 ··· 4584 4583 lpfc_sli_cancel_iocbs(phba, &txq, 4585 4584 IOSTAT_LOCAL_REJECT, 4586 4585 IOERR_SLI_DOWN); 4587 - /* Flush the txcmpq */ 4586 + /* Flush the txcmplq */ 4588 4587 lpfc_sli_cancel_iocbs(phba, &txcmplq, 4589 4588 IOSTAT_LOCAL_REJECT, 4590 4589 IOERR_SLI_DOWN); 4590 + if (unlikely(pci_channel_offline(phba->pcidev))) 4591 + lpfc_sli4_io_xri_aborted(phba, NULL, 0); 4591 4592 } 4592 4593 } else { 4593 4594 pring = &psli->sli3_ring[LPFC_FCP_RING]; ··· 7764 7761 7765 7762 /* Zero out Congestion Signal ACQE counter */ 7766 7763 phba->cgn_acqe_cnt = 0; 7767 - atomic64_set(&phba->cgn_acqe_stat.warn, 0); 7768 - atomic64_set(&phba->cgn_acqe_stat.alarm, 0); 7769 7764 7770 7765 acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq, 7771 7766 &pmb->u.mqe.un.set_feature); ··· 7891 7890 lpfc_cmf_setup(struct lpfc_hba *phba) 7892 7891 { 7893 7892 LPFC_MBOXQ_t *mboxq; 7894 - struct lpfc_mqe *mqe; 7895 7893 struct lpfc_dmabuf *mp; 7896 7894 struct lpfc_pc_sli4_params *sli4_params; 7897 - struct lpfc_sli4_parameters *mbx_sli4_parameters; 7898 - int length; 7899 7895 int rc, cmf, mi_ver; 7896 + 7897 + rc = lpfc_sli4_refresh_params(phba); 7898 + if (unlikely(rc)) 7899 + return rc; 7900 7900 7901 7901 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7902 7902 if (!mboxq) 7903 7903 return -ENOMEM; 7904 - mqe = &mboxq->u.mqe; 7905 7904 7906 - /* Read the port's SLI4 Config Parameters */ 7907 - length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 7908 - sizeof(struct lpfc_sli4_cfg_mhdr)); 7909 - lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7910 - LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 7911 - length, LPFC_SLI4_MBX_EMBED); 7912 - 7913 - rc = 
lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7914 - if (unlikely(rc)) { 7915 - mempool_free(mboxq, phba->mbox_mem_pool); 7916 - return rc; 7917 - } 7918 - 7919 - /* Gather info on CMF and MI support */ 7920 7905 sli4_params = &phba->sli4_hba.pc_sli4_params; 7921 - mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 7922 - sli4_params->mi_ver = bf_get(cfg_mi_ver, mbx_sli4_parameters); 7923 - sli4_params->cmf = bf_get(cfg_cmf, mbx_sli4_parameters); 7924 7906 7925 7907 /* Are we forcing MI off via module parameter? */ 7926 7908 if (!phba->cfg_enable_mi) ··· 7998 8014 /* initialize congestion buffer info */ 7999 8015 lpfc_init_congestion_buf(phba); 8000 8016 lpfc_init_congestion_stat(phba); 8017 + 8018 + /* Zero out Congestion Signal counters */ 8019 + atomic64_set(&phba->cgn_acqe_stat.alarm, 0); 8020 + atomic64_set(&phba->cgn_acqe_stat.warn, 0); 8001 8021 } 8002 8022 8003 8023 rc = lpfc_sli4_cgn_params_read(phba); ··· 8141 8153 struct lpfc_vport *vport = phba->pport; 8142 8154 struct lpfc_dmabuf *mp; 8143 8155 struct lpfc_rqb *rqbp; 8156 + u32 flg; 8144 8157 8145 8158 /* Perform a PCI function reset to start from clean */ 8146 8159 rc = lpfc_pci_function_reset(phba); ··· 8155 8166 else { 8156 8167 spin_lock_irq(&phba->hbalock); 8157 8168 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 8169 + flg = phba->sli.sli_flag; 8158 8170 spin_unlock_irq(&phba->hbalock); 8171 + /* Allow a little time after setting SLI_ACTIVE for any polled 8172 + * MBX commands to complete via BSG. 8173 + */ 8174 + for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { 8175 + msleep(20); 8176 + spin_lock_irq(&phba->hbalock); 8177 + flg = phba->sli.sli_flag; 8178 + spin_unlock_irq(&phba->hbalock); 8179 + } 8159 8180 } 8160 8181 8161 8182 lpfc_sli4_dip(phba); ··· 9749 9750 "(%d):2541 Mailbox command x%x " 9750 9751 "(x%x/x%x) failure: " 9751 9752 "mqe_sta: x%x mcqe_sta: x%x/x%x " 9752 - "Data: x%x x%x\n,", 9753 + "Data: x%x x%x\n", 9753 9754 mboxq->vport ? 
mboxq->vport->vpi : 0, 9754 9755 mboxq->u.mb.mbxCommand, 9755 9756 lpfc_sli_config_mbox_subsys_get(phba, ··· 9783 9784 "(%d):2597 Sync Mailbox command " 9784 9785 "x%x (x%x/x%x) failure: " 9785 9786 "mqe_sta: x%x mcqe_sta: x%x/x%x " 9786 - "Data: x%x x%x\n,", 9787 + "Data: x%x x%x\n", 9787 9788 mboxq->vport ? mboxq->vport->vpi : 0, 9788 9789 mboxq->u.mb.mbxCommand, 9789 9790 lpfc_sli_config_mbox_subsys_get(phba, ··· 10009 10010 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 10010 10011 break; 10011 10012 default: 10012 - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10013 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10013 10014 "1420 Invalid HBA PCI-device group: 0x%x\n", 10014 10015 dev_grp); 10015 10016 return -ENODEV; ··· 11177 11178 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4; 11178 11179 break; 11179 11180 default: 11180 - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11181 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11181 11182 "1419 Invalid HBA PCI-device group: 0x%x\n", 11182 11183 dev_grp); 11183 11184 return -ENODEV; ··· 12403 12404 12404 12405 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 12405 12406 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 12406 - if (cmdiocb->iocb_flag & LPFC_IO_FCP) { 12407 - abtsiocbp->iocb_flag |= LPFC_IO_FCP; 12408 - abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 12409 - } 12407 + if (cmdiocb->iocb_flag & LPFC_IO_FCP) 12408 + abtsiocbp->iocb_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX); 12410 12409 if (cmdiocb->iocb_flag & LPFC_IO_FOF) 12411 12410 abtsiocbp->iocb_flag |= LPFC_IO_FOF; 12412 12411 12413 - if (phba->link_state >= LPFC_LINK_UP) 12414 - iabt->ulpCommand = CMD_ABORT_XRI_CN; 12415 - else 12412 + if (phba->link_state < LPFC_LINK_UP || 12413 + (phba->sli_rev == LPFC_SLI_REV4 && 12414 + phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN)) 12416 12415 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 12416 + else 12417 + iabt->ulpCommand = CMD_ABORT_XRI_CN; 12417 12418 12418 12419 if (cmpl) 
12419 12420 abtsiocbp->iocb_cmpl = cmpl; ··· 12487 12488 } 12488 12489 12489 12490 /** 12490 - * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 12491 + * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts 12492 + * @iocbq: Pointer to iocb object. 12493 + * @vport: Pointer to driver virtual port object. 12494 + * 12495 + * This function acts as an iocb filter for functions which abort FCP iocbs. 12496 + * 12497 + * Return values 12498 + * -ENODEV, if a null iocb or vport ptr is encountered 12499 + * -EINVAL, if the iocb is not an FCP I/O, not on the TX cmpl queue, premarked as 12500 + * driver already started the abort process, or is an abort iocb itself 12501 + * 0, passes criteria for aborting the FCP I/O iocb 12502 + **/ 12503 + static int 12504 + lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq, 12505 + struct lpfc_vport *vport) 12506 + { 12507 + IOCB_t *icmd = NULL; 12508 + 12509 + /* No null ptr vports */ 12510 + if (!iocbq || iocbq->vport != vport) 12511 + return -ENODEV; 12512 + 12513 + /* iocb must be for FCP IO, already exists on the TX cmpl queue, 12514 + * can't be premarked as driver aborted, nor be an ABORT iocb itself 12515 + */ 12516 + icmd = &iocbq->iocb; 12517 + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 12518 + !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) || 12519 + (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || 12520 + (icmd->ulpCommand == CMD_ABORT_XRI_CN || 12521 + icmd->ulpCommand == CMD_CLOSE_XRI_CN)) 12522 + return -EINVAL; 12523 + 12524 + return 0; 12525 + } 12526 + 12527 + /** 12528 + * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target 12491 12529 * @iocbq: Pointer to driver iocb object. 12492 12530 * @vport: Pointer to driver virtual port object. 12493 12531 * @tgt_id: SCSI ID of the target. 12494 12532 * @lun_id: LUN ID of the scsi device. 
12495 12533 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 12496 12534 * 12497 - * This function acts as an iocb filter for functions which abort or count 12498 - * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 12535 + * This function acts as an iocb filter for validating a lun/SCSI target/SCSI 12536 + * host. 12537 + * 12538 + * It will return 12499 12539 * 0 if the filtering criteria is met for the given iocb and will return 12500 12540 * 1 if the filtering criteria is not met. 12501 12541 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the ··· 12553 12515 lpfc_ctx_cmd ctx_cmd) 12554 12516 { 12555 12517 struct lpfc_io_buf *lpfc_cmd; 12556 - IOCB_t *icmd = NULL; 12557 12518 int rc = 1; 12558 - 12559 - if (!iocbq || iocbq->vport != vport) 12560 - return rc; 12561 - 12562 - if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 12563 - !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) || 12564 - iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 12565 - return rc; 12566 - 12567 - icmd = &iocbq->iocb; 12568 - if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 12569 - icmd->ulpCommand == CMD_CLOSE_XRI_CN) 12570 - return rc; 12571 12519 12572 12520 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 12573 12521 ··· 12609 12585 { 12610 12586 struct lpfc_hba *phba = vport->phba; 12611 12587 struct lpfc_iocbq *iocbq; 12588 + IOCB_t *icmd = NULL; 12612 12589 int sum, i; 12590 + unsigned long iflags; 12613 12591 12614 - spin_lock_irq(&phba->hbalock); 12592 + spin_lock_irqsave(&phba->hbalock, iflags); 12615 12593 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 12616 12594 iocbq = phba->sli.iocbq_lookup[i]; 12617 12595 12618 - if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 12619 - ctx_cmd) == 0) 12596 + if (!iocbq || iocbq->vport != vport) 12597 + continue; 12598 + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 12599 + !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) 12600 + continue; 12601 + 12602 + /* Include counting outstanding aborts */ 12603 + icmd = 
&iocbq->iocb; 12604 + if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 12605 + icmd->ulpCommand == CMD_CLOSE_XRI_CN) { 12606 + sum++; 12607 + continue; 12608 + } 12609 + 12610 + if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 12611 + ctx_cmd) == 0) 12620 12612 sum++; 12621 12613 } 12622 - spin_unlock_irq(&phba->hbalock); 12614 + spin_unlock_irqrestore(&phba->hbalock, iflags); 12623 12615 12624 12616 return sum; 12625 12617 } ··· 12702 12662 * 12703 12663 * This function sends an abort command for every SCSI command 12704 12664 * associated with the given virtual port pending on the ring 12705 - * filtered by lpfc_sli_validate_fcp_iocb function. 12665 + * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then 12666 + * lpfc_sli_validate_fcp_iocb function. The ordering for validation before 12667 + * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort 12668 + * followed by lpfc_sli_validate_fcp_iocb. 12669 + * 12706 12670 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 12707 12671 * FCP iocbs associated with lun specified by tgt_id and lun_id 12708 12672 * parameters ··· 12737 12693 12738 12694 for (i = 1; i <= phba->sli.last_iotag; i++) { 12739 12695 iocbq = phba->sli.iocbq_lookup[i]; 12696 + 12697 + if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) 12698 + continue; 12740 12699 12741 12700 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 12742 12701 abort_cmd) != 0) ··· 12773 12726 * 12774 12727 * This function sends an abort command for every SCSI command 12775 12728 * associated with the given virtual port pending on the ring 12776 - * filtered by lpfc_sli_validate_fcp_iocb function. 12729 + * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then 12730 + * lpfc_sli_validate_fcp_iocb function. The ordering for validation before 12731 + * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort 12732 + * followed by lpfc_sli_validate_fcp_iocb. 
12733 + * 12777 12734 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 12778 12735 * FCP iocbs associated with lun specified by tgt_id and lun_id 12779 12736 * parameters ··· 12814 12763 12815 12764 for (i = 1; i <= phba->sli.last_iotag; i++) { 12816 12765 iocbq = phba->sli.iocbq_lookup[i]; 12766 + 12767 + if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport)) 12768 + continue; 12817 12769 12818 12770 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 12819 12771 cmd) != 0) ··· 21161 21107 fail_msg, 21162 21108 piocbq->iotag, piocbq->sli4_xritag); 21163 21109 list_add_tail(&piocbq->list, &completions); 21110 + fail_msg = NULL; 21164 21111 } 21165 21112 spin_unlock_irqrestore(&pring->ring_lock, iflags); 21166 21113 } ··· 22021 21966 22022 21967 qp = &phba->sli4_hba.hdwq[hwqid]; 22023 21968 lpfc_ncmd = NULL; 21969 + if (!qp) { 21970 + lpfc_printf_log(phba, KERN_INFO, 21971 + LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 21972 + "5556 NULL qp for hwqid x%x\n", hwqid); 21973 + return lpfc_ncmd; 21974 + } 22024 21975 multixri_pool = qp->p_multixri_pool; 21976 + if (!multixri_pool) { 21977 + lpfc_printf_log(phba, KERN_INFO, 21978 + LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 21979 + "5557 NULL multixri for hwqid x%x\n", hwqid); 21980 + return lpfc_ncmd; 21981 + } 22025 21982 pvt_pool = &multixri_pool->pvt_pool; 21983 + if (!pvt_pool) { 21984 + lpfc_printf_log(phba, KERN_INFO, 21985 + LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 21986 + "5558 NULL pvt_pool for hwqid x%x\n", hwqid); 21987 + return lpfc_ncmd; 21988 + } 22026 21989 multixri_pool->io_req_count++; 22027 21990 22028 21991 /* If pvt_pool is empty, move some XRIs from public to private pool */ ··· 22116 22043 22117 22044 qp = &phba->sli4_hba.hdwq[hwqid]; 22118 22045 lpfc_cmd = NULL; 22046 + if (!qp) { 22047 + lpfc_printf_log(phba, KERN_WARNING, 22048 + LOG_SLI | LOG_NVME_ABTS | LOG_FCP, 22049 + "5555 NULL qp for hwqid x%x\n", hwqid); 22050 + return lpfc_cmd; 22051 + } 22119 22052 22120 22053 if 
(phba->cfg_xri_rebalancing) 22121 22054 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
+2
drivers/scsi/lpfc/lpfc_sli4.h
··· 1116 1116 int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, 1117 1117 void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); 1118 1118 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba); 1119 + void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba, 1120 + struct lpfc_io_buf *lpfc_ncmd); 1119 1121 void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, 1120 1122 struct sli4_wcqe_xri_aborted *axri, 1121 1123 struct lpfc_io_buf *lpfc_ncmd);
+1 -1
drivers/scsi/lpfc/lpfc_version.h
··· 20 20 * included with this package. * 21 21 *******************************************************************/ 22 22 23 - #define LPFC_DRIVER_VERSION "14.0.0.1" 23 + #define LPFC_DRIVER_VERSION "14.0.0.3" 24 24 #define LPFC_DRIVER_NAME "lpfc" 25 25 26 26 /* Used for SLI 2/3 */
+2 -4
drivers/scsi/mac53c94.c
··· 66 66 static void cmd_done(struct fsc_state *, int result); 67 67 static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); 68 68 69 - 70 - static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 69 + static int mac53c94_queue_lck(struct scsi_cmnd *cmd) 71 70 { 72 71 struct fsc_state *state; 73 72 ··· 82 83 } 83 84 #endif 84 85 85 - cmd->scsi_done = done; 86 86 cmd->host_scribble = NULL; 87 87 88 88 state = (struct fsc_state *) cmd->device->host->hostdata; ··· 346 348 cmd = state->current_req; 347 349 if (cmd) { 348 350 cmd->result = result; 349 - (*cmd->scsi_done)(cmd); 351 + scsi_done(cmd); 350 352 state->current_req = NULL; 351 353 } 352 354 state->phase = idle;
+10 -14
drivers/scsi/megaraid.c
··· 370 370 * 371 371 * The command queuing entry point for the mid-layer. 372 372 */ 373 - static int 374 - megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) 373 + static int megaraid_queue_lck(struct scsi_cmnd *scmd) 375 374 { 376 375 adapter_t *adapter; 377 376 scb_t *scb; ··· 378 379 unsigned long flags; 379 380 380 381 adapter = (adapter_t *)scmd->device->host->hostdata; 381 - 382 - scmd->scsi_done = done; 383 - 384 382 385 383 /* 386 384 * Allocate and build a SCB request ··· 582 586 /* have just LUN 0 for each target on virtual channels */ 583 587 if (cmd->device->lun) { 584 588 cmd->result = (DID_BAD_TARGET << 16); 585 - cmd->scsi_done(cmd); 589 + scsi_done(cmd); 586 590 return NULL; 587 591 } 588 592 ··· 601 605 602 606 if(ldrv_num > max_ldrv_num ) { 603 607 cmd->result = (DID_BAD_TARGET << 16); 604 - cmd->scsi_done(cmd); 608 + scsi_done(cmd); 605 609 return NULL; 606 610 } 607 611 ··· 613 617 * devices 614 618 */ 615 619 cmd->result = (DID_BAD_TARGET << 16); 616 - cmd->scsi_done(cmd); 620 + scsi_done(cmd); 617 621 return NULL; 618 622 } 619 623 } ··· 633 637 */ 634 638 if( !adapter->has_cluster ) { 635 639 cmd->result = (DID_OK << 16); 636 - cmd->scsi_done(cmd); 640 + scsi_done(cmd); 637 641 return NULL; 638 642 } 639 643 ··· 651 655 return scb; 652 656 #else 653 657 cmd->result = (DID_OK << 16); 654 - cmd->scsi_done(cmd); 658 + scsi_done(cmd); 655 659 return NULL; 656 660 #endif 657 661 ··· 666 670 kunmap_atomic(buf - sg->offset); 667 671 668 672 cmd->result = (DID_OK << 16); 669 - cmd->scsi_done(cmd); 673 + scsi_done(cmd); 670 674 return NULL; 671 675 } 672 676 ··· 862 866 if( ! 
adapter->has_cluster ) { 863 867 864 868 cmd->result = (DID_BAD_TARGET << 16); 865 - cmd->scsi_done(cmd); 869 + scsi_done(cmd); 866 870 return NULL; 867 871 } 868 872 ··· 885 889 886 890 default: 887 891 cmd->result = (DID_BAD_TARGET << 16); 888 - cmd->scsi_done(cmd); 892 + scsi_done(cmd); 889 893 return NULL; 890 894 } 891 895 } ··· 1650 1654 struct scsi_pointer* spos = (struct scsi_pointer *)pos; 1651 1655 1652 1656 cmd = list_entry(spos, struct scsi_cmnd, SCp); 1653 - cmd->scsi_done(cmd); 1657 + scsi_done(cmd); 1654 1658 } 1655 1659 1656 1660 INIT_LIST_HEAD(&adapter->completed_list);
+15 -13
drivers/scsi/megaraid/megaraid_mbox.c
··· 305 305 static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_app_hndl); 306 306 307 307 // Host template initializer for megaraid mbox sysfs device attributes 308 - static struct device_attribute *megaraid_shost_attrs[] = { 309 - &dev_attr_megaraid_mbox_app_hndl, 308 + static struct attribute *megaraid_shost_attrs[] = { 309 + &dev_attr_megaraid_mbox_app_hndl.attr, 310 310 NULL, 311 311 }; 312 312 313 + ATTRIBUTE_GROUPS(megaraid_shost); 313 314 314 315 static DEVICE_ATTR_ADMIN_RO(megaraid_mbox_ld); 315 316 316 317 // Host template initializer for megaraid mbox sysfs device attributes 317 - static struct device_attribute *megaraid_sdev_attrs[] = { 318 - &dev_attr_megaraid_mbox_ld, 318 + static struct attribute *megaraid_sdev_attrs[] = { 319 + &dev_attr_megaraid_mbox_ld.attr, 319 320 NULL, 320 321 }; 322 + 323 + ATTRIBUTE_GROUPS(megaraid_sdev); 321 324 322 325 /* 323 326 * Scsi host template for megaraid unified driver ··· 334 331 .eh_host_reset_handler = megaraid_reset_handler, 335 332 .change_queue_depth = scsi_change_queue_depth, 336 333 .no_write_same = 1, 337 - .sdev_attrs = megaraid_sdev_attrs, 338 - .shost_attrs = megaraid_shost_attrs, 334 + .sdev_groups = megaraid_sdev_groups, 335 + .shost_groups = megaraid_shost_groups, 339 336 }; 340 337 341 338 ··· 1435 1432 * 1436 1433 * Queue entry point for mailbox based controllers. 
1437 1434 */ 1438 - static int 1439 - megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) 1435 + static int megaraid_queue_command_lck(struct scsi_cmnd *scp) 1440 1436 { 1437 + void (*done)(struct scsi_cmnd *) = scsi_done; 1441 1438 adapter_t *adapter; 1442 1439 scb_t *scb; 1443 1440 int if_busy; 1444 1441 1445 1442 adapter = SCP2ADAPTER(scp); 1446 - scp->scsi_done = done; 1447 1443 scp->result = 0; 1448 1444 1449 1445 /* ··· 2360 2358 megaraid_dealloc_scb(adapter, scb); 2361 2359 2362 2360 // send the scsi packet back to kernel 2363 - scp->scsi_done(scp); 2361 + scsi_done(scp); 2364 2362 } 2365 2363 2366 2364 return; ··· 2418 2416 scb->sno, scb->dev_channel, scb->dev_target)); 2419 2417 2420 2418 scp->result = (DID_ABORT << 16); 2421 - scp->scsi_done(scp); 2419 + scsi_done(scp); 2422 2420 2423 2421 megaraid_dealloc_scb(adapter, scb); 2424 2422 ··· 2448 2446 scb->dev_channel, scb->dev_target)); 2449 2447 2450 2448 scp->result = (DID_ABORT << 16); 2451 - scp->scsi_done(scp); 2449 + scsi_done(scp); 2452 2450 2453 2451 megaraid_dealloc_scb(adapter, scb); 2454 2452 ··· 2568 2566 } 2569 2567 2570 2568 scb->scp->result = (DID_RESET << 16); 2571 - scb->scp->scsi_done(scb->scp); 2569 + scsi_done(scb->scp); 2572 2570 2573 2571 megaraid_dealloc_scb(adapter, scb); 2574 2572 }
+2 -2
drivers/scsi/megaraid/megaraid_sas.h
··· 21 21 /* 22 22 * MegaRAID SAS Driver meta data 23 23 */ 24 - #define MEGASAS_VERSION "07.717.02.00-rc1" 25 - #define MEGASAS_RELDATE "May 19, 2021" 24 + #define MEGASAS_VERSION "07.719.03.00-rc1" 25 + #define MEGASAS_RELDATE "Sep 29, 2021" 26 26 27 27 #define MEGASAS_MSIX_NAME_LEN 32 28 28
+21 -19
drivers/scsi/megaraid/megaraid_sas_base.c
··· 1794 1794 1795 1795 if (instance->unload == 1) { 1796 1796 scmd->result = DID_NO_CONNECT << 16; 1797 - scmd->scsi_done(scmd); 1797 + scsi_done(scmd); 1798 1798 return 0; 1799 1799 } 1800 1800 ··· 1809 1809 return SCSI_MLQUEUE_HOST_BUSY; 1810 1810 } else { 1811 1811 scmd->result = DID_NO_CONNECT << 16; 1812 - scmd->scsi_done(scmd); 1812 + scsi_done(scmd); 1813 1813 return 0; 1814 1814 } 1815 1815 } ··· 1818 1818 if (!mr_device_priv_data || 1819 1819 (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) { 1820 1820 scmd->result = DID_NO_CONNECT << 16; 1821 - scmd->scsi_done(scmd); 1821 + scsi_done(scmd); 1822 1822 return 0; 1823 1823 } 1824 1824 ··· 1826 1826 ld_tgt_id = MEGASAS_TARGET_ID(scmd->device); 1827 1827 if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) { 1828 1828 scmd->result = DID_NO_CONNECT << 16; 1829 - scmd->scsi_done(scmd); 1829 + scsi_done(scmd); 1830 1830 return 0; 1831 1831 } 1832 1832 } ··· 1857 1857 return instance->instancet->build_and_issue_cmd(instance, scmd); 1858 1858 1859 1859 out_done: 1860 - scmd->scsi_done(scmd); 1860 + scsi_done(scmd); 1861 1861 return 0; 1862 1862 } 1863 1863 ··· 2783 2783 reset_index, reset_cmd, 2784 2784 reset_cmd->scmd->cmnd[0]); 2785 2785 2786 - reset_cmd->scmd->scsi_done(reset_cmd->scmd); 2786 + scsi_done(reset_cmd->scmd); 2787 2787 megasas_return_cmd(instance, reset_cmd); 2788 2788 } else if (reset_cmd->sync_cmd) { 2789 2789 dev_notice(&instance->pdev->dev, "%p synch cmds" ··· 3481 3481 static DEVICE_ATTR_RO(dump_system_regs); 3482 3482 static DEVICE_ATTR_RO(raid_map_id); 3483 3483 3484 - static struct device_attribute *megaraid_host_attrs[] = { 3485 - &dev_attr_fw_crash_buffer_size, 3486 - &dev_attr_fw_crash_buffer, 3487 - &dev_attr_fw_crash_state, 3488 - &dev_attr_page_size, 3489 - &dev_attr_ldio_outstanding, 3490 - &dev_attr_fw_cmds_outstanding, 3491 - &dev_attr_enable_sdev_max_qd, 3492 - &dev_attr_dump_system_regs, 3493 - &dev_attr_raid_map_id, 3484 + static struct attribute 
*megaraid_host_attrs[] = { 3485 + &dev_attr_fw_crash_buffer_size.attr, 3486 + &dev_attr_fw_crash_buffer.attr, 3487 + &dev_attr_fw_crash_state.attr, 3488 + &dev_attr_page_size.attr, 3489 + &dev_attr_ldio_outstanding.attr, 3490 + &dev_attr_fw_cmds_outstanding.attr, 3491 + &dev_attr_enable_sdev_max_qd.attr, 3492 + &dev_attr_dump_system_regs.attr, 3493 + &dev_attr_raid_map_id.attr, 3494 3494 NULL, 3495 3495 }; 3496 + 3497 + ATTRIBUTE_GROUPS(megaraid_host); 3496 3498 3497 3499 /* 3498 3500 * Scsi host template for megaraid_sas driver ··· 3512 3510 .eh_abort_handler = megasas_task_abort, 3513 3511 .eh_host_reset_handler = megasas_reset_bus_host, 3514 3512 .eh_timed_out = megasas_reset_timer, 3515 - .shost_attrs = megaraid_host_attrs, 3513 + .shost_groups = megaraid_host_groups, 3516 3514 .bios_param = megasas_bios_param, 3517 3515 .map_queues = megasas_map_queues, 3518 3516 .mq_poll = megasas_blk_mq_poll, ··· 3642 3640 atomic_dec(&instance->fw_outstanding); 3643 3641 3644 3642 scsi_dma_unmap(cmd->scmd); 3645 - cmd->scmd->scsi_done(cmd->scmd); 3643 + scsi_done(cmd->scmd); 3646 3644 megasas_return_cmd(instance, cmd); 3647 3645 3648 3646 break; ··· 3688 3686 atomic_dec(&instance->fw_outstanding); 3689 3687 3690 3688 scsi_dma_unmap(cmd->scmd); 3691 - cmd->scmd->scsi_done(cmd->scmd); 3689 + scsi_done(cmd->scmd); 3692 3690 megasas_return_cmd(instance, cmd); 3693 3691 3694 3692 break;
+46 -10
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 3493 3493 megasas_return_cmd_fusion(instance, cmd); 3494 3494 scsi_dma_unmap(scmd_local); 3495 3495 megasas_sdev_busy_dec(instance, scmd_local); 3496 - scmd_local->scsi_done(scmd_local); 3496 + scsi_done(scmd_local); 3497 3497 } 3498 + } 3499 + 3500 + /** 3501 + * access_irq_context: Access to reply processing 3502 + * @irq_context: IRQ context 3503 + * 3504 + * Synchronize access to reply processing. 3505 + * 3506 + * Return: true on success, false on failure. 3507 + */ 3508 + static inline 3509 + bool access_irq_context(struct megasas_irq_context *irq_context) 3510 + { 3511 + if (!irq_context) 3512 + return true; 3513 + 3514 + if (atomic_add_unless(&irq_context->in_used, 1, 1)) 3515 + return true; 3516 + 3517 + return false; 3518 + } 3519 + 3520 + /** 3521 + * release_irq_context: Release reply processing 3522 + * @irq_context: IRQ context 3523 + * 3524 + * Release access of reply processing. 3525 + * 3526 + * Return: Nothing. 3527 + */ 3528 + static inline 3529 + void release_irq_context(struct megasas_irq_context *irq_context) 3530 + { 3531 + if (irq_context) 3532 + atomic_dec(&irq_context->in_used); 3498 3533 } 3499 3534 3500 3535 /** ··· 3565 3530 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) 3566 3531 return IRQ_HANDLED; 3567 3532 3533 + if (!access_irq_context(irq_context)) 3534 + return 0; 3535 + 3568 3536 desc = fusion->reply_frames_desc[MSIxIndex] + 3569 3537 fusion->last_reply_idx[MSIxIndex]; 3570 3538 ··· 3578 3540 reply_descript_type = reply_desc->ReplyFlags & 3579 3541 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 3580 3542 3581 - if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 3543 + if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { 3544 + release_irq_context(irq_context); 3582 3545 return IRQ_NONE; 3583 - 3584 - if (irq_context && !atomic_add_unless(&irq_context->in_used, 1, 1)) 3585 - return 0; 3546 + } 3586 3547 3587 3548 num_completed = 0; 3588 3549 ··· 3634 3597 megasas_return_cmd_fusion(instance, 
cmd_fusion); 3635 3598 scsi_dma_unmap(scmd_local); 3636 3599 megasas_sdev_busy_dec(instance, scmd_local); 3637 - scmd_local->scsi_done(scmd_local); 3600 + scsi_done(scmd_local); 3638 3601 } else /* Optimal VD - R1 FP command completion. */ 3639 3602 megasas_complete_r1_command(instance, cmd_fusion); 3640 3603 break; ··· 3697 3660 irq_context->irq_line_enable = true; 3698 3661 irq_poll_sched(&irq_context->irqpoll); 3699 3662 } 3700 - atomic_dec(&irq_context->in_used); 3663 + release_irq_context(irq_context); 3701 3664 return num_completed; 3702 3665 } 3703 3666 } ··· 3716 3679 megasas_check_and_restore_queue_depth(instance); 3717 3680 } 3718 3681 3719 - if (irq_context) 3720 - atomic_dec(&irq_context->in_used); 3682 + release_irq_context(irq_context); 3721 3683 3722 3684 return num_completed; 3723 3685 } ··· 5013 4977 atomic_dec(&instance->ldio_outstanding); 5014 4978 megasas_return_cmd_fusion(instance, cmd_fusion); 5015 4979 scsi_dma_unmap(scmd_local); 5016 - scmd_local->scsi_done(scmd_local); 4980 + scsi_done(scmd_local); 5017 4981 } 5018 4982 } 5019 4983
+4 -14
drivers/scsi/mesh.c
··· 342 342 } 343 343 344 344 345 - /* 346 - * Complete a SCSI command 347 - */ 348 - static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd) 349 - { 350 - (*cmd->scsi_done)(cmd); 351 - } 352 - 353 - 354 345 /* Called with meshinterrupt disabled, initialize the chipset 355 346 * and eventually do the initial bus reset. The lock must not be 356 347 * held since we can schedule. ··· 604 613 #endif 605 614 } 606 615 cmd->SCp.this_residual -= ms->data_ptr; 607 - mesh_completed(ms, cmd); 616 + scsi_done(cmd); 608 617 } 609 618 if (start_next) { 610 619 out_8(&ms->mesh->sequence, SEQ_ENBRESEL); ··· 987 996 if ((cmd = tp->current_req) != NULL) { 988 997 set_host_byte(cmd, DID_RESET); 989 998 tp->current_req = NULL; 990 - mesh_completed(ms, cmd); 999 + scsi_done(cmd); 991 1000 } 992 1001 ms->tgts[tgt].sdtr_state = do_sdtr; 993 1002 ms->tgts[tgt].sync_params = ASYNC_PARAMS; ··· 996 1005 while ((cmd = ms->request_q) != NULL) { 997 1006 ms->request_q = (struct scsi_cmnd *) cmd->host_scribble; 998 1007 set_host_byte(cmd, DID_RESET); 999 - mesh_completed(ms, cmd); 1008 + scsi_done(cmd); 1000 1009 } 1001 1010 ms->phase = idle; 1002 1011 ms->msgphase = msg_none; ··· 1621 1630 * Called by midlayer with host locked to queue a new 1622 1631 * request 1623 1632 */ 1624 - static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 1633 + static int mesh_queue_lck(struct scsi_cmnd *cmd) 1625 1634 { 1626 1635 struct mesh_state *ms; 1627 1636 1628 - cmd->scsi_done = done; 1629 1637 cmd->host_scribble = NULL; 1630 1638 1631 1639 ms = (struct mesh_state *) cmd->device->host->hostdata;
+9 -23
drivers/scsi/mpi3mr/mpi3mr_fw.c
··· 3018 3018 static void 3019 3019 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc) 3020 3020 { 3021 - int i = 0, bytes_wrote = 0; 3021 + int i = 0, bytes_written = 0; 3022 3022 char personality[16]; 3023 3023 char protocol[50] = {0}; 3024 3024 char capabilities[100] = {0}; 3025 - bool is_string_nonempty = false; 3026 3025 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; 3027 3026 3028 3027 switch (mrioc->facts.personality) { ··· 3045 3046 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) { 3046 3047 if (mrioc->facts.protocol_flags & 3047 3048 mpi3mr_protocols[i].protocol) { 3048 - if (is_string_nonempty && 3049 - (bytes_wrote < sizeof(protocol))) 3050 - bytes_wrote += snprintf(protocol + bytes_wrote, 3051 - (sizeof(protocol) - bytes_wrote), ","); 3052 - 3053 - if (bytes_wrote < sizeof(protocol)) 3054 - bytes_wrote += snprintf(protocol + bytes_wrote, 3055 - (sizeof(protocol) - bytes_wrote), "%s", 3049 + bytes_written += scnprintf(protocol + bytes_written, 3050 + sizeof(protocol) - bytes_written, "%s%s", 3051 + bytes_written ? "," : "", 3056 3052 mpi3mr_protocols[i].name); 3057 - is_string_nonempty = true; 3058 3053 } 3059 3054 } 3060 3055 3061 - bytes_wrote = 0; 3062 - is_string_nonempty = false; 3056 + bytes_written = 0; 3063 3057 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) { 3064 3058 if (mrioc->facts.protocol_flags & 3065 3059 mpi3mr_capabilities[i].capability) { 3066 - if (is_string_nonempty && 3067 - (bytes_wrote < sizeof(capabilities))) 3068 - bytes_wrote += snprintf(capabilities + bytes_wrote, 3069 - (sizeof(capabilities) - bytes_wrote), ","); 3070 - 3071 - if (bytes_wrote < sizeof(capabilities)) 3072 - bytes_wrote += snprintf(capabilities + bytes_wrote, 3073 - (sizeof(capabilities) - bytes_wrote), "%s", 3060 + bytes_written += scnprintf(capabilities + bytes_written, 3061 + sizeof(capabilities) - bytes_written, "%s%s", 3062 + bytes_written ? 
"," : "", 3074 3063 mpi3mr_capabilities[i].name); 3075 - is_string_nonempty = true; 3076 3064 } 3077 3065 } 3078 3066 3079 3067 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n", 3080 - protocol, capabilities); 3068 + protocol, capabilities); 3081 3069 } 3082 3070 3083 3071 /**
+13 -13
drivers/scsi/mpi3mr/mpi3mr_os.c
··· 409 409 scsi_dma_unmap(scmd); 410 410 scmd->result = DID_RESET << 16; 411 411 scsi_print_command(scmd); 412 - scmd->scsi_done(scmd); 412 + scsi_done(scmd); 413 413 mrioc->flush_io_count++; 414 414 } 415 415 ··· 2312 2312 } 2313 2313 mpi3mr_clear_scmd_priv(mrioc, scmd); 2314 2314 scsi_dma_unmap(scmd); 2315 - scmd->scsi_done(scmd); 2315 + scsi_done(scmd); 2316 2316 out: 2317 2317 if (sense_buf) 2318 2318 mpi3mr_repost_sense_buf(mrioc, ··· 3322 3322 __func__); 3323 3323 scsi_print_command(scmd); 3324 3324 scmd->result = DID_OK << 16; 3325 - scmd->scsi_done(scmd); 3325 + scsi_done(scmd); 3326 3326 return true; 3327 3327 } 3328 3328 ··· 3334 3334 scmd->result = SAM_STAT_CHECK_CONDITION; 3335 3335 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3336 3336 0x1A, 0); 3337 - scmd->scsi_done(scmd); 3337 + scsi_done(scmd); 3338 3338 return true; 3339 3339 } 3340 3340 if (param_len != scsi_bufflen(scmd)) { ··· 3345 3345 scmd->result = SAM_STAT_CHECK_CONDITION; 3346 3346 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3347 3347 0x1A, 0); 3348 - scmd->scsi_done(scmd); 3348 + scsi_done(scmd); 3349 3349 return true; 3350 3350 } 3351 3351 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); ··· 3354 3354 scmd->result = SAM_STAT_CHECK_CONDITION; 3355 3355 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3356 3356 0x55, 0x03); 3357 - scmd->scsi_done(scmd); 3357 + scsi_done(scmd); 3358 3358 return true; 3359 3359 } 3360 3360 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); ··· 3368 3368 scmd->result = SAM_STAT_CHECK_CONDITION; 3369 3369 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3370 3370 0x26, 0); 3371 - scmd->scsi_done(scmd); 3371 + scsi_done(scmd); 3372 3372 kfree(buf); 3373 3373 return true; 3374 3374 } ··· 3438 3438 sdev_priv_data = scmd->device->hostdata; 3439 3439 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 3440 3440 scmd->result = DID_NO_CONNECT << 16; 3441 - scmd->scsi_done(scmd); 3441 + 
scsi_done(scmd); 3442 3442 goto out; 3443 3443 } 3444 3444 3445 3445 if (mrioc->stop_drv_processing && 3446 3446 !(mpi3mr_allow_scmd_to_fw(scmd))) { 3447 3447 scmd->result = DID_NO_CONNECT << 16; 3448 - scmd->scsi_done(scmd); 3448 + scsi_done(scmd); 3449 3449 goto out; 3450 3450 } 3451 3451 ··· 3459 3459 dev_handle = stgt_priv_data->dev_handle; 3460 3460 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 3461 3461 scmd->result = DID_NO_CONNECT << 16; 3462 - scmd->scsi_done(scmd); 3462 + scsi_done(scmd); 3463 3463 goto out; 3464 3464 } 3465 3465 if (stgt_priv_data->dev_removed) { 3466 3466 scmd->result = DID_NO_CONNECT << 16; 3467 - scmd->scsi_done(scmd); 3467 + scsi_done(scmd); 3468 3468 goto out; 3469 3469 } 3470 3470 3471 3471 if (atomic_read(&stgt_priv_data->block_io)) { 3472 3472 if (mrioc->stop_drv_processing) { 3473 3473 scmd->result = DID_NO_CONNECT << 16; 3474 - scmd->scsi_done(scmd); 3474 + scsi_done(scmd); 3475 3475 goto out; 3476 3476 } 3477 3477 retval = SCSI_MLQUEUE_DEVICE_BUSY; ··· 3486 3486 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 3487 3487 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 3488 3488 scmd->result = DID_ERROR << 16; 3489 - scmd->scsi_done(scmd); 3489 + scsi_done(scmd); 3490 3490 goto out; 3491 3491 } 3492 3492
+2 -2
drivers/scsi/mpt3sas/mpt3sas_base.h
··· 1939 1939 struct SL_WH_MPI_TRIGGERS_T *mpi_tg, bool set); 1940 1940 1941 1941 /* ctl shared API */ 1942 - extern struct device_attribute *mpt3sas_host_attrs[]; 1943 - extern struct device_attribute *mpt3sas_dev_attrs[]; 1942 + extern const struct attribute_group *mpt3sas_host_groups[]; 1943 + extern const struct attribute_group *mpt3sas_dev_groups[]; 1944 1944 void mpt3sas_ctl_init(ushort hbas_to_enumerate); 1945 1945 void mpt3sas_ctl_exit(ushort hbas_to_enumerate); 1946 1946 u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+51 -33
drivers/scsi/mpt3sas/mpt3sas_ctl.c
··· 3842 3842 } 3843 3843 static DEVICE_ATTR_RW(enable_sdev_max_qd); 3844 3844 3845 - struct device_attribute *mpt3sas_host_attrs[] = { 3846 - &dev_attr_version_fw, 3847 - &dev_attr_version_bios, 3848 - &dev_attr_version_mpi, 3849 - &dev_attr_version_product, 3850 - &dev_attr_version_nvdata_persistent, 3851 - &dev_attr_version_nvdata_default, 3852 - &dev_attr_board_name, 3853 - &dev_attr_board_assembly, 3854 - &dev_attr_board_tracer, 3855 - &dev_attr_io_delay, 3856 - &dev_attr_device_delay, 3857 - &dev_attr_logging_level, 3858 - &dev_attr_fwfault_debug, 3859 - &dev_attr_fw_queue_depth, 3860 - &dev_attr_host_sas_address, 3861 - &dev_attr_ioc_reset_count, 3862 - &dev_attr_host_trace_buffer_size, 3863 - &dev_attr_host_trace_buffer, 3864 - &dev_attr_host_trace_buffer_enable, 3865 - &dev_attr_reply_queue_count, 3866 - &dev_attr_diag_trigger_master, 3867 - &dev_attr_diag_trigger_event, 3868 - &dev_attr_diag_trigger_scsi, 3869 - &dev_attr_diag_trigger_mpi, 3870 - &dev_attr_drv_support_bitmap, 3871 - &dev_attr_BRM_status, 3872 - &dev_attr_enable_sdev_max_qd, 3845 + static struct attribute *mpt3sas_host_attrs[] = { 3846 + &dev_attr_version_fw.attr, 3847 + &dev_attr_version_bios.attr, 3848 + &dev_attr_version_mpi.attr, 3849 + &dev_attr_version_product.attr, 3850 + &dev_attr_version_nvdata_persistent.attr, 3851 + &dev_attr_version_nvdata_default.attr, 3852 + &dev_attr_board_name.attr, 3853 + &dev_attr_board_assembly.attr, 3854 + &dev_attr_board_tracer.attr, 3855 + &dev_attr_io_delay.attr, 3856 + &dev_attr_device_delay.attr, 3857 + &dev_attr_logging_level.attr, 3858 + &dev_attr_fwfault_debug.attr, 3859 + &dev_attr_fw_queue_depth.attr, 3860 + &dev_attr_host_sas_address.attr, 3861 + &dev_attr_ioc_reset_count.attr, 3862 + &dev_attr_host_trace_buffer_size.attr, 3863 + &dev_attr_host_trace_buffer.attr, 3864 + &dev_attr_host_trace_buffer_enable.attr, 3865 + &dev_attr_reply_queue_count.attr, 3866 + &dev_attr_diag_trigger_master.attr, 3867 + &dev_attr_diag_trigger_event.attr, 3868 + 
&dev_attr_diag_trigger_scsi.attr, 3869 + &dev_attr_diag_trigger_mpi.attr, 3870 + &dev_attr_drv_support_bitmap.attr, 3871 + &dev_attr_BRM_status.attr, 3872 + &dev_attr_enable_sdev_max_qd.attr, 3873 3873 NULL, 3874 + }; 3875 + 3876 + static const struct attribute_group mpt3sas_host_attr_group = { 3877 + .attrs = mpt3sas_host_attrs 3878 + }; 3879 + 3880 + const struct attribute_group *mpt3sas_host_groups[] = { 3881 + &mpt3sas_host_attr_group, 3882 + NULL 3874 3883 }; 3875 3884 3876 3885 /* device attributes */ ··· 3985 3976 } 3986 3977 static DEVICE_ATTR_RW(sas_ncq_prio_enable); 3987 3978 3988 - struct device_attribute *mpt3sas_dev_attrs[] = { 3989 - &dev_attr_sas_address, 3990 - &dev_attr_sas_device_handle, 3991 - &dev_attr_sas_ncq_prio_supported, 3992 - &dev_attr_sas_ncq_prio_enable, 3979 + static struct attribute *mpt3sas_dev_attrs[] = { 3980 + &dev_attr_sas_address.attr, 3981 + &dev_attr_sas_device_handle.attr, 3982 + &dev_attr_sas_ncq_prio_supported.attr, 3983 + &dev_attr_sas_ncq_prio_enable.attr, 3993 3984 NULL, 3985 + }; 3986 + 3987 + static const struct attribute_group mpt3sas_dev_attr_group = { 3988 + .attrs = mpt3sas_dev_attrs 3989 + }; 3990 + 3991 + const struct attribute_group *mpt3sas_dev_groups[] = { 3992 + &mpt3sas_dev_attr_group, 3993 + NULL 3994 3994 }; 3995 3995 3996 3996 /* file operations table for mpt3ctl device */
+13 -13
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 3314 3314 sdev_printk(KERN_INFO, scmd->device, 3315 3315 "device been deleted! scmd(0x%p)\n", scmd); 3316 3316 scmd->result = DID_NO_CONNECT << 16; 3317 - scmd->scsi_done(scmd); 3317 + scsi_done(scmd); 3318 3318 r = SUCCESS; 3319 3319 goto out; 3320 3320 } ··· 3390 3390 sdev_printk(KERN_INFO, scmd->device, 3391 3391 "device been deleted! scmd(0x%p)\n", scmd); 3392 3392 scmd->result = DID_NO_CONNECT << 16; 3393 - scmd->scsi_done(scmd); 3393 + scsi_done(scmd); 3394 3394 r = SUCCESS; 3395 3395 goto out; 3396 3396 } ··· 3470 3470 starget_printk(KERN_INFO, starget, 3471 3471 "target been deleted! scmd(0x%p)\n", scmd); 3472 3472 scmd->result = DID_NO_CONNECT << 16; 3473 - scmd->scsi_done(scmd); 3473 + scsi_done(scmd); 3474 3474 r = SUCCESS; 3475 3475 goto out; 3476 3476 } ··· 5030 5030 scmd->result = DID_NO_CONNECT << 16; 5031 5031 else 5032 5032 scmd->result = DID_RESET << 16; 5033 - scmd->scsi_done(scmd); 5033 + scsi_done(scmd); 5034 5034 } 5035 5035 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count)); 5036 5036 } ··· 5142 5142 sas_device_priv_data = scmd->device->hostdata; 5143 5143 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 5144 5144 scmd->result = DID_NO_CONNECT << 16; 5145 - scmd->scsi_done(scmd); 5145 + scsi_done(scmd); 5146 5146 return 0; 5147 5147 } 5148 5148 5149 5149 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { 5150 5150 scmd->result = DID_NO_CONNECT << 16; 5151 - scmd->scsi_done(scmd); 5151 + scsi_done(scmd); 5152 5152 return 0; 5153 5153 } 5154 5154 ··· 5158 5158 handle = sas_target_priv_data->handle; 5159 5159 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { 5160 5160 scmd->result = DID_NO_CONNECT << 16; 5161 - scmd->scsi_done(scmd); 5161 + scsi_done(scmd); 5162 5162 return 0; 5163 5163 } 5164 5164 ··· 5169 5169 } else if (sas_target_priv_data->deleted) { 5170 5170 /* device has been deleted */ 5171 5171 scmd->result = DID_NO_CONNECT << 16; 5172 - scmd->scsi_done(scmd); 5172 + scsi_done(scmd); 5173 5173 return 0; 5174 
5174 } else if (sas_target_priv_data->tm_busy || 5175 5175 sas_device_priv_data->block) { ··· 5912 5912 5913 5913 scsi_dma_unmap(scmd); 5914 5914 mpt3sas_base_free_smid(ioc, smid); 5915 - scmd->scsi_done(scmd); 5915 + scsi_done(scmd); 5916 5916 return 0; 5917 5917 } 5918 5918 ··· 11878 11878 .sg_tablesize = MPT2SAS_SG_DEPTH, 11879 11879 .max_sectors = 32767, 11880 11880 .cmd_per_lun = 7, 11881 - .shost_attrs = mpt3sas_host_attrs, 11882 - .sdev_attrs = mpt3sas_dev_attrs, 11881 + .shost_groups = mpt3sas_host_groups, 11882 + .sdev_groups = mpt3sas_dev_groups, 11883 11883 .track_queue_depth = 1, 11884 11884 .cmd_size = sizeof(struct scsiio_tracker), 11885 11885 }; ··· 11917 11917 .max_sectors = 32767, 11918 11918 .max_segment_size = 0xffffffff, 11919 11919 .cmd_per_lun = 7, 11920 - .shost_attrs = mpt3sas_host_attrs, 11921 - .sdev_attrs = mpt3sas_dev_attrs, 11920 + .shost_groups = mpt3sas_host_groups, 11921 + .sdev_groups = mpt3sas_dev_groups, 11922 11922 .track_queue_depth = 1, 11923 11923 .cmd_size = sizeof(struct scsiio_tracker), 11924 11924 .map_queues = scsih_map_queues,
+7 -5
drivers/scsi/mvsas/mv_init.c
··· 25 25 [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, }, 26 26 }; 27 27 28 - static struct device_attribute *mvst_host_attrs[]; 28 + static const struct attribute_group *mvst_host_groups[]; 29 29 30 30 #define SOC_SAS_NUM 2 31 31 ··· 52 52 #ifdef CONFIG_COMPAT 53 53 .compat_ioctl = sas_ioctl, 54 54 #endif 55 - .shost_attrs = mvst_host_attrs, 55 + .shost_groups = mvst_host_groups, 56 56 .track_queue_depth = 1, 57 57 }; 58 58 ··· 773 773 sas_release_transport(mvs_stt); 774 774 } 775 775 776 - static struct device_attribute *mvst_host_attrs[] = { 777 - &dev_attr_driver_version, 778 - &dev_attr_interrupt_coalescing, 776 + static struct attribute *mvst_host_attrs[] = { 777 + &dev_attr_driver_version.attr, 778 + &dev_attr_interrupt_coalescing.attr, 779 779 NULL, 780 780 }; 781 + 782 + ATTRIBUTE_GROUPS(mvst_host); 781 783 782 784 module_init(mvs_init); 783 785 module_exit(mvs_exit);
+2 -2
drivers/scsi/mvumi.c
··· 1328 1328 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), 1329 1329 scsi_sg_count(scmd), 1330 1330 scmd->sc_data_direction); 1331 - cmd->scmd->scsi_done(scmd); 1331 + scsi_done(scmd); 1332 1332 mvumi_return_cmd(mhba, cmd); 1333 1333 } 1334 1334 ··· 2104 2104 2105 2105 out_return_cmd: 2106 2106 mvumi_return_cmd(mhba, cmd); 2107 - scmd->scsi_done(scmd); 2107 + scsi_done(scmd); 2108 2108 spin_unlock_irqrestore(shost->host_lock, irq_flags); 2109 2109 return 0; 2110 2110 }
+32 -28
drivers/scsi/myrb.c
··· 1282 1282 if (nsge > 1) { 1283 1283 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr); 1284 1284 scmd->result = (DID_ERROR << 16); 1285 - scmd->scsi_done(scmd); 1285 + scsi_done(scmd); 1286 1286 return 0; 1287 1287 } 1288 1288 ··· 1436 1436 dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n", 1437 1437 sdev->id, ldev_info ? ldev_info->state : 0xff); 1438 1438 scmd->result = (DID_BAD_TARGET << 16); 1439 - scmd->scsi_done(scmd); 1439 + scsi_done(scmd); 1440 1440 return 0; 1441 1441 } 1442 1442 switch (scmd->cmnd[0]) { 1443 1443 case TEST_UNIT_READY: 1444 1444 scmd->result = (DID_OK << 16); 1445 - scmd->scsi_done(scmd); 1445 + scsi_done(scmd); 1446 1446 return 0; 1447 1447 case INQUIRY: 1448 1448 if (scmd->cmnd[1] & 1) { ··· 1452 1452 myrb_inquiry(cb, scmd); 1453 1453 scmd->result = (DID_OK << 16); 1454 1454 } 1455 - scmd->scsi_done(scmd); 1455 + scsi_done(scmd); 1456 1456 return 0; 1457 1457 case SYNCHRONIZE_CACHE: 1458 1458 scmd->result = (DID_OK << 16); 1459 - scmd->scsi_done(scmd); 1459 + scsi_done(scmd); 1460 1460 return 0; 1461 1461 case MODE_SENSE: 1462 1462 if ((scmd->cmnd[2] & 0x3F) != 0x3F && ··· 1467 1467 myrb_mode_sense(cb, scmd, ldev_info); 1468 1468 scmd->result = (DID_OK << 16); 1469 1469 } 1470 - scmd->scsi_done(scmd); 1470 + scsi_done(scmd); 1471 1471 return 0; 1472 1472 case READ_CAPACITY: 1473 1473 if ((scmd->cmnd[1] & 1) || 1474 1474 (scmd->cmnd[8] & 1)) { 1475 1475 /* Illegal request, invalid field in CDB */ 1476 1476 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); 1477 - scmd->scsi_done(scmd); 1477 + scsi_done(scmd); 1478 1478 return 0; 1479 1479 } 1480 1480 lba = get_unaligned_be32(&scmd->cmnd[2]); 1481 1481 if (lba) { 1482 1482 /* Illegal request, invalid field in CDB */ 1483 1483 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0); 1484 - scmd->scsi_done(scmd); 1484 + scsi_done(scmd); 1485 1485 return 0; 1486 1486 } 1487 1487 myrb_read_capacity(cb, scmd, ldev_info); 1488 - scmd->scsi_done(scmd); 1488 + scsi_done(scmd); 1489 1489 
return 0; 1490 1490 case REQUEST_SENSE: 1491 1491 myrb_request_sense(cb, scmd); ··· 1499 1499 /* Assume good status */ 1500 1500 scmd->result = (DID_OK << 16); 1501 1501 } 1502 - scmd->scsi_done(scmd); 1502 + scsi_done(scmd); 1503 1503 return 0; 1504 1504 case READ_6: 1505 1505 if (ldev_info->state == MYRB_DEVICE_WO) { 1506 1506 /* Data protect, attempt to read invalid data */ 1507 1507 scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06); 1508 - scmd->scsi_done(scmd); 1508 + scsi_done(scmd); 1509 1509 return 0; 1510 1510 } 1511 1511 fallthrough; ··· 1519 1519 if (ldev_info->state == MYRB_DEVICE_WO) { 1520 1520 /* Data protect, attempt to read invalid data */ 1521 1521 scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06); 1522 - scmd->scsi_done(scmd); 1522 + scsi_done(scmd); 1523 1523 return 0; 1524 1524 } 1525 1525 fallthrough; ··· 1533 1533 if (ldev_info->state == MYRB_DEVICE_WO) { 1534 1534 /* Data protect, attempt to read invalid data */ 1535 1535 scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06); 1536 - scmd->scsi_done(scmd); 1536 + scsi_done(scmd); 1537 1537 return 0; 1538 1538 } 1539 1539 fallthrough; ··· 1546 1546 default: 1547 1547 /* Illegal request, invalid opcode */ 1548 1548 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0); 1549 - scmd->scsi_done(scmd); 1549 + scsi_done(scmd); 1550 1550 return 0; 1551 1551 } 1552 1552 ··· 1610 1610 1611 1611 if (sdev->channel > myrb_logical_channel(shost)) { 1612 1612 scmd->result = (DID_BAD_TARGET << 16); 1613 - scmd->scsi_done(scmd); 1613 + scsi_done(scmd); 1614 1614 return 0; 1615 1615 } 1616 1616 if (sdev->channel == myrb_logical_channel(shost)) ··· 2182 2182 } 2183 2183 static DEVICE_ATTR_WO(flush_cache); 2184 2184 2185 - static struct device_attribute *myrb_sdev_attrs[] = { 2186 - &dev_attr_rebuild, 2187 - &dev_attr_consistency_check, 2188 - &dev_attr_raid_state, 2189 - &dev_attr_raid_level, 2185 + static struct attribute *myrb_sdev_attrs[] = { 2186 + &dev_attr_rebuild.attr, 2187 + 
&dev_attr_consistency_check.attr, 2188 + &dev_attr_raid_state.attr, 2189 + &dev_attr_raid_level.attr, 2190 2190 NULL, 2191 2191 }; 2192 2192 2193 - static struct device_attribute *myrb_shost_attrs[] = { 2194 - &dev_attr_ctlr_num, 2195 - &dev_attr_model, 2196 - &dev_attr_firmware, 2197 - &dev_attr_flush_cache, 2193 + ATTRIBUTE_GROUPS(myrb_sdev); 2194 + 2195 + static struct attribute *myrb_shost_attrs[] = { 2196 + &dev_attr_ctlr_num.attr, 2197 + &dev_attr_model.attr, 2198 + &dev_attr_firmware.attr, 2199 + &dev_attr_flush_cache.attr, 2198 2200 NULL, 2199 2201 }; 2202 + 2203 + ATTRIBUTE_GROUPS(myrb_shost); 2200 2204 2201 2205 static struct scsi_host_template myrb_template = { 2202 2206 .module = THIS_MODULE, ··· 2213 2209 .slave_destroy = myrb_slave_destroy, 2214 2210 .bios_param = myrb_biosparam, 2215 2211 .cmd_size = sizeof(struct myrb_cmdblk), 2216 - .shost_attrs = myrb_shost_attrs, 2217 - .sdev_attrs = myrb_sdev_attrs, 2212 + .shost_groups = myrb_shost_groups, 2213 + .sdev_groups = myrb_sdev_groups, 2218 2214 .this_id = -1, 2219 2215 }; 2220 2216 ··· 2365 2361 scmd->result = (DID_ERROR << 16); 2366 2362 break; 2367 2363 } 2368 - scmd->scsi_done(scmd); 2364 + scsi_done(scmd); 2369 2365 } 2370 2366 2371 2367 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+27 -23
drivers/scsi/myrs.c
··· 1286 1286 } 1287 1287 static DEVICE_ATTR_RW(consistency_check); 1288 1288 1289 - static struct device_attribute *myrs_sdev_attrs[] = { 1290 - &dev_attr_consistency_check, 1291 - &dev_attr_rebuild, 1292 - &dev_attr_raid_state, 1293 - &dev_attr_raid_level, 1289 + static struct attribute *myrs_sdev_attrs[] = { 1290 + &dev_attr_consistency_check.attr, 1291 + &dev_attr_rebuild.attr, 1292 + &dev_attr_raid_state.attr, 1293 + &dev_attr_raid_level.attr, 1294 1294 NULL, 1295 1295 }; 1296 + 1297 + ATTRIBUTE_GROUPS(myrs_sdev); 1296 1298 1297 1299 static ssize_t serial_show(struct device *dev, 1298 1300 struct device_attribute *attr, char *buf) ··· 1512 1510 } 1513 1511 static DEVICE_ATTR_RW(disable_enclosure_messages); 1514 1512 1515 - static struct device_attribute *myrs_shost_attrs[] = { 1516 - &dev_attr_serial, 1517 - &dev_attr_ctlr_num, 1518 - &dev_attr_processor, 1519 - &dev_attr_model, 1520 - &dev_attr_ctlr_type, 1521 - &dev_attr_cache_size, 1522 - &dev_attr_firmware, 1523 - &dev_attr_discovery, 1524 - &dev_attr_flush_cache, 1525 - &dev_attr_disable_enclosure_messages, 1513 + static struct attribute *myrs_shost_attrs[] = { 1514 + &dev_attr_serial.attr, 1515 + &dev_attr_ctlr_num.attr, 1516 + &dev_attr_processor.attr, 1517 + &dev_attr_model.attr, 1518 + &dev_attr_ctlr_type.attr, 1519 + &dev_attr_cache_size.attr, 1520 + &dev_attr_firmware.attr, 1521 + &dev_attr_discovery.attr, 1522 + &dev_attr_flush_cache.attr, 1523 + &dev_attr_disable_enclosure_messages.attr, 1526 1524 NULL, 1527 1525 }; 1526 + 1527 + ATTRIBUTE_GROUPS(myrs_shost); 1528 1528 1529 1529 /* 1530 1530 * SCSI midlayer interface ··· 1599 1595 1600 1596 if (!scmd->device->hostdata) { 1601 1597 scmd->result = (DID_NO_CONNECT << 16); 1602 - scmd->scsi_done(scmd); 1598 + scsi_done(scmd); 1603 1599 return 0; 1604 1600 } 1605 1601 1606 1602 switch (scmd->cmnd[0]) { 1607 1603 case REPORT_LUNS: 1608 1604 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0); 1609 - scmd->scsi_done(scmd); 1605 + scsi_done(scmd); 1610 
1606 return 0; 1611 1607 case MODE_SENSE: 1612 1608 if (scmd->device->channel >= cs->ctlr_info->physchan_present) { ··· 1620 1616 myrs_mode_sense(cs, scmd, ldev_info); 1621 1617 scmd->result = (DID_OK << 16); 1622 1618 } 1623 - scmd->scsi_done(scmd); 1619 + scsi_done(scmd); 1624 1620 return 0; 1625 1621 } 1626 1622 break; ··· 1760 1756 if (WARN_ON(!hw_sgl)) { 1761 1757 scsi_dma_unmap(scmd); 1762 1758 scmd->result = (DID_ERROR << 16); 1763 - scmd->scsi_done(scmd); 1759 + scsi_done(scmd); 1764 1760 return 0; 1765 1761 } 1766 1762 hw_sgl->sge_addr = (u64)sg_dma_address(sgl); ··· 1927 1923 .slave_configure = myrs_slave_configure, 1928 1924 .slave_destroy = myrs_slave_destroy, 1929 1925 .cmd_size = sizeof(struct myrs_cmdblk), 1930 - .shost_attrs = myrs_shost_attrs, 1931 - .sdev_attrs = myrs_sdev_attrs, 1926 + .shost_groups = myrs_shost_groups, 1927 + .sdev_groups = myrs_sdev_groups, 1932 1928 .this_id = -1, 1933 1929 }; 1934 1930 ··· 2087 2083 scmd->result = (DID_BAD_TARGET << 16); 2088 2084 else 2089 2085 scmd->result = (DID_OK << 16) | status; 2090 - scmd->scsi_done(scmd); 2086 + scsi_done(scmd); 2091 2087 } 2092 2088 2093 2089 static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+9 -7
drivers/scsi/ncr53c8xx.c
··· 4003 4003 while (lcmd) { 4004 4004 cmd = lcmd; 4005 4005 lcmd = (struct scsi_cmnd *) cmd->host_scribble; 4006 - cmd->scsi_done(cmd); 4006 + scsi_done(cmd); 4007 4007 } 4008 4008 } 4009 4009 ··· 7852 7852 return 0; 7853 7853 } 7854 7854 7855 - static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 7855 + static int ncr53c8xx_queue_command_lck(struct scsi_cmnd *cmd) 7856 7856 { 7857 + void (*done)(struct scsi_cmnd *) = scsi_done; 7857 7858 struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; 7858 7859 unsigned long flags; 7859 7860 int sts; ··· 7863 7862 printk("ncr53c8xx_queue_command\n"); 7864 7863 #endif 7865 7864 7866 - cmd->scsi_done = done; 7867 7865 cmd->host_scribble = NULL; 7868 7866 cmd->__data_mapped = 0; 7869 7867 cmd->__data_mapping = 0; ··· 8039 8039 .show = show_ncr53c8xx_revision, 8040 8040 }; 8041 8041 8042 - static struct device_attribute *ncr53c8xx_host_attrs[] = { 8043 - &ncr53c8xx_revision_attr, 8042 + static struct attribute *ncr53c8xx_host_attrs[] = { 8043 + &ncr53c8xx_revision_attr.attr, 8044 8044 NULL 8045 8045 }; 8046 + 8047 + ATTRIBUTE_GROUPS(ncr53c8xx_host); 8046 8048 8047 8049 /*========================================================== 8048 8050 ** ··· 8087 8085 8088 8086 if (!tpnt->name) 8089 8087 tpnt->name = SCSI_NCR_DRIVER_NAME; 8090 - if (!tpnt->shost_attrs) 8091 - tpnt->shost_attrs = ncr53c8xx_host_attrs; 8088 + if (!tpnt->shost_groups) 8089 + tpnt->shost_groups = ncr53c8xx_host_groups; 8092 8090 8093 8091 tpnt->queuecommand = ncr53c8xx_queue_command; 8094 8092 tpnt->slave_configure = ncr53c8xx_slave_configure;
+3 -4
drivers/scsi/nsp32.c
··· 904 904 return TRUE; 905 905 } 906 906 907 - static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, 908 - void (*done)(struct scsi_cmnd *)) 907 + static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt) 909 908 { 909 + void (*done)(struct scsi_cmnd *) = scsi_done; 910 910 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; 911 911 nsp32_target *target; 912 912 nsp32_lunt *cur_lunt; ··· 945 945 946 946 show_command(SCpnt); 947 947 948 - SCpnt->scsi_done = done; 949 948 data->CurrentSC = SCpnt; 950 949 SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION; 951 950 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); ··· 1545 1546 /* 1546 1547 * call scsi_done 1547 1548 */ 1548 - (*SCpnt->scsi_done)(SCpnt); 1549 + scsi_done(SCpnt); 1549 1550 1550 1551 /* 1551 1552 * reset parameters
+2 -5
drivers/scsi/pcmcia/nsp_cs.c
··· 178 178 179 179 data->CurrentSC = NULL; 180 180 181 - SCpnt->scsi_done(SCpnt); 181 + scsi_done(SCpnt); 182 182 } 183 183 184 - static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt, 185 - void (*done)(struct scsi_cmnd *)) 184 + static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt) 186 185 { 187 186 #ifdef NSP_DEBUG 188 187 /*unsigned int host_id = SCpnt->device->host->this_id;*/ ··· 195 196 SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt), 196 197 scsi_bufflen(SCpnt), scsi_sg_count(SCpnt)); 197 198 //nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC); 198 - 199 - SCpnt->scsi_done = done; 200 199 201 200 if (data->CurrentSC != NULL) { 202 201 nsp_msg(KERN_DEBUG, "CurrentSC!=NULL this can't be happen");
+7 -7
drivers/scsi/pcmcia/sym53c500_cs.c
··· 492 492 493 493 idle_out: 494 494 curSC->SCp.phase = idle; 495 - curSC->scsi_done(curSC); 495 + scsi_done(curSC); 496 496 goto out; 497 497 } 498 498 ··· 537 537 return (info_msg); 538 538 } 539 539 540 - static int 541 - SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) 540 + static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt) 542 541 { 543 542 int i; 544 543 int port_base = SCpnt->device->host->io_port; ··· 555 556 VDEB(printk("\n")); 556 557 557 558 data->current_SC = SCpnt; 558 - data->current_SC->scsi_done = done; 559 559 data->current_SC->SCp.phase = command_ph; 560 560 data->current_SC->SCp.Status = 0; 561 561 data->current_SC->SCp.Message = 0; ··· 650 652 .store = SYM53C500_store_pio, 651 653 }; 652 654 653 - static struct device_attribute *SYM53C500_shost_attrs[] = { 654 - &SYM53C500_pio_attr, 655 + static struct attribute *SYM53C500_shost_attrs[] = { 656 + &SYM53C500_pio_attr.attr, 655 657 NULL, 656 658 }; 659 + 660 + ATTRIBUTE_GROUPS(SYM53C500_shost); 657 661 658 662 /* 659 663 * scsi_host_template initializer ··· 671 671 .can_queue = 1, 672 672 .this_id = 7, 673 673 .sg_tablesize = 32, 674 - .shost_attrs = SYM53C500_shost_attrs 674 + .shost_groups = SYM53C500_shost_groups 675 675 }; 676 676 677 677 static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
+40 -30
drivers/scsi/pm8001/pm8001_ctl.c
··· 409 409 char *str = buf; 410 410 int start = 0; 411 411 u32 ib_offset = pm8001_ha->ib_offset; 412 + u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128; 412 413 #define IB_MEMMAP(c) \ 413 414 (*(u32 *)((u8 *)pm8001_ha-> \ 414 415 memoryMap.region[ib_offset].virt_ptr + \ ··· 420 419 start = start + 4; 421 420 } 422 421 pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET; 423 - if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) 422 + if (((pm8001_ha->evtlog_ib_offset) % queue_size) == 0) 424 423 pm8001_ha->evtlog_ib_offset = 0; 425 424 426 425 return str - buf; ··· 446 445 char *str = buf; 447 446 int start = 0; 448 447 u32 ob_offset = pm8001_ha->ob_offset; 448 + u32 queue_size = pm8001_ha->max_q_num * PM8001_MPI_QUEUE * 128; 449 449 #define OB_MEMMAP(c) \ 450 450 (*(u32 *)((u8 *)pm8001_ha-> \ 451 451 memoryMap.region[ob_offset].virt_ptr + \ ··· 457 455 start = start + 4; 458 456 } 459 457 pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET; 460 - if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) 458 + if (((pm8001_ha->evtlog_ob_offset) % queue_size) == 0) 461 459 pm8001_ha->evtlog_ob_offset = 0; 462 460 463 461 return str - buf; ··· 1002 1000 } 1003 1001 static DEVICE_ATTR_RO(ctl_iop1_count); 1004 1002 1005 - struct device_attribute *pm8001_host_attrs[] = { 1006 - &dev_attr_interface_rev, 1007 - &dev_attr_controller_fatal_error, 1008 - &dev_attr_fw_version, 1009 - &dev_attr_update_fw, 1010 - &dev_attr_aap_log, 1011 - &dev_attr_iop_log, 1012 - &dev_attr_fatal_log, 1013 - &dev_attr_non_fatal_log, 1014 - &dev_attr_non_fatal_count, 1015 - &dev_attr_gsm_log, 1016 - &dev_attr_max_out_io, 1017 - &dev_attr_max_devices, 1018 - &dev_attr_max_sg_list, 1019 - &dev_attr_sas_spec_support, 1020 - &dev_attr_logging_level, 1021 - &dev_attr_event_log_size, 1022 - &dev_attr_host_sas_address, 1023 - &dev_attr_bios_version, 1024 - &dev_attr_ib_log, 1025 - &dev_attr_ob_log, 1026 - &dev_attr_ila_version, 1027 - &dev_attr_inc_fw_ver, 1028 - 
&dev_attr_ctl_mpi_state, 1029 - &dev_attr_ctl_hmi_error, 1030 - &dev_attr_ctl_raae_count, 1031 - &dev_attr_ctl_iop0_count, 1032 - &dev_attr_ctl_iop1_count, 1003 + static struct attribute *pm8001_host_attrs[] = { 1004 + &dev_attr_interface_rev.attr, 1005 + &dev_attr_controller_fatal_error.attr, 1006 + &dev_attr_fw_version.attr, 1007 + &dev_attr_update_fw.attr, 1008 + &dev_attr_aap_log.attr, 1009 + &dev_attr_iop_log.attr, 1010 + &dev_attr_fatal_log.attr, 1011 + &dev_attr_non_fatal_log.attr, 1012 + &dev_attr_non_fatal_count.attr, 1013 + &dev_attr_gsm_log.attr, 1014 + &dev_attr_max_out_io.attr, 1015 + &dev_attr_max_devices.attr, 1016 + &dev_attr_max_sg_list.attr, 1017 + &dev_attr_sas_spec_support.attr, 1018 + &dev_attr_logging_level.attr, 1019 + &dev_attr_event_log_size.attr, 1020 + &dev_attr_host_sas_address.attr, 1021 + &dev_attr_bios_version.attr, 1022 + &dev_attr_ib_log.attr, 1023 + &dev_attr_ob_log.attr, 1024 + &dev_attr_ila_version.attr, 1025 + &dev_attr_inc_fw_ver.attr, 1026 + &dev_attr_ctl_mpi_state.attr, 1027 + &dev_attr_ctl_hmi_error.attr, 1028 + &dev_attr_ctl_raae_count.attr, 1029 + &dev_attr_ctl_iop0_count.attr, 1030 + &dev_attr_ctl_iop1_count.attr, 1033 1031 NULL, 1034 1032 }; 1035 1033 1034 + static const struct attribute_group pm8001_host_attr_group = { 1035 + .attrs = pm8001_host_attrs 1036 + }; 1037 + 1038 + const struct attribute_group *pm8001_host_groups[] = { 1039 + &pm8001_host_attr_group, 1040 + NULL 1041 + };
+8 -4
drivers/scsi/pm8001/pm8001_hwi.c
··· 3169 3169 * fw_control_context->usrAddr 3170 3170 */ 3171 3171 complete(pm8001_ha->nvmd_completion); 3172 - pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n"); 3172 + pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n"); 3173 3173 ccb->task = NULL; 3174 3174 ccb->ccb_tag = 0xFFFFFFFF; 3175 3175 pm8001_tag_free(pm8001_ha, tag); ··· 3358 3358 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 3359 3359 unsigned long flags; 3360 3360 u8 deviceType = pPayload->sas_identify.dev_type; 3361 + phy->port = port; 3362 + port->port_id = port_id; 3361 3363 port->port_state = portstate; 3362 3364 phy->phy_state = PHY_STATE_LINK_UP_SPC; 3363 3365 pm8001_dbg(pm8001_ha, MSG, ··· 3436 3434 unsigned long flags; 3437 3435 pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n", 3438 3436 port_id, phy_id); 3437 + phy->port = port; 3438 + port->port_id = port_id; 3439 3439 port->port_state = portstate; 3440 3440 phy->phy_state = PHY_STATE_LINK_UP_SPC; 3441 3441 port->port_attached = 1; ··· 4464 4460 u16 ITNT = 2000; 4465 4461 struct domain_device *dev = pm8001_dev->sas_device; 4466 4462 struct domain_device *parent_dev = dev->parent; 4463 + struct pm8001_port *port = dev->port->lldd_port; 4467 4464 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4468 4465 4469 4466 memset(&payload, 0, sizeof(payload)); ··· 4481 4476 if (pm8001_dev->dev_type == SAS_SATA_DEV) 4482 4477 stp_sspsmp_sata = 0x00; /* stp*/ 4483 4478 else if (pm8001_dev->dev_type == SAS_END_DEVICE || 4484 - pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || 4485 - pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) 4479 + dev_is_expander(pm8001_dev->dev_type)) 4486 4480 stp_sspsmp_sata = 0x01; /*ssp or smp*/ 4487 4481 } 4488 4482 if (parent_dev && dev_is_expander(parent_dev->dev_type)) ··· 4492 4488 linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? 
4493 4489 pm8001_dev->sas_device->linkrate : dev->port->linkrate; 4494 4490 payload.phyid_portid = 4495 - cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) | 4491 + cpu_to_le32(((port->port_id) & 0x0F) | 4496 4492 ((phy_id & 0x0F) << 4)); 4497 4493 payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) | 4498 4494 ((linkrate & 0x0F) * 0x1000000) |
+13 -1
drivers/scsi/pm8001/pm8001_init.c
··· 107 107 #ifdef CONFIG_COMPAT 108 108 .compat_ioctl = sas_ioctl, 109 109 #endif 110 - .shost_attrs = pm8001_host_attrs, 110 + .shost_groups = pm8001_host_groups, 111 111 .track_queue_depth = 1, 112 112 }; 113 113 ··· 128 128 .lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset, 129 129 .lldd_lu_reset = pm8001_lu_reset, 130 130 .lldd_query_task = pm8001_query_task, 131 + .lldd_port_formed = pm8001_port_formed, 131 132 }; 132 133 133 134 /** ··· 1199 1198 goto err_out; 1200 1199 1201 1200 /* Memory region for ccb_info*/ 1201 + pm8001_ha->ccb_count = ccb_count; 1202 1202 pm8001_ha->ccb_info = 1203 1203 kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL); 1204 1204 if (!pm8001_ha->ccb_info) { ··· 1261 1259 tasklet_kill(&pm8001_ha->tasklet[j]); 1262 1260 #endif 1263 1261 scsi_host_put(pm8001_ha->shost); 1262 + 1263 + for (i = 0; i < pm8001_ha->ccb_count; i++) { 1264 + dma_free_coherent(&pm8001_ha->pdev->dev, 1265 + sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG, 1266 + pm8001_ha->ccb_info[i].buf_prd, 1267 + pm8001_ha->ccb_info[i].ccb_dma_handle); 1268 + } 1269 + kfree(pm8001_ha->ccb_info); 1270 + kfree(pm8001_ha->devices); 1271 + 1264 1272 pm8001_free(pm8001_ha); 1265 1273 kfree(sha->sas_phy); 1266 1274 kfree(sha->sas_port);
+15
drivers/scsi/pm8001/pm8001_sas.c
··· 1355 1355 tmf_task.tmf = TMF_CLEAR_TASK_SET; 1356 1356 return pm8001_issue_ssp_tmf(dev, lun, &tmf_task); 1357 1357 } 1358 + 1359 + void pm8001_port_formed(struct asd_sas_phy *sas_phy) 1360 + { 1361 + struct sas_ha_struct *sas_ha = sas_phy->ha; 1362 + struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha; 1363 + struct pm8001_phy *phy = sas_phy->lldd_phy; 1364 + struct asd_sas_port *sas_port = sas_phy->port; 1365 + struct pm8001_port *port = phy->port; 1366 + 1367 + if (!sas_port) { 1368 + pm8001_dbg(pm8001_ha, FAIL, "Received null port\n"); 1369 + return; 1370 + } 1371 + sas_port->lldd_port = port; 1372 + }
+5 -3
drivers/scsi/pm8001/pm8001_sas.h
··· 230 230 u8 port_attached; 231 231 u16 wide_port_phymap; 232 232 u8 port_state; 233 + u8 port_id; 233 234 struct list_head list; 234 235 }; 235 236 ··· 458 457 __le32 producer_index; 459 458 u32 consumer_idx; 460 459 spinlock_t oq_lock; 460 + unsigned long lock_flags; 461 461 }; 462 462 struct pm8001_hba_memspace { 463 463 void __iomem *memvirtaddr; ··· 518 516 u32 iomb_size; /* SPC and SPCV IOMB size */ 519 517 struct pm8001_device *devices; 520 518 struct pm8001_ccb_info *ccb_info; 519 + u32 ccb_count; 521 520 #ifdef PM8001_USE_MSIX 522 521 int number_of_intr;/*will be used in remove()*/ 523 522 char intr_drvname[PM8001_MAX_MSIX_VEC] ··· 654 651 int pm8001_I_T_nexus_reset(struct domain_device *dev); 655 652 int pm8001_I_T_nexus_event_handler(struct domain_device *dev); 656 653 int pm8001_query_task(struct sas_task *task); 654 + void pm8001_port_formed(struct asd_sas_phy *sas_phy); 657 655 void pm8001_open_reject_retry( 658 656 struct pm8001_hba_info *pm8001_ha, 659 657 struct sas_task *task_to_close, ··· 733 729 int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha); 734 730 void pm8001_free_dev(struct pm8001_device *pm8001_dev); 735 731 /* ctl shared API */ 736 - extern struct device_attribute *pm8001_host_attrs[]; 732 + extern const struct attribute_group *pm8001_host_groups[]; 737 733 738 734 static inline void 739 735 pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha, ··· 742 738 { 743 739 pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx); 744 740 smp_mb(); /*in order to force CPU ordering*/ 745 - spin_unlock(&pm8001_ha->lock); 746 741 task->task_done(task); 747 - spin_lock(&pm8001_ha->lock); 748 742 } 749 743 750 744 #endif
+51 -12
drivers/scsi/pm8001/pm80xx_hwi.c
··· 2379 2379 2380 2380 /*See the comments for mpi_ssp_completion */ 2381 2381 static void 2382 - mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) 2382 + mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, 2383 + struct outbound_queue_table *circularQ, void *piomb) 2383 2384 { 2384 2385 struct sas_task *t; 2385 2386 struct pm8001_ccb_info *ccb; ··· 2617 2616 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2618 2617 ts->resp = SAS_TASK_UNDELIVERED; 2619 2618 ts->stat = SAS_QUEUE_FULL; 2619 + spin_unlock_irqrestore(&circularQ->oq_lock, 2620 + circularQ->lock_flags); 2620 2621 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2622 + spin_lock_irqsave(&circularQ->oq_lock, 2623 + circularQ->lock_flags); 2621 2624 return; 2622 2625 } 2623 2626 break; ··· 2637 2632 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2638 2633 ts->resp = SAS_TASK_UNDELIVERED; 2639 2634 ts->stat = SAS_QUEUE_FULL; 2635 + spin_unlock_irqrestore(&circularQ->oq_lock, 2636 + circularQ->lock_flags); 2640 2637 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2638 + spin_lock_irqsave(&circularQ->oq_lock, 2639 + circularQ->lock_flags); 2641 2640 return; 2642 2641 } 2643 2642 break; ··· 2665 2656 IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); 2666 2657 ts->resp = SAS_TASK_UNDELIVERED; 2667 2658 ts->stat = SAS_QUEUE_FULL; 2659 + spin_unlock_irqrestore(&circularQ->oq_lock, 2660 + circularQ->lock_flags); 2668 2661 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2662 + spin_lock_irqsave(&circularQ->oq_lock, 2663 + circularQ->lock_flags); 2669 2664 return; 2670 2665 } 2671 2666 break; ··· 2740 2727 IO_DS_NON_OPERATIONAL); 2741 2728 ts->resp = SAS_TASK_UNDELIVERED; 2742 2729 ts->stat = SAS_QUEUE_FULL; 2730 + spin_unlock_irqrestore(&circularQ->oq_lock, 2731 + circularQ->lock_flags); 2743 2732 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2733 + spin_lock_irqsave(&circularQ->oq_lock, 2734 + circularQ->lock_flags); 2744 2735 return; 2745 2736 } 2746 2737 break; ··· 2764 2747 IO_DS_IN_ERROR); 2765 2748 ts->resp = 
SAS_TASK_UNDELIVERED; 2766 2749 ts->stat = SAS_QUEUE_FULL; 2750 + spin_unlock_irqrestore(&circularQ->oq_lock, 2751 + circularQ->lock_flags); 2767 2752 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2753 + spin_lock_irqsave(&circularQ->oq_lock, 2754 + circularQ->lock_flags); 2768 2755 return; 2769 2756 } 2770 2757 break; ··· 2806 2785 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2807 2786 } else { 2808 2787 spin_unlock_irqrestore(&t->task_state_lock, flags); 2788 + spin_unlock_irqrestore(&circularQ->oq_lock, 2789 + circularQ->lock_flags); 2809 2790 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2791 + spin_lock_irqsave(&circularQ->oq_lock, 2792 + circularQ->lock_flags); 2810 2793 } 2811 2794 } 2812 2795 2813 2796 /*See the comments for mpi_ssp_completion */ 2814 - static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) 2797 + static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, 2798 + struct outbound_queue_table *circularQ, void *piomb) 2815 2799 { 2816 2800 struct sas_task *t; 2817 2801 struct task_status_struct *ts; ··· 2916 2890 IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); 2917 2891 ts->resp = SAS_TASK_COMPLETE; 2918 2892 ts->stat = SAS_QUEUE_FULL; 2893 + spin_unlock_irqrestore(&circularQ->oq_lock, 2894 + circularQ->lock_flags); 2919 2895 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 2896 + spin_lock_irqsave(&circularQ->oq_lock, 2897 + circularQ->lock_flags); 2920 2898 return; 2921 2899 } 2922 2900 break; ··· 3032 3002 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 3033 3003 } else { 3034 3004 spin_unlock_irqrestore(&t->task_state_lock, flags); 3005 + spin_unlock_irqrestore(&circularQ->oq_lock, 3006 + circularQ->lock_flags); 3035 3007 pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); 3008 + spin_lock_irqsave(&circularQ->oq_lock, 3009 + circularQ->lock_flags); 3036 3010 } 3037 3011 } 3038 3012 ··· 3333 3299 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 3334 3300 unsigned long flags; 3335 3301 u8 deviceType = 
pPayload->sas_identify.dev_type; 3302 + phy->port = port; 3303 + port->port_id = port_id; 3336 3304 port->port_state = portstate; 3337 3305 port->wide_port_phymap |= (1U << phy_id); 3338 3306 phy->phy_state = PHY_STATE_LINK_UP_SPCV; ··· 3416 3380 "port id %d, phy id %d link_rate %d portstate 0x%x\n", 3417 3381 port_id, phy_id, link_rate, portstate); 3418 3382 3383 + phy->port = port; 3384 + port->port_id = port_id; 3419 3385 port->port_state = portstate; 3420 3386 phy->phy_state = PHY_STATE_LINK_UP_SPCV; 3421 3387 port->port_attached = 1; ··· 3940 3902 * @pm8001_ha: our hba card information 3941 3903 * @piomb: IO message buffer 3942 3904 */ 3943 - static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) 3905 + static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, 3906 + struct outbound_queue_table *circularQ, void *piomb) 3944 3907 { 3945 3908 __le32 pHeader = *(__le32 *)piomb; 3946 3909 u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF); ··· 3983 3944 break; 3984 3945 case OPC_OUB_SATA_COMP: 3985 3946 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_COMP\n"); 3986 - mpi_sata_completion(pm8001_ha, piomb); 3947 + mpi_sata_completion(pm8001_ha, circularQ, piomb); 3987 3948 break; 3988 3949 case OPC_OUB_SATA_EVENT: 3989 3950 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SATA_EVENT\n"); 3990 - mpi_sata_event(pm8001_ha, piomb); 3951 + mpi_sata_event(pm8001_ha, circularQ, piomb); 3991 3952 break; 3992 3953 case OPC_OUB_SSP_EVENT: 3993 3954 pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SSP_EVENT\n"); ··· 4156 4117 void *pMsg1 = NULL; 4157 4118 u8 bc; 4158 4119 u32 ret = MPI_IO_STATUS_FAIL; 4159 - unsigned long flags; 4160 4120 u32 regval; 4161 4121 4162 4122 if (vec == (pm8001_ha->max_q_num - 1)) { ··· 4172 4134 } 4173 4135 } 4174 4136 circularQ = &pm8001_ha->outbnd_q_tbl[vec]; 4175 - spin_lock_irqsave(&circularQ->oq_lock, flags); 4137 + spin_lock_irqsave(&circularQ->oq_lock, circularQ->lock_flags); 4176 4138 do { 4177 4139 /* spurious interrupt during setup if kexec-ing 
and 4178 4140 * driver doing a doorbell access w/ the pre-kexec oq ··· 4183 4145 ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); 4184 4146 if (MPI_IO_STATUS_SUCCESS == ret) { 4185 4147 /* process the outbound message */ 4186 - process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); 4148 + process_one_iomb(pm8001_ha, circularQ, 4149 + (void *)(pMsg1 - 4)); 4187 4150 /* free the message from the outbound circular buffer */ 4188 4151 pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, 4189 4152 circularQ, bc); ··· 4199 4160 break; 4200 4161 } 4201 4162 } while (1); 4202 - spin_unlock_irqrestore(&circularQ->oq_lock, flags); 4163 + spin_unlock_irqrestore(&circularQ->oq_lock, circularQ->lock_flags); 4203 4164 return ret; 4204 4165 } 4205 4166 ··· 4847 4808 u16 ITNT = 2000; 4848 4809 struct domain_device *dev = pm8001_dev->sas_device; 4849 4810 struct domain_device *parent_dev = dev->parent; 4811 + struct pm8001_port *port = dev->port->lldd_port; 4850 4812 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4851 4813 4852 4814 memset(&payload, 0, sizeof(payload)); ··· 4865 4825 if (pm8001_dev->dev_type == SAS_SATA_DEV) 4866 4826 stp_sspsmp_sata = 0x00; /* stp*/ 4867 4827 else if (pm8001_dev->dev_type == SAS_END_DEVICE || 4868 - pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || 4869 - pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) 4828 + dev_is_expander(pm8001_dev->dev_type)) 4870 4829 stp_sspsmp_sata = 0x01; /*ssp or smp*/ 4871 4830 } 4872 4831 if (parent_dev && dev_is_expander(parent_dev->dev_type)) ··· 4879 4840 pm8001_dev->sas_device->linkrate : dev->port->linkrate; 4880 4841 4881 4842 payload.phyid_portid = 4882 - cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) | 4843 + cpu_to_le32(((port->port_id) & 0xFF) | 4883 4844 ((phy_id & 0xFF) << 8)); 4884 4845 4885 4846 payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
+12 -15
drivers/scsi/pmcraid.c
··· 837 837 838 838 scsi_dma_unmap(scsi_cmd); 839 839 pmcraid_return_cmd(cmd); 840 - scsi_cmd->scsi_done(scsi_cmd); 840 + scsi_done(scsi_cmd); 841 841 } 842 842 843 843 /** ··· 2017 2017 le32_to_cpu(resp) >> 2, 2018 2018 cmd->ioa_cb->ioarcb.cdb[0], 2019 2019 scsi_cmd->result); 2020 - scsi_cmd->scsi_done(scsi_cmd); 2020 + scsi_done(scsi_cmd); 2021 2021 } else if (cmd->cmd_done == pmcraid_internal_done || 2022 2022 cmd->cmd_done == pmcraid_erp_done) { 2023 2023 cmd->cmd_done(cmd); ··· 2814 2814 2815 2815 if (rc == 0) { 2816 2816 scsi_dma_unmap(scsi_cmd); 2817 - scsi_cmd->scsi_done(scsi_cmd); 2817 + scsi_done(scsi_cmd); 2818 2818 } 2819 2819 2820 2820 return rc; ··· 3313 3313 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy 3314 3314 * SCSI_MLQUEUE_HOST_BUSY if host is busy 3315 3315 */ 3316 - static int pmcraid_queuecommand_lck( 3317 - struct scsi_cmnd *scsi_cmd, 3318 - void (*done) (struct scsi_cmnd *) 3319 - ) 3316 + static int pmcraid_queuecommand_lck(struct scsi_cmnd *scsi_cmd) 3320 3317 { 3321 3318 struct pmcraid_instance *pinstance; 3322 3319 struct pmcraid_resource_entry *res; ··· 3325 3328 pinstance = 3326 3329 (struct pmcraid_instance *)scsi_cmd->device->host->hostdata; 3327 3330 fw_version = be16_to_cpu(pinstance->inq_data->fw_version); 3328 - scsi_cmd->scsi_done = done; 3329 3331 res = scsi_cmd->device->hostdata; 3330 3332 scsi_cmd->result = (DID_OK << 16); 3331 3333 ··· 3334 3338 if (pinstance->ioa_state == IOA_STATE_DEAD) { 3335 3339 pmcraid_info("IOA is dead, but queuecommand is scheduled\n"); 3336 3340 scsi_cmd->result = (DID_NO_CONNECT << 16); 3337 - scsi_cmd->scsi_done(scsi_cmd); 3341 + scsi_done(scsi_cmd); 3338 3342 return 0; 3339 3343 } 3340 3344 ··· 3347 3351 */ 3348 3352 if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) { 3349 3353 pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n"); 3350 - scsi_cmd->scsi_done(scsi_cmd); 3354 + scsi_done(scsi_cmd); 3351 3355 return 0; 3352 3356 } 3353 3357 ··· 4093 4097 .show = pmcraid_show_adapter_id, 4094 
4098 }; 4095 4099 4096 - static struct device_attribute *pmcraid_host_attrs[] = { 4097 - &pmcraid_log_level_attr, 4098 - &pmcraid_driver_version_attr, 4099 - &pmcraid_adapter_id_attr, 4100 + static struct attribute *pmcraid_host_attrs[] = { 4101 + &pmcraid_log_level_attr.attr, 4102 + &pmcraid_driver_version_attr.attr, 4103 + &pmcraid_adapter_id_attr.attr, 4100 4104 NULL, 4101 4105 }; 4102 4106 4107 + ATTRIBUTE_GROUPS(pmcraid_host); 4103 4108 4104 4109 /* host template structure for pmcraid driver */ 4105 4110 static struct scsi_host_template pmcraid_host_template = { ··· 4123 4126 .max_sectors = PMCRAID_IOA_MAX_SECTORS, 4124 4127 .no_write_same = 1, 4125 4128 .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, 4126 - .shost_attrs = pmcraid_host_attrs, 4129 + .shost_groups = pmcraid_host_groups, 4127 4130 .proc_name = PMCRAID_DRIVER_NAME, 4128 4131 }; 4129 4132
+2 -4
drivers/scsi/ppa.c
··· 665 665 666 666 dev->cur_cmd = NULL; 667 667 668 - cmd->scsi_done(cmd); 668 + scsi_done(cmd); 669 669 } 670 670 671 671 static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) ··· 786 786 return 0; 787 787 } 788 788 789 - static int ppa_queuecommand_lck(struct scsi_cmnd *cmd, 790 - void (*done) (struct scsi_cmnd *)) 789 + static int ppa_queuecommand_lck(struct scsi_cmnd *cmd) 791 790 { 792 791 ppa_struct *dev = ppa_dev(cmd->device->host); 793 792 ··· 797 798 dev->failed = 0; 798 799 dev->jstart = jiffies; 799 800 dev->cur_cmd = cmd; 800 - cmd->scsi_done = done; 801 801 cmd->result = DID_ERROR << 16; /* default return code */ 802 802 cmd->SCp.phase = 0; /* bus free */ 803 803
+3 -5
drivers/scsi/ps3rom.c
··· 200 200 return 0; 201 201 } 202 202 203 - static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd, 204 - void (*done)(struct scsi_cmnd *)) 203 + static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd) 205 204 { 206 205 struct ps3rom_private *priv = shost_priv(cmd->device->host); 207 206 struct ps3_storage_device *dev = priv->dev; ··· 208 209 int res; 209 210 210 211 priv->curr_cmd = cmd; 211 - cmd->scsi_done = done; 212 212 213 213 opcode = cmd->cmnd[0]; 214 214 /* ··· 235 237 scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0, 0); 236 238 cmd->result = res; 237 239 priv->curr_cmd = NULL; 238 - cmd->scsi_done(cmd); 240 + scsi_done(cmd); 239 241 } 240 242 241 243 return 0; ··· 319 321 320 322 done: 321 323 priv->curr_cmd = NULL; 322 - cmd->scsi_done(cmd); 324 + scsi_done(cmd); 323 325 return IRQ_HANDLED; 324 326 } 325 327
+1 -1
drivers/scsi/qedf/qedf.h
··· 498 498 extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, 499 499 u8 cmd_type); 500 500 501 - extern struct device_attribute *qedf_host_attrs[]; 501 + extern const struct attribute_group *qedf_host_groups[]; 502 502 extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, 503 503 unsigned int timer_msec); 504 504 extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
+12 -3
drivers/scsi/qedf/qedf_attr.c
··· 60 60 static DEVICE_ATTR_RO(fcoe_mac); 61 61 static DEVICE_ATTR_RO(fka_period); 62 62 63 - struct device_attribute *qedf_host_attrs[] = { 64 - &dev_attr_fcoe_mac, 65 - &dev_attr_fka_period, 63 + static struct attribute *qedf_host_attrs[] = { 64 + &dev_attr_fcoe_mac.attr, 65 + &dev_attr_fka_period.attr, 66 66 NULL, 67 + }; 68 + 69 + static const struct attribute_group qedf_host_attr_group = { 70 + .attrs = qedf_host_attrs 71 + }; 72 + 73 + const struct attribute_group *qedf_host_groups[] = { 74 + &qedf_host_attr_group, 75 + NULL 67 76 }; 68 77 69 78 extern const struct qed_fcoe_ops *qed_ops;
+6 -13
drivers/scsi/qedf/qedf_io.c
··· 947 947 "Number of SG elements %d exceeds what hardware limitation of %d.\n", 948 948 num_sgs, QEDF_MAX_BDS_PER_CMD); 949 949 sc_cmd->result = DID_ERROR; 950 - sc_cmd->scsi_done(sc_cmd); 950 + scsi_done(sc_cmd); 951 951 return 0; 952 952 } 953 953 ··· 957 957 "Returning DNC as unloading or stop io, flags 0x%lx.\n", 958 958 qedf->flags); 959 959 sc_cmd->result = DID_NO_CONNECT << 16; 960 - sc_cmd->scsi_done(sc_cmd); 960 + scsi_done(sc_cmd); 961 961 return 0; 962 962 } 963 963 ··· 966 966 "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n", 967 967 sc_cmd); 968 968 sc_cmd->result = DID_NO_CONNECT << 16; 969 - sc_cmd->scsi_done(sc_cmd); 969 + scsi_done(sc_cmd); 970 970 return 0; 971 971 } 972 972 ··· 976 976 "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n", 977 977 rval, rport->port_id); 978 978 sc_cmd->result = rval; 979 - sc_cmd->scsi_done(sc_cmd); 979 + scsi_done(sc_cmd); 980 980 return 0; 981 981 } 982 982 ··· 1313 1313 1314 1314 io_req->sc_cmd = NULL; 1315 1315 sc_cmd->SCp.ptr = NULL; 1316 - sc_cmd->scsi_done(sc_cmd); 1316 + scsi_done(sc_cmd); 1317 1317 kref_put(&io_req->refcount, qedf_release_cmd); 1318 1318 } 1319 1319 ··· 1386 1386 goto bad_scsi_ptr; 1387 1387 } 1388 1388 1389 - if (!sc_cmd->scsi_done) { 1390 - QEDF_ERR(&qedf->dbg_ctx, 1391 - "sc_cmd->scsi_done for sc_cmd %p is NULL.\n", 1392 - sc_cmd); 1393 - goto bad_scsi_ptr; 1394 - } 1395 - 1396 1389 qedf_unmap_sg_list(qedf, io_req); 1397 1390 1398 1391 sc_cmd->result = result << 16; ··· 1410 1417 1411 1418 io_req->sc_cmd = NULL; 1412 1419 sc_cmd->SCp.ptr = NULL; 1413 - sc_cmd->scsi_done(sc_cmd); 1420 + scsi_done(sc_cmd); 1414 1421 kref_put(&io_req->refcount, qedf_release_cmd); 1415 1422 return; 1416 1423
+1 -1
drivers/scsi/qedf/qedf_main.c
··· 986 986 .cmd_per_lun = 32, 987 987 .max_sectors = 0xffff, 988 988 .queuecommand = qedf_queuecommand, 989 - .shost_attrs = qedf_host_attrs, 989 + .shost_groups = qedf_host_groups, 990 990 .eh_abort_handler = qedf_eh_abort, 991 991 .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */ 992 992 .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
+1 -1
drivers/scsi/qedi/qedi_gbl.h
··· 22 22 extern const struct qed_iscsi_ops *qedi_ops; 23 23 extern const struct qedi_debugfs_ops qedi_debugfs_ops[]; 24 24 extern const struct file_operations qedi_dbg_fops[]; 25 - extern struct device_attribute *qedi_shost_attrs[]; 25 + extern const struct attribute_group *qedi_shost_groups[]; 26 26 27 27 int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep); 28 28 void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+1 -1
drivers/scsi/qedi/qedi_iscsi.c
··· 58 58 .max_sectors = 0xffff, 59 59 .dma_boundary = QEDI_HW_DMA_BOUNDARY, 60 60 .cmd_per_lun = 128, 61 - .shost_attrs = qedi_shost_attrs, 61 + .shost_groups = qedi_shost_groups, 62 62 }; 63 63 64 64 static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
+12 -3
drivers/scsi/qedi/qedi_sysfs.c
··· 42 42 static DEVICE_ATTR_RO(port_state); 43 43 static DEVICE_ATTR_RO(speed); 44 44 45 - struct device_attribute *qedi_shost_attrs[] = { 46 - &dev_attr_port_state, 47 - &dev_attr_speed, 45 + static struct attribute *qedi_shost_attrs[] = { 46 + &dev_attr_port_state.attr, 47 + &dev_attr_speed.attr, 48 + NULL 49 + }; 50 + 51 + static const struct attribute_group qedi_shost_attr_group = { 52 + .attrs = qedi_shost_attrs 53 + }; 54 + 55 + const struct attribute_group *qedi_shost_groups[] = { 56 + &qedi_shost_attr_group, 48 57 NULL 49 58 };
+3 -5
drivers/scsi/qla1280.c
··· 689 689 * handling). Unfortunately, it sometimes calls the scheduler in interrupt 690 690 * context which is a big NO! NO!. 691 691 **************************************************************************/ 692 - static int 693 - qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) 692 + static int qla1280_queuecommand_lck(struct scsi_cmnd *cmd) 694 693 { 695 694 struct Scsi_Host *host = cmd->device->host; 696 695 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; 697 696 struct srb *sp = (struct srb *)CMD_SP(cmd); 698 697 int status; 699 698 700 - cmd->scsi_done = fn; 701 699 sp->cmd = cmd; 702 700 sp->flags = 0; 703 701 sp->wait = NULL; ··· 753 755 sp->wait = NULL; 754 756 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) { 755 757 status = SUCCESS; 756 - (*cmd->scsi_done)(cmd); 758 + scsi_done(cmd); 757 759 } 758 760 return status; 759 761 } ··· 1275 1277 ha->actthreads--; 1276 1278 1277 1279 if (sp->wait == NULL) 1278 - (*(cmd)->scsi_done)(cmd); 1280 + scsi_done(cmd); 1279 1281 else 1280 1282 complete(sp->wait); 1281 1283 }
+88 -63
drivers/scsi/qla2xxx/qla_attr.c
··· 1868 1868 return strlen(buf); 1869 1869 } 1870 1870 1871 + static const struct { 1872 + u16 rate; 1873 + char *str; 1874 + } port_speed_str[] = { 1875 + { PORT_SPEED_4GB, "4" }, 1876 + { PORT_SPEED_8GB, "8" }, 1877 + { PORT_SPEED_16GB, "16" }, 1878 + { PORT_SPEED_32GB, "32" }, 1879 + { PORT_SPEED_64GB, "64" }, 1880 + { PORT_SPEED_10GB, "10" }, 1881 + }; 1882 + 1871 1883 static ssize_t 1872 1884 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, 1873 1885 char *buf) ··· 1887 1875 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); 1888 1876 struct qla_hw_data *ha = vha->hw; 1889 1877 ssize_t rval; 1890 - char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"}; 1878 + u16 i; 1879 + char *speed = "Unknown"; 1891 1880 1892 1881 rval = qla2x00_get_data_rate(vha); 1893 1882 if (rval != QLA_SUCCESS) { ··· 1897 1884 return -EINVAL; 1898 1885 } 1899 1886 1900 - return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]); 1887 + for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) { 1888 + if (port_speed_str[i].rate != ha->link_data_rate) 1889 + continue; 1890 + speed = port_speed_str[i].str; 1891 + break; 1892 + } 1893 + 1894 + return scnprintf(buf, PAGE_SIZE, "%s\n", speed); 1901 1895 } 1902 1896 1903 1897 static ssize_t ··· 2481 2461 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL); 2482 2462 static DEVICE_ATTR_RO(edif_doorbell); 2483 2463 2484 - 2485 - struct device_attribute *qla2x00_host_attrs[] = { 2486 - &dev_attr_driver_version, 2487 - &dev_attr_fw_version, 2488 - &dev_attr_serial_num, 2489 - &dev_attr_isp_name, 2490 - &dev_attr_isp_id, 2491 - &dev_attr_model_name, 2492 - &dev_attr_model_desc, 2493 - &dev_attr_pci_info, 2494 - &dev_attr_link_state, 2495 - &dev_attr_zio, 2496 - &dev_attr_zio_timer, 2497 - &dev_attr_beacon, 2498 - &dev_attr_beacon_config, 2499 - &dev_attr_optrom_bios_version, 2500 - &dev_attr_optrom_efi_version, 2501 - &dev_attr_optrom_fcode_version, 2502 - &dev_attr_optrom_fw_version, 2503 - 
&dev_attr_84xx_fw_version, 2504 - &dev_attr_total_isp_aborts, 2505 - &dev_attr_serdes_version, 2506 - &dev_attr_mpi_version, 2507 - &dev_attr_phy_version, 2508 - &dev_attr_flash_block_size, 2509 - &dev_attr_vlan_id, 2510 - &dev_attr_vn_port_mac_address, 2511 - &dev_attr_fabric_param, 2512 - &dev_attr_fw_state, 2513 - &dev_attr_optrom_gold_fw_version, 2514 - &dev_attr_thermal_temp, 2515 - &dev_attr_diag_requests, 2516 - &dev_attr_diag_megabytes, 2517 - &dev_attr_fw_dump_size, 2518 - &dev_attr_allow_cna_fw_dump, 2519 - &dev_attr_pep_version, 2520 - &dev_attr_min_supported_speed, 2521 - &dev_attr_max_supported_speed, 2522 - &dev_attr_zio_threshold, 2523 - &dev_attr_dif_bundle_statistics, 2524 - &dev_attr_port_speed, 2525 - &dev_attr_port_no, 2526 - &dev_attr_fw_attr, 2527 - &dev_attr_dport_diagnostics, 2528 - &dev_attr_edif_doorbell, 2529 - &dev_attr_mpi_pause, 2530 - NULL, /* reserve for qlini_mode */ 2531 - NULL, /* reserve for ql2xiniexchg */ 2532 - NULL, /* reserve for ql2xexchoffld */ 2464 + static struct attribute *qla2x00_host_attrs[] = { 2465 + &dev_attr_driver_version.attr, 2466 + &dev_attr_fw_version.attr, 2467 + &dev_attr_serial_num.attr, 2468 + &dev_attr_isp_name.attr, 2469 + &dev_attr_isp_id.attr, 2470 + &dev_attr_model_name.attr, 2471 + &dev_attr_model_desc.attr, 2472 + &dev_attr_pci_info.attr, 2473 + &dev_attr_link_state.attr, 2474 + &dev_attr_zio.attr, 2475 + &dev_attr_zio_timer.attr, 2476 + &dev_attr_beacon.attr, 2477 + &dev_attr_beacon_config.attr, 2478 + &dev_attr_optrom_bios_version.attr, 2479 + &dev_attr_optrom_efi_version.attr, 2480 + &dev_attr_optrom_fcode_version.attr, 2481 + &dev_attr_optrom_fw_version.attr, 2482 + &dev_attr_84xx_fw_version.attr, 2483 + &dev_attr_total_isp_aborts.attr, 2484 + &dev_attr_serdes_version.attr, 2485 + &dev_attr_mpi_version.attr, 2486 + &dev_attr_phy_version.attr, 2487 + &dev_attr_flash_block_size.attr, 2488 + &dev_attr_vlan_id.attr, 2489 + &dev_attr_vn_port_mac_address.attr, 2490 + &dev_attr_fabric_param.attr, 2491 
+ &dev_attr_fw_state.attr, 2492 + &dev_attr_optrom_gold_fw_version.attr, 2493 + &dev_attr_thermal_temp.attr, 2494 + &dev_attr_diag_requests.attr, 2495 + &dev_attr_diag_megabytes.attr, 2496 + &dev_attr_fw_dump_size.attr, 2497 + &dev_attr_allow_cna_fw_dump.attr, 2498 + &dev_attr_pep_version.attr, 2499 + &dev_attr_min_supported_speed.attr, 2500 + &dev_attr_max_supported_speed.attr, 2501 + &dev_attr_zio_threshold.attr, 2502 + &dev_attr_dif_bundle_statistics.attr, 2503 + &dev_attr_port_speed.attr, 2504 + &dev_attr_port_no.attr, 2505 + &dev_attr_fw_attr.attr, 2506 + &dev_attr_dport_diagnostics.attr, 2507 + &dev_attr_edif_doorbell.attr, 2508 + &dev_attr_mpi_pause.attr, 2509 + &dev_attr_qlini_mode.attr, 2510 + &dev_attr_ql2xiniexchg.attr, 2511 + &dev_attr_ql2xexchoffld.attr, 2533 2512 NULL, 2534 2513 }; 2535 2514 2536 - void qla_insert_tgt_attrs(void) 2515 + static umode_t qla_host_attr_is_visible(struct kobject *kobj, 2516 + struct attribute *attr, int i) 2537 2517 { 2538 - struct device_attribute **attr; 2539 - 2540 - /* advance to empty slot */ 2541 - for (attr = &qla2x00_host_attrs[0]; *attr; ++attr) 2542 - continue; 2543 - 2544 - *attr = &dev_attr_qlini_mode; 2545 - attr++; 2546 - *attr = &dev_attr_ql2xiniexchg; 2547 - attr++; 2548 - *attr = &dev_attr_ql2xexchoffld; 2518 + if (ql2x_ini_mode != QLA2XXX_INI_MODE_DUAL && 2519 + (attr == &dev_attr_qlini_mode.attr || 2520 + attr == &dev_attr_ql2xiniexchg.attr || 2521 + attr == &dev_attr_ql2xexchoffld.attr)) 2522 + return 0; 2523 + return attr->mode; 2549 2524 } 2525 + 2526 + static const struct attribute_group qla2x00_host_attr_group = { 2527 + .is_visible = qla_host_attr_is_visible, 2528 + .attrs = qla2x00_host_attrs 2529 + }; 2530 + 2531 + const struct attribute_group *qla2x00_host_groups[] = { 2532 + &qla2x00_host_attr_group, 2533 + NULL 2534 + }; 2550 2535 2551 2536 /* Host attributes. */ 2552 2537
+48
drivers/scsi/qla2xxx/qla_bsg.c
··· 2877 2877 case QL_VND_MANAGE_HOST_PORT: 2878 2878 return qla2x00_manage_host_port(bsg_job); 2879 2879 2880 + case QL_VND_MBX_PASSTHRU: 2881 + return qla2x00_mailbox_passthru(bsg_job); 2882 + 2880 2883 default: 2881 2884 return -ENOSYS; 2882 2885 } ··· 3015 3012 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3016 3013 sp->free(sp); 3017 3014 return 0; 3015 + } 3016 + 3017 + int qla2x00_mailbox_passthru(struct bsg_job *bsg_job) 3018 + { 3019 + struct fc_bsg_reply *bsg_reply = bsg_job->reply; 3020 + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 3021 + int ret = -EINVAL; 3022 + int ptsize = sizeof(struct qla_mbx_passthru); 3023 + struct qla_mbx_passthru *req_data = NULL; 3024 + uint32_t req_data_len; 3025 + 3026 + req_data_len = bsg_job->request_payload.payload_len; 3027 + if (req_data_len != ptsize) { 3028 + ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n"); 3029 + return -EIO; 3030 + } 3031 + req_data = kzalloc(ptsize, GFP_KERNEL); 3032 + if (!req_data) { 3033 + ql_log(ql_log_warn, vha, 0xf0a4, 3034 + "req_data memory allocation failure.\n"); 3035 + return -ENOMEM; 3036 + } 3037 + 3038 + /* Copy the request buffer in req_data */ 3039 + sg_copy_to_buffer(bsg_job->request_payload.sg_list, 3040 + bsg_job->request_payload.sg_cnt, req_data, ptsize); 3041 + ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out); 3042 + 3043 + /* Copy the req_data in request buffer */ 3044 + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 3045 + bsg_job->reply_payload.sg_cnt, req_data, ptsize); 3046 + 3047 + bsg_reply->reply_payload_rcv_len = ptsize; 3048 + if (ret == QLA_SUCCESS) 3049 + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 3050 + else 3051 + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR; 3052 + 3053 + bsg_job->reply_len = sizeof(*bsg_job->reply); 3054 + bsg_reply->result = DID_OK << 16; 3055 + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); 3056 + 3057 + 
kfree(req_data); 3058 + 3059 + return ret; 3018 3060 }
+7
drivers/scsi/qla2xxx/qla_bsg.h
··· 36 36 #define QL_VND_GET_HOST_STATS 0x24 37 37 #define QL_VND_GET_TGT_STATS 0x25 38 38 #define QL_VND_MANAGE_HOST_PORT 0x26 39 + #define QL_VND_MBX_PASSTHRU 0x2B 39 40 40 41 /* BSG Vendor specific subcode returns */ 41 42 #define EXT_STATUS_OK 0 ··· 188 187 uint16_t speed; 189 188 } __attribute__ ((packed)); 190 189 190 + struct qla_mbx_passthru { 191 + uint16_t reserved1[2]; 192 + uint16_t mbx_in[32]; 193 + uint16_t mbx_out[32]; 194 + uint32_t reserved2[16]; 195 + } __packed; 191 196 192 197 /* FRU VPD */ 193 198
+2 -2
drivers/scsi/qla2xxx/qla_def.h
··· 3750 3750 struct qla_fw_resources fwres ____cacheline_aligned; 3751 3751 u32 cmd_cnt; 3752 3752 u32 cmd_completion_cnt; 3753 + u32 prev_completion_cnt; 3753 3754 }; 3754 3755 3755 3756 /* Place holder for FW buffer parameters */ ··· 4608 4607 struct qla_chip_state_84xx *cs84xx; 4609 4608 struct isp_operations *isp_ops; 4610 4609 struct workqueue_struct *wq; 4610 + struct work_struct heartbeat_work; 4611 4611 struct qlfc_fw fw_buf; 4612 4612 4613 4613 /* FCP_CMND priority support */ ··· 4710 4708 4711 4709 struct qla_hw_data_stat stat; 4712 4710 pci_error_state_t pci_error_state; 4713 - u64 prev_cmd_cnt; 4714 4711 struct dma_pool *purex_dma_pool; 4715 4712 struct btree_head32 host_map; 4716 4713 ··· 4855 4854 #define SET_ZIO_THRESHOLD_NEEDED 32 4856 4855 #define ISP_ABORT_TO_ROM 33 4857 4856 #define VPORT_DELETE 34 4858 - #define HEARTBEAT_CHK 38 4859 4857 4860 4858 #define PROCESS_PUREX_IOCB 63 4861 4859
+5 -3
drivers/scsi/qla2xxx/qla_gbl.h
··· 662 662 663 663 extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *); 664 664 extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *); 665 + extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job); 665 666 int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt, 666 667 struct rsp_que **rsp, u8 *buf, u32 buf_len); 668 + 669 + int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in, 670 + uint16_t *mbx_out); 667 671 668 672 /* 669 673 * Global Function Prototypes in qla_dbg.c source file. ··· 742 738 * Global Function Prototypes in qla_attr.c source file. 743 739 */ 744 740 struct device_attribute; 745 - extern struct device_attribute *qla2x00_host_attrs[]; 746 - extern struct device_attribute *qla2x00_host_attrs_dm[]; 741 + extern const struct attribute_group *qla2x00_host_groups[]; 747 742 struct fc_function_template; 748 743 extern struct fc_function_template qla2xxx_transport_functions; 749 744 extern struct fc_function_template qla2xxx_transport_vport_functions; ··· 756 753 extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *); 757 754 extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *, 758 755 struct qla_fcp_prio_cfg *, uint8_t); 759 - void qla_insert_tgt_attrs(void); 760 756 /* 761 757 * Global Function Prototypes in qla_dfs.c source file. 762 758 */
+2 -1
drivers/scsi/qla2xxx/qla_gs.c
··· 1537 1537 } 1538 1538 if (IS_QLA2031(ha)) { 1539 1539 if ((ha->pdev->subsystem_vendor == 0x103C) && 1540 - (ha->pdev->subsystem_device == 0x8002)) { 1540 + ((ha->pdev->subsystem_device == 0x8002) || 1541 + (ha->pdev->subsystem_device == 0x8086))) { 1541 1542 speeds = FDMI_PORT_SPEED_16GB; 1542 1543 } else { 1543 1544 speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
+9 -8
drivers/scsi/qla2xxx/qla_init.c
··· 5335 5335 "LOOP READY.\n"); 5336 5336 ha->flags.fw_init_done = 1; 5337 5337 5338 + /* 5339 + * use link up to wake up app to get ready for 5340 + * authentication. 5341 + */ 5338 5342 if (ha->flags.edif_enabled && 5339 - !(vha->e_dbell.db_flags & EDB_ACTIVE) && 5340 - N2N_TOPO(vha->hw)) { 5341 - /* 5342 - * use port online to wake up app to get ready 5343 - * for authentication 5344 - */ 5345 - qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 0); 5346 - } 5343 + !(vha->e_dbell.db_flags & EDB_ACTIVE)) 5344 + qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, 5345 + ha->link_data_rate); 5347 5346 5348 5347 /* 5349 5348 * Process any ATIO queue entries that came in ··· 7025 7026 ha->chip_reset++; 7026 7027 ha->base_qpair->chip_reset = ha->chip_reset; 7027 7028 ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0; 7029 + ha->base_qpair->prev_completion_cnt = 0; 7028 7030 for (i = 0; i < ha->max_qpairs; i++) { 7029 7031 if (ha->queue_pair_map[i]) { 7030 7032 ha->queue_pair_map[i]->chip_reset = 7031 7033 ha->base_qpair->chip_reset; 7032 7034 ha->queue_pair_map[i]->cmd_cnt = 7033 7035 ha->queue_pair_map[i]->cmd_completion_cnt = 0; 7036 + ha->base_qpair->prev_completion_cnt = 0; 7034 7037 } 7035 7038 } 7036 7039
+34 -1
drivers/scsi/qla2xxx/qla_mbx.c
··· 3236 3236 fc_port_t *fcport = sp->fcport; 3237 3237 struct scsi_qla_host *vha = fcport->vha; 3238 3238 struct qla_hw_data *ha = vha->hw; 3239 - struct req_que *req = vha->req; 3239 + struct req_que *req; 3240 3240 struct qla_qpair *qpair = sp->qpair; 3241 3241 3242 3242 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, ··· 7010 7010 ql_dbg(ql_dbg_async, vha, 0x7071, 7011 7011 "Failed %s %x\n", __func__, rval); 7012 7012 } 7013 + } 7014 + 7015 + int qla_mailbox_passthru(scsi_qla_host_t *vha, 7016 + uint16_t *mbx_in, uint16_t *mbx_out) 7017 + { 7018 + mbx_cmd_t mc; 7019 + mbx_cmd_t *mcp = &mc; 7020 + int rval = -EINVAL; 7021 + 7022 + memset(&mc, 0, sizeof(mc)); 7023 + /* Receiving all 32 register's contents */ 7024 + memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t))); 7025 + 7026 + mcp->out_mb = 0xFFFFFFFF; 7027 + mcp->in_mb = 0xFFFFFFFF; 7028 + 7029 + mcp->tov = MBX_TOV_SECONDS; 7030 + mcp->flags = 0; 7031 + mcp->bufp = NULL; 7032 + 7033 + rval = qla2x00_mailbox_command(vha, mcp); 7034 + 7035 + if (rval != QLA_SUCCESS) { 7036 + ql_dbg(ql_dbg_mbx, vha, 0xf0a2, 7037 + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 7038 + } else { 7039 + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n", 7040 + __func__); 7041 + /* passing all 32 register's contents */ 7042 + memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t)); 7043 + } 7044 + 7045 + return rval; 7013 7046 }
+18 -2
drivers/scsi/qla2xxx/qla_nvme.c
··· 230 230 fc_port_t *fcport = sp->fcport; 231 231 struct qla_hw_data *ha = fcport->vha->hw; 232 232 int rval, abts_done_called = 1; 233 + bool io_wait_for_abort_done; 234 + uint32_t handle; 233 235 234 236 ql_dbg(ql_dbg_io, fcport->vha, 0xffff, 235 237 "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n", ··· 248 246 goto out; 249 247 } 250 248 249 + /* 250 + * sp may not be valid after abort_command if return code is either 251 + * SUCCESS or ERR_FROM_FW codes, so cache the value here. 252 + */ 253 + io_wait_for_abort_done = ql2xabts_wait_nvme && 254 + QLA_ABTS_WAIT_ENABLED(sp); 255 + handle = sp->handle; 256 + 251 257 rval = ha->isp_ops->abort_command(sp); 252 258 253 259 ql_dbg(ql_dbg_io, fcport->vha, 0x212b, 254 260 "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n", 255 261 __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted", 256 - sp, sp->handle, fcport, rval); 262 + sp, handle, fcport, rval); 257 263 258 264 /* 259 265 * If async tmf is enabled, the abort callback is called only on ··· 276 266 * are waited until ABTS complete. This kref is decreased 277 267 * at qla24xx_abort_sp_done function. 278 268 */ 279 - if (abts_done_called && ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp)) 269 + if (abts_done_called && io_wait_for_abort_done) 280 270 return; 281 271 out: 282 272 /* kref_get was done before work was schedule. */ ··· 401 391 uint16_t avail_dsds; 402 392 struct dsd64 *cur_dsd; 403 393 struct req_que *req = NULL; 394 + struct rsp_que *rsp = NULL; 404 395 struct scsi_qla_host *vha = sp->fcport->vha; 405 396 struct qla_hw_data *ha = vha->hw; 406 397 struct qla_qpair *qpair = sp->qpair; ··· 413 402 414 403 /* Setup qpair pointers */ 415 404 req = qpair->req; 405 + rsp = qpair->rsp; 416 406 tot_dsds = fd->sg_cnt; 417 407 418 408 /* Acquire qpair specific lock */ ··· 574 562 575 563 /* Set chip new ring index. 
*/ 576 564 wrt_reg_dword(req->req_q_in, req->ring_index); 565 + 566 + if (vha->flags.process_response_queue && 567 + rsp->ring_ptr->signature != RESPONSE_PROCESSED) 568 + qla24xx_process_response_queue(vha, rsp); 577 569 578 570 queuing_error: 579 571 spin_unlock_irqrestore(&qpair->qp_lock, flags);
+50 -53
drivers/scsi/qla2xxx/qla_os.c
··· 737 737 sp->free(sp); 738 738 cmd->result = res; 739 739 CMD_SP(cmd) = NULL; 740 - cmd->scsi_done(cmd); 740 + scsi_done(cmd); 741 741 if (comp) 742 742 complete(comp); 743 743 } ··· 828 828 sp->free(sp); 829 829 cmd->result = res; 830 830 CMD_SP(cmd) = NULL; 831 - cmd->scsi_done(cmd); 831 + scsi_done(cmd); 832 832 if (comp) 833 833 complete(comp); 834 834 } ··· 950 950 return SCSI_MLQUEUE_TARGET_BUSY; 951 951 952 952 qc24_fail_command: 953 - cmd->scsi_done(cmd); 953 + scsi_done(cmd); 954 954 955 955 return 0; 956 956 } ··· 1038 1038 return SCSI_MLQUEUE_TARGET_BUSY; 1039 1039 1040 1040 qc24_fail_command: 1041 - cmd->scsi_done(cmd); 1041 + scsi_done(cmd); 1042 1042 1043 1043 return 0; 1044 1044 } ··· 1258 1258 uint32_t ratov_j; 1259 1259 struct qla_qpair *qpair; 1260 1260 unsigned long flags; 1261 + int fast_fail_status = SUCCESS; 1261 1262 1262 1263 if (qla2x00_isp_reg_stat(ha)) { 1263 1264 ql_log(ql_log_info, vha, 0x8042, ··· 1267 1266 return FAILED; 1268 1267 } 1269 1268 1269 + /* Save any FAST_IO_FAIL value to return later if abort succeeds */ 1270 1270 ret = fc_block_scsi_eh(cmd); 1271 1271 if (ret != 0) 1272 - return ret; 1272 + fast_fail_status = ret; 1273 1273 1274 1274 sp = scsi_cmd_priv(cmd); 1275 1275 qpair = sp->qpair; ··· 1278 1276 vha->cmd_timeout_cnt++; 1279 1277 1280 1278 if ((sp->fcport && sp->fcport->deleted) || !qpair) 1281 - return SUCCESS; 1279 + return fast_fail_status != SUCCESS ? 
fast_fail_status : FAILED; 1282 1280 1283 1281 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 1284 1282 sp->comp = &comp; ··· 1313 1311 __func__, ha->r_a_tov/10); 1314 1312 ret = FAILED; 1315 1313 } else { 1316 - ret = SUCCESS; 1314 + ret = fast_fail_status; 1317 1315 } 1318 1316 break; 1319 1317 default: ··· 2796 2794 return atomic_read(&vha->loop_state) == LOOP_READY; 2797 2795 } 2798 2796 2797 + static void qla_heartbeat_work_fn(struct work_struct *work) 2798 + { 2799 + struct qla_hw_data *ha = container_of(work, 2800 + struct qla_hw_data, heartbeat_work); 2801 + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2802 + 2803 + if (!ha->flags.mbox_busy && base_vha->flags.init_done) 2804 + qla_no_op_mb(base_vha); 2805 + } 2806 + 2799 2807 static void qla2x00_iocb_work_fn(struct work_struct *work) 2800 2808 { 2801 2809 struct scsi_qla_host *vha = container_of(work, ··· 3244 3232 host->transportt, sht->vendor_id); 3245 3233 3246 3234 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); 3235 + INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn); 3247 3236 3248 3237 /* Set up the irqs */ 3249 3238 ret = qla2x00_request_irqs(ha, rsp); ··· 3376 3363 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", 3377 3364 host->can_queue, base_vha->req, 3378 3365 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3366 + 3367 + /* Check if FW supports MQ or not for ISP25xx */ 3368 + if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6)) 3369 + ha->mqenable = 0; 3379 3370 3380 3371 if (ha->mqenable) { 3381 3372 bool startit = false; ··· 7131 7114 qla2x00_lip_reset(base_vha); 7132 7115 } 7133 7116 7134 - if (test_bit(HEARTBEAT_CHK, &base_vha->dpc_flags)) { 7135 - /* 7136 - * if there is a mb in progress then that's 7137 - * enough of a check to see if fw is still ticking. 
7138 - */ 7139 - if (!ha->flags.mbox_busy && base_vha->flags.init_done) 7140 - qla_no_op_mb(base_vha); 7141 - 7142 - clear_bit(HEARTBEAT_CHK, &base_vha->dpc_flags); 7143 - } 7144 - 7145 7117 ha->dpc_active = 0; 7146 7118 end_loop: 7147 7119 set_current_state(TASK_INTERRUPTIBLE); ··· 7189 7183 7190 7184 static bool qla_do_heartbeat(struct scsi_qla_host *vha) 7191 7185 { 7192 - u64 cmd_cnt, prev_cmd_cnt; 7193 - bool do_hb = false; 7194 7186 struct qla_hw_data *ha = vha->hw; 7195 - int i; 7187 + u32 cmpl_cnt; 7188 + u16 i; 7189 + bool do_heartbeat = false; 7196 7190 7197 - /* if cmds are still pending down in fw, then do hb */ 7198 - if (ha->base_qpair->cmd_cnt != ha->base_qpair->cmd_completion_cnt) { 7199 - do_hb = true; 7191 + /* 7192 + * Allow do_heartbeat only if we don’t have any active interrupts, 7193 + * but there are still IOs outstanding with firmware. 7194 + */ 7195 + cmpl_cnt = ha->base_qpair->cmd_completion_cnt; 7196 + if (cmpl_cnt == ha->base_qpair->prev_completion_cnt && 7197 + cmpl_cnt != ha->base_qpair->cmd_cnt) { 7198 + do_heartbeat = true; 7200 7199 goto skip; 7201 7200 } 7201 + ha->base_qpair->prev_completion_cnt = cmpl_cnt; 7202 7202 7203 7203 for (i = 0; i < ha->max_qpairs; i++) { 7204 - if (ha->queue_pair_map[i] && 7205 - ha->queue_pair_map[i]->cmd_cnt != 7206 - ha->queue_pair_map[i]->cmd_completion_cnt) { 7207 - do_hb = true; 7208 - break; 7204 + if (ha->queue_pair_map[i]) { 7205 + cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt; 7206 + if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt && 7207 + cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) { 7208 + do_heartbeat = true; 7209 + break; 7210 + } 7211 + ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt; 7209 7212 } 7210 7213 } 7211 7214 7212 7215 skip: 7213 - prev_cmd_cnt = ha->prev_cmd_cnt; 7214 - cmd_cnt = ha->base_qpair->cmd_cnt; 7215 - for (i = 0; i < ha->max_qpairs; i++) { 7216 - if (ha->queue_pair_map[i]) 7217 - cmd_cnt += ha->queue_pair_map[i]->cmd_cnt; 7218 - } 7219 - 
ha->prev_cmd_cnt = cmd_cnt; 7220 - 7221 - if (!do_hb && ((cmd_cnt - prev_cmd_cnt) > 50)) 7222 - /* 7223 - * IOs are completing before periodic hb check. 7224 - * IOs seems to be running, do hb for sanity check. 7225 - */ 7226 - do_hb = true; 7227 - 7228 - return do_hb; 7216 + return do_heartbeat; 7229 7217 } 7230 7218 7231 7219 static void qla_heart_beat(struct scsi_qla_host *vha) 7232 7220 { 7221 + struct qla_hw_data *ha = vha->hw; 7222 + 7233 7223 if (vha->vp_idx) 7234 7224 return; 7235 7225 7236 7226 if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha)) 7237 7227 return; 7238 7228 7239 - if (qla_do_heartbeat(vha)) { 7240 - set_bit(HEARTBEAT_CHK, &vha->dpc_flags); 7241 - qla2xxx_wake_dpc(vha); 7242 - } 7229 + if (qla_do_heartbeat(vha)) 7230 + queue_work(ha->wq, &ha->heartbeat_work); 7243 7231 } 7244 7232 7245 7233 /************************************************************************** ··· 7943 7943 .sg_tablesize = SG_ALL, 7944 7944 7945 7945 .max_sectors = 0xFFFF, 7946 - .shost_attrs = qla2x00_host_attrs, 7946 + .shost_groups = qla2x00_host_groups, 7947 7947 7948 7948 .supported_mode = MODE_INITIATOR, 7949 7949 .track_queue_depth = 1, ··· 8130 8130 strcat(qla2x00_version_str, "-debug"); 8131 8131 if (ql2xextended_error_logging == 1) 8132 8132 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; 8133 - 8134 - if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL) 8135 - qla_insert_tgt_attrs(); 8136 8133 8137 8134 qla2xxx_transport_template = 8138 8135 fc_attach_transport(&qla2xxx_transport_functions);
+3 -3
drivers/scsi/qla2xxx/qla_version.h
··· 6 6 /* 7 7 * Driver version 8 8 */ 9 - #define QLA2XXX_VERSION "10.02.06.200-k" 9 + #define QLA2XXX_VERSION "10.02.07.100-k" 10 10 11 11 #define QLA_DRIVER_MAJOR_VER 10 12 12 #define QLA_DRIVER_MINOR_VER 2 13 - #define QLA_DRIVER_PATCH_VER 6 14 - #define QLA_DRIVER_BETA_VER 200 13 + #define QLA_DRIVER_PATCH_VER 7 14 + #define QLA_DRIVER_BETA_VER 100
+12 -61
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 915 915 916 916 /* End items for tcm_qla2xxx_tpg_attrib_cit */ 917 917 918 - static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item, 919 - char *page) 918 + static int tcm_qla2xxx_enable_tpg(struct se_portal_group *se_tpg, 919 + bool enable) 920 920 { 921 - struct se_portal_group *se_tpg = to_tpg(item); 922 - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 923 - struct tcm_qla2xxx_tpg, se_tpg); 924 - 925 - return snprintf(page, PAGE_SIZE, "%d\n", 926 - atomic_read(&tpg->lport_tpg_enabled)); 927 - } 928 - 929 - static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, 930 - const char *page, size_t count) 931 - { 932 - struct se_portal_group *se_tpg = to_tpg(item); 933 921 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; 934 922 struct tcm_qla2xxx_lport *lport = container_of(se_wwn, 935 923 struct tcm_qla2xxx_lport, lport_wwn); 936 924 struct scsi_qla_host *vha = lport->qla_vha; 937 925 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 938 926 struct tcm_qla2xxx_tpg, se_tpg); 939 - unsigned long op; 940 - int rc; 941 927 942 - rc = kstrtoul(page, 0, &op); 943 - if (rc < 0) { 944 - pr_err("kstrtoul() returned %d\n", rc); 945 - return -EINVAL; 946 - } 947 - if ((op != 1) && (op != 0)) { 948 - pr_err("Illegal value for tpg_enable: %lu\n", op); 949 - return -EINVAL; 950 - } 951 - if (op) { 928 + if (enable) { 952 929 if (atomic_read(&tpg->lport_tpg_enabled)) 953 930 return -EEXIST; 954 931 ··· 933 956 qlt_enable_vha(vha); 934 957 } else { 935 958 if (!atomic_read(&tpg->lport_tpg_enabled)) 936 - return count; 959 + return 0; 937 960 938 961 atomic_set(&tpg->lport_tpg_enabled, 0); 939 962 qlt_stop_phase1(vha->vha_tgt.qla_tgt); 940 963 qlt_stop_phase2(vha->vha_tgt.qla_tgt); 941 964 } 942 965 943 - return count; 966 + return 0; 944 967 } 945 968 946 969 static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item, ··· 981 1004 return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); 982 1005 } 983 1006 984 - 
CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable); 985 1007 CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); 986 1008 CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); 987 1009 988 1010 static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { 989 - &tcm_qla2xxx_tpg_attr_enable, 990 1011 &tcm_qla2xxx_tpg_attr_dynamic_sessions, 991 1012 &tcm_qla2xxx_tpg_attr_fabric_prot_type, 992 1013 NULL, ··· 1058 1083 kfree(tpg); 1059 1084 } 1060 1085 1061 - static ssize_t tcm_qla2xxx_npiv_tpg_enable_show(struct config_item *item, 1062 - char *page) 1086 + static int tcm_qla2xxx_npiv_enable_tpg(struct se_portal_group *se_tpg, 1087 + bool enable) 1063 1088 { 1064 - return tcm_qla2xxx_tpg_enable_show(item, page); 1065 - } 1066 - 1067 - static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item, 1068 - const char *page, size_t count) 1069 - { 1070 - struct se_portal_group *se_tpg = to_tpg(item); 1071 1089 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; 1072 1090 struct tcm_qla2xxx_lport *lport = container_of(se_wwn, 1073 1091 struct tcm_qla2xxx_lport, lport_wwn); 1074 1092 struct scsi_qla_host *vha = lport->qla_vha; 1075 1093 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, 1076 1094 struct tcm_qla2xxx_tpg, se_tpg); 1077 - unsigned long op; 1078 - int rc; 1079 1095 1080 - rc = kstrtoul(page, 0, &op); 1081 - if (rc < 0) { 1082 - pr_err("kstrtoul() returned %d\n", rc); 1083 - return -EINVAL; 1084 - } 1085 - if ((op != 1) && (op != 0)) { 1086 - pr_err("Illegal value for tpg_enable: %lu\n", op); 1087 - return -EINVAL; 1088 - } 1089 - if (op) { 1096 + if (enable) { 1090 1097 if (atomic_read(&tpg->lport_tpg_enabled)) 1091 1098 return -EEXIST; 1092 1099 ··· 1076 1119 qlt_enable_vha(vha); 1077 1120 } else { 1078 1121 if (!atomic_read(&tpg->lport_tpg_enabled)) 1079 - return count; 1122 + return 0; 1080 1123 1081 1124 atomic_set(&tpg->lport_tpg_enabled, 0); 1082 1125 qlt_stop_phase1(vha->vha_tgt.qla_tgt); 1083 1126 qlt_stop_phase2(vha->vha_tgt.qla_tgt); 1084 1127 } 1085 1128 1086 
- return count; 1129 + return 0; 1087 1130 } 1088 - 1089 - CONFIGFS_ATTR(tcm_qla2xxx_npiv_tpg_, enable); 1090 - 1091 - static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = { 1092 - &tcm_qla2xxx_npiv_tpg_attr_enable, 1093 - NULL, 1094 - }; 1095 1131 1096 1132 static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn, 1097 1133 const char *name) ··· 1828 1878 .fabric_make_wwn = tcm_qla2xxx_make_lport, 1829 1879 .fabric_drop_wwn = tcm_qla2xxx_drop_lport, 1830 1880 .fabric_make_tpg = tcm_qla2xxx_make_tpg, 1881 + .fabric_enable_tpg = tcm_qla2xxx_enable_tpg, 1831 1882 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, 1832 1883 .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, 1833 1884 ··· 1869 1918 .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport, 1870 1919 .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, 1871 1920 .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, 1921 + .fabric_enable_tpg = tcm_qla2xxx_npiv_enable_tpg, 1872 1922 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, 1873 1923 .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, 1874 1924 1875 1925 .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, 1876 - .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs, 1877 1926 }; 1878 1927 1879 1928 static int tcm_qla2xxx_register_configfs(void)
+25 -16
drivers/scsi/qla4xxx/ql4_attr.c
··· 330 330 static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL); 331 331 static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL); 332 332 333 - struct device_attribute *qla4xxx_host_attrs[] = { 334 - &dev_attr_fw_version, 335 - &dev_attr_serial_num, 336 - &dev_attr_iscsi_version, 337 - &dev_attr_optrom_version, 338 - &dev_attr_board_id, 339 - &dev_attr_fw_state, 340 - &dev_attr_phy_port_cnt, 341 - &dev_attr_phy_port_num, 342 - &dev_attr_iscsi_func_cnt, 343 - &dev_attr_hba_model, 344 - &dev_attr_fw_timestamp, 345 - &dev_attr_fw_build_user, 346 - &dev_attr_fw_ext_timestamp, 347 - &dev_attr_fw_load_src, 348 - &dev_attr_fw_uptime, 333 + static struct attribute *qla4xxx_host_attrs[] = { 334 + &dev_attr_fw_version.attr, 335 + &dev_attr_serial_num.attr, 336 + &dev_attr_iscsi_version.attr, 337 + &dev_attr_optrom_version.attr, 338 + &dev_attr_board_id.attr, 339 + &dev_attr_fw_state.attr, 340 + &dev_attr_phy_port_cnt.attr, 341 + &dev_attr_phy_port_num.attr, 342 + &dev_attr_iscsi_func_cnt.attr, 343 + &dev_attr_hba_model.attr, 344 + &dev_attr_fw_timestamp.attr, 345 + &dev_attr_fw_build_user.attr, 346 + &dev_attr_fw_ext_timestamp.attr, 347 + &dev_attr_fw_load_src.attr, 348 + &dev_attr_fw_uptime.attr, 349 349 NULL, 350 + }; 351 + 352 + static const struct attribute_group qla4xxx_host_attr_group = { 353 + .attrs = qla4xxx_host_attrs 354 + }; 355 + 356 + const struct attribute_group *qla4xxx_host_groups[] = { 357 + &qla4xxx_host_attr_group, 358 + NULL 350 359 };
+2 -1
drivers/scsi/qla4xxx/ql4_glbl.h
··· 286 286 extern int ql4xmdcapmask; 287 287 extern int ql4xenablemd; 288 288 289 - extern struct device_attribute *qla4xxx_host_attrs[]; 289 + extern const struct attribute_group *qla4xxx_host_groups[]; 290 + 290 291 #endif /* _QLA4x_GBL_H */
+3 -3
drivers/scsi/qla4xxx/ql4_os.c
··· 241 241 .sg_tablesize = SG_ALL, 242 242 243 243 .max_sectors = 0xFFFF, 244 - .shost_attrs = qla4xxx_host_attrs, 244 + .shost_groups = qla4xxx_host_groups, 245 245 .host_reset = qla4xxx_host_reset, 246 246 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, 247 247 }; ··· 4080 4080 4081 4081 mempool_free(srb, ha->srb_mempool); 4082 4082 4083 - cmd->scsi_done(cmd); 4083 + scsi_done(cmd); 4084 4084 } 4085 4085 4086 4086 /** ··· 4154 4154 return SCSI_MLQUEUE_HOST_BUSY; 4155 4155 4156 4156 qc_fail_command: 4157 - cmd->scsi_done(cmd); 4157 + scsi_done(cmd); 4158 4158 4159 4159 return 0; 4160 4160 }
+3 -4
drivers/scsi/qlogicfas408.c
··· 442 442 * If result is CHECK CONDITION done calls qcommand to request 443 443 * sense 444 444 */ 445 - (icmd->scsi_done) (icmd); 445 + scsi_done(icmd); 446 446 } 447 447 448 448 irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id) ··· 460 460 * Queued command 461 461 */ 462 462 463 - static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd, 464 - void (*done) (struct scsi_cmnd *)) 463 + static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd) 465 464 { 465 + void (*done)(struct scsi_cmnd *) = scsi_done; 466 466 struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); 467 467 468 468 set_host_byte(cmd, DID_OK); ··· 473 473 return 0; 474 474 } 475 475 476 - cmd->scsi_done = done; 477 476 /* wait for the last command's interrupt to finish */ 478 477 while (priv->qlcmd != NULL) { 479 478 barrier();
+3 -4
drivers/scsi/qlogicpti.c
··· 1013 1013 * 1014 1014 * "This code must fly." -davem 1015 1015 */ 1016 - static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *)) 1016 + static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd) 1017 1017 { 1018 + void (*done)(struct scsi_cmnd *) = scsi_done; 1018 1019 struct Scsi_Host *host = Cmnd->device->host; 1019 1020 struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; 1020 1021 struct Command_Entry *cmd; 1021 1022 u_int out_ptr; 1022 1023 int in_ptr; 1023 - 1024 - Cmnd->scsi_done = done; 1025 1024 1026 1025 in_ptr = qpti->req_in_ptr; 1027 1026 cmd = (struct Command_Entry *) &qpti->req_cpu[in_ptr]; ··· 1213 1214 struct scsi_cmnd *next; 1214 1215 1215 1216 next = (struct scsi_cmnd *) dq->host_scribble; 1216 - dq->scsi_done(dq); 1217 + scsi_done(dq); 1217 1218 dq = next; 1218 1219 } while (dq != NULL); 1219 1220 }
-8
drivers/scsi/scsi.c
··· 86 86 EXPORT_SYMBOL(scsi_logging_level); 87 87 #endif 88 88 89 - /* 90 - * Domain for asynchronous system resume operations. It is marked 'exclusive' 91 - * to avoid being included in the async_synchronize_full() that is invoked by 92 - * dpm_resume(). 93 - */ 94 - ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain); 95 - EXPORT_SYMBOL(scsi_sd_pm_domain); 96 - 97 89 #ifdef CONFIG_SCSI_LOGGING 98 90 void scsi_log_send(struct scsi_cmnd *cmd) 99 91 {
+10 -9
drivers/scsi/scsi_debug.c
··· 1856 1856 { 1857 1857 unsigned char *cmd = scp->cmnd; 1858 1858 unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; 1859 - int alloc_len; 1859 + u32 alloc_len; 1860 1860 1861 1861 alloc_len = get_unaligned_be32(cmd + 10); 1862 1862 /* following just in case virtual_gb changed */ ··· 1885 1885 } 1886 1886 1887 1887 return fill_from_dev_buffer(scp, arr, 1888 - min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ)); 1888 + min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ)); 1889 1889 } 1890 1890 1891 1891 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412 ··· 1896 1896 unsigned char *cmd = scp->cmnd; 1897 1897 unsigned char *arr; 1898 1898 int host_no = devip->sdbg_host->shost->host_no; 1899 - int n, ret, alen, rlen; 1900 1899 int port_group_a, port_group_b, port_a, port_b; 1900 + u32 alen, n, rlen; 1901 + int ret; 1901 1902 1902 1903 alen = get_unaligned_be32(cmd + 6); 1903 1904 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC); ··· 1960 1959 * - The constructed command length 1961 1960 * - The maximum array size 1962 1961 */ 1963 - rlen = min_t(int, alen, n); 1962 + rlen = min(alen, n); 1964 1963 ret = fill_from_dev_buffer(scp, arr, 1965 - min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ)); 1964 + min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ)); 1966 1965 kfree(arr); 1967 1966 return ret; 1968 1967 } ··· 4810 4809 pr_info("bypassing scsi_done() due to aborted cmd\n"); 4811 4810 return; 4812 4811 } 4813 - scp->scsi_done(scp); /* callback to mid level */ 4812 + scsi_done(scp); /* callback to mid level */ 4814 4813 } 4815 4814 4816 4815 /* When high resolution timer goes off this function is called. 
*/ ··· 5525 5524 if (new_sd_dp) 5526 5525 kfree(sd_dp); 5527 5526 /* call scsi_done() from this thread */ 5528 - cmnd->scsi_done(cmnd); 5527 + scsi_done(cmnd); 5529 5528 return 0; 5530 5529 } 5531 5530 /* otherwise reduce kt by elapsed time */ ··· 5605 5604 cmnd->result &= ~SDEG_RES_IMMED_MASK; 5606 5605 if (cmnd->result == 0 && scsi_result != 0) 5607 5606 cmnd->result = scsi_result; 5608 - cmnd->scsi_done(cmnd); 5607 + scsi_done(cmnd); 5609 5608 return 0; 5610 5609 } 5611 5610 ··· 7364 7363 } 7365 7364 sd_dp->defer_t = SDEB_DEFER_NONE; 7366 7365 spin_unlock_irqrestore(&sqp->qc_lock, iflags); 7367 - scp->scsi_done(scp); /* callback to mid level */ 7366 + scsi_done(scp); /* callback to mid level */ 7368 7367 spin_lock_irqsave(&sqp->qc_lock, iflags); 7369 7368 num_entries++; 7370 7369 }
+6 -11
drivers/scsi/scsi_error.c
··· 50 50 51 51 #include <asm/unaligned.h> 52 52 53 - static void scsi_eh_done(struct scsi_cmnd *scmd); 54 - 55 53 /* 56 54 * These should *probably* be handled by the host itself. 57 55 * Since it is allowed to sleep, it probably should. ··· 518 520 /* handler does not care. Drop down to default handling */ 519 521 } 520 522 521 - if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done) 523 + if (scmd->cmnd[0] == TEST_UNIT_READY && 524 + scmd->submitter != SUBMITTED_BY_SCSI_ERROR_HANDLER) 522 525 /* 523 526 * nasty: for mid-layer issued TURs, we need to return the 524 527 * actual sense data without any recovery attempt. For eh ··· 781 782 * scsi_eh_done - Completion function for error handling. 782 783 * @scmd: Cmd that is done. 783 784 */ 784 - static void scsi_eh_done(struct scsi_cmnd *scmd) 785 + void scsi_eh_done(struct scsi_cmnd *scmd) 785 786 { 786 787 struct completion *eh_action; 787 788 ··· 1081 1082 shost->eh_action = &done; 1082 1083 1083 1084 scsi_log_send(scmd); 1084 - scmd->scsi_done = scsi_eh_done; 1085 + scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER; 1085 1086 1086 1087 /* 1087 1088 * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can ··· 1108 1109 if (rtn) { 1109 1110 if (timeleft > stall_for) { 1110 1111 scsi_eh_restore_cmnd(scmd, &ses); 1112 + 1111 1113 timeleft -= stall_for; 1112 1114 msleep(jiffies_to_msecs(stall_for)); 1113 1115 goto retry; ··· 2338 2338 } 2339 2339 EXPORT_SYMBOL(scsi_report_device_reset); 2340 2340 2341 - static void 2342 - scsi_reset_provider_done_command(struct scsi_cmnd *scmd) 2343 - { 2344 - } 2345 - 2346 2341 /** 2347 2342 * scsi_ioctl_reset: explicitly reset a host/bus/target/device 2348 2343 * @dev: scsi_device to operate on ··· 2374 2379 scsi_init_command(dev, scmd); 2375 2380 scmd->cmnd = scsi_req(rq)->cmd; 2376 2381 2377 - scmd->scsi_done = scsi_reset_provider_done_command; 2382 + scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL; 2378 2383 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); 
2379 2384 2380 2385 scmd->cmd_len = 0;
+42 -22
drivers/scsi/scsi_lib.c
··· 950 950 951 951 /* 952 952 * If there had been no error, but we have leftover bytes in the 953 - * requeues just queue the command up again. 953 + * request just queue the command up again. 954 954 */ 955 955 if (likely(result == 0)) 956 956 scsi_io_completion_reprep(cmd, q); ··· 1530 1530 1531 1531 return rtn; 1532 1532 done: 1533 - cmd->scsi_done(cmd); 1533 + scsi_done(cmd); 1534 1534 return 0; 1535 1535 } 1536 1536 ··· 1585 1585 return scsi_cmd_to_driver(cmd)->init_command(cmd); 1586 1586 } 1587 1587 1588 - static void scsi_mq_done(struct scsi_cmnd *cmd) 1588 + void scsi_done(struct scsi_cmnd *cmd) 1589 1589 { 1590 + switch (cmd->submitter) { 1591 + case SUBMITTED_BY_BLOCK_LAYER: 1592 + break; 1593 + case SUBMITTED_BY_SCSI_ERROR_HANDLER: 1594 + return scsi_eh_done(cmd); 1595 + case SUBMITTED_BY_SCSI_RESET_IOCTL: 1596 + return; 1597 + } 1598 + 1590 1599 if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q))) 1591 1600 return; 1592 1601 if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) ··· 1603 1594 trace_scsi_dispatch_cmd_done(cmd); 1604 1595 blk_mq_complete_request(scsi_cmd_to_rq(cmd)); 1605 1596 } 1597 + EXPORT_SYMBOL(scsi_done); 1606 1598 1607 1599 static void scsi_mq_put_budget(struct request_queue *q, int budget_token) 1608 1600 { ··· 1703 1693 1704 1694 scsi_set_resid(cmd, 0); 1705 1695 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1706 - cmd->scsi_done = scsi_mq_done; 1696 + cmd->submitter = SUBMITTED_BY_BLOCK_LAYER; 1707 1697 1708 1698 blk_mq_start_request(req); 1709 1699 reason = scsi_dispatch_cmd(cmd); ··· 2052 2042 memset(cmd, 0, sizeof(cmd)); 2053 2043 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); 2054 2044 2055 - if (sdev->use_10_for_ms) { 2056 - if (len > 65535) 2045 + /* 2046 + * Use MODE SELECT(10) if the device asked for it or if the mode page 2047 + * and the mode select header cannot fit within the maximumm 255 bytes 2048 + * of the MODE SELECT(6) command. 
2049 + */ 2050 + if (sdev->use_10_for_ms || 2051 + len + 4 > 255 || 2052 + data->block_descriptor_length > 255) { 2053 + if (len > 65535 - 8) 2057 2054 return -EINVAL; 2058 2055 real_buffer = kmalloc(8 + len, GFP_KERNEL); 2059 2056 if (!real_buffer) ··· 2073 2056 real_buffer[3] = data->device_specific; 2074 2057 real_buffer[4] = data->longlba ? 0x01 : 0; 2075 2058 real_buffer[5] = 0; 2076 - real_buffer[6] = data->block_descriptor_length >> 8; 2077 - real_buffer[7] = data->block_descriptor_length; 2059 + put_unaligned_be16(data->block_descriptor_length, 2060 + &real_buffer[6]); 2078 2061 2079 2062 cmd[0] = MODE_SELECT_10; 2080 - cmd[7] = len >> 8; 2081 - cmd[8] = len; 2063 + put_unaligned_be16(len, &cmd[7]); 2082 2064 } else { 2083 - if (len > 255 || data->block_descriptor_length > 255 || 2084 - data->longlba) 2065 + if (data->longlba) 2085 2066 return -EINVAL; 2086 2067 2087 2068 real_buffer = kmalloc(4 + len, GFP_KERNEL); ··· 2106 2091 /** 2107 2092 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 2108 2093 * @sdev: SCSI device to be queried 2109 - * @dbd: set if mode sense will allow block descriptors to be returned 2094 + * @dbd: set to prevent mode sense from returning block descriptors 2110 2095 * @modepage: mode page being requested 2111 2096 * @buffer: request buffer (may not be smaller than eight bytes) 2112 2097 * @len: length of request buffer. 
··· 2141 2126 sshdr = &my_sshdr; 2142 2127 2143 2128 retry: 2144 - use_10_for_ms = sdev->use_10_for_ms; 2129 + use_10_for_ms = sdev->use_10_for_ms || len > 255; 2145 2130 2146 2131 if (use_10_for_ms) { 2147 - if (len < 8) 2148 - len = 8; 2132 + if (len < 8 || len > 65535) 2133 + return -EINVAL; 2149 2134 2150 2135 cmd[0] = MODE_SENSE_10; 2151 - cmd[8] = len; 2136 + put_unaligned_be16(len, &cmd[7]); 2152 2137 header_length = 8; 2153 2138 } else { 2154 2139 if (len < 4) 2155 - len = 4; 2140 + return -EINVAL; 2156 2141 2157 2142 cmd[0] = MODE_SENSE; 2158 2143 cmd[4] = len; ··· 2176 2161 if ((sshdr->sense_key == ILLEGAL_REQUEST) && 2177 2162 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { 2178 2163 /* 2179 - * Invalid command operation code 2164 + * Invalid command operation code: retry using 2165 + * MODE SENSE(6) if this was a MODE SENSE(10) 2166 + * request, except if the request mode page is 2167 + * too large for MODE SENSE single byte 2168 + * allocation length field. 2180 2169 */ 2181 2170 if (use_10_for_ms) { 2171 + if (len > 255) 2172 + return -EIO; 2182 2173 sdev->use_10_for_ms = 0; 2183 2174 goto retry; 2184 2175 } ··· 2208 2187 data->longlba = 0; 2209 2188 data->block_descriptor_length = 0; 2210 2189 } else if (use_10_for_ms) { 2211 - data->length = buffer[0]*256 + buffer[1] + 2; 2190 + data->length = get_unaligned_be16(&buffer[0]) + 2; 2212 2191 data->medium_type = buffer[2]; 2213 2192 data->device_specific = buffer[3]; 2214 2193 data->longlba = buffer[4] & 0x01; 2215 - data->block_descriptor_length = buffer[6]*256 2216 - + buffer[7]; 2194 + data->block_descriptor_length = get_unaligned_be16(&buffer[6]); 2217 2195 } else { 2218 2196 data->length = buffer[0] + 1; 2219 2197 data->medium_type = buffer[1];
+10 -91
drivers/scsi/scsi_pm.c
··· 56 56 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 57 57 int err; 58 58 59 - /* flush pending in-flight resume operations, suspend is synchronous */ 60 - async_synchronize_full_domain(&scsi_sd_pm_domain); 61 - 62 59 err = scsi_device_quiesce(to_scsi_device(dev)); 63 60 if (err == 0) { 64 61 err = cb(dev, pm); ··· 66 69 return err; 67 70 } 68 71 69 - static int scsi_dev_type_resume(struct device *dev, 70 - int (*cb)(struct device *, const struct dev_pm_ops *)) 71 - { 72 - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 73 - int err = 0; 74 - 75 - err = cb(dev, pm); 76 - scsi_device_resume(to_scsi_device(dev)); 77 - dev_dbg(dev, "scsi resume: %d\n", err); 78 - 79 - if (err == 0) { 80 - pm_runtime_disable(dev); 81 - err = pm_runtime_set_active(dev); 82 - pm_runtime_enable(dev); 83 - 84 - /* 85 - * Forcibly set runtime PM status of request queue to "active" 86 - * to make sure we can again get requests from the queue 87 - * (see also blk_pm_peek_request()). 88 - * 89 - * The resume hook will correct runtime PM status of the disk. 90 - */ 91 - if (!err && scsi_is_sdev_device(dev)) { 92 - struct scsi_device *sdev = to_scsi_device(dev); 93 - 94 - blk_set_runtime_active(sdev->request_queue); 95 - } 96 - } 97 - 98 - return err; 99 - } 100 - 101 72 static int 102 73 scsi_bus_suspend_common(struct device *dev, 103 74 int (*cb)(struct device *, const struct dev_pm_ops *)) 104 75 { 105 - int err = 0; 76 + if (!scsi_is_sdev_device(dev)) 77 + return 0; 106 78 107 - if (scsi_is_sdev_device(dev)) { 108 - /* 109 - * All the high-level SCSI drivers that implement runtime 110 - * PM treat runtime suspend, system suspend, and system 111 - * hibernate nearly identically. In all cases the requirements 112 - * for runtime suspension are stricter. 
113 - */ 114 - if (pm_runtime_suspended(dev)) 115 - return 0; 116 - 117 - err = scsi_dev_type_suspend(dev, cb); 118 - } 119 - 120 - return err; 121 - } 122 - 123 - static void async_sdev_resume(void *dev, async_cookie_t cookie) 124 - { 125 - scsi_dev_type_resume(dev, do_scsi_resume); 126 - } 127 - 128 - static void async_sdev_thaw(void *dev, async_cookie_t cookie) 129 - { 130 - scsi_dev_type_resume(dev, do_scsi_thaw); 131 - } 132 - 133 - static void async_sdev_restore(void *dev, async_cookie_t cookie) 134 - { 135 - scsi_dev_type_resume(dev, do_scsi_restore); 79 + return scsi_dev_type_suspend(dev, cb); 136 80 } 137 81 138 82 static int scsi_bus_resume_common(struct device *dev, 139 83 int (*cb)(struct device *, const struct dev_pm_ops *)) 140 84 { 141 - async_func_t fn; 85 + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 86 + int err; 142 87 143 88 if (!scsi_is_sdev_device(dev)) 144 - fn = NULL; 145 - else if (cb == do_scsi_resume) 146 - fn = async_sdev_resume; 147 - else if (cb == do_scsi_thaw) 148 - fn = async_sdev_thaw; 149 - else if (cb == do_scsi_restore) 150 - fn = async_sdev_restore; 151 - else 152 - fn = NULL; 89 + return 0; 153 90 154 - if (fn) { 155 - async_schedule_domain(fn, dev, &scsi_sd_pm_domain); 91 + err = cb(dev, pm); 92 + scsi_device_resume(to_scsi_device(dev)); 93 + dev_dbg(dev, "scsi resume: %d\n", err); 156 94 157 - /* 158 - * If a user has disabled async probing a likely reason 159 - * is due to a storage enclosure that does not inject 160 - * staggered spin-ups. For safety, make resume 161 - * synchronous as well in that case. 162 - */ 163 - if (strncmp(scsi_scan_type, "async", 5) != 0) 164 - async_synchronize_full_domain(&scsi_sd_pm_domain); 165 - } else { 166 - pm_runtime_disable(dev); 167 - pm_runtime_set_active(dev); 168 - pm_runtime_enable(dev); 169 - } 170 - return 0; 95 + return err; 171 96 } 172 97 173 98 static int scsi_bus_prepare(struct device *dev)
+3 -4
drivers/scsi/scsi_priv.h
··· 84 84 int scsi_eh_get_sense(struct list_head *work_q, 85 85 struct list_head *done_q); 86 86 int scsi_noretry_cmd(struct scsi_cmnd *scmd); 87 + void scsi_eh_done(struct scsi_cmnd *scmd); 87 88 88 89 /* scsi_lib.c */ 89 90 extern int scsi_maybe_unblock_host(struct scsi_device *sdev); ··· 117 116 #endif /* CONFIG_PROC_FS */ 118 117 119 118 /* scsi_scan.c */ 120 - extern char scsi_scan_type[]; 119 + void scsi_enable_async_suspend(struct device *dev); 121 120 extern int scsi_complete_async_scans(void); 122 121 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, 123 122 unsigned int, u64, enum scsi_scan_mode); ··· 144 143 extern void __scsi_remove_device(struct scsi_device *); 145 144 146 145 extern struct bus_type scsi_bus_type; 147 - extern const struct attribute_group *scsi_sysfs_shost_attr_groups[]; 146 + extern const struct attribute_group scsi_shost_attr_group; 148 147 149 148 /* scsi_netlink.c */ 150 149 #ifdef CONFIG_SCSI_NETLINK ··· 170 169 static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; } 171 170 static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} 172 171 #endif /* CONFIG_PM */ 173 - 174 - extern struct async_domain scsi_sd_pm_domain; 175 172 176 173 /* scsi_dh.c */ 177 174 #ifdef CONFIG_SCSI_DH
+17 -57
drivers/scsi/scsi_scan.c
··· 123 123 }; 124 124 125 125 /** 126 + * scsi_enable_async_suspend - Enable async suspend and resume 127 + */ 128 + void scsi_enable_async_suspend(struct device *dev) 129 + { 130 + /* 131 + * If a user has disabled async probing a likely reason is due to a 132 + * storage enclosure that does not inject staggered spin-ups. For 133 + * safety, make resume synchronous as well in that case. 134 + */ 135 + if (strncmp(scsi_scan_type, "async", 5) != 0) 136 + return; 137 + /* Enable asynchronous suspend and resume. */ 138 + device_enable_async_suspend(dev); 139 + } 140 + 141 + /** 126 142 * scsi_complete_async_scans - Wait for asynchronous scans to complete 127 143 * 128 144 * When this function returns, any host which started scanning before ··· 469 453 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); 470 454 dev->bus = &scsi_bus_type; 471 455 dev->type = &scsi_target_type; 456 + scsi_enable_async_suspend(dev); 472 457 starget->id = id; 473 458 starget->channel = channel; 474 459 starget->can_queue = 0; ··· 1917 1900 } 1918 1901 spin_unlock_irqrestore(shost->host_lock, flags); 1919 1902 } 1920 - 1921 - /** 1922 - * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself 1923 - * @shost: Host that needs a scsi_device 1924 - * 1925 - * Lock status: None assumed. 1926 - * 1927 - * Returns: The scsi_device or NULL 1928 - * 1929 - * Notes: 1930 - * Attach a single scsi_device to the Scsi_Host - this should 1931 - * be made to look like a "pseudo-device" that points to the 1932 - * HA itself. 1933 - * 1934 - * Note - this device is not accessible from any high-level 1935 - * drivers (including generics), which is probably not 1936 - * optimal. We can add hooks later to attach. 
1937 - */ 1938 - struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) 1939 - { 1940 - struct scsi_device *sdev = NULL; 1941 - struct scsi_target *starget; 1942 - 1943 - mutex_lock(&shost->scan_mutex); 1944 - if (!scsi_host_scan_allowed(shost)) 1945 - goto out; 1946 - starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id); 1947 - if (!starget) 1948 - goto out; 1949 - 1950 - sdev = scsi_alloc_sdev(starget, 0, NULL); 1951 - if (sdev) 1952 - sdev->borken = 0; 1953 - else 1954 - scsi_target_reap(starget); 1955 - put_device(&starget->dev); 1956 - out: 1957 - mutex_unlock(&shost->scan_mutex); 1958 - return sdev; 1959 - } 1960 - EXPORT_SYMBOL(scsi_get_host_dev); 1961 - 1962 - /** 1963 - * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself 1964 - * @sdev: Host device to be freed 1965 - * 1966 - * Lock status: None assumed. 1967 - * 1968 - * Returns: Nothing 1969 - */ 1970 - void scsi_free_host_dev(struct scsi_device *sdev) 1971 - { 1972 - BUG_ON(sdev->id != sdev->host->this_id); 1973 - 1974 - __scsi_remove_device(sdev); 1975 - } 1976 - EXPORT_SYMBOL(scsi_free_host_dev); 1977 1903
+14 -40
drivers/scsi/scsi_sysfs.c
··· 424 424 NULL 425 425 }; 426 426 427 - static struct attribute_group scsi_shost_attr_group = { 427 + const struct attribute_group scsi_shost_attr_group = { 428 428 .attrs = scsi_sysfs_shost_attrs, 429 - }; 430 - 431 - const struct attribute_group *scsi_sysfs_shost_attr_groups[] = { 432 - &scsi_shost_attr_group, 433 - NULL 434 429 }; 435 430 436 431 static void scsi_device_cls_release(struct device *class_dev) ··· 1337 1342 **/ 1338 1343 int scsi_sysfs_add_sdev(struct scsi_device *sdev) 1339 1344 { 1340 - int error, i; 1345 + int error; 1341 1346 struct scsi_target *starget = sdev->sdev_target; 1342 1347 1343 1348 error = scsi_target_add(starget); ··· 1390 1395 } 1391 1396 } 1392 1397 1393 - /* add additional host specific attributes */ 1394 - if (sdev->host->hostt->sdev_attrs) { 1395 - for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) { 1396 - error = device_create_file(&sdev->sdev_gendev, 1397 - sdev->host->hostt->sdev_attrs[i]); 1398 - if (error) 1399 - return error; 1400 - } 1401 - } 1402 - 1403 - if (sdev->host->hostt->sdev_groups) { 1404 - error = sysfs_create_groups(&sdev->sdev_gendev.kobj, 1405 - sdev->host->hostt->sdev_groups); 1406 - if (error) 1407 - return error; 1408 - } 1409 - 1410 1398 scsi_autopm_put_device(sdev); 1411 1399 return error; 1412 1400 } ··· 1428 1450 1429 1451 if (res != 0) 1430 1452 return; 1431 - 1432 - if (sdev->host->hostt->sdev_groups) 1433 - sysfs_remove_groups(&sdev->sdev_gendev.kobj, 1434 - sdev->host->hostt->sdev_groups); 1435 1453 1436 1454 if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev) 1437 1455 bsg_unregister_queue(sdev->bsg_dev); ··· 1567 1593 **/ 1568 1594 int scsi_sysfs_add_host(struct Scsi_Host *shost) 1569 1595 { 1570 - int error, i; 1571 - 1572 - /* add host specific attributes */ 1573 - if (shost->hostt->shost_attrs) { 1574 - for (i = 0; shost->hostt->shost_attrs[i]; i++) { 1575 - error = device_create_file(&shost->shost_dev, 1576 - shost->hostt->shost_attrs[i]); 1577 - if (error) 1578 - return error; 1579 - 
} 1580 - } 1581 - 1582 1596 transport_register_device(&shost->shost_gendev); 1583 1597 transport_configure_device(&shost->shost_gendev); 1584 1598 return 0; ··· 1580 1618 1581 1619 void scsi_sysfs_device_initialize(struct scsi_device *sdev) 1582 1620 { 1621 + int i, j = 0; 1583 1622 unsigned long flags; 1584 1623 struct Scsi_Host *shost = sdev->host; 1624 + struct scsi_host_template *hostt = shost->hostt; 1585 1625 struct scsi_target *starget = sdev->sdev_target; 1586 1626 1587 1627 device_initialize(&sdev->sdev_gendev); 1588 1628 sdev->sdev_gendev.bus = &scsi_bus_type; 1589 1629 sdev->sdev_gendev.type = &scsi_dev_type; 1630 + scsi_enable_async_suspend(&sdev->sdev_gendev); 1590 1631 dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu", 1591 1632 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); 1633 + sdev->gendev_attr_groups[j++] = &scsi_sdev_attr_group; 1634 + if (hostt->sdev_groups) { 1635 + for (i = 0; hostt->sdev_groups[i] && 1636 + j < ARRAY_SIZE(sdev->gendev_attr_groups); 1637 + i++, j++) { 1638 + sdev->gendev_attr_groups[j] = hostt->sdev_groups[i]; 1639 + } 1640 + } 1641 + WARN_ON_ONCE(j >= ARRAY_SIZE(sdev->gendev_attr_groups)); 1592 1642 1593 1643 device_initialize(&sdev->sdev_dev); 1594 1644 sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
+1
drivers/scsi/scsi_transport_sas.c
··· 154 154 { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" }, 155 155 { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, 156 156 { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" }, 157 + { SAS_LINK_RATE_22_5_GBPS, "22.5 Gbit" }, 157 158 }; 158 159 sas_bitfield_name_search(linkspeed, sas_linkspeed_names) 159 160 sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
+31 -7
drivers/scsi/sd.c
··· 110 110 static void sd_shutdown(struct device *); 111 111 static int sd_suspend_system(struct device *); 112 112 static int sd_suspend_runtime(struct device *); 113 - static int sd_resume(struct device *); 113 + static int sd_resume_system(struct device *); 114 114 static int sd_resume_runtime(struct device *); 115 115 static void sd_rescan(struct device *); 116 116 static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt); ··· 603 603 604 604 static const struct dev_pm_ops sd_pm_ops = { 605 605 .suspend = sd_suspend_system, 606 - .resume = sd_resume, 606 + .resume = sd_resume_system, 607 607 .poweroff = sd_suspend_system, 608 - .restore = sd_resume, 608 + .restore = sd_resume_system, 609 609 .runtime_suspend = sd_suspend_runtime, 610 610 .runtime_resume = sd_resume_runtime, 611 611 }; ··· 2647 2647 unsigned char *buffer, int len, struct scsi_mode_data *data, 2648 2648 struct scsi_sense_hdr *sshdr) 2649 2649 { 2650 + /* 2651 + * If we must use MODE SENSE(10), make sure that the buffer length 2652 + * is at least 8 bytes so that the mode sense header fits. 
2653 + */ 2654 + if (sdkp->device->use_10_for_ms && len < 8) 2655 + len = 8; 2656 + 2650 2657 return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len, 2651 2658 SD_TIMEOUT, sdkp->max_retries, data, 2652 2659 sshdr); ··· 2832 2825 } 2833 2826 } 2834 2827 2835 - sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); 2828 + sd_first_printk(KERN_WARNING, sdkp, 2829 + "No Caching mode page found\n"); 2836 2830 goto defaults; 2837 2831 2838 2832 Page_found: ··· 2888 2880 "Assuming drive cache: write back\n"); 2889 2881 sdkp->WCE = 1; 2890 2882 } else { 2891 - sd_first_printk(KERN_ERR, sdkp, 2883 + sd_first_printk(KERN_WARNING, sdkp, 2892 2884 "Assuming drive cache: write through\n"); 2893 2885 sdkp->WCE = 0; 2894 2886 } ··· 3578 3570 pm_runtime_set_autosuspend_delay(dev, 3579 3571 sdp->host->hostt->rpm_autosuspend_delay); 3580 3572 } 3581 - device_add_disk(dev, gd, NULL); 3573 + 3574 + error = device_add_disk(dev, gd, NULL); 3575 + if (error) { 3576 + put_device(&sdkp->dev); 3577 + goto out; 3578 + } 3579 + 3582 3580 if (sdkp->capacity) 3583 3581 sd_dif_config_host(sdkp); 3584 3582 ··· 3632 3618 sdkp = dev_get_drvdata(dev); 3633 3619 scsi_autopm_get_device(sdkp->device); 3634 3620 3635 - async_synchronize_full_domain(&scsi_sd_pm_domain); 3636 3621 device_del(&sdkp->dev); 3637 3622 del_gendisk(sdkp->disk); 3638 3623 sd_shutdown(dev); ··· 3788 3775 3789 3776 static int sd_suspend_system(struct device *dev) 3790 3777 { 3778 + if (pm_runtime_suspended(dev)) 3779 + return 0; 3780 + 3791 3781 return sd_suspend_common(dev, true); 3792 3782 } 3793 3783 ··· 3815 3799 if (!ret) 3816 3800 opal_unlock_from_suspend(sdkp->opal_dev); 3817 3801 return ret; 3802 + } 3803 + 3804 + static int sd_resume_system(struct device *dev) 3805 + { 3806 + if (pm_runtime_suspended(dev)) 3807 + return 0; 3808 + 3809 + return sd_resume(dev); 3818 3810 } 3819 3811 3820 3812 static int sd_resume_runtime(struct device *dev)
+50 -11
drivers/scsi/smartpqi/smartpqi.h
··· 82 82 __le32 sis_product_identifier; /* B4h */ 83 83 u8 reserved5[0xbc - (0xb4 + sizeof(__le32))]; 84 84 __le32 sis_firmware_status; /* BCh */ 85 - u8 reserved6[0x1000 - (0xbc + sizeof(__le32))]; 85 + u8 reserved6[0xcc - (0xbc + sizeof(__le32))]; 86 + __le32 sis_ctrl_shutdown_reason_code; /* CCh */ 87 + u8 reserved7[0x1000 - (0xcc + sizeof(__le32))]; 86 88 __le32 sis_mailbox[8]; /* 1000h */ 87 - u8 reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))]; 89 + u8 reserved8[0x4000 - (0x1000 + (sizeof(__le32) * 8))]; 88 90 /* 89 91 * The PQI spec states that the PQI registers should be at 90 92 * offset 0 from the PCIe BAR 0. However, we can't map ··· 103 101 #endif 104 102 105 103 #define PQI_DEVICE_REGISTERS_OFFSET 0x4000 104 + 105 + /* shutdown reasons for taking the controller offline */ 106 + enum pqi_ctrl_shutdown_reason { 107 + PQI_IQ_NOT_DRAINED_TIMEOUT = 1, 108 + PQI_LUN_RESET_TIMEOUT = 2, 109 + PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT = 3, 110 + PQI_NO_HEARTBEAT = 4, 111 + PQI_FIRMWARE_KERNEL_NOT_UP = 5, 112 + PQI_OFA_RESPONSE_TIMEOUT = 6, 113 + PQI_INVALID_REQ_ID = 7, 114 + PQI_UNMATCHED_REQ_ID = 8, 115 + PQI_IO_PI_OUT_OF_RANGE = 9, 116 + PQI_EVENT_PI_OUT_OF_RANGE = 10, 117 + PQI_UNEXPECTED_IU_TYPE = 11 118 + }; 106 119 107 120 enum pqi_io_path { 108 121 RAID_PATH = 0, ··· 867 850 #define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14 868 851 #define PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME 15 869 852 #define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16 870 - #define PQI_FIRMWARE_FEATURE_MAXIMUM 16 853 + #define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17 854 + #define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18 855 + #define PQI_FIRMWARE_FEATURE_MAXIMUM 18 871 856 872 857 struct pqi_config_table_debug { 873 858 struct pqi_config_table_section_header header; ··· 944 925 #define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5) 945 926 #define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6) 946 927 947 - #define CISS_REPORT_PHYS_FLAG_OTHER (1 << 1) 928 + #define 
CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2 0x2 929 + #define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4 0x4 930 + #define CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK 0xf 948 931 949 - struct report_log_lun_extended_entry { 932 + struct report_log_lun { 950 933 u8 lunid[8]; 951 934 u8 volume_id[16]; 952 935 }; 953 936 954 - struct report_log_lun_extended { 937 + struct report_log_lun_list { 955 938 struct report_lun_header header; 956 - struct report_log_lun_extended_entry lun_entries[1]; 939 + struct report_log_lun lun_entries[1]; 957 940 }; 958 941 959 - struct report_phys_lun_extended_entry { 942 + struct report_phys_lun_8byte_wwid { 960 943 u8 lunid[8]; 961 944 __be64 wwid; 945 + u8 device_type; 946 + u8 device_flags; 947 + u8 lun_count; /* number of LUNs in a multi-LUN device */ 948 + u8 redundant_paths; 949 + u32 aio_handle; 950 + }; 951 + 952 + struct report_phys_lun_16byte_wwid { 953 + u8 lunid[8]; 954 + u8 wwid[16]; 962 955 u8 device_type; 963 956 u8 device_flags; 964 957 u8 lun_count; /* number of LUNs in a multi-LUN device */ ··· 981 950 /* for device_flags field of struct report_phys_lun_extended_entry */ 982 951 #define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8 983 952 984 - struct report_phys_lun_extended { 953 + struct report_phys_lun_8byte_wwid_list { 985 954 struct report_lun_header header; 986 - struct report_phys_lun_extended_entry lun_entries[1]; 955 + struct report_phys_lun_8byte_wwid lun_entries[1]; 956 + }; 957 + 958 + struct report_phys_lun_16byte_wwid_list { 959 + struct report_lun_header header; 960 + struct report_phys_lun_16byte_wwid lun_entries[1]; 987 961 }; 988 962 989 963 struct raid_map_disk_data { ··· 1095 1059 int target; 1096 1060 int lun; 1097 1061 u8 scsi3addr[8]; 1098 - __be64 wwid; 1062 + u8 wwid[16]; 1099 1063 u8 volume_id[16]; 1100 1064 u8 is_physical_device : 1; 1101 1065 u8 is_external_raid_device : 1; ··· 1106 1070 u8 keep_device : 1; 1107 1071 u8 volume_offline : 1; 1108 1072 u8 rescan : 1; 1073 + u8 ignore_device : 1; 1109 
1074 bool aio_enabled; /* only valid for physical disks */ 1110 1075 bool in_remove; 1111 1076 bool device_offline; ··· 1334 1297 u8 raid_iu_timeout_supported : 1; 1335 1298 u8 tmf_iu_timeout_supported : 1; 1336 1299 u8 unique_wwid_in_report_phys_lun_supported : 1; 1300 + u8 firmware_triage_supported : 1; 1301 + u8 rpl_extended_format_4_5_supported : 1; 1337 1302 u8 enable_r1_writes : 1; 1338 1303 u8 enable_r5_writes : 1; 1339 1304 u8 enable_r6_writes : 1;
+423 -167
drivers/scsi/smartpqi/smartpqi_init.c
··· 33 33 #define BUILD_TIMESTAMP 34 34 #endif 35 35 36 - #define DRIVER_VERSION "2.1.10-020" 36 + #define DRIVER_VERSION "2.1.12-055" 37 37 #define DRIVER_MAJOR 2 38 38 #define DRIVER_MINOR 1 39 - #define DRIVER_RELEASE 10 40 - #define DRIVER_REVISION 20 39 + #define DRIVER_RELEASE 12 40 + #define DRIVER_REVISION 55 41 41 42 42 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ 43 43 DRIVER_VERSION BUILD_TIMESTAMP ")" ··· 54 54 MODULE_VERSION(DRIVER_VERSION); 55 55 MODULE_LICENSE("GPL"); 56 56 57 - static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info); 57 + static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, 58 + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); 58 59 static void pqi_ctrl_offline_worker(struct work_struct *work); 59 60 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); 60 61 static void pqi_scan_start(struct Scsi_Host *shost); ··· 195 194 static inline void pqi_scsi_done(struct scsi_cmnd *scmd) 196 195 { 197 196 pqi_prep_for_scsi_done(scmd); 198 - scmd->scsi_done(scmd); 197 + scsi_done(scmd); 199 198 } 200 199 201 200 static inline void pqi_disable_write_same(struct scsi_device *sdev) ··· 227 226 { 228 227 if (ctrl_info->controller_online) 229 228 if (!sis_is_firmware_running(ctrl_info)) 230 - pqi_take_ctrl_offline(ctrl_info); 229 + pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP); 231 230 } 232 231 233 232 static inline bool pqi_is_hba_lunid(u8 *scsi3addr) ··· 235 234 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); 236 235 } 237 236 237 + #define PQI_DRIVER_SCRATCH_PQI_MODE 0x1 238 + #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2 239 + 238 240 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) 239 241 { 240 - return sis_read_driver_scratch(ctrl_info); 242 + return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? 
PQI_MODE : SIS_MODE; 241 243 } 242 244 243 245 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, 244 246 enum pqi_ctrl_mode mode) 245 247 { 246 - sis_write_driver_scratch(ctrl_info, mode); 248 + u32 driver_scratch; 249 + 250 + driver_scratch = sis_read_driver_scratch(ctrl_info); 251 + 252 + if (mode == PQI_MODE) 253 + driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE; 254 + else 255 + driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE; 256 + 257 + sis_write_driver_scratch(ctrl_info, driver_scratch); 258 + } 259 + 260 + static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info) 261 + { 262 + return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0; 263 + } 264 + 265 + static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported) 266 + { 267 + u32 driver_scratch; 268 + 269 + driver_scratch = sis_read_driver_scratch(ctrl_info); 270 + 271 + if (is_supported) 272 + driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; 273 + else 274 + driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; 275 + 276 + sis_write_driver_scratch(ctrl_info, driver_scratch); 247 277 } 248 278 249 279 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) ··· 555 523 cdb = request->cdb; 556 524 557 525 switch (cmd) { 526 + case TEST_UNIT_READY: 527 + request->data_direction = SOP_READ_FLAG; 528 + cdb[0] = TEST_UNIT_READY; 529 + break; 558 530 case INQUIRY: 559 531 request->data_direction = SOP_READ_FLAG; 560 532 cdb[0] = INQUIRY; ··· 572 536 case CISS_REPORT_PHYS: 573 537 request->data_direction = SOP_READ_FLAG; 574 538 cdb[0] = cmd; 575 - if (cmd == CISS_REPORT_PHYS) 576 - cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER; 577 - else 539 + if (cmd == CISS_REPORT_PHYS) { 540 + if (ctrl_info->rpl_extended_format_4_5_supported) 541 + cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4; 542 + else 543 + cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2; 544 + } else { 578 545 cdb[1] = 
ctrl_info->ciss_report_log_flags; 546 + } 579 547 put_unaligned_be32(cdb_length, &cdb[6]); 580 548 break; 581 549 case CISS_GET_RAID_MAP: ··· 1136 1096 1137 1097 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) 1138 1098 { 1139 - return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer); 1099 + int rc; 1100 + unsigned int i; 1101 + u8 rpl_response_format; 1102 + u32 num_physicals; 1103 + size_t rpl_16byte_wwid_list_length; 1104 + void *rpl_list; 1105 + struct report_lun_header *rpl_header; 1106 + struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list; 1107 + struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list; 1108 + 1109 + rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list); 1110 + if (rc) 1111 + return rc; 1112 + 1113 + if (ctrl_info->rpl_extended_format_4_5_supported) { 1114 + rpl_header = rpl_list; 1115 + rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK; 1116 + if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) { 1117 + *buffer = rpl_list; 1118 + return 0; 1119 + } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) { 1120 + dev_err(&ctrl_info->pci_dev->dev, 1121 + "RPL returned unsupported data format %u\n", 1122 + rpl_response_format); 1123 + return -EINVAL; 1124 + } else { 1125 + dev_warn(&ctrl_info->pci_dev->dev, 1126 + "RPL returned extended format 2 instead of 4\n"); 1127 + } 1128 + } 1129 + 1130 + rpl_8byte_wwid_list = rpl_list; 1131 + num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); 1132 + rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid)); 1133 + 1134 + rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL); 1135 + if (!rpl_16byte_wwid_list) 1136 + return -ENOMEM; 1137 + 1138 + put_unaligned_be32(num_physicals * sizeof(struct 
report_phys_lun_16byte_wwid), 1139 + &rpl_16byte_wwid_list->header.list_length); 1140 + rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags; 1141 + 1142 + for (i = 0; i < num_physicals; i++) { 1143 + memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid)); 1144 + memset(&rpl_16byte_wwid_list->lun_entries[i].wwid, 0, 8); 1145 + memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid)); 1146 + rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type; 1147 + rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags; 1148 + rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count; 1149 + rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths; 1150 + rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle; 1151 + } 1152 + 1153 + kfree(rpl_8byte_wwid_list); 1154 + *buffer = rpl_16byte_wwid_list; 1155 + 1156 + return 0; 1140 1157 } 1141 1158 1142 1159 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) ··· 1202 1105 } 1203 1106 1204 1107 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 1205 - struct report_phys_lun_extended **physdev_list, 1206 - struct report_log_lun_extended **logdev_list) 1108 + struct report_phys_lun_16byte_wwid_list **physdev_list, 1109 + struct report_log_lun_list **logdev_list) 1207 1110 { 1208 1111 int rc; 1209 1112 size_t logdev_list_length; 1210 1113 size_t logdev_data_length; 1211 - struct report_log_lun_extended *internal_logdev_list; 1212 - struct report_log_lun_extended *logdev_data; 1114 + struct report_log_lun_list *internal_logdev_list; 1115 + struct report_log_lun_list 
*logdev_data; 1213 1116 struct report_lun_header report_lun_header; 1214 1117 1215 1118 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); ··· 1234 1137 } else { 1235 1138 memset(&report_lun_header, 0, sizeof(report_lun_header)); 1236 1139 logdev_data = 1237 - (struct report_log_lun_extended *)&report_lun_header; 1140 + (struct report_log_lun_list *)&report_lun_header; 1238 1141 logdev_list_length = 0; 1239 1142 } 1240 1143 ··· 1242 1145 logdev_list_length; 1243 1146 1244 1147 internal_logdev_list = kmalloc(logdev_data_length + 1245 - sizeof(struct report_log_lun_extended), GFP_KERNEL); 1148 + sizeof(struct report_log_lun), GFP_KERNEL); 1246 1149 if (!internal_logdev_list) { 1247 1150 kfree(*logdev_list); 1248 1151 *logdev_list = NULL; ··· 1251 1154 1252 1155 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 1253 1156 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 1254 - sizeof(struct report_log_lun_extended_entry)); 1157 + sizeof(struct report_log_lun)); 1255 1158 put_unaligned_be32(logdev_list_length + 1256 - sizeof(struct report_log_lun_extended_entry), 1159 + sizeof(struct report_log_lun), 1257 1160 &internal_logdev_list->header.list_length); 1258 1161 1259 1162 kfree(*logdev_list); ··· 1640 1543 return rc; 1641 1544 } 1642 1545 1546 + /* 1547 + * Prevent adding drive to OS for some corner cases such as a drive 1548 + * undergoing a sanitize operation. Some OSes will continue to poll 1549 + * the drive until the sanitize completes, which can take hours, 1550 + * resulting in long bootup delays. Commands such as TUR, READ_CAP 1551 + * are allowed, but READ/WRITE cause check condition. So the OS 1552 + * cannot check/read the partition table. 1553 + * Note: devices that have completed sanitize must be re-enabled 1554 + * using the management utility. 
1555 + */ 1556 + static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info, 1557 + struct pqi_scsi_dev *device) 1558 + { 1559 + u8 scsi_status; 1560 + int rc; 1561 + enum dma_data_direction dir; 1562 + char *buffer; 1563 + int buffer_length = 64; 1564 + size_t sense_data_length; 1565 + struct scsi_sense_hdr sshdr; 1566 + struct pqi_raid_path_request request; 1567 + struct pqi_raid_error_info error_info; 1568 + bool offline = false; /* Assume keep online */ 1569 + 1570 + /* Do not check controllers. */ 1571 + if (pqi_is_hba_lunid(device->scsi3addr)) 1572 + return false; 1573 + 1574 + /* Do not check LVs. */ 1575 + if (pqi_is_logical_device(device)) 1576 + return false; 1577 + 1578 + buffer = kmalloc(buffer_length, GFP_KERNEL); 1579 + if (!buffer) 1580 + return false; /* Assume not offline */ 1581 + 1582 + /* Check for SANITIZE in progress using TUR */ 1583 + rc = pqi_build_raid_path_request(ctrl_info, &request, 1584 + TEST_UNIT_READY, RAID_CTLR_LUNID, buffer, 1585 + buffer_length, 0, &dir); 1586 + if (rc) 1587 + goto out; /* Assume not offline */ 1588 + 1589 + memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number)); 1590 + 1591 + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info); 1592 + 1593 + if (rc) 1594 + goto out; /* Assume not offline */ 1595 + 1596 + scsi_status = error_info.status; 1597 + sense_data_length = get_unaligned_le16(&error_info.sense_data_length); 1598 + if (sense_data_length == 0) 1599 + sense_data_length = 1600 + get_unaligned_le16(&error_info.response_data_length); 1601 + if (sense_data_length) { 1602 + if (sense_data_length > sizeof(error_info.data)) 1603 + sense_data_length = sizeof(error_info.data); 1604 + 1605 + /* 1606 + * Check for sanitize in progress: asc:0x04, ascq: 0x1b 1607 + */ 1608 + if (scsi_status == SAM_STAT_CHECK_CONDITION && 1609 + scsi_normalize_sense(error_info.data, 1610 + sense_data_length, &sshdr) && 1611 + sshdr.sense_key == NOT_READY && 1612 + sshdr.asc == 
0x04 && 1613 + sshdr.ascq == 0x1b) { 1614 + device->device_offline = true; 1615 + offline = true; 1616 + goto out; /* Keep device offline */ 1617 + } 1618 + } 1619 + 1620 + out: 1621 + kfree(buffer); 1622 + return offline; 1623 + } 1624 + 1643 1625 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1644 1626 struct pqi_scsi_dev *device, 1645 1627 struct bmic_identify_physical_device *id_phys) ··· 1869 1693 { 1870 1694 int rc; 1871 1695 1872 - pqi_device_remove_start(device); 1873 - 1874 1696 rc = pqi_device_wait_for_pending_io(ctrl_info, device, 1875 1697 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); 1876 1698 if (rc) ··· 1882 1708 scsi_remove_device(device->sdev); 1883 1709 else 1884 1710 pqi_remove_sas_device(device); 1711 + 1712 + pqi_device_remove_start(device); 1885 1713 } 1886 1714 1887 1715 /* Assumes the SCSI device list lock is held. */ ··· 1906 1730 return false; 1907 1731 1908 1732 if (dev1->is_physical_device) 1909 - return dev1->wwid == dev2->wwid; 1733 + return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0; 1910 1734 1911 1735 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; 1912 1736 } ··· 1976 1800 else 1977 1801 count += scnprintf(buffer + count, 1978 1802 PQI_DEV_INFO_BUFFER_LENGTH - count, 1979 - " %016llx", device->sas_address); 1803 + " %016llx%016llx", 1804 + get_unaligned_be64(&device->wwid[0]), 1805 + get_unaligned_be64(&device->wwid[8])); 1980 1806 1981 1807 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 1982 1808 " %s %.8s %.16s ", ··· 2164 1986 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 2165 1987 scsi_device_list_entry) { 2166 1988 if (device->device_gone) { 2167 - list_del_init(&device->scsi_device_list_entry); 1989 + list_del(&device->scsi_device_list_entry); 2168 1990 list_add_tail(&device->delete_list_entry, &delete_list); 2169 1991 } 2170 1992 } ··· 2203 2025 if (device->volume_offline) { 2204 2026 pqi_dev_info(ctrl_info, "offline", 
device); 2205 2027 pqi_show_volume_status(ctrl_info, device); 2206 - } 2207 - list_del(&device->delete_list_entry); 2208 - if (pqi_is_device_added(device)) { 2209 - pqi_remove_device(ctrl_info, device); 2210 2028 } else { 2211 - if (!device->volume_offline) 2212 - pqi_dev_info(ctrl_info, "removed", device); 2213 - pqi_free_device(device); 2029 + pqi_dev_info(ctrl_info, "removed", device); 2214 2030 } 2031 + if (pqi_is_device_added(device)) 2032 + pqi_remove_device(ctrl_info, device); 2033 + list_del(&device->delete_list_entry); 2034 + pqi_free_device(device); 2215 2035 } 2216 2036 2217 2037 /* ··· 2292 2116 } 2293 2117 2294 2118 static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info, 2295 - struct pqi_scsi_dev *device, struct report_phys_lun_extended_entry *phys_lun_ext_entry) 2119 + struct pqi_scsi_dev *device, struct report_phys_lun_16byte_wwid *phys_lun) 2296 2120 { 2297 2121 if (ctrl_info->unique_wwid_in_report_phys_lun_supported || 2122 + ctrl_info->rpl_extended_format_4_5_supported || 2298 2123 pqi_is_device_with_sas_address(device)) 2299 - device->wwid = phys_lun_ext_entry->wwid; 2124 + memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); 2300 2125 else 2301 - device->wwid = cpu_to_be64(get_unaligned_be64(&device->page_83_identifier)); 2126 + memcpy(&device->wwid[8], device->page_83_identifier, 8); 2302 2127 } 2303 2128 2304 2129 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) ··· 2307 2130 int i; 2308 2131 int rc; 2309 2132 LIST_HEAD(new_device_list_head); 2310 - struct report_phys_lun_extended *physdev_list = NULL; 2311 - struct report_log_lun_extended *logdev_list = NULL; 2312 - struct report_phys_lun_extended_entry *phys_lun_ext_entry; 2313 - struct report_log_lun_extended_entry *log_lun_ext_entry; 2133 + struct report_phys_lun_16byte_wwid_list *physdev_list = NULL; 2134 + struct report_log_lun_list *logdev_list = NULL; 2135 + struct report_phys_lun_16byte_wwid *phys_lun; 2136 + struct report_log_lun 
*log_lun; 2314 2137 struct bmic_identify_physical_device *id_phys = NULL; 2315 2138 u32 num_physicals; 2316 2139 u32 num_logicals; ··· 2361 2184 2362 2185 if (pqi_hide_vsep) { 2363 2186 for (i = num_physicals - 1; i >= 0; i--) { 2364 - phys_lun_ext_entry = 2365 - &physdev_list->lun_entries[i]; 2366 - if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) { 2367 - pqi_mask_device(phys_lun_ext_entry->lunid); 2187 + phys_lun = &physdev_list->lun_entries[i]; 2188 + if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { 2189 + pqi_mask_device(phys_lun->lunid); 2368 2190 break; 2369 2191 } 2370 2192 } ··· 2407 2231 if ((!pqi_expose_ld_first && i < num_physicals) || 2408 2232 (pqi_expose_ld_first && i >= num_logicals)) { 2409 2233 is_physical_device = true; 2410 - phys_lun_ext_entry = 2411 - &physdev_list->lun_entries[physical_index++]; 2412 - log_lun_ext_entry = NULL; 2413 - scsi3addr = phys_lun_ext_entry->lunid; 2234 + phys_lun = &physdev_list->lun_entries[physical_index++]; 2235 + log_lun = NULL; 2236 + scsi3addr = phys_lun->lunid; 2414 2237 } else { 2415 2238 is_physical_device = false; 2416 - phys_lun_ext_entry = NULL; 2417 - log_lun_ext_entry = 2418 - &logdev_list->lun_entries[logical_index++]; 2419 - scsi3addr = log_lun_ext_entry->lunid; 2239 + phys_lun = NULL; 2240 + log_lun = &logdev_list->lun_entries[logical_index++]; 2241 + scsi3addr = log_lun->lunid; 2420 2242 } 2421 2243 2422 2244 if (is_physical_device && pqi_skip_device(scsi3addr)) ··· 2429 2255 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2430 2256 device->is_physical_device = is_physical_device; 2431 2257 if (is_physical_device) { 2432 - device->device_type = phys_lun_ext_entry->device_type; 2258 + device->device_type = phys_lun->device_type; 2433 2259 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) 2434 2260 device->is_expander_smp_device = true; 2435 2261 } else { ··· 2438 2264 } 2439 2265 2440 2266 if (!pqi_is_supported_device(device)) 2267 + 
continue; 2268 + 2269 + /* Do not present disks that the OS cannot fully probe */ 2270 + if (pqi_keep_device_offline(ctrl_info, device)) 2441 2271 continue; 2442 2272 2443 2273 /* Gather information about the device. */ ··· 2454 2276 if (rc) { 2455 2277 if (device->is_physical_device) 2456 2278 dev_warn(&ctrl_info->pci_dev->dev, 2457 - "obtaining device info failed, skipping physical device %016llx\n", 2458 - get_unaligned_be64(&phys_lun_ext_entry->wwid)); 2279 + "obtaining device info failed, skipping physical device %016llx%016llx\n", 2280 + get_unaligned_be64(&phys_lun->wwid[0]), 2281 + get_unaligned_be64(&phys_lun->wwid[8])); 2459 2282 else 2460 2283 dev_warn(&ctrl_info->pci_dev->dev, 2461 2284 "obtaining device info failed, skipping logical device %08x%08x\n", ··· 2469 2290 pqi_assign_bus_target_lun(device); 2470 2291 2471 2292 if (device->is_physical_device) { 2472 - pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry); 2473 - if ((phys_lun_ext_entry->device_flags & 2293 + pqi_set_physical_device_wwid(ctrl_info, device, phys_lun); 2294 + if ((phys_lun->device_flags & 2474 2295 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && 2475 - phys_lun_ext_entry->aio_handle) { 2296 + phys_lun->aio_handle) { 2476 2297 device->aio_enabled = true; 2477 2298 device->aio_handle = 2478 - phys_lun_ext_entry->aio_handle; 2299 + phys_lun->aio_handle; 2479 2300 } 2480 2301 } else { 2481 - memcpy(device->volume_id, log_lun_ext_entry->volume_id, 2302 + memcpy(device->volume_id, log_lun->volume_id, 2482 2303 sizeof(device->volume_id)); 2483 2304 } 2484 2305 2485 2306 if (pqi_is_device_with_sas_address(device)) 2486 - device->sas_address = get_unaligned_be64(&device->wwid); 2307 + device->sas_address = get_unaligned_be64(&device->wwid[8]); 2487 2308 2488 2309 new_device_list[num_valid_devices++] = device; 2489 2310 } ··· 2505 2326 kfree(id_phys); 2506 2327 2507 2328 return rc; 2329 + } 2330 + 2331 + static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2332 
+ { 2333 + unsigned long flags; 2334 + struct pqi_scsi_dev *device; 2335 + struct pqi_scsi_dev *next; 2336 + 2337 + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2338 + 2339 + list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 2340 + scsi_device_list_entry) { 2341 + if (pqi_is_device_added(device)) 2342 + pqi_remove_device(ctrl_info, device); 2343 + list_del(&device->scsi_device_list_entry); 2344 + pqi_free_device(device); 2345 + } 2346 + 2347 + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2508 2348 } 2509 2349 2510 2350 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) ··· 3330 3132 return rc; 3331 3133 } 3332 3134 3333 - static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info) 3135 + static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info, 3136 + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 3334 3137 { 3335 - pqi_take_ctrl_offline(ctrl_info); 3138 + pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason); 3336 3139 } 3337 3140 3338 3141 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) ··· 3351 3152 while (1) { 3352 3153 oq_pi = readl(queue_group->oq_pi); 3353 3154 if (oq_pi >= ctrl_info->num_elements_per_oq) { 3354 - pqi_invalid_response(ctrl_info); 3155 + pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE); 3355 3156 dev_err(&ctrl_info->pci_dev->dev, 3356 3157 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3357 3158 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); ··· 3366 3167 3367 3168 request_id = get_unaligned_le16(&response->request_id); 3368 3169 if (request_id >= ctrl_info->max_io_slots) { 3369 - pqi_invalid_response(ctrl_info); 3170 + pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID); 3370 3171 dev_err(&ctrl_info->pci_dev->dev, 3371 3172 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", 3372 3173 request_id, 
ctrl_info->max_io_slots - 1, oq_pi, oq_ci); ··· 3375 3176 3376 3177 io_request = &ctrl_info->io_request_pool[request_id]; 3377 3178 if (atomic_read(&io_request->refcount) == 0) { 3378 - pqi_invalid_response(ctrl_info); 3179 + pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID); 3379 3180 dev_err(&ctrl_info->pci_dev->dev, 3380 3181 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", 3381 3182 request_id, oq_pi, oq_ci); ··· 3411 3212 pqi_process_io_error(response->header.iu_type, io_request); 3412 3213 break; 3413 3214 default: 3414 - pqi_invalid_response(ctrl_info); 3215 + pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE); 3415 3216 dev_err(&ctrl_info->pci_dev->dev, 3416 3217 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", 3417 3218 response->header.iu_type, oq_pi, oq_ci); ··· 3593 3394 pqi_ofa_free_host_buffer(ctrl_info); 3594 3395 pqi_ctrl_ofa_done(ctrl_info); 3595 3396 pqi_ofa_ctrl_unquiesce(ctrl_info); 3596 - pqi_take_ctrl_offline(ctrl_info); 3397 + pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); 3597 3398 break; 3598 3399 } 3599 3400 } ··· 3718 3519 dev_err(&ctrl_info->pci_dev->dev, 3719 3520 "no heartbeat detected - last heartbeat count: %u\n", 3720 3521 heartbeat_count); 3721 - pqi_take_ctrl_offline(ctrl_info); 3522 + pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT); 3722 3523 return; 3723 3524 } 3724 3525 } else { ··· 3782 3583 while (1) { 3783 3584 oq_pi = readl(event_queue->oq_pi); 3784 3585 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { 3785 - pqi_invalid_response(ctrl_info); 3586 + pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE); 3786 3587 dev_err(&ctrl_info->pci_dev->dev, 3787 3588 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3788 3589 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); ··· 4278 4079 4279 4080 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 4280 4081 while (1) { 4082 + 
msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 4281 4083 status = readb(&pqi_registers->function_and_status_code); 4282 4084 if (status == PQI_STATUS_IDLE) 4283 4085 break; 4284 4086 if (time_after(jiffies, timeout)) 4285 4087 return -ETIMEDOUT; 4286 - msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 4287 4088 } 4288 4089 4289 4090 /* ··· 5948 5749 return rc; 5949 5750 } 5950 5751 5951 - static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 5952 - struct pqi_queue_group *queue_group) 5752 + static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info) 5953 5753 { 5754 + unsigned int i; 5954 5755 unsigned int path; 5955 5756 unsigned long flags; 5956 - bool list_is_empty; 5757 + unsigned int queued_io_count; 5758 + struct pqi_queue_group *queue_group; 5759 + struct pqi_io_request *io_request; 5957 5760 5958 - for (path = 0; path < 2; path++) { 5959 - while (1) { 5960 - spin_lock_irqsave( 5961 - &queue_group->submit_lock[path], flags); 5962 - list_is_empty = 5963 - list_empty(&queue_group->request_list[path]); 5964 - spin_unlock_irqrestore( 5965 - &queue_group->submit_lock[path], flags); 5966 - if (list_is_empty) 5967 - break; 5968 - pqi_check_ctrl_health(ctrl_info); 5969 - if (pqi_ctrl_offline(ctrl_info)) 5970 - return -ENXIO; 5971 - usleep_range(1000, 2000); 5761 + queued_io_count = 0; 5762 + 5763 + for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5764 + queue_group = &ctrl_info->queue_groups[i]; 5765 + for (path = 0; path < 2; path++) { 5766 + spin_lock_irqsave(&queue_group->submit_lock[path], flags); 5767 + list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) 5768 + queued_io_count++; 5769 + spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 5972 5770 } 5973 5771 } 5974 5772 5975 - return 0; 5773 + return queued_io_count; 5976 5774 } 5977 5775 5978 - static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5776 + static unsigned int 
pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info) 5979 5777 { 5980 - int rc; 5981 5778 unsigned int i; 5982 5779 unsigned int path; 5780 + unsigned int nonempty_inbound_queue_count; 5983 5781 struct pqi_queue_group *queue_group; 5984 5782 pqi_index_t iq_pi; 5985 5783 pqi_index_t iq_ci; 5986 5784 5785 + nonempty_inbound_queue_count = 0; 5786 + 5987 5787 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5988 5788 queue_group = &ctrl_info->queue_groups[i]; 5989 - 5990 - rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5991 - if (rc) 5992 - return rc; 5993 - 5994 5789 for (path = 0; path < 2; path++) { 5995 5790 iq_pi = queue_group->iq_pi_copy[path]; 5996 - 5997 - while (1) { 5998 - iq_ci = readl(queue_group->iq_ci[path]); 5999 - if (iq_ci == iq_pi) 6000 - break; 6001 - pqi_check_ctrl_health(ctrl_info); 6002 - if (pqi_ctrl_offline(ctrl_info)) 6003 - return -ENXIO; 6004 - usleep_range(1000, 2000); 6005 - } 5791 + iq_ci = readl(queue_group->iq_ci[path]); 5792 + if (iq_ci != iq_pi) 5793 + nonempty_inbound_queue_count++; 6006 5794 } 6007 5795 } 5796 + 5797 + return nonempty_inbound_queue_count; 5798 + } 5799 + 5800 + #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10 5801 + 5802 + static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5803 + { 5804 + unsigned long start_jiffies; 5805 + unsigned long warning_timeout; 5806 + unsigned int queued_io_count; 5807 + unsigned int nonempty_inbound_queue_count; 5808 + bool displayed_warning; 5809 + 5810 + displayed_warning = false; 5811 + start_jiffies = jiffies; 5812 + warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies; 5813 + 5814 + while (1) { 5815 + queued_io_count = pqi_queued_io_count(ctrl_info); 5816 + nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info); 5817 + if (queued_io_count == 0 && nonempty_inbound_queue_count == 0) 5818 + break; 5819 + pqi_check_ctrl_health(ctrl_info); 5820 + if 
(pqi_ctrl_offline(ctrl_info)) 5821 + return -ENXIO; 5822 + if (time_after(jiffies, warning_timeout)) { 5823 + dev_warn(&ctrl_info->pci_dev->dev, 5824 + "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n", 5825 + jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); 5826 + displayed_warning = true; 5827 + warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies; 5828 + } 5829 + usleep_range(1000, 2000); 5830 + } 5831 + 5832 + if (displayed_warning) 5833 + dev_warn(&ctrl_info->pci_dev->dev, 5834 + "queued I/O drained after waiting for %u seconds\n", 5835 + jiffies_to_msecs(jiffies - start_jiffies) / 1000); 6008 5836 6009 5837 return 0; 6010 5838 } ··· 6098 5872 if (pqi_ctrl_offline(ctrl_info)) 6099 5873 return -ENXIO; 6100 5874 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); 6101 - if (msecs_waiting > timeout_msecs) { 5875 + if (msecs_waiting >= timeout_msecs) { 6102 5876 dev_err(&ctrl_info->pci_dev->dev, 6103 5877 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", 6104 5878 ctrl_info->scsi_host->host_no, device->bus, device->target, ··· 6133 5907 { 6134 5908 int rc; 6135 5909 unsigned int wait_secs; 5910 + int cmds_outstanding; 6136 5911 6137 5912 wait_secs = 0; 6138 5913 ··· 6151 5924 } 6152 5925 6153 5926 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; 6154 - 5927 + cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding); 6155 5928 dev_warn(&ctrl_info->pci_dev->dev, 6156 - "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete\n", 6157 - ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, 6158 - wait_secs); 5929 + "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n", 5930 + ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding); 6159 5931 } 6160 5932 6161 5933 
return rc; ··· 6297 6071 rphy = target_to_rphy(starget); 6298 6072 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 6299 6073 if (device) { 6300 - device->target = sdev_id(sdev); 6301 - device->lun = sdev->lun; 6302 - device->target_lun_valid = true; 6074 + if (device->target_lun_valid) { 6075 + device->ignore_device = true; 6076 + } else { 6077 + device->target = sdev_id(sdev); 6078 + device->lun = sdev->lun; 6079 + device->target_lun_valid = true; 6080 + } 6303 6081 } 6304 6082 } else { 6305 6083 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), ··· 6340 6110 ctrl_info->pci_dev, 0); 6341 6111 } 6342 6112 6113 + static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) 6114 + { 6115 + return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; 6116 + } 6117 + 6343 6118 static int pqi_slave_configure(struct scsi_device *sdev) 6344 6119 { 6120 + int rc = 0; 6345 6121 struct pqi_scsi_dev *device; 6346 6122 6347 6123 device = sdev->hostdata; 6348 6124 device->devtype = sdev->type; 6349 6125 6350 - return 0; 6351 - } 6352 - 6353 - static void pqi_slave_destroy(struct scsi_device *sdev) 6354 - { 6355 - unsigned long flags; 6356 - struct pqi_scsi_dev *device; 6357 - struct pqi_ctrl_info *ctrl_info; 6358 - 6359 - ctrl_info = shost_to_hba(sdev->host); 6360 - 6361 - spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6362 - 6363 - device = sdev->hostdata; 6364 - if (device) { 6365 - sdev->hostdata = NULL; 6366 - if (!list_empty(&device->scsi_device_list_entry)) 6367 - list_del(&device->scsi_device_list_entry); 6126 + if (pqi_is_tape_changer_device(device) && device->ignore_device) { 6127 + rc = -ENXIO; 6128 + device->ignore_device = false; 6368 6129 } 6369 6130 6370 - spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6371 - 6372 - if (device) { 6373 - pqi_dev_info(ctrl_info, "removed", device); 6374 - pqi_free_device(device); 6375 - } 6131 + return rc; 6376 6132 } 6377 6133 6378 6134 static int 
pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) ··· 6847 6631 static DEVICE_ATTR(enable_r6_writes, 0644, 6848 6632 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); 6849 6633 6850 - static struct device_attribute *pqi_shost_attrs[] = { 6851 - &dev_attr_driver_version, 6852 - &dev_attr_firmware_version, 6853 - &dev_attr_model, 6854 - &dev_attr_serial_number, 6855 - &dev_attr_vendor, 6856 - &dev_attr_rescan, 6857 - &dev_attr_lockup_action, 6858 - &dev_attr_enable_stream_detection, 6859 - &dev_attr_enable_r5_writes, 6860 - &dev_attr_enable_r6_writes, 6634 + static struct attribute *pqi_shost_attrs[] = { 6635 + &dev_attr_driver_version.attr, 6636 + &dev_attr_firmware_version.attr, 6637 + &dev_attr_model.attr, 6638 + &dev_attr_serial_number.attr, 6639 + &dev_attr_vendor.attr, 6640 + &dev_attr_rescan.attr, 6641 + &dev_attr_lockup_action.attr, 6642 + &dev_attr_enable_stream_detection.attr, 6643 + &dev_attr_enable_r5_writes.attr, 6644 + &dev_attr_enable_r6_writes.attr, 6861 6645 NULL 6862 6646 }; 6647 + 6648 + ATTRIBUTE_GROUPS(pqi_shost); 6863 6649 6864 6650 static ssize_t pqi_unique_id_show(struct device *dev, 6865 6651 struct device_attribute *attr, char *buffer) ··· 6883 6665 return -ENODEV; 6884 6666 } 6885 6667 6886 - if (device->is_physical_device) { 6887 - memset(unique_id, 0, 8); 6888 - memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid)); 6889 - } else { 6668 + if (device->is_physical_device) 6669 + memcpy(unique_id, device->wwid, sizeof(device->wwid)); 6670 + else 6890 6671 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); 6891 - } 6892 6672 6893 6673 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6894 6674 ··· 7131 6915 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 7132 6916 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); 7133 6917 7134 - static struct device_attribute *pqi_sdev_attrs[] = { 7135 - &dev_attr_lunid, 7136 - 
&dev_attr_unique_id, 7137 - &dev_attr_path_info, 7138 - &dev_attr_sas_address, 7139 - &dev_attr_ssd_smart_path_enabled, 7140 - &dev_attr_raid_level, 7141 - &dev_attr_raid_bypass_cnt, 6918 + static struct attribute *pqi_sdev_attrs[] = { 6919 + &dev_attr_lunid.attr, 6920 + &dev_attr_unique_id.attr, 6921 + &dev_attr_path_info.attr, 6922 + &dev_attr_sas_address.attr, 6923 + &dev_attr_ssd_smart_path_enabled.attr, 6924 + &dev_attr_raid_level.attr, 6925 + &dev_attr_raid_bypass_cnt.attr, 7142 6926 NULL 7143 6927 }; 6928 + 6929 + ATTRIBUTE_GROUPS(pqi_sdev); 7144 6930 7145 6931 static struct scsi_host_template pqi_driver_template = { 7146 6932 .module = THIS_MODULE, ··· 7156 6938 .ioctl = pqi_ioctl, 7157 6939 .slave_alloc = pqi_slave_alloc, 7158 6940 .slave_configure = pqi_slave_configure, 7159 - .slave_destroy = pqi_slave_destroy, 7160 6941 .map_queues = pqi_map_queues, 7161 - .sdev_attrs = pqi_sdev_attrs, 7162 - .shost_attrs = pqi_shost_attrs, 6942 + .sdev_groups = pqi_sdev_groups, 6943 + .shost_groups = pqi_shost_groups, 7163 6944 }; 7164 6945 7165 6946 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) ··· 7518 7301 ctrl_info->unique_wwid_in_report_phys_lun_supported = 7519 7302 firmware_feature->enabled; 7520 7303 break; 7304 + case PQI_FIRMWARE_FEATURE_FW_TRIAGE: 7305 + ctrl_info->firmware_triage_supported = firmware_feature->enabled; 7306 + pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); 7307 + break; 7308 + case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5: 7309 + ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; 7310 + break; 7521 7311 } 7522 7312 7523 7313 pqi_firmware_feature_status(ctrl_info, firmware_feature); ··· 7618 7394 { 7619 7395 .feature_name = "Unique WWID in Report Physical LUN", 7620 7396 .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN, 7397 + .feature_status = pqi_ctrl_update_feature_flags, 7398 + }, 7399 + { 7400 + .feature_name = "Firmware Triage", 7401 + .feature_bit = 
PQI_FIRMWARE_FEATURE_FW_TRIAGE, 7402 + .feature_status = pqi_ctrl_update_feature_flags, 7403 + }, 7404 + { 7405 + .feature_name = "RPL Extended Formats 4 and 5", 7406 + .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5, 7621 7407 .feature_status = pqi_ctrl_update_feature_flags, 7622 7408 }, 7623 7409 }; ··· 7730 7496 ctrl_info->raid_iu_timeout_supported = false; 7731 7497 ctrl_info->tmf_iu_timeout_supported = false; 7732 7498 ctrl_info->unique_wwid_in_report_phys_lun_supported = false; 7499 + ctrl_info->firmware_triage_supported = false; 7500 + ctrl_info->rpl_extended_format_4_5_supported = false; 7733 7501 } 7734 7502 7735 7503 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) ··· 7863 7627 u32 product_id; 7864 7628 7865 7629 if (reset_devices) { 7630 + if (pqi_is_fw_triage_supported(ctrl_info)) { 7631 + rc = sis_wait_for_fw_triage_completion(ctrl_info); 7632 + if (rc) 7633 + return rc; 7634 + } 7866 7635 sis_soft_reset(ctrl_info); 7867 7636 msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ); 7868 7637 } else { ··· 8410 8169 { 8411 8170 pqi_cancel_rescan_worker(ctrl_info); 8412 8171 pqi_cancel_update_time_worker(ctrl_info); 8172 + pqi_remove_all_scsi_devices(ctrl_info); 8413 8173 pqi_unregister_scsi(ctrl_info); 8414 8174 if (ctrl_info->pqi_mode_enabled) 8415 8175 pqi_revert_to_sis_mode(ctrl_info); ··· 8632 8390 unsigned int i; 8633 8391 struct pqi_io_request *io_request; 8634 8392 struct scsi_cmnd *scmd; 8393 + struct scsi_device *sdev; 8635 8394 8636 8395 for (i = 0; i < ctrl_info->max_io_slots; i++) { 8637 8396 io_request = &ctrl_info->io_request_pool[i]; ··· 8641 8398 8642 8399 scmd = io_request->scmd; 8643 8400 if (scmd) { 8644 - set_host_byte(scmd, DID_NO_CONNECT); 8401 + sdev = scmd->device; 8402 + if (!sdev || !scsi_device_online(sdev)) { 8403 + pqi_free_io_request(io_request); 8404 + continue; 8405 + } else { 8406 + set_host_byte(scmd, DID_NO_CONNECT); 8407 + } 8645 8408 } else { 8646 8409 io_request->status = -ENXIO; 8647 8410 
io_request->error_info = ··· 8679 8430 pqi_take_ctrl_offline_deferred(ctrl_info); 8680 8431 } 8681 8432 8682 - static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 8433 + static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, 8434 + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 8683 8435 { 8684 8436 if (!ctrl_info->controller_online) 8685 8437 return; ··· 8689 8439 ctrl_info->pqi_mode_enabled = false; 8690 8440 pqi_ctrl_block_requests(ctrl_info); 8691 8441 if (!pqi_disable_ctrl_shutdown) 8692 - sis_shutdown_ctrl(ctrl_info); 8442 + sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason); 8693 8443 pci_disable_device(ctrl_info->pci_dev); 8694 8444 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); 8695 8445 schedule_work(&ctrl_info->ctrl_offline_work); ··· 9293 9043 }, 9294 9044 { 9295 9045 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9046 + PCI_VENDOR_ID_ADAPTEC2, 0x14a2) 9047 + }, 9048 + { 9049 + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9296 9050 PCI_VENDOR_ID_ADAPTEC2, 0x14b0) 9297 9051 }, 9298 9052 { ··· 9528 9274 sis_product_identifier) != 0xb4); 9529 9275 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 9530 9276 sis_firmware_status) != 0xbc); 9277 + BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 9278 + sis_ctrl_shutdown_reason_code) != 0xcc); 9531 9279 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 9532 9280 sis_mailbox) != 0x1000); 9533 9281 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+3 -3
drivers/scsi/smartpqi/smartpqi_sas_transport.c
··· 343 343 } 344 344 345 345 if (found_device->devtype == TYPE_ENCLOSURE) { 346 - *identifier = get_unaligned_be64(&found_device->wwid); 346 + *identifier = get_unaligned_be64(&found_device->wwid[8]); 347 347 rc = 0; 348 348 goto out; 349 349 } ··· 364 364 memcmp(device->phys_connector, 365 365 found_device->phys_connector, 2) == 0) { 366 366 *identifier = 367 - get_unaligned_be64(&device->wwid); 367 + get_unaligned_be64(&device->wwid[8]); 368 368 rc = 0; 369 369 goto out; 370 370 } ··· 380 380 if (device->devtype == TYPE_ENCLOSURE && 381 381 CISS_GET_DRIVE_NUMBER(device->scsi3addr) == 382 382 PQI_VSEP_CISS_BTL) { 383 - *identifier = get_unaligned_be64(&device->wwid); 383 + *identifier = get_unaligned_be64(&device->wwid[8]); 384 384 rc = 0; 385 385 goto out; 386 386 }
+57 -3
drivers/scsi/smartpqi/smartpqi_sis.c
··· 51 51 #define SIS_BASE_STRUCT_REVISION 9 52 52 #define SIS_BASE_STRUCT_ALIGNMENT 16 53 53 54 + #define SIS_CTRL_KERNEL_FW_TRIAGE 0x3 54 55 #define SIS_CTRL_KERNEL_UP 0x80 55 56 #define SIS_CTRL_KERNEL_PANIC 0x100 56 57 #define SIS_CTRL_READY_TIMEOUT_SECS 180 57 58 #define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90 58 59 #define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10 60 + 61 + enum sis_fw_triage_status { 62 + FW_TRIAGE_NOT_STARTED = 0, 63 + FW_TRIAGE_STARTED, 64 + FW_TRIAGE_COND_INVALID, 65 + FW_TRIAGE_COMPLETED 66 + }; 59 67 60 68 #pragma pack(1) 61 69 ··· 397 389 sis_set_doorbell_bit(ctrl_info, SIS_ENABLE_INTX); 398 390 } 399 391 400 - void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info) 392 + void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info, 393 + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 401 394 { 402 395 if (readl(&ctrl_info->registers->sis_firmware_status) & 403 396 SIS_CTRL_KERNEL_PANIC) 404 397 return; 405 398 406 - writel(SIS_TRIGGER_SHUTDOWN, 407 - &ctrl_info->registers->sis_host_to_ctrl_doorbell); 399 + if (ctrl_info->firmware_triage_supported) 400 + writel(ctrl_shutdown_reason, &ctrl_info->registers->sis_ctrl_shutdown_reason_code); 401 + 402 + writel(SIS_TRIGGER_SHUTDOWN, &ctrl_info->registers->sis_host_to_ctrl_doorbell); 408 403 } 409 404 410 405 int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info) ··· 430 419 return readl(&ctrl_info->registers->sis_driver_scratch); 431 420 } 432 421 422 + static inline enum sis_fw_triage_status 423 + sis_read_firmware_triage_status(struct pqi_ctrl_info *ctrl_info) 424 + { 425 + return ((enum sis_fw_triage_status)(readl(&ctrl_info->registers->sis_firmware_status) & 426 + SIS_CTRL_KERNEL_FW_TRIAGE)); 427 + } 428 + 433 429 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) 434 430 { 435 431 writel(SIS_SOFT_RESET, 436 432 &ctrl_info->registers->sis_host_to_ctrl_doorbell); 433 + } 434 + 435 + #define SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS 300 436 + #define SIS_FW_TRIAGE_STATUS_POLL_INTERVAL_SECS 1 437 + 
438 + int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info) 439 + { 440 + int rc; 441 + enum sis_fw_triage_status status; 442 + unsigned long timeout; 443 + 444 + timeout = (SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies; 445 + while (1) { 446 + status = sis_read_firmware_triage_status(ctrl_info); 447 + if (status == FW_TRIAGE_COND_INVALID) { 448 + dev_err(&ctrl_info->pci_dev->dev, 449 + "firmware triage condition invalid\n"); 450 + rc = -EINVAL; 451 + break; 452 + } else if (status == FW_TRIAGE_NOT_STARTED || 453 + status == FW_TRIAGE_COMPLETED) { 454 + rc = 0; 455 + break; 456 + } 457 + 458 + if (time_after(jiffies, timeout)) { 459 + dev_err(&ctrl_info->pci_dev->dev, 460 + "timed out waiting for firmware triage status\n"); 461 + rc = -ETIMEDOUT; 462 + break; 463 + } 464 + 465 + ssleep(SIS_FW_TRIAGE_STATUS_POLL_INTERVAL_SECS); 466 + } 467 + 468 + return rc; 437 469 } 438 470 439 471 static void __attribute__((unused)) verify_structures(void)
+3 -1
drivers/scsi/smartpqi/smartpqi_sis.h
··· 21 21 int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info); 22 22 void sis_enable_msix(struct pqi_ctrl_info *ctrl_info); 23 23 void sis_enable_intx(struct pqi_ctrl_info *ctrl_info); 24 - void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info); 24 + void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info, 25 + enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); 25 26 int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info); 26 27 int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info); 27 28 void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value); 28 29 u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); 29 30 void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); 30 31 u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info); 32 + int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info); 31 33 32 34 #endif /* _SMARTPQI_SIS_H */
+1 -1
drivers/scsi/snic/snic.h
··· 374 374 void snic_glob_cleanup(void); 375 375 376 376 extern struct workqueue_struct *snic_event_queue; 377 - extern struct device_attribute *snic_attrs[]; 377 + extern const struct attribute_group *snic_host_groups[]; 378 378 379 379 int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); 380 380 int snic_abort_cmd(struct scsi_cmnd *);
+14 -5
drivers/scsi/snic/snic_attrs.c
··· 68 68 static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL); 69 69 static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL); 70 70 71 - struct device_attribute *snic_attrs[] = { 72 - &dev_attr_snic_sym_name, 73 - &dev_attr_snic_state, 74 - &dev_attr_drv_version, 75 - &dev_attr_link_state, 71 + static struct attribute *snic_host_attrs[] = { 72 + &dev_attr_snic_sym_name.attr, 73 + &dev_attr_snic_state.attr, 74 + &dev_attr_drv_version.attr, 75 + &dev_attr_link_state.attr, 76 76 NULL, 77 + }; 78 + 79 + static const struct attribute_group snic_host_attr_group = { 80 + .attrs = snic_host_attrs 81 + }; 82 + 83 + const struct attribute_group *snic_host_groups[] = { 84 + &snic_host_attr_group, 85 + NULL 77 86 };
+1 -1
drivers/scsi/snic/snic_main.c
··· 129 129 .can_queue = SNIC_MAX_IO_REQ, 130 130 .sg_tablesize = SNIC_MAX_SG_DESC_CNT, 131 131 .max_sectors = 0x800, 132 - .shost_attrs = snic_attrs, 132 + .shost_groups = snic_host_groups, 133 133 .track_queue_depth = 1, 134 134 .cmd_size = sizeof(struct snic_internal_io_state), 135 135 .proc_name = "snic_scsi",
+14 -19
drivers/scsi/snic/snic_scsi.c
··· 342 342 SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id); 343 343 atomic64_inc(&snic->s_stats.misc.tgt_not_rdy); 344 344 sc->result = ret; 345 - sc->scsi_done(sc); 345 + scsi_done(sc); 346 346 347 347 return 0; 348 348 } ··· 676 676 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); 677 677 678 678 679 - if (sc->scsi_done) 680 - sc->scsi_done(sc); 679 + scsi_done(sc); 681 680 682 681 snic_stats_update_io_cmpl(&snic->s_stats); 683 682 } /* end of snic_icmnd_cmpl_handler */ ··· 854 855 855 856 snic_release_req_buf(snic, rqi, sc); 856 857 857 - if (sc->scsi_done) { 858 - SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, 859 - jiffies_to_msecs(jiffies - start_time), 860 - (ulong) fwreq, SNIC_TRC_CMD(sc), 861 - SNIC_TRC_CMD_STATE_FLAGS(sc)); 858 + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, 859 + jiffies_to_msecs(jiffies - start_time), 860 + (ulong) fwreq, SNIC_TRC_CMD(sc), 861 + SNIC_TRC_CMD_STATE_FLAGS(sc)); 862 862 863 - sc->scsi_done(sc); 864 - } 863 + scsi_done(sc); 865 864 866 865 break; 867 866 ··· 1472 1475 * Call scsi_done to complete the IO. 
1473 1476 */ 1474 1477 sc->result = (DID_ERROR << 16); 1475 - sc->scsi_done(sc); 1478 + scsi_done(sc); 1476 1479 break; 1477 1480 1478 1481 default: ··· 1852 1855 snic_release_req_buf(snic, rqi, sc); 1853 1856 1854 1857 sc->result = (DID_ERROR << 16); 1855 - sc->scsi_done(sc); 1858 + scsi_done(sc); 1856 1859 1857 1860 ret = 0; 1858 1861 ··· 2497 2500 /* Update IO stats */ 2498 2501 snic_stats_update_io_cmpl(&snic->s_stats); 2499 2502 2500 - if (sc->scsi_done) { 2501 - SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 2502 - jiffies_to_msecs(jiffies - st_time), 0, 2503 - SNIC_TRC_CMD(sc), 2504 - SNIC_TRC_CMD_STATE_FLAGS(sc)); 2503 + SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 2504 + jiffies_to_msecs(jiffies - st_time), 0, 2505 + SNIC_TRC_CMD(sc), 2506 + SNIC_TRC_CMD_STATE_FLAGS(sc)); 2505 2507 2506 - sc->scsi_done(sc); 2507 - } 2508 + scsi_done(sc); 2508 2509 } 2509 2510 } /* end of snic_scsi_cleanup */ 2510 2511
+6 -1
drivers/scsi/sr.c
··· 728 728 dev_set_drvdata(dev, cd); 729 729 disk->flags |= GENHD_FL_REMOVABLE; 730 730 sr_revalidate_disk(cd); 731 - device_add_disk(&sdev->sdev_gendev, disk, NULL); 731 + 732 + error = device_add_disk(&sdev->sdev_gendev, disk, NULL); 733 + if (error) { 734 + kref_put(&cd->kref, sr_kref_release); 735 + goto fail; 736 + } 732 737 733 738 sdev_printk(KERN_DEBUG, sdev, 734 739 "Attached scsi CD-ROM %s\n", cd->cdi.name);
+4 -6
drivers/scsi/stex.c
··· 574 574 if (ccb->cmd) { 575 575 scsi_dma_unmap(ccb->cmd); 576 576 ccb->cmd->result = status << 16; 577 - ccb->cmd->scsi_done(ccb->cmd); 577 + scsi_done(ccb->cmd); 578 578 ccb->cmd = NULL; 579 579 } 580 580 } ··· 590 590 return 0; 591 591 } 592 592 593 - static int 594 - stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 593 + static int stex_queuecommand_lck(struct scsi_cmnd *cmd) 595 594 { 595 + void (*done)(struct scsi_cmnd *) = scsi_done; 596 596 struct st_hba *hba; 597 597 struct Scsi_Host *host; 598 598 unsigned int id, lun; ··· 688 688 break; 689 689 } 690 690 691 - cmd->scsi_done = done; 692 - 693 691 tag = scsi_cmd_to_rq(cmd)->tag; 694 692 695 693 if (unlikely(tag >= host->can_queue)) ··· 762 764 } 763 765 764 766 cmd->result = result; 765 - cmd->scsi_done(cmd); 767 + scsi_done(cmd); 766 768 } 767 769 768 770 static void stex_copy_data(struct st_ccb *ccb,
+2 -2
drivers/scsi/storvsc_drv.c
··· 1154 1154 scsi_set_resid(scmnd, 1155 1155 cmd_request->payload->range.len - data_transfer_length); 1156 1156 1157 - scmnd->scsi_done(scmnd); 1157 + scsi_done(scmnd); 1158 1158 1159 1159 if (payload_sz > 1160 1160 sizeof(struct vmbus_channel_packet_multipage_buffer)) ··· 1767 1767 * future versions of the host. 1768 1768 */ 1769 1769 if (!storvsc_scsi_cmd_ok(scmnd)) { 1770 - scmnd->scsi_done(scmnd); 1770 + scsi_done(scmnd); 1771 1771 return 0; 1772 1772 } 1773 1773 }
+2 -4
drivers/scsi/sym53c8xx_2/sym_glue.c
··· 133 133 complete(ucmd->eh_done); 134 134 135 135 scsi_dma_unmap(cmd); 136 - cmd->scsi_done(cmd); 136 + scsi_done(cmd); 137 137 } 138 138 139 139 /* ··· 486 486 * queuecommand method. Entered with the host adapter lock held and 487 487 * interrupts disabled. 488 488 */ 489 - static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd, 490 - void (*done)(struct scsi_cmnd *)) 489 + static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd) 491 490 { 492 491 struct sym_hcb *np = SYM_SOFTC_PTR(cmd); 493 492 struct sym_ucmd *ucp = SYM_UCMD_PTR(cmd); 494 493 int sts = 0; 495 494 496 - cmd->scsi_done = done; 497 495 memset(ucp, 0, sizeof(*ucp)); 498 496 499 497 /*
+14 -5
drivers/scsi/ufs/Kconfig
··· 165 165 If unsure, say N. 166 166 167 167 config SCSI_UFS_EXYNOS 168 - tristate "EXYNOS specific hooks to UFS controller platform driver" 168 + tristate "Exynos specific hooks to UFS controller platform driver" 169 169 depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST) 170 170 help 171 - This selects the EXYNOS specific additions to UFSHCD platform driver. 172 - UFS host on EXYNOS includes HCI and UNIPRO layer, and associates with 173 - UFS-PHY driver. 171 + This selects the Samsung Exynos SoC specific additions to UFSHCD 172 + platform driver. UFS host on Samsung Exynos SoC includes HCI and 173 + UNIPRO layer, and associates with UFS-PHY driver. 174 174 175 - Select this if you have UFS host controller on EXYNOS chipset. 175 + Select this if you have UFS host controller on Samsung Exynos SoC. 176 176 If unsure, say N. 177 177 178 178 config SCSI_UFS_CRYPTO ··· 199 199 help 200 200 Enable fault injection support in the UFS driver. This makes it easier 201 201 to test the UFS error handler and abort handler. 202 + 203 + config SCSI_UFS_HWMON 204 + bool "UFS Temperature Notification" 205 + depends on SCSI_UFSHCD=HWMON || HWMON=y 206 + help 207 + This provides support for UFS hardware monitoring. If enabled, 208 + a hardware monitoring device will be created for the UFS device. 209 + 210 + If unsure, say N.
+1
drivers/scsi/ufs/Makefile
··· 10 10 ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o 11 11 ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o 12 12 ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o 13 + ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o 13 14 14 15 obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o 15 16 obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
+5 -1
drivers/scsi/ufs/ufs-exynos.c
··· 1176 1176 } 1177 1177 } 1178 1178 1179 - static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) 1179 + static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, 1180 + enum ufs_notify_change_status status) 1180 1181 { 1181 1182 struct exynos_ufs *ufs = ufshcd_get_variant(hba); 1183 + 1184 + if (status == PRE_CHANGE) 1185 + return 0; 1182 1186 1183 1187 if (!ufshcd_is_link_active(hba)) 1184 1188 phy_power_off(ufs->phy);
+5 -1
drivers/scsi/ufs/ufs-hisi.c
··· 396 396 return ret; 397 397 } 398 398 399 - static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) 399 + static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, 400 + enum ufs_notify_change_status status) 400 401 { 401 402 struct ufs_hisi_host *host = ufshcd_get_variant(hba); 403 + 404 + if (status == PRE_CHANGE) 405 + return 0; 402 406 403 407 if (pm_op == UFS_RUNTIME_PM) 404 408 return 0;
+210
drivers/scsi/ufs/ufs-hwmon.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * UFS hardware monitoring support 4 + * Copyright (c) 2021, Western Digital Corporation 5 + */ 6 + 7 + #include <linux/hwmon.h> 8 + #include <linux/units.h> 9 + 10 + #include "ufshcd.h" 11 + 12 + struct ufs_hwmon_data { 13 + struct ufs_hba *hba; 14 + u8 mask; 15 + }; 16 + 17 + static int ufs_read_temp_enable(struct ufs_hba *hba, u8 mask, long *val) 18 + { 19 + u32 ee_mask; 20 + int err; 21 + 22 + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0, 23 + &ee_mask); 24 + if (err) 25 + return err; 26 + 27 + *val = (mask & ee_mask & MASK_EE_TOO_HIGH_TEMP) || (mask & ee_mask & MASK_EE_TOO_LOW_TEMP); 28 + 29 + return 0; 30 + } 31 + 32 + static int ufs_get_temp(struct ufs_hba *hba, enum attr_idn idn, long *val) 33 + { 34 + u32 value; 35 + int err; 36 + 37 + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, idn, 0, 0, &value); 38 + if (err) 39 + return err; 40 + 41 + if (value == 0) 42 + return -ENODATA; 43 + 44 + *val = ((long)value - 80) * MILLIDEGREE_PER_DEGREE; 45 + 46 + return 0; 47 + } 48 + 49 + static int ufs_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, 50 + long *val) 51 + { 52 + struct ufs_hwmon_data *data = dev_get_drvdata(dev); 53 + struct ufs_hba *hba = data->hba; 54 + int err; 55 + 56 + down(&hba->host_sem); 57 + 58 + if (!ufshcd_is_user_access_allowed(hba)) { 59 + up(&hba->host_sem); 60 + return -EBUSY; 61 + } 62 + 63 + ufshcd_rpm_get_sync(hba); 64 + 65 + switch (attr) { 66 + case hwmon_temp_enable: 67 + err = ufs_read_temp_enable(hba, data->mask, val); 68 + 69 + break; 70 + case hwmon_temp_crit: 71 + err = ufs_get_temp(hba, QUERY_ATTR_IDN_HIGH_TEMP_BOUND, val); 72 + 73 + break; 74 + case hwmon_temp_lcrit: 75 + err = ufs_get_temp(hba, QUERY_ATTR_IDN_LOW_TEMP_BOUND, val); 76 + 77 + break; 78 + case hwmon_temp_input: 79 + err = ufs_get_temp(hba, QUERY_ATTR_IDN_CASE_ROUGH_TEMP, val); 80 + 81 + break; 82 + default: 83 + err = 
-EOPNOTSUPP; 84 + 85 + break; 86 + } 87 + 88 + ufshcd_rpm_put_sync(hba); 89 + 90 + up(&hba->host_sem); 91 + 92 + return err; 93 + } 94 + 95 + static int ufs_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, 96 + long val) 97 + { 98 + struct ufs_hwmon_data *data = dev_get_drvdata(dev); 99 + struct ufs_hba *hba = data->hba; 100 + int err; 101 + 102 + if (attr != hwmon_temp_enable) 103 + return -EINVAL; 104 + 105 + if (val != 0 && val != 1) 106 + return -EINVAL; 107 + 108 + down(&hba->host_sem); 109 + 110 + if (!ufshcd_is_user_access_allowed(hba)) { 111 + up(&hba->host_sem); 112 + return -EBUSY; 113 + } 114 + 115 + ufshcd_rpm_get_sync(hba); 116 + 117 + if (val == 1) 118 + err = ufshcd_update_ee_usr_mask(hba, MASK_EE_URGENT_TEMP, 0); 119 + else 120 + err = ufshcd_update_ee_usr_mask(hba, 0, MASK_EE_URGENT_TEMP); 121 + 122 + ufshcd_rpm_put_sync(hba); 123 + 124 + up(&hba->host_sem); 125 + 126 + return err; 127 + } 128 + 129 + static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr, 130 + int channel) 131 + { 132 + if (type != hwmon_temp) 133 + return 0; 134 + 135 + switch (attr) { 136 + case hwmon_temp_enable: 137 + return 0644; 138 + case hwmon_temp_crit: 139 + case hwmon_temp_lcrit: 140 + case hwmon_temp_input: 141 + return 0444; 142 + default: 143 + break; 144 + } 145 + return 0; 146 + } 147 + 148 + static const struct hwmon_channel_info *ufs_hwmon_info[] = { 149 + HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT), 150 + NULL 151 + }; 152 + 153 + static const struct hwmon_ops ufs_hwmon_ops = { 154 + .is_visible = ufs_hwmon_is_visible, 155 + .read = ufs_hwmon_read, 156 + .write = ufs_hwmon_write, 157 + }; 158 + 159 + static const struct hwmon_chip_info ufs_hwmon_hba_info = { 160 + .ops = &ufs_hwmon_ops, 161 + .info = ufs_hwmon_info, 162 + }; 163 + 164 + void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) 165 + { 166 + struct device *dev = hba->dev; 167 + struct 
ufs_hwmon_data *data; 168 + struct device *hwmon; 169 + 170 + data = kzalloc(sizeof(*data), GFP_KERNEL); 171 + if (!data) 172 + return; 173 + 174 + data->hba = hba; 175 + data->mask = mask; 176 + 177 + hwmon = hwmon_device_register_with_info(dev, "ufs", data, &ufs_hwmon_hba_info, NULL); 178 + if (IS_ERR(hwmon)) { 179 + dev_warn(dev, "Failed to instantiate hwmon device\n"); 180 + kfree(data); 181 + return; 182 + } 183 + 184 + hba->hwmon_device = hwmon; 185 + } 186 + 187 + void ufs_hwmon_remove(struct ufs_hba *hba) 188 + { 189 + struct ufs_hwmon_data *data; 190 + 191 + if (!hba->hwmon_device) 192 + return; 193 + 194 + data = dev_get_drvdata(hba->hwmon_device); 195 + hwmon_device_unregister(hba->hwmon_device); 196 + hba->hwmon_device = NULL; 197 + kfree(data); 198 + } 199 + 200 + void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) 201 + { 202 + if (!hba->hwmon_device) 203 + return; 204 + 205 + if (ee_mask & MASK_EE_TOO_HIGH_TEMP) 206 + hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_max_alarm, 0); 207 + 208 + if (ee_mask & MASK_EE_TOO_LOW_TEMP) 209 + hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_min_alarm, 0); 210 + }
+99 -12
drivers/scsi/ufs/ufs-mediatek.c
··· 15 15 #include <linux/platform_device.h> 16 16 #include <linux/regulator/consumer.h> 17 17 #include <linux/reset.h> 18 + #include <linux/sched/clock.h> 18 19 #include <linux/soc/mediatek/mtk_sip_svc.h> 19 20 20 21 #include "ufshcd.h" ··· 247 246 248 247 if (on) { 249 248 ufs_mtk_ref_clk_notify(on, res); 250 - ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10); 251 249 ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); 252 250 } else { 251 + ufshcd_delay_us(host->ref_clk_gating_wait_us, 10); 253 252 ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL); 254 253 } 255 254 ··· 274 273 275 274 out: 276 275 host->ref_clk_enabled = on; 277 - if (!on) { 278 - ufshcd_delay_us(host->ref_clk_gating_wait_us, 10); 276 + if (on) 277 + ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10); 278 + else 279 279 ufs_mtk_ref_clk_notify(on, res); 280 - } 281 280 282 281 return 0; 283 282 } 284 283 285 284 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba, 286 - u16 gating_us, u16 ungating_us) 285 + u16 gating_us) 287 286 { 288 287 struct ufs_mtk_host *host = ufshcd_get_variant(hba); 289 288 ··· 294 293 host->ref_clk_gating_wait_us = gating_us; 295 294 } 296 295 297 - host->ref_clk_ungating_wait_us = ungating_us; 296 + host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US; 297 + } 298 + 299 + static void ufs_mtk_dbg_sel(struct ufs_hba *hba) 300 + { 301 + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 302 + 303 + if (((host->ip_ver >> 16) & 0xFF) >= 0x36) { 304 + ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL); 305 + ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0); 306 + ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1); 307 + ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2); 308 + ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3); 309 + } else { 310 + ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); 311 + } 312 + } 313 + 314 + static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, 315 + unsigned long retry_ms) 316 + { 317 + u64 timeout, 
time_checked; 318 + u32 val, sm; 319 + bool wait_idle; 320 + 321 + /* cannot use plain ktime_get() in suspend */ 322 + timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL; 323 + 324 + /* wait a specific time after check base */ 325 + udelay(10); 326 + wait_idle = false; 327 + 328 + do { 329 + time_checked = ktime_get_mono_fast_ns(); 330 + ufs_mtk_dbg_sel(hba); 331 + val = ufshcd_readl(hba, REG_UFS_PROBE); 332 + 333 + sm = val & 0x1f; 334 + 335 + /* 336 + * if state is in H8 enter and H8 enter confirm 337 + * wait until return to idle state. 338 + */ 339 + if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) { 340 + wait_idle = true; 341 + udelay(50); 342 + continue; 343 + } else if (!wait_idle) 344 + break; 345 + 346 + if (wait_idle && (sm == VS_HCE_BASE)) 347 + break; 348 + } while (time_checked < timeout); 349 + 350 + if (wait_idle && sm != VS_HCE_BASE) 351 + dev_info(hba->dev, "wait idle tmo: 0x%x\n", val); 298 352 } 299 353 300 354 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, ··· 361 305 timeout = ktime_add_ms(ktime_get(), max_wait_ms); 362 306 do { 363 307 time_checked = ktime_get(); 364 - ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); 308 + ufs_mtk_dbg_sel(hba); 365 309 val = ufshcd_readl(hba, REG_UFS_PROBE); 366 310 val = val >> 28; 367 311 ··· 745 689 ufs_mtk_mphy_power_on(hba, true); 746 690 ufs_mtk_setup_clocks(hba, true, POST_CHANGE); 747 691 692 + host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); 693 + 748 694 goto out; 749 695 750 696 out_variant_clear: ··· 990 932 REGULATOR_MODE_NORMAL); 991 933 } 992 934 993 - static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) 935 + static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) 936 + { 937 + unsigned long flags; 938 + int ret; 939 + 940 + /* disable auto-hibern8 */ 941 + spin_lock_irqsave(hba->host->host_lock, flags); 942 + ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); 943 + spin_unlock_irqrestore(hba->host->host_lock, flags); 944 + 945 + /* wait 
host return to idle state when auto-hibern8 off */ 946 + ufs_mtk_wait_idle_state(hba, 5); 947 + 948 + ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); 949 + if (ret) 950 + dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret); 951 + } 952 + 953 + static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, 954 + enum ufs_notify_change_status status) 994 955 { 995 956 int err; 996 957 struct arm_smccc_res res; 958 + 959 + if (status == PRE_CHANGE) { 960 + if (!ufshcd_is_auto_hibern8_supported(hba)) 961 + return 0; 962 + ufs_mtk_auto_hibern8_disable(hba); 963 + return 0; 964 + } 997 965 998 966 if (ufshcd_is_link_hibern8(hba)) { 999 967 err = ufs_mtk_link_set_lpm(hba); ··· 1085 1001 "MPHY Ctrl "); 1086 1002 1087 1003 /* Direct debugging information to REG_MTK_PROBE */ 1088 - ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); 1004 + ufs_mtk_dbg_sel(hba); 1089 1005 ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe "); 1090 1006 } 1091 1007 ··· 1103 1019 * requirements. 1104 1020 */ 1105 1021 if (mid == UFS_VENDOR_SAMSUNG) 1106 - ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1); 1022 + ufs_mtk_setup_ref_clk_wait_us(hba, 1); 1107 1023 else if (mid == UFS_VENDOR_SKHYNIX) 1108 - ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30); 1024 + ufs_mtk_setup_ref_clk_wait_us(hba, 30); 1109 1025 else if (mid == UFS_VENDOR_TOSHIBA) 1110 - ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32); 1026 + ufs_mtk_setup_ref_clk_wait_us(hba, 100); 1027 + else 1028 + ufs_mtk_setup_ref_clk_wait_us(hba, 1029 + REFCLK_DEFAULT_WAIT_US); 1111 1030 1112 1031 return 0; 1113 1032 }
+27
drivers/scsi/ufs/ufs-mediatek.h
··· 15 15 #define REG_UFS_REFCLK_CTRL 0x144 16 16 #define REG_UFS_EXTREG 0x2100 17 17 #define REG_UFS_MPHYCTRL 0x2200 18 + #define REG_UFS_MTK_IP_VER 0x2240 18 19 #define REG_UFS_REJECT_MON 0x22AC 19 20 #define REG_UFS_DEBUG_SEL 0x22C0 20 21 #define REG_UFS_PROBE 0x22C8 22 + #define REG_UFS_DEBUG_SEL_B0 0x22D0 23 + #define REG_UFS_DEBUG_SEL_B1 0x22D4 24 + #define REG_UFS_DEBUG_SEL_B2 0x22D8 25 + #define REG_UFS_DEBUG_SEL_B3 0x22DC 21 26 22 27 /* 23 28 * Ref-clk control ··· 34 29 #define REFCLK_ACK BIT(1) 35 30 36 31 #define REFCLK_REQ_TIMEOUT_US 3000 32 + #define REFCLK_DEFAULT_WAIT_US 32 37 33 38 34 /* 39 35 * Other attributes ··· 53 47 VS_LINK_HIBERN8 = 3, 54 48 VS_LINK_LOST = 4, 55 49 VS_LINK_CFG = 5, 50 + }; 51 + 52 + /* 53 + * Vendor specific host controller state 54 + */ 55 + enum { 56 + VS_HCE_RESET = 0, 57 + VS_HCE_BASE = 1, 58 + VS_HCE_OOCPR_WAIT = 2, 59 + VS_HCE_DME_RESET = 3, 60 + VS_HCE_MIDDLE = 4, 61 + VS_HCE_DME_ENABLE = 5, 62 + VS_HCE_DEFAULTS = 6, 63 + VS_HIB_IDLEEN = 7, 64 + VS_HIB_ENTER = 8, 65 + VS_HIB_ENTER_CONF = 9, 66 + VS_HIB_MIDDLE = 10, 67 + VS_HIB_WAITTIMER = 11, 68 + VS_HIB_EXIT_CONF = 12, 69 + VS_HIB_EXIT = 13, 56 70 }; 57 71 58 72 /* ··· 139 113 bool ref_clk_enabled; 140 114 u16 ref_clk_ungating_wait_us; 141 115 u16 ref_clk_gating_wait_us; 116 + u32 ip_ver; 142 117 }; 143 118 144 119 #endif /* !_UFS_MEDIATEK_H */
+17 -4
drivers/scsi/ufs/ufs-qcom.c
··· 589 589 gpiod_set_value_cansleep(host->device_reset, asserted); 590 590 } 591 591 592 - static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) 592 + static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, 593 + enum ufs_notify_change_status status) 593 594 { 594 595 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 595 596 struct phy *phy = host->generic_phy; 597 + 598 + if (status == PRE_CHANGE) 599 + return 0; 596 600 597 601 if (ufs_qcom_is_link_off(hba)) { 598 602 /* ··· 892 888 enum ufs_notify_change_status status) 893 889 { 894 890 struct ufs_qcom_host *host = ufshcd_get_variant(hba); 895 - int err = 0; 896 891 897 892 /* 898 893 * In case ufs_qcom_init() is not yet done, simply ignore. ··· 919 916 break; 920 917 } 921 918 922 - return err; 919 + return 0; 923 920 } 924 921 925 922 static int ··· 1216 1213 int err = 0; 1217 1214 1218 1215 if (status == PRE_CHANGE) { 1216 + err = ufshcd_uic_hibern8_enter(hba); 1217 + if (err) 1218 + return err; 1219 1219 if (scale_up) 1220 1220 err = ufs_qcom_clk_scale_up_pre_change(hba); 1221 1221 else 1222 1222 err = ufs_qcom_clk_scale_down_pre_change(hba); 1223 + if (err) 1224 + ufshcd_uic_hibern8_exit(hba); 1225 + 1223 1226 } else { 1224 1227 if (scale_up) 1225 1228 err = ufs_qcom_clk_scale_up_post_change(hba); 1226 1229 else 1227 1230 err = ufs_qcom_clk_scale_down_post_change(hba); 1228 1231 1229 - if (err || !dev_req_params) 1232 + 1233 + if (err || !dev_req_params) { 1234 + ufshcd_uic_hibern8_exit(hba); 1230 1235 goto out; 1236 + } 1231 1237 1232 1238 ufs_qcom_cfg_timers(hba, 1233 1239 dev_req_params->gear_rx, 1234 1240 dev_req_params->pwr_rx, 1235 1241 dev_req_params->hs_rate, 1236 1242 false); 1243 + ufshcd_uic_hibern8_exit(hba); 1237 1244 } 1238 1245 1239 1246 out:
+7
drivers/scsi/ufs/ufs.h
··· 152 152 QUERY_ATTR_IDN_PSA_STATE = 0x15, 153 153 QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16, 154 154 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17, 155 + QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18, 156 + QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19, 157 + QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A, 155 158 QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C, 156 159 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D, 157 160 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E, ··· 341 338 342 339 /* Possible values for dExtendedUFSFeaturesSupport */ 343 340 enum { 341 + UFS_DEV_LOW_TEMP_NOTIF = BIT(4), 342 + UFS_DEV_HIGH_TEMP_NOTIF = BIT(5), 343 + UFS_DEV_EXT_TEMP_NOTIF = BIT(6), 344 344 UFS_DEV_HPB_SUPPORT = BIT(7), 345 345 UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), 346 346 }; ··· 376 370 MASK_EE_WRITEBOOSTER_EVENT = BIT(5), 377 371 MASK_EE_PERFORMANCE_THROTTLING = BIT(6), 378 372 }; 373 + #define MASK_EE_URGENT_TEMP (MASK_EE_TOO_HIGH_TEMP | MASK_EE_TOO_LOW_TEMP) 379 374 380 375 /* Background operation status */ 381 376 enum bkops_status {
+2 -2
drivers/scsi/ufs/ufshcd-pltfrm.c
··· 91 91 92 92 clki->min_freq = clkfreq[i]; 93 93 clki->max_freq = clkfreq[i+1]; 94 - clki->name = kstrdup(name, GFP_KERNEL); 94 + clki->name = devm_kstrdup(dev, name, GFP_KERNEL); 95 95 if (!strcmp(name, "ref_clk")) 96 96 clki->keep_link_active = true; 97 97 dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz", ··· 126 126 if (!vreg) 127 127 return -ENOMEM; 128 128 129 - vreg->name = kstrdup(name, GFP_KERNEL); 129 + vreg->name = devm_kstrdup(dev, name, GFP_KERNEL); 130 130 131 131 snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name); 132 132 if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
+129 -212
drivers/scsi/ufs/ufshcd.c
··· 62 62 /* maximum number of reset retries before giving up */ 63 63 #define MAX_HOST_RESET_RETRIES 5 64 64 65 + /* Maximum number of error handler retries before giving up */ 66 + #define MAX_ERR_HANDLER_RETRIES 5 67 + 65 68 /* Expose the flag value from utp_upiu_query.value */ 66 69 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF 67 70 ··· 225 222 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); 226 223 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); 227 224 static void ufshcd_hba_exit(struct ufs_hba *hba); 228 - static int ufshcd_clear_ua_wluns(struct ufs_hba *hba); 229 - static int ufshcd_probe_hba(struct ufs_hba *hba, bool async); 225 + static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params); 230 226 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); 231 - static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); 232 227 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); 233 228 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); 234 229 static void ufshcd_resume_clkscaling(struct ufs_hba *hba); ··· 2686 2685 2687 2686 switch (hba->ufshcd_state) { 2688 2687 case UFSHCD_STATE_OPERATIONAL: 2688 + break; 2689 2689 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: 2690 + /* 2691 + * SCSI error handler can call ->queuecommand() while UFS error 2692 + * handler is in progress. Error interrupts could change the 2693 + * state from UFSHCD_STATE_RESET to 2694 + * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests 2695 + * being issued in that case. 
2696 + */ 2697 + if (ufshcd_eh_in_progress(hba)) { 2698 + err = SCSI_MLQUEUE_HOST_BUSY; 2699 + goto out; 2700 + } 2690 2701 break; 2691 2702 case UFSHCD_STATE_EH_SCHEDULED_FATAL: 2692 2703 /* ··· 2714 2701 if (hba->pm_op_in_progress) { 2715 2702 hba->force_reset = true; 2716 2703 set_host_byte(cmd, DID_BAD_TARGET); 2717 - cmd->scsi_done(cmd); 2704 + scsi_done(cmd); 2718 2705 goto out; 2719 2706 } 2720 2707 fallthrough; ··· 2723 2710 goto out; 2724 2711 case UFSHCD_STATE_ERROR: 2725 2712 set_host_byte(cmd, DID_ERROR); 2726 - cmd->scsi_done(cmd); 2713 + scsi_done(cmd); 2727 2714 goto out; 2728 2715 } 2729 2716 ··· 4086 4073 if (ret) 4087 4074 dev_err(hba->dev, "%s: link recovery failed, err %d", 4088 4075 __func__, ret); 4089 - else 4090 - ufshcd_clear_ua_wluns(hba); 4091 4076 4092 4077 return ret; 4093 4078 } 4094 4079 EXPORT_SYMBOL_GPL(ufshcd_link_recovery); 4095 4080 4096 - static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) 4081 + int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) 4097 4082 { 4098 4083 int ret; 4099 4084 struct uic_command uic_cmd = {0}; ··· 4113 4102 4114 4103 return ret; 4115 4104 } 4105 + EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter); 4116 4106 4117 4107 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) 4118 4108 { ··· 5270 5258 /* Mark completed command as NULL in LRB */ 5271 5259 lrbp->cmd = NULL; 5272 5260 /* Do not touch lrbp after scsi done */ 5273 - cmd->scsi_done(cmd); 5261 + scsi_done(cmd); 5274 5262 ufshcd_release(hba); 5275 5263 update_scaling = true; 5276 5264 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || ··· 5618 5606 __func__, err); 5619 5607 } 5620 5608 5609 + static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status) 5610 + { 5611 + u32 value; 5612 + 5613 + if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 5614 + QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value)) 5615 + return; 5616 + 5617 + dev_info(hba->dev, "exception Tcase %d\n", value - 80); 5618 + 5619 + 
ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP); 5620 + 5621 + /* 5622 + * A placeholder for the platform vendors to add whatever additional 5623 + * steps required 5624 + */ 5625 + } 5626 + 5621 5627 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) 5622 5628 { 5623 5629 u8 index; ··· 5815 5785 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS) 5816 5786 ufshcd_bkops_exception_event_handler(hba); 5817 5787 5788 + if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP) 5789 + ufshcd_temp_exception_event_handler(hba, status); 5790 + 5818 5791 ufs_debugfs_exception_event(hba, status); 5819 5792 out: 5820 5793 ufshcd_scsi_unblock_requests(hba); 5821 - return; 5822 5794 } 5823 5795 5824 5796 /* Complete requests that have door-bell cleared */ ··· 5991 5959 ufshcd_release(hba); 5992 5960 if (ufshcd_is_clkscaling_supported(hba)) 5993 5961 ufshcd_clk_scaling_suspend(hba, false); 5994 - ufshcd_clear_ua_wluns(hba); 5995 5962 ufshcd_rpm_put(hba); 5996 5963 } 5997 5964 ··· 6064 6033 */ 6065 6034 static void ufshcd_err_handler(struct work_struct *work) 6066 6035 { 6036 + int retries = MAX_ERR_HANDLER_RETRIES; 6067 6037 struct ufs_hba *hba; 6068 6038 unsigned long flags; 6069 - bool err_xfer = false; 6070 - bool err_tm = false; 6071 - int err = 0, pmc_err; 6039 + bool needs_restore; 6040 + bool needs_reset; 6041 + bool err_xfer; 6042 + bool err_tm; 6043 + int pmc_err; 6072 6044 int tag; 6073 - bool needs_reset = false, needs_restore = false; 6074 6045 6075 6046 hba = container_of(work, struct ufs_hba, eh_work); 6076 6047 ··· 6091 6058 /* Complete requests that have door-bell cleared by h/w */ 6092 6059 ufshcd_complete_requests(hba); 6093 6060 spin_lock_irqsave(hba->host->host_lock, flags); 6061 + again: 6062 + needs_restore = false; 6063 + needs_reset = false; 6064 + err_xfer = false; 6065 + err_tm = false; 6066 + 6094 6067 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6095 6068 hba->ufshcd_state = UFSHCD_STATE_RESET; 6096 6069 /* ··· 6217 
6178 do_reset: 6218 6179 /* Fatal errors need reset */ 6219 6180 if (needs_reset) { 6181 + int err; 6182 + 6220 6183 hba->force_reset = false; 6221 6184 spin_unlock_irqrestore(hba->host->host_lock, flags); 6222 6185 err = ufshcd_reset_and_restore(hba); ··· 6237 6196 if (hba->saved_err || hba->saved_uic_err) 6238 6197 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", 6239 6198 __func__, hba->saved_err, hba->saved_uic_err); 6199 + } 6200 + /* Exit in an operational state or dead */ 6201 + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && 6202 + hba->ufshcd_state != UFSHCD_STATE_ERROR) { 6203 + if (--retries) 6204 + goto again; 6205 + hba->ufshcd_state = UFSHCD_STATE_ERROR; 6240 6206 } 6241 6207 ufshcd_clear_eh_in_progress(hba); 6242 6208 spin_unlock_irqrestore(hba->host->host_lock, flags); ··· 7150 7102 */ 7151 7103 static int ufshcd_reset_and_restore(struct ufs_hba *hba) 7152 7104 { 7153 - u32 saved_err; 7154 - u32 saved_uic_err; 7105 + u32 saved_err = 0; 7106 + u32 saved_uic_err = 0; 7155 7107 int err = 0; 7156 7108 unsigned long flags; 7157 7109 int retries = MAX_HOST_RESET_RETRIES; 7158 7110 7159 - /* 7160 - * This is a fresh start, cache and clear saved error first, 7161 - * in case new error generated during reset and restore. 7162 - */ 7163 7111 spin_lock_irqsave(hba->host->host_lock, flags); 7164 - saved_err = hba->saved_err; 7165 - saved_uic_err = hba->saved_uic_err; 7166 - hba->saved_err = 0; 7167 - hba->saved_uic_err = 0; 7168 - spin_unlock_irqrestore(hba->host->host_lock, flags); 7169 - 7170 7112 do { 7113 + /* 7114 + * This is a fresh start, cache and clear saved error first, 7115 + * in case new error generated during reset and restore. 
7116 + */ 7117 + saved_err |= hba->saved_err; 7118 + saved_uic_err |= hba->saved_uic_err; 7119 + hba->saved_err = 0; 7120 + hba->saved_uic_err = 0; 7121 + hba->force_reset = false; 7122 + hba->ufshcd_state = UFSHCD_STATE_RESET; 7123 + spin_unlock_irqrestore(hba->host->host_lock, flags); 7124 + 7171 7125 /* Reset the attached device */ 7172 7126 ufshcd_device_reset(hba); 7173 7127 7174 7128 err = ufshcd_host_reset_and_restore(hba); 7129 + 7130 + spin_lock_irqsave(hba->host->host_lock, flags); 7131 + if (err) 7132 + continue; 7133 + /* Do not exit unless operational or dead */ 7134 + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL && 7135 + hba->ufshcd_state != UFSHCD_STATE_ERROR && 7136 + hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL) 7137 + err = -EAGAIN; 7175 7138 } while (err && --retries); 7176 7139 7177 - spin_lock_irqsave(hba->host->host_lock, flags); 7178 7140 /* 7179 7141 * Inform scsi mid-layer that we did reset and allow to handle 7180 7142 * Unit Attention properly. 
··· 7495 7437 hba->caps &= ~UFSHCD_CAP_WB_EN; 7496 7438 } 7497 7439 7440 + static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf) 7441 + { 7442 + struct ufs_dev_info *dev_info = &hba->dev_info; 7443 + u32 ext_ufs_feature; 7444 + u8 mask = 0; 7445 + 7446 + if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300) 7447 + return; 7448 + 7449 + ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); 7450 + 7451 + if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF) 7452 + mask |= MASK_EE_TOO_LOW_TEMP; 7453 + 7454 + if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF) 7455 + mask |= MASK_EE_TOO_HIGH_TEMP; 7456 + 7457 + if (mask) { 7458 + ufshcd_enable_ee(hba, mask); 7459 + ufs_hwmon_probe(hba, mask); 7460 + } 7461 + } 7462 + 7498 7463 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups) 7499 7464 { 7500 7465 struct ufs_dev_fix *f; ··· 7612 7531 ufs_fixup_device_setup(hba); 7613 7532 7614 7533 ufshcd_wb_probe(hba, desc_buf); 7534 + 7535 + ufshcd_temp_notif_probe(hba, desc_buf); 7615 7536 7616 7537 /* 7617 7538 * ufshcd_read_string_desc returns size of the string ··· 7958 7875 if (ret) 7959 7876 goto out; 7960 7877 7961 - ufshcd_clear_ua_wluns(hba); 7962 - 7963 7878 /* Initialize devfreq after UFS device is detected */ 7964 7879 if (ufshcd_is_clkscaling_supported(hba)) { 7965 7880 memcpy(&hba->clk_scaling.saved_pwr_info.info, ··· 7980 7899 pm_runtime_put_sync(hba->dev); 7981 7900 7982 7901 out: 7983 - return ret; 7984 - } 7985 - 7986 - static void ufshcd_request_sense_done(struct request *rq, blk_status_t error) 7987 - { 7988 - if (error != BLK_STS_OK) 7989 - pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error); 7990 - kfree(rq->end_io_data); 7991 - blk_mq_free_request(rq); 7992 - } 7993 - 7994 - static int 7995 - ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev) 7996 - { 7997 - /* 7998 - * Some UFS devices clear unit attention condition only if the sense 7999 - * 
size used (UFS_SENSE_SIZE in this case) is non-zero. 8000 - */ 8001 - static const u8 cmd[6] = {REQUEST_SENSE, 0, 0, 0, UFS_SENSE_SIZE, 0}; 8002 - struct scsi_request *rq; 8003 - struct request *req; 8004 - char *buffer; 8005 - int ret; 8006 - 8007 - buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL); 8008 - if (!buffer) 8009 - return -ENOMEM; 8010 - 8011 - req = blk_mq_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 8012 - /*flags=*/BLK_MQ_REQ_PM); 8013 - if (IS_ERR(req)) { 8014 - ret = PTR_ERR(req); 8015 - goto out_free; 8016 - } 8017 - 8018 - ret = blk_rq_map_kern(sdev->request_queue, req, 8019 - buffer, UFS_SENSE_SIZE, GFP_NOIO); 8020 - if (ret) 8021 - goto out_put; 8022 - 8023 - rq = scsi_req(req); 8024 - rq->cmd_len = ARRAY_SIZE(cmd); 8025 - memcpy(rq->cmd, cmd, rq->cmd_len); 8026 - rq->retries = 3; 8027 - req->timeout = 1 * HZ; 8028 - req->rq_flags |= RQF_PM | RQF_QUIET; 8029 - req->end_io_data = buffer; 8030 - 8031 - blk_execute_rq_nowait(/*bd_disk=*/NULL, req, /*at_head=*/true, 8032 - ufshcd_request_sense_done); 8033 - return 0; 8034 - 8035 - out_put: 8036 - blk_mq_free_request(req); 8037 - out_free: 8038 - kfree(buffer); 8039 - return ret; 8040 - } 8041 - 8042 - static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun) 8043 - { 8044 - struct scsi_device *sdp; 8045 - unsigned long flags; 8046 - int ret = 0; 8047 - 8048 - spin_lock_irqsave(hba->host->host_lock, flags); 8049 - if (wlun == UFS_UPIU_UFS_DEVICE_WLUN) 8050 - sdp = hba->sdev_ufs_device; 8051 - else if (wlun == UFS_UPIU_RPMB_WLUN) 8052 - sdp = hba->sdev_rpmb; 8053 - else 8054 - BUG(); 8055 - if (sdp) { 8056 - ret = scsi_device_get(sdp); 8057 - if (!ret && !scsi_device_online(sdp)) { 8058 - ret = -ENODEV; 8059 - scsi_device_put(sdp); 8060 - } 8061 - } else { 8062 - ret = -ENODEV; 8063 - } 8064 - spin_unlock_irqrestore(hba->host->host_lock, flags); 8065 - if (ret) 8066 - goto out_err; 8067 - 8068 - ret = ufshcd_request_sense_async(hba, sdp); 8069 - scsi_device_put(sdp); 8070 - out_err: 8071 - if 
(ret) 8072 - dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n", 8073 - __func__, wlun, ret); 8074 - return ret; 8075 - } 8076 - 8077 - static int ufshcd_clear_ua_wluns(struct ufs_hba *hba) 8078 - { 8079 - int ret = 0; 8080 - 8081 - if (!hba->wlun_dev_clr_ua) 8082 - goto out; 8083 - 8084 - ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN); 8085 - if (!ret) 8086 - ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN); 8087 - if (!ret) 8088 - hba->wlun_dev_clr_ua = false; 8089 - out: 8090 - if (ret) 8091 - dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n", 8092 - __func__, ret); 8093 7902 return ret; 8094 7903 } 8095 7904 ··· 8033 8062 /* UFS device is also active now */ 8034 8063 ufshcd_set_ufs_dev_active(hba); 8035 8064 ufshcd_force_reset_auto_bkops(hba); 8036 - hba->wlun_dev_clr_ua = true; 8037 - hba->wlun_rpmb_clr_ua = true; 8038 8065 8039 8066 /* Gear up to HS gear if supported */ 8040 8067 if (hba->max_pwr_info.is_valid) { ··· 8569 8600 struct scsi_sense_hdr sshdr; 8570 8601 struct scsi_device *sdp; 8571 8602 unsigned long flags; 8572 - int ret; 8603 + int ret, retries; 8573 8604 8574 8605 spin_lock_irqsave(hba->host->host_lock, flags); 8575 8606 sdp = hba->sdev_ufs_device; ··· 8594 8625 * handling context. 8595 8626 */ 8596 8627 hba->host->eh_noresume = 1; 8597 - if (hba->wlun_dev_clr_ua) 8598 - ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN); 8599 8628 8600 8629 cmd[4] = pwr_mode << 4; 8601 8630 ··· 8602 8635 * callbacks hence set the RQF_PM flag so that it doesn't resume the 8603 8636 * already suspended childs. 
8604 8637 */ 8605 - ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, 8606 - START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL); 8638 + for (retries = 3; retries > 0; --retries) { 8639 + ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, 8640 + START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL); 8641 + if (!scsi_status_is_check_condition(ret) || 8642 + !scsi_sense_valid(&sshdr) || 8643 + sshdr.sense_key != UNIT_ATTENTION) 8644 + break; 8645 + } 8607 8646 if (ret) { 8608 8647 sdev_printk(KERN_WARNING, sdp, 8609 8648 "START_STOP failed for power mode: %d, result %x\n", ··· 8851 8878 8852 8879 flush_work(&hba->eeh_work); 8853 8880 8881 + ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 8882 + if (ret) 8883 + goto enable_scaling; 8884 + 8854 8885 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { 8855 8886 if (pm_op != UFS_RUNTIME_PM) 8856 8887 /* ensure that bkops is disabled */ ··· 8882 8905 * vendor specific host controller register space call them before the 8883 8906 * host clocks are ON. 
8884 8907 */ 8885 - ret = ufshcd_vops_suspend(hba, pm_op); 8908 + ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); 8886 8909 if (ret) 8887 8910 goto set_link_active; 8888 8911 goto out; ··· 9010 9033 set_old_link_state: 9011 9034 ufshcd_link_state_transition(hba, old_link_state, 0); 9012 9035 vendor_suspend: 9013 - ufshcd_vops_suspend(hba, pm_op); 9036 + ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 9037 + ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); 9014 9038 out: 9015 9039 if (ret) 9016 9040 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); ··· 9356 9378 { 9357 9379 if (hba->sdev_ufs_device) 9358 9380 ufshcd_rpm_get_sync(hba); 9381 + ufs_hwmon_remove(hba); 9359 9382 ufs_bsg_remove(hba); 9360 9383 ufshpb_remove(hba); 9361 9384 ufs_sysfs_remove_nodes(hba->dev); ··· 9678 9699 ufshcd_rpm_put(hba); 9679 9700 hba->complete_put = false; 9680 9701 } 9681 - if (hba->rpmb_complete_put) { 9682 - ufshcd_rpmb_rpm_put(hba); 9683 - hba->rpmb_complete_put = false; 9684 - } 9685 9702 } 9686 9703 EXPORT_SYMBOL_GPL(ufshcd_resume_complete); 9687 9704 ··· 9699 9724 return ret; 9700 9725 } 9701 9726 hba->complete_put = true; 9702 - } 9703 - if (hba->sdev_rpmb) { 9704 - ufshcd_rpmb_rpm_get_sync(hba); 9705 - hba->rpmb_complete_put = true; 9706 9727 } 9707 9728 return 0; 9708 9729 } ··· 9768 9797 }, 9769 9798 }; 9770 9799 9771 - static int ufshcd_rpmb_probe(struct device *dev) 9772 - { 9773 - return is_rpmb_wlun(to_scsi_device(dev)) ? 
0 : -ENODEV; 9774 - } 9775 - 9776 - static inline int ufshcd_clear_rpmb_uac(struct ufs_hba *hba) 9777 - { 9778 - int ret = 0; 9779 - 9780 - if (!hba->wlun_rpmb_clr_ua) 9781 - return 0; 9782 - ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN); 9783 - if (!ret) 9784 - hba->wlun_rpmb_clr_ua = 0; 9785 - return ret; 9786 - } 9787 - 9788 - #ifdef CONFIG_PM 9789 - static int ufshcd_rpmb_resume(struct device *dev) 9790 - { 9791 - struct ufs_hba *hba = wlun_dev_to_hba(dev); 9792 - 9793 - if (hba->sdev_rpmb) 9794 - ufshcd_clear_rpmb_uac(hba); 9795 - return 0; 9796 - } 9797 - #endif 9798 - 9799 - static const struct dev_pm_ops ufs_rpmb_pm_ops = { 9800 - SET_RUNTIME_PM_OPS(NULL, ufshcd_rpmb_resume, NULL) 9801 - SET_SYSTEM_SLEEP_PM_OPS(NULL, ufshcd_rpmb_resume) 9802 - }; 9803 - 9804 - /* ufs_rpmb_wlun_template - Describes UFS RPMB WLUN. Used only to send UAC. */ 9805 - static struct scsi_driver ufs_rpmb_wlun_template = { 9806 - .gendrv = { 9807 - .name = "ufs_rpmb_wlun", 9808 - .owner = THIS_MODULE, 9809 - .probe = ufshcd_rpmb_probe, 9810 - .pm = &ufs_rpmb_pm_ops, 9811 - }, 9812 - }; 9813 - 9814 9800 static int __init ufshcd_core_init(void) 9815 9801 { 9816 9802 int ret; ··· 9776 9848 9777 9849 ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv); 9778 9850 if (ret) 9779 - goto debugfs_exit; 9780 - 9781 - ret = scsi_register_driver(&ufs_rpmb_wlun_template.gendrv); 9782 - if (ret) 9783 - goto unregister; 9784 - 9785 - return ret; 9786 - unregister: 9787 - scsi_unregister_driver(&ufs_dev_wlun_template.gendrv); 9788 - debugfs_exit: 9789 - ufs_debugfs_exit(); 9851 + ufs_debugfs_exit(); 9790 9852 return ret; 9791 9853 } 9792 9854 9793 9855 static void __exit ufshcd_core_exit(void) 9794 9856 { 9795 9857 ufs_debugfs_exit(); 9796 - scsi_unregister_driver(&ufs_rpmb_wlun_template.gendrv); 9797 9858 scsi_unregister_driver(&ufs_dev_wlun_template.gendrv); 9798 9859 } 9799 9860
+26 -17
drivers/scsi/ufs/ufshcd.h
··· 344 344 enum ufs_notify_change_status); 345 345 int (*apply_dev_quirks)(struct ufs_hba *hba); 346 346 void (*fixup_dev_quirks)(struct ufs_hba *hba); 347 - int (*suspend)(struct ufs_hba *, enum ufs_pm_op); 347 + int (*suspend)(struct ufs_hba *, enum ufs_pm_op, 348 + enum ufs_notify_change_status); 348 349 int (*resume)(struct ufs_hba *, enum ufs_pm_op); 349 350 void (*dbg_register_dump)(struct ufs_hba *hba); 350 351 int (*phy_initialization)(struct ufs_hba *); ··· 654 653 * in order to exit DeepSleep state. 655 654 */ 656 655 UFSHCD_CAP_DEEPSLEEP = 1 << 10, 656 + 657 + /* 658 + * This capability allows the host controller driver to use temperature 659 + * notification if it is supported by the UFS device. 660 + */ 661 + UFSHCD_CAP_TEMP_NOTIF = 1 << 11, 657 662 }; 658 663 659 664 struct ufs_hba_variant_params { ··· 798 791 struct scsi_device *sdev_ufs_device; 799 792 struct scsi_device *sdev_rpmb; 800 793 794 + #ifdef CONFIG_SCSI_UFS_HWMON 795 + struct device *hwmon_device; 796 + #endif 797 + 801 798 enum ufs_dev_pwr_mode curr_dev_pwr_mode; 802 799 enum uic_link_state uic_link_state; 803 800 /* Desired UFS power management level during runtime PM */ ··· 882 871 struct ufs_vreg_info vreg_info; 883 872 struct list_head clk_list_head; 884 873 885 - bool wlun_dev_clr_ua; 886 - bool wlun_rpmb_clr_ua; 887 - 888 874 /* Number of requests aborts */ 889 875 int req_abort_count; 890 876 ··· 928 920 #endif 929 921 u32 luns_avail; 930 922 bool complete_put; 931 - bool rpmb_complete_put; 932 923 }; 933 924 934 925 /* Returns true if clocks can be gated. 
Otherwise false */ ··· 1014 1007 int ufshcd_link_recovery(struct ufs_hba *hba); 1015 1008 int ufshcd_make_hba_operational(struct ufs_hba *hba); 1016 1009 void ufshcd_remove(struct ufs_hba *); 1010 + int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); 1017 1011 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); 1018 1012 void ufshcd_delay_us(unsigned long us, unsigned long tolerance); 1019 1013 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, ··· 1062 1054 return hba->dev_info.wb_dedicated_lu; 1063 1055 return 0; 1064 1056 } 1057 + 1058 + #ifdef CONFIG_SCSI_UFS_HWMON 1059 + void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask); 1060 + void ufs_hwmon_remove(struct ufs_hba *hba); 1061 + void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask); 1062 + #else 1063 + static inline void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) {} 1064 + static inline void ufs_hwmon_remove(struct ufs_hba *hba) {} 1065 + static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {} 1066 + #endif 1065 1067 1066 1068 #ifdef CONFIG_PM 1067 1069 extern int ufshcd_runtime_suspend(struct device *dev); ··· 1319 1301 hba->vops->fixup_dev_quirks(hba); 1320 1302 } 1321 1303 1322 - static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op) 1304 + static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op, 1305 + enum ufs_notify_change_status status) 1323 1306 { 1324 1307 if (hba->vops && hba->vops->suspend) 1325 - return hba->vops->suspend(hba, op); 1308 + return hba->vops->suspend(hba, op, status); 1326 1309 1327 1310 return 0; 1328 1311 } ··· 1410 1391 static inline int ufshcd_rpm_put(struct ufs_hba *hba) 1411 1392 { 1412 1393 return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev); 1413 - } 1414 - 1415 - static inline int ufshcd_rpmb_rpm_get_sync(struct ufs_hba *hba) 1416 - { 1417 - return pm_runtime_get_sync(&hba->sdev_rpmb->sdev_gendev); 1418 - } 1419 - 1420 - static inline int ufshcd_rpmb_rpm_put(struct ufs_hba *hba) 
1421 - { 1422 - return pm_runtime_put(&hba->sdev_rpmb->sdev_gendev); 1423 1394 } 1424 1395 1425 1396 #endif /* End of Header */
+2 -5
drivers/scsi/ufs/ufshpb.c
··· 2371 2371 2372 2372 ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size); 2373 2373 2374 - pm_runtime_get_sync(hba->dev); 2374 + ufshcd_rpm_get_sync(hba); 2375 2375 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, 2376 2376 QUERY_DESC_IDN_UNIT, lun, 0, 2377 2377 desc_buf, &size); 2378 - pm_runtime_put_sync(hba->dev); 2378 + ufshcd_rpm_put_sync(hba); 2379 2379 2380 2380 if (ret) { 2381 2381 dev_err(hba->dev, ··· 2598 2598 if (version == HPB_SUPPORT_LEGACY_VERSION) 2599 2599 hpb_dev_info->is_legacy = true; 2600 2600 2601 - pm_runtime_get_sync(hba->dev); 2602 2601 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, 2603 2602 QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd); 2604 - pm_runtime_put_sync(hba->dev); 2605 - 2606 2603 if (ret) 2607 2604 dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed", 2608 2605 __func__);
+3 -4
drivers/scsi/virtio_scsi.c
··· 164 164 VIRTIO_SCSI_SENSE_SIZE)); 165 165 } 166 166 167 - sc->scsi_done(sc); 167 + scsi_done(sc); 168 168 } 169 169 170 170 static void virtscsi_vq_done(struct virtio_scsi *vscsi, ··· 620 620 * we're using independent interrupts (e.g. MSI). Poll the 621 621 * virtqueues once. 622 622 * 623 - * In the abort case, sc->scsi_done will do nothing, because 624 - * the block layer must have detected a timeout and as a result 625 - * REQ_ATOM_COMPLETE has been set. 623 + * In the abort case, scsi_done() will do nothing, because the 624 + * command timed out and hence SCMD_STATE_COMPLETE has been set. 626 625 */ 627 626 virtscsi_poll_requests(vscsi); 628 627
+4 -5
drivers/scsi/vmw_pvscsi.c
··· 643 643 "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n", 644 644 cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat); 645 645 646 - cmd->scsi_done(cmd); 646 + scsi_done(cmd); 647 647 } 648 648 649 649 /* ··· 768 768 return 0; 769 769 } 770 770 771 - static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 771 + static int pvscsi_queue_lck(struct scsi_cmnd *cmd) 772 772 { 773 773 struct Scsi_Host *host = cmd->device->host; 774 774 struct pvscsi_adapter *adapter = shost_priv(host); ··· 786 786 return SCSI_MLQUEUE_HOST_BUSY; 787 787 } 788 788 789 - cmd->scsi_done = done; 790 789 op = cmd->cmnd[0]; 791 790 792 791 dev_dbg(&cmd->device->sdev_gendev, ··· 859 860 * Successfully aborted the command. 860 861 */ 861 862 cmd->result = (DID_ABORT << 16); 862 - cmd->scsi_done(cmd); 863 + scsi_done(cmd); 863 864 864 865 out: 865 866 spin_unlock_irqrestore(&adapter->hw_lock, flags); ··· 886 887 pvscsi_patch_sense(cmd); 887 888 pvscsi_release_context(adapter, ctx); 888 889 cmd->result = (DID_RESET << 16); 889 - cmd->scsi_done(cmd); 890 + scsi_done(cmd); 890 891 } 891 892 } 892 893 }
+7 -11
drivers/scsi/wd33c93.c
··· 362 362 msg[1] = offset; 363 363 } 364 364 365 - static int 366 - wd33c93_queuecommand_lck(struct scsi_cmnd *cmd, 367 - void (*done)(struct scsi_cmnd *)) 365 + static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd) 368 366 { 369 367 struct WD33C93_hostdata *hostdata; 370 368 struct scsi_cmnd *tmp; ··· 374 376 375 377 /* Set up a few fields in the scsi_cmnd structure for our own use: 376 378 * - host_scribble is the pointer to the next cmd in the input queue 377 - * - scsi_done points to the routine we call when a cmd is finished 378 379 * - result is what you'd expect 379 380 */ 380 381 cmd->host_scribble = NULL; 381 - cmd->scsi_done = done; 382 382 cmd->result = 0; 383 383 384 384 /* We use the Scsi_Pointer structure that's included with each command ··· 852 856 cmd->result = DID_NO_CONNECT << 16; 853 857 hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff)); 854 858 hostdata->state = S_UNCONNECTED; 855 - cmd->scsi_done(cmd); 859 + scsi_done(cmd); 856 860 857 861 /* From esp.c: 858 862 * There is a window of time within the scsi_done() path ··· 1179 1183 scsi_msg_to_host_byte(cmd, cmd->SCp.Message); 1180 1184 set_status_byte(cmd, cmd->SCp.Status); 1181 1185 } 1182 - cmd->scsi_done(cmd); 1186 + scsi_done(cmd); 1183 1187 1184 1188 /* We are no longer connected to a target - check to see if 1185 1189 * there are commands waiting to be executed. ··· 1266 1270 scsi_msg_to_host_byte(cmd, cmd->SCp.Message); 1267 1271 set_status_byte(cmd, cmd->SCp.Status); 1268 1272 } 1269 - cmd->scsi_done(cmd); 1273 + scsi_done(cmd); 1270 1274 1271 1275 /* We are no longer connected to a target - check to see if 1272 1276 * there are commands waiting to be executed. 
··· 1302 1306 scsi_msg_to_host_byte(cmd, cmd->SCp.Message); 1303 1307 set_status_byte(cmd, cmd->SCp.Status); 1304 1308 } 1305 - cmd->scsi_done(cmd); 1309 + scsi_done(cmd); 1306 1310 break; 1307 1311 case S_PRE_TMP_DISC: 1308 1312 case S_RUNNING_LEVEL2: ··· 1632 1636 ("scsi%d: Abort - removing command from input_Q. ", 1633 1637 instance->host_no); 1634 1638 enable_irq(cmd->device->host->irq); 1635 - cmd->scsi_done(cmd); 1639 + scsi_done(cmd); 1636 1640 return SUCCESS; 1637 1641 } 1638 1642 prev = tmp; ··· 1707 1711 wd33c93_execute(instance); 1708 1712 1709 1713 enable_irq(cmd->device->host->irq); 1710 - cmd->scsi_done(cmd); 1714 + scsi_done(cmd); 1711 1715 return SUCCESS; 1712 1716 } 1713 1717
+2 -2
drivers/scsi/wd719x.c
··· 200 200 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); 201 201 202 202 cmd->result = result << 16; 203 - cmd->scsi_done(cmd); 203 + scsi_done(cmd); 204 204 } 205 205 206 206 /* Build a SCB and send it to the card */ ··· 295 295 DMA_BIDIRECTIONAL); 296 296 out_error: 297 297 cmd->result = DID_ERROR << 16; 298 - cmd->scsi_done(cmd); 298 + scsi_done(cmd); 299 299 return 0; 300 300 } 301 301
+2 -2
drivers/scsi/xen-scsifront.c
··· 276 276 if (sense_len) 277 277 memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len); 278 278 279 - sc->scsi_done(sc); 279 + scsi_done(sc); 280 280 } 281 281 282 282 static void scsifront_sync_cmd_done(struct vscsifrnt_info *info, ··· 558 558 if (err == -ENOMEM) 559 559 return SCSI_MLQUEUE_HOST_BUSY; 560 560 sc->result = DID_ERROR << 16; 561 - sc->scsi_done(sc); 561 + scsi_done(sc); 562 562 return 0; 563 563 } 564 564
+4 -5
drivers/staging/rts5208/rtsx.c
··· 118 118 119 119 /* queue a command */ 120 120 /* This is always called with scsi_lock(host) held */ 121 - static int queuecommand_lck(struct scsi_cmnd *srb, 122 - void (*done)(struct scsi_cmnd *)) 121 + static int queuecommand_lck(struct scsi_cmnd *srb) 123 122 { 123 + void (*done)(struct scsi_cmnd *) = scsi_done; 124 124 struct rtsx_dev *dev = host_to_rtsx(srb->device->host); 125 125 struct rtsx_chip *chip = dev->chip; 126 126 ··· 140 140 } 141 141 142 142 /* enqueue the command and wake up the control thread */ 143 - srb->scsi_done = done; 144 143 chip->srb = srb; 145 144 complete(&dev->cmnd_ready); 146 145 ··· 422 423 423 424 /* indicate that the command is done */ 424 425 else if (chip->srb->result != DID_ABORT << 16) { 425 - chip->srb->scsi_done(chip->srb); 426 + scsi_done(chip->srb); 426 427 } else { 427 428 skip_for_abort: 428 429 dev_err(&dev->pci->dev, "scsi command aborted\n"); ··· 634 635 if (chip->srb) { 635 636 chip->srb->result = DID_NO_CONNECT << 16; 636 637 scsi_lock(host); 637 - chip->srb->scsi_done(dev->chip->srb); 638 + scsi_done(dev->chip->srb); 638 639 chip->srb = NULL; 639 640 scsi_unlock(host); 640 641 }
+8 -12
drivers/staging/unisys/visorhba/visorhba_main.c
··· 327 327 rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev); 328 328 if (rtn == SUCCESS) { 329 329 scsicmd->result = DID_ABORT << 16; 330 - scsicmd->scsi_done(scsicmd); 330 + scsi_done(scsicmd); 331 331 } 332 332 return rtn; 333 333 } ··· 354 354 rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev); 355 355 if (rtn == SUCCESS) { 356 356 scsicmd->result = DID_RESET << 16; 357 - scsicmd->scsi_done(scsicmd); 357 + scsi_done(scsicmd); 358 358 } 359 359 return rtn; 360 360 } ··· 383 383 rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev); 384 384 if (rtn == SUCCESS) { 385 385 scsicmd->result = DID_RESET << 16; 386 - scsicmd->scsi_done(scsicmd); 386 + scsi_done(scsicmd); 387 387 } 388 388 return rtn; 389 389 } ··· 446 446 * Return: 0 if successfully queued to the Service Partition, otherwise 447 447 * error code 448 448 */ 449 - static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd, 450 - void (*visorhba_cmnd_done) 451 - (struct scsi_cmnd *)) 449 + static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd) 452 450 { 451 + void (*visorhba_cmnd_done)(struct scsi_cmnd *) = scsi_done; 453 452 struct uiscmdrsp *cmdrsp; 454 453 struct scsi_device *scsidev = scsicmd->device; 455 454 int insert_location; ··· 475 476 */ 476 477 cmdrsp->scsi.handle = insert_location; 477 478 478 - /* save done function that we have call when cmd is complete */ 479 - scsicmd->scsi_done = visorhba_cmnd_done; 479 + WARN_ON_ONCE(visorhba_cmnd_done != scsi_done); 480 480 /* save destination */ 481 481 cmdrsp->scsi.vdest.channel = scsidev->channel; 482 482 cmdrsp->scsi.vdest.id = scsidev->id; ··· 582 584 .eh_device_reset_handler = visorhba_device_reset_handler, 583 585 .eh_bus_reset_handler = visorhba_bus_reset_handler, 584 586 .eh_host_reset_handler = visorhba_host_reset_handler, 585 - .shost_attrs = NULL, 586 587 #define visorhba_MAX_CMNDS 128 587 588 .can_queue = visorhba_MAX_CMNDS, 588 589 .sg_tablesize = 64, ··· 683 686 case CMD_SCSI_TYPE: 684 687 
scsicmd = pendingdel->sent; 685 688 scsicmd->result = DID_RESET << 16; 686 - if (scsicmd->scsi_done) 687 - scsicmd->scsi_done(scsicmd); 689 + scsi_done(scsicmd); 688 690 break; 689 691 case CMD_SCSITASKMGMT_TYPE: 690 692 cmdrsp = pendingdel->sent; ··· 849 853 else 850 854 do_scsi_nolinuxstat(cmdrsp, scsicmd); 851 855 852 - scsicmd->scsi_done(scsicmd); 856 + scsi_done(scsicmd); 853 857 } 854 858 855 859 /*
+3 -5
drivers/target/iscsi/cxgbit/cxgbit_cm.c
··· 836 836 csk->rcv_win = CXGBIT_10G_RCV_WIN; 837 837 if (scale) 838 838 csk->rcv_win *= scale; 839 + csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10); 839 840 840 841 #define CXGBIT_10G_SND_WIN (256 * 1024) 841 842 csk->snd_win = CXGBIT_10G_SND_WIN; 842 843 if (scale) 843 844 csk->snd_win *= scale; 845 + csk->snd_win = min(csk->snd_win, 512U * 1024); 844 846 845 847 pr_debug("%s snd_win %d rcv_win %d\n", 846 848 __func__, csk->snd_win, csk->rcv_win); ··· 1067 1065 if (!skb) 1068 1066 return -1; 1069 1067 1070 - credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) | 1068 + credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) | 1071 1069 RX_CREDITS_V(csk->rx_credits); 1072 1070 1073 1071 cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx, ··· 1199 1197 if (tcph->ece && tcph->cwr) 1200 1198 opt2 |= CCTRL_ECN_V(1); 1201 1199 1202 - opt2 |= RX_COALESCE_V(3); 1203 1200 opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO); 1204 1201 1205 1202 opt2 |= T5_ISS_F; ··· 1646 1645 csk->snd_nxt = snd_isn; 1647 1646 1648 1647 csk->rcv_nxt = rcv_isn; 1649 - 1650 - if (csk->rcv_win > (RCV_BUFSIZ_M << 10)) 1651 - csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10)); 1652 1648 1653 1649 csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); 1654 1650 cxgbit_set_emss(csk, tcp_opt);
+12 -5
drivers/target/iscsi/cxgbit/cxgbit_main.c
··· 33 33 struct cxgb4_lld_info *lldi = &cdev->lldi; 34 34 u32 mdsl; 35 35 36 - #define ULP2_MAX_PKT_LEN 16224 37 - #define ISCSI_PDU_NONPAYLOAD_LEN 312 38 - mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN, 39 - ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN); 40 - mdsl = min_t(u32, mdsl, 8192); 36 + #define CXGBIT_T5_MAX_PDU_LEN 16224 37 + #define CXGBIT_PDU_NONPAYLOAD_LEN 312 /* 48(BHS) + 256(AHS) + 8(Digest) */ 38 + if (is_t5(lldi->adapter_type)) { 39 + mdsl = min_t(u32, lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN, 40 + CXGBIT_T5_MAX_PDU_LEN - CXGBIT_PDU_NONPAYLOAD_LEN); 41 + } else { 42 + mdsl = lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN; 43 + mdsl = min(mdsl, 16384U); 44 + } 45 + 46 + mdsl = round_down(mdsl, 4); 47 + mdsl = min_t(u32, mdsl, 4 * PAGE_SIZE); 41 48 mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE); 42 49 43 50 cdev->mdsl = mdsl;
+23 -5
drivers/target/iscsi/cxgbit/cxgbit_target.c
··· 189 189 wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) | 190 190 FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); 191 191 192 - req->tunnel_to_proxy = htonl((wr_ulp_mode) | force | 193 - FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1)); 192 + req->tunnel_to_proxy = htonl(wr_ulp_mode | force | 193 + FW_OFLD_TX_DATA_WR_SHOVE_F); 194 194 } 195 195 196 196 static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb) ··· 1531 1531 return ret; 1532 1532 } 1533 1533 1534 - static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) 1534 + static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) 1535 1535 { 1536 1536 struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); 1537 1537 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); ··· 1557 1557 return ret; 1558 1558 } 1559 1559 1560 + static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) 1561 + { 1562 + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); 1563 + int ret; 1564 + 1565 + ret = cxgbit_process_lro_skb(csk, skb); 1566 + if (ret) 1567 + return ret; 1568 + 1569 + csk->rx_credits += lro_cb->pdu_totallen; 1570 + if (csk->rx_credits >= csk->rcv_win) { 1571 + csk->rx_credits = 0; 1572 + cxgbit_rx_data_ack(csk); 1573 + } 1574 + 1575 + return 0; 1576 + } 1577 + 1560 1578 static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) 1561 1579 { 1562 1580 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; ··· 1582 1564 1583 1565 if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) { 1584 1566 if (is_t5(lldi->adapter_type)) 1585 - ret = cxgbit_rx_lro_skb(csk, skb); 1567 + ret = cxgbit_t5_rx_lro_skb(csk, skb); 1586 1568 else 1587 - ret = cxgbit_process_lro_skb(csk, skb); 1569 + ret = cxgbit_rx_lro_skb(csk, skb); 1588 1570 } 1589 1571 1590 1572 __kfree_skb(skb);
+32 -59
drivers/target/iscsi/iscsi_target_configfs.c
··· 1005 1005 1006 1006 /* Start items for lio_target_tpg_cit */ 1007 1007 1008 - static ssize_t lio_target_tpg_enable_show(struct config_item *item, char *page) 1009 - { 1010 - struct se_portal_group *se_tpg = to_tpg(item); 1011 - struct iscsi_portal_group *tpg = container_of(se_tpg, 1012 - struct iscsi_portal_group, tpg_se_tpg); 1013 - ssize_t len; 1014 - 1015 - spin_lock(&tpg->tpg_state_lock); 1016 - len = sprintf(page, "%d\n", 1017 - (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0); 1018 - spin_unlock(&tpg->tpg_state_lock); 1019 - 1020 - return len; 1021 - } 1022 - 1023 - static ssize_t lio_target_tpg_enable_store(struct config_item *item, 1024 - const char *page, size_t count) 1025 - { 1026 - struct se_portal_group *se_tpg = to_tpg(item); 1027 - struct iscsi_portal_group *tpg = container_of(se_tpg, 1028 - struct iscsi_portal_group, tpg_se_tpg); 1029 - u32 op; 1030 - int ret; 1031 - 1032 - ret = kstrtou32(page, 0, &op); 1033 - if (ret) 1034 - return ret; 1035 - if ((op != 1) && (op != 0)) { 1036 - pr_err("Illegal value for tpg_enable: %u\n", op); 1037 - return -EINVAL; 1038 - } 1039 - 1040 - ret = iscsit_get_tpg(tpg); 1041 - if (ret < 0) 1042 - return -EINVAL; 1043 - 1044 - if (op) { 1045 - ret = iscsit_tpg_enable_portal_group(tpg); 1046 - if (ret < 0) 1047 - goto out; 1048 - } else { 1049 - /* 1050 - * iscsit_tpg_disable_portal_group() assumes force=1 1051 - */ 1052 - ret = iscsit_tpg_disable_portal_group(tpg, 1); 1053 - if (ret < 0) 1054 - goto out; 1055 - } 1056 - 1057 - iscsit_put_tpg(tpg); 1058 - return count; 1059 - out: 1060 - iscsit_put_tpg(tpg); 1061 - return -EINVAL; 1062 - } 1063 - 1064 - 1065 1008 static ssize_t lio_target_tpg_dynamic_sessions_show(struct config_item *item, 1066 1009 char *page) 1067 1010 { 1068 1011 return target_show_dynamic_sessions(to_tpg(item), page); 1069 1012 } 1070 1013 1071 - CONFIGFS_ATTR(lio_target_tpg_, enable); 1072 1014 CONFIGFS_ATTR_RO(lio_target_tpg_, dynamic_sessions); 1073 1015 1074 1016 static struct 
configfs_attribute *lio_target_tpg_attrs[] = { 1075 - &lio_target_tpg_attr_enable, 1076 1017 &lio_target_tpg_attr_dynamic_sessions, 1077 1018 NULL, 1078 1019 }; ··· 1068 1127 free_out: 1069 1128 kfree(tpg); 1070 1129 return NULL; 1130 + } 1131 + 1132 + static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg, 1133 + bool enable) 1134 + { 1135 + struct iscsi_portal_group *tpg = container_of(se_tpg, 1136 + struct iscsi_portal_group, tpg_se_tpg); 1137 + int ret; 1138 + 1139 + ret = iscsit_get_tpg(tpg); 1140 + if (ret < 0) 1141 + return -EINVAL; 1142 + 1143 + if (enable) { 1144 + ret = iscsit_tpg_enable_portal_group(tpg); 1145 + if (ret < 0) 1146 + goto out; 1147 + } else { 1148 + /* 1149 + * iscsit_tpg_disable_portal_group() assumes force=1 1150 + */ 1151 + ret = iscsit_tpg_disable_portal_group(tpg, 1); 1152 + if (ret < 0) 1153 + goto out; 1154 + } 1155 + 1156 + iscsit_put_tpg(tpg); 1157 + return 0; 1158 + out: 1159 + iscsit_put_tpg(tpg); 1160 + return -EINVAL; 1071 1161 } 1072 1162 1073 1163 static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg) ··· 1528 1556 .fabric_drop_wwn = lio_target_call_coredeltiqn, 1529 1557 .add_wwn_groups = lio_target_add_wwn_groups, 1530 1558 .fabric_make_tpg = lio_target_tiqn_addtpg, 1559 + .fabric_enable_tpg = lio_target_tiqn_enabletpg, 1531 1560 .fabric_drop_tpg = lio_target_tiqn_deltpg, 1532 1561 .fabric_make_np = lio_target_call_addnptotpg, 1533 1562 .fabric_drop_np = lio_target_call_delnpfromtpg,
+2 -2
drivers/target/loopback/tcm_loop.c
··· 71 71 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 72 72 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 73 73 else 74 - sc->scsi_done(sc); 74 + scsi_done(sc); 75 75 } 76 76 77 77 static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host) ··· 165 165 return; 166 166 167 167 out_done: 168 - sc->scsi_done(sc); 168 + scsi_done(sc); 169 169 } 170 170 171 171 /*
+5 -25
drivers/target/sbp/sbp_target.c
··· 2125 2125 return count; 2126 2126 } 2127 2127 2128 - static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page) 2128 + static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable) 2129 2129 { 2130 - struct se_portal_group *se_tpg = to_tpg(item); 2131 2130 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2132 2131 struct sbp_tport *tport = tpg->tport; 2133 - return sprintf(page, "%d\n", tport->enable); 2134 - } 2135 - 2136 - static ssize_t sbp_tpg_enable_store(struct config_item *item, 2137 - const char *page, size_t count) 2138 - { 2139 - struct se_portal_group *se_tpg = to_tpg(item); 2140 - struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2141 - struct sbp_tport *tport = tpg->tport; 2142 - unsigned long val; 2143 2132 int ret; 2144 2133 2145 - if (kstrtoul(page, 0, &val) < 0) 2146 - return -EINVAL; 2147 - if ((val != 0) && (val != 1)) 2148 - return -EINVAL; 2149 - 2150 - if (tport->enable == val) 2151 - return count; 2152 - 2153 - if (val) { 2134 + if (enable) { 2154 2135 if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) { 2155 2136 pr_err("Cannot enable a target with no LUNs!\n"); 2156 2137 return -EINVAL; ··· 2146 2165 spin_unlock_bh(&se_tpg->session_lock); 2147 2166 } 2148 2167 2149 - tport->enable = val; 2168 + tport->enable = enable; 2150 2169 2151 2170 ret = sbp_update_unit_directory(tport); 2152 2171 if (ret < 0) { ··· 2154 2173 return ret; 2155 2174 } 2156 2175 2157 - return count; 2176 + return 0; 2158 2177 } 2159 2178 2160 2179 CONFIGFS_ATTR(sbp_tpg_, directory_id); 2161 - CONFIGFS_ATTR(sbp_tpg_, enable); 2162 2180 2163 2181 static struct configfs_attribute *sbp_tpg_base_attrs[] = { 2164 2182 &sbp_tpg_attr_directory_id, 2165 - &sbp_tpg_attr_enable, 2166 2183 NULL, 2167 2184 }; 2168 2185 ··· 2298 2319 .fabric_make_wwn = sbp_make_tport, 2299 2320 .fabric_drop_wwn = sbp_drop_tport, 2300 2321 .fabric_make_tpg = sbp_make_tpg, 2322 + .fabric_enable_tpg = sbp_enable_tpg, 2301 2323 .fabric_drop_tpg = 
sbp_drop_tpg, 2302 2324 .fabric_post_link = sbp_post_link_lun, 2303 2325 .fabric_pre_unlink = sbp_pre_unlink_lun,
+49 -34
drivers/target/target_core_alua.c
··· 247 247 * this CDB was received upon to determine this value individually 248 248 * for ALUA target port group. 249 249 */ 250 - spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock); 251 - tg_pt_gp = cmd->se_lun->lun_tg_pt_gp; 250 + rcu_read_lock(); 251 + tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp); 252 252 if (tg_pt_gp) 253 253 buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; 254 - spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock); 254 + rcu_read_unlock(); 255 255 } 256 256 transport_kunmap_data_sg(cmd); 257 257 ··· 292 292 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed 293 293 * for the local tg_pt_gp. 294 294 */ 295 - spin_lock(&l_lun->lun_tg_pt_gp_lock); 296 - l_tg_pt_gp = l_lun->lun_tg_pt_gp; 295 + rcu_read_lock(); 296 + l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp); 297 297 if (!l_tg_pt_gp) { 298 - spin_unlock(&l_lun->lun_tg_pt_gp_lock); 298 + rcu_read_unlock(); 299 299 pr_err("Unable to access l_lun->tg_pt_gp\n"); 300 300 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 301 301 goto out; 302 302 } 303 303 304 304 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { 305 - spin_unlock(&l_lun->lun_tg_pt_gp_lock); 305 + rcu_read_unlock(); 306 306 pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 307 307 " while TPGS_EXPLICIT_ALUA is disabled\n"); 308 308 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 309 309 goto out; 310 310 } 311 311 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; 312 - spin_unlock(&l_lun->lun_tg_pt_gp_lock); 312 + rcu_read_unlock(); 313 313 314 314 ptr = &buf[4]; /* Skip over RESERVED area in header */ 315 315 ··· 662 662 " target port\n"); 663 663 return TCM_ALUA_OFFLINE; 664 664 } 665 - 666 - if (!lun->lun_tg_pt_gp) 665 + rcu_read_lock(); 666 + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); 667 + if (!tg_pt_gp) { 668 + rcu_read_unlock(); 667 669 return 0; 670 + } 668 671 669 - spin_lock(&lun->lun_tg_pt_gp_lock); 670 - tg_pt_gp = lun->lun_tg_pt_gp; 671 672 out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state; 672 673 
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 673 674 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; 674 - 675 - spin_unlock(&lun->lun_tg_pt_gp_lock); 675 + rcu_read_unlock(); 676 676 /* 677 677 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional 678 678 * statement so the compiler knows explicitly to check this case first. ··· 1219 1219 struct t10_alua_tg_pt_gp *tg_pt_gp; 1220 1220 int trans_delay_msecs; 1221 1221 1222 - spin_lock(&lun->lun_tg_pt_gp_lock); 1223 - tg_pt_gp = lun->lun_tg_pt_gp; 1222 + rcu_read_lock(); 1223 + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); 1224 1224 if (!tg_pt_gp) { 1225 - spin_unlock(&lun->lun_tg_pt_gp_lock); 1225 + rcu_read_unlock(); 1226 1226 pr_err("Unable to complete secondary state" 1227 1227 " transition\n"); 1228 1228 return -EINVAL; ··· 1246 1246 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1247 1247 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); 1248 1248 1249 - spin_unlock(&lun->lun_tg_pt_gp_lock); 1249 + rcu_read_unlock(); 1250 1250 /* 1251 1251 * Do the optional transition delay after we set the secondary 1252 1252 * ALUA access state. 
··· 1674 1674 pr_err("Maximum ALUA alua_tg_pt_gps_count:" 1675 1675 " 0x0000ffff reached\n"); 1676 1676 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1677 - kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1678 1677 return -ENOSPC; 1679 1678 } 1680 1679 again: ··· 1754 1755 __target_attach_tg_pt_gp(lun, 1755 1756 dev->t10_alua.default_tg_pt_gp); 1756 1757 } else 1757 - lun->lun_tg_pt_gp = NULL; 1758 + rcu_assign_pointer(lun->lun_tg_pt_gp, NULL); 1758 1759 spin_unlock(&lun->lun_tg_pt_gp_lock); 1759 1760 1760 1761 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1761 1762 } 1762 1763 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1763 1764 1765 + synchronize_rcu(); 1764 1766 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1765 1767 } 1766 1768 ··· 1806 1806 assert_spin_locked(&lun->lun_tg_pt_gp_lock); 1807 1807 1808 1808 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1809 - lun->lun_tg_pt_gp = tg_pt_gp; 1809 + rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp); 1810 1810 list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list); 1811 1811 tg_pt_gp->tg_pt_gp_members++; 1812 1812 spin_lock(&lun->lun_deve_lock); ··· 1823 1823 spin_lock(&lun->lun_tg_pt_gp_lock); 1824 1824 __target_attach_tg_pt_gp(lun, tg_pt_gp); 1825 1825 spin_unlock(&lun->lun_tg_pt_gp_lock); 1826 + synchronize_rcu(); 1826 1827 } 1827 1828 1828 1829 static void __target_detach_tg_pt_gp(struct se_lun *lun, ··· 1835 1834 list_del_init(&lun->lun_tg_pt_gp_link); 1836 1835 tg_pt_gp->tg_pt_gp_members--; 1837 1836 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1838 - 1839 - lun->lun_tg_pt_gp = NULL; 1840 1837 } 1841 1838 1842 1839 void target_detach_tg_pt_gp(struct se_lun *lun) ··· 1842 1843 struct t10_alua_tg_pt_gp *tg_pt_gp; 1843 1844 1844 1845 spin_lock(&lun->lun_tg_pt_gp_lock); 1845 - tg_pt_gp = lun->lun_tg_pt_gp; 1846 - if (tg_pt_gp) 1846 + tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp, 1847 + lockdep_is_held(&lun->lun_tg_pt_gp_lock)); 1848 + if (tg_pt_gp) { 1847 1849 __target_detach_tg_pt_gp(lun, tg_pt_gp); 1850 + 
rcu_assign_pointer(lun->lun_tg_pt_gp, NULL); 1851 + } 1848 1852 spin_unlock(&lun->lun_tg_pt_gp_lock); 1853 + synchronize_rcu(); 1854 + } 1855 + 1856 + static void target_swap_tg_pt_gp(struct se_lun *lun, 1857 + struct t10_alua_tg_pt_gp *old_tg_pt_gp, 1858 + struct t10_alua_tg_pt_gp *new_tg_pt_gp) 1859 + { 1860 + assert_spin_locked(&lun->lun_tg_pt_gp_lock); 1861 + 1862 + if (old_tg_pt_gp) 1863 + __target_detach_tg_pt_gp(lun, old_tg_pt_gp); 1864 + __target_attach_tg_pt_gp(lun, new_tg_pt_gp); 1849 1865 } 1850 1866 1851 1867 ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) ··· 1869 1855 struct t10_alua_tg_pt_gp *tg_pt_gp; 1870 1856 ssize_t len = 0; 1871 1857 1872 - spin_lock(&lun->lun_tg_pt_gp_lock); 1873 - tg_pt_gp = lun->lun_tg_pt_gp; 1858 + rcu_read_lock(); 1859 + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); 1874 1860 if (tg_pt_gp) { 1875 1861 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1876 1862 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" ··· 1886 1872 "Offline" : "None", 1887 1873 core_alua_dump_status(lun->lun_tg_pt_secondary_stat)); 1888 1874 } 1889 - spin_unlock(&lun->lun_tg_pt_gp_lock); 1875 + rcu_read_unlock(); 1890 1876 1891 1877 return len; 1892 1878 } ··· 1933 1919 } 1934 1920 1935 1921 spin_lock(&lun->lun_tg_pt_gp_lock); 1936 - tg_pt_gp = lun->lun_tg_pt_gp; 1922 + tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp, 1923 + lockdep_is_held(&lun->lun_tg_pt_gp_lock)); 1937 1924 if (tg_pt_gp) { 1938 1925 /* 1939 1926 * Clearing an existing tg_pt_gp association, and replacing ··· 1952 1937 &tg_pt_gp->tg_pt_gp_group.cg_item), 1953 1938 tg_pt_gp->tg_pt_gp_id); 1954 1939 1955 - __target_detach_tg_pt_gp(lun, tg_pt_gp); 1956 - __target_attach_tg_pt_gp(lun, 1940 + target_swap_tg_pt_gp(lun, tg_pt_gp, 1957 1941 dev->t10_alua.default_tg_pt_gp); 1958 1942 spin_unlock(&lun->lun_tg_pt_gp_lock); 1959 1943 1960 - return count; 1944 + goto sync_rcu; 1961 1945 } 1962 - __target_detach_tg_pt_gp(lun, tg_pt_gp); 1963 1946 move = 1; 1964 
1947 } 1965 1948 1966 - __target_attach_tg_pt_gp(lun, tg_pt_gp_new); 1949 + target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new); 1967 1950 spin_unlock(&lun->lun_tg_pt_gp_lock); 1968 1951 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" 1969 1952 " Target Port Group: alua/%s, ID: %hu\n", (move) ? ··· 1972 1959 tg_pt_gp_new->tg_pt_gp_id); 1973 1960 1974 1961 core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); 1962 + sync_rcu: 1963 + synchronize_rcu(); 1975 1964 return count; 1976 1965 } 1977 1966
+1
drivers/target/target_core_configfs.c
··· 490 490 * fabric driver unload of TFO->module to proceed. 491 491 */ 492 492 rcu_barrier(); 493 + kfree(t->tf_tpg_base_cit.ct_attrs); 493 494 kfree(t); 494 495 return; 495 496 }
+2
drivers/target/target_core_device.c
··· 772 772 INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); 773 773 spin_lock_init(&dev->t10_alua.lba_map_lock); 774 774 775 + INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work); 776 + 775 777 dev->t10_wwn.t10_dev = dev; 776 778 /* 777 779 * Use OpenFabrics IEEE Company ID: 00 14 05
+76 -2
drivers/target/target_core_fabric_configfs.c
··· 815 815 .release = target_fabric_tpg_release, 816 816 }; 817 817 818 - TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL); 818 + static ssize_t target_fabric_tpg_base_enable_show(struct config_item *item, 819 + char *page) 820 + { 821 + return sysfs_emit(page, "%d\n", to_tpg(item)->enabled); 822 + } 819 823 824 + static ssize_t target_fabric_tpg_base_enable_store(struct config_item *item, 825 + const char *page, 826 + size_t count) 827 + { 828 + struct se_portal_group *se_tpg = to_tpg(item); 829 + int ret; 830 + bool op; 831 + 832 + ret = strtobool(page, &op); 833 + if (ret) 834 + return ret; 835 + 836 + if (se_tpg->enabled == op) 837 + return count; 838 + 839 + ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, op); 840 + if (ret) 841 + return ret; 842 + 843 + se_tpg->enabled = op; 844 + 845 + return count; 846 + } 847 + 848 + CONFIGFS_ATTR(target_fabric_tpg_base_, enable); 849 + 850 + static int 851 + target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf) 852 + { 853 + struct config_item_type *cit = &tf->tf_tpg_base_cit; 854 + struct configfs_attribute **attrs = NULL; 855 + size_t nr_attrs = 0; 856 + int i = 0; 857 + 858 + if (tf->tf_ops->tfc_tpg_base_attrs) 859 + while (tf->tf_ops->tfc_tpg_base_attrs[nr_attrs] != NULL) 860 + nr_attrs++; 861 + 862 + if (tf->tf_ops->fabric_enable_tpg) 863 + nr_attrs++; 864 + 865 + if (nr_attrs == 0) 866 + goto done; 867 + 868 + /* + 1 for final NULL in the array */ 869 + attrs = kcalloc(nr_attrs + 1, sizeof(*attrs), GFP_KERNEL); 870 + if (!attrs) 871 + return -ENOMEM; 872 + 873 + if (tf->tf_ops->tfc_tpg_base_attrs) 874 + for (; tf->tf_ops->tfc_tpg_base_attrs[i] != NULL; i++) 875 + attrs[i] = tf->tf_ops->tfc_tpg_base_attrs[i]; 876 + 877 + if (tf->tf_ops->fabric_enable_tpg) 878 + attrs[i] = &target_fabric_tpg_base_attr_enable; 879 + 880 + done: 881 + cit->ct_item_ops = &target_fabric_tpg_base_item_ops; 882 + cit->ct_attrs = attrs; 883 + cit->ct_owner = tf->tf_ops->module; 884 + pr_debug("Setup 
generic tpg_base\n"); 885 + 886 + return 0; 887 + } 820 888 /* End of tfc_tpg_base_cit */ 821 889 822 890 /* Start of tfc_tpg_cit */ ··· 1096 1028 1097 1029 int target_fabric_setup_cits(struct target_fabric_configfs *tf) 1098 1030 { 1031 + int ret; 1032 + 1099 1033 target_fabric_setup_discovery_cit(tf); 1100 1034 target_fabric_setup_wwn_cit(tf); 1101 1035 target_fabric_setup_wwn_fabric_stats_cit(tf); 1102 1036 target_fabric_setup_wwn_param_cit(tf); 1103 1037 target_fabric_setup_tpg_cit(tf); 1104 - target_fabric_setup_tpg_base_cit(tf); 1038 + 1039 + ret = target_fabric_setup_tpg_base_cit(tf); 1040 + if (ret) 1041 + return ret; 1042 + 1105 1043 target_fabric_setup_tpg_port_cit(tf); 1106 1044 target_fabric_setup_tpg_port_stat_cit(tf); 1107 1045 target_fabric_setup_tpg_lun_cit(tf);
+1 -3
drivers/target/target_core_iblock.c
··· 636 636 { 637 637 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 638 638 struct block_device *bd = ib_dev->ibd_bd; 639 - char buf[BDEVNAME_SIZE]; 640 639 ssize_t bl = 0; 641 640 642 641 if (bd) 643 - bl += sprintf(b + bl, "iBlock device: %s", 644 - bdevname(bd, buf)); 642 + bl += sprintf(b + bl, "iBlock device: %pg", bd); 645 643 if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH) 646 644 bl += sprintf(b + bl, " UDEV PATH: %s", 647 645 ib_dev->ibd_udev_path);
+1
drivers/target/target_core_internal.h
··· 151 151 void transport_clear_lun_ref(struct se_lun *); 152 152 sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 153 153 void target_qf_do_work(struct work_struct *work); 154 + void target_do_delayed_work(struct work_struct *work); 154 155 bool target_check_wce(struct se_device *dev); 155 156 bool target_check_fua(struct se_device *dev); 156 157 void __target_execute_cmd(struct se_cmd *, bool);
+63 -31
drivers/target/target_core_transport.c
··· 1511 1511 1512 1512 ret = dev->transport->parse_cdb(cmd); 1513 1513 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1514 - pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1515 - cmd->se_tfo->fabric_name, 1516 - cmd->se_sess->se_node_acl->initiatorname, 1517 - cmd->t_task_cdb[0]); 1514 + pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1515 + cmd->se_tfo->fabric_name, 1516 + cmd->se_sess->se_node_acl->initiatorname, 1517 + cmd->t_task_cdb[0]); 1518 1518 if (ret) 1519 1519 return ret; 1520 1520 ··· 2173 2173 */ 2174 2174 switch (cmd->sam_task_attr) { 2175 2175 case TCM_HEAD_TAG: 2176 + atomic_inc_mb(&dev->non_ordered); 2176 2177 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 2177 2178 cmd->t_task_cdb[0]); 2178 2179 return false; 2179 2180 case TCM_ORDERED_TAG: 2180 - atomic_inc_mb(&dev->dev_ordered_sync); 2181 + atomic_inc_mb(&dev->delayed_cmd_count); 2181 2182 2182 2183 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 2183 2184 cmd->t_task_cdb[0]); 2184 - 2185 - /* 2186 - * Execute an ORDERED command if no other older commands 2187 - * exist that need to be completed first. 2188 - */ 2189 - if (!atomic_read(&dev->simple_cmds)) 2190 - return false; 2191 2185 break; 2192 2186 default: 2193 2187 /* 2194 2188 * For SIMPLE and UNTAGGED Task Attribute commands 2195 2189 */ 2196 - atomic_inc_mb(&dev->simple_cmds); 2190 + atomic_inc_mb(&dev->non_ordered); 2191 + 2192 + if (atomic_read(&dev->delayed_cmd_count) == 0) 2193 + return false; 2197 2194 break; 2198 2195 } 2199 2196 2200 - if (atomic_read(&dev->dev_ordered_sync) == 0) 2201 - return false; 2197 + if (cmd->sam_task_attr != TCM_ORDERED_TAG) { 2198 + atomic_inc_mb(&dev->delayed_cmd_count); 2199 + /* 2200 + * We will account for this when we dequeue from the delayed 2201 + * list. 
2202 + */ 2203 + atomic_dec_mb(&dev->non_ordered); 2204 + } 2205 + 2206 + spin_lock_irq(&cmd->t_state_lock); 2207 + cmd->transport_state &= ~CMD_T_SENT; 2208 + spin_unlock_irq(&cmd->t_state_lock); 2202 2209 2203 2210 spin_lock(&dev->delayed_cmd_lock); 2204 2211 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); ··· 2213 2206 2214 2207 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn", 2215 2208 cmd->t_task_cdb[0], cmd->sam_task_attr); 2209 + /* 2210 + * We may have no non ordered cmds when this function started or we 2211 + * could have raced with the last simple/head cmd completing, so kick 2212 + * the delayed handler here. 2213 + */ 2214 + schedule_work(&dev->delayed_cmd_work); 2216 2215 return true; 2217 2216 } 2218 2217 ··· 2241 2228 if (target_write_prot_action(cmd)) 2242 2229 return; 2243 2230 2244 - if (target_handle_task_attr(cmd)) { 2245 - spin_lock_irq(&cmd->t_state_lock); 2246 - cmd->transport_state &= ~CMD_T_SENT; 2247 - spin_unlock_irq(&cmd->t_state_lock); 2231 + if (target_handle_task_attr(cmd)) 2248 2232 return; 2249 - } 2250 2233 2251 2234 __target_execute_cmd(cmd, true); 2252 2235 } ··· 2252 2243 * Process all commands up to the last received ORDERED task attribute which 2253 2244 * requires another blocking boundary 2254 2245 */ 2255 - static void target_restart_delayed_cmds(struct se_device *dev) 2246 + void target_do_delayed_work(struct work_struct *work) 2256 2247 { 2257 - for (;;) { 2248 + struct se_device *dev = container_of(work, struct se_device, 2249 + delayed_cmd_work); 2250 + 2251 + spin_lock(&dev->delayed_cmd_lock); 2252 + while (!dev->ordered_sync_in_progress) { 2258 2253 struct se_cmd *cmd; 2259 2254 2260 - spin_lock(&dev->delayed_cmd_lock); 2261 - if (list_empty(&dev->delayed_cmd_list)) { 2262 - spin_unlock(&dev->delayed_cmd_lock); 2255 + if (list_empty(&dev->delayed_cmd_list)) 2263 2256 break; 2264 - } 2265 2257 2266 2258 cmd = list_entry(dev->delayed_cmd_list.next, 2267 2259 struct se_cmd, 
se_delayed_node); 2260 + 2261 + if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2262 + /* 2263 + * Check if we started with: 2264 + * [ordered] [simple] [ordered] 2265 + * and we are now at the last ordered so we have to wait 2266 + * for the simple cmd. 2267 + */ 2268 + if (atomic_read(&dev->non_ordered) > 0) 2269 + break; 2270 + 2271 + dev->ordered_sync_in_progress = true; 2272 + } 2273 + 2268 2274 list_del(&cmd->se_delayed_node); 2275 + atomic_dec_mb(&dev->delayed_cmd_count); 2269 2276 spin_unlock(&dev->delayed_cmd_lock); 2277 + 2278 + if (cmd->sam_task_attr != TCM_ORDERED_TAG) 2279 + atomic_inc_mb(&dev->non_ordered); 2270 2280 2271 2281 cmd->transport_state |= CMD_T_SENT; 2272 2282 2273 2283 __target_execute_cmd(cmd, true); 2274 2284 2275 - if (cmd->sam_task_attr == TCM_ORDERED_TAG) 2276 - break; 2285 + spin_lock(&dev->delayed_cmd_lock); 2277 2286 } 2287 + spin_unlock(&dev->delayed_cmd_lock); 2278 2288 } 2279 2289 2280 2290 /* ··· 2311 2283 goto restart; 2312 2284 2313 2285 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 2314 - atomic_dec_mb(&dev->simple_cmds); 2286 + atomic_dec_mb(&dev->non_ordered); 2315 2287 dev->dev_cur_ordered_id++; 2316 2288 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 2289 + atomic_dec_mb(&dev->non_ordered); 2317 2290 dev->dev_cur_ordered_id++; 2318 2291 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 2319 2292 dev->dev_cur_ordered_id); 2320 2293 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 2321 - atomic_dec_mb(&dev->dev_ordered_sync); 2294 + spin_lock(&dev->delayed_cmd_lock); 2295 + dev->ordered_sync_in_progress = false; 2296 + spin_unlock(&dev->delayed_cmd_lock); 2322 2297 2323 2298 dev->dev_cur_ordered_id++; 2324 2299 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", ··· 2330 2299 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; 2331 2300 2332 2301 restart: 2333 - target_restart_delayed_cmds(dev); 2302 + if (atomic_read(&dev->delayed_cmd_count) > 0) 2303 + schedule_work(&dev->delayed_cmd_work); 2334 2304 } 
2335 2305 2336 2306 static void transport_complete_qf(struct se_cmd *cmd)
+3 -4
drivers/target/target_core_user.c
··· 523 523 rcu_read_unlock(); 524 524 525 525 for (i = cnt; i < page_cnt; i++) { 526 - /* try to get new page from the mm */ 527 - page = alloc_page(GFP_NOIO); 526 + /* try to get new zeroed page from the mm */ 527 + page = alloc_page(GFP_NOIO | __GFP_ZERO); 528 528 if (!page) 529 529 break; 530 530 ··· 1255 1255 { 1256 1256 int i = 0, cmd_cnt = 0; 1257 1257 bool unqueued = false; 1258 - uint16_t *cmd_ids = NULL; 1259 1258 struct tcmu_cmd *cmd; 1260 1259 struct se_cmd *se_cmd; 1261 1260 struct tcmu_tmr *tmr; ··· 1291 1292 pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n", 1292 1293 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); 1293 1294 1294 - tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO); 1295 + tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO); 1295 1296 if (!tmr) 1296 1297 goto unlock; 1297 1298
+6 -8
drivers/target/target_core_xcopy.c
··· 295 295 return -EINVAL; 296 296 } 297 297 298 - static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop, 299 - unsigned char *p) 298 + static int target_xcopy_parse_segdesc_02(struct xcopy_op *xop, unsigned char *p) 300 299 { 301 300 unsigned char *desc = p; 302 301 int dc = (desc[1] & 0x02); ··· 331 332 return 0; 332 333 } 333 334 334 - static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, 335 - struct xcopy_op *xop, unsigned char *p, 336 - unsigned int sdll, sense_reason_t *sense_ret) 335 + static int target_xcopy_parse_segment_descriptors(struct xcopy_op *xop, 336 + unsigned char *p, unsigned int sdll, 337 + sense_reason_t *sense_ret) 337 338 { 338 339 unsigned char *desc = p; 339 340 unsigned int start = 0; ··· 361 362 */ 362 363 switch (desc[0]) { 363 364 case 0x02: 364 - rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc); 365 + rc = target_xcopy_parse_segdesc_02(xop, desc); 365 366 if (rc < 0) 366 367 goto out; 367 368 ··· 839 840 */ 840 841 seg_desc = &p[16] + tdll; 841 842 842 - rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, 843 - sdll, &ret); 843 + rc = target_xcopy_parse_segment_descriptors(xop, seg_desc, sdll, &ret); 844 844 if (rc <= 0) 845 845 goto out; 846 846
+6 -25
drivers/usb/gadget/function/f_tcm.c
··· 1495 1495 NULL, 1496 1496 }; 1497 1497 1498 - static ssize_t tcm_usbg_tpg_enable_show(struct config_item *item, char *page) 1499 - { 1500 - struct se_portal_group *se_tpg = to_tpg(item); 1501 - struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); 1502 - 1503 - return snprintf(page, PAGE_SIZE, "%u\n", tpg->gadget_connect); 1504 - } 1505 - 1506 1498 static int usbg_attach(struct usbg_tpg *); 1507 1499 static void usbg_detach(struct usbg_tpg *); 1508 1500 1509 - static ssize_t tcm_usbg_tpg_enable_store(struct config_item *item, 1510 - const char *page, size_t count) 1501 + static int usbg_enable_tpg(struct se_portal_group *se_tpg, bool enable) 1511 1502 { 1512 - struct se_portal_group *se_tpg = to_tpg(item); 1513 1503 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); 1514 - bool op; 1515 - ssize_t ret; 1504 + int ret = 0; 1516 1505 1517 - ret = strtobool(page, &op); 1518 - if (ret) 1519 - return ret; 1520 - 1521 - if ((op && tpg->gadget_connect) || (!op && !tpg->gadget_connect)) 1522 - return -EINVAL; 1523 - 1524 - if (op) 1506 + if (enable) 1525 1507 ret = usbg_attach(tpg); 1526 1508 else 1527 1509 usbg_detach(tpg); 1528 1510 if (ret) 1529 1511 return ret; 1530 1512 1531 - tpg->gadget_connect = op; 1513 + tpg->gadget_connect = enable; 1532 1514 1533 - return count; 1515 + return 0; 1534 1516 } 1535 1517 1536 1518 static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page) ··· 1655 1673 return count; 1656 1674 } 1657 1675 1658 - CONFIGFS_ATTR(tcm_usbg_tpg_, enable); 1659 1676 CONFIGFS_ATTR(tcm_usbg_tpg_, nexus); 1660 1677 1661 1678 static struct configfs_attribute *usbg_base_attrs[] = { 1662 - &tcm_usbg_tpg_attr_enable, 1663 1679 &tcm_usbg_tpg_attr_nexus, 1664 1680 NULL, 1665 1681 }; ··· 1710 1730 .fabric_make_wwn = usbg_make_tport, 1711 1731 .fabric_drop_wwn = usbg_drop_tport, 1712 1732 .fabric_make_tpg = usbg_make_tpg, 1733 + .fabric_enable_tpg = usbg_enable_tpg, 1713 1734 .fabric_drop_tpg = usbg_drop_tpg, 
1714 1735 .fabric_post_link = usbg_port_link, 1715 1736 .fabric_pre_unlink = usbg_port_unlink,
+2 -3
drivers/usb/image/microtek.c
··· 561 561 desc->context.data_pipe = pipe; 562 562 } 563 563 564 - 565 - static int 566 - mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback) 564 + static int mts_scsi_queuecommand_lck(struct scsi_cmnd *srb) 567 565 { 566 + mts_scsi_cmnd_callback callback = scsi_done; 568 567 struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); 569 568 int res; 570 569
+7 -6
drivers/usb/storage/scsiglue.c
··· 363 363 364 364 /* queue a command */ 365 365 /* This is always called with scsi_lock(host) held */ 366 - static int queuecommand_lck(struct scsi_cmnd *srb, 367 - void (*done)(struct scsi_cmnd *)) 366 + static int queuecommand_lck(struct scsi_cmnd *srb) 368 367 { 368 + void (*done)(struct scsi_cmnd *) = scsi_done; 369 369 struct us_data *us = host_to_us(srb->device->host); 370 370 371 371 /* check for state-transition errors */ ··· 393 393 } 394 394 395 395 /* enqueue the command and wake up the control thread */ 396 - srb->scsi_done = done; 397 396 us->srb = srb; 398 397 complete(&us->cmnd_ready); 399 398 ··· 587 588 } 588 589 static DEVICE_ATTR_RW(max_sectors); 589 590 590 - static struct device_attribute *sysfs_device_attr_list[] = { 591 - &dev_attr_max_sectors, 591 + static struct attribute *usb_sdev_attrs[] = { 592 + &dev_attr_max_sectors.attr, 592 593 NULL, 593 594 }; 595 + 596 + ATTRIBUTE_GROUPS(usb_sdev); 594 597 595 598 /* 596 599 * this defines our host template, with which we'll allocate hosts ··· 654 653 .skip_settle_delay = 1, 655 654 656 655 /* sysfs device attributes */ 657 - .sdev_attrs = sysfs_device_attr_list, 656 + .sdev_groups = usb_sdev_groups, 658 657 659 658 /* module management */ 660 659 .module = THIS_MODULE
+5 -8
drivers/usb/storage/uas.c
··· 256 256 return -EBUSY; 257 257 devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL; 258 258 uas_free_unsubmitted_urbs(cmnd); 259 - cmnd->scsi_done(cmnd); 259 + scsi_done(cmnd); 260 260 return 0; 261 261 } 262 262 ··· 633 633 return 0; 634 634 } 635 635 636 - static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, 637 - void (*done)(struct scsi_cmnd *)) 636 + static int uas_queuecommand_lck(struct scsi_cmnd *cmnd) 638 637 { 639 638 struct scsi_device *sdev = cmnd->device; 640 639 struct uas_dev_info *devinfo = sdev->hostdata; ··· 652 653 memcpy(cmnd->sense_buffer, usb_stor_sense_invalidCDB, 653 654 sizeof(usb_stor_sense_invalidCDB)); 654 655 cmnd->result = SAM_STAT_CHECK_CONDITION; 655 - cmnd->scsi_done(cmnd); 656 + scsi_done(cmnd); 656 657 return 0; 657 658 } 658 659 ··· 660 661 661 662 if (devinfo->resetting) { 662 663 set_host_byte(cmnd, DID_ERROR); 663 - cmnd->scsi_done(cmnd); 664 + scsi_done(cmnd); 664 665 goto zombie; 665 666 } 666 667 ··· 673 674 spin_unlock_irqrestore(&devinfo->lock, flags); 674 675 return SCSI_MLQUEUE_DEVICE_BUSY; 675 676 } 676 - 677 - cmnd->scsi_done = done; 678 677 679 678 memset(cmdinfo, 0, sizeof(*cmdinfo)); 680 679 cmdinfo->uas_tag = idx + 1; /* uas-tag == usb-stream-id, so 1 based */ ··· 703 706 */ 704 707 if (err == -ENODEV) { 705 708 set_host_byte(cmnd, DID_ERROR); 706 - cmnd->scsi_done(cmnd); 709 + scsi_done(cmnd); 707 710 goto zombie; 708 711 } 709 712 if (err) {
+2 -2
drivers/usb/storage/usb.c
··· 388 388 if (srb->result == DID_ABORT << 16) { 389 389 SkipForAbort: 390 390 usb_stor_dbg(us, "scsi command aborted\n"); 391 - srb = NULL; /* Don't call srb->scsi_done() */ 391 + srb = NULL; /* Don't call scsi_done() */ 392 392 } 393 393 394 394 /* ··· 417 417 if (srb) { 418 418 usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", 419 419 srb->result); 420 - srb->scsi_done(srb); 420 + scsi_done(srb); 421 421 } 422 422 } /* for (;;) */ 423 423
+4 -4
include/linux/libata.h
··· 1403 1403 */ 1404 1404 extern const struct ata_port_operations ata_base_port_ops; 1405 1405 extern const struct ata_port_operations sata_port_ops; 1406 - extern struct device_attribute *ata_common_sdev_attrs[]; 1406 + extern const struct attribute_group *ata_common_sdev_groups[]; 1407 1407 1408 1408 /* 1409 1409 * All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated ··· 1433 1433 1434 1434 #define ATA_BASE_SHT(drv_name) \ 1435 1435 ATA_SUBBASE_SHT(drv_name), \ 1436 - .sdev_attrs = ata_common_sdev_attrs 1436 + .sdev_groups = ata_common_sdev_groups 1437 1437 1438 1438 #ifdef CONFIG_SATA_HOST 1439 - extern struct device_attribute *ata_ncq_sdev_attrs[]; 1439 + extern const struct attribute_group *ata_ncq_sdev_groups[]; 1440 1440 1441 1441 #define ATA_NCQ_SHT(drv_name) \ 1442 1442 ATA_SUBBASE_SHT(drv_name), \ 1443 - .sdev_attrs = ata_ncq_sdev_attrs, \ 1443 + .sdev_groups = ata_ncq_sdev_groups, \ 1444 1444 .change_queue_depth = ata_scsi_change_queue_depth 1445 1445 #endif 1446 1446
+1
include/scsi/libsas.h
··· 664 664 665 665 int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates); 666 666 int sas_phy_reset(struct sas_phy *phy, int hard_reset); 667 + int sas_phy_enable(struct sas_phy *phy, int enable); 667 668 extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); 668 669 extern int sas_target_alloc(struct scsi_target *); 669 670 extern int sas_slave_configure(struct scsi_device *);
+9 -5
include/scsi/scsi_cmnd.h
··· 10 10 #include <linux/timer.h> 11 11 #include <linux/scatterlist.h> 12 12 #include <scsi/scsi_device.h> 13 - #include <scsi/scsi_host.h> 14 13 #include <scsi/scsi_request.h> 15 14 16 15 struct Scsi_Host; ··· 64 65 #define SCMD_STATE_COMPLETE 0 65 66 #define SCMD_STATE_INFLIGHT 1 66 67 68 + enum scsi_cmnd_submitter { 69 + SUBMITTED_BY_BLOCK_LAYER = 0, 70 + SUBMITTED_BY_SCSI_ERROR_HANDLER = 1, 71 + SUBMITTED_BY_SCSI_RESET_IOCTL = 2, 72 + } __packed; 73 + 67 74 struct scsi_cmnd { 68 75 struct scsi_request req; 69 76 struct scsi_device *device; ··· 95 90 unsigned char prot_op; 96 91 unsigned char prot_type; 97 92 unsigned char prot_flags; 93 + enum scsi_cmnd_submitter submitter; 98 94 99 95 unsigned short cmd_len; 100 96 enum dma_data_direction sc_data_direction; ··· 122 116 * CHECK CONDITION is received on original 123 117 * command (auto-sense). Length must be 124 118 * SCSI_SENSE_BUFFERSIZE bytes. */ 125 - 126 - /* Low-level done function - can be used by low-level driver to point 127 - * to completion function. Not used by mid/upper level code. */ 128 - void (*scsi_done) (struct scsi_cmnd *); 129 119 130 120 /* 131 121 * The following fields can be written to by the host specific code. ··· 166 164 167 165 return *(struct scsi_driver **)rq->rq_disk->private_data; 168 166 } 167 + 168 + void scsi_done(struct scsi_cmnd *cmd); 169 169 170 170 extern void scsi_finish_command(struct scsi_cmnd *cmd); 171 171
+6
include/scsi/scsi_device.h
··· 225 225 226 226 struct device sdev_gendev, 227 227 sdev_dev; 228 + /* 229 + * The array size 6 provides space for one attribute group for the 230 + * SCSI core, four attribute groups defined by SCSI LLDs and one 231 + * terminating NULL pointer. 232 + */ 233 + const struct attribute_group *gendev_attr_groups[6]; 228 234 229 235 struct execute_work ew; /* used to get process context on put */ 230 236 struct work_struct requeue_work;
+9 -18
include/scsi/scsi_host.h
··· 474 474 #define SCSI_DEFAULT_HOST_BLOCKED 7 475 475 476 476 /* 477 - * Pointer to the sysfs class properties for this host, NULL terminated. 477 + * Pointer to the SCSI host sysfs attribute groups, NULL terminated. 478 478 */ 479 - struct device_attribute **shost_attrs; 480 - 481 - /* 482 - * Pointer to the SCSI device properties for this host, NULL terminated. 483 - */ 484 - struct device_attribute **sdev_attrs; 479 + const struct attribute_group **shost_groups; 485 480 486 481 /* 487 482 * Pointer to the SCSI device attribute groups for this host, ··· 511 516 unsigned long irq_flags; \ 512 517 int rc; \ 513 518 spin_lock_irqsave(shost->host_lock, irq_flags); \ 514 - rc = func_name##_lck (cmd, cmd->scsi_done); \ 519 + rc = func_name##_lck(cmd); \ 515 520 spin_unlock_irqrestore(shost->host_lock, irq_flags); \ 516 521 return rc; \ 517 522 } ··· 690 695 691 696 /* ldm bits */ 692 697 struct device shost_gendev, shost_dev; 698 + /* 699 + * The array size 3 provides space for one attribute group defined by 700 + * the SCSI core, one attribute group defined by the SCSI LLD and one 701 + * terminating NULL pointer. 702 + */ 703 + const struct attribute_group *shost_dev_attr_groups[3]; 693 704 694 705 /* 695 706 * Points to the transport data (if any) which is allocated ··· 797 796 bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv); 798 797 799 798 struct class_container; 800 - 801 - /* 802 - * These two functions are used to allocate and free a pseudo device 803 - * which will connect to the host adapter itself rather than any 804 - * physical device. You must deallocate when you are done with the 805 - * thing. This physical pseudo-device isn't real and won't be available 806 - * from any high-level drivers. 807 - */ 808 - extern void scsi_free_host_dev(struct scsi_device *); 809 - extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *); 810 799 811 800 /* 812 801 * DIF defines the exchange of protection information between
+1
include/scsi/scsi_transport_sas.h
··· 41 41 SAS_LINK_RATE_G2 = SAS_LINK_RATE_3_0_GBPS, 42 42 SAS_LINK_RATE_6_0_GBPS = 10, 43 43 SAS_LINK_RATE_12_0_GBPS = 11, 44 + SAS_LINK_RATE_22_5_GBPS = 12, 44 45 /* These are virtual to the transport class and may never 45 46 * be signalled normally since the standard defined field 46 47 * is only 4 bits */
+6 -3
include/target/target_core_base.h
··· 749 749 750 750 /* ALUA target port group linkage */ 751 751 struct list_head lun_tg_pt_gp_link; 752 - struct t10_alua_tg_pt_gp *lun_tg_pt_gp; 752 + struct t10_alua_tg_pt_gp __rcu *lun_tg_pt_gp; 753 753 spinlock_t lun_tg_pt_gp_lock; 754 754 755 755 struct se_portal_group *lun_tpg; ··· 812 812 atomic_long_t read_bytes; 813 813 atomic_long_t write_bytes; 814 814 /* Active commands on this virtual SE device */ 815 - atomic_t simple_cmds; 816 - atomic_t dev_ordered_sync; 815 + atomic_t non_ordered; 816 + bool ordered_sync_in_progress; 817 + atomic_t delayed_cmd_count; 817 818 atomic_t dev_qf_count; 818 819 u32 export_count; 819 820 spinlock_t delayed_cmd_lock; ··· 835 834 struct list_head dev_sep_list; 836 835 struct list_head dev_tmr_list; 837 836 struct work_struct qf_work_queue; 837 + struct work_struct delayed_cmd_work; 838 838 struct list_head delayed_cmd_list; 839 839 struct list_head qf_cmd_list; 840 840 /* Pointer to associated SE HBA */ ··· 902 900 * Negative values can be used by fabric drivers for internal use TPGs. 903 901 */ 904 902 int proto_id; 903 + bool enabled; 905 904 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 906 905 atomic_t tpg_pr_ref_count; 907 906 /* Spinlock for adding/removing ACLed Nodes */
+1
include/target/target_core_fabric.h
··· 89 89 void (*add_wwn_groups)(struct se_wwn *); 90 90 struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, 91 91 const char *); 92 + int (*fabric_enable_tpg)(struct se_portal_group *se_tpg, bool enable); 92 93 void (*fabric_drop_tpg)(struct se_portal_group *); 93 94 int (*fabric_post_link)(struct se_portal_group *, 94 95 struct se_lun *);