Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target updates from Nicholas Bellinger:
"The highlights include:

- numerous target-core-user improvements related to queue full and
timeout handling. (MNC)

- prevent target-core-user corruption when invalid data page is
requested. (MNC)

- add target-core device action configfs attributes to allow
user-space to trigger events separate from existing attributes
exposed to end-users. (MNC)

- fix iscsi-target NULL pointer dereference 4.6+ regression in CHAP
error path. (David Disseldorp)

- avoid target-core backend UNMAP callbacks if range is zero. (Andrei
Vagin)

- fix an iscsi-target 4.14+ regression related to multiple PDU logins,
which was exposed due to the removal of TCP prequeue support. (Florian
Westphal + MNC)

Also, there is an iser-target bug still being worked on for post -rc1
code to address a long-standing issue resulting in persistent
ib_post_send() failures, for RNICs with small max_send_sge"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (36 commits)
iscsi-target: make sure to wake up sleeping login worker
tcmu: Fix trailing semicolon
tcmu: fix cmd user after free
target: fix destroy device in target_configure_device
tcmu: allow userspace to reset ring
target core: add device action configfs files
tcmu: fix error return code in tcmu_configure_device()
target_core_user: add cmd id to broken ring message
target: add SAM_STAT_BUSY sense reason
tcmu: prevent corruption when invalid data page requested
target: don't call an unmap callback if a range length is zero
target/iscsi: avoid NULL dereference in CHAP auth error path
cxgbit: call neigh_event_send() to update MAC address
target: tcm_loop: Use seq_puts() in tcm_loop_show_info()
target: tcm_loop: Delete an unnecessary return statement in tcm_loop_submission_work()
target: tcm_loop: Delete two unnecessary variable initialisations in tcm_loop_issue_tmr()
target: tcm_loop: Combine substrings for 26 messages
target: tcm_loop: Improve a size determination in two functions
target: tcm_loop: Delete an error message for a failed memory allocation in four functions
sbp-target: Delete an error message for a failed memory allocation in three functions
...

+799 -390
+3
drivers/target/iscsi/cxgbit/cxgbit_cm.c
··· 893 893 return -ENODEV; 894 894 895 895 rcu_read_lock(); 896 + if (!(n->nud_state & NUD_VALID)) 897 + neigh_event_send(n, NULL); 898 + 896 899 ret = -ENOMEM; 897 900 if (n->dev->flags & IFF_LOOPBACK) { 898 901 if (iptype == 4)
+2 -1
drivers/target/iscsi/iscsi_target_auth.c
··· 421 421 auth_ret = 0; 422 422 out: 423 423 kzfree(desc); 424 - crypto_free_shash(tfm); 424 + if (tfm) 425 + crypto_free_shash(tfm); 425 426 kfree(challenge); 426 427 kfree(challenge_binhex); 427 428 return auth_ret;
+3
drivers/target/iscsi/iscsi_target_nego.c
··· 432 432 if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { 433 433 write_unlock_bh(&sk->sk_callback_lock); 434 434 pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn); 435 + if (iscsi_target_sk_data_ready == conn->orig_data_ready) 436 + return; 437 + conn->orig_data_ready(sk); 435 438 return; 436 439 } 437 440
+61 -84
drivers/target/loopback/tcm_loop.c
··· 64 64 65 65 static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host) 66 66 { 67 - seq_printf(m, "tcm_loop_proc_info()\n"); 67 + seq_puts(m, "tcm_loop_proc_info()\n"); 68 68 return 0; 69 69 } 70 70 ··· 123 123 } 124 124 tl_nexus = tl_tpg->tl_nexus; 125 125 if (!tl_nexus) { 126 - scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" 127 - " does not exist\n"); 126 + scmd_printk(KERN_ERR, sc, 127 + "TCM_Loop I_T Nexus does not exist\n"); 128 128 set_host_byte(sc, DID_ERROR); 129 129 goto out_done; 130 130 } ··· 166 166 out_done: 167 167 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); 168 168 sc->scsi_done(sc); 169 - return; 170 169 } 171 170 172 171 /* ··· 176 177 { 177 178 struct tcm_loop_cmd *tl_cmd; 178 179 179 - pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x" 180 - " scsi_buf_len: %u\n", sc->device->host->host_no, 181 - sc->device->id, sc->device->channel, sc->device->lun, 182 - sc->cmnd[0], scsi_bufflen(sc)); 180 + pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n", 181 + __func__, sc->device->host->host_no, sc->device->id, 182 + sc->device->channel, sc->device->lun, sc->cmnd[0], 183 + scsi_bufflen(sc)); 183 184 184 185 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); 185 186 if (!tl_cmd) { 186 - pr_err("Unable to allocate struct tcm_loop_cmd\n"); 187 187 set_host_byte(sc, DID_ERROR); 188 188 sc->scsi_done(sc); 189 189 return 0; ··· 202 204 static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, 203 205 u64 lun, int task, enum tcm_tmreq_table tmr) 204 206 { 205 - struct se_cmd *se_cmd = NULL; 207 + struct se_cmd *se_cmd; 206 208 struct se_session *se_sess; 207 209 struct tcm_loop_nexus *tl_nexus; 208 - struct tcm_loop_cmd *tl_cmd = NULL; 210 + struct tcm_loop_cmd *tl_cmd; 209 211 int ret = TMR_FUNCTION_FAILED, rc; 210 212 211 213 /* ··· 213 215 */ 214 216 tl_nexus = tl_tpg->tl_nexus; 215 217 if (!tl_nexus) { 216 - pr_err("Unable to perform device reset without" 217 - " active I_T Nexus\n"); 218 + 
pr_err("Unable to perform device reset without active I_T Nexus\n"); 218 219 return ret; 219 220 } 220 221 221 222 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); 222 - if (!tl_cmd) { 223 - pr_err("Unable to allocate memory for tl_cmd\n"); 223 + if (!tl_cmd) 224 224 return ret; 225 - } 226 225 227 226 init_completion(&tl_cmd->tmr_done); 228 227 ··· 293 298 */ 294 299 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 295 300 if (!tl_hba) { 296 - pr_err("Unable to perform device reset without" 297 - " active I_T Nexus\n"); 301 + pr_err("Unable to perform device reset without active I_T Nexus\n"); 298 302 return FAILED; 299 303 } 300 304 /* ··· 411 417 412 418 ret = device_register(&tl_hba->dev); 413 419 if (ret) { 414 - pr_err("device_register() failed for" 415 - " tl_hba->dev: %d\n", ret); 420 + pr_err("device_register() failed for tl_hba->dev: %d\n", ret); 416 421 return -ENODEV; 417 422 } 418 423 ··· 440 447 441 448 ret = driver_register(&tcm_loop_driverfs); 442 449 if (ret) { 443 - pr_err("driver_register() failed for" 444 - "tcm_loop_driverfs\n"); 450 + pr_err("driver_register() failed for tcm_loop_driverfs\n"); 445 451 goto bus_unreg; 446 452 } 447 453 ··· 579 587 struct tcm_loop_cmd, tl_se_cmd); 580 588 struct scsi_cmnd *sc = tl_cmd->sc; 581 589 582 - pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p" 583 - " cdb: 0x%02x\n", sc, sc->cmnd[0]); 590 + pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", 591 + __func__, sc, sc->cmnd[0]); 584 592 585 593 sc->result = SAM_STAT_GOOD; 586 594 set_host_byte(sc, DID_OK); ··· 597 605 struct tcm_loop_cmd, tl_se_cmd); 598 606 struct scsi_cmnd *sc = tl_cmd->sc; 599 607 600 - pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p" 601 - " cdb: 0x%02x\n", sc, sc->cmnd[0]); 608 + pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", 609 + __func__, sc, sc->cmnd[0]); 602 610 603 611 if (se_cmd->sense_buffer && 604 612 ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || ··· 683 691 
sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, 684 692 se_lun->unpacked_lun); 685 693 if (!sd) { 686 - pr_err("Unable to locate struct scsi_device for %d:%d:" 687 - "%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); 694 + pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n", 695 + 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); 688 696 return; 689 697 } 690 698 /* ··· 764 772 return -EEXIST; 765 773 } 766 774 767 - tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); 768 - if (!tl_nexus) { 769 - pr_err("Unable to allocate struct tcm_loop_nexus\n"); 775 + tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL); 776 + if (!tl_nexus) 770 777 return -ENOMEM; 771 - } 772 778 773 779 tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0, 774 780 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS, ··· 777 787 return ret; 778 788 } 779 789 780 - pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 781 - " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 782 - name); 790 + pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n", 791 + tcm_loop_dump_proto_id(tl_hba), name); 783 792 return 0; 784 793 } 785 794 ··· 797 808 return -ENODEV; 798 809 799 810 if (atomic_read(&tpg->tl_tpg_port_count)) { 800 - pr_err("Unable to remove TCM_Loop I_T Nexus with" 801 - " active TPG port count: %d\n", 802 - atomic_read(&tpg->tl_tpg_port_count)); 811 + pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n", 812 + atomic_read(&tpg->tl_tpg_port_count)); 803 813 return -EPERM; 804 814 } 805 815 806 - pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" 807 - " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba), 808 - tl_nexus->se_sess->se_node_acl->initiatorname); 816 + pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n", 817 + tcm_loop_dump_proto_id(tpg->tl_hba), 818 + tl_nexus->se_sess->se_node_acl->initiatorname); 809 819 /* 810 820 * 
Release the SCSI I_T Nexus to the emulated Target Port 811 821 */ ··· 856 868 * tcm_loop_make_nexus() 857 869 */ 858 870 if (strlen(page) >= TL_WWN_ADDR_LEN) { 859 - pr_err("Emulated NAA Sas Address: %s, exceeds" 860 - " max: %d\n", page, TL_WWN_ADDR_LEN); 871 + pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n", 872 + page, TL_WWN_ADDR_LEN); 861 873 return -EINVAL; 862 874 } 863 875 snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page); ··· 865 877 ptr = strstr(i_port, "naa."); 866 878 if (ptr) { 867 879 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { 868 - pr_err("Passed SAS Initiator Port %s does not" 869 - " match target port protoid: %s\n", i_port, 870 - tcm_loop_dump_proto_id(tl_hba)); 880 + pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n", 881 + i_port, tcm_loop_dump_proto_id(tl_hba)); 871 882 return -EINVAL; 872 883 } 873 884 port_ptr = &i_port[0]; ··· 875 888 ptr = strstr(i_port, "fc."); 876 889 if (ptr) { 877 890 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) { 878 - pr_err("Passed FCP Initiator Port %s does not" 879 - " match target port protoid: %s\n", i_port, 880 - tcm_loop_dump_proto_id(tl_hba)); 891 + pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n", 892 + i_port, tcm_loop_dump_proto_id(tl_hba)); 881 893 return -EINVAL; 882 894 } 883 895 port_ptr = &i_port[3]; /* Skip over "fc." 
*/ ··· 885 899 ptr = strstr(i_port, "iqn."); 886 900 if (ptr) { 887 901 if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { 888 - pr_err("Passed iSCSI Initiator Port %s does not" 889 - " match target port protoid: %s\n", i_port, 890 - tcm_loop_dump_proto_id(tl_hba)); 902 + pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n", 903 + i_port, tcm_loop_dump_proto_id(tl_hba)); 891 904 return -EINVAL; 892 905 } 893 906 port_ptr = &i_port[0]; 894 907 goto check_newline; 895 908 } 896 - pr_err("Unable to locate prefix for emulated Initiator Port:" 897 - " %s\n", i_port); 909 + pr_err("Unable to locate prefix for emulated Initiator Port: %s\n", 910 + i_port); 898 911 return -EINVAL; 899 912 /* 900 913 * Clear any trailing newline for the NAA WWN ··· 995 1010 unsigned long tpgt; 996 1011 997 1012 if (strstr(name, "tpgt_") != name) { 998 - pr_err("Unable to locate \"tpgt_#\" directory" 999 - " group\n"); 1013 + pr_err("Unable to locate \"tpgt_#\" directory group\n"); 1000 1014 return ERR_PTR(-EINVAL); 1001 1015 } 1002 1016 if (kstrtoul(name+5, 10, &tpgt)) 1003 1017 return ERR_PTR(-EINVAL); 1004 1018 1005 1019 if (tpgt >= TL_TPGS_PER_HBA) { 1006 - pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:" 1007 - " %u\n", tpgt, TL_TPGS_PER_HBA); 1020 + pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n", 1021 + tpgt, TL_TPGS_PER_HBA); 1008 1022 return ERR_PTR(-EINVAL); 1009 1023 } 1010 1024 tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; ··· 1016 1032 if (ret < 0) 1017 1033 return ERR_PTR(-ENOMEM); 1018 1034 1019 - pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" 1020 - " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba), 1021 - config_item_name(&wwn->wwn_group.cg_item), tpgt); 1022 - 1035 + pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n", 1036 + tcm_loop_dump_proto_id(tl_hba), 1037 + config_item_name(&wwn->wwn_group.cg_item), tpgt); 1023 1038 return &tl_tpg->tl_se_tpg; 1024 1039 } 1025 1040 ··· 1045 1062 tl_tpg->tl_hba = 
NULL; 1046 1063 tl_tpg->tl_tpgt = 0; 1047 1064 1048 - pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s" 1049 - " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), 1050 - config_item_name(&wwn->wwn_group.cg_item), tpgt); 1065 + pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n", 1066 + tcm_loop_dump_proto_id(tl_hba), 1067 + config_item_name(&wwn->wwn_group.cg_item), tpgt); 1051 1068 } 1052 1069 1053 1070 /* End items for tcm_loop_naa_cit */ ··· 1064 1081 char *ptr; 1065 1082 int ret, off = 0; 1066 1083 1067 - tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); 1068 - if (!tl_hba) { 1069 - pr_err("Unable to allocate struct tcm_loop_hba\n"); 1084 + tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL); 1085 + if (!tl_hba) 1070 1086 return ERR_PTR(-ENOMEM); 1071 - } 1087 + 1072 1088 /* 1073 1089 * Determine the emulated Protocol Identifier and Target Port Name 1074 1090 * based on the incoming configfs directory name. ··· 1085 1103 } 1086 1104 ptr = strstr(name, "iqn."); 1087 1105 if (!ptr) { 1088 - pr_err("Unable to locate prefix for emulated Target " 1089 - "Port: %s\n", name); 1106 + pr_err("Unable to locate prefix for emulated Target Port: %s\n", 1107 + name); 1090 1108 ret = -EINVAL; 1091 1109 goto out; 1092 1110 } ··· 1094 1112 1095 1113 check_len: 1096 1114 if (strlen(name) >= TL_WWN_ADDR_LEN) { 1097 - pr_err("Emulated NAA %s Address: %s, exceeds" 1098 - " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), 1099 - TL_WWN_ADDR_LEN); 1115 + pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n", 1116 + name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN); 1100 1117 ret = -EINVAL; 1101 1118 goto out; 1102 1119 } ··· 1112 1131 1113 1132 sh = tl_hba->sh; 1114 1133 tcm_loop_hba_no_cnt++; 1115 - pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target" 1116 - " %s Address: %s at Linux/SCSI Host ID: %d\n", 1117 - tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); 1118 - 1134 + pr_debug("TCM_Loop_ConfigFS: Allocated emulated 
Target %s Address: %s at Linux/SCSI Host ID: %d\n", 1135 + tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); 1119 1136 return &tl_hba->tl_hba_wwn; 1120 1137 out: 1121 1138 kfree(tl_hba); ··· 1126 1147 struct tcm_loop_hba *tl_hba = container_of(wwn, 1127 1148 struct tcm_loop_hba, tl_hba_wwn); 1128 1149 1129 - pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target" 1130 - " %s Address: %s at Linux/SCSI Host ID: %d\n", 1131 - tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address, 1132 - tl_hba->sh->host_no); 1150 + pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n", 1151 + tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address, 1152 + tl_hba->sh->host_no); 1133 1153 /* 1134 1154 * Call device_unregister() on the original tl_hba->dev. 1135 1155 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will ··· 1201 1223 __alignof__(struct tcm_loop_cmd), 1202 1224 0, NULL); 1203 1225 if (!tcm_loop_cmd_cache) { 1204 - pr_debug("kmem_cache_create() for" 1205 - " tcm_loop_cmd_cache failed\n"); 1226 + pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n"); 1206 1227 goto out_destroy_workqueue; 1207 1228 } 1208 1229
+4 -9
drivers/target/sbp/sbp_target.c
··· 201 201 snprintf(guid_str, sizeof(guid_str), "%016llx", guid); 202 202 203 203 sess = kmalloc(sizeof(*sess), GFP_KERNEL); 204 - if (!sess) { 205 - pr_err("failed to allocate session descriptor\n"); 204 + if (!sess) 206 205 return ERR_PTR(-ENOMEM); 207 - } 206 + 208 207 spin_lock_init(&sess->lock); 209 208 INIT_LIST_HEAD(&sess->login_list); 210 209 INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work); ··· 2028 2029 } 2029 2030 2030 2031 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); 2031 - if (!tpg) { 2032 - pr_err("Unable to allocate struct sbp_tpg\n"); 2032 + if (!tpg) 2033 2033 return ERR_PTR(-ENOMEM); 2034 - } 2035 2034 2036 2035 tpg->tport = tport; 2037 2036 tpg->tport_tpgt = tpgt; ··· 2085 2088 return ERR_PTR(-EINVAL); 2086 2089 2087 2090 tport = kzalloc(sizeof(*tport), GFP_KERNEL); 2088 - if (!tport) { 2089 - pr_err("Unable to allocate struct sbp_tport\n"); 2091 + if (!tport) 2090 2092 return ERR_PTR(-ENOMEM); 2091 - } 2092 2093 2093 2094 tport->guid = guid; 2094 2095 sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
+6
drivers/target/target_core_configfs.c
··· 1197 1197 EXPORT_SYMBOL(passthrough_attrib_attrs); 1198 1198 1199 1199 TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL); 1200 + TB_CIT_SETUP_DRV(dev_action, NULL, NULL); 1200 1201 1201 1202 /* End functions for struct config_item_type tb_dev_attrib_cit */ 1202 1203 ··· 2941 2940 2942 2941 config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit); 2943 2942 2943 + config_group_init_type_name(&dev->dev_action_group, "action", 2944 + &tb->tb_dev_action_cit); 2945 + configfs_add_default_group(&dev->dev_action_group, &dev->dev_group); 2946 + 2944 2947 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", 2945 2948 &tb->tb_dev_attrib_cit); 2946 2949 configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group); ··· 3205 3200 void target_setup_backend_cits(struct target_backend *tb) 3206 3201 { 3207 3202 target_core_setup_dev_cit(tb); 3203 + target_core_setup_dev_action_cit(tb); 3208 3204 target_core_setup_dev_attrib_cit(tb); 3209 3205 target_core_setup_dev_pr_cit(tb); 3210 3206 target_core_setup_dev_wwn_cit(tb);
+3 -1
drivers/target/target_core_device.c
··· 997 997 998 998 ret = core_setup_alua(dev); 999 999 if (ret) 1000 - goto out_free_index; 1000 + goto out_destroy_device; 1001 1001 1002 1002 /* 1003 1003 * Startup the struct se_device processing thread ··· 1041 1041 1042 1042 out_free_alua: 1043 1043 core_alua_free_lu_gp_mem(dev); 1044 + out_destroy_device: 1045 + dev->transport->destroy_device(dev); 1044 1046 out_free_index: 1045 1047 mutex_lock(&device_mutex); 1046 1048 idr_remove(&devices_idr, dev->dev_index);
+3 -3
drivers/target/target_core_fabric_lib.c
··· 273 273 274 274 static char *iscsi_parse_pr_out_transport_id( 275 275 struct se_portal_group *se_tpg, 276 - const char *buf, 276 + char *buf, 277 277 u32 *out_tid_len, 278 278 char **port_nexus_ptr) 279 279 { ··· 356 356 } 357 357 } 358 358 359 - return (char *)&buf[4]; 359 + return &buf[4]; 360 360 } 361 361 362 362 int target_get_pr_transport_id_len(struct se_node_acl *nacl, ··· 405 405 } 406 406 407 407 const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, 408 - const char *buf, u32 *out_tid_len, char **port_nexus_ptr) 408 + char *buf, u32 *out_tid_len, char **port_nexus_ptr) 409 409 { 410 410 u32 offset; 411 411
+2 -1
drivers/target/target_core_internal.h
··· 17 17 18 18 struct config_item_type tb_dev_cit; 19 19 struct config_item_type tb_dev_attrib_cit; 20 + struct config_item_type tb_dev_action_cit; 20 21 struct config_item_type tb_dev_pr_cit; 21 22 struct config_item_type tb_dev_wwn_cit; 22 23 struct config_item_type tb_dev_alua_tg_pt_gps_cit; ··· 103 102 struct t10_pr_registration *pr_reg, int *format_code, 104 103 unsigned char *buf); 105 104 const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, 106 - const char *buf, u32 *out_tid_len, char **port_nexus_ptr); 105 + char *buf, u32 *out_tid_len, char **port_nexus_ptr); 107 106 108 107 /* target_core_hba.c */ 109 108 struct se_hba *core_alloc_hba(const char *, u32, u32);
+2 -2
drivers/target/target_core_pr.c
··· 1601 1601 dest_rtpi = tmp_lun->lun_rtpi; 1602 1602 1603 1603 i_str = target_parse_pr_out_transport_id(tmp_tpg, 1604 - (const char *)ptr, &tid_len, &iport_ptr); 1604 + ptr, &tid_len, &iport_ptr); 1605 1605 if (!i_str) 1606 1606 continue; 1607 1607 ··· 3287 3287 goto out; 3288 3288 } 3289 3289 initiator_str = target_parse_pr_out_transport_id(dest_se_tpg, 3290 - (const char *)&buf[24], &tmp_tid_len, &iport_ptr); 3290 + &buf[24], &tmp_tid_len, &iport_ptr); 3291 3291 if (!initiator_str) { 3292 3292 pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" 3293 3293 " initiator_str from Transport ID\n");
+5 -3
drivers/target/target_core_sbc.c
··· 1216 1216 goto err; 1217 1217 } 1218 1218 1219 - ret = ops->execute_unmap(cmd, lba, range); 1220 - if (ret) 1221 - goto err; 1219 + if (range) { 1220 + ret = ops->execute_unmap(cmd, lba, range); 1221 + if (ret) 1222 + goto err; 1223 + } 1222 1224 1223 1225 ptr += 16; 1224 1226 size -= 16;
+3
drivers/target/target_core_transport.c
··· 1774 1774 case TCM_OUT_OF_RESOURCES: 1775 1775 cmd->scsi_status = SAM_STAT_TASK_SET_FULL; 1776 1776 goto queue_status; 1777 + case TCM_LUN_BUSY: 1778 + cmd->scsi_status = SAM_STAT_BUSY; 1779 + goto queue_status; 1777 1780 case TCM_RESERVATION_CONFLICT: 1778 1781 /* 1779 1782 * No SENSE Data payload for this case, set SCSI Status
+699 -286
drivers/target/target_core_user.c
··· 32 32 #include <linux/highmem.h> 33 33 #include <linux/configfs.h> 34 34 #include <linux/mutex.h> 35 - #include <linux/kthread.h> 35 + #include <linux/workqueue.h> 36 36 #include <net/genetlink.h> 37 37 #include <scsi/scsi_common.h> 38 38 #include <scsi/scsi_proto.h> ··· 77 77 * the total size is 256K * PAGE_SIZE. 78 78 */ 79 79 #define DATA_BLOCK_SIZE PAGE_SIZE 80 - #define DATA_BLOCK_BITS (256 * 1024) 80 + #define DATA_BLOCK_SHIFT PAGE_SHIFT 81 + #define DATA_BLOCK_BITS_DEF (256 * 1024) 81 82 #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE) 82 - #define DATA_BLOCK_INIT_BITS 128 83 + 84 + #define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT)) 85 + #define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT)) 83 86 84 87 /* The total size of the ring is 8M + 256K * PAGE_SIZE */ 85 88 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) 86 89 87 - /* Default maximum of the global data blocks(512K * PAGE_SIZE) */ 88 - #define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024) 90 + /* 91 + * Default number of global data blocks(512K * PAGE_SIZE) 92 + * when the unmap thread will be started. 
93 + */ 94 + #define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024) 89 95 90 96 static u8 tcmu_kern_cmd_reply_supported; 91 97 ··· 113 107 struct tcmu_dev { 114 108 struct list_head node; 115 109 struct kref kref; 110 + 116 111 struct se_device se_dev; 117 112 118 113 char *name; ··· 121 114 122 115 #define TCMU_DEV_BIT_OPEN 0 123 116 #define TCMU_DEV_BIT_BROKEN 1 117 + #define TCMU_DEV_BIT_BLOCKED 2 124 118 unsigned long flags; 125 119 126 120 struct uio_info uio_info; ··· 136 128 /* Must add data_off and mb_addr to get the address */ 137 129 size_t data_off; 138 130 size_t data_size; 131 + uint32_t max_blocks; 132 + size_t ring_size; 139 133 140 - wait_queue_head_t wait_cmdr; 141 134 struct mutex cmdr_lock; 135 + struct list_head cmdr_queue; 142 136 143 - bool waiting_global; 144 137 uint32_t dbi_max; 145 138 uint32_t dbi_thresh; 146 - DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS); 139 + unsigned long *data_bitmap; 147 140 struct radix_tree_root data_blocks; 148 141 149 142 struct idr commands; 150 - spinlock_t commands_lock; 151 143 152 - struct timer_list timeout; 144 + struct timer_list cmd_timer; 153 145 unsigned int cmd_time_out; 146 + 147 + struct timer_list qfull_timer; 148 + int qfull_time_out; 149 + 150 + struct list_head timedout_entry; 154 151 155 152 spinlock_t nl_cmd_lock; 156 153 struct tcmu_nl_cmd curr_nl_cmd; ··· 174 161 struct tcmu_cmd { 175 162 struct se_cmd *se_cmd; 176 163 struct tcmu_dev *tcmu_dev; 164 + struct list_head cmdr_queue_entry; 177 165 178 166 uint16_t cmd_id; 179 167 ··· 189 175 #define TCMU_CMD_BIT_EXPIRED 0 190 176 unsigned long flags; 191 177 }; 192 - 193 - static struct task_struct *unmap_thread; 194 - static wait_queue_head_t unmap_wait; 178 + /* 179 + * To avoid dead lock the mutex lock order should always be: 180 + * 181 + * mutex_lock(&root_udev_mutex); 182 + * ... 183 + * mutex_lock(&tcmu_dev->cmdr_lock); 184 + * mutex_unlock(&tcmu_dev->cmdr_lock); 185 + * ... 
186 + * mutex_unlock(&root_udev_mutex); 187 + */ 195 188 static DEFINE_MUTEX(root_udev_mutex); 196 189 static LIST_HEAD(root_udev); 197 190 198 - static atomic_t global_db_count = ATOMIC_INIT(0); 191 + static DEFINE_SPINLOCK(timed_out_udevs_lock); 192 + static LIST_HEAD(timed_out_udevs); 199 193 200 194 static struct kmem_cache *tcmu_cmd_cache; 195 + 196 + static atomic_t global_db_count = ATOMIC_INIT(0); 197 + static struct delayed_work tcmu_unmap_work; 198 + static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF; 199 + 200 + static int tcmu_set_global_max_data_area(const char *str, 201 + const struct kernel_param *kp) 202 + { 203 + int ret, max_area_mb; 204 + 205 + ret = kstrtoint(str, 10, &max_area_mb); 206 + if (ret) 207 + return -EINVAL; 208 + 209 + if (max_area_mb <= 0) { 210 + pr_err("global_max_data_area must be larger than 0.\n"); 211 + return -EINVAL; 212 + } 213 + 214 + tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb); 215 + if (atomic_read(&global_db_count) > tcmu_global_max_blocks) 216 + schedule_delayed_work(&tcmu_unmap_work, 0); 217 + else 218 + cancel_delayed_work_sync(&tcmu_unmap_work); 219 + 220 + return 0; 221 + } 222 + 223 + static int tcmu_get_global_max_data_area(char *buffer, 224 + const struct kernel_param *kp) 225 + { 226 + return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); 227 + } 228 + 229 + static const struct kernel_param_ops tcmu_global_max_data_area_op = { 230 + .set = tcmu_set_global_max_data_area, 231 + .get = tcmu_get_global_max_data_area, 232 + }; 233 + 234 + module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL, 235 + S_IWUSR | S_IRUGO); 236 + MODULE_PARM_DESC(global_max_data_area_mb, 237 + "Max MBs allowed to be allocated to all the tcmu device's " 238 + "data areas."); 201 239 202 240 /* multicast group */ 203 241 enum tcmu_multicast_groups { ··· 411 345 page = radix_tree_lookup(&udev->data_blocks, dbi); 412 346 if (!page) { 413 347 if (atomic_add_return(1, 
&global_db_count) > 414 - TCMU_GLOBAL_MAX_BLOCKS) { 415 - atomic_dec(&global_db_count); 416 - return false; 417 - } 348 + tcmu_global_max_blocks) 349 + schedule_delayed_work(&tcmu_unmap_work, 0); 418 350 419 351 /* try to get new page from the mm */ 420 352 page = alloc_page(GFP_KERNEL); ··· 443 379 { 444 380 int i; 445 381 446 - udev->waiting_global = false; 447 - 448 382 for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) { 449 383 if (!tcmu_get_empty_block(udev, tcmu_cmd)) 450 - goto err; 384 + return false; 451 385 } 452 386 return true; 453 - 454 - err: 455 - udev->waiting_global = true; 456 - /* Try to wake up the unmap thread */ 457 - wake_up(&unmap_wait); 458 - return false; 459 387 } 460 388 461 389 static inline struct page * ··· 493 437 if (!tcmu_cmd) 494 438 return NULL; 495 439 440 + INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); 496 441 tcmu_cmd->se_cmd = se_cmd; 497 442 tcmu_cmd->tcmu_dev = udev; 498 443 ··· 512 455 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) 513 456 { 514 457 unsigned long offset = offset_in_page(vaddr); 458 + void *start = vaddr - offset; 515 459 516 460 size = round_up(size+offset, PAGE_SIZE); 517 - vaddr -= offset; 518 461 519 462 while (size) { 520 - flush_dcache_page(virt_to_page(vaddr)); 463 + flush_dcache_page(virt_to_page(start)); 464 + start += PAGE_SIZE; 521 465 size -= PAGE_SIZE; 522 466 } 523 467 } ··· 548 490 return size - head; 549 491 } 550 492 551 - static inline void new_iov(struct iovec **iov, int *iov_cnt, 552 - struct tcmu_dev *udev) 493 + static inline void new_iov(struct iovec **iov, int *iov_cnt) 553 494 { 554 495 struct iovec *iovec; 555 496 ··· 575 518 return (size_t)iov->iov_base + iov->iov_len; 576 519 } 577 520 578 - static int scatter_data_area(struct tcmu_dev *udev, 521 + static void scatter_data_area(struct tcmu_dev *udev, 579 522 struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg, 580 523 unsigned int data_nents, struct iovec **iov, 581 524 int *iov_cnt, bool copy_data) 
··· 601 544 to = kmap_atomic(page); 602 545 } 603 546 604 - copy_bytes = min_t(size_t, sg_remaining, 605 - block_remaining); 547 + /* 548 + * Covert to virtual offset of the ring data area. 549 + */ 606 550 to_offset = get_block_offset_user(udev, dbi, 607 551 block_remaining); 608 552 553 + /* 554 + * The following code will gather and map the blocks 555 + * to the same iovec when the blocks are all next to 556 + * each other. 557 + */ 558 + copy_bytes = min_t(size_t, sg_remaining, 559 + block_remaining); 609 560 if (*iov_cnt != 0 && 610 561 to_offset == iov_tail(*iov)) { 562 + /* 563 + * Will append to the current iovec, because 564 + * the current block page is next to the 565 + * previous one. 566 + */ 611 567 (*iov)->iov_len += copy_bytes; 612 568 } else { 613 - new_iov(iov, iov_cnt, udev); 569 + /* 570 + * Will allocate a new iovec because we are 571 + * first time here or the current block page 572 + * is not next to the previous one. 573 + */ 574 + new_iov(iov, iov_cnt); 614 575 (*iov)->iov_base = (void __user *)to_offset; 615 576 (*iov)->iov_len = copy_bytes; 616 577 } 578 + 617 579 if (copy_data) { 618 580 offset = DATA_BLOCK_SIZE - block_remaining; 619 581 memcpy(to + offset, ··· 640 564 copy_bytes); 641 565 tcmu_flush_dcache_range(to, copy_bytes); 642 566 } 567 + 643 568 sg_remaining -= copy_bytes; 644 569 block_remaining -= copy_bytes; 645 570 } 646 571 kunmap_atomic(from - sg->offset); 647 572 } 573 + 648 574 if (to) 649 575 kunmap_atomic(to); 650 - 651 - return 0; 652 576 } 653 577 654 578 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, ··· 713 637 714 638 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) 715 639 { 716 - return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh)); 640 + return thresh - bitmap_weight(bitmap, thresh); 717 641 } 718 642 719 643 /* ··· 753 677 754 678 /* try to check and get the data blocks as needed */ 755 679 space = spc_bitmap_free(udev->data_bitmap, 
udev->dbi_thresh); 756 - if (space < data_needed) { 757 - unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh; 758 - unsigned long grow; 680 + if ((space * DATA_BLOCK_SIZE) < data_needed) { 681 + unsigned long blocks_left = 682 + (udev->max_blocks - udev->dbi_thresh) + space; 759 683 760 684 if (blocks_left < blocks_needed) { 761 685 pr_debug("no data space: only %lu available, but ask for %zu\n", ··· 764 688 return false; 765 689 } 766 690 767 - /* Try to expand the thresh */ 768 - if (!udev->dbi_thresh) { 769 - /* From idle state */ 770 - uint32_t init_thresh = DATA_BLOCK_INIT_BITS; 771 - 772 - udev->dbi_thresh = max(blocks_needed, init_thresh); 773 - } else { 774 - /* 775 - * Grow the data area by max(blocks needed, 776 - * dbi_thresh / 2), but limited to the max 777 - * DATA_BLOCK_BITS size. 778 - */ 779 - grow = max(blocks_needed, udev->dbi_thresh / 2); 780 - udev->dbi_thresh += grow; 781 - if (udev->dbi_thresh > DATA_BLOCK_BITS) 782 - udev->dbi_thresh = DATA_BLOCK_BITS; 783 - } 691 + udev->dbi_thresh += blocks_needed; 692 + if (udev->dbi_thresh > udev->max_blocks) 693 + udev->dbi_thresh = udev->max_blocks; 784 694 } 785 695 786 696 return tcmu_get_empty_blocks(udev, cmd); ··· 793 731 return command_size; 794 732 } 795 733 796 - static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd) 734 + static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, 735 + struct timer_list *timer) 797 736 { 798 737 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 799 - unsigned long tmo = udev->cmd_time_out; 800 738 int cmd_id; 801 739 802 740 if (tcmu_cmd->cmd_id) 803 - return 0; 741 + goto setup_timer; 804 742 805 743 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); 806 744 if (cmd_id < 0) { ··· 809 747 } 810 748 tcmu_cmd->cmd_id = cmd_id; 811 749 750 + pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id, 751 + udev->name, tmo / MSEC_PER_SEC); 752 + 753 + setup_timer: 812 754 if (!tmo) 813 755 return 0; 814 756 
815 757 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 816 - mod_timer(&udev->timeout, tcmu_cmd->deadline); 758 + mod_timer(timer, tcmu_cmd->deadline); 817 759 return 0; 818 760 } 819 761 820 - static sense_reason_t 821 - tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) 762 + static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) 763 + { 764 + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 765 + unsigned int tmo; 766 + int ret; 767 + 768 + /* 769 + * For backwards compat if qfull_time_out is not set use 770 + * cmd_time_out and if that's not set use the default time out. 771 + */ 772 + if (!udev->qfull_time_out) 773 + return -ETIMEDOUT; 774 + else if (udev->qfull_time_out > 0) 775 + tmo = udev->qfull_time_out; 776 + else if (udev->cmd_time_out) 777 + tmo = udev->cmd_time_out; 778 + else 779 + tmo = TCMU_TIME_OUT; 780 + 781 + ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); 782 + if (ret) 783 + return ret; 784 + 785 + list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); 786 + pr_debug("adding cmd %u on dev %s to ring space wait queue\n", 787 + tcmu_cmd->cmd_id, udev->name); 788 + return 0; 789 + } 790 + 791 + /** 792 + * queue_cmd_ring - queue cmd to ring or internally 793 + * @tcmu_cmd: cmd to queue 794 + * @scsi_err: TCM error code if failure (-1) returned. 795 + * 796 + * Returns: 797 + * -1 we cannot queue internally or to the ring. 798 + * 0 success 799 + * 1 internally queued to wait for ring memory to free. 
800 + */ 801 + static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) 822 802 { 823 803 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 824 804 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; ··· 874 770 bool copy_to_data_area; 875 771 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); 876 772 877 - if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 878 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 773 + *scsi_err = TCM_NO_SENSE; 774 + 775 + if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { 776 + *scsi_err = TCM_LUN_BUSY; 777 + return -1; 778 + } 779 + 780 + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 781 + *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 782 + return -1; 783 + } 879 784 880 785 /* 881 786 * Must be a certain minimum size for response sense info, but ··· 901 788 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); 902 789 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 903 790 904 - mutex_lock(&udev->cmdr_lock); 791 + if (!list_empty(&udev->cmdr_queue)) 792 + goto queue; 905 793 906 794 mb = udev->mb_addr; 907 795 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ ··· 911 797 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " 912 798 "cmd ring/data area\n", command_size, data_length, 913 799 udev->cmdr_size, udev->data_size); 914 - mutex_unlock(&udev->cmdr_lock); 915 - return TCM_INVALID_CDB_FIELD; 800 + *scsi_err = TCM_INVALID_CDB_FIELD; 801 + return -1; 916 802 } 917 803 918 - while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) { 919 - int ret; 920 - DEFINE_WAIT(__wait); 921 - 922 - prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); 923 - 924 - pr_debug("sleeping for ring space\n"); 925 - mutex_unlock(&udev->cmdr_lock); 926 - if (udev->cmd_time_out) 927 - ret = schedule_timeout( 928 - msecs_to_jiffies(udev->cmd_time_out)); 929 - else 930 - ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); 931 - finish_wait(&udev->wait_cmdr, 
&__wait); 932 - if (!ret) { 933 - pr_warn("tcmu: command timed out\n"); 934 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 935 - } 936 - 937 - mutex_lock(&udev->cmdr_lock); 938 - 939 - /* We dropped cmdr_lock, cmd_head is stale */ 940 - cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 804 + if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) { 805 + /* 806 + * Don't leave commands partially setup because the unmap 807 + * thread might need the blocks to make forward progress. 808 + */ 809 + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); 810 + tcmu_cmd_reset_dbi_cur(tcmu_cmd); 811 + goto queue; 941 812 } 942 813 943 814 /* Insert a PAD if end-of-ring space is too small */ ··· 954 855 iov_cnt = 0; 955 856 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE 956 857 || se_cmd->se_cmd_flags & SCF_BIDI); 957 - ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg, 958 - se_cmd->t_data_nents, &iov, &iov_cnt, 959 - copy_to_data_area); 960 - if (ret) { 961 - tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 962 - mutex_unlock(&udev->cmdr_lock); 963 - 964 - pr_err("tcmu: alloc and scatter data failed\n"); 965 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 966 - } 858 + scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg, 859 + se_cmd->t_data_nents, &iov, &iov_cnt, 860 + copy_to_data_area); 967 861 entry->req.iov_cnt = iov_cnt; 968 862 969 863 /* Handle BIDI commands */ 970 864 iov_cnt = 0; 971 865 if (se_cmd->se_cmd_flags & SCF_BIDI) { 972 866 iov++; 973 - ret = scatter_data_area(udev, tcmu_cmd, 974 - se_cmd->t_bidi_data_sg, 975 - se_cmd->t_bidi_data_nents, 976 - &iov, &iov_cnt, false); 977 - if (ret) { 978 - tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 979 - mutex_unlock(&udev->cmdr_lock); 980 - 981 - pr_err("tcmu: alloc and scatter bidi data failed\n"); 982 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 983 - } 867 + scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg, 868 + se_cmd->t_bidi_data_nents, &iov, &iov_cnt, 869 + 
false); 984 870 } 985 871 entry->req.iov_bidi_cnt = iov_cnt; 986 872 987 - ret = tcmu_setup_cmd_timer(tcmu_cmd); 873 + ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, 874 + &udev->cmd_timer); 988 875 if (ret) { 989 876 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 990 877 mutex_unlock(&udev->cmdr_lock); 991 - return TCM_OUT_OF_RESOURCES; 878 + 879 + *scsi_err = TCM_OUT_OF_RESOURCES; 880 + return -1; 992 881 } 993 882 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 994 883 ··· 998 911 999 912 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 1000 913 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1001 - mutex_unlock(&udev->cmdr_lock); 1002 914 1003 915 /* TODO: only if FLUSH and FUA? */ 1004 916 uio_event_notify(&udev->uio_info); 1005 917 1006 - if (udev->cmd_time_out) 1007 - mod_timer(&udev->timeout, round_jiffies_up(jiffies + 1008 - msecs_to_jiffies(udev->cmd_time_out))); 918 + return 0; 1009 919 1010 - return TCM_NO_SENSE; 920 + queue: 921 + if (add_to_cmdr_queue(tcmu_cmd)) { 922 + *scsi_err = TCM_OUT_OF_RESOURCES; 923 + return -1; 924 + } 925 + 926 + return 1; 1011 927 } 1012 928 1013 929 static sense_reason_t 1014 930 tcmu_queue_cmd(struct se_cmd *se_cmd) 1015 931 { 932 + struct se_device *se_dev = se_cmd->se_dev; 933 + struct tcmu_dev *udev = TCMU_DEV(se_dev); 1016 934 struct tcmu_cmd *tcmu_cmd; 1017 - sense_reason_t ret; 935 + sense_reason_t scsi_ret; 936 + int ret; 1018 937 1019 938 tcmu_cmd = tcmu_alloc_cmd(se_cmd); 1020 939 if (!tcmu_cmd) 1021 940 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1022 941 1023 - ret = tcmu_queue_cmd_ring(tcmu_cmd); 1024 - if (ret != TCM_NO_SENSE) { 1025 - pr_err("TCMU: Could not queue command\n"); 1026 - 942 + mutex_lock(&udev->cmdr_lock); 943 + ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 944 + mutex_unlock(&udev->cmdr_lock); 945 + if (ret < 0) 1027 946 tcmu_free_cmd(tcmu_cmd); 1028 - } 1029 - 1030 - return ret; 947 + return scsi_ret; 1031 948 } 1032 949 1033 950 static void tcmu_handle_completion(struct tcmu_cmd *cmd, 
struct tcmu_cmd_entry *entry) ··· 1102 1011 } 1103 1012 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); 1104 1013 1105 - spin_lock(&udev->commands_lock); 1106 1014 cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); 1107 - spin_unlock(&udev->commands_lock); 1108 - 1109 1015 if (!cmd) { 1110 - pr_err("cmd_id not found, ring is broken\n"); 1016 + pr_err("cmd_id %u not found, ring is broken\n", 1017 + entry->hdr.cmd_id); 1111 1018 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 1112 1019 break; 1113 1020 } ··· 1119 1030 handled++; 1120 1031 } 1121 1032 1122 - if (mb->cmd_tail == mb->cmd_head) 1123 - del_timer(&udev->timeout); /* no more pending cmds */ 1033 + if (mb->cmd_tail == mb->cmd_head) { 1034 + /* no more pending commands */ 1035 + del_timer(&udev->cmd_timer); 1124 1036 1125 - wake_up(&udev->wait_cmdr); 1037 + if (list_empty(&udev->cmdr_queue)) { 1038 + /* 1039 + * no more pending or waiting commands so try to 1040 + * reclaim blocks if needed. 1041 + */ 1042 + if (atomic_read(&global_db_count) > 1043 + tcmu_global_max_blocks) 1044 + schedule_delayed_work(&tcmu_unmap_work, 0); 1045 + } 1046 + } 1126 1047 1127 1048 return handled; 1128 1049 } ··· 1140 1041 static int tcmu_check_expired_cmd(int id, void *p, void *data) 1141 1042 { 1142 1043 struct tcmu_cmd *cmd = p; 1044 + struct tcmu_dev *udev = cmd->tcmu_dev; 1045 + u8 scsi_status; 1046 + struct se_cmd *se_cmd; 1047 + bool is_running; 1143 1048 1144 1049 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1145 1050 return 0; ··· 1151 1048 if (!time_after(jiffies, cmd->deadline)) 1152 1049 return 0; 1153 1050 1154 - set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1155 - target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); 1156 - cmd->se_cmd = NULL; 1051 + is_running = list_empty(&cmd->cmdr_queue_entry); 1052 + se_cmd = cmd->se_cmd; 1157 1053 1054 + if (is_running) { 1055 + /* 1056 + * If cmd_time_out is disabled but qfull is set deadline 1057 + * will only reflect the qfull timeout. Ignore it. 
1058 + */ 1059 + if (!udev->cmd_time_out) 1060 + return 0; 1061 + 1062 + set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1063 + /* 1064 + * target_complete_cmd will translate this to LUN COMM FAILURE 1065 + */ 1066 + scsi_status = SAM_STAT_CHECK_CONDITION; 1067 + } else { 1068 + list_del_init(&cmd->cmdr_queue_entry); 1069 + 1070 + idr_remove(&udev->commands, id); 1071 + tcmu_free_cmd(cmd); 1072 + scsi_status = SAM_STAT_TASK_SET_FULL; 1073 + } 1074 + 1075 + pr_debug("Timing out cmd %u on dev %s that is %s.\n", 1076 + id, udev->name, is_running ? "inflight" : "queued"); 1077 + 1078 + target_complete_cmd(se_cmd, scsi_status); 1158 1079 return 0; 1159 1080 } 1160 1081 1161 - static void tcmu_device_timedout(struct timer_list *t) 1082 + static void tcmu_device_timedout(struct tcmu_dev *udev) 1162 1083 { 1163 - struct tcmu_dev *udev = from_timer(udev, t, timeout); 1164 - unsigned long flags; 1084 + spin_lock(&timed_out_udevs_lock); 1085 + if (list_empty(&udev->timedout_entry)) 1086 + list_add_tail(&udev->timedout_entry, &timed_out_udevs); 1087 + spin_unlock(&timed_out_udevs_lock); 1165 1088 1166 - spin_lock_irqsave(&udev->commands_lock, flags); 1167 - idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 1168 - spin_unlock_irqrestore(&udev->commands_lock, flags); 1089 + schedule_delayed_work(&tcmu_unmap_work, 0); 1090 + } 1169 1091 1170 - /* Try to wake up the ummap thread */ 1171 - wake_up(&unmap_wait); 1092 + static void tcmu_cmd_timedout(struct timer_list *t) 1093 + { 1094 + struct tcmu_dev *udev = from_timer(udev, t, cmd_timer); 1172 1095 1173 - /* 1174 - * We don't need to wakeup threads on wait_cmdr since they have their 1175 - * own timeout. 
1176 - */ 1096 + pr_debug("%s cmd timeout has expired\n", udev->name); 1097 + tcmu_device_timedout(udev); 1098 + } 1099 + 1100 + static void tcmu_qfull_timedout(struct timer_list *t) 1101 + { 1102 + struct tcmu_dev *udev = from_timer(udev, t, qfull_timer); 1103 + 1104 + pr_debug("%s qfull timeout has expired\n", udev->name); 1105 + tcmu_device_timedout(udev); 1177 1106 } 1178 1107 1179 1108 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) ··· 1245 1110 1246 1111 udev->hba = hba; 1247 1112 udev->cmd_time_out = TCMU_TIME_OUT; 1113 + udev->qfull_time_out = -1; 1248 1114 1249 - init_waitqueue_head(&udev->wait_cmdr); 1115 + udev->max_blocks = DATA_BLOCK_BITS_DEF; 1250 1116 mutex_init(&udev->cmdr_lock); 1251 1117 1118 + INIT_LIST_HEAD(&udev->timedout_entry); 1119 + INIT_LIST_HEAD(&udev->cmdr_queue); 1252 1120 idr_init(&udev->commands); 1253 - spin_lock_init(&udev->commands_lock); 1254 1121 1255 - timer_setup(&udev->timeout, tcmu_device_timedout, 0); 1122 + timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); 1123 + timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); 1256 1124 1257 1125 init_waitqueue_head(&udev->nl_cmd_wq); 1258 1126 spin_lock_init(&udev->nl_cmd_lock); ··· 1265 1127 return &udev->se_dev; 1266 1128 } 1267 1129 1130 + static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) 1131 + { 1132 + struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1133 + LIST_HEAD(cmds); 1134 + bool drained = true; 1135 + sense_reason_t scsi_ret; 1136 + int ret; 1137 + 1138 + if (list_empty(&udev->cmdr_queue)) 1139 + return true; 1140 + 1141 + pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); 1142 + 1143 + list_splice_init(&udev->cmdr_queue, &cmds); 1144 + 1145 + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { 1146 + list_del_init(&tcmu_cmd->cmdr_queue_entry); 1147 + 1148 + pr_debug("removing cmd %u on dev %s from queue\n", 1149 + tcmu_cmd->cmd_id, udev->name); 1150 + 1151 + if (fail) { 1152 + idr_remove(&udev->commands, 
tcmu_cmd->cmd_id); 1153 + /* 1154 + * We were not able to even start the command, so 1155 + * fail with busy to allow a retry in case runner 1156 + * was only temporarily down. If the device is being 1157 + * removed then LIO core will do the right thing and 1158 + * fail the retry. 1159 + */ 1160 + target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY); 1161 + tcmu_free_cmd(tcmu_cmd); 1162 + continue; 1163 + } 1164 + 1165 + ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 1166 + if (ret < 0) { 1167 + pr_debug("cmd %u on dev %s failed with %u\n", 1168 + tcmu_cmd->cmd_id, udev->name, scsi_ret); 1169 + 1170 + idr_remove(&udev->commands, tcmu_cmd->cmd_id); 1171 + /* 1172 + * Ignore scsi_ret for now. target_complete_cmd 1173 + * drops it. 1174 + */ 1175 + target_complete_cmd(tcmu_cmd->se_cmd, 1176 + SAM_STAT_CHECK_CONDITION); 1177 + tcmu_free_cmd(tcmu_cmd); 1178 + } else if (ret > 0) { 1179 + pr_debug("ran out of space during cmdr queue run\n"); 1180 + /* 1181 + * cmd was requeued, so just put all cmds back in 1182 + * the queue 1183 + */ 1184 + list_splice_tail(&cmds, &udev->cmdr_queue); 1185 + drained = false; 1186 + goto done; 1187 + } 1188 + } 1189 + if (list_empty(&udev->cmdr_queue)) 1190 + del_timer(&udev->qfull_timer); 1191 + done: 1192 + return drained; 1193 + } 1194 + 1268 1195 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) 1269 1196 { 1270 - struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info); 1197 + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1271 1198 1272 - mutex_lock(&tcmu_dev->cmdr_lock); 1273 - tcmu_handle_completions(tcmu_dev); 1274 - mutex_unlock(&tcmu_dev->cmdr_lock); 1199 + mutex_lock(&udev->cmdr_lock); 1200 + tcmu_handle_completions(udev); 1201 + run_cmdr_queue(udev, false); 1202 + mutex_unlock(&udev->cmdr_lock); 1275 1203 1276 1204 return 0; 1277 1205 } ··· 1362 1158 static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) 1363 1159 { 1364 1160 struct page *page; 1365 
- int ret; 1366 1161 1367 1162 mutex_lock(&udev->cmdr_lock); 1368 1163 page = tcmu_get_block_page(udev, dbi); ··· 1371 1168 } 1372 1169 1373 1170 /* 1374 - * Normally it shouldn't be here: 1375 - * Only when the userspace has touched the blocks which 1376 - * are out of the tcmu_cmd's data iov[], and will return 1377 - * one zeroed page. 1171 + * Userspace messed up and passed in a address not in the 1172 + * data iov passed to it. 1378 1173 */ 1379 - pr_warn("Block(%u) out of cmd's iov[] has been touched!\n", dbi); 1380 - pr_warn("Mostly it will be a bug of userspace, please have a check!\n"); 1381 - 1382 - if (dbi >= udev->dbi_thresh) { 1383 - /* Extern the udev->dbi_thresh to dbi + 1 */ 1384 - udev->dbi_thresh = dbi + 1; 1385 - udev->dbi_max = dbi; 1386 - } 1387 - 1388 - page = radix_tree_lookup(&udev->data_blocks, dbi); 1389 - if (!page) { 1390 - page = alloc_page(GFP_KERNEL | __GFP_ZERO); 1391 - if (!page) { 1392 - mutex_unlock(&udev->cmdr_lock); 1393 - return NULL; 1394 - } 1395 - 1396 - ret = radix_tree_insert(&udev->data_blocks, dbi, page); 1397 - if (ret) { 1398 - mutex_unlock(&udev->cmdr_lock); 1399 - __free_page(page); 1400 - return NULL; 1401 - } 1402 - 1403 - /* 1404 - * Since this case is rare in page fault routine, here we 1405 - * will allow the global_db_count >= TCMU_GLOBAL_MAX_BLOCKS 1406 - * to reduce possible page fault call trace. 
1407 - */ 1408 - atomic_inc(&global_db_count); 1409 - } 1174 + pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n", 1175 + dbi, udev->name); 1176 + page = NULL; 1410 1177 mutex_unlock(&udev->cmdr_lock); 1411 1178 1412 1179 return page; ··· 1411 1238 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE; 1412 1239 page = tcmu_try_get_block_page(udev, dbi); 1413 1240 if (!page) 1414 - return VM_FAULT_NOPAGE; 1241 + return VM_FAULT_SIGBUS; 1415 1242 } 1416 1243 1417 1244 get_page(page); ··· 1433 1260 vma->vm_private_data = udev; 1434 1261 1435 1262 /* Ensure the mmap is exactly the right size */ 1436 - if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT)) 1263 + if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT)) 1437 1264 return -EINVAL; 1438 1265 1439 1266 return 0; ··· 1474 1301 return -EINVAL; 1475 1302 } 1476 1303 1477 - static void tcmu_blocks_release(struct tcmu_dev *udev) 1304 + static void tcmu_blocks_release(struct radix_tree_root *blocks, 1305 + int start, int end) 1478 1306 { 1479 1307 int i; 1480 1308 struct page *page; 1481 1309 1482 - /* Try to release all block pages */ 1483 - mutex_lock(&udev->cmdr_lock); 1484 - for (i = 0; i <= udev->dbi_max; i++) { 1485 - page = radix_tree_delete(&udev->data_blocks, i); 1310 + for (i = start; i < end; i++) { 1311 + page = radix_tree_delete(blocks, i); 1486 1312 if (page) { 1487 1313 __free_page(page); 1488 1314 atomic_dec(&global_db_count); 1489 1315 } 1490 1316 } 1491 - mutex_unlock(&udev->cmdr_lock); 1492 1317 } 1493 1318 1494 1319 static void tcmu_dev_kref_release(struct kref *kref) ··· 1500 1329 vfree(udev->mb_addr); 1501 1330 udev->mb_addr = NULL; 1502 1331 1332 + spin_lock_bh(&timed_out_udevs_lock); 1333 + if (!list_empty(&udev->timedout_entry)) 1334 + list_del(&udev->timedout_entry); 1335 + spin_unlock_bh(&timed_out_udevs_lock); 1336 + 1503 1337 /* Upper layer should drain all requests before calling this */ 1504 - spin_lock_irq(&udev->commands_lock); 1338 + mutex_lock(&udev->cmdr_lock); 1505 
1339 idr_for_each_entry(&udev->commands, cmd, i) { 1506 1340 if (tcmu_check_and_free_pending_cmd(cmd) != 0) 1507 1341 all_expired = false; 1508 1342 } 1509 1343 idr_destroy(&udev->commands); 1510 - spin_unlock_irq(&udev->commands_lock); 1511 1344 WARN_ON(!all_expired); 1512 1345 1513 - tcmu_blocks_release(udev); 1346 + tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1); 1347 + kfree(udev->data_bitmap); 1348 + mutex_unlock(&udev->cmdr_lock); 1514 1349 1515 1350 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1516 1351 } ··· 1583 1406 1584 1407 wake_up_all(&udev->nl_cmd_wq); 1585 1408 1586 - return ret;; 1409 + return ret; 1587 1410 } 1588 1411 1589 1412 static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd, ··· 1692 1515 1693 1516 info = &udev->uio_info; 1694 1517 1518 + udev->data_bitmap = kzalloc(BITS_TO_LONGS(udev->max_blocks) * 1519 + sizeof(unsigned long), GFP_KERNEL); 1520 + if (!udev->data_bitmap) { 1521 + ret = -ENOMEM; 1522 + goto err_bitmap_alloc; 1523 + } 1524 + 1695 1525 udev->mb_addr = vzalloc(CMDR_SIZE); 1696 1526 if (!udev->mb_addr) { 1697 1527 ret = -ENOMEM; ··· 1708 1524 /* mailbox fits in first part of CMDR space */ 1709 1525 udev->cmdr_size = CMDR_SIZE - CMDR_OFF; 1710 1526 udev->data_off = CMDR_SIZE; 1711 - udev->data_size = DATA_SIZE; 1527 + udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE; 1712 1528 udev->dbi_thresh = 0; /* Default in Idle state */ 1713 - udev->waiting_global = false; 1714 1529 1715 1530 /* Initialise the mailbox of the ring buffer */ 1716 1531 mb = udev->mb_addr; ··· 1726 1543 1727 1544 info->mem[0].name = "tcm-user command & data buffer"; 1728 1545 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; 1729 - info->mem[0].size = TCMU_RING_SIZE; 1546 + info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE; 1730 1547 info->mem[0].memtype = UIO_MEM_NONE; 1731 1548 1732 1549 info->irqcontrol = tcmu_irqcontrol; ··· 1779 1596 vfree(udev->mb_addr); 1780 1597 udev->mb_addr = NULL; 
1781 1598 err_vzalloc: 1599 + kfree(udev->data_bitmap); 1600 + udev->data_bitmap = NULL; 1601 + err_bitmap_alloc: 1782 1602 kfree(info->name); 1783 1603 info->name = NULL; 1784 1604 ··· 1805 1619 { 1806 1620 struct tcmu_dev *udev = TCMU_DEV(dev); 1807 1621 1808 - del_timer_sync(&udev->timeout); 1622 + del_timer_sync(&udev->cmd_timer); 1623 + del_timer_sync(&udev->qfull_timer); 1809 1624 1810 1625 mutex_lock(&root_udev_mutex); 1811 1626 list_del(&udev->node); ··· 1820 1633 kref_put(&udev->kref, tcmu_dev_kref_release); 1821 1634 } 1822 1635 1636 + static void tcmu_unblock_dev(struct tcmu_dev *udev) 1637 + { 1638 + mutex_lock(&udev->cmdr_lock); 1639 + clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); 1640 + mutex_unlock(&udev->cmdr_lock); 1641 + } 1642 + 1643 + static void tcmu_block_dev(struct tcmu_dev *udev) 1644 + { 1645 + mutex_lock(&udev->cmdr_lock); 1646 + 1647 + if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) 1648 + goto unlock; 1649 + 1650 + /* complete IO that has executed successfully */ 1651 + tcmu_handle_completions(udev); 1652 + /* fail IO waiting to be queued */ 1653 + run_cmdr_queue(udev, true); 1654 + 1655 + unlock: 1656 + mutex_unlock(&udev->cmdr_lock); 1657 + } 1658 + 1659 + static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) 1660 + { 1661 + struct tcmu_mailbox *mb; 1662 + struct tcmu_cmd *cmd; 1663 + int i; 1664 + 1665 + mutex_lock(&udev->cmdr_lock); 1666 + 1667 + idr_for_each_entry(&udev->commands, cmd, i) { 1668 + if (!list_empty(&cmd->cmdr_queue_entry)) 1669 + continue; 1670 + 1671 + pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", 1672 + cmd->cmd_id, udev->name, 1673 + test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); 1674 + 1675 + idr_remove(&udev->commands, i); 1676 + if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1677 + if (err_level == 1) { 1678 + /* 1679 + * Userspace was not able to start the 1680 + * command or it is retryable. 
1681 + */ 1682 + target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY); 1683 + } else { 1684 + /* hard failure */ 1685 + target_complete_cmd(cmd->se_cmd, 1686 + SAM_STAT_CHECK_CONDITION); 1687 + } 1688 + } 1689 + tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 1690 + tcmu_free_cmd(cmd); 1691 + } 1692 + 1693 + mb = udev->mb_addr; 1694 + tcmu_flush_dcache_range(mb, sizeof(*mb)); 1695 + pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, 1696 + mb->cmd_tail, mb->cmd_head); 1697 + 1698 + udev->cmdr_last_cleaned = 0; 1699 + mb->cmd_tail = 0; 1700 + mb->cmd_head = 0; 1701 + tcmu_flush_dcache_range(mb, sizeof(*mb)); 1702 + 1703 + del_timer(&udev->cmd_timer); 1704 + 1705 + mutex_unlock(&udev->cmdr_lock); 1706 + } 1707 + 1823 1708 enum { 1824 1709 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, 1825 - Opt_nl_reply_supported, Opt_err, 1710 + Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err, 1826 1711 }; 1827 1712 1828 1713 static match_table_t tokens = { ··· 1903 1644 {Opt_hw_block_size, "hw_block_size=%u"}, 1904 1645 {Opt_hw_max_sectors, "hw_max_sectors=%u"}, 1905 1646 {Opt_nl_reply_supported, "nl_reply_supported=%d"}, 1647 + {Opt_max_data_area_mb, "max_data_area_mb=%u"}, 1906 1648 {Opt_err, NULL} 1907 1649 }; 1908 1650 ··· 1937 1677 struct tcmu_dev *udev = TCMU_DEV(dev); 1938 1678 char *orig, *ptr, *opts, *arg_p; 1939 1679 substring_t args[MAX_OPT_ARGS]; 1940 - int ret = 0, token; 1680 + int ret = 0, token, tmpval; 1941 1681 1942 1682 opts = kstrdup(page, GFP_KERNEL); 1943 1683 if (!opts) ··· 1989 1729 if (ret < 0) 1990 1730 pr_err("kstrtoint() failed for nl_reply_supported=\n"); 1991 1731 break; 1732 + case Opt_max_data_area_mb: 1733 + if (dev->export_count) { 1734 + pr_err("Unable to set max_data_area_mb while exports exist\n"); 1735 + ret = -EINVAL; 1736 + break; 1737 + } 1738 + 1739 + arg_p = match_strdup(&args[0]); 1740 + if (!arg_p) { 1741 + ret = -ENOMEM; 1742 + break; 1743 + } 1744 + ret = kstrtoint(arg_p, 0, &tmpval); 1745 + kfree(arg_p); 
1746 + if (ret < 0) { 1747 + pr_err("kstrtoint() failed for max_data_area_mb=\n"); 1748 + break; 1749 + } 1750 + 1751 + if (tmpval <= 0) { 1752 + pr_err("Invalid max_data_area %d\n", tmpval); 1753 + ret = -EINVAL; 1754 + break; 1755 + } 1756 + 1757 + udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval); 1758 + if (udev->max_blocks > tcmu_global_max_blocks) { 1759 + pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n", 1760 + tmpval, 1761 + TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); 1762 + udev->max_blocks = tcmu_global_max_blocks; 1763 + } 1764 + break; 1992 1765 default: 1993 1766 break; 1994 1767 } ··· 2041 1748 2042 1749 bl = sprintf(b + bl, "Config: %s ", 2043 1750 udev->dev_config[0] ? udev->dev_config : "NULL"); 2044 - bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); 1751 + bl += sprintf(b + bl, "Size: %zu ", udev->dev_size); 1752 + bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", 1753 + TCMU_BLOCKS_TO_MBS(udev->max_blocks)); 2045 1754 2046 1755 return bl; 2047 1756 } ··· 2094 1799 return count; 2095 1800 } 2096 1801 CONFIGFS_ATTR(tcmu_, cmd_time_out); 1802 + 1803 + static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page) 1804 + { 1805 + struct se_dev_attrib *da = container_of(to_config_group(item), 1806 + struct se_dev_attrib, da_group); 1807 + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1808 + 1809 + return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? 
1810 + udev->qfull_time_out : 1811 + udev->qfull_time_out / MSEC_PER_SEC); 1812 + } 1813 + 1814 + static ssize_t tcmu_qfull_time_out_store(struct config_item *item, 1815 + const char *page, size_t count) 1816 + { 1817 + struct se_dev_attrib *da = container_of(to_config_group(item), 1818 + struct se_dev_attrib, da_group); 1819 + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1820 + s32 val; 1821 + int ret; 1822 + 1823 + ret = kstrtos32(page, 0, &val); 1824 + if (ret < 0) 1825 + return ret; 1826 + 1827 + if (val >= 0) { 1828 + udev->qfull_time_out = val * MSEC_PER_SEC; 1829 + } else { 1830 + printk(KERN_ERR "Invalid qfull timeout value %d\n", val); 1831 + return -EINVAL; 1832 + } 1833 + return count; 1834 + } 1835 + CONFIGFS_ATTR(tcmu_, qfull_time_out); 1836 + 1837 + static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page) 1838 + { 1839 + struct se_dev_attrib *da = container_of(to_config_group(item), 1840 + struct se_dev_attrib, da_group); 1841 + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1842 + 1843 + return snprintf(page, PAGE_SIZE, "%u\n", 1844 + TCMU_BLOCKS_TO_MBS(udev->max_blocks)); 1845 + } 1846 + CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb); 2097 1847 2098 1848 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) 2099 1849 { ··· 2283 1943 } 2284 1944 CONFIGFS_ATTR(tcmu_, emulate_write_cache); 2285 1945 1946 + static ssize_t tcmu_block_dev_show(struct config_item *item, char *page) 1947 + { 1948 + struct se_device *se_dev = container_of(to_config_group(item), 1949 + struct se_device, 1950 + dev_action_group); 1951 + struct tcmu_dev *udev = TCMU_DEV(se_dev); 1952 + 1953 + if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) 1954 + return snprintf(page, PAGE_SIZE, "%s\n", "blocked"); 1955 + else 1956 + return snprintf(page, PAGE_SIZE, "%s\n", "unblocked"); 1957 + } 1958 + 1959 + static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page, 1960 + size_t count) 1961 + { 1962 + struct se_device *se_dev = 
container_of(to_config_group(item), 1963 + struct se_device, 1964 + dev_action_group); 1965 + struct tcmu_dev *udev = TCMU_DEV(se_dev); 1966 + u8 val; 1967 + int ret; 1968 + 1969 + ret = kstrtou8(page, 0, &val); 1970 + if (ret < 0) 1971 + return ret; 1972 + 1973 + if (val > 1) { 1974 + pr_err("Invalid block value %d\n", val); 1975 + return -EINVAL; 1976 + } 1977 + 1978 + if (!val) 1979 + tcmu_unblock_dev(udev); 1980 + else 1981 + tcmu_block_dev(udev); 1982 + return count; 1983 + } 1984 + CONFIGFS_ATTR(tcmu_, block_dev); 1985 + 1986 + static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page, 1987 + size_t count) 1988 + { 1989 + struct se_device *se_dev = container_of(to_config_group(item), 1990 + struct se_device, 1991 + dev_action_group); 1992 + struct tcmu_dev *udev = TCMU_DEV(se_dev); 1993 + u8 val; 1994 + int ret; 1995 + 1996 + ret = kstrtou8(page, 0, &val); 1997 + if (ret < 0) 1998 + return ret; 1999 + 2000 + if (val != 1 && val != 2) { 2001 + pr_err("Invalid reset ring value %d\n", val); 2002 + return -EINVAL; 2003 + } 2004 + 2005 + tcmu_reset_ring(udev, val); 2006 + return count; 2007 + } 2008 + CONFIGFS_ATTR_WO(tcmu_, reset_ring); 2009 + 2286 2010 static struct configfs_attribute *tcmu_attrib_attrs[] = { 2287 2011 &tcmu_attr_cmd_time_out, 2012 + &tcmu_attr_qfull_time_out, 2013 + &tcmu_attr_max_data_area_mb, 2288 2014 &tcmu_attr_dev_config, 2289 2015 &tcmu_attr_dev_size, 2290 2016 &tcmu_attr_emulate_write_cache, ··· 2359 1953 }; 2360 1954 2361 1955 static struct configfs_attribute **tcmu_attrs; 1956 + 1957 + static struct configfs_attribute *tcmu_action_attrs[] = { 1958 + &tcmu_attr_block_dev, 1959 + &tcmu_attr_reset_ring, 1960 + NULL, 1961 + }; 2362 1962 2363 1963 static struct target_backend_ops tcmu_ops = { 2364 1964 .name = "user", ··· 2381 1969 .show_configfs_dev_params = tcmu_show_configfs_dev_params, 2382 1970 .get_device_type = sbc_get_device_type, 2383 1971 .get_blocks = tcmu_get_blocks, 2384 - .tb_dev_attrib_attrs = NULL, 1972 
+ .tb_dev_action_attrs = tcmu_action_attrs, 2385 1973 }; 2386 1974 2387 - static int unmap_thread_fn(void *data) 1975 + static void find_free_blocks(void) 2388 1976 { 2389 1977 struct tcmu_dev *udev; 2390 1978 loff_t off; 2391 - uint32_t start, end, block; 2392 - struct page *page; 2393 - int i; 1979 + u32 start, end, block, total_freed = 0; 2394 1980 2395 - while (!kthread_should_stop()) { 2396 - DEFINE_WAIT(__wait); 1981 + if (atomic_read(&global_db_count) <= tcmu_global_max_blocks) 1982 + return; 2397 1983 2398 - prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE); 2399 - schedule(); 2400 - finish_wait(&unmap_wait, &__wait); 1984 + mutex_lock(&root_udev_mutex); 1985 + list_for_each_entry(udev, &root_udev, node) { 1986 + mutex_lock(&udev->cmdr_lock); 2401 1987 2402 - if (kthread_should_stop()) 2403 - break; 1988 + /* Try to complete the finished commands first */ 1989 + tcmu_handle_completions(udev); 2404 1990 2405 - mutex_lock(&root_udev_mutex); 2406 - list_for_each_entry(udev, &root_udev, node) { 2407 - mutex_lock(&udev->cmdr_lock); 2408 - 2409 - /* Try to complete the finished commands first */ 2410 - tcmu_handle_completions(udev); 2411 - 2412 - /* Skip the udevs waiting the global pool or in idle */ 2413 - if (udev->waiting_global || !udev->dbi_thresh) { 2414 - mutex_unlock(&udev->cmdr_lock); 2415 - continue; 2416 - } 2417 - 2418 - end = udev->dbi_max + 1; 2419 - block = find_last_bit(udev->data_bitmap, end); 2420 - if (block == udev->dbi_max) { 2421 - /* 2422 - * The last bit is dbi_max, so there is 2423 - * no need to shrink any blocks. 
2424 - */ 2425 - mutex_unlock(&udev->cmdr_lock); 2426 - continue; 2427 - } else if (block == end) { 2428 - /* The current udev will goto idle state */ 2429 - udev->dbi_thresh = start = 0; 2430 - udev->dbi_max = 0; 2431 - } else { 2432 - udev->dbi_thresh = start = block + 1; 2433 - udev->dbi_max = block; 2434 - } 2435 - 2436 - /* Here will truncate the data area from off */ 2437 - off = udev->data_off + start * DATA_BLOCK_SIZE; 2438 - unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); 2439 - 2440 - /* Release the block pages */ 2441 - for (i = start; i < end; i++) { 2442 - page = radix_tree_delete(&udev->data_blocks, i); 2443 - if (page) { 2444 - __free_page(page); 2445 - atomic_dec(&global_db_count); 2446 - } 2447 - } 1991 + /* Skip the udevs in idle */ 1992 + if (!udev->dbi_thresh) { 2448 1993 mutex_unlock(&udev->cmdr_lock); 1994 + continue; 2449 1995 } 2450 1996 2451 - /* 2452 - * Try to wake up the udevs who are waiting 2453 - * for the global data pool. 2454 - */ 2455 - list_for_each_entry(udev, &root_udev, node) { 2456 - if (udev->waiting_global) 2457 - wake_up(&udev->wait_cmdr); 1997 + end = udev->dbi_max + 1; 1998 + block = find_last_bit(udev->data_bitmap, end); 1999 + if (block == udev->dbi_max) { 2000 + /* 2001 + * The last bit is dbi_max, so it is not possible 2002 + * reclaim any blocks. 
2003 + */ 2004 + mutex_unlock(&udev->cmdr_lock); 2005 + continue; 2006 + } else if (block == end) { 2007 + /* The current udev will goto idle state */ 2008 + udev->dbi_thresh = start = 0; 2009 + udev->dbi_max = 0; 2010 + } else { 2011 + udev->dbi_thresh = start = block + 1; 2012 + udev->dbi_max = block; 2458 2013 } 2459 - mutex_unlock(&root_udev_mutex); 2014 + 2015 + /* Here will truncate the data area from off */ 2016 + off = udev->data_off + start * DATA_BLOCK_SIZE; 2017 + unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); 2018 + 2019 + /* Release the block pages */ 2020 + tcmu_blocks_release(&udev->data_blocks, start, end); 2021 + mutex_unlock(&udev->cmdr_lock); 2022 + 2023 + total_freed += end - start; 2024 + pr_debug("Freed %u blocks (total %u) from %s.\n", end - start, 2025 + total_freed, udev->name); 2026 + } 2027 + mutex_unlock(&root_udev_mutex); 2028 + 2029 + if (atomic_read(&global_db_count) > tcmu_global_max_blocks) 2030 + schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000)); 2031 + } 2032 + 2033 + static void check_timedout_devices(void) 2034 + { 2035 + struct tcmu_dev *udev, *tmp_dev; 2036 + LIST_HEAD(devs); 2037 + 2038 + spin_lock_bh(&timed_out_udevs_lock); 2039 + list_splice_init(&timed_out_udevs, &devs); 2040 + 2041 + list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) { 2042 + list_del_init(&udev->timedout_entry); 2043 + spin_unlock_bh(&timed_out_udevs_lock); 2044 + 2045 + mutex_lock(&udev->cmdr_lock); 2046 + idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 2047 + mutex_unlock(&udev->cmdr_lock); 2048 + 2049 + spin_lock_bh(&timed_out_udevs_lock); 2460 2050 } 2461 2051 2462 - return 0; 2052 + spin_unlock_bh(&timed_out_udevs_lock); 2053 + } 2054 + 2055 + static void tcmu_unmap_work_fn(struct work_struct *work) 2056 + { 2057 + check_timedout_devices(); 2058 + find_free_blocks(); 2463 2059 } 2464 2060 2465 2061 static int __init tcmu_module_init(void) ··· 2475 2055 int ret, i, k, len = 0; 2476 2056 2477 2057 
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 2058 + 2059 + INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn); 2478 2060 2479 2061 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 2480 2062 sizeof(struct tcmu_cmd), ··· 2523 2101 if (ret) 2524 2102 goto out_attrs; 2525 2103 2526 - init_waitqueue_head(&unmap_wait); 2527 - unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap"); 2528 - if (IS_ERR(unmap_thread)) { 2529 - ret = PTR_ERR(unmap_thread); 2530 - goto out_unreg_transport; 2531 - } 2532 - 2533 2104 return 0; 2534 2105 2535 - out_unreg_transport: 2536 - target_backend_unregister(&tcmu_ops); 2537 2106 out_attrs: 2538 2107 kfree(tcmu_attrs); 2539 2108 out_unreg_genl: ··· 2539 2126 2540 2127 static void __exit tcmu_module_exit(void) 2541 2128 { 2542 - kthread_stop(unmap_thread); 2129 + cancel_delayed_work_sync(&tcmu_unmap_work); 2543 2130 target_backend_unregister(&tcmu_ops); 2544 2131 kfree(tcmu_attrs); 2545 2132 genl_unregister_family(&tcmu_genl_family);
+1
include/target/target_core_backend.h
··· 53 53 void (*free_prot)(struct se_device *); 54 54 55 55 struct configfs_attribute **tb_dev_attrib_attrs; 56 + struct configfs_attribute **tb_dev_action_attrs; 56 57 }; 57 58 58 59 struct sbc_ops {
+2
include/target/target_core_base.h
··· 183 183 TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b), 184 184 TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c), 185 185 TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d), 186 + TCM_LUN_BUSY = R(0x1e), 186 187 #undef R 187 188 }; 188 189 ··· 809 808 /* T10 SPC-2 + SPC-3 Reservations */ 810 809 struct t10_reservation t10_pr; 811 810 struct se_dev_attrib dev_attrib; 811 + struct config_group dev_action_group; 812 812 struct config_group dev_group; 813 813 struct config_group dev_pr_group; 814 814 struct se_dev_stat_grps dev_stat_grps;