Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

target: Subsume se_port + t10_alua_tg_pt_gp_member into se_lun

This patch eliminates all se_port + t10_alua_tg_pt_gp_member usage,
and converts current users to direct se_lun pointer dereference.

This includes the removal of core_export_port(), core_release_port(),
core_dev_export() and core_dev_unexport(). Along with conversion
of special case se_lun pointer dereference within PR ALL_TG_PT=1
and ALUA access state transition UNIT_ATTENTION handling.

Also, update core_enable_device_list_for_node() to reference the
new per se_lun->lun_deve_list when creating a new entry, or
replacing an existing one via RCU.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>

authored by

Christoph Hellwig and committed by
Nicholas Bellinger
adf653f9 b3eeea66

+461 -810
+151 -249
drivers/target/target_core_alua.c
··· 43 43 static sense_reason_t core_alua_check_transition(int state, int valid, 44 44 int *primary); 45 45 static int core_alua_set_tg_pt_secondary_state( 46 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 47 - struct se_port *port, int explicit, int offline); 46 + struct se_lun *lun, int explicit, int offline); 48 47 49 48 static char *core_alua_dump_state(int state); 49 + 50 + static void __target_attach_tg_pt_gp(struct se_lun *lun, 51 + struct t10_alua_tg_pt_gp *tg_pt_gp); 50 52 51 53 static u16 alua_lu_gps_counter; 52 54 static u32 alua_lu_gps_count; ··· 147 145 target_emulate_report_target_port_groups(struct se_cmd *cmd) 148 146 { 149 147 struct se_device *dev = cmd->se_dev; 150 - struct se_port *port; 151 148 struct t10_alua_tg_pt_gp *tg_pt_gp; 152 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 149 + struct se_lun *lun; 153 150 unsigned char *buf; 154 151 u32 rd_len = 0, off; 155 152 int ext_hdr = (cmd->t_task_cdb[1] & 0x20); ··· 223 222 rd_len += 8; 224 223 225 224 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 226 - list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, 227 - tg_pt_gp_mem_list) { 228 - port = tg_pt_gp_mem->tg_pt; 225 + list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, 226 + lun_tg_pt_gp_link) { 229 227 /* 230 228 * Start Target Port descriptor format 231 229 * ··· 234 234 /* 235 235 * Set RELATIVE TARGET PORT IDENTIFIER 236 236 */ 237 - buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 238 - buf[off++] = (port->sep_rtpi & 0xff); 237 + buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); 238 + buf[off++] = (lun->lun_rtpi & 0xff); 239 239 rd_len += 4; 240 240 } 241 241 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); ··· 259 259 * this CDB was received upon to determine this value individually 260 260 * for ALUA target port group. 
261 261 */ 262 - port = cmd->se_lun->lun_sep; 263 - tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 264 - if (tg_pt_gp_mem) { 265 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 266 - tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 267 - if (tg_pt_gp) 268 - buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; 269 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 270 - } 262 + spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock); 263 + tg_pt_gp = cmd->se_lun->lun_tg_pt_gp; 264 + if (tg_pt_gp) 265 + buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; 266 + spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock); 271 267 } 272 268 transport_kunmap_data_sg(cmd); 273 269 ··· 280 284 target_emulate_set_target_port_groups(struct se_cmd *cmd) 281 285 { 282 286 struct se_device *dev = cmd->se_dev; 283 - struct se_port *port, *l_port = cmd->se_lun->lun_sep; 287 + struct se_lun *l_lun = cmd->se_lun; 284 288 struct se_node_acl *nacl = cmd->se_sess->se_node_acl; 285 289 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; 286 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; 287 290 unsigned char *buf; 288 291 unsigned char *ptr; 289 292 sense_reason_t rc = TCM_NO_SENSE; 290 293 u32 len = 4; /* Skip over RESERVED area in header */ 291 294 int alua_access_state, primary = 0, valid_states; 292 295 u16 tg_pt_id, rtpi; 293 - 294 - if (!l_port) 295 - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 296 296 297 297 if (cmd->data_length < 4) { 298 298 pr_warn("SET TARGET PORT GROUPS parameter list length %u too" ··· 304 312 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed 305 313 * for the local tg_pt_gp. 
306 314 */ 307 - l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; 308 - if (!l_tg_pt_gp_mem) { 309 - pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); 310 - rc = TCM_UNSUPPORTED_SCSI_OPCODE; 311 - goto out; 312 - } 313 - spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 314 - l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; 315 + spin_lock(&l_lun->lun_tg_pt_gp_lock); 316 + l_tg_pt_gp = l_lun->lun_tg_pt_gp; 315 317 if (!l_tg_pt_gp) { 316 - spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 317 - pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); 318 + spin_unlock(&l_lun->lun_tg_pt_gp_lock); 319 + pr_err("Unable to access l_lun->tg_pt_gp\n"); 318 320 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 319 321 goto out; 320 322 } 321 - spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); 322 323 323 324 if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { 325 + spin_unlock(&l_lun->lun_tg_pt_gp_lock); 324 326 pr_debug("Unable to process SET_TARGET_PORT_GROUPS" 325 327 " while TPGS_EXPLICIT_ALUA is disabled\n"); 326 328 rc = TCM_UNSUPPORTED_SCSI_OPCODE; 327 329 goto out; 328 330 } 329 331 valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; 332 + spin_unlock(&l_lun->lun_tg_pt_gp_lock); 330 333 331 334 ptr = &buf[4]; /* Skip over RESERVED area in header */ 332 335 ··· 383 396 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 384 397 385 398 if (!core_alua_do_port_transition(tg_pt_gp, 386 - dev, l_port, nacl, 399 + dev, l_lun, nacl, 387 400 alua_access_state, 1)) 388 401 found = true; 389 402 ··· 393 406 } 394 407 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 395 408 } else { 409 + struct se_lun *lun; 410 + 396 411 /* 397 412 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify 398 413 * the Target Port in question for the the incoming ··· 406 417 * for the struct se_device storage object. 
407 418 */ 408 419 spin_lock(&dev->se_port_lock); 409 - list_for_each_entry(port, &dev->dev_sep_list, 410 - sep_list) { 411 - if (port->sep_rtpi != rtpi) 420 + list_for_each_entry(lun, &dev->dev_sep_list, 421 + lun_dev_link) { 422 + if (lun->lun_rtpi != rtpi) 412 423 continue; 413 424 414 - tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 415 - 425 + // XXX: racy unlock 416 426 spin_unlock(&dev->se_port_lock); 417 427 418 428 if (!core_alua_set_tg_pt_secondary_state( 419 - tg_pt_gp_mem, port, 1, 1)) 429 + lun, 1, 1)) 420 430 found = true; 421 431 422 432 spin_lock(&dev->se_port_lock); ··· 684 696 struct se_device *dev = cmd->se_dev; 685 697 unsigned char *cdb = cmd->t_task_cdb; 686 698 struct se_lun *lun = cmd->se_lun; 687 - struct se_port *port = lun->lun_sep; 688 699 struct t10_alua_tg_pt_gp *tg_pt_gp; 689 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 690 700 int out_alua_state, nonop_delay_msecs; 691 701 692 702 if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) ··· 692 706 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 693 707 return 0; 694 708 695 - if (!port) 696 - return 0; 697 709 /* 698 710 * First, check for a struct se_port specific secondary ALUA target port 699 711 * access state: OFFLINE 700 712 */ 701 - if (atomic_read(&port->sep_tg_pt_secondary_offline)) { 713 + if (atomic_read(&lun->lun_tg_pt_secondary_offline)) { 702 714 pr_debug("ALUA: Got secondary offline status for local" 703 715 " target port\n"); 704 716 set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE); 705 717 return TCM_CHECK_CONDITION_NOT_READY; 706 718 } 707 - /* 708 - * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the 709 - * ALUA target port group, to obtain current ALUA access state. 710 - * Otherwise look for the underlying struct se_device association with 711 - * a ALUA logical unit group. 
712 - */ 713 - tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 714 - if (!tg_pt_gp_mem) 719 + 720 + if (!lun->lun_tg_pt_gp) 715 721 return 0; 716 722 717 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 718 - tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 723 + spin_lock(&lun->lun_tg_pt_gp_lock); 724 + tg_pt_gp = lun->lun_tg_pt_gp; 719 725 out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); 720 726 nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; 721 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 727 + 728 + // XXX: keeps using tg_pt_gp witout reference after unlock 729 + spin_unlock(&lun->lun_tg_pt_gp_lock); 722 730 /* 723 731 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional 724 732 * statement so the compiler knows explicitly to check this case first. ··· 744 764 break; 745 765 /* 746 766 * OFFLINE is a secondary ALUA target port group access state, that is 747 - * handled above with struct se_port->sep_tg_pt_secondary_offline=1 767 + * handled above with struct se_lun->lun_tg_pt_secondary_offline=1 748 768 */ 749 769 case ALUA_ACCESS_STATE_OFFLINE: 750 770 default: ··· 886 906 } 887 907 EXPORT_SYMBOL(core_alua_check_nonop_delay); 888 908 889 - /* 890 - * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex 891 - * 892 - */ 893 909 static int core_alua_write_tpg_metadata( 894 910 const char *path, 895 911 unsigned char *md_buf, ··· 947 971 struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); 948 972 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 949 973 struct se_dev_entry *se_deve; 974 + struct se_lun *lun; 950 975 struct se_lun_acl *lacl; 951 - struct se_port *port; 952 - struct t10_alua_tg_pt_gp_member *mem; 953 976 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == 954 977 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); 955 978 956 979 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 957 - list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, 958 - tg_pt_gp_mem_list) { 959 - port = mem->tg_pt; 980 + 
list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, 981 + lun_tg_pt_gp_link) { 960 982 /* 961 983 * After an implicit target port asymmetric access state 962 984 * change, a device server shall establish a unit attention ··· 969 995 * every I_T nexus other than the I_T nexus on which the SET 970 996 * TARGET PORT GROUPS command 971 997 */ 972 - atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt); 998 + atomic_inc_mb(&lun->lun_active); 973 999 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 974 1000 975 - spin_lock_bh(&port->sep_alua_lock); 976 - list_for_each_entry(se_deve, &port->sep_alua_list, 977 - alua_port_list) { 1001 + spin_lock_bh(&lun->lun_deve_lock); 1002 + list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) { 978 1003 lacl = rcu_dereference_check(se_deve->se_lun_acl, 979 - lockdep_is_held(&port->sep_alua_lock)); 1004 + lockdep_is_held(&lun->lun_deve_lock)); 980 1005 /* 981 1006 * se_deve->se_lun_acl pointer may be NULL for a 982 1007 * entry created without explicit Node+MappedLUN ACLs ··· 987 1014 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && 988 1015 (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) && 989 1016 (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) && 990 - (tg_pt_gp->tg_pt_gp_alua_port != NULL) && 991 - (tg_pt_gp->tg_pt_gp_alua_port == port)) 1017 + (tg_pt_gp->tg_pt_gp_alua_lun != NULL) && 1018 + (tg_pt_gp->tg_pt_gp_alua_lun == lun)) 992 1019 continue; 993 1020 994 1021 core_scsi3_ua_allocate(lacl->se_lun_nacl, 995 1022 se_deve->mapped_lun, 0x2A, 996 1023 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED); 997 1024 } 998 - spin_unlock_bh(&port->sep_alua_lock); 1025 + spin_unlock_bh(&lun->lun_deve_lock); 999 1026 1000 1027 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1001 - atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt); 1028 + atomic_dec_mb(&lun->lun_active); 1002 1029 } 1003 1030 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1004 1031 /* ··· 1116 1143 int core_alua_do_port_transition( 1117 1144 struct t10_alua_tg_pt_gp *l_tg_pt_gp, 1118 1145 struct se_device *l_dev, 1119 - struct se_port 
*l_port, 1146 + struct se_lun *l_lun, 1120 1147 struct se_node_acl *l_nacl, 1121 1148 int new_state, 1122 1149 int explicit) ··· 1146 1173 * core_alua_do_transition_tg_pt() will always return 1147 1174 * success. 1148 1175 */ 1149 - l_tg_pt_gp->tg_pt_gp_alua_port = l_port; 1176 + l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun; 1150 1177 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1151 1178 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, 1152 1179 new_state, explicit); ··· 1185 1212 continue; 1186 1213 1187 1214 if (l_tg_pt_gp == tg_pt_gp) { 1188 - tg_pt_gp->tg_pt_gp_alua_port = l_port; 1215 + tg_pt_gp->tg_pt_gp_alua_lun = l_lun; 1189 1216 tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1190 1217 } else { 1191 - tg_pt_gp->tg_pt_gp_alua_port = NULL; 1218 + tg_pt_gp->tg_pt_gp_alua_lun = NULL; 1192 1219 tg_pt_gp->tg_pt_gp_alua_nacl = NULL; 1193 1220 } 1194 1221 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt); ··· 1225 1252 return rc; 1226 1253 } 1227 1254 1228 - /* 1229 - * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held 1230 - */ 1231 - static int core_alua_update_tpg_secondary_metadata( 1232 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1233 - struct se_port *port) 1255 + static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun) 1234 1256 { 1257 + struct se_portal_group *se_tpg = lun->lun_tpg; 1235 1258 unsigned char *md_buf; 1236 - struct se_portal_group *se_tpg = port->sep_tpg; 1237 1259 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN]; 1238 1260 int len, rc; 1261 + 1262 + mutex_lock(&lun->lun_tg_pt_md_mutex); 1239 1263 1240 1264 md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL); 1241 1265 if (!md_buf) { 1242 1266 pr_err("Unable to allocate buf for ALUA metadata\n"); 1243 - return -ENOMEM; 1267 + rc = -ENOMEM; 1268 + goto out_unlock; 1244 1269 } 1245 1270 1246 1271 memset(path, 0, ALUA_METADATA_PATH_LEN); ··· 1253 1282 1254 1283 len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n" 1255 1284 "alua_tg_pt_status=0x%02x\n", 1256 - 
atomic_read(&port->sep_tg_pt_secondary_offline), 1257 - port->sep_tg_pt_secondary_stat); 1285 + atomic_read(&lun->lun_tg_pt_secondary_offline), 1286 + lun->lun_tg_pt_secondary_stat); 1258 1287 1259 1288 snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", 1260 1289 se_tpg->se_tpg_tfo->get_fabric_name(), wwn, 1261 - port->sep_lun->unpacked_lun); 1290 + lun->unpacked_lun); 1262 1291 1263 1292 rc = core_alua_write_tpg_metadata(path, md_buf, len); 1264 1293 kfree(md_buf); 1265 1294 1295 + out_unlock: 1296 + mutex_unlock(&lun->lun_tg_pt_md_mutex); 1266 1297 return rc; 1267 1298 } 1268 1299 1269 1300 static int core_alua_set_tg_pt_secondary_state( 1270 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1271 - struct se_port *port, 1301 + struct se_lun *lun, 1272 1302 int explicit, 1273 1303 int offline) 1274 1304 { 1275 1305 struct t10_alua_tg_pt_gp *tg_pt_gp; 1276 1306 int trans_delay_msecs; 1277 1307 1278 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1279 - tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1308 + spin_lock(&lun->lun_tg_pt_gp_lock); 1309 + tg_pt_gp = lun->lun_tg_pt_gp; 1280 1310 if (!tg_pt_gp) { 1281 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1311 + spin_unlock(&lun->lun_tg_pt_gp_lock); 1282 1312 pr_err("Unable to complete secondary state" 1283 1313 " transition\n"); 1284 1314 return -EINVAL; ··· 1287 1315 trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; 1288 1316 /* 1289 1317 * Set the secondary ALUA target port access state to OFFLINE 1290 - * or release the previously secondary state for struct se_port 1318 + * or release the previously secondary state for struct se_lun 1291 1319 */ 1292 1320 if (offline) 1293 - atomic_set(&port->sep_tg_pt_secondary_offline, 1); 1321 + atomic_set(&lun->lun_tg_pt_secondary_offline, 1); 1294 1322 else 1295 - atomic_set(&port->sep_tg_pt_secondary_offline, 0); 1323 + atomic_set(&lun->lun_tg_pt_secondary_offline, 0); 1296 1324 1297 - port->sep_tg_pt_secondary_stat = (explicit) ? 
1325 + lun->lun_tg_pt_secondary_stat = (explicit) ? 1298 1326 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : 1299 1327 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; 1300 1328 ··· 1303 1331 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), 1304 1332 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); 1305 1333 1306 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1334 + spin_unlock(&lun->lun_tg_pt_gp_lock); 1307 1335 /* 1308 1336 * Do the optional transition delay after we set the secondary 1309 1337 * ALUA access state. ··· 1314 1342 * See if we need to update the ALUA fabric port metadata for 1315 1343 * secondary state and status 1316 1344 */ 1317 - if (port->sep_tg_pt_secondary_write_md) { 1318 - mutex_lock(&port->sep_tg_pt_md_mutex); 1319 - core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port); 1320 - mutex_unlock(&port->sep_tg_pt_md_mutex); 1321 - } 1345 + if (lun->lun_tg_pt_secondary_write_md) 1346 + core_alua_update_tpg_secondary_metadata(lun); 1322 1347 1323 1348 return 0; 1324 1349 } ··· 1669 1700 return NULL; 1670 1701 } 1671 1702 INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); 1672 - INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list); 1703 + INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); 1673 1704 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1674 1705 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1675 1706 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); ··· 1763 1794 return 0; 1764 1795 } 1765 1796 1766 - struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( 1767 - struct se_port *port) 1768 - { 1769 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1770 - 1771 - tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, 1772 - GFP_KERNEL); 1773 - if (!tg_pt_gp_mem) { 1774 - pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n"); 1775 - return ERR_PTR(-ENOMEM); 1776 - } 1777 - INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1778 - spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1779 - atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0); 1780 
- 1781 - tg_pt_gp_mem->tg_pt = port; 1782 - port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; 1783 - 1784 - return tg_pt_gp_mem; 1785 - } 1786 - 1787 1797 void core_alua_free_tg_pt_gp( 1788 1798 struct t10_alua_tg_pt_gp *tg_pt_gp) 1789 1799 { 1790 1800 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1791 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; 1801 + struct se_lun *lun, *next; 1792 1802 1793 1803 /* 1794 1804 * Once we have reached this point, config_item_put() has already ··· 1798 1850 * struct se_port. 1799 1851 */ 1800 1852 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1801 - list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp, 1802 - &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) { 1803 - if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1804 - list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1805 - tg_pt_gp->tg_pt_gp_members--; 1806 - tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1807 - } 1853 + list_for_each_entry_safe(lun, next, 1854 + &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) { 1855 + list_del_init(&lun->lun_tg_pt_gp_link); 1856 + tg_pt_gp->tg_pt_gp_members--; 1857 + 1808 1858 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1809 1859 /* 1810 - * tg_pt_gp_mem is associated with a single 1811 - * se_port->sep_alua_tg_pt_gp_mem, and is released via 1812 - * core_alua_free_tg_pt_gp_mem(). 1813 - * 1814 1860 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, 1815 1861 * assume we want to re-associate a given tg_pt_gp_mem with 1816 1862 * default_tg_pt_gp. 
1817 1863 */ 1818 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1864 + spin_lock(&lun->lun_tg_pt_gp_lock); 1819 1865 if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) { 1820 - __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 1866 + __target_attach_tg_pt_gp(lun, 1821 1867 dev->t10_alua.default_tg_pt_gp); 1822 1868 } else 1823 - tg_pt_gp_mem->tg_pt_gp = NULL; 1824 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1869 + lun->lun_tg_pt_gp = NULL; 1870 + spin_unlock(&lun->lun_tg_pt_gp_lock); 1825 1871 1826 1872 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1827 1873 } 1828 1874 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1829 1875 1830 1876 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1831 - } 1832 - 1833 - void core_alua_free_tg_pt_gp_mem(struct se_port *port) 1834 - { 1835 - struct t10_alua_tg_pt_gp *tg_pt_gp; 1836 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1837 - 1838 - tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1839 - if (!tg_pt_gp_mem) 1840 - return; 1841 - 1842 - while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) 1843 - cpu_relax(); 1844 - 1845 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1846 - tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1847 - if (tg_pt_gp) { 1848 - spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1849 - if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1850 - list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1851 - tg_pt_gp->tg_pt_gp_members--; 1852 - tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1853 - } 1854 - spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1855 - tg_pt_gp_mem->tg_pt_gp = NULL; 1856 - } 1857 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1858 - 1859 - kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem); 1860 1877 } 1861 1878 1862 1879 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( ··· 1857 1944 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1858 1945 } 1859 1946 1860 - /* 1861 - * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held 1862 - */ 1863 - void __core_alua_attach_tg_pt_gp_mem( 1864 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1865 
- struct t10_alua_tg_pt_gp *tg_pt_gp) 1947 + static void __target_attach_tg_pt_gp(struct se_lun *lun, 1948 + struct t10_alua_tg_pt_gp *tg_pt_gp) 1866 1949 { 1950 + assert_spin_locked(&lun->lun_tg_pt_gp_lock); 1951 + 1867 1952 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1868 - tg_pt_gp_mem->tg_pt_gp = tg_pt_gp; 1869 - tg_pt_gp_mem->tg_pt_gp_assoc = 1; 1870 - list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list, 1871 - &tg_pt_gp->tg_pt_gp_mem_list); 1953 + lun->lun_tg_pt_gp = tg_pt_gp; 1954 + list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list); 1872 1955 tg_pt_gp->tg_pt_gp_members++; 1873 1956 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1874 1957 } 1875 1958 1876 - /* 1877 - * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held 1878 - */ 1879 - static void __core_alua_drop_tg_pt_gp_mem( 1880 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, 1881 - struct t10_alua_tg_pt_gp *tg_pt_gp) 1959 + void target_attach_tg_pt_gp(struct se_lun *lun, 1960 + struct t10_alua_tg_pt_gp *tg_pt_gp) 1882 1961 { 1883 - spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1884 - list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1885 - tg_pt_gp_mem->tg_pt_gp = NULL; 1886 - tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1887 - tg_pt_gp->tg_pt_gp_members--; 1888 - spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1962 + spin_lock(&lun->lun_tg_pt_gp_lock); 1963 + __target_attach_tg_pt_gp(lun, tg_pt_gp); 1964 + spin_unlock(&lun->lun_tg_pt_gp_lock); 1889 1965 } 1890 1966 1891 - ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) 1967 + static void __target_detach_tg_pt_gp(struct se_lun *lun, 1968 + struct t10_alua_tg_pt_gp *tg_pt_gp) 1969 + { 1970 + assert_spin_locked(&lun->lun_tg_pt_gp_lock); 1971 + 1972 + spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1973 + list_del_init(&lun->lun_tg_pt_gp_link); 1974 + tg_pt_gp->tg_pt_gp_members--; 1975 + spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1976 + 1977 + lun->lun_tg_pt_gp = NULL; 1978 + } 1979 + 1980 + void target_detach_tg_pt_gp(struct se_lun *lun) 1981 + { 1982 + struct 
t10_alua_tg_pt_gp *tg_pt_gp; 1983 + 1984 + spin_lock(&lun->lun_tg_pt_gp_lock); 1985 + tg_pt_gp = lun->lun_tg_pt_gp; 1986 + if (tg_pt_gp) 1987 + __target_detach_tg_pt_gp(lun, tg_pt_gp); 1988 + spin_unlock(&lun->lun_tg_pt_gp_lock); 1989 + } 1990 + 1991 + ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) 1892 1992 { 1893 1993 struct config_item *tg_pt_ci; 1894 1994 struct t10_alua_tg_pt_gp *tg_pt_gp; 1895 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1896 1995 ssize_t len = 0; 1897 1996 1898 - tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1899 - if (!tg_pt_gp_mem) 1900 - return len; 1901 - 1902 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1903 - tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1997 + spin_lock(&lun->lun_tg_pt_gp_lock); 1998 + tg_pt_gp = lun->lun_tg_pt_gp; 1904 1999 if (tg_pt_gp) { 1905 2000 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1906 2001 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" ··· 1920 1999 &tg_pt_gp->tg_pt_gp_alua_access_state)), 1921 2000 core_alua_dump_status( 1922 2001 tg_pt_gp->tg_pt_gp_alua_access_status), 1923 - (atomic_read(&port->sep_tg_pt_secondary_offline)) ? 2002 + atomic_read(&lun->lun_tg_pt_secondary_offline) ? 
1924 2003 "Offline" : "None", 1925 - core_alua_dump_status(port->sep_tg_pt_secondary_stat)); 2004 + core_alua_dump_status(lun->lun_tg_pt_secondary_stat)); 1926 2005 } 1927 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 2006 + spin_unlock(&lun->lun_tg_pt_gp_lock); 1928 2007 1929 2008 return len; 1930 2009 } 1931 2010 1932 2011 ssize_t core_alua_store_tg_pt_gp_info( 1933 - struct se_port *port, 2012 + struct se_lun *lun, 1934 2013 const char *page, 1935 2014 size_t count) 1936 2015 { 1937 - struct se_portal_group *tpg; 1938 - struct se_lun *lun; 1939 - struct se_device *dev = port->sep_lun->lun_se_dev; 2016 + struct se_portal_group *tpg = lun->lun_tpg; 2017 + struct se_device *dev = lun->lun_se_dev; 1940 2018 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; 1941 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1942 2019 unsigned char buf[TG_PT_GROUP_NAME_BUF]; 1943 2020 int move = 0; 1944 2021 1945 - tpg = port->sep_tpg; 1946 - lun = port->sep_lun; 1947 - 1948 - tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1949 - if (!tg_pt_gp_mem) 1950 - return 0; 2022 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || 2023 + (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 2024 + return -ENODEV; 1951 2025 1952 2026 if (count > TG_PT_GROUP_NAME_BUF) { 1953 2027 pr_err("ALUA Target Port Group alias too large!\n"); ··· 1966 2050 return -ENODEV; 1967 2051 } 1968 2052 1969 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1970 - tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 2053 + spin_lock(&lun->lun_tg_pt_gp_lock); 2054 + tg_pt_gp = lun->lun_tg_pt_gp; 1971 2055 if (tg_pt_gp) { 1972 2056 /* 1973 2057 * Clearing an existing tg_pt_gp association, and replacing ··· 1985 2069 &tg_pt_gp->tg_pt_gp_group.cg_item), 1986 2070 tg_pt_gp->tg_pt_gp_id); 1987 2071 1988 - __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); 1989 - __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 2072 + __target_detach_tg_pt_gp(lun, tg_pt_gp); 2073 + __target_attach_tg_pt_gp(lun, 1990 2074 
dev->t10_alua.default_tg_pt_gp); 1991 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 2075 + spin_unlock(&lun->lun_tg_pt_gp_lock); 1992 2076 1993 2077 return count; 1994 2078 } 1995 - /* 1996 - * Removing existing association of tg_pt_gp_mem with tg_pt_gp 1997 - */ 1998 - __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); 2079 + __target_detach_tg_pt_gp(lun, tg_pt_gp); 1999 2080 move = 1; 2000 2081 } 2001 - /* 2002 - * Associate tg_pt_gp_mem with tg_pt_gp_new. 2003 - */ 2004 - __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); 2005 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 2082 + 2083 + __target_attach_tg_pt_gp(lun, tg_pt_gp_new); 2084 + spin_unlock(&lun->lun_tg_pt_gp_lock); 2006 2085 pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" 2007 2086 " Target Port Group: alua/%s, ID: %hu\n", (move) ? 2008 2087 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), ··· 2180 2269 2181 2270 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) 2182 2271 { 2183 - if (!lun->lun_sep) 2184 - return -ENODEV; 2185 - 2186 2272 return sprintf(page, "%d\n", 2187 - atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline)); 2273 + atomic_read(&lun->lun_tg_pt_secondary_offline)); 2188 2274 } 2189 2275 2190 2276 ssize_t core_alua_store_offline_bit( ··· 2189 2281 const char *page, 2190 2282 size_t count) 2191 2283 { 2192 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 2284 + struct se_device *dev = lun->lun_se_dev; 2193 2285 unsigned long tmp; 2194 2286 int ret; 2195 2287 2196 - if (!lun->lun_sep) 2288 + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || 2289 + (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 2197 2290 return -ENODEV; 2198 2291 2199 2292 ret = kstrtoul(page, 0, &tmp); ··· 2207 2298 tmp); 2208 2299 return -EINVAL; 2209 2300 } 2210 - tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; 2211 - if (!tg_pt_gp_mem) { 2212 - pr_err("Unable to locate *tg_pt_gp_mem\n"); 2213 - return -EINVAL; 2214 - } 2215 2301 2216 - 
ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem, 2217 - lun->lun_sep, 0, (int)tmp); 2302 + ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp); 2218 2303 if (ret < 0) 2219 2304 return -EINVAL; 2220 2305 ··· 2219 2316 struct se_lun *lun, 2220 2317 char *page) 2221 2318 { 2222 - return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat); 2319 + return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat); 2223 2320 } 2224 2321 2225 2322 ssize_t core_alua_store_secondary_status( ··· 2242 2339 tmp); 2243 2340 return -EINVAL; 2244 2341 } 2245 - lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp; 2342 + lun->lun_tg_pt_secondary_stat = (int)tmp; 2246 2343 2247 2344 return count; 2248 2345 } ··· 2251 2348 struct se_lun *lun, 2252 2349 char *page) 2253 2350 { 2254 - return sprintf(page, "%d\n", 2255 - lun->lun_sep->sep_tg_pt_secondary_write_md); 2351 + return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md); 2256 2352 } 2257 2353 2258 2354 ssize_t core_alua_store_secondary_write_metadata( ··· 2272 2370 " %lu\n", tmp); 2273 2371 return -EINVAL; 2274 2372 } 2275 - lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp; 2373 + lun->lun_tg_pt_secondary_write_md = (int)tmp; 2276 2374 2277 2375 return count; 2278 2376 }
+5 -9
drivers/target/target_core_alua.h
··· 85 85 extern struct kmem_cache *t10_alua_lu_gp_cache; 86 86 extern struct kmem_cache *t10_alua_lu_gp_mem_cache; 87 87 extern struct kmem_cache *t10_alua_tg_pt_gp_cache; 88 - extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 89 88 extern struct kmem_cache *t10_alua_lba_map_cache; 90 89 extern struct kmem_cache *t10_alua_lba_map_mem_cache; 91 90 ··· 93 94 extern sense_reason_t target_emulate_report_referrals(struct se_cmd *); 94 95 extern int core_alua_check_nonop_delay(struct se_cmd *); 95 96 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, 96 - struct se_device *, struct se_port *, 97 + struct se_device *, struct se_lun *, 97 98 struct se_node_acl *, int, int); 98 99 extern char *core_alua_dump_status(int); 99 100 extern struct t10_alua_lba_map *core_alua_allocate_lba_map( ··· 116 117 extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( 117 118 struct se_device *, const char *, int); 118 119 extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16); 119 - extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( 120 - struct se_port *); 121 120 extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *); 122 - extern void core_alua_free_tg_pt_gp_mem(struct se_port *); 123 - extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *, 124 - struct t10_alua_tg_pt_gp *); 125 - extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *); 126 - extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *, 121 + extern void target_detach_tg_pt_gp(struct se_lun *); 122 + extern void target_attach_tg_pt_gp(struct se_lun *, struct t10_alua_tg_pt_gp *); 123 + extern ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *, char *); 124 + extern ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *, const char *, 127 125 size_t); 128 126 extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *); 129 127 extern ssize_t 
core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
+3 -8
drivers/target/target_core_configfs.c
··· 2889 2889 struct t10_alua_tg_pt_gp *tg_pt_gp, 2890 2890 char *page) 2891 2891 { 2892 - struct se_port *port; 2893 - struct se_portal_group *tpg; 2894 2892 struct se_lun *lun; 2895 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 2896 2893 ssize_t len = 0, cur_len; 2897 2894 unsigned char buf[TG_PT_GROUP_NAME_BUF]; 2898 2895 2899 2896 memset(buf, 0, TG_PT_GROUP_NAME_BUF); 2900 2897 2901 2898 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 2902 - list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list, 2903 - tg_pt_gp_mem_list) { 2904 - port = tg_pt_gp_mem->tg_pt; 2905 - tpg = port->sep_tpg; 2906 - lun = port->sep_lun; 2899 + list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, 2900 + lun_tg_pt_gp_link) { 2901 + struct se_portal_group *tpg = lun->lun_tpg; 2907 2902 2908 2903 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" 2909 2904 "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
+29 -150
drivers/target/target_core_device.c
··· 120 120 (se_cmd->data_direction != DMA_NONE)) 121 121 return TCM_WRITE_PROTECTED; 122 122 123 - se_lun = &se_sess->se_tpg->tpg_virt_lun0; 124 - se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; 123 + se_lun = se_sess->se_tpg->tpg_virt_lun0; 124 + se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; 125 125 se_cmd->orig_fe_lun = 0; 126 126 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 127 127 ··· 309 309 struct se_node_acl *nacl, 310 310 struct se_portal_group *tpg) 311 311 { 312 - struct se_port *port = lun->lun_sep; 313 312 struct se_dev_entry *orig, *new; 314 313 315 314 new = kzalloc(sizeof(*new), GFP_KERNEL); ··· 319 320 320 321 atomic_set(&new->ua_count, 0); 321 322 spin_lock_init(&new->ua_lock); 322 - INIT_LIST_HEAD(&new->alua_port_list); 323 323 INIT_LIST_HEAD(&new->ua_list); 324 + INIT_LIST_HEAD(&new->lun_link); 324 325 325 326 new->mapped_lun = mapped_lun; 326 327 kref_init(&new->pr_kref); ··· 356 357 hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist); 357 358 mutex_unlock(&nacl->lun_entry_mutex); 358 359 359 - spin_lock_bh(&port->sep_alua_lock); 360 - list_del(&orig->alua_port_list); 361 - list_add_tail(&new->alua_port_list, &port->sep_alua_list); 362 - spin_unlock_bh(&port->sep_alua_lock); 360 + spin_lock_bh(&lun->lun_deve_lock); 361 + list_del(&orig->lun_link); 362 + list_add_tail(&new->lun_link, &lun->lun_deve_list); 363 + spin_unlock_bh(&lun->lun_deve_lock); 363 364 364 365 kref_put(&orig->pr_kref, target_pr_kref_release); 365 366 wait_for_completion(&orig->pr_comp); ··· 373 374 hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist); 374 375 mutex_unlock(&nacl->lun_entry_mutex); 375 376 376 - spin_lock_bh(&port->sep_alua_lock); 377 - list_add_tail(&new->alua_port_list, &port->sep_alua_list); 378 - spin_unlock_bh(&port->sep_alua_lock); 377 + spin_lock_bh(&lun->lun_deve_lock); 378 + list_add_tail(&new->lun_link, &lun->lun_deve_list); 379 + spin_unlock_bh(&lun->lun_deve_lock); 379 380 380 381 return 0; 381 382 } ··· 389 390 struct se_node_acl *nacl, 390 
391 struct se_portal_group *tpg) 391 392 { 392 - struct se_port *port = lun->lun_sep; 393 393 /* 394 394 * If the MappedLUN entry is being disabled, the entry in 395 - * port->sep_alua_list must be removed now before clearing the 395 + * lun->lun_deve_list must be removed now before clearing the 396 396 * struct se_dev_entry pointers below as logic in 397 397 * core_alua_do_transition_tg_pt() depends on these being present. 398 398 * 399 399 * deve->se_lun_acl will be NULL for demo-mode created LUNs 400 400 * that have not been explicitly converted to MappedLUNs -> 401 - * struct se_lun_acl, but we remove deve->alua_port_list from 402 - * port->sep_alua_list. This also means that active UAs and 401 + * struct se_lun_acl, but we remove deve->lun_link from 402 + * lun->lun_deve_list. This also means that active UAs and 403 403 * NodeACL context specific PR metadata for demo-mode 404 404 * MappedLUN *deve will be released below.. 405 405 */ 406 - spin_lock_bh(&port->sep_alua_lock); 407 - list_del(&orig->alua_port_list); 408 - spin_unlock_bh(&port->sep_alua_lock); 406 + spin_lock_bh(&lun->lun_deve_lock); 407 + list_del(&orig->lun_link); 408 + spin_unlock_bh(&lun->lun_deve_lock); 409 409 /* 410 410 * Disable struct se_dev_entry LUN ACL mapping 411 411 */ ··· 456 458 mutex_unlock(&tpg->acl_node_mutex); 457 459 } 458 460 459 - static struct se_port *core_alloc_port(struct se_device *dev) 461 + int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev) 460 462 { 461 - struct se_port *port, *port_tmp; 462 - 463 - port = kzalloc(sizeof(struct se_port), GFP_KERNEL); 464 - if (!port) { 465 - pr_err("Unable to allocate struct se_port\n"); 466 - return ERR_PTR(-ENOMEM); 467 - } 468 - INIT_LIST_HEAD(&port->sep_alua_list); 469 - INIT_LIST_HEAD(&port->sep_list); 470 - atomic_set(&port->sep_tg_pt_secondary_offline, 0); 471 - spin_lock_init(&port->sep_alua_lock); 472 - mutex_init(&port->sep_tg_pt_md_mutex); 463 + struct se_lun *tmp; 473 464 474 465 spin_lock(&dev->se_port_lock); 
475 - if (dev->dev_port_count == 0x0000ffff) { 466 + if (dev->export_count == 0x0000ffff) { 476 467 pr_warn("Reached dev->dev_port_count ==" 477 468 " 0x0000ffff\n"); 478 469 spin_unlock(&dev->se_port_lock); 479 - return ERR_PTR(-ENOSPC); 470 + return -ENOSPC; 480 471 } 481 472 again: 482 473 /* ··· 480 493 * 2h Relative port 2, historically known as port B 481 494 * 3h to FFFFh Relative port 3 through 65 535 482 495 */ 483 - port->sep_rtpi = dev->dev_rpti_counter++; 484 - if (!port->sep_rtpi) 496 + lun->lun_rtpi = dev->dev_rpti_counter++; 497 + if (!lun->lun_rtpi) 485 498 goto again; 486 499 487 - list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { 500 + list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) { 488 501 /* 489 502 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique 490 503 * for 16-bit wrap.. 491 504 */ 492 - if (port->sep_rtpi == port_tmp->sep_rtpi) 505 + if (lun->lun_rtpi == tmp->lun_rtpi) 493 506 goto again; 494 507 } 495 508 spin_unlock(&dev->se_port_lock); 496 509 497 - return port; 498 - } 499 - 500 - static void core_export_port( 501 - struct se_device *dev, 502 - struct se_portal_group *tpg, 503 - struct se_port *port, 504 - struct se_lun *lun) 505 - { 506 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; 507 - 508 - spin_lock(&dev->se_port_lock); 509 - spin_lock(&lun->lun_sep_lock); 510 - port->sep_tpg = tpg; 511 - port->sep_lun = lun; 512 - lun->lun_sep = port; 513 - spin_unlock(&lun->lun_sep_lock); 514 - 515 - list_add_tail(&port->sep_list, &dev->dev_sep_list); 516 - spin_unlock(&dev->se_port_lock); 517 - 518 - if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && 519 - !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { 520 - tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 521 - if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { 522 - pr_err("Unable to allocate t10_alua_tg_pt" 523 - "_gp_member_t\n"); 524 - return; 525 - } 526 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 527 - 
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 528 - dev->t10_alua.default_tg_pt_gp); 529 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 530 - pr_debug("%s/%s: Adding to default ALUA Target Port" 531 - " Group: alua/default_tg_pt_gp\n", 532 - dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); 533 - } 534 - 535 - dev->dev_port_count++; 536 - port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */ 537 - } 538 - 539 - /* 540 - * Called with struct se_device->se_port_lock spinlock held. 541 - */ 542 - static void core_release_port(struct se_device *dev, struct se_port *port) 543 - __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock) 544 - { 545 - /* 546 - * Wait for any port reference for PR ALL_TG_PT=1 operation 547 - * to complete in __core_scsi3_alloc_registration() 548 - */ 549 - spin_unlock(&dev->se_port_lock); 550 - if (atomic_read(&port->sep_tg_pt_ref_cnt)) 551 - cpu_relax(); 552 - spin_lock(&dev->se_port_lock); 553 - 554 - core_alua_free_tg_pt_gp_mem(port); 555 - 556 - list_del(&port->sep_list); 557 - dev->dev_port_count--; 558 - kfree(port); 559 - } 560 - 561 - int core_dev_export( 562 - struct se_device *dev, 563 - struct se_portal_group *tpg, 564 - struct se_lun *lun) 565 - { 566 - struct se_hba *hba = dev->se_hba; 567 - struct se_port *port; 568 - 569 - port = core_alloc_port(dev); 570 - if (IS_ERR(port)) 571 - return PTR_ERR(port); 572 - 573 - lun->lun_index = dev->dev_index; 574 - lun->lun_se_dev = dev; 575 - lun->lun_rtpi = port->sep_rtpi; 576 - 577 - spin_lock(&hba->device_lock); 578 - dev->export_count++; 579 - spin_unlock(&hba->device_lock); 580 - 581 - core_export_port(dev, tpg, port, lun); 582 510 return 0; 583 - } 584 - 585 - void core_dev_unexport( 586 - struct se_device *dev, 587 - struct se_portal_group *tpg, 588 - struct se_lun *lun) 589 - { 590 - struct se_hba *hba = dev->se_hba; 591 - struct se_port *port = lun->lun_sep; 592 - 593 - spin_lock(&lun->lun_sep_lock); 594 - if (lun->lun_se_dev == NULL) { 595 - 
spin_unlock(&lun->lun_sep_lock); 596 - return; 597 - } 598 - spin_unlock(&lun->lun_sep_lock); 599 - 600 - spin_lock(&dev->se_port_lock); 601 - core_release_port(dev, port); 602 - spin_unlock(&dev->se_port_lock); 603 - 604 - spin_lock(&hba->device_lock); 605 - dev->export_count--; 606 - spin_unlock(&hba->device_lock); 607 - 608 - lun->lun_sep = NULL; 609 - lun->lun_se_dev = NULL; 610 511 } 611 512 612 513 static void se_release_vpd_for_dev(struct se_device *dev) ··· 658 783 } 659 784 660 785 int core_dev_del_initiator_node_lun_acl( 661 - struct se_portal_group *tpg, 662 786 struct se_lun *lun, 663 787 struct se_lun_acl *lacl) 664 788 { 789 + struct se_portal_group *tpg = lun->lun_tpg; 665 790 struct se_node_acl *nacl; 666 791 struct se_dev_entry *deve; 667 792 ··· 805 930 xcopy_lun->lun_se_dev = dev; 806 931 spin_lock_init(&xcopy_lun->lun_sep_lock); 807 932 init_completion(&xcopy_lun->lun_ref_comp); 933 + INIT_LIST_HEAD(&xcopy_lun->lun_deve_list); 934 + INIT_LIST_HEAD(&xcopy_lun->lun_dev_link); 935 + mutex_init(&xcopy_lun->lun_tg_pt_md_mutex); 936 + xcopy_lun->lun_tpg = &xcopy_pt_tpg; 808 937 809 938 return dev; 810 939 }
+15 -17
drivers/target/target_core_fabric_configfs.c
··· 91 91 /* 92 92 * Ensure that the source port exists 93 93 */ 94 - if (!lun->lun_sep || !lun->lun_sep->sep_tpg) { 95 - pr_err("Source se_lun->lun_sep or lun->lun_sep->sep" 96 - "_tpg does not exist\n"); 94 + if (!lun->lun_se_dev) { 95 + pr_err("Source se_lun->lun_se_dev does not exist\n"); 97 96 return -EINVAL; 98 97 } 99 - se_tpg = lun->lun_sep->sep_tpg; 98 + se_tpg = lun->lun_tpg; 100 99 101 100 nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; 102 101 tpg_ci = &nacl_ci->ci_group->cg_item; ··· 149 150 struct se_lun_acl, se_lun_group); 150 151 struct se_lun *lun = container_of(to_config_group(lun_ci), 151 152 struct se_lun, lun_group); 152 - struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg; 153 153 154 - return core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl); 154 + return core_dev_del_initiator_node_lun_acl(lun, lacl); 155 155 } 156 156 157 157 CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl); ··· 641 643 struct se_lun *lun, 642 644 char *page) 643 645 { 644 - if (!lun || !lun->lun_sep) 646 + if (!lun || !lun->lun_se_dev) 645 647 return -ENODEV; 646 648 647 - return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); 649 + return core_alua_show_tg_pt_gp_info(lun, page); 648 650 } 649 651 650 652 static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( ··· 652 654 const char *page, 653 655 size_t count) 654 656 { 655 - if (!lun || !lun->lun_sep) 657 + if (!lun || !lun->lun_se_dev) 656 658 return -ENODEV; 657 659 658 - return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); 660 + return core_alua_store_tg_pt_gp_info(lun, page, count); 659 661 } 660 662 661 663 TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR); ··· 667 669 struct se_lun *lun, 668 670 char *page) 669 671 { 670 - if (!lun || !lun->lun_sep) 672 + if (!lun || !lun->lun_se_dev) 671 673 return -ENODEV; 672 674 673 675 return core_alua_show_offline_bit(lun, page); ··· 678 680 const char *page, 679 681 size_t count) 680 682 { 681 - if (!lun || !lun->lun_sep) 683 + if 
(!lun || !lun->lun_se_dev) 682 684 return -ENODEV; 683 685 684 686 return core_alua_store_offline_bit(lun, page, count); ··· 693 695 struct se_lun *lun, 694 696 char *page) 695 697 { 696 - if (!lun || !lun->lun_sep) 698 + if (!lun || !lun->lun_se_dev) 697 699 return -ENODEV; 698 700 699 701 return core_alua_show_secondary_status(lun, page); ··· 704 706 const char *page, 705 707 size_t count) 706 708 { 707 - if (!lun || !lun->lun_sep) 709 + if (!lun || !lun->lun_se_dev) 708 710 return -ENODEV; 709 711 710 712 return core_alua_store_secondary_status(lun, page, count); ··· 719 721 struct se_lun *lun, 720 722 char *page) 721 723 { 722 - if (!lun || !lun->lun_sep) 724 + if (!lun || !lun->lun_se_dev) 723 725 return -ENODEV; 724 726 725 727 return core_alua_show_secondary_write_metadata(lun, page); ··· 730 732 const char *page, 731 733 size_t count) 732 734 { 733 - if (!lun || !lun->lun_sep) 735 + if (!lun || !lun->lun_se_dev) 734 736 return -ENODEV; 735 737 736 738 return core_alua_store_secondary_write_metadata(lun, page, count); ··· 809 811 { 810 812 struct se_lun *lun = container_of(to_config_group(lun_ci), 811 813 struct se_lun, lun_group); 812 - struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg; 814 + struct se_portal_group *se_tpg = lun->lun_tpg; 813 815 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 814 816 815 817 if (tf->tf_ops->fabric_pre_unlink) {
+6 -6
drivers/target/target_core_internal.h
··· 21 21 extern struct mutex g_device_mutex; 22 22 extern struct list_head g_device_list; 23 23 24 + int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev); 24 25 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); 25 26 void target_pr_kref_release(struct kref *); 26 27 void core_free_device_list_for_node(struct se_node_acl *, ··· 33 32 void core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *, 34 33 struct se_node_acl *, struct se_portal_group *); 35 34 void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); 36 - int core_dev_export(struct se_device *, struct se_portal_group *, 37 - struct se_lun *); 38 - void core_dev_unexport(struct se_device *, struct se_portal_group *, 39 - struct se_lun *); 40 35 int core_dev_add_lun(struct se_portal_group *, struct se_device *, 41 36 struct se_lun *lun); 42 37 void core_dev_del_lun(struct se_portal_group *, struct se_lun *); ··· 40 43 struct se_node_acl *, u32, int *); 41 44 int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, 42 45 struct se_lun_acl *, struct se_lun *lun, u32); 43 - int core_dev_del_initiator_node_lun_acl(struct se_portal_group *, 44 - struct se_lun *, struct se_lun_acl *); 46 + int core_dev_del_initiator_node_lun_acl(struct se_lun *, 47 + struct se_lun_acl *); 45 48 void core_dev_free_initiator_node_lun_acl(struct se_portal_group *, 46 49 struct se_lun_acl *lacl); 47 50 int core_dev_setup_virtual_lun0(void); ··· 116 119 void target_stat_setup_dev_default_groups(struct se_device *); 117 120 void target_stat_setup_port_default_groups(struct se_lun *); 118 121 void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); 122 + 123 + /* target_core_xcopy.c */ 124 + extern struct se_portal_group xcopy_pt_tpg; 119 125 120 126 #endif /* TARGET_CORE_INTERNAL_H */
+24 -32
drivers/target/target_core_pr.c
··· 642 642 pr_reg->pr_reg_deve = deve; 643 643 pr_reg->pr_res_mapped_lun = mapped_lun; 644 644 pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; 645 - pr_reg->tg_pt_sep_rtpi = lun->lun_sep->sep_rtpi; 645 + pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; 646 646 pr_reg->pr_res_key = sa_res_key; 647 647 pr_reg->pr_reg_all_tg_pt = all_tg_pt; 648 648 pr_reg->pr_reg_aptpl = aptpl; ··· 680 680 struct se_dev_entry *deve_tmp; 681 681 struct se_node_acl *nacl_tmp; 682 682 struct se_lun_acl *lacl_tmp; 683 - struct se_lun *lun_tmp; 684 - struct se_port *port, *port_tmp; 683 + struct se_lun *lun_tmp, *next, *dest_lun; 685 684 const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 686 685 struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe; 687 686 int ret; ··· 703 704 * for ALL_TG_PT=1 704 705 */ 705 706 spin_lock(&dev->se_port_lock); 706 - list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { 707 - atomic_inc_mb(&port->sep_tg_pt_ref_cnt); 707 + list_for_each_entry_safe(lun_tmp, next, &dev->dev_sep_list, lun_dev_link) { 708 + atomic_inc_mb(&lun_tmp->lun_active); 708 709 spin_unlock(&dev->se_port_lock); 709 710 710 - spin_lock_bh(&port->sep_alua_lock); 711 - list_for_each_entry(deve_tmp, &port->sep_alua_list, 712 - alua_port_list) { 711 + spin_lock_bh(&lun_tmp->lun_deve_lock); 712 + list_for_each_entry(deve_tmp, &lun_tmp->lun_deve_list, lun_link) { 713 713 /* 714 714 * This pointer will be NULL for demo mode MappedLUNs 715 715 * that have not been make explicit via a ConfigFS ··· 718 720 continue; 719 721 720 722 lacl_tmp = rcu_dereference_check(deve_tmp->se_lun_acl, 721 - lockdep_is_held(&port->sep_alua_lock)); 723 + lockdep_is_held(&lun_tmp->lun_deve_lock)); 722 724 nacl_tmp = lacl_tmp->se_lun_nacl; 723 725 /* 724 726 * Skip the matching struct se_node_acl that is allocated ··· 740 742 continue; 741 743 742 744 kref_get(&deve_tmp->pr_kref); 743 - spin_unlock_bh(&port->sep_alua_lock); 745 + 
spin_unlock_bh(&lun_tmp->lun_deve_lock); 744 746 /* 745 747 * Grab a configfs group dependency that is released 746 748 * for the exception path at label out: below, or upon ··· 751 753 if (ret < 0) { 752 754 pr_err("core_scsi3_lunacl_depend" 753 755 "_item() failed\n"); 754 - atomic_dec_mb(&port->sep_tg_pt_ref_cnt); 756 + atomic_dec_mb(&lun->lun_active); 755 757 kref_put(&deve_tmp->pr_kref, target_pr_kref_release); 756 758 goto out; 757 759 } ··· 762 764 * the original *pr_reg is processed in 763 765 * __core_scsi3_add_registration() 764 766 */ 765 - lun_tmp = rcu_dereference_check(deve_tmp->se_lun, 767 + dest_lun = rcu_dereference_check(deve_tmp->se_lun, 766 768 atomic_read(&deve_tmp->pr_kref.refcount) != 0); 767 769 768 770 pr_reg_atp = __core_scsi3_do_alloc_registration(dev, 769 - nacl_tmp, lun_tmp, deve_tmp, 771 + nacl_tmp, dest_lun, deve_tmp, 770 772 deve_tmp->mapped_lun, NULL, 771 773 sa_res_key, all_tg_pt, aptpl); 772 774 if (!pr_reg_atp) { 773 - atomic_dec_mb(&port->sep_tg_pt_ref_cnt); 775 + atomic_dec_mb(&lun_tmp->lun_active); 774 776 core_scsi3_lunacl_undepend_item(deve_tmp); 775 777 goto out; 776 778 } 777 779 778 780 list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list, 779 781 &pr_reg->pr_reg_atp_list); 780 - spin_lock_bh(&port->sep_alua_lock); 782 + spin_lock_bh(&lun_tmp->lun_deve_lock); 781 783 } 782 - spin_unlock_bh(&port->sep_alua_lock); 784 + spin_unlock_bh(&lun_tmp->lun_deve_lock); 783 785 784 786 spin_lock(&dev->se_port_lock); 785 - atomic_dec_mb(&port->sep_tg_pt_ref_cnt); 787 + atomic_dec_mb(&lun_tmp->lun_active); 786 788 } 787 789 spin_unlock(&dev->se_port_lock); 788 790 ··· 936 938 (pr_reg->pr_aptpl_target_lun == target_lun)) { 937 939 938 940 pr_reg->pr_reg_nacl = nacl; 939 - pr_reg->tg_pt_sep_rtpi = lun->lun_sep->sep_rtpi; 941 + pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; 940 942 941 943 list_del(&pr_reg->pr_reg_aptpl_list); 942 944 spin_unlock(&pr_tmpl->aptpl_reg_lock); ··· 1463 1465 int aptpl) 1464 1466 { 1465 1467 struct se_device *dev = 
cmd->se_dev; 1466 - struct se_port *tmp_port; 1467 1468 struct se_portal_group *dest_tpg = NULL, *tmp_tpg; 1468 1469 struct se_session *se_sess = cmd->se_sess; 1469 1470 struct se_node_acl *dest_node_acl = NULL; ··· 1547 1550 ptr = &buf[28]; 1548 1551 1549 1552 while (tpdl > 0) { 1550 - struct se_lun *dest_lun; 1553 + struct se_lun *dest_lun, *tmp_lun; 1551 1554 1552 1555 proto_ident = (ptr[0] & 0x0f); 1553 1556 dest_tpg = NULL; 1554 1557 1555 1558 spin_lock(&dev->se_port_lock); 1556 - list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { 1557 - tmp_tpg = tmp_port->sep_tpg; 1558 - if (!tmp_tpg) 1559 - continue; 1559 + list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) { 1560 + tmp_tpg = tmp_lun->lun_tpg; 1560 1561 1561 1562 /* 1562 1563 * Look for the matching proto_ident provided by ··· 1562 1567 */ 1563 1568 if (tmp_tpg->proto_id != proto_ident) 1564 1569 continue; 1565 - dest_rtpi = tmp_port->sep_rtpi; 1570 + dest_rtpi = tmp_lun->lun_rtpi; 1566 1571 1567 1572 i_str = target_parse_pr_out_transport_id(tmp_tpg, 1568 1573 (const char *)ptr, &tid_len, &iport_ptr); ··· 3114 3119 struct se_session *se_sess = cmd->se_sess; 3115 3120 struct se_device *dev = cmd->se_dev; 3116 3121 struct se_dev_entry *dest_se_deve = NULL; 3117 - struct se_lun *se_lun = cmd->se_lun; 3122 + struct se_lun *se_lun = cmd->se_lun, *tmp_lun; 3118 3123 struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; 3119 - struct se_port *se_port; 3120 3124 struct se_portal_group *se_tpg, *dest_se_tpg = NULL; 3121 3125 const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; 3122 3126 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; ··· 3200 3206 } 3201 3207 3202 3208 spin_lock(&dev->se_port_lock); 3203 - list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) { 3204 - if (se_port->sep_rtpi != rtpi) 3209 + list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) { 3210 + if (tmp_lun->lun_rtpi != rtpi) 3205 3211 continue; 3206 - 
dest_se_tpg = se_port->sep_tpg; 3207 - if (!dest_se_tpg) 3208 - continue; 3212 + dest_se_tpg = tmp_lun->lun_tpg; 3209 3213 dest_tf_ops = dest_se_tpg->se_tpg_tfo; 3210 3214 if (!dest_tf_ops) 3211 3215 continue;
+16 -32
drivers/target/target_core_spc.c
··· 37 37 #include "target_core_ua.h" 38 38 #include "target_core_xcopy.h" 39 39 40 - static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) 40 + static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf) 41 41 { 42 42 struct t10_alua_tg_pt_gp *tg_pt_gp; 43 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 44 43 45 44 /* 46 45 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS. ··· 52 53 * 53 54 * See spc4r17 section 6.4.2 Table 135 54 55 */ 55 - if (!port) 56 - return; 57 - tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 58 - if (!tg_pt_gp_mem) 59 - return; 60 - 61 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 62 - tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 56 + spin_lock(&lun->lun_tg_pt_gp_lock); 57 + tg_pt_gp = lun->lun_tg_pt_gp; 63 58 if (tg_pt_gp) 64 59 buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type; 65 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 60 + spin_unlock(&lun->lun_tg_pt_gp_lock); 66 61 } 67 62 68 63 sense_reason_t ··· 87 94 /* 88 95 * Enable SCCS and TPGS fields for Emulated ALUA 89 96 */ 90 - spc_fill_alua_data(lun->lun_sep, buf); 97 + spc_fill_alua_data(lun, buf); 91 98 92 99 /* 93 100 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY ··· 174 181 { 175 182 struct se_device *dev = cmd->se_dev; 176 183 struct se_lun *lun = cmd->se_lun; 177 - struct se_port *port = NULL; 178 184 struct se_portal_group *tpg = NULL; 179 185 struct t10_alua_lu_gp_member *lu_gp_mem; 180 186 struct t10_alua_tg_pt_gp *tg_pt_gp; 181 - struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 182 187 unsigned char *prod = &dev->t10_wwn.model[0]; 183 188 u32 prod_len; 184 189 u32 unit_serial_len, off = 0; ··· 258 267 /* Header size for Designation descriptor */ 259 268 len += (id_len + 4); 260 269 off += (id_len + 4); 261 - /* 262 - * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD 263 - */ 264 - port = lun->lun_sep; 265 - if (port) { 270 + 271 + if (1) { 266 272 struct t10_alua_lu_gp *lu_gp; 267 273 u32 padding, 
scsi_name_len, scsi_target_len; 268 274 u16 lu_gp_id = 0; 269 275 u16 tg_pt_gp_id = 0; 270 276 u16 tpgt; 271 277 272 - tpg = port->sep_tpg; 278 + tpg = lun->lun_tpg; 273 279 /* 274 280 * Relative target port identifer, see spc4r17 275 281 * section 7.7.3.7 ··· 286 298 /* Skip over Obsolete field in RTPI payload 287 299 * in Table 472 */ 288 300 off += 2; 289 - buf[off++] = ((port->sep_rtpi >> 8) & 0xff); 290 - buf[off++] = (port->sep_rtpi & 0xff); 301 + buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); 302 + buf[off++] = (lun->lun_rtpi & 0xff); 291 303 len += 8; /* Header size + Designation descriptor */ 292 304 /* 293 305 * Target port group identifier, see spc4r17 ··· 296 308 * Get the PROTOCOL IDENTIFIER as defined by spc4r17 297 309 * section 7.5.1 Table 362 298 310 */ 299 - tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 300 - if (!tg_pt_gp_mem) 301 - goto check_lu_gp; 302 - 303 - spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 304 - tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 311 + spin_lock(&lun->lun_tg_pt_gp_lock); 312 + tg_pt_gp = lun->lun_tg_pt_gp; 305 313 if (!tg_pt_gp) { 306 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 314 + spin_unlock(&lun->lun_tg_pt_gp_lock); 307 315 goto check_lu_gp; 308 316 } 309 317 tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; 310 - spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 318 + spin_unlock(&lun->lun_tg_pt_gp_lock); 311 319 312 320 buf[off] = tpg->proto_id << 4; 313 321 buf[off++] |= 0x1; /* CODE SET == Binary */ ··· 678 694 spc_emulate_inquiry(struct se_cmd *cmd) 679 695 { 680 696 struct se_device *dev = cmd->se_dev; 681 - struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; 697 + struct se_portal_group *tpg = cmd->se_lun->lun_tpg; 682 698 unsigned char *rbuf; 683 699 unsigned char *cdb = cmd->t_task_cdb; 684 700 unsigned char *buf; ··· 692 708 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 693 709 } 694 710 695 - if (dev == tpg->tpg_virt_lun0.lun_se_dev) 711 + if (dev == tpg->tpg_virt_lun0->lun_se_dev) 696 712 buf[0] = 0x3f; /* Not 
connected */ 697 713 else 698 714 buf[0] = dev->transport->get_device_type(dev);
+111 -188
drivers/target/target_core_stat.c
··· 106 106 struct se_device *dev = 107 107 container_of(sgrps, struct se_device, dev_stat_grps); 108 108 109 - return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count); 109 + return snprintf(page, PAGE_SIZE, "%u\n", dev->export_count); 110 110 } 111 111 DEV_STAT_SCSI_DEV_ATTR_RO(ports); 112 112 ··· 542 542 struct se_port_stat_grps *pgrps, char *page) 543 543 { 544 544 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 545 - struct se_port *sep; 546 - struct se_device *dev = lun->lun_se_dev; 547 - struct se_hba *hba; 548 - ssize_t ret; 545 + struct se_device *dev; 546 + ssize_t ret = -ENODEV; 549 547 550 548 spin_lock(&lun->lun_sep_lock); 551 - sep = lun->lun_sep; 552 - if (!sep) { 553 - spin_unlock(&lun->lun_sep_lock); 554 - return -ENODEV; 555 - } 556 - hba = dev->se_hba; 557 - ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 549 + dev = lun->lun_se_dev; 550 + if (dev) 551 + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->se_hba->hba_index); 558 552 spin_unlock(&lun->lun_sep_lock); 559 553 return ret; 560 554 } ··· 558 564 struct se_port_stat_grps *pgrps, char *page) 559 565 { 560 566 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 561 - struct se_port *sep; 562 - struct se_device *dev = lun->lun_se_dev; 563 - ssize_t ret; 567 + struct se_device *dev; 568 + ssize_t ret = -ENODEV; 564 569 565 570 spin_lock(&lun->lun_sep_lock); 566 - sep = lun->lun_sep; 567 - if (!sep) { 568 - spin_unlock(&lun->lun_sep_lock); 569 - return -ENODEV; 570 - } 571 - ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 571 + dev = lun->lun_se_dev; 572 + if (dev) 573 + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 572 574 spin_unlock(&lun->lun_sep_lock); 573 575 return ret; 574 576 } ··· 574 584 struct se_port_stat_grps *pgrps, char *page) 575 585 { 576 586 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 577 - struct se_port *sep; 578 - ssize_t ret; 587 + struct se_device *dev; 588 + ssize_t 
ret = -ENODEV; 579 589 580 590 spin_lock(&lun->lun_sep_lock); 581 - sep = lun->lun_sep; 582 - if (!sep) { 583 - spin_unlock(&lun->lun_sep_lock); 584 - return -ENODEV; 585 - } 586 - ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); 591 + dev = lun->lun_se_dev; 592 + if (dev) 593 + ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi); 587 594 spin_unlock(&lun->lun_sep_lock); 588 595 return ret; 589 596 } ··· 590 603 struct se_port_stat_grps *pgrps, char *page) 591 604 { 592 605 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 593 - struct se_device *dev = lun->lun_se_dev; 594 - struct se_port *sep; 595 - ssize_t ret; 596 - 597 - if (!dev) 598 - return -ENODEV; 606 + struct se_device *dev; 607 + ssize_t ret = -ENODEV; 599 608 600 609 spin_lock(&lun->lun_sep_lock); 601 - sep = lun->lun_sep; 602 - if (!sep) { 603 - spin_unlock(&lun->lun_sep_lock); 604 - return -ENODEV; 605 - } 606 - ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); 610 + dev = lun->lun_se_dev; 611 + if (dev) 612 + ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index); 607 613 spin_unlock(&lun->lun_sep_lock); 608 614 return ret; 609 615 } ··· 606 626 struct se_port_stat_grps *pgrps, char *page) 607 627 { 608 628 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 609 - struct se_port *sep; 610 - ssize_t ret; 629 + struct se_device *dev; 630 + ssize_t ret = -ENODEV; 611 631 612 632 spin_lock(&lun->lun_sep_lock); 613 - sep = lun->lun_sep; 614 - if (!sep) { 615 - spin_unlock(&lun->lun_sep_lock); 616 - return -ENODEV; 633 + dev = lun->lun_se_dev; 634 + if (dev) { 635 + /* FIXME: scsiPortBusyStatuses */ 636 + ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 617 637 } 618 - /* FIXME: scsiPortBusyStatuses */ 619 - ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 620 638 spin_unlock(&lun->lun_sep_lock); 621 639 return ret; 622 640 } ··· 663 685 struct se_port_stat_grps *pgrps, char *page) 664 686 { 665 687 struct se_lun *lun = 
container_of(pgrps, struct se_lun, port_stat_grps); 666 - struct se_device *dev = lun->lun_se_dev; 667 - struct se_port *sep; 668 - struct se_hba *hba; 669 - ssize_t ret; 688 + struct se_device *dev; 689 + ssize_t ret = -ENODEV; 670 690 671 691 spin_lock(&lun->lun_sep_lock); 672 - sep = lun->lun_sep; 673 - if (!sep) { 674 - spin_unlock(&lun->lun_sep_lock); 675 - return -ENODEV; 676 - } 677 - hba = dev->se_hba; 678 - ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 692 + dev = lun->lun_se_dev; 693 + if (dev) 694 + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->se_hba->hba_index); 679 695 spin_unlock(&lun->lun_sep_lock); 680 696 return ret; 681 697 } ··· 679 707 struct se_port_stat_grps *pgrps, char *page) 680 708 { 681 709 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 682 - struct se_device *dev = lun->lun_se_dev; 683 - struct se_port *sep; 684 - ssize_t ret; 710 + struct se_device *dev; 711 + ssize_t ret = -ENODEV; 685 712 686 713 spin_lock(&lun->lun_sep_lock); 687 - sep = lun->lun_sep; 688 - if (!sep) { 689 - spin_unlock(&lun->lun_sep_lock); 690 - return -ENODEV; 691 - } 692 - ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 714 + dev = lun->lun_se_dev; 715 + if (dev) 716 + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 693 717 spin_unlock(&lun->lun_sep_lock); 694 718 return ret; 695 719 } ··· 695 727 struct se_port_stat_grps *pgrps, char *page) 696 728 { 697 729 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 698 - struct se_port *sep; 699 - ssize_t ret; 730 + struct se_device *dev; 731 + ssize_t ret = -ENODEV; 700 732 701 733 spin_lock(&lun->lun_sep_lock); 702 - sep = lun->lun_sep; 703 - if (!sep) { 704 - spin_unlock(&lun->lun_sep_lock); 705 - return -ENODEV; 706 - } 707 - ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index); 734 + dev = lun->lun_se_dev; 735 + if (dev) 736 + ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi); 708 737 spin_unlock(&lun->lun_sep_lock); 709 738 return 
ret; 710 739 } ··· 711 746 struct se_port_stat_grps *pgrps, char *page) 712 747 { 713 748 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 714 - struct se_port *sep; 715 - struct se_portal_group *tpg; 716 - ssize_t ret; 749 + struct se_portal_group *tpg = lun->lun_tpg; 750 + struct se_device *dev; 751 + ssize_t ret = -ENODEV; 717 752 718 753 spin_lock(&lun->lun_sep_lock); 719 - sep = lun->lun_sep; 720 - if (!sep) { 721 - spin_unlock(&lun->lun_sep_lock); 722 - return -ENODEV; 723 - } 724 - tpg = sep->sep_tpg; 725 - 726 - ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", 727 - tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index); 754 + dev = lun->lun_se_dev; 755 + if (dev) 756 + ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", 757 + tpg->se_tpg_tfo->get_fabric_name(), 758 + lun->lun_rtpi); 728 759 spin_unlock(&lun->lun_sep_lock); 729 760 return ret; 730 761 } ··· 730 769 struct se_port_stat_grps *pgrps, char *page) 731 770 { 732 771 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 733 - struct se_port *sep; 734 - struct se_portal_group *tpg; 735 - ssize_t ret; 772 + struct se_portal_group *tpg = lun->lun_tpg; 773 + struct se_device *dev; 774 + ssize_t ret = -ENODEV; 736 775 737 776 spin_lock(&lun->lun_sep_lock); 738 - sep = lun->lun_sep; 739 - if (!sep) { 740 - spin_unlock(&lun->lun_sep_lock); 741 - return -ENODEV; 742 - } 743 - tpg = sep->sep_tpg; 744 - 745 - ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", 746 - tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", 747 - tpg->se_tpg_tfo->tpg_get_tag(tpg)); 777 + dev = lun->lun_se_dev; 778 + if (dev) 779 + ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", 780 + tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", 781 + tpg->se_tpg_tfo->tpg_get_tag(tpg)); 748 782 spin_unlock(&lun->lun_sep_lock); 749 783 return ret; 750 784 } ··· 749 793 struct se_port_stat_grps *pgrps, char *page) 750 794 { 751 795 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 752 - struct se_port *sep; 753 - 
ssize_t ret; 796 + struct se_device *dev; 797 + ssize_t ret = -ENODEV; 754 798 755 799 spin_lock(&lun->lun_sep_lock); 756 - sep = lun->lun_sep; 757 - if (!sep) { 758 - spin_unlock(&lun->lun_sep_lock); 759 - return -ENODEV; 760 - } 761 - 762 - ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus); 800 + dev = lun->lun_se_dev; 801 + if (dev) 802 + ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_stats.cmd_pdus); 763 803 spin_unlock(&lun->lun_sep_lock); 764 804 return ret; 765 805 } ··· 765 813 struct se_port_stat_grps *pgrps, char *page) 766 814 { 767 815 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 768 - struct se_port *sep; 769 - ssize_t ret; 816 + struct se_device *dev; 817 + ssize_t ret = -ENODEV; 770 818 771 819 spin_lock(&lun->lun_sep_lock); 772 - sep = lun->lun_sep; 773 - if (!sep) { 774 - spin_unlock(&lun->lun_sep_lock); 775 - return -ENODEV; 776 - } 777 - 778 - ret = snprintf(page, PAGE_SIZE, "%u\n", 779 - (u32)(sep->sep_stats.rx_data_octets >> 20)); 820 + dev = lun->lun_se_dev; 821 + if (dev) 822 + ret = snprintf(page, PAGE_SIZE, "%u\n", 823 + (u32)(lun->lun_stats.rx_data_octets >> 20)); 780 824 spin_unlock(&lun->lun_sep_lock); 781 825 return ret; 782 826 } ··· 782 834 struct se_port_stat_grps *pgrps, char *page) 783 835 { 784 836 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 785 - struct se_port *sep; 786 - ssize_t ret; 837 + struct se_device *dev; 838 + ssize_t ret = -ENODEV; 787 839 788 840 spin_lock(&lun->lun_sep_lock); 789 - sep = lun->lun_sep; 790 - if (!sep) { 791 - spin_unlock(&lun->lun_sep_lock); 792 - return -ENODEV; 793 - } 794 - 795 - ret = snprintf(page, PAGE_SIZE, "%u\n", 796 - (u32)(sep->sep_stats.tx_data_octets >> 20)); 841 + dev = lun->lun_se_dev; 842 + if (dev) 843 + ret = snprintf(page, PAGE_SIZE, "%u\n", 844 + (u32)(lun->lun_stats.tx_data_octets >> 20)); 797 845 spin_unlock(&lun->lun_sep_lock); 798 846 return ret; 799 847 } ··· 799 855 struct se_port_stat_grps *pgrps, 
char *page) 800 856 { 801 857 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 802 - struct se_port *sep; 803 - ssize_t ret; 858 + struct se_device *dev; 859 + ssize_t ret = -ENODEV; 804 860 805 861 spin_lock(&lun->lun_sep_lock); 806 - sep = lun->lun_sep; 807 - if (!sep) { 808 - spin_unlock(&lun->lun_sep_lock); 809 - return -ENODEV; 862 + dev = lun->lun_se_dev; 863 + if (dev) { 864 + /* FIXME: scsiTgtPortHsInCommands */ 865 + ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 810 866 } 811 - 812 - /* FIXME: scsiTgtPortHsInCommands */ 813 - ret = snprintf(page, PAGE_SIZE, "%u\n", 0); 814 867 spin_unlock(&lun->lun_sep_lock); 815 868 return ret; 816 869 } ··· 862 921 struct se_port_stat_grps *pgrps, char *page) 863 922 { 864 923 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 865 - struct se_device *dev = lun->lun_se_dev; 866 - struct se_port *sep; 867 - struct se_hba *hba; 868 - ssize_t ret; 924 + struct se_device *dev; 925 + ssize_t ret = -ENODEV; 869 926 870 927 spin_lock(&lun->lun_sep_lock); 871 - sep = lun->lun_sep; 872 - if (!sep) { 873 - spin_unlock(&lun->lun_sep_lock); 874 - return -ENODEV; 875 - } 876 - 877 - hba = dev->se_hba; 878 - ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 928 + dev = lun->lun_se_dev; 929 + if (dev) 930 + ret = snprintf(page, PAGE_SIZE, "%u\n", dev->se_hba->hba_index); 879 931 spin_unlock(&lun->lun_sep_lock); 880 932 return ret; 881 933 } ··· 878 944 struct se_port_stat_grps *pgrps, char *page) 879 945 { 880 946 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 881 - struct se_port *sep; 882 - struct se_portal_group *tpg; 883 - ssize_t ret; 947 + struct se_device *dev; 948 + struct se_portal_group *tpg = lun->lun_tpg; 949 + ssize_t ret = -ENODEV; 884 950 885 951 spin_lock(&lun->lun_sep_lock); 886 - sep = lun->lun_sep; 887 - if (!sep) { 888 - spin_unlock(&lun->lun_sep_lock); 889 - return -ENODEV; 952 + dev = lun->lun_se_dev; 953 + if (dev) { 954 + /* 
scsiTransportType */ 955 + ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", 956 + tpg->se_tpg_tfo->get_fabric_name()); 890 957 } 891 - tpg = sep->sep_tpg; 892 - /* scsiTransportType */ 893 - ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", 894 - tpg->se_tpg_tfo->get_fabric_name()); 895 958 spin_unlock(&lun->lun_sep_lock); 896 959 return ret; 897 960 } ··· 898 967 struct se_port_stat_grps *pgrps, char *page) 899 968 { 900 969 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 901 - struct se_port *sep; 902 - struct se_portal_group *tpg; 903 - ssize_t ret; 970 + struct se_device *dev; 971 + struct se_portal_group *tpg = lun->lun_tpg; 972 + ssize_t ret = -ENODEV; 904 973 905 974 spin_lock(&lun->lun_sep_lock); 906 - sep = lun->lun_sep; 907 - if (!sep) { 908 - spin_unlock(&lun->lun_sep_lock); 909 - return -ENODEV; 910 - } 911 - tpg = sep->sep_tpg; 912 - ret = snprintf(page, PAGE_SIZE, "%u\n", 913 - tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); 975 + dev = lun->lun_se_dev; 976 + if (dev) 977 + ret = snprintf(page, PAGE_SIZE, "%u\n", 978 + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); 914 979 spin_unlock(&lun->lun_sep_lock); 915 980 return ret; 916 981 } ··· 917 990 { 918 991 struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps); 919 992 struct se_device *dev = lun->lun_se_dev; 920 - struct se_port *sep; 921 - struct se_portal_group *tpg; 993 + struct se_portal_group *tpg = lun->lun_tpg; 922 994 struct t10_wwn *wwn; 923 - ssize_t ret; 995 + ssize_t ret = -ENODEV; 924 996 925 997 spin_lock(&lun->lun_sep_lock); 926 - sep = lun->lun_sep; 927 - if (!sep) { 928 - spin_unlock(&lun->lun_sep_lock); 929 - return -ENODEV; 998 + dev = lun->lun_se_dev; 999 + if (dev) { 1000 + wwn = &dev->t10_wwn; 1001 + /* scsiTransportDevName */ 1002 + ret = snprintf(page, PAGE_SIZE, "%s+%s\n", 1003 + tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1004 + (strlen(wwn->unit_serial)) ? 
wwn->unit_serial : 1005 + wwn->vendor); 930 1006 } 931 - tpg = sep->sep_tpg; 932 - wwn = &dev->t10_wwn; 933 - /* scsiTransportDevName */ 934 - ret = snprintf(page, PAGE_SIZE, "%s+%s\n", 935 - tpg->se_tpg_tfo->tpg_get_wwn(tpg), 936 - (strlen(wwn->unit_serial)) ? wwn->unit_serial : 937 - wwn->vendor); 938 1007 spin_unlock(&lun->lun_sep_lock); 939 1008 return ret; 940 1009 }
+63 -32
drivers/target/target_core_tpg.c
··· 40 40 #include <target/target_core_fabric.h> 41 41 42 42 #include "target_core_internal.h" 43 + #include "target_core_alua.h" 43 44 #include "target_core_pr.h" 44 45 45 46 extern struct se_device *g_lun0_dev; ··· 485 484 complete(&lun->lun_ref_comp); 486 485 } 487 486 488 - static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) 489 - { 490 - /* Set in core_dev_setup_virtual_lun0() */ 491 - struct se_device *dev = g_lun0_dev; 492 - struct se_lun *lun = &se_tpg->tpg_virt_lun0; 493 - u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 494 - int ret; 495 - 496 - lun->unpacked_lun = 0; 497 - atomic_set(&lun->lun_acl_count, 0); 498 - spin_lock_init(&lun->lun_sep_lock); 499 - init_completion(&lun->lun_ref_comp); 500 - 501 - ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev); 502 - if (ret < 0) 503 - return ret; 504 - 505 - return 0; 506 - } 507 - 508 487 int core_tpg_register( 509 488 const struct target_core_fabric_ops *tfo, 510 489 struct se_wwn *se_wwn, 511 490 struct se_portal_group *se_tpg, 512 491 int proto_id) 513 492 { 493 + int ret; 494 + 514 495 INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist); 515 496 se_tpg->proto_id = proto_id; 516 497 se_tpg->se_tpg_tfo = tfo; ··· 506 523 mutex_init(&se_tpg->acl_node_mutex); 507 524 508 525 if (se_tpg->proto_id >= 0) { 509 - if (core_tpg_setup_virtual_lun0(se_tpg) < 0) 510 - return -ENOMEM; 526 + se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0); 527 + if (IS_ERR(se_tpg->tpg_virt_lun0)) 528 + return PTR_ERR(se_tpg->tpg_virt_lun0); 529 + 530 + ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0, 531 + TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev); 532 + if (ret < 0) { 533 + kfree(se_tpg->tpg_virt_lun0); 534 + return ret; 535 + } 511 536 } 512 537 513 538 spin_lock_bh(&tpg_lock); ··· 566 575 kfree(nacl); 567 576 } 568 577 569 - if (se_tpg->proto_id >= 0) 570 - core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0); 578 + if (se_tpg->proto_id >= 0) { 579 + core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0); 580 + 
kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head); 581 + } 571 582 572 583 return 0; 573 584 } ··· 600 607 atomic_set(&lun->lun_acl_count, 0); 601 608 spin_lock_init(&lun->lun_sep_lock); 602 609 init_completion(&lun->lun_ref_comp); 610 + INIT_LIST_HEAD(&lun->lun_deve_list); 611 + INIT_LIST_HEAD(&lun->lun_dev_link); 612 + atomic_set(&lun->lun_tg_pt_secondary_offline, 0); 613 + spin_lock_init(&lun->lun_deve_lock); 614 + mutex_init(&lun->lun_tg_pt_md_mutex); 615 + INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link); 616 + spin_lock_init(&lun->lun_tg_pt_gp_lock); 617 + atomic_set(&lun->lun_active, 0); 618 + lun->lun_tpg = tpg; 603 619 604 620 return lun; 605 621 } ··· 624 622 ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0, 625 623 GFP_KERNEL); 626 624 if (ret < 0) 627 - return ret; 625 + goto out; 628 626 629 - ret = core_dev_export(dev, tpg, lun); 630 - if (ret < 0) { 631 - percpu_ref_exit(&lun->lun_ref); 632 - return ret; 633 - } 627 + ret = core_alloc_rtpi(lun, dev); 628 + if (ret) 629 + goto out_kill_ref; 630 + 631 + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && 632 + !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 633 + target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); 634 634 635 635 mutex_lock(&tpg->tpg_lun_mutex); 636 + 637 + spin_lock(&lun->lun_sep_lock); 638 + lun->lun_index = dev->dev_index; 639 + lun->lun_se_dev = dev; 640 + spin_unlock(&lun->lun_sep_lock); 641 + 642 + spin_lock(&dev->se_port_lock); 643 + rcu_assign_pointer(lun->lun_se_dev, dev); 644 + dev->export_count++; 645 + list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list); 646 + spin_unlock(&dev->se_port_lock); 647 + 636 648 lun->lun_access = lun_access; 637 649 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 638 650 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); 639 651 mutex_unlock(&tpg->tpg_lun_mutex); 640 652 641 653 return 0; 654 + 655 + out_kill_ref: 656 + percpu_ref_exit(&lun->lun_ref); 657 + out: 658 + return ret; 642 659 } 643 660 644 
661 void core_tpg_remove_lun( ··· 669 648 core_clear_lun_from_tpg(lun, tpg); 670 649 transport_clear_lun_ref(lun); 671 650 672 - core_dev_unexport(lun->lun_se_dev, tpg, lun); 673 - 674 651 mutex_lock(&tpg->tpg_lun_mutex); 652 + if (lun->lun_se_dev) { 653 + while (atomic_read(&lun->lun_active)) 654 + cpu_relax(); 655 + 656 + target_detach_tg_pt_gp(lun); 657 + 658 + spin_lock(&dev->se_port_lock); 659 + list_del(&lun->lun_dev_link); 660 + dev->export_count--; 661 + rcu_assign_pointer(lun->lun_se_dev, NULL); 662 + spin_unlock(&dev->se_port_lock); 663 + } 675 664 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 676 665 hlist_del_rcu(&lun->link); 677 666 mutex_unlock(&tpg->tpg_lun_mutex);
+5 -29
drivers/target/target_core_transport.c
··· 60 60 struct kmem_cache *t10_alua_lu_gp_cache; 61 61 struct kmem_cache *t10_alua_lu_gp_mem_cache; 62 62 struct kmem_cache *t10_alua_tg_pt_gp_cache; 63 - struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 64 63 struct kmem_cache *t10_alua_lba_map_cache; 65 64 struct kmem_cache *t10_alua_lba_map_mem_cache; 66 65 ··· 118 119 "cache failed\n"); 119 120 goto out_free_lu_gp_mem_cache; 120 121 } 121 - t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( 122 - "t10_alua_tg_pt_gp_mem_cache", 123 - sizeof(struct t10_alua_tg_pt_gp_member), 124 - __alignof__(struct t10_alua_tg_pt_gp_member), 125 - 0, NULL); 126 - if (!t10_alua_tg_pt_gp_mem_cache) { 127 - pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 128 - "mem_t failed\n"); 129 - goto out_free_tg_pt_gp_cache; 130 - } 131 122 t10_alua_lba_map_cache = kmem_cache_create( 132 123 "t10_alua_lba_map_cache", 133 124 sizeof(struct t10_alua_lba_map), ··· 125 136 if (!t10_alua_lba_map_cache) { 126 137 pr_err("kmem_cache_create() for t10_alua_lba_map_" 127 138 "cache failed\n"); 128 - goto out_free_tg_pt_gp_mem_cache; 139 + goto out_free_tg_pt_gp_cache; 129 140 } 130 141 t10_alua_lba_map_mem_cache = kmem_cache_create( 131 142 "t10_alua_lba_map_mem_cache", ··· 148 159 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 149 160 out_free_lba_map_cache: 150 161 kmem_cache_destroy(t10_alua_lba_map_cache); 151 - out_free_tg_pt_gp_mem_cache: 152 - kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 153 162 out_free_tg_pt_gp_cache: 154 163 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 155 164 out_free_lu_gp_mem_cache: ··· 173 186 kmem_cache_destroy(t10_alua_lu_gp_cache); 174 187 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 175 188 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 176 - kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 177 189 kmem_cache_destroy(t10_alua_lba_map_cache); 178 190 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 179 191 } ··· 1263 1277 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1264 1278 1265 1279 
spin_lock(&cmd->se_lun->lun_sep_lock); 1266 - if (cmd->se_lun->lun_sep) 1267 - cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; 1280 + cmd->se_lun->lun_stats.cmd_pdus++; 1268 1281 spin_unlock(&cmd->se_lun->lun_sep_lock); 1269 1282 return 0; 1270 1283 } ··· 2061 2076 switch (cmd->data_direction) { 2062 2077 case DMA_FROM_DEVICE: 2063 2078 spin_lock(&cmd->se_lun->lun_sep_lock); 2064 - if (cmd->se_lun->lun_sep) { 2065 - cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 2066 - cmd->data_length; 2067 - } 2079 + cmd->se_lun->lun_stats.tx_data_octets += cmd->data_length; 2068 2080 spin_unlock(&cmd->se_lun->lun_sep_lock); 2069 2081 /* 2070 2082 * Perform READ_STRIP of PI using software emulation when ··· 2086 2104 break; 2087 2105 case DMA_TO_DEVICE: 2088 2106 spin_lock(&cmd->se_lun->lun_sep_lock); 2089 - if (cmd->se_lun->lun_sep) { 2090 - cmd->se_lun->lun_sep->sep_stats.rx_data_octets += 2091 - cmd->data_length; 2092 - } 2107 + cmd->se_lun->lun_stats.rx_data_octets += cmd->data_length; 2093 2108 spin_unlock(&cmd->se_lun->lun_sep_lock); 2094 2109 /* 2095 2110 * Check if we need to send READ payload for BIDI-COMMAND 2096 2111 */ 2097 2112 if (cmd->se_cmd_flags & SCF_BIDI) { 2098 2113 spin_lock(&cmd->se_lun->lun_sep_lock); 2099 - if (cmd->se_lun->lun_sep) { 2100 - cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 2101 - cmd->data_length; 2102 - } 2114 + cmd->se_lun->lun_stats.tx_data_octets += cmd->data_length; 2103 2115 spin_unlock(&cmd->se_lun->lun_sep_lock); 2104 2116 ret = cmd->se_tfo->queue_data_in(cmd); 2105 2117 if (ret == -EAGAIN || ret == -ENOMEM)
+1 -16
drivers/target/target_core_xcopy.c
··· 348 348 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; 349 349 }; 350 350 351 - static struct se_port xcopy_pt_port; 352 - static struct se_portal_group xcopy_pt_tpg; 351 + struct se_portal_group xcopy_pt_tpg; 353 352 static struct se_session xcopy_pt_sess; 354 353 static struct se_node_acl xcopy_pt_nacl; 355 354 ··· 438 439 return -ENOMEM; 439 440 } 440 441 441 - memset(&xcopy_pt_port, 0, sizeof(struct se_port)); 442 - INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list); 443 - INIT_LIST_HEAD(&xcopy_pt_port.sep_list); 444 - mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex); 445 - 446 442 memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group)); 447 443 INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node); 448 444 INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list); 449 445 INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list); 450 446 451 - xcopy_pt_port.sep_tpg = &xcopy_pt_tpg; 452 447 xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo; 453 448 454 449 memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl)); ··· 483 490 */ 484 491 if (remote_port) { 485 492 xpt_cmd->remote_port = remote_port; 486 - pt_cmd->se_lun->lun_sep = &xcopy_pt_port; 487 - pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to" 488 - " cmd->se_lun->lun_sep for X-COPY data PUSH\n", 489 - pt_cmd->se_lun->lun_sep); 490 493 } else { 491 494 pt_cmd->se_lun = ec_cmd->se_lun; 492 495 pt_cmd->se_dev = ec_cmd->se_dev; ··· 502 513 */ 503 514 if (remote_port) { 504 515 xpt_cmd->remote_port = remote_port; 505 - pt_cmd->se_lun->lun_sep = &xcopy_pt_port; 506 - pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to" 507 - " cmd->se_lun->lun_sep for X-COPY data PULL\n", 508 - pt_cmd->se_lun->lun_sep); 509 516 } else { 510 517 pt_cmd->se_lun = ec_cmd->se_lun; 511 518 pt_cmd->se_dev = ec_cmd->se_dev;
+32 -42
include/target/target_core_base.h
··· 304 304 struct se_device *tg_pt_gp_dev; 305 305 struct config_group tg_pt_gp_group; 306 306 struct list_head tg_pt_gp_list; 307 - struct list_head tg_pt_gp_mem_list; 308 - struct se_port *tg_pt_gp_alua_port; 307 + struct list_head tg_pt_gp_lun_list; 308 + struct se_lun *tg_pt_gp_alua_lun; 309 309 struct se_node_acl *tg_pt_gp_alua_nacl; 310 310 struct delayed_work tg_pt_gp_transition_work; 311 311 struct completion *tg_pt_gp_transition_complete; 312 - }; 313 - 314 - struct t10_alua_tg_pt_gp_member { 315 - bool tg_pt_gp_assoc; 316 - atomic_t tg_pt_gp_mem_ref_cnt; 317 - spinlock_t tg_pt_gp_mem_lock; 318 - struct t10_alua_tg_pt_gp *tg_pt_gp; 319 - struct se_port *tg_pt; 320 - struct list_head tg_pt_gp_mem_list; 321 312 }; 322 313 323 314 struct t10_vpd { ··· 641 650 #define DEF_PR_REG_ACTIVE 1 642 651 unsigned long deve_flags; 643 652 struct list_head alua_port_list; 653 + struct list_head lun_link; 644 654 struct list_head ua_list; 645 655 struct hlist_node link; 646 656 struct rcu_head rcu_head; ··· 689 697 struct config_group scsi_transport_group; 690 698 }; 691 699 700 + struct scsi_port_stats { 701 + u32 cmd_pdus; 702 + u64 tx_data_octets; 703 + u64 rx_data_octets; 704 + }; 705 + 692 706 struct se_lun { 707 + /* RELATIVE TARGET PORT IDENTIFER */ 693 708 u16 lun_rtpi; 694 709 #define SE_LUN_LINK_MAGIC 0xffff7771 695 710 u32 lun_link_magic; ··· 706 707 u32 lun_index; 707 708 atomic_t lun_acl_count; 708 709 spinlock_t lun_sep_lock; 709 - struct se_device *lun_se_dev; 710 - struct se_port *lun_sep; 710 + struct se_device __rcu *lun_se_dev; 711 + 712 + struct list_head lun_deve_list; 713 + spinlock_t lun_deve_lock; 714 + 715 + /* ALUA state */ 716 + int lun_tg_pt_secondary_stat; 717 + int lun_tg_pt_secondary_write_md; 718 + atomic_t lun_tg_pt_secondary_offline; 719 + struct mutex lun_tg_pt_md_mutex; 720 + 721 + /* ALUA target port group linkage */ 722 + struct list_head lun_tg_pt_gp_link; 723 + struct t10_alua_tg_pt_gp *lun_tg_pt_gp; 724 + spinlock_t 
lun_tg_pt_gp_lock; 725 + 726 + atomic_t lun_active; 727 + struct se_portal_group *lun_tpg; 728 + struct scsi_port_stats lun_stats; 711 729 struct config_group lun_group; 712 730 struct se_port_stat_grps port_stat_grps; 713 731 struct completion lun_ref_comp; 714 732 struct percpu_ref lun_ref; 733 + struct list_head lun_dev_link; 715 734 struct hlist_node link; 716 735 struct rcu_head rcu_head; 717 736 }; ··· 754 737 #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 755 738 #define DF_USING_UDEV_PATH 0x00000008 756 739 #define DF_USING_ALIAS 0x00000010 757 - u32 dev_port_count; 758 740 /* Physical device queue depth */ 759 741 u32 queue_depth; 760 742 /* Used for SPC-2 reservations enforce of ISIDs */ ··· 770 754 atomic_t dev_ordered_id; 771 755 atomic_t dev_ordered_sync; 772 756 atomic_t dev_qf_count; 773 - int export_count; 757 + u32 export_count; 774 758 spinlock_t delayed_cmd_lock; 775 759 spinlock_t execute_task_lock; 776 760 spinlock_t dev_reservation_lock; ··· 837 821 struct target_backend *backend; 838 822 }; 839 823 840 - struct scsi_port_stats { 841 - u64 cmd_pdus; 842 - u64 tx_data_octets; 843 - u64 rx_data_octets; 844 - }; 845 - 846 - struct se_port { 847 - /* RELATIVE TARGET PORT IDENTIFER */ 848 - u16 sep_rtpi; 849 - int sep_tg_pt_secondary_stat; 850 - int sep_tg_pt_secondary_write_md; 851 - u32 sep_index; 852 - struct scsi_port_stats sep_stats; 853 - /* Used for ALUA Target Port Groups membership */ 854 - atomic_t sep_tg_pt_secondary_offline; 855 - /* Used for PR ALL_TG_PT=1 */ 856 - atomic_t sep_tg_pt_ref_cnt; 857 - spinlock_t sep_alua_lock; 858 - struct mutex sep_tg_pt_md_mutex; 859 - struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem; 860 - struct se_lun *sep_lun; 861 - struct se_portal_group *sep_tpg; 862 - struct list_head sep_alua_list; 863 - struct list_head sep_list; 864 - }; 865 - 866 824 struct se_tpg_np { 867 825 struct se_portal_group *tpg_np_parent; 868 826 struct config_group tpg_np_group; ··· 862 872 /* linked list for initiator ACL 
list */ 863 873 struct list_head acl_node_list; 864 874 struct hlist_head tpg_lun_hlist; 865 - struct se_lun tpg_virt_lun0; 875 + struct se_lun *tpg_virt_lun0; 866 876 /* List of TCM sessions associated wth this TPG */ 867 877 struct list_head tpg_sess_list; 868 878 /* Pointer to $FABRIC_MOD dependent code */