Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

target: kill struct se_subsystem_dev

Simplify the code a lot by killing the superfluous struct se_subsystem_dev.
Instead se_device is allocated early on by the backend driver, which allocates
it as part of its own per-device structure, borrowing the scheme that is for
example used for inode allocation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>

authored by

Christoph Hellwig and committed by
Nicholas Bellinger
0fd97ccf 3d70f8c6

+1048 -1908
-1
drivers/target/loopback/tcm_loop.h
··· 53 53 struct se_hba_s *se_hba; 54 54 struct se_lun *tl_hba_lun; 55 55 struct se_port *tl_hba_lun_sep; 56 - struct se_device_s *se_dev_hba_ptr; 57 56 struct tcm_loop_nexus *tl_nexus; 58 57 struct device dev; 59 58 struct Scsi_Host *sh;
+72 -80
drivers/target/target_core_alua.c
··· 61 61 */ 62 62 int target_emulate_report_target_port_groups(struct se_cmd *cmd) 63 63 { 64 - struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 64 + struct se_device *dev = cmd->se_dev; 65 65 struct se_port *port; 66 66 struct t10_alua_tg_pt_gp *tg_pt_gp; 67 67 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; ··· 86 86 } 87 87 buf = transport_kmap_data_sg(cmd); 88 88 89 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 90 - list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, 89 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 90 + list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 91 91 tg_pt_gp_list) { 92 92 /* 93 93 * Check if the Target port group and Target port descriptor list ··· 160 160 } 161 161 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 162 162 } 163 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 163 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 164 164 /* 165 165 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload 166 166 */ ··· 203 203 int target_emulate_set_target_port_groups(struct se_cmd *cmd) 204 204 { 205 205 struct se_device *dev = cmd->se_dev; 206 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 207 206 struct se_port *port, *l_port = cmd->se_lun->lun_sep; 208 207 struct se_node_acl *nacl = cmd->se_sess->se_node_acl; 209 208 struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; ··· 302 303 * Locate the matching target port group ID from 303 304 * the global tg_pt_gp list 304 305 */ 305 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 306 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 306 307 list_for_each_entry(tg_pt_gp, 307 - &su_dev->t10_alua.tg_pt_gps_list, 308 + &dev->t10_alua.tg_pt_gps_list, 308 309 tg_pt_gp_list) { 309 310 if (!tg_pt_gp->tg_pt_gp_valid_id) 310 311 continue; ··· 314 315 315 316 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 316 317 smp_mb__after_atomic_inc(); 317 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 318 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 318 319 319 320 rc = 
core_alua_do_port_transition(tg_pt_gp, 320 321 dev, l_port, nacl, 321 322 alua_access_state, 1); 322 323 323 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 324 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 324 325 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 325 326 smp_mb__after_atomic_dec(); 326 327 break; 327 328 } 328 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 329 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 329 330 /* 330 331 * If not matching target port group ID can be located 331 332 * throw an exception with ASCQ: INVALID_PARAMETER_LIST ··· 757 758 int primary_state, 758 759 unsigned char *md_buf) 759 760 { 760 - struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 761 - struct t10_wwn *wwn = &su_dev->t10_wwn; 761 + struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn; 762 762 char path[ALUA_METADATA_PATH_LEN]; 763 763 int len; 764 764 ··· 897 899 { 898 900 struct se_device *dev; 899 901 struct se_port *port; 900 - struct se_subsystem_dev *su_dev; 901 902 struct se_node_acl *nacl; 902 903 struct t10_alua_lu_gp *lu_gp; 903 904 struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem; ··· 946 949 lu_gp_mem_list) { 947 950 948 951 dev = lu_gp_mem->lu_gp_mem_dev; 949 - su_dev = dev->se_sub_dev; 950 952 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); 951 953 smp_mb__after_atomic_inc(); 952 954 spin_unlock(&lu_gp->lu_gp_lock); 953 955 954 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 956 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 955 957 list_for_each_entry(tg_pt_gp, 956 - &su_dev->t10_alua.tg_pt_gps_list, 958 + &dev->t10_alua.tg_pt_gps_list, 957 959 tg_pt_gp_list) { 958 960 959 961 if (!tg_pt_gp->tg_pt_gp_valid_id) ··· 977 981 } 978 982 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 979 983 smp_mb__after_atomic_inc(); 980 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 984 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 981 985 /* 982 986 * core_alua_do_transition_tg_pt() will always return 983 987 * success. 
··· 985 989 core_alua_do_transition_tg_pt(tg_pt_gp, port, 986 990 nacl, md_buf, new_state, explict); 987 991 988 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 992 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 989 993 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 990 994 smp_mb__after_atomic_dec(); 991 995 } 992 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 996 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 993 997 994 998 spin_lock(&lu_gp->lu_gp_lock); 995 999 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); ··· 1264 1268 1265 1269 void core_alua_free_lu_gp_mem(struct se_device *dev) 1266 1270 { 1267 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1268 - struct t10_alua *alua = &su_dev->t10_alua; 1271 + struct t10_alua *alua = &dev->t10_alua; 1269 1272 struct t10_alua_lu_gp *lu_gp; 1270 1273 struct t10_alua_lu_gp_member *lu_gp_mem; 1271 1274 ··· 1353 1358 spin_unlock(&lu_gp->lu_gp_lock); 1354 1359 } 1355 1360 1356 - struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( 1357 - struct se_subsystem_dev *su_dev, 1358 - const char *name, 1359 - int def_group) 1361 + struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, 1362 + const char *name, int def_group) 1360 1363 { 1361 1364 struct t10_alua_tg_pt_gp *tg_pt_gp; 1362 1365 ··· 1368 1375 mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); 1369 1376 spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); 1370 1377 atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); 1371 - tg_pt_gp->tg_pt_gp_su_dev = su_dev; 1378 + tg_pt_gp->tg_pt_gp_dev = dev; 1372 1379 tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; 1373 1380 atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, 1374 1381 ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); ··· 1385 1392 tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS; 1386 1393 1387 1394 if (def_group) { 1388 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1395 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1389 1396 tg_pt_gp->tg_pt_gp_id = 1390 - su_dev->t10_alua.alua_tg_pt_gps_counter++; 1397 + 
dev->t10_alua.alua_tg_pt_gps_counter++; 1391 1398 tg_pt_gp->tg_pt_gp_valid_id = 1; 1392 - su_dev->t10_alua.alua_tg_pt_gps_count++; 1399 + dev->t10_alua.alua_tg_pt_gps_count++; 1393 1400 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1394 - &su_dev->t10_alua.tg_pt_gps_list); 1395 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1401 + &dev->t10_alua.tg_pt_gps_list); 1402 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1396 1403 } 1397 1404 1398 1405 return tg_pt_gp; ··· 1402 1409 struct t10_alua_tg_pt_gp *tg_pt_gp, 1403 1410 u16 tg_pt_gp_id) 1404 1411 { 1405 - struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 1412 + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1406 1413 struct t10_alua_tg_pt_gp *tg_pt_gp_tmp; 1407 1414 u16 tg_pt_gp_id_tmp; 1415 + 1408 1416 /* 1409 1417 * The tg_pt_gp->tg_pt_gp_id may only be set once.. 1410 1418 */ ··· 1415 1421 return -EINVAL; 1416 1422 } 1417 1423 1418 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1419 - if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { 1424 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1425 + if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { 1420 1426 pr_err("Maximum ALUA alua_tg_pt_gps_count:" 1421 1427 " 0x0000ffff reached\n"); 1422 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1428 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1423 1429 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); 1424 1430 return -ENOSPC; 1425 1431 } 1426 1432 again: 1427 1433 tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? 
tg_pt_gp_id : 1428 - su_dev->t10_alua.alua_tg_pt_gps_counter++; 1434 + dev->t10_alua.alua_tg_pt_gps_counter++; 1429 1435 1430 - list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list, 1436 + list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list, 1431 1437 tg_pt_gp_list) { 1432 1438 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { 1433 1439 if (!tg_pt_gp_id) ··· 1435 1441 1436 1442 pr_err("ALUA Target Port Group ID: %hu already" 1437 1443 " exists, ignoring request\n", tg_pt_gp_id); 1438 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1444 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1439 1445 return -EINVAL; 1440 1446 } 1441 1447 } ··· 1443 1449 tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; 1444 1450 tg_pt_gp->tg_pt_gp_valid_id = 1; 1445 1451 list_add_tail(&tg_pt_gp->tg_pt_gp_list, 1446 - &su_dev->t10_alua.tg_pt_gps_list); 1447 - su_dev->t10_alua.alua_tg_pt_gps_count++; 1448 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1452 + &dev->t10_alua.tg_pt_gps_list); 1453 + dev->t10_alua.alua_tg_pt_gps_count++; 1454 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1449 1455 1450 1456 return 0; 1451 1457 } ··· 1474 1480 void core_alua_free_tg_pt_gp( 1475 1481 struct t10_alua_tg_pt_gp *tg_pt_gp) 1476 1482 { 1477 - struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 1483 + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1478 1484 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp; 1485 + 1479 1486 /* 1480 1487 * Once we have reached this point, config_item_put() has already 1481 1488 * been called from target_core_alua_drop_tg_pt_gp(). ··· 1485 1490 * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS 1486 1491 * can be made while we are releasing struct t10_alua_tg_pt_gp. 
1487 1492 */ 1488 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1493 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1489 1494 list_del(&tg_pt_gp->tg_pt_gp_list); 1490 - su_dev->t10_alua.alua_tg_pt_gps_counter--; 1491 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1495 + dev->t10_alua.alua_tg_pt_gps_counter--; 1496 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1497 + 1492 1498 /* 1493 1499 * Allow a struct t10_alua_tg_pt_gp_member * referenced by 1494 1500 * core_alua_get_tg_pt_gp_by_name() in ··· 1498 1502 */ 1499 1503 while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt)) 1500 1504 cpu_relax(); 1505 + 1501 1506 /* 1502 1507 * Release reference to struct t10_alua_tg_pt_gp from all associated 1503 1508 * struct se_port. ··· 1522 1525 * default_tg_pt_gp. 1523 1526 */ 1524 1527 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1525 - if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) { 1528 + if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) { 1526 1529 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 1527 - su_dev->t10_alua.default_tg_pt_gp); 1530 + dev->t10_alua.default_tg_pt_gp); 1528 1531 } else 1529 1532 tg_pt_gp_mem->tg_pt_gp = NULL; 1530 1533 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); ··· 1538 1541 1539 1542 void core_alua_free_tg_pt_gp_mem(struct se_port *port) 1540 1543 { 1541 - struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; 1542 - struct t10_alua *alua = &su_dev->t10_alua; 1544 + struct t10_alua *alua = &port->sep_lun->lun_se_dev->t10_alua; 1543 1545 struct t10_alua_tg_pt_gp *tg_pt_gp; 1544 1546 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1545 1547 ··· 1570 1574 } 1571 1575 1572 1576 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( 1573 - struct se_subsystem_dev *su_dev, 1574 - const char *name) 1577 + struct se_device *dev, const char *name) 1575 1578 { 1576 1579 struct t10_alua_tg_pt_gp *tg_pt_gp; 1577 1580 struct config_item *ci; 1578 1581 1579 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1580 - 
list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, 1582 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1583 + list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 1581 1584 tg_pt_gp_list) { 1582 1585 if (!tg_pt_gp->tg_pt_gp_valid_id) 1583 1586 continue; 1584 1587 ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1585 1588 if (!strcmp(config_item_name(ci), name)) { 1586 1589 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1587 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1590 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1588 1591 return tg_pt_gp; 1589 1592 } 1590 1593 } 1591 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1594 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1592 1595 1593 1596 return NULL; 1594 1597 } ··· 1595 1600 static void core_alua_put_tg_pt_gp_from_name( 1596 1601 struct t10_alua_tg_pt_gp *tg_pt_gp) 1597 1602 { 1598 - struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 1603 + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1599 1604 1600 - spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); 1605 + spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1601 1606 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1602 - spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); 1607 + spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1603 1608 } 1604 1609 1605 1610 /* ··· 1635 1640 1636 1641 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) 1637 1642 { 1638 - struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; 1639 1643 struct config_item *tg_pt_ci; 1640 - struct t10_alua *alua = &su_dev->t10_alua; 1644 + struct t10_alua *alua = &port->sep_lun->lun_se_dev->t10_alua; 1641 1645 struct t10_alua_tg_pt_gp *tg_pt_gp; 1642 1646 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1643 1647 ssize_t len = 0; ··· 1677 1683 { 1678 1684 struct se_portal_group *tpg; 1679 1685 struct se_lun *lun; 1680 - struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; 1686 + struct se_device *dev = port->sep_lun->lun_se_dev; 1681 1687 struct 
t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL; 1682 1688 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1683 1689 unsigned char buf[TG_PT_GROUP_NAME_BUF]; ··· 1686 1692 tpg = port->sep_tpg; 1687 1693 lun = port->sep_lun; 1688 1694 1689 - if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { 1695 + if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { 1690 1696 pr_warn("SPC3_ALUA_EMULATED not enabled for" 1691 1697 " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), 1692 1698 tpg->se_tpg_tfo->tpg_get_tag(tpg), ··· 1710 1716 * struct t10_alua_tg_pt_gp. This reference is released with 1711 1717 * core_alua_put_tg_pt_gp_from_name() below. 1712 1718 */ 1713 - tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, 1719 + tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev, 1714 1720 strstrip(buf)); 1715 1721 if (!tg_pt_gp_new) 1716 1722 return -ENODEV; ··· 1744 1750 1745 1751 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); 1746 1752 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 1747 - su_dev->t10_alua.default_tg_pt_gp); 1753 + dev->t10_alua.default_tg_pt_gp); 1748 1754 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1749 1755 1750 1756 return count; ··· 2048 2054 return count; 2049 2055 } 2050 2056 2051 - int core_setup_alua(struct se_device *dev, int force_pt) 2057 + int core_setup_alua(struct se_device *dev) 2052 2058 { 2053 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 2054 - struct t10_alua *alua = &su_dev->t10_alua; 2059 + struct t10_alua *alua = &dev->t10_alua; 2055 2060 struct t10_alua_lu_gp_member *lu_gp_mem; 2061 + 2056 2062 /* 2057 2063 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic 2058 2064 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can 2059 2065 * cause a problem because libata and some SATA RAID HBAs appear 2060 2066 * under Linux/SCSI, but emulate SCSI logic themselves. 
2061 2067 */ 2062 - if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && 2063 - !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) { 2064 - alua->alua_type = SPC_ALUA_PASSTHROUGH; 2065 - alua->alua_state_check = &core_alua_state_check_nop; 2068 + if ((dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) || 2069 + (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV && 2070 + !dev->dev_attrib.emulate_alua)) { 2066 2071 pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" 2067 2072 " emulation\n", dev->transport->name); 2068 - return 0; 2069 - } 2070 - /* 2071 - * If SPC-3 or above is reported by real or emulated struct se_device, 2072 - * use emulated ALUA. 2073 - */ 2074 - if (dev->transport->get_device_rev(dev) >= SCSI_3) { 2073 + 2074 + alua->alua_type = SPC_ALUA_PASSTHROUGH; 2075 + alua->alua_state_check = &core_alua_state_check_nop; 2076 + } else if (dev->transport->get_device_rev(dev) >= SCSI_3) { 2075 2077 pr_debug("%s: Enabling ALUA Emulation for SPC-3" 2076 2078 " device\n", dev->transport->name); 2079 + 2077 2080 /* 2078 2081 * Associate this struct se_device with the default ALUA 2079 2082 * LUN Group. ··· 2090 2099 " core/alua/lu_gps/default_lu_gp\n", 2091 2100 dev->transport->name); 2092 2101 } else { 2093 - alua->alua_type = SPC2_ALUA_DISABLED; 2094 - alua->alua_state_check = &core_alua_state_check_nop; 2095 2102 pr_debug("%s: Disabling ALUA Emulation for SPC-2" 2096 2103 " device\n", dev->transport->name); 2104 + 2105 + alua->alua_type = SPC2_ALUA_DISABLED; 2106 + alua->alua_state_check = &core_alua_state_check_nop; 2097 2107 } 2098 2108 2099 2109 return 0;
+2 -2
drivers/target/target_core_alua.h
··· 91 91 struct t10_alua_lu_gp *); 92 92 extern void core_alua_drop_lu_gp_dev(struct se_device *); 93 93 extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( 94 - struct se_subsystem_dev *, const char *, int); 94 + struct se_device *, const char *, int); 95 95 extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16); 96 96 extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( 97 97 struct se_port *); ··· 131 131 char *); 132 132 extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *, 133 133 const char *, size_t); 134 - extern int core_setup_alua(struct se_device *, int); 134 + extern int core_setup_alua(struct se_device *); 135 135 136 136 #endif /* TARGET_CORE_ALUA_H */
+151 -375
drivers/target/target_core_configfs.c
··· 565 565 struct se_dev_attrib *da, \ 566 566 char *page) \ 567 567 { \ 568 - struct se_device *dev; \ 569 - struct se_subsystem_dev *se_dev = da->da_sub_dev; \ 570 - ssize_t rb; \ 571 - \ 572 - spin_lock(&se_dev->se_dev_lock); \ 573 - dev = se_dev->se_dev_ptr; \ 574 - if (!dev) { \ 575 - spin_unlock(&se_dev->se_dev_lock); \ 576 - return -ENODEV; \ 577 - } \ 578 - rb = snprintf(page, PAGE_SIZE, "%u\n", \ 579 - (u32)dev->se_sub_dev->se_dev_attrib._name); \ 580 - spin_unlock(&se_dev->se_dev_lock); \ 581 - \ 582 - return rb; \ 568 + return snprintf(page, PAGE_SIZE, "%u\n", \ 569 + (u32)da->da_dev->dev_attrib._name); \ 583 570 } 584 571 585 572 #define DEF_DEV_ATTRIB_STORE(_name) \ ··· 575 588 const char *page, \ 576 589 size_t count) \ 577 590 { \ 578 - struct se_device *dev; \ 579 - struct se_subsystem_dev *se_dev = da->da_sub_dev; \ 580 591 unsigned long val; \ 581 592 int ret; \ 582 593 \ 583 - spin_lock(&se_dev->se_dev_lock); \ 584 - dev = se_dev->se_dev_ptr; \ 585 - if (!dev) { \ 586 - spin_unlock(&se_dev->se_dev_lock); \ 587 - return -ENODEV; \ 588 - } \ 589 594 ret = strict_strtoul(page, 0, &val); \ 590 595 if (ret < 0) { \ 591 - spin_unlock(&se_dev->se_dev_lock); \ 592 596 pr_err("strict_strtoul() failed with" \ 593 597 " ret: %d\n", ret); \ 594 598 return -EINVAL; \ 595 599 } \ 596 - ret = se_dev_set_##_name(dev, (u32)val); \ 597 - spin_unlock(&se_dev->se_dev_lock); \ 600 + ret = se_dev_set_##_name(da->da_dev, (u32)val); \ 598 601 \ 599 602 return (!ret) ? 
count : -EINVAL; \ 600 603 } ··· 741 764 struct t10_wwn *t10_wwn, 742 765 char *page) 743 766 { 744 - struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; 745 - struct se_device *dev; 746 - 747 - dev = se_dev->se_dev_ptr; 748 - if (!dev) 749 - return -ENODEV; 750 - 751 767 return sprintf(page, "T10 VPD Unit Serial Number: %s\n", 752 768 &t10_wwn->unit_serial[0]); 753 769 } ··· 750 780 const char *page, 751 781 size_t count) 752 782 { 753 - struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev; 754 - struct se_device *dev; 783 + struct se_device *dev = t10_wwn->t10_dev; 755 784 unsigned char buf[INQUIRY_VPD_SERIAL_LEN]; 756 785 757 786 /* ··· 763 794 * it is doing 'the right thing' wrt a world wide unique 764 795 * VPD Unit Serial Number that OS dependent multipath can depend on. 765 796 */ 766 - if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { 797 + if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) { 767 798 pr_err("Underlying SCSI device firmware provided VPD" 768 799 " Unit Serial, ignoring request\n"); 769 800 return -EOPNOTSUPP; ··· 780 811 * (underneath the initiator side OS dependent multipath code) 781 812 * could cause negative effects. 782 813 */ 783 - dev = su_dev->se_dev_ptr; 784 - if (dev) { 785 - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 786 - pr_err("Unable to set VPD Unit Serial while" 787 - " active %d $FABRIC_MOD exports exist\n", 788 - atomic_read(&dev->dev_export_obj.obj_access_count)); 789 - return -EINVAL; 790 - } 814 + if (dev->export_count) { 815 + pr_err("Unable to set VPD Unit Serial while" 816 + " active %d $FABRIC_MOD exports exist\n", 817 + dev->export_count); 818 + return -EINVAL; 791 819 } 820 + 792 821 /* 793 822 * This currently assumes ASCII encoding for emulated VPD Unit Serial. 
794 823 * ··· 795 828 */ 796 829 memset(buf, 0, INQUIRY_VPD_SERIAL_LEN); 797 830 snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page); 798 - snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, 831 + snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, 799 832 "%s", strstrip(buf)); 800 - su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; 833 + dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL; 801 834 802 835 pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" 803 - " %s\n", su_dev->t10_wwn.unit_serial); 836 + " %s\n", dev->t10_wwn.unit_serial); 804 837 805 838 return count; 806 839 } ··· 814 847 struct t10_wwn *t10_wwn, 815 848 char *page) 816 849 { 817 - struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; 818 - struct se_device *dev; 819 850 struct t10_vpd *vpd; 820 851 unsigned char buf[VPD_TMP_BUF_SIZE]; 821 852 ssize_t len = 0; 822 - 823 - dev = se_dev->se_dev_ptr; 824 - if (!dev) 825 - return -ENODEV; 826 853 827 854 memset(buf, 0, VPD_TMP_BUF_SIZE); 828 855 ··· 855 894 struct t10_wwn *t10_wwn, \ 856 895 char *page) \ 857 896 { \ 858 - struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \ 859 - struct se_device *dev; \ 860 897 struct t10_vpd *vpd; \ 861 898 unsigned char buf[VPD_TMP_BUF_SIZE]; \ 862 899 ssize_t len = 0; \ 863 - \ 864 - dev = se_dev->se_dev_ptr; \ 865 - if (!dev) \ 866 - return -ENODEV; \ 867 900 \ 868 901 spin_lock(&t10_wwn->t10_vpd_lock); \ 869 902 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \ ··· 958 1003 959 1004 /* Start functions for struct config_item_type target_core_dev_pr_cit */ 960 1005 961 - CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev); 1006 + CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device); 962 1007 #define SE_DEV_PR_ATTR(_name, _mode) \ 963 1008 static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \ 964 1009 __CONFIGFS_EATTR(_name, _mode, \ ··· 1026 1071 return *len; 1027 1072 } 1028 1073 1029 - static ssize_t 
target_core_dev_pr_show_attr_res_holder( 1030 - struct se_subsystem_dev *su_dev, 1031 - char *page) 1074 + static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev, 1075 + char *page) 1032 1076 { 1033 1077 ssize_t len = 0; 1034 1078 1035 - if (!su_dev->se_dev_ptr) 1036 - return -ENODEV; 1037 - 1038 - switch (su_dev->t10_pr.res_type) { 1079 + switch (dev->t10_pr.res_type) { 1039 1080 case SPC3_PERSISTENT_RESERVATIONS: 1040 - target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, 1041 - page, &len); 1081 + target_core_dev_pr_show_spc3_res(dev, page, &len); 1042 1082 break; 1043 1083 case SPC2_RESERVATIONS: 1044 - target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr, 1045 - page, &len); 1084 + target_core_dev_pr_show_spc2_res(dev, page, &len); 1046 1085 break; 1047 1086 case SPC_PASSTHROUGH: 1048 1087 len += sprintf(page+len, "Passthrough\n"); ··· 1051 1102 1052 1103 SE_DEV_PR_ATTR_RO(res_holder); 1053 1104 1054 - /* 1055 - * res_pr_all_tgt_pts 1056 - */ 1057 1105 static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( 1058 - struct se_subsystem_dev *su_dev, 1059 - char *page) 1106 + struct se_device *dev, char *page) 1060 1107 { 1061 - struct se_device *dev; 1062 1108 struct t10_pr_registration *pr_reg; 1063 1109 ssize_t len = 0; 1064 1110 1065 - dev = su_dev->se_dev_ptr; 1066 - if (!dev) 1067 - return -ENODEV; 1068 - 1069 - if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1111 + if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1070 1112 return len; 1071 1113 1072 1114 spin_lock(&dev->dev_reservation_lock); ··· 1084 1144 1085 1145 SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts); 1086 1146 1087 - /* 1088 - * res_pr_generation 1089 - */ 1090 1147 static ssize_t target_core_dev_pr_show_attr_res_pr_generation( 1091 - struct se_subsystem_dev *su_dev, 1092 - char *page) 1148 + struct se_device *dev, char *page) 1093 1149 { 1094 - if (!su_dev->se_dev_ptr) 1095 - return -ENODEV; 1096 - 1097 - if (su_dev->t10_pr.res_type != 
SPC3_PERSISTENT_RESERVATIONS) 1150 + if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1098 1151 return 0; 1099 1152 1100 - return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation); 1153 + return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation); 1101 1154 } 1102 1155 1103 1156 SE_DEV_PR_ATTR_RO(res_pr_generation); ··· 1099 1166 * res_pr_holder_tg_port 1100 1167 */ 1101 1168 static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( 1102 - struct se_subsystem_dev *su_dev, 1103 - char *page) 1169 + struct se_device *dev, char *page) 1104 1170 { 1105 - struct se_device *dev; 1106 1171 struct se_node_acl *se_nacl; 1107 1172 struct se_lun *lun; 1108 1173 struct se_portal_group *se_tpg; ··· 1108 1177 struct target_core_fabric_ops *tfo; 1109 1178 ssize_t len = 0; 1110 1179 1111 - dev = su_dev->se_dev_ptr; 1112 - if (!dev) 1113 - return -ENODEV; 1114 - 1115 - if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1180 + if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1116 1181 return len; 1117 1182 1118 1183 spin_lock(&dev->dev_reservation_lock); ··· 1138 1211 1139 1212 SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port); 1140 1213 1141 - /* 1142 - * res_pr_registered_i_pts 1143 - */ 1144 1214 static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( 1145 - struct se_subsystem_dev *su_dev, 1146 - char *page) 1215 + struct se_device *dev, char *page) 1147 1216 { 1148 1217 struct target_core_fabric_ops *tfo; 1149 1218 struct t10_pr_registration *pr_reg; ··· 1148 1225 ssize_t len = 0; 1149 1226 int reg_count = 0, prf_isid; 1150 1227 1151 - if (!su_dev->se_dev_ptr) 1152 - return -ENODEV; 1153 - 1154 - if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1228 + if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1155 1229 return len; 1156 1230 1157 1231 len += sprintf(page+len, "SPC-3 PR Registrations:\n"); 1158 1232 1159 - spin_lock(&su_dev->t10_pr.registration_lock); 1160 - list_for_each_entry(pr_reg, 
&su_dev->t10_pr.registration_list, 1233 + spin_lock(&dev->t10_pr.registration_lock); 1234 + list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, 1161 1235 pr_reg_list) { 1162 1236 1163 1237 memset(buf, 0, 384); ··· 1174 1254 len += sprintf(page+len, "%s", buf); 1175 1255 reg_count++; 1176 1256 } 1177 - spin_unlock(&su_dev->t10_pr.registration_lock); 1257 + spin_unlock(&dev->t10_pr.registration_lock); 1178 1258 1179 1259 if (!reg_count) 1180 1260 len += sprintf(page+len, "None\n"); ··· 1184 1264 1185 1265 SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts); 1186 1266 1187 - /* 1188 - * res_pr_type 1189 - */ 1190 1267 static ssize_t target_core_dev_pr_show_attr_res_pr_type( 1191 - struct se_subsystem_dev *su_dev, 1192 - char *page) 1268 + struct se_device *dev, char *page) 1193 1269 { 1194 - struct se_device *dev; 1195 1270 struct t10_pr_registration *pr_reg; 1196 1271 ssize_t len = 0; 1197 1272 1198 - dev = su_dev->se_dev_ptr; 1199 - if (!dev) 1200 - return -ENODEV; 1201 - 1202 - if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1273 + if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1203 1274 return len; 1204 1275 1205 1276 spin_lock(&dev->dev_reservation_lock); ··· 1209 1298 1210 1299 SE_DEV_PR_ATTR_RO(res_pr_type); 1211 1300 1212 - /* 1213 - * res_type 1214 - */ 1215 1301 static ssize_t target_core_dev_pr_show_attr_res_type( 1216 - struct se_subsystem_dev *su_dev, 1217 - char *page) 1302 + struct se_device *dev, char *page) 1218 1303 { 1219 1304 ssize_t len = 0; 1220 1305 1221 - if (!su_dev->se_dev_ptr) 1222 - return -ENODEV; 1223 - 1224 - switch (su_dev->t10_pr.res_type) { 1306 + switch (dev->t10_pr.res_type) { 1225 1307 case SPC3_PERSISTENT_RESERVATIONS: 1226 1308 len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); 1227 1309 break; ··· 1234 1330 1235 1331 SE_DEV_PR_ATTR_RO(res_type); 1236 1332 1237 - /* 1238 - * res_aptpl_active 1239 - */ 1240 - 1241 1333 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( 1242 - struct 
se_subsystem_dev *su_dev, 1243 - char *page) 1334 + struct se_device *dev, char *page) 1244 1335 { 1245 - if (!su_dev->se_dev_ptr) 1246 - return -ENODEV; 1247 - 1248 - if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1336 + if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1249 1337 return 0; 1250 1338 1251 1339 return sprintf(page, "APTPL Bit Status: %s\n", 1252 - (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); 1340 + (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); 1253 1341 } 1254 1342 1255 1343 SE_DEV_PR_ATTR_RO(res_aptpl_active); ··· 1250 1354 * res_aptpl_metadata 1251 1355 */ 1252 1356 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( 1253 - struct se_subsystem_dev *su_dev, 1254 - char *page) 1357 + struct se_device *dev, char *page) 1255 1358 { 1256 - if (!su_dev->se_dev_ptr) 1257 - return -ENODEV; 1258 - 1259 - if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1359 + if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1260 1360 return 0; 1261 1361 1262 1362 return sprintf(page, "Ready to process PR APTPL metadata..\n"); ··· 1284 1392 }; 1285 1393 1286 1394 static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( 1287 - struct se_subsystem_dev *su_dev, 1395 + struct se_device *dev, 1288 1396 const char *page, 1289 1397 size_t count) 1290 1398 { 1291 - struct se_device *dev; 1292 1399 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; 1293 1400 unsigned char *t_fabric = NULL, *t_port = NULL; 1294 1401 char *orig, *ptr, *arg_p, *opts; ··· 1299 1408 u16 port_rpti = 0, tpgt = 0; 1300 1409 u8 type = 0, scope; 1301 1410 1302 - dev = su_dev->se_dev_ptr; 1303 - if (!dev) 1304 - return -ENODEV; 1305 - 1306 - if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1411 + if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1307 1412 return 0; 1308 1413 1309 - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1414 + if (dev->export_count) { 1310 1415 
pr_debug("Unable to process APTPL metadata while" 1311 1416 " active fabric exports exist\n"); 1312 1417 return -EINVAL; ··· 1445 1558 goto out; 1446 1559 } 1447 1560 1448 - ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key, 1561 + ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key, 1449 1562 i_port, isid, mapped_lun, t_port, tpgt, target_lun, 1450 1563 res_holder, all_tg_pt, type); 1451 1564 out: ··· 1460 1573 1461 1574 SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR); 1462 1575 1463 - CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group); 1576 + CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group); 1464 1577 1465 1578 static struct configfs_attribute *target_core_dev_pr_attrs[] = { 1466 1579 &target_core_dev_pr_res_holder.attr, ··· 1492 1605 1493 1606 static ssize_t target_core_show_dev_info(void *p, char *page) 1494 1607 { 1495 - struct se_subsystem_dev *se_dev = p; 1496 - struct se_hba *hba = se_dev->se_dev_hba; 1497 - struct se_subsystem_api *t = hba->transport; 1608 + struct se_device *dev = p; 1609 + struct se_subsystem_api *t = dev->transport; 1498 1610 int bl = 0; 1499 1611 ssize_t read_bytes = 0; 1500 1612 1501 - if (!se_dev->se_dev_ptr) 1502 - return -ENODEV; 1503 - 1504 - transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); 1613 + transport_dump_dev_state(dev, page, &bl); 1505 1614 read_bytes += bl; 1506 - read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes); 1615 + read_bytes += t->show_configfs_dev_params(dev, page+read_bytes); 1507 1616 return read_bytes; 1508 1617 } 1509 1618 ··· 1516 1633 const char *page, 1517 1634 size_t count) 1518 1635 { 1519 - struct se_subsystem_dev *se_dev = p; 1520 - struct se_hba *hba = se_dev->se_dev_hba; 1521 - struct se_subsystem_api *t = hba->transport; 1636 + struct se_device *dev = p; 1637 + struct se_subsystem_api *t = dev->transport; 1522 1638 1523 - if (!se_dev->se_dev_su_ptr) { 1524 - pr_err("Unable to locate struct 
se_subsystem_dev>se" 1525 - "_dev_su_ptr\n"); 1526 - return -EINVAL; 1527 - } 1528 - 1529 - return t->set_configfs_dev_params(hba, se_dev, page, count); 1639 + return t->set_configfs_dev_params(dev, page, count); 1530 1640 } 1531 1641 1532 1642 static struct target_core_configfs_attribute target_core_attr_dev_control = { ··· 1532 1656 1533 1657 static ssize_t target_core_show_dev_alias(void *p, char *page) 1534 1658 { 1535 - struct se_subsystem_dev *se_dev = p; 1659 + struct se_device *dev = p; 1536 1660 1537 - if (!(se_dev->su_dev_flags & SDF_USING_ALIAS)) 1661 + if (!(dev->dev_flags & DF_USING_ALIAS)) 1538 1662 return 0; 1539 1663 1540 - return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias); 1664 + return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias); 1541 1665 } 1542 1666 1543 1667 static ssize_t target_core_store_dev_alias( ··· 1545 1669 const char *page, 1546 1670 size_t count) 1547 1671 { 1548 - struct se_subsystem_dev *se_dev = p; 1549 - struct se_hba *hba = se_dev->se_dev_hba; 1672 + struct se_device *dev = p; 1673 + struct se_hba *hba = dev->se_hba; 1550 1674 ssize_t read_bytes; 1551 1675 1552 1676 if (count > (SE_DEV_ALIAS_LEN-1)) { ··· 1556 1680 return -EINVAL; 1557 1681 } 1558 1682 1559 - read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, 1560 - "%s", page); 1683 + read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page); 1561 1684 if (!read_bytes) 1562 1685 return -EINVAL; 1563 - if (se_dev->se_dev_alias[read_bytes - 1] == '\n') 1564 - se_dev->se_dev_alias[read_bytes - 1] = '\0'; 1686 + if (dev->dev_alias[read_bytes - 1] == '\n') 1687 + dev->dev_alias[read_bytes - 1] = '\0'; 1565 1688 1566 - se_dev->su_dev_flags |= SDF_USING_ALIAS; 1689 + dev->dev_flags |= DF_USING_ALIAS; 1567 1690 1568 1691 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", 1569 1692 config_item_name(&hba->hba_group.cg_item), 1570 - config_item_name(&se_dev->se_dev_group.cg_item), 1571 - se_dev->se_dev_alias); 1693 + 
config_item_name(&dev->dev_group.cg_item), 1694 + dev->dev_alias); 1572 1695 1573 1696 return read_bytes; 1574 1697 } ··· 1582 1707 1583 1708 static ssize_t target_core_show_dev_udev_path(void *p, char *page) 1584 1709 { 1585 - struct se_subsystem_dev *se_dev = p; 1710 + struct se_device *dev = p; 1586 1711 1587 - if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) 1712 + if (!(dev->dev_flags & DF_USING_UDEV_PATH)) 1588 1713 return 0; 1589 1714 1590 - return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path); 1715 + return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path); 1591 1716 } 1592 1717 1593 1718 static ssize_t target_core_store_dev_udev_path( ··· 1595 1720 const char *page, 1596 1721 size_t count) 1597 1722 { 1598 - struct se_subsystem_dev *se_dev = p; 1599 - struct se_hba *hba = se_dev->se_dev_hba; 1723 + struct se_device *dev = p; 1724 + struct se_hba *hba = dev->se_hba; 1600 1725 ssize_t read_bytes; 1601 1726 1602 1727 if (count > (SE_UDEV_PATH_LEN-1)) { ··· 1606 1731 return -EINVAL; 1607 1732 } 1608 1733 1609 - read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, 1734 + read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN, 1610 1735 "%s", page); 1611 1736 if (!read_bytes) 1612 1737 return -EINVAL; 1613 - if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n') 1614 - se_dev->se_dev_udev_path[read_bytes - 1] = '\0'; 1738 + if (dev->udev_path[read_bytes - 1] == '\n') 1739 + dev->udev_path[read_bytes - 1] = '\0'; 1615 1740 1616 - se_dev->su_dev_flags |= SDF_USING_UDEV_PATH; 1741 + dev->dev_flags |= DF_USING_UDEV_PATH; 1617 1742 1618 1743 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", 1619 1744 config_item_name(&hba->hba_group.cg_item), 1620 - config_item_name(&se_dev->se_dev_group.cg_item), 1621 - se_dev->se_dev_udev_path); 1745 + config_item_name(&dev->dev_group.cg_item), 1746 + dev->udev_path); 1622 1747 1623 1748 return read_bytes; 1624 1749 } ··· 1636 1761 const char *page, 1637 1762 size_t count) 1638 1763 { 
1639 - struct se_subsystem_dev *se_dev = p; 1640 - struct se_device *dev; 1641 - struct se_hba *hba = se_dev->se_dev_hba; 1642 - struct se_subsystem_api *t = hba->transport; 1764 + struct se_device *dev = p; 1643 1765 char *ptr; 1766 + int ret; 1644 1767 1645 1768 ptr = strstr(page, "1"); 1646 1769 if (!ptr) { ··· 1646 1773 " is \"1\"\n"); 1647 1774 return -EINVAL; 1648 1775 } 1649 - if (se_dev->se_dev_ptr) { 1650 - pr_err("se_dev->se_dev_ptr already set for storage" 1651 - " object\n"); 1652 - return -EEXIST; 1653 - } 1654 1776 1655 - if (t->check_configfs_dev_params(hba, se_dev) < 0) 1656 - return -EINVAL; 1657 - 1658 - dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); 1659 - if (IS_ERR(dev)) 1660 - return PTR_ERR(dev); 1661 - else if (!dev) 1662 - return -EINVAL; 1663 - 1664 - se_dev->se_dev_ptr = dev; 1665 - pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" 1666 - " %p\n", se_dev->se_dev_ptr); 1667 - 1777 + ret = target_configure_device(dev); 1778 + if (ret) 1779 + return ret; 1668 1780 return count; 1669 1781 } 1670 1782 ··· 1663 1805 1664 1806 static ssize_t target_core_show_alua_lu_gp(void *p, char *page) 1665 1807 { 1666 - struct se_device *dev; 1667 - struct se_subsystem_dev *su_dev = p; 1808 + struct se_device *dev = p; 1668 1809 struct config_item *lu_ci; 1669 1810 struct t10_alua_lu_gp *lu_gp; 1670 1811 struct t10_alua_lu_gp_member *lu_gp_mem; 1671 1812 ssize_t len = 0; 1672 1813 1673 - dev = su_dev->se_dev_ptr; 1674 - if (!dev) 1675 - return -ENODEV; 1676 - 1677 - if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) 1814 + if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) 1678 1815 return len; 1679 1816 1680 1817 lu_gp_mem = dev->dev_alua_lu_gp_mem; ··· 1696 1843 const char *page, 1697 1844 size_t count) 1698 1845 { 1699 - struct se_device *dev; 1700 - struct se_subsystem_dev *su_dev = p; 1701 - struct se_hba *hba = su_dev->se_dev_hba; 1846 + struct se_device *dev = p; 1847 + struct se_hba *hba = dev->se_hba; 1702 1848 
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; 1703 1849 struct t10_alua_lu_gp_member *lu_gp_mem; 1704 1850 unsigned char buf[LU_GROUP_NAME_BUF]; 1705 1851 int move = 0; 1706 1852 1707 - dev = su_dev->se_dev_ptr; 1708 - if (!dev) 1709 - return -ENODEV; 1710 - 1711 - if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { 1853 + if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { 1712 1854 pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n", 1713 1855 config_item_name(&hba->hba_group.cg_item), 1714 - config_item_name(&su_dev->se_dev_group.cg_item)); 1856 + config_item_name(&dev->dev_group.cg_item)); 1715 1857 return -EINVAL; 1716 1858 } 1717 1859 if (count > LU_GROUP_NAME_BUF) { ··· 1750 1902 " from ALUA LU Group: core/alua/lu_gps/%s, ID:" 1751 1903 " %hu\n", 1752 1904 config_item_name(&hba->hba_group.cg_item), 1753 - config_item_name(&su_dev->se_dev_group.cg_item), 1905 + config_item_name(&dev->dev_group.cg_item), 1754 1906 config_item_name(&lu_gp->lu_gp_group.cg_item), 1755 1907 lu_gp->lu_gp_id); 1756 1908 ··· 1775 1927 " core/alua/lu_gps/%s, ID: %hu\n", 1776 1928 (move) ? 
"Moving" : "Adding", 1777 1929 config_item_name(&hba->hba_group.cg_item), 1778 - config_item_name(&su_dev->se_dev_group.cg_item), 1930 + config_item_name(&dev->dev_group.cg_item), 1779 1931 config_item_name(&lu_gp_new->lu_gp_group.cg_item), 1780 1932 lu_gp_new->lu_gp_id); 1781 1933 ··· 1803 1955 1804 1956 static void target_core_dev_release(struct config_item *item) 1805 1957 { 1806 - struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 1807 - struct se_subsystem_dev, se_dev_group); 1808 - struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 1809 - struct se_subsystem_api *t = hba->transport; 1810 - struct config_group *dev_cg = &se_dev->se_dev_group; 1958 + struct config_group *dev_cg = to_config_group(item); 1959 + struct se_device *dev = 1960 + container_of(dev_cg, struct se_device, dev_group); 1811 1961 1812 1962 kfree(dev_cg->default_groups); 1813 - /* 1814 - * This pointer will set when the storage is enabled with: 1815 - *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` 1816 - */ 1817 - if (se_dev->se_dev_ptr) { 1818 - pr_debug("Target_Core_ConfigFS: Calling se_free_" 1819 - "virtual_device() for se_dev_ptr: %p\n", 1820 - se_dev->se_dev_ptr); 1821 - 1822 - se_free_virtual_device(se_dev->se_dev_ptr, hba); 1823 - } else { 1824 - /* 1825 - * Release struct se_subsystem_dev->se_dev_su_ptr.. 
1826 - */ 1827 - pr_debug("Target_Core_ConfigFS: Calling t->free_" 1828 - "device() for se_dev_su_ptr: %p\n", 1829 - se_dev->se_dev_su_ptr); 1830 - 1831 - t->free_device(se_dev->se_dev_su_ptr); 1832 - } 1833 - 1834 - pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem" 1835 - "_dev_t: %p\n", se_dev); 1836 - kfree(se_dev); 1963 + target_free_device(dev); 1837 1964 } 1838 1965 1839 1966 static ssize_t target_core_dev_show(struct config_item *item, 1840 1967 struct configfs_attribute *attr, 1841 1968 char *page) 1842 1969 { 1843 - struct se_subsystem_dev *se_dev = container_of( 1844 - to_config_group(item), struct se_subsystem_dev, 1845 - se_dev_group); 1970 + struct config_group *dev_cg = to_config_group(item); 1971 + struct se_device *dev = 1972 + container_of(dev_cg, struct se_device, dev_group); 1846 1973 struct target_core_configfs_attribute *tc_attr = container_of( 1847 1974 attr, struct target_core_configfs_attribute, attr); 1848 1975 1849 1976 if (!tc_attr->show) 1850 1977 return -EINVAL; 1851 1978 1852 - return tc_attr->show(se_dev, page); 1979 + return tc_attr->show(dev, page); 1853 1980 } 1854 1981 1855 1982 static ssize_t target_core_dev_store(struct config_item *item, 1856 1983 struct configfs_attribute *attr, 1857 1984 const char *page, size_t count) 1858 1985 { 1859 - struct se_subsystem_dev *se_dev = container_of( 1860 - to_config_group(item), struct se_subsystem_dev, 1861 - se_dev_group); 1986 + struct config_group *dev_cg = to_config_group(item); 1987 + struct se_device *dev = 1988 + container_of(dev_cg, struct se_device, dev_group); 1862 1989 struct target_core_configfs_attribute *tc_attr = container_of( 1863 1990 attr, struct target_core_configfs_attribute, attr); 1864 1991 1865 1992 if (!tc_attr->store) 1866 1993 return -EINVAL; 1867 1994 1868 - return tc_attr->store(se_dev, page, count); 1995 + return tc_attr->store(dev, page, count); 1869 1996 } 1870 1997 1871 1998 static struct configfs_item_operations target_core_dev_item_ops = { ··· 
1930 2107 { 1931 2108 struct se_device *dev; 1932 2109 struct se_hba *hba; 1933 - struct se_subsystem_dev *su_dev; 1934 2110 struct t10_alua_lu_gp_member *lu_gp_mem; 1935 2111 ssize_t len = 0, cur_len; 1936 2112 unsigned char buf[LU_GROUP_NAME_BUF]; ··· 1939 2117 spin_lock(&lu_gp->lu_gp_lock); 1940 2118 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { 1941 2119 dev = lu_gp_mem->lu_gp_mem_dev; 1942 - su_dev = dev->se_sub_dev; 1943 - hba = su_dev->se_dev_hba; 2120 + hba = dev->se_hba; 1944 2121 1945 2122 cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n", 1946 2123 config_item_name(&hba->hba_group.cg_item), 1947 - config_item_name(&su_dev->se_dev_group.cg_item)); 2124 + config_item_name(&dev->dev_group.cg_item)); 1948 2125 cur_len++; /* Extra byte for NULL terminator */ 1949 2126 1950 2127 if ((cur_len + len) > PAGE_SIZE) { ··· 2081 2260 const char *page, 2082 2261 size_t count) 2083 2262 { 2084 - struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; 2263 + struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 2085 2264 unsigned long tmp; 2086 2265 int new_state, ret; 2087 2266 ··· 2105 2284 return -EINVAL; 2106 2285 } 2107 2286 2108 - ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr, 2287 + ret = core_alua_do_port_transition(tg_pt_gp, dev, 2109 2288 NULL, NULL, new_state, 0); 2110 2289 return (!ret) ? 
count : -EINVAL; 2111 2290 } ··· 2441 2620 struct t10_alua *alua = container_of(group, struct t10_alua, 2442 2621 alua_tg_pt_gps_group); 2443 2622 struct t10_alua_tg_pt_gp *tg_pt_gp; 2444 - struct se_subsystem_dev *su_dev = alua->t10_sub_dev; 2445 2623 struct config_group *alua_tg_pt_gp_cg = NULL; 2446 2624 struct config_item *alua_tg_pt_gp_ci = NULL; 2447 2625 2448 - tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); 2626 + tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0); 2449 2627 if (!tg_pt_gp) 2450 2628 return NULL; 2451 2629 ··· 2541 2721 const char *name) 2542 2722 { 2543 2723 struct t10_alua_tg_pt_gp *tg_pt_gp; 2544 - struct se_subsystem_dev *se_dev; 2545 2724 struct se_subsystem_api *t; 2546 2725 struct config_item *hba_ci = &group->cg_item; 2547 2726 struct se_hba *hba = item_to_hba(hba_ci); 2727 + struct se_device *dev; 2548 2728 struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL; 2549 2729 struct config_group *dev_stat_grp = NULL; 2550 2730 int errno = -ENOMEM, ret; ··· 2557 2737 */ 2558 2738 t = hba->transport; 2559 2739 2560 - se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 2561 - if (!se_dev) { 2562 - pr_err("Unable to allocate memory for" 2563 - " struct se_subsystem_dev\n"); 2564 - goto unlock; 2565 - } 2566 - INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); 2567 - spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); 2568 - INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); 2569 - INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); 2570 - spin_lock_init(&se_dev->t10_pr.registration_lock); 2571 - spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); 2572 - INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); 2573 - spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); 2574 - spin_lock_init(&se_dev->se_dev_lock); 2575 - se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; 2576 - se_dev->t10_wwn.t10_sub_dev = se_dev; 2577 - se_dev->t10_alua.t10_sub_dev = se_dev; 2578 - se_dev->se_dev_attrib.da_sub_dev = se_dev; 2740 + dev = 
target_alloc_device(hba, name); 2741 + if (!dev) 2742 + goto out_unlock; 2579 2743 2580 - se_dev->se_dev_hba = hba; 2581 - dev_cg = &se_dev->se_dev_group; 2744 + dev_cg = &dev->dev_group; 2582 2745 2583 2746 dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, 2584 2747 GFP_KERNEL); 2585 2748 if (!dev_cg->default_groups) 2586 - goto out; 2587 - /* 2588 - * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr 2589 - * for ->allocate_virtdevice() 2590 - * 2591 - * se_dev->se_dev_ptr will be set after ->create_virtdev() 2592 - * has been called successfully in the next level up in the 2593 - * configfs tree for device object's struct config_group. 2594 - */ 2595 - se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); 2596 - if (!se_dev->se_dev_su_ptr) { 2597 - pr_err("Unable to locate subsystem dependent pointer" 2598 - " from allocate_virtdevice()\n"); 2599 - goto out; 2600 - } 2749 + goto out_free_device; 2601 2750 2602 - config_group_init_type_name(&se_dev->se_dev_group, name, 2603 - &target_core_dev_cit); 2604 - config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib", 2751 + config_group_init_type_name(dev_cg, name, &target_core_dev_cit); 2752 + config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", 2605 2753 &target_core_dev_attrib_cit); 2606 - config_group_init_type_name(&se_dev->se_dev_pr_group, "pr", 2754 + config_group_init_type_name(&dev->dev_pr_group, "pr", 2607 2755 &target_core_dev_pr_cit); 2608 - config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn", 2756 + config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", 2609 2757 &target_core_dev_wwn_cit); 2610 - config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group, 2758 + config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, 2611 2759 "alua", &target_core_alua_tg_pt_gps_cit); 2612 - config_group_init_type_name(&se_dev->dev_stat_grps.stat_group, 2760 + config_group_init_type_name(&dev->dev_stat_grps.stat_group, 
2613 2761 "statistics", &target_core_stat_cit); 2614 2762 2615 - dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group; 2616 - dev_cg->default_groups[1] = &se_dev->se_dev_pr_group; 2617 - dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group; 2618 - dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group; 2619 - dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group; 2763 + dev_cg->default_groups[0] = &dev->dev_attrib.da_group; 2764 + dev_cg->default_groups[1] = &dev->dev_pr_group; 2765 + dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group; 2766 + dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group; 2767 + dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group; 2620 2768 dev_cg->default_groups[5] = NULL; 2621 2769 /* 2622 2770 * Add core/$HBA/$DEV/alua/default_tg_pt_gp 2623 2771 */ 2624 - tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); 2772 + tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1); 2625 2773 if (!tg_pt_gp) 2626 - goto out; 2774 + goto out_free_dev_cg_default_groups; 2775 + dev->t10_alua.default_tg_pt_gp = tg_pt_gp; 2627 2776 2628 - tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2777 + tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group; 2629 2778 tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, 2630 2779 GFP_KERNEL); 2631 2780 if (!tg_pt_gp_cg->default_groups) { 2632 2781 pr_err("Unable to allocate tg_pt_gp_cg->" 2633 2782 "default_groups\n"); 2634 - goto out; 2783 + goto out_free_tg_pt_gp; 2635 2784 } 2636 2785 2637 2786 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group, 2638 2787 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); 2639 2788 tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; 2640 2789 tg_pt_gp_cg->default_groups[1] = NULL; 2641 - se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp; 2642 2790 /* 2643 2791 * Add core/$HBA/$DEV/statistics/ default groups 2644 2792 */ 2645 - dev_stat_grp = 
&se_dev->dev_stat_grps.stat_group; 2793 + dev_stat_grp = &dev->dev_stat_grps.stat_group; 2646 2794 dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, 2647 2795 GFP_KERNEL); 2648 2796 if (!dev_stat_grp->default_groups) { 2649 2797 pr_err("Unable to allocate dev_stat_grp->default_groups\n"); 2650 - goto out; 2798 + goto out_free_tg_pt_gp_cg_default_groups; 2651 2799 } 2652 - target_stat_setup_dev_default_groups(se_dev); 2653 - 2654 - pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" 2655 - " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); 2800 + target_stat_setup_dev_default_groups(dev); 2656 2801 2657 2802 mutex_unlock(&hba->hba_access_mutex); 2658 - return &se_dev->se_dev_group; 2659 - out: 2660 - if (se_dev->t10_alua.default_tg_pt_gp) { 2661 - core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp); 2662 - se_dev->t10_alua.default_tg_pt_gp = NULL; 2663 - } 2664 - if (dev_stat_grp) 2665 - kfree(dev_stat_grp->default_groups); 2666 - if (tg_pt_gp_cg) 2667 - kfree(tg_pt_gp_cg->default_groups); 2668 - if (dev_cg) 2669 - kfree(dev_cg->default_groups); 2670 - if (se_dev->se_dev_su_ptr) 2671 - t->free_device(se_dev->se_dev_su_ptr); 2672 - kfree(se_dev); 2673 - unlock: 2803 + return dev_cg; 2804 + 2805 + out_free_tg_pt_gp_cg_default_groups: 2806 + kfree(tg_pt_gp_cg->default_groups); 2807 + out_free_tg_pt_gp: 2808 + core_alua_free_tg_pt_gp(tg_pt_gp); 2809 + out_free_dev_cg_default_groups: 2810 + kfree(dev_cg->default_groups); 2811 + out_free_device: 2812 + target_free_device(dev); 2813 + out_unlock: 2674 2814 mutex_unlock(&hba->hba_access_mutex); 2675 2815 return ERR_PTR(errno); 2676 2816 } ··· 2639 2859 struct config_group *group, 2640 2860 struct config_item *item) 2641 2861 { 2642 - struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 2643 - struct se_subsystem_dev, se_dev_group); 2862 + struct config_group *dev_cg = to_config_group(item); 2863 + struct se_device *dev = 2864 + container_of(dev_cg, struct 
se_device, dev_group); 2644 2865 struct se_hba *hba; 2645 2866 struct config_item *df_item; 2646 - struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp; 2867 + struct config_group *tg_pt_gp_cg, *dev_stat_grp; 2647 2868 int i; 2648 2869 2649 - hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 2870 + hba = item_to_hba(&dev->se_hba->hba_group.cg_item); 2650 2871 2651 2872 mutex_lock(&hba->hba_access_mutex); 2652 2873 2653 - dev_stat_grp = &se_dev->dev_stat_grps.stat_group; 2874 + dev_stat_grp = &dev->dev_stat_grps.stat_group; 2654 2875 for (i = 0; dev_stat_grp->default_groups[i]; i++) { 2655 2876 df_item = &dev_stat_grp->default_groups[i]->cg_item; 2656 2877 dev_stat_grp->default_groups[i] = NULL; ··· 2659 2878 } 2660 2879 kfree(dev_stat_grp->default_groups); 2661 2880 2662 - tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; 2881 + tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group; 2663 2882 for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { 2664 2883 df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; 2665 2884 tg_pt_gp_cg->default_groups[i] = NULL; ··· 2670 2889 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp 2671 2890 * directly from target_core_alua_tg_pt_gp_release(). 2672 2891 */ 2673 - se_dev->t10_alua.default_tg_pt_gp = NULL; 2892 + dev->t10_alua.default_tg_pt_gp = NULL; 2674 2893 2675 - dev_cg = &se_dev->se_dev_group; 2676 2894 for (i = 0; dev_cg->default_groups[i]; i++) { 2677 2895 df_item = &dev_cg->default_groups[i]->cg_item; 2678 2896 dev_cg->default_groups[i] = NULL; 2679 2897 config_item_put(df_item); 2680 2898 } 2681 2899 /* 2682 - * The releasing of se_dev and associated se_dev->se_dev_ptr is done 2683 - * from target_core_dev_item_ops->release() ->target_core_dev_release(). 
2900 + * se_dev is released from target_core_dev_item_ops->release() 2684 2901 */ 2685 2902 config_item_put(item); 2686 2903 mutex_unlock(&hba->hba_access_mutex); ··· 2741 2962 return -EINVAL; 2742 2963 } 2743 2964 2744 - spin_lock(&hba->device_lock); 2745 - if (!list_empty(&hba->hba_dev_list)) { 2965 + if (hba->dev_count) { 2746 2966 pr_err("Unable to set hba_mode with active devices\n"); 2747 - spin_unlock(&hba->device_lock); 2748 2967 return -EINVAL; 2749 2968 } 2750 - spin_unlock(&hba->device_lock); 2751 2969 2752 2970 ret = transport->pmode_enable_hba(hba, mode_flag); 2753 2971 if (ret < 0)
+297 -316
drivers/target/target_core_device.c
··· 50 50 #include "target_core_pr.h" 51 51 #include "target_core_ua.h" 52 52 53 - static void se_dev_start(struct se_device *dev); 54 - static void se_dev_stop(struct se_device *dev); 55 - 56 53 static struct se_hba *lun0_hba; 57 - static struct se_subsystem_dev *lun0_su_dev; 58 54 /* not static, needed by tpg.c */ 59 55 struct se_device *g_lun0_dev; 60 56 ··· 132 136 se_cmd->orig_fe_lun = 0; 133 137 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; 134 138 } 135 - /* 136 - * Determine if the struct se_lun is online. 137 - * FIXME: Check for LUN_RESET + UNIT Attention 138 - */ 139 - if (se_dev_check_online(se_lun->lun_se_dev) != 0) { 140 - se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; 141 - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 142 - return -ENODEV; 143 - } 144 139 145 140 /* Directly associate cmd with se_dev */ 146 141 se_cmd->se_dev = se_lun->lun_se_dev; ··· 186 199 " Access for 0x%08x\n", 187 200 se_cmd->se_tfo->get_fabric_name(), 188 201 unpacked_lun); 189 - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 190 - return -ENODEV; 191 - } 192 - /* 193 - * Determine if the struct se_lun is online. 
194 - * FIXME: Check for LUN_RESET + UNIT Attention 195 - */ 196 - if (se_dev_check_online(se_lun->lun_se_dev) != 0) { 197 202 se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 198 203 return -ENODEV; 199 204 } ··· 544 565 struct se_port *port, 545 566 struct se_lun *lun) 546 567 { 547 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 548 568 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; 549 569 550 570 spin_lock(&dev->se_port_lock); ··· 556 578 list_add_tail(&port->sep_list, &dev->dev_sep_list); 557 579 spin_unlock(&dev->se_port_lock); 558 580 559 - if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 581 + if (dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 560 582 tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); 561 583 if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { 562 584 pr_err("Unable to allocate t10_alua_tg_pt" ··· 565 587 } 566 588 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 567 589 __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, 568 - su_dev->t10_alua.default_tg_pt_gp); 590 + dev->t10_alua.default_tg_pt_gp); 569 591 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 570 592 pr_debug("%s/%s: Adding to default ALUA Target Port" 571 593 " Group: alua/default_tg_pt_gp\n", ··· 603 625 struct se_portal_group *tpg, 604 626 struct se_lun *lun) 605 627 { 628 + struct se_hba *hba = dev->se_hba; 606 629 struct se_port *port; 607 630 608 631 port = core_alloc_port(dev); ··· 611 632 return PTR_ERR(port); 612 633 613 634 lun->lun_se_dev = dev; 614 - se_dev_start(dev); 615 635 616 - atomic_inc(&dev->dev_export_obj.obj_access_count); 636 + spin_lock(&hba->device_lock); 637 + dev->export_count++; 638 + spin_unlock(&hba->device_lock); 639 + 617 640 core_export_port(dev, tpg, port, lun); 618 641 return 0; 619 642 } ··· 625 644 struct se_portal_group *tpg, 626 645 struct se_lun *lun) 627 646 { 647 + struct se_hba *hba = dev->se_hba; 628 648 struct se_port *port = lun->lun_sep; 629 649 630 650 spin_lock(&lun->lun_sep_lock); ··· 636 654 
spin_unlock(&lun->lun_sep_lock); 637 655 638 656 spin_lock(&dev->se_port_lock); 639 - atomic_dec(&dev->dev_export_obj.obj_access_count); 640 657 core_release_port(dev, port); 641 658 spin_unlock(&dev->se_port_lock); 642 659 643 - se_dev_stop(dev); 660 + spin_lock(&hba->device_lock); 661 + dev->export_count--; 662 + spin_unlock(&hba->device_lock); 663 + 644 664 lun->lun_se_dev = NULL; 645 665 } 646 666 ··· 709 725 return 0; 710 726 } 711 727 712 - /* se_release_device_for_hba(): 713 - * 714 - * 715 - */ 716 - void se_release_device_for_hba(struct se_device *dev) 717 - { 718 - struct se_hba *hba = dev->se_hba; 719 - 720 - if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || 721 - (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) || 722 - (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) || 723 - (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) || 724 - (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED)) 725 - se_dev_stop(dev); 726 - 727 - if (dev->dev_ptr) { 728 - destroy_workqueue(dev->tmr_wq); 729 - if (dev->transport->free_device) 730 - dev->transport->free_device(dev->dev_ptr); 731 - } 732 - 733 - spin_lock(&hba->device_lock); 734 - list_del(&dev->dev_list); 735 - hba->dev_count--; 736 - spin_unlock(&hba->device_lock); 737 - 738 - core_scsi3_free_all_registrations(dev); 739 - se_release_vpd_for_dev(dev); 740 - 741 - kfree(dev); 742 - } 743 - 744 - void se_release_vpd_for_dev(struct se_device *dev) 728 + static void se_release_vpd_for_dev(struct se_device *dev) 745 729 { 746 730 struct t10_vpd *vpd, *vpd_tmp; 747 731 748 - spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); 732 + spin_lock(&dev->t10_wwn.t10_vpd_lock); 749 733 list_for_each_entry_safe(vpd, vpd_tmp, 750 - &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) { 734 + &dev->t10_wwn.t10_vpd_list, vpd_list) { 751 735 list_del(&vpd->vpd_list); 752 736 kfree(vpd); 753 737 } 754 - spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); 755 - } 756 - 757 - /* se_free_virtual_device(): 758 - * 759 - * Used 
for IBLOCK, RAMDISK, and FILEIO Transport Drivers. 760 - */ 761 - int se_free_virtual_device(struct se_device *dev, struct se_hba *hba) 762 - { 763 - if (!list_empty(&dev->dev_sep_list)) 764 - dump_stack(); 765 - 766 - core_alua_free_lu_gp_mem(dev); 767 - se_release_device_for_hba(dev); 768 - 769 - return 0; 770 - } 771 - 772 - static void se_dev_start(struct se_device *dev) 773 - { 774 - struct se_hba *hba = dev->se_hba; 775 - 776 - spin_lock(&hba->device_lock); 777 - atomic_inc(&dev->dev_obj.obj_access_count); 778 - if (atomic_read(&dev->dev_obj.obj_access_count) == 1) { 779 - if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) { 780 - dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED; 781 - dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED; 782 - } else if (dev->dev_status & 783 - TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) { 784 - dev->dev_status &= 785 - ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; 786 - dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED; 787 - } 788 - } 789 - spin_unlock(&hba->device_lock); 790 - } 791 - 792 - static void se_dev_stop(struct se_device *dev) 793 - { 794 - struct se_hba *hba = dev->se_hba; 795 - 796 - spin_lock(&hba->device_lock); 797 - atomic_dec(&dev->dev_obj.obj_access_count); 798 - if (atomic_read(&dev->dev_obj.obj_access_count) == 0) { 799 - if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) { 800 - dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED; 801 - dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; 802 - } else if (dev->dev_status & 803 - TRANSPORT_DEVICE_OFFLINE_ACTIVATED) { 804 - dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED; 805 - dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED; 806 - } 807 - } 808 - spin_unlock(&hba->device_lock); 809 - } 810 - 811 - int se_dev_check_online(struct se_device *dev) 812 - { 813 - unsigned long flags; 814 - int ret; 815 - 816 - spin_lock_irqsave(&dev->dev_status_lock, flags); 817 - ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) || 818 - (dev->dev_status & 
TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1; 819 - spin_unlock_irqrestore(&dev->dev_status_lock, flags); 820 - 821 - return ret; 822 - } 823 - 824 - int se_dev_check_shutdown(struct se_device *dev) 825 - { 826 - int ret; 827 - 828 - spin_lock_irq(&dev->dev_status_lock); 829 - ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN); 830 - spin_unlock_irq(&dev->dev_status_lock); 831 - 832 - return ret; 738 + spin_unlock(&dev->t10_wwn.t10_vpd_lock); 833 739 } 834 740 835 741 static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) ··· 740 866 return aligned_max_sectors; 741 867 } 742 868 743 - void se_dev_set_default_attribs( 744 - struct se_device *dev, 745 - struct se_dev_limits *dev_limits) 746 - { 747 - struct queue_limits *limits = &dev_limits->limits; 748 - 749 - dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO; 750 - dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; 751 - dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; 752 - dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; 753 - dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; 754 - dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS; 755 - dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU; 756 - dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 757 - dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; 758 - dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; 759 - dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 760 - dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT; 761 - dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 762 - /* 763 - * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK 764 - * iblock_create_virtdevice() from struct queue_limits values 765 - * if blk_queue_discard()==1 766 - */ 767 - dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = 
DA_MAX_UNMAP_LBA_COUNT; 768 - dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 769 - DA_MAX_UNMAP_BLOCK_DESC_COUNT; 770 - dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; 771 - dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 772 - DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 773 - /* 774 - * block_size is based on subsystem plugin dependent requirements. 775 - */ 776 - dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; 777 - dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; 778 - /* 779 - * Align max_hw_sectors down to PAGE_SIZE I/O transfers 780 - */ 781 - limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors, 782 - limits->logical_block_size); 783 - dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; 784 - 785 - /* 786 - * Set fabric_max_sectors, which is reported in block limits 787 - * VPD page (B0h). 788 - */ 789 - dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; 790 - /* 791 - * Set optimal_sectors from fabric_max_sectors, which can be 792 - * lowered via configfs. 793 - */ 794 - dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; 795 - /* 796 - * queue_depth is based on subsystem plugin dependent requirements. 
797 - */ 798 - dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth; 799 - dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; 800 - } 801 - 802 869 int se_dev_set_max_unmap_lba_count( 803 870 struct se_device *dev, 804 871 u32 max_unmap_lba_count) 805 872 { 806 - dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; 873 + dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count; 807 874 pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n", 808 - dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); 875 + dev, dev->dev_attrib.max_unmap_lba_count); 809 876 return 0; 810 877 } 811 878 ··· 754 939 struct se_device *dev, 755 940 u32 max_unmap_block_desc_count) 756 941 { 757 - dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 942 + dev->dev_attrib.max_unmap_block_desc_count = 758 943 max_unmap_block_desc_count; 759 944 pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n", 760 - dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); 945 + dev, dev->dev_attrib.max_unmap_block_desc_count); 761 946 return 0; 762 947 } 763 948 ··· 765 950 struct se_device *dev, 766 951 u32 unmap_granularity) 767 952 { 768 - dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; 953 + dev->dev_attrib.unmap_granularity = unmap_granularity; 769 954 pr_debug("dev[%p]: Set unmap_granularity: %u\n", 770 - dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); 955 + dev, dev->dev_attrib.unmap_granularity); 771 956 return 0; 772 957 } 773 958 ··· 775 960 struct se_device *dev, 776 961 u32 unmap_granularity_alignment) 777 962 { 778 - dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; 963 + dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; 779 964 pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n", 780 - dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); 965 + dev, dev->dev_attrib.unmap_granularity_alignment); 781 
966 return 0; 782 967 } 783 968 ··· 808 993 pr_err("emulate_fua_write not supported for pSCSI\n"); 809 994 return -EINVAL; 810 995 } 811 - dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; 996 + dev->dev_attrib.emulate_fua_write = flag; 812 997 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 813 - dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); 998 + dev, dev->dev_attrib.emulate_fua_write); 814 999 return 0; 815 1000 } 816 1001 ··· 840 1025 pr_err("emulate_write_cache not supported for pSCSI\n"); 841 1026 return -EINVAL; 842 1027 } 843 - dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; 1028 + dev->dev_attrib.emulate_write_cache = flag; 844 1029 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", 845 - dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); 1030 + dev, dev->dev_attrib.emulate_write_cache); 846 1031 return 0; 847 1032 } 848 1033 ··· 853 1038 return -EINVAL; 854 1039 } 855 1040 856 - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1041 + if (dev->export_count) { 857 1042 pr_err("dev[%p]: Unable to change SE Device" 858 - " UA_INTRLCK_CTRL while dev_export_obj: %d count" 859 - " exists\n", dev, 860 - atomic_read(&dev->dev_export_obj.obj_access_count)); 1043 + " UA_INTRLCK_CTRL while export_count is %d\n", 1044 + dev, dev->export_count); 861 1045 return -EINVAL; 862 1046 } 863 - dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; 1047 + dev->dev_attrib.emulate_ua_intlck_ctrl = flag; 864 1048 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", 865 - dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); 1049 + dev, dev->dev_attrib.emulate_ua_intlck_ctrl); 866 1050 867 1051 return 0; 868 1052 } ··· 873 1059 return -EINVAL; 874 1060 } 875 1061 876 - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1062 + if (dev->export_count) { 877 1063 pr_err("dev[%p]: Unable to change SE Device TAS while" 878 - " dev_export_obj: %d count exists\n", dev, 879 - 
atomic_read(&dev->dev_export_obj.obj_access_count)); 1064 + " export_count is %d\n", 1065 + dev, dev->export_count); 880 1066 return -EINVAL; 881 1067 } 882 - dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; 1068 + dev->dev_attrib.emulate_tas = flag; 883 1069 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", 884 - dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); 1070 + dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); 885 1071 886 1072 return 0; 887 1073 } ··· 896 1082 * We expect this value to be non-zero when generic Block Layer 897 1083 * Discard supported is detected iblock_create_virtdevice(). 898 1084 */ 899 - if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1085 + if (flag && !dev->dev_attrib.max_unmap_block_desc_count) { 900 1086 pr_err("Generic Block Discard not supported\n"); 901 1087 return -ENOSYS; 902 1088 } 903 1089 904 - dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; 1090 + dev->dev_attrib.emulate_tpu = flag; 905 1091 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", 906 1092 dev, flag); 907 1093 return 0; ··· 917 1103 * We expect this value to be non-zero when generic Block Layer 918 1104 * Discard supported is detected iblock_create_virtdevice(). 
919 1105 */ 920 - if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 1106 + if (flag && !dev->dev_attrib.max_unmap_block_desc_count) { 921 1107 pr_err("Generic Block Discard not supported\n"); 922 1108 return -ENOSYS; 923 1109 } 924 1110 925 - dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; 1111 + dev->dev_attrib.emulate_tpws = flag; 926 1112 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", 927 1113 dev, flag); 928 1114 return 0; ··· 934 1120 pr_err("Illegal value %d\n", flag); 935 1121 return -EINVAL; 936 1122 } 937 - dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; 1123 + dev->dev_attrib.enforce_pr_isids = flag; 938 1124 pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, 939 - (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 1125 + (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 940 1126 return 0; 941 1127 } 942 1128 ··· 946 1132 printk(KERN_ERR "Illegal value %d\n", flag); 947 1133 return -EINVAL; 948 1134 } 949 - dev->se_sub_dev->se_dev_attrib.is_nonrot = flag; 1135 + dev->dev_attrib.is_nonrot = flag; 950 1136 pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", 951 1137 dev, flag); 952 1138 return 0; ··· 959 1145 " reordering not implemented\n", dev); 960 1146 return -ENOSYS; 961 1147 } 962 - dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag; 1148 + dev->dev_attrib.emulate_rest_reord = flag; 963 1149 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); 964 1150 return 0; 965 1151 } ··· 969 1155 */ 970 1156 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) 971 1157 { 972 - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1158 + if (dev->export_count) { 973 1159 pr_err("dev[%p]: Unable to change SE Device TCQ while" 974 - " dev_export_obj: %d count exists\n", dev, 975 - atomic_read(&dev->dev_export_obj.obj_access_count)); 1160 + " export_count is %d\n", 1161 + dev, dev->export_count); 976 1162 return -EINVAL; 977 
1163 } 978 1164 if (!queue_depth) { ··· 982 1168 } 983 1169 984 1170 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 985 - if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1171 + if (queue_depth > dev->dev_attrib.hw_queue_depth) { 986 1172 pr_err("dev[%p]: Passed queue_depth: %u" 987 1173 " exceeds TCM/SE_Device TCQ: %u\n", 988 1174 dev, queue_depth, 989 - dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1175 + dev->dev_attrib.hw_queue_depth); 990 1176 return -EINVAL; 991 1177 } 992 1178 } else { 993 - if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { 994 - if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { 1179 + if (queue_depth > dev->dev_attrib.queue_depth) { 1180 + if (queue_depth > dev->dev_attrib.hw_queue_depth) { 995 1181 pr_err("dev[%p]: Passed queue_depth:" 996 1182 " %u exceeds TCM/SE_Device MAX" 997 1183 " TCQ: %u\n", dev, queue_depth, 998 - dev->se_sub_dev->se_dev_attrib.hw_queue_depth); 1184 + dev->dev_attrib.hw_queue_depth); 999 1185 return -EINVAL; 1000 1186 } 1001 1187 } 1002 1188 } 1003 1189 1004 - dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1190 + dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1005 1191 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1006 1192 dev, queue_depth); 1007 1193 return 0; ··· 1009 1195 1010 1196 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 1011 1197 { 1012 - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1198 + if (dev->export_count) { 1013 1199 pr_err("dev[%p]: Unable to change SE Device" 1014 - " fabric_max_sectors while dev_export_obj: %d count exists\n", 1015 - dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1200 + " fabric_max_sectors while export_count is %d\n", 1201 + dev, dev->export_count); 1016 1202 return -EINVAL; 1017 1203 } 1018 1204 if (!fabric_max_sectors) { ··· 1027 1213 return -EINVAL; 1028 1214 } 1029 1215 if 
(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1030 - if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { 1216 + if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) { 1031 1217 pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1032 1218 " greater than TCM/SE_Device max_sectors:" 1033 1219 " %u\n", dev, fabric_max_sectors, 1034 - dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 1220 + dev->dev_attrib.hw_max_sectors); 1035 1221 return -EINVAL; 1036 1222 } 1037 1223 } else { ··· 1047 1233 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1048 1234 */ 1049 1235 fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, 1050 - dev->se_sub_dev->se_dev_attrib.block_size); 1236 + dev->dev_attrib.block_size); 1051 1237 1052 - dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors; 1238 + dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; 1053 1239 pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1054 1240 dev, fabric_max_sectors); 1055 1241 return 0; ··· 1057 1243 1058 1244 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1059 1245 { 1060 - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1246 + if (dev->export_count) { 1061 1247 pr_err("dev[%p]: Unable to change SE Device" 1062 - " optimal_sectors while dev_export_obj: %d count exists\n", 1063 - dev, atomic_read(&dev->dev_export_obj.obj_access_count)); 1248 + " optimal_sectors while export_count is %d\n", 1249 + dev, dev->export_count); 1064 1250 return -EINVAL; 1065 1251 } 1066 1252 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { ··· 1068 1254 " changed for TCM/pSCSI\n", dev); 1069 1255 return -EINVAL; 1070 1256 } 1071 - if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) { 1257 + if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { 1072 1258 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1073 1259 " greater than 
fabric_max_sectors: %u\n", dev, 1074 - optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors); 1260 + optimal_sectors, dev->dev_attrib.fabric_max_sectors); 1075 1261 return -EINVAL; 1076 1262 } 1077 1263 1078 - dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; 1264 + dev->dev_attrib.optimal_sectors = optimal_sectors; 1079 1265 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", 1080 1266 dev, optimal_sectors); 1081 1267 return 0; ··· 1083 1269 1084 1270 int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1085 1271 { 1086 - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { 1272 + if (dev->export_count) { 1087 1273 pr_err("dev[%p]: Unable to change SE Device block_size" 1088 - " while dev_export_obj: %d count exists\n", dev, 1089 - atomic_read(&dev->dev_export_obj.obj_access_count)); 1274 + " while export_count is %d\n", 1275 + dev, dev->export_count); 1090 1276 return -EINVAL; 1091 1277 } 1092 1278 ··· 1107 1293 return -EINVAL; 1108 1294 } 1109 1295 1110 - dev->se_sub_dev->se_dev_attrib.block_size = block_size; 1296 + dev->dev_attrib.block_size = block_size; 1111 1297 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1112 1298 dev, block_size); 1113 1299 return 0; ··· 1120 1306 { 1121 1307 struct se_lun *lun_p; 1122 1308 int rc; 1123 - 1124 - if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { 1125 - pr_err("Unable to export struct se_device while dev_access_obj: %d\n", 1126 - atomic_read(&dev->dev_access_obj.obj_access_count)); 1127 - return ERR_PTR(-EACCES); 1128 - } 1129 1309 1130 1310 lun_p = core_tpg_pre_addlun(tpg, lun); 1131 1311 if (IS_ERR(lun_p)) ··· 1376 1568 kfree(lacl); 1377 1569 } 1378 1570 1571 + static void scsi_dump_inquiry(struct se_device *dev) 1572 + { 1573 + struct t10_wwn *wwn = &dev->t10_wwn; 1574 + char buf[17]; 1575 + int i, device_type; 1576 + /* 1577 + * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1578 + */ 1579 + for (i = 0; i < 8; i++) 
1580 + if (wwn->vendor[i] >= 0x20) 1581 + buf[i] = wwn->vendor[i]; 1582 + else 1583 + buf[i] = ' '; 1584 + buf[i] = '\0'; 1585 + pr_debug(" Vendor: %s\n", buf); 1586 + 1587 + for (i = 0; i < 16; i++) 1588 + if (wwn->model[i] >= 0x20) 1589 + buf[i] = wwn->model[i]; 1590 + else 1591 + buf[i] = ' '; 1592 + buf[i] = '\0'; 1593 + pr_debug(" Model: %s\n", buf); 1594 + 1595 + for (i = 0; i < 4; i++) 1596 + if (wwn->revision[i] >= 0x20) 1597 + buf[i] = wwn->revision[i]; 1598 + else 1599 + buf[i] = ' '; 1600 + buf[i] = '\0'; 1601 + pr_debug(" Revision: %s\n", buf); 1602 + 1603 + device_type = dev->transport->get_device_type(dev); 1604 + pr_debug(" Type: %s ", scsi_device_type(device_type)); 1605 + pr_debug(" ANSI SCSI revision: %02x\n", 1606 + dev->transport->get_device_rev(dev)); 1607 + } 1608 + 1609 + struct se_device *target_alloc_device(struct se_hba *hba, const char *name) 1610 + { 1611 + struct se_device *dev; 1612 + 1613 + dev = hba->transport->alloc_device(hba, name); 1614 + if (!dev) 1615 + return NULL; 1616 + 1617 + dev->se_hba = hba; 1618 + dev->transport = hba->transport; 1619 + 1620 + INIT_LIST_HEAD(&dev->dev_list); 1621 + INIT_LIST_HEAD(&dev->dev_sep_list); 1622 + INIT_LIST_HEAD(&dev->dev_tmr_list); 1623 + INIT_LIST_HEAD(&dev->delayed_cmd_list); 1624 + INIT_LIST_HEAD(&dev->state_list); 1625 + INIT_LIST_HEAD(&dev->qf_cmd_list); 1626 + spin_lock_init(&dev->stats_lock); 1627 + spin_lock_init(&dev->execute_task_lock); 1628 + spin_lock_init(&dev->delayed_cmd_lock); 1629 + spin_lock_init(&dev->dev_reservation_lock); 1630 + spin_lock_init(&dev->se_port_lock); 1631 + spin_lock_init(&dev->se_tmr_lock); 1632 + spin_lock_init(&dev->qf_cmd_lock); 1633 + atomic_set(&dev->dev_ordered_id, 0); 1634 + INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list); 1635 + spin_lock_init(&dev->t10_wwn.t10_vpd_lock); 1636 + INIT_LIST_HEAD(&dev->t10_pr.registration_list); 1637 + INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list); 1638 + spin_lock_init(&dev->t10_pr.registration_lock); 1639 + 
spin_lock_init(&dev->t10_pr.aptpl_reg_lock); 1640 + INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list); 1641 + spin_lock_init(&dev->t10_alua.tg_pt_gps_lock); 1642 + 1643 + dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; 1644 + dev->t10_wwn.t10_dev = dev; 1645 + dev->t10_alua.t10_dev = dev; 1646 + 1647 + dev->dev_attrib.da_dev = dev; 1648 + dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO; 1649 + dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; 1650 + dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; 1651 + dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; 1652 + dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; 1653 + dev->dev_attrib.emulate_tas = DA_EMULATE_TAS; 1654 + dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU; 1655 + dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS; 1656 + dev->dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; 1657 + dev->dev_attrib.emulate_alua = DA_EMULATE_ALUA; 1658 + dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 1659 + dev->dev_attrib.is_nonrot = DA_IS_NONROT; 1660 + dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 1661 + dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; 1662 + dev->dev_attrib.max_unmap_block_desc_count = 1663 + DA_MAX_UNMAP_BLOCK_DESC_COUNT; 1664 + dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; 1665 + dev->dev_attrib.unmap_granularity_alignment = 1666 + DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 1667 + dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; 1668 + dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; 1669 + 1670 + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1671 + dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; 1672 + else 1673 + dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; 1674 + 1675 + return dev; 1676 + } 1677 + 1678 + int target_configure_device(struct se_device *dev) 1679 + { 1680 + struct se_hba *hba = dev->se_hba; 1681 + int ret; 1682 + 1683 + if (dev->dev_flags & 
DF_CONFIGURED) { 1684 + pr_err("se_dev->se_dev_ptr already set for storage" 1685 + " object\n"); 1686 + return -EEXIST; 1687 + } 1688 + 1689 + ret = dev->transport->configure_device(dev); 1690 + if (ret) 1691 + goto out; 1692 + dev->dev_flags |= DF_CONFIGURED; 1693 + 1694 + /* 1695 + * XXX: there is not much point to have two different values here.. 1696 + */ 1697 + dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size; 1698 + dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth; 1699 + 1700 + /* 1701 + * Align max_hw_sectors down to PAGE_SIZE I/O transfers 1702 + */ 1703 + dev->dev_attrib.hw_max_sectors = 1704 + se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, 1705 + dev->dev_attrib.hw_block_size); 1706 + 1707 + dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); 1708 + dev->creation_time = get_jiffies_64(); 1709 + 1710 + core_setup_reservations(dev); 1711 + 1712 + ret = core_setup_alua(dev); 1713 + if (ret) 1714 + goto out; 1715 + 1716 + /* 1717 + * Startup the struct se_device processing thread 1718 + */ 1719 + dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, 1720 + dev->transport->name); 1721 + if (!dev->tmr_wq) { 1722 + pr_err("Unable to create tmr workqueue for %s\n", 1723 + dev->transport->name); 1724 + ret = -ENOMEM; 1725 + goto out_free_alua; 1726 + } 1727 + 1728 + /* 1729 + * Setup work_queue for QUEUE_FULL 1730 + */ 1731 + INIT_WORK(&dev->qf_work_queue, target_qf_do_work); 1732 + 1733 + /* 1734 + * Preload the initial INQUIRY const values if we are doing 1735 + * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI 1736 + * passthrough because this is being provided by the backend LLD. 
1737 + */ 1738 + if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1739 + strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); 1740 + strncpy(&dev->t10_wwn.model[0], 1741 + dev->transport->inquiry_prod, 16); 1742 + strncpy(&dev->t10_wwn.revision[0], 1743 + dev->transport->inquiry_rev, 4); 1744 + } 1745 + 1746 + scsi_dump_inquiry(dev); 1747 + 1748 + spin_lock(&hba->device_lock); 1749 + hba->dev_count++; 1750 + spin_unlock(&hba->device_lock); 1751 + return 0; 1752 + 1753 + out_free_alua: 1754 + core_alua_free_lu_gp_mem(dev); 1755 + out: 1756 + se_release_vpd_for_dev(dev); 1757 + return ret; 1758 + } 1759 + 1760 + void target_free_device(struct se_device *dev) 1761 + { 1762 + struct se_hba *hba = dev->se_hba; 1763 + 1764 + WARN_ON(!list_empty(&dev->dev_sep_list)); 1765 + 1766 + if (dev->dev_flags & DF_CONFIGURED) { 1767 + destroy_workqueue(dev->tmr_wq); 1768 + 1769 + spin_lock(&hba->device_lock); 1770 + hba->dev_count--; 1771 + spin_unlock(&hba->device_lock); 1772 + } 1773 + 1774 + core_alua_free_lu_gp_mem(dev); 1775 + core_scsi3_free_all_registrations(dev); 1776 + se_release_vpd_for_dev(dev); 1777 + 1778 + dev->transport->free_device(dev); 1779 + } 1780 + 1379 1781 int core_dev_setup_virtual_lun0(void) 1380 1782 { 1381 1783 struct se_hba *hba; 1382 1784 struct se_device *dev; 1383 - struct se_subsystem_dev *se_dev = NULL; 1384 - struct se_subsystem_api *t; 1385 1785 char buf[16]; 1386 1786 int ret; 1387 1787 ··· 1597 1581 if (IS_ERR(hba)) 1598 1582 return PTR_ERR(hba); 1599 1583 1600 - lun0_hba = hba; 1601 - t = hba->transport; 1602 - 1603 - se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); 1604 - if (!se_dev) { 1605 - pr_err("Unable to allocate memory for" 1606 - " struct se_subsystem_dev\n"); 1584 + dev = target_alloc_device(hba, "virt_lun0"); 1585 + if (!dev) { 1607 1586 ret = -ENOMEM; 1608 - goto out; 1587 + goto out_free_hba; 1609 1588 } 1610 - INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); 1611 - 
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); 1612 - INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); 1613 - INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); 1614 - spin_lock_init(&se_dev->t10_pr.registration_lock); 1615 - spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); 1616 - INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); 1617 - spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); 1618 - spin_lock_init(&se_dev->se_dev_lock); 1619 - se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; 1620 - se_dev->t10_wwn.t10_sub_dev = se_dev; 1621 - se_dev->t10_alua.t10_sub_dev = se_dev; 1622 - se_dev->se_dev_attrib.da_sub_dev = se_dev; 1623 - se_dev->se_dev_hba = hba; 1624 - 1625 - se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); 1626 - if (!se_dev->se_dev_su_ptr) { 1627 - pr_err("Unable to locate subsystem dependent pointer" 1628 - " from allocate_virtdevice()\n"); 1629 - ret = -ENOMEM; 1630 - goto out; 1631 - } 1632 - lun0_su_dev = se_dev; 1633 1589 1634 1590 memset(buf, 0, 16); 1635 1591 sprintf(buf, "rd_pages=8"); 1636 - t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); 1592 + hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf)); 1637 1593 1638 - dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); 1639 - if (IS_ERR(dev)) { 1640 - ret = PTR_ERR(dev); 1641 - goto out; 1642 - } 1643 - se_dev->se_dev_ptr = dev; 1594 + ret = target_configure_device(dev); 1595 + if (ret) 1596 + goto out_free_se_dev; 1597 + 1598 + lun0_hba = hba; 1644 1599 g_lun0_dev = dev; 1645 - 1646 1600 return 0; 1647 - out: 1648 - lun0_su_dev = NULL; 1649 - kfree(se_dev); 1650 - if (lun0_hba) { 1651 - core_delete_hba(lun0_hba); 1652 - lun0_hba = NULL; 1653 - } 1601 + 1602 + out_free_se_dev: 1603 + target_free_device(dev); 1604 + out_free_hba: 1605 + core_delete_hba(hba); 1654 1606 return ret; 1655 1607 } 1656 1608 ··· 1626 1642 void core_dev_release_virtual_lun0(void) 1627 1643 { 1628 1644 struct se_hba *hba = lun0_hba; 1629 - struct se_subsystem_dev *su_dev = 
lun0_su_dev; 1630 1645 1631 1646 if (!hba) 1632 1647 return; 1633 1648 1634 1649 if (g_lun0_dev) 1635 - se_free_virtual_device(g_lun0_dev, hba); 1636 - 1637 - kfree(su_dev); 1650 + target_free_device(g_lun0_dev); 1638 1651 core_delete_hba(hba); 1639 1652 }
+2 -12
drivers/target/target_core_fabric_configfs.c
··· 734 734 struct config_item *se_dev_ci) 735 735 { 736 736 struct config_item *tpg_ci; 737 - struct se_device *dev; 738 737 struct se_lun *lun = container_of(to_config_group(lun_ci), 739 738 struct se_lun, lun_group); 740 739 struct se_lun *lun_p; 741 740 struct se_portal_group *se_tpg; 742 - struct se_subsystem_dev *se_dev = container_of( 743 - to_config_group(se_dev_ci), struct se_subsystem_dev, 744 - se_dev_group); 741 + struct se_device *dev = 742 + container_of(to_config_group(se_dev_ci), struct se_device, dev_group); 745 743 struct target_fabric_configfs *tf; 746 744 int ret; 747 745 ··· 751 753 if (lun->lun_se_dev != NULL) { 752 754 pr_err("Port Symlink already exists\n"); 753 755 return -EEXIST; 754 - } 755 - 756 - dev = se_dev->se_dev_ptr; 757 - if (!dev) { 758 - pr_err("Unable to locate struct se_device pointer from" 759 - " %s\n", config_item_name(se_dev_ci)); 760 - ret = -ENODEV; 761 - goto out; 762 756 } 763 757 764 758 lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
+50 -88
drivers/target/target_core_file.c
··· 41 41 42 42 #include "target_core_file.h" 43 43 44 - static struct se_subsystem_api fileio_template; 44 + static inline struct fd_dev *FD_DEV(struct se_device *dev) 45 + { 46 + return container_of(dev, struct fd_dev, dev); 47 + } 45 48 46 49 /* fd_attach_hba(): (Part of se_subsystem_api_t template) 47 50 * ··· 85 82 hba->hba_ptr = NULL; 86 83 } 87 84 88 - static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) 85 + static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name) 89 86 { 90 87 struct fd_dev *fd_dev; 91 88 struct fd_host *fd_host = hba->hba_ptr; ··· 100 97 101 98 pr_debug("FILEIO: Allocated fd_dev for %p\n", name); 102 99 103 - return fd_dev; 100 + return &fd_dev->dev; 104 101 } 105 102 106 - /* fd_create_virtdevice(): (Part of se_subsystem_api_t template) 107 - * 108 - * 109 - */ 110 - static struct se_device *fd_create_virtdevice( 111 - struct se_hba *hba, 112 - struct se_subsystem_dev *se_dev, 113 - void *p) 103 + static int fd_configure_device(struct se_device *dev) 114 104 { 115 - struct se_device *dev; 116 - struct se_dev_limits dev_limits; 117 - struct queue_limits *limits; 118 - struct fd_dev *fd_dev = p; 119 - struct fd_host *fd_host = hba->hba_ptr; 105 + struct fd_dev *fd_dev = FD_DEV(dev); 106 + struct fd_host *fd_host = dev->se_hba->hba_ptr; 120 107 struct file *file; 121 108 struct inode *inode = NULL; 122 - int dev_flags = 0, flags, ret = -EINVAL; 109 + int flags, ret = -EINVAL; 123 110 124 - memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 111 + if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 112 + pr_err("Missing fd_dev_name=\n"); 113 + return -EINVAL; 114 + } 125 115 126 116 /* 127 117 * Use O_DSYNC by default instead of O_SYNC to forgo syncing 128 118 * of pure timestamp updates. 
129 119 */ 130 120 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; 121 + 131 122 /* 132 123 * Optionally allow fd_buffered_io=1 to be enabled for people 133 124 * who want use the fs buffer cache as an WriteCache mechanism. ··· 151 154 */ 152 155 inode = file->f_mapping->host; 153 156 if (S_ISBLK(inode->i_mode)) { 154 - struct request_queue *q; 157 + struct request_queue *q = bdev_get_queue(inode->i_bdev); 155 158 unsigned long long dev_size; 156 - /* 157 - * Setup the local scope queue_limits from struct request_queue->limits 158 - * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 159 - */ 160 - q = bdev_get_queue(inode->i_bdev); 161 - limits = &dev_limits.limits; 162 - limits->logical_block_size = bdev_logical_block_size(inode->i_bdev); 163 - limits->max_hw_sectors = queue_max_hw_sectors(q); 164 - limits->max_sectors = queue_max_sectors(q); 159 + 160 + dev->dev_attrib.hw_block_size = 161 + bdev_logical_block_size(inode->i_bdev); 162 + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); 163 + 165 164 /* 166 165 * Determine the number of bytes from i_size_read() minus 167 166 * one (1) logical sector from underlying struct block_device 168 167 */ 169 - fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); 170 168 dev_size = (i_size_read(file->f_mapping->host) - 171 169 fd_dev->fd_block_size); 172 170 ··· 177 185 goto fail; 178 186 } 179 187 180 - limits = &dev_limits.limits; 181 - limits->logical_block_size = FD_BLOCKSIZE; 182 - limits->max_hw_sectors = FD_MAX_SECTORS; 183 - limits->max_sectors = FD_MAX_SECTORS; 184 - fd_dev->fd_block_size = FD_BLOCKSIZE; 188 + dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; 189 + dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; 185 190 } 186 191 187 - dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 188 - dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; 192 + fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; 189 193 190 - dev = transport_add_device_to_core_hba(hba, 
&fileio_template, 191 - se_dev, dev_flags, fd_dev, 192 - &dev_limits, "FILEIO", FD_VERSION); 193 - if (!dev) 194 - goto fail; 194 + dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 195 195 196 196 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 197 197 pr_debug("FILEIO: Forcing setting of emulate_write_cache=1" 198 198 " with FDBD_HAS_BUFFERED_IO_WCE\n"); 199 - dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1; 199 + dev->dev_attrib.emulate_write_cache = 1; 200 200 } 201 201 202 202 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; ··· 198 214 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, 199 215 fd_dev->fd_dev_name, fd_dev->fd_dev_size); 200 216 201 - return dev; 217 + return 0; 202 218 fail: 203 219 if (fd_dev->fd_file) { 204 220 filp_close(fd_dev->fd_file, NULL); 205 221 fd_dev->fd_file = NULL; 206 222 } 207 - return ERR_PTR(ret); 223 + return ret; 208 224 } 209 225 210 - /* fd_free_device(): (Part of se_subsystem_api_t template) 211 - * 212 - * 213 - */ 214 - static void fd_free_device(void *p) 226 + static void fd_free_device(struct se_device *dev) 215 227 { 216 - struct fd_dev *fd_dev = p; 228 + struct fd_dev *fd_dev = FD_DEV(dev); 217 229 218 230 if (fd_dev->fd_file) { 219 231 filp_close(fd_dev->fd_file, NULL); ··· 223 243 u32 sgl_nents) 224 244 { 225 245 struct se_device *se_dev = cmd->se_dev; 226 - struct fd_dev *dev = se_dev->dev_ptr; 246 + struct fd_dev *dev = FD_DEV(se_dev); 227 247 struct file *fd = dev->fd_file; 228 248 struct scatterlist *sg; 229 249 struct iovec *iov; 230 250 mm_segment_t old_fs; 231 - loff_t pos = (cmd->t_task_lba * 232 - se_dev->se_sub_dev->se_dev_attrib.block_size); 251 + loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size); 233 252 int ret = 0, i; 234 253 235 254 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); ··· 275 296 u32 sgl_nents) 276 297 { 277 298 struct se_device *se_dev = cmd->se_dev; 278 - struct fd_dev *dev = se_dev->dev_ptr; 299 + struct fd_dev *dev = 
FD_DEV(se_dev); 279 300 struct file *fd = dev->fd_file; 280 301 struct scatterlist *sg; 281 302 struct iovec *iov; 282 303 mm_segment_t old_fs; 283 - loff_t pos = (cmd->t_task_lba * 284 - se_dev->se_sub_dev->se_dev_attrib.block_size); 304 + loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size); 285 305 int ret, i = 0; 286 306 287 307 iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL); ··· 312 334 static int fd_execute_sync_cache(struct se_cmd *cmd) 313 335 { 314 336 struct se_device *dev = cmd->se_dev; 315 - struct fd_dev *fd_dev = dev->dev_ptr; 337 + struct fd_dev *fd_dev = FD_DEV(dev); 316 338 int immed = (cmd->t_task_cdb[1] & 0x2); 317 339 loff_t start, end; 318 340 int ret; ··· 331 353 start = 0; 332 354 end = LLONG_MAX; 333 355 } else { 334 - start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; 356 + start = cmd->t_task_lba * dev->dev_attrib.block_size; 335 357 if (cmd->data_length) 336 358 end = start + cmd->data_length; 337 359 else ··· 377 399 * Allow this to happen independent of WCE=0 setting. 
378 400 */ 379 401 if (ret > 0 && 380 - dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 402 + dev->dev_attrib.emulate_fua_write > 0 && 381 403 (cmd->se_cmd_flags & SCF_FUA)) { 382 - struct fd_dev *fd_dev = dev->dev_ptr; 404 + struct fd_dev *fd_dev = FD_DEV(dev); 383 405 loff_t start = cmd->t_task_lba * 384 - dev->se_sub_dev->se_dev_attrib.block_size; 406 + dev->dev_attrib.block_size; 385 407 loff_t end = start + cmd->data_length; 386 408 387 409 vfs_fsync_range(fd_dev->fd_file, start, end, 1); ··· 408 430 {Opt_err, NULL} 409 431 }; 410 432 411 - static ssize_t fd_set_configfs_dev_params( 412 - struct se_hba *hba, 413 - struct se_subsystem_dev *se_dev, 414 - const char *page, ssize_t count) 433 + static ssize_t fd_set_configfs_dev_params(struct se_device *dev, 434 + const char *page, ssize_t count) 415 435 { 416 - struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 436 + struct fd_dev *fd_dev = FD_DEV(dev); 417 437 char *orig, *ptr, *arg_p, *opts; 418 438 substring_t args[MAX_OPT_ARGS]; 419 439 int ret = 0, arg, token; ··· 478 502 return (!ret) ? 
count : ret; 479 503 } 480 504 481 - static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 505 + static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b) 482 506 { 483 - struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 484 - 485 - if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { 486 - pr_err("Missing fd_dev_name=\n"); 487 - return -EINVAL; 488 - } 489 - 490 - return 0; 491 - } 492 - 493 - static ssize_t fd_show_configfs_dev_params( 494 - struct se_hba *hba, 495 - struct se_subsystem_dev *se_dev, 496 - char *b) 497 - { 498 - struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 507 + struct fd_dev *fd_dev = FD_DEV(dev); 499 508 ssize_t bl = 0; 500 509 501 510 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); ··· 511 550 512 551 static sector_t fd_get_blocks(struct se_device *dev) 513 552 { 514 - struct fd_dev *fd_dev = dev->dev_ptr; 553 + struct fd_dev *fd_dev = FD_DEV(dev); 515 554 struct file *f = fd_dev->fd_file; 516 555 struct inode *i = f->f_mapping->host; 517 556 unsigned long long dev_size; ··· 525 564 else 526 565 dev_size = fd_dev->fd_dev_size; 527 566 528 - return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size); 567 + return div_u64(dev_size, dev->dev_attrib.block_size); 529 568 } 530 569 531 570 static struct spc_ops fd_spc_ops = { ··· 540 579 541 580 static struct se_subsystem_api fileio_template = { 542 581 .name = "fileio", 582 + .inquiry_prod = "FILEIO", 583 + .inquiry_rev = FD_VERSION, 543 584 .owner = THIS_MODULE, 544 585 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 545 586 .attach_hba = fd_attach_hba, 546 587 .detach_hba = fd_detach_hba, 547 - .allocate_virtdevice = fd_allocate_virtdevice, 548 - .create_virtdevice = fd_create_virtdevice, 588 + .alloc_device = fd_alloc_device, 589 + .configure_device = fd_configure_device, 549 590 .free_device = fd_free_device, 550 591 .parse_cdb = fd_parse_cdb, 551 - .check_configfs_dev_params = fd_check_configfs_dev_params, 552 592 
.set_configfs_dev_params = fd_set_configfs_dev_params, 553 593 .show_configfs_dev_params = fd_show_configfs_dev_params, 554 594 .get_device_rev = fd_get_device_rev,
+2
drivers/target/target_core_file.h
··· 17 17 #define FDBD_HAS_BUFFERED_IO_WCE 0x04 18 18 19 19 struct fd_dev { 20 + struct se_device dev; 21 + 20 22 u32 fbd_flags; 21 23 unsigned char fd_dev_name[FD_MAX_DEV_NAME]; 22 24 /* Unique Ramdisk Device ID in Ramdisk HBA */
+1 -3
drivers/target/target_core_hba.c
··· 113 113 return ERR_PTR(-ENOMEM); 114 114 } 115 115 116 - INIT_LIST_HEAD(&hba->hba_dev_list); 117 116 spin_lock_init(&hba->device_lock); 118 117 mutex_init(&hba->hba_access_mutex); 119 118 ··· 151 152 int 152 153 core_delete_hba(struct se_hba *hba) 153 154 { 154 - if (!list_empty(&hba->hba_dev_list)) 155 - dump_stack(); 155 + WARN_ON(hba->dev_count); 156 156 157 157 hba->transport->detach_hba(hba); 158 158
+74 -111
drivers/target/target_core_iblock.c
··· 50 50 #define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */ 51 51 #define IBLOCK_BIO_POOL_SIZE 128 52 52 53 + static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev) 54 + { 55 + return container_of(dev, struct iblock_dev, dev); 56 + } 57 + 58 + 53 59 static struct se_subsystem_api iblock_template; 54 60 55 61 static void iblock_bio_done(struct bio *, int); ··· 76 70 { 77 71 } 78 72 79 - static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) 73 + static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name) 80 74 { 81 75 struct iblock_dev *ib_dev = NULL; 82 76 ··· 88 82 89 83 pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); 90 84 91 - return ib_dev; 85 + return &ib_dev->dev; 92 86 } 93 87 94 - static struct se_device *iblock_create_virtdevice( 95 - struct se_hba *hba, 96 - struct se_subsystem_dev *se_dev, 97 - void *p) 88 + static int iblock_configure_device(struct se_device *dev) 98 89 { 99 - struct iblock_dev *ib_dev = p; 100 - struct se_device *dev; 101 - struct se_dev_limits dev_limits; 102 - struct block_device *bd = NULL; 90 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 103 91 struct request_queue *q; 104 - struct queue_limits *limits; 105 - u32 dev_flags = 0; 92 + struct block_device *bd = NULL; 106 93 fmode_t mode; 107 - int ret = -EINVAL; 94 + int ret = -ENOMEM; 108 95 109 - if (!ib_dev) { 110 - pr_err("Unable to locate struct iblock_dev parameter\n"); 111 - return ERR_PTR(ret); 96 + if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) { 97 + pr_err("Missing udev_path= parameters for IBLOCK\n"); 98 + return -EINVAL; 112 99 } 113 - memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 114 100 115 101 ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0); 116 102 if (!ib_dev->ibd_bio_set) { 117 - pr_err("IBLOCK: Unable to create bioset()\n"); 118 - return ERR_PTR(-ENOMEM); 103 + pr_err("IBLOCK: Unable to create bioset\n"); 104 + goto out; 119 105 } 120 - pr_debug("IBLOCK: 
Created bio_set()\n"); 121 - /* 122 - * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path 123 - * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. 124 - */ 106 + 125 107 pr_debug( "IBLOCK: Claiming struct block_device: %s\n", 126 108 ib_dev->ibd_udev_path); 127 109 ··· 120 126 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); 121 127 if (IS_ERR(bd)) { 122 128 ret = PTR_ERR(bd); 123 - goto failed; 129 + goto out_free_bioset; 124 130 } 125 - /* 126 - * Setup the local scope queue_limits from struct request_queue->limits 127 - * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 128 - */ 129 - q = bdev_get_queue(bd); 130 - limits = &dev_limits.limits; 131 - limits->logical_block_size = bdev_logical_block_size(bd); 132 - limits->max_hw_sectors = UINT_MAX; 133 - limits->max_sectors = UINT_MAX; 134 - dev_limits.hw_queue_depth = q->nr_requests; 135 - dev_limits.queue_depth = q->nr_requests; 136 - 137 131 ib_dev->ibd_bd = bd; 138 132 139 - dev = transport_add_device_to_core_hba(hba, 140 - &iblock_template, se_dev, dev_flags, ib_dev, 141 - &dev_limits, "IBLOCK", IBLOCK_VERSION); 142 - if (!dev) 143 - goto failed; 133 + q = bdev_get_queue(bd); 134 + 135 + dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); 136 + dev->dev_attrib.hw_max_sectors = UINT_MAX; 137 + dev->dev_attrib.hw_queue_depth = q->nr_requests; 144 138 145 139 /* 146 140 * Check if the underlying struct block_device request_queue supports ··· 136 154 * in ATA and we need to set TPE=1 137 155 */ 138 156 if (blk_queue_discard(q)) { 139 - dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = 157 + dev->dev_attrib.max_unmap_lba_count = 140 158 q->limits.max_discard_sectors; 159 + 141 160 /* 142 161 * Currently hardcoded to 1 in Linux/SCSI code.. 
143 162 */ 144 - dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; 145 - dev->se_sub_dev->se_dev_attrib.unmap_granularity = 163 + dev->dev_attrib.max_unmap_block_desc_count = 1; 164 + dev->dev_attrib.unmap_granularity = 146 165 q->limits.discard_granularity >> 9; 147 - dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 166 + dev->dev_attrib.unmap_granularity_alignment = 148 167 q->limits.discard_alignment; 149 168 150 169 pr_debug("IBLOCK: BLOCK Discard support available," ··· 153 170 } 154 171 155 172 if (blk_queue_nonrot(q)) 156 - dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; 173 + dev->dev_attrib.is_nonrot = 1; 174 + return 0; 157 175 158 - return dev; 159 - 160 - failed: 161 - if (ib_dev->ibd_bio_set) { 162 - bioset_free(ib_dev->ibd_bio_set); 163 - ib_dev->ibd_bio_set = NULL; 164 - } 165 - ib_dev->ibd_bd = NULL; 166 - return ERR_PTR(ret); 176 + out_free_bioset: 177 + bioset_free(ib_dev->ibd_bio_set); 178 + ib_dev->ibd_bio_set = NULL; 179 + out: 180 + return ret; 167 181 } 168 182 169 - static void iblock_free_device(void *p) 183 + static void iblock_free_device(struct se_device *dev) 170 184 { 171 - struct iblock_dev *ib_dev = p; 185 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 172 186 173 187 if (ib_dev->ibd_bd != NULL) 174 188 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); ··· 183 203 bdev_logical_block_size(bd)) - 1); 184 204 u32 block_size = bdev_logical_block_size(bd); 185 205 186 - if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) 206 + if (block_size == dev->dev_attrib.block_size) 187 207 return blocks_long; 188 208 189 209 switch (block_size) { 190 210 case 4096: 191 - switch (dev->se_sub_dev->se_dev_attrib.block_size) { 211 + switch (dev->dev_attrib.block_size) { 192 212 case 2048: 193 213 blocks_long <<= 1; 194 214 break; ··· 202 222 } 203 223 break; 204 224 case 2048: 205 - switch (dev->se_sub_dev->se_dev_attrib.block_size) { 225 + switch (dev->dev_attrib.block_size) { 206 226 case 4096: 207 227 
blocks_long >>= 1; 208 228 break; ··· 217 237 } 218 238 break; 219 239 case 1024: 220 - switch (dev->se_sub_dev->se_dev_attrib.block_size) { 240 + switch (dev->dev_attrib.block_size) { 221 241 case 4096: 222 242 blocks_long >>= 2; 223 243 break; ··· 232 252 } 233 253 break; 234 254 case 512: 235 - switch (dev->se_sub_dev->se_dev_attrib.block_size) { 255 + switch (dev->dev_attrib.block_size) { 236 256 case 4096: 237 257 blocks_long >>= 3; 238 258 break; ··· 279 299 */ 280 300 static int iblock_execute_sync_cache(struct se_cmd *cmd) 281 301 { 282 - struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 302 + struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); 283 303 int immed = (cmd->t_task_cdb[1] & 0x2); 284 304 struct bio *bio; 285 305 ··· 302 322 static int iblock_execute_unmap(struct se_cmd *cmd) 303 323 { 304 324 struct se_device *dev = cmd->se_dev; 305 - struct iblock_dev *ibd = dev->dev_ptr; 325 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 306 326 unsigned char *buf, *ptr = NULL; 307 327 sector_t lba; 308 328 int size; ··· 329 349 else 330 350 size = bd_dl; 331 351 332 - if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 352 + if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) { 333 353 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 334 354 ret = -EINVAL; 335 355 goto err; ··· 346 366 pr_debug("UNMAP: Using lba: %llu and range: %u\n", 347 367 (unsigned long long)lba, range); 348 368 349 - if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) { 369 + if (range > dev->dev_attrib.max_unmap_lba_count) { 350 370 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 351 371 ret = -EINVAL; 352 372 goto err; ··· 358 378 goto err; 359 379 } 360 380 361 - ret = blkdev_issue_discard(ibd->ibd_bd, lba, range, 381 + ret = blkdev_issue_discard(ib_dev->ibd_bd, lba, range, 362 382 GFP_KERNEL, 0); 363 383 if (ret < 0) { 364 384 pr_err("blkdev_issue_discard() failed: %d\n", ··· 379 399 380 400 static int 
iblock_execute_write_same(struct se_cmd *cmd) 381 401 { 382 - struct iblock_dev *ibd = cmd->se_dev->dev_ptr; 402 + struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); 383 403 int ret; 384 404 385 - ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba, 405 + ret = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba, 386 406 spc_get_write_same_sectors(cmd), GFP_KERNEL, 387 407 0); 388 408 if (ret < 0) { ··· 405 425 {Opt_err, NULL} 406 426 }; 407 427 408 - static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, 409 - struct se_subsystem_dev *se_dev, 410 - const char *page, ssize_t count) 428 + static ssize_t iblock_set_configfs_dev_params(struct se_device *dev, 429 + const char *page, ssize_t count) 411 430 { 412 - struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; 431 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 413 432 char *orig, *ptr, *arg_p, *opts; 414 433 substring_t args[MAX_OPT_ARGS]; 415 434 int ret = 0, token; ··· 470 491 return (!ret) ? count : ret; 471 492 } 472 493 473 - static ssize_t iblock_check_configfs_dev_params( 474 - struct se_hba *hba, 475 - struct se_subsystem_dev *se_dev) 494 + static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b) 476 495 { 477 - struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 478 - 479 - if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { 480 - pr_err("Missing udev_path= parameters for IBLOCK\n"); 481 - return -EINVAL; 482 - } 483 - 484 - return 0; 485 - } 486 - 487 - static ssize_t iblock_show_configfs_dev_params( 488 - struct se_hba *hba, 489 - struct se_subsystem_dev *se_dev, 490 - char *b) 491 - { 492 - struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 493 - struct block_device *bd = ibd->ibd_bd; 496 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 497 + struct block_device *bd = ib_dev->ibd_bd; 494 498 char buf[BDEVNAME_SIZE]; 495 499 ssize_t bl = 0; 496 500 497 501 if (bd) 498 502 bl += sprintf(b + bl, "iBlock device: %s", 499 503 bdevname(bd, buf)); 500 - if (ibd->ibd_flags & 
IBDF_HAS_UDEV_PATH) 504 + if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH) 501 505 bl += sprintf(b + bl, " UDEV PATH: %s", 502 - ibd->ibd_udev_path); 503 - bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly); 506 + ib_dev->ibd_udev_path); 507 + bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly); 504 508 505 509 bl += sprintf(b + bl, " "); 506 510 if (bd) { 507 511 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", 508 512 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? 509 - "" : (bd->bd_holder == ibd) ? 513 + "" : (bd->bd_holder == ib_dev) ? 510 514 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 511 515 } else { 512 516 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); ··· 518 556 static struct bio * 519 557 iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) 520 558 { 521 - struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 559 + struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); 522 560 struct bio *bio; 523 561 524 562 /* ··· 573 611 * Force data to disk if we pretend to not have a volatile 574 612 * write cache, or the initiator set the Force Unit Access bit. 575 613 */ 576 - if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || 577 - (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 614 + if (dev->dev_attrib.emulate_write_cache == 0 || 615 + (dev->dev_attrib.emulate_fua_write > 0 && 578 616 (cmd->se_cmd_flags & SCF_FUA))) 579 617 rw = WRITE_FUA; 580 618 else ··· 587 625 * Convert the blocksize advertised to the initiator to the 512 byte 588 626 * units unconditionally used by the Linux block layer. 
589 627 */ 590 - if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) 628 + if (dev->dev_attrib.block_size == 4096) 591 629 block_lba = (cmd->t_task_lba << 3); 592 - else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) 630 + else if (dev->dev_attrib.block_size == 2048) 593 631 block_lba = (cmd->t_task_lba << 2); 594 - else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) 632 + else if (dev->dev_attrib.block_size == 1024) 595 633 block_lba = (cmd->t_task_lba << 1); 596 - else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) 634 + else if (dev->dev_attrib.block_size == 512) 597 635 block_lba = cmd->t_task_lba; 598 636 else { 599 637 pr_err("Unsupported SCSI -> BLOCK LBA conversion:" 600 - " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 638 + " %u\n", dev->dev_attrib.block_size); 601 639 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 602 640 return -ENOSYS; 603 641 } ··· 676 714 677 715 static sector_t iblock_get_blocks(struct se_device *dev) 678 716 { 679 - struct iblock_dev *ibd = dev->dev_ptr; 680 - struct block_device *bd = ibd->ibd_bd; 717 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 718 + struct block_device *bd = ib_dev->ibd_bd; 681 719 struct request_queue *q = bdev_get_queue(bd); 682 720 683 721 return iblock_emulate_read_cap_with_block_size(dev, bd, q); ··· 723 761 724 762 static struct se_subsystem_api iblock_template = { 725 763 .name = "iblock", 764 + .inquiry_prod = "IBLOCK", 765 + .inquiry_rev = IBLOCK_VERSION, 726 766 .owner = THIS_MODULE, 727 767 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 728 768 .attach_hba = iblock_attach_hba, 729 769 .detach_hba = iblock_detach_hba, 730 - .allocate_virtdevice = iblock_allocate_virtdevice, 731 - .create_virtdevice = iblock_create_virtdevice, 770 + .alloc_device = iblock_alloc_device, 771 + .configure_device = iblock_configure_device, 732 772 .free_device = iblock_free_device, 733 773 .parse_cdb = iblock_parse_cdb, 734 - .check_configfs_dev_params = 
iblock_check_configfs_dev_params, 735 774 .set_configfs_dev_params = iblock_set_configfs_dev_params, 736 775 .show_configfs_dev_params = iblock_show_configfs_dev_params, 737 776 .get_device_rev = iblock_get_device_rev,
+1
drivers/target/target_core_iblock.h
··· 14 14 #define IBDF_HAS_UDEV_PATH 0x01 15 15 16 16 struct iblock_dev { 17 + struct se_device dev; 17 18 unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; 18 19 u32 ibd_flags; 19 20 struct bio_set *ibd_bio_set;
+5 -7
drivers/target/target_core_internal.h
··· 20 20 void core_dev_unexport(struct se_device *, struct se_portal_group *, 21 21 struct se_lun *); 22 22 int target_report_luns(struct se_cmd *); 23 - void se_release_device_for_hba(struct se_device *); 24 - void se_release_vpd_for_dev(struct se_device *); 25 - int se_free_virtual_device(struct se_device *, struct se_hba *); 26 - int se_dev_check_online(struct se_device *); 27 - int se_dev_check_shutdown(struct se_device *); 28 - void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *); 29 23 int se_dev_set_task_timeout(struct se_device *, u32); 30 24 int se_dev_set_max_unmap_lba_count(struct se_device *, u32); 31 25 int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); ··· 54 60 struct se_lun_acl *lacl); 55 61 int core_dev_setup_virtual_lun0(void); 56 62 void core_dev_release_virtual_lun0(void); 63 + struct se_device *target_alloc_device(struct se_hba *hba, const char *name); 64 + int target_configure_device(struct se_device *dev); 65 + void target_free_device(struct se_device *); 57 66 58 67 /* target_core_hba.c */ 59 68 struct se_hba *core_alloc_hba(const char *, u32, u32); ··· 103 106 int transport_clear_lun_from_sessions(struct se_lun *); 104 107 void transport_send_task_abort(struct se_cmd *); 105 108 int target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 109 + void target_qf_do_work(struct work_struct *work); 106 110 107 111 /* target_core_stat.c */ 108 - void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); 112 + void target_stat_setup_dev_default_groups(struct se_device *); 109 113 void target_stat_setup_port_default_groups(struct se_lun *); 110 114 void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); 111 115
+67 -81
drivers/target/target_core_pr.c
··· 103 103 spin_unlock(&dev->dev_reservation_lock); 104 104 return -EINVAL; 105 105 } 106 - if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { 106 + if (!(dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID)) { 107 107 spin_unlock(&dev->dev_reservation_lock); 108 108 return 0; 109 109 } ··· 120 120 static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd) 121 121 { 122 122 struct se_session *se_sess = cmd->se_sess; 123 - struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 123 + struct se_device *dev = cmd->se_dev; 124 124 struct t10_pr_registration *pr_reg; 125 - struct t10_reservation *pr_tmpl = &su_dev->t10_pr; 126 - int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); 125 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 126 + int crh = (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); 127 127 int conflict = 0; 128 128 129 129 if (!crh) ··· 223 223 goto out_unlock; 224 224 225 225 dev->dev_reserved_node_acl = NULL; 226 - dev->dev_flags &= ~DF_SPC2_RESERVATIONS; 227 - if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) { 226 + dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS; 227 + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) { 228 228 dev->dev_res_bin_isid = 0; 229 - dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; 229 + dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID; 230 230 } 231 231 tpg = sess->se_tpg; 232 232 pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" ··· 292 292 } 293 293 294 294 dev->dev_reserved_node_acl = sess->se_node_acl; 295 - dev->dev_flags |= DF_SPC2_RESERVATIONS; 295 + dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS; 296 296 if (sess->sess_bin_isid != 0) { 297 297 dev->dev_res_bin_isid = sess->sess_bin_isid; 298 - dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; 298 + dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID; 299 299 } 300 300 pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" 301 301 " for %s\n", 
tpg->se_tpg_tfo->get_fabric_name(), ··· 333 333 /* 334 334 * A legacy SPC-2 reservation is being held. 335 335 */ 336 - if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) 336 + if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 337 337 return core_scsi2_reservation_seq_non_holder(cmd, 338 338 cdb, pr_reg_type); 339 339 ··· 565 565 566 566 static u32 core_scsi3_pr_generation(struct se_device *dev) 567 567 { 568 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 569 568 u32 prg; 569 + 570 570 /* 571 571 * PRGeneration field shall contain the value of a 32-bit wrapping 572 572 * counter mainted by the device server. ··· 577 577 * See spc4r17 section 6.3.12 READ_KEYS service action 578 578 */ 579 579 spin_lock(&dev->dev_reservation_lock); 580 - prg = su_dev->t10_pr.pr_generation++; 580 + prg = dev->t10_pr.pr_generation++; 581 581 spin_unlock(&dev->dev_reservation_lock); 582 582 583 583 return prg; ··· 596 596 /* 597 597 * A legacy SPC-2 reservation is being held. 598 598 */ 599 - if (dev->dev_flags & DF_SPC2_RESERVATIONS) 599 + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 600 600 return core_scsi2_reservation_check(cmd, pr_reg_type); 601 601 602 602 spin_lock(&dev->dev_reservation_lock); ··· 636 636 int all_tg_pt, 637 637 int aptpl) 638 638 { 639 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 640 639 struct t10_pr_registration *pr_reg; 641 640 642 641 pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); ··· 644 645 return NULL; 645 646 } 646 647 647 - pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, 648 + pr_reg->pr_aptpl_buf = kzalloc(dev->t10_pr.pr_aptpl_buf_len, 648 649 GFP_ATOMIC); 649 650 if (!pr_reg->pr_aptpl_buf) { 650 651 pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n"); ··· 928 929 struct se_dev_entry *deve) 929 930 { 930 931 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 931 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 932 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 932 933 
unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; 933 934 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; 934 935 u16 tpgt; ··· 995 996 struct se_lun *lun, 996 997 struct se_lun_acl *lun_acl) 997 998 { 998 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 999 999 struct se_node_acl *nacl = lun_acl->se_lun_nacl; 1000 1000 struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun]; 1001 1001 1002 - if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1002 + if (dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) 1003 1003 return 0; 1004 1004 1005 1005 return __core_scsi3_check_aptpl_registration(dev, tpg, lun, ··· 1049 1051 int register_type, 1050 1052 int register_move) 1051 1053 { 1052 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1053 1054 struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; 1054 1055 struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; 1055 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1056 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 1056 1057 1057 1058 /* 1058 1059 * Increment PRgeneration counter for struct se_device upon a successful ··· 1063 1066 * for the REGISTER. 1064 1067 */ 1065 1068 pr_reg->pr_res_generation = (register_move) ? 1066 - su_dev->t10_pr.pr_generation++ : 1069 + dev->t10_pr.pr_generation++ : 1067 1070 core_scsi3_pr_generation(dev); 1068 1071 1069 1072 spin_lock(&pr_tmpl->registration_lock); ··· 1132 1135 struct se_node_acl *nacl, 1133 1136 unsigned char *isid) 1134 1137 { 1135 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1138 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 1136 1139 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 1137 1140 struct se_portal_group *tpg; 1138 1141 ··· 1157 1160 * for fabric modules (iSCSI) requiring them. 
1158 1161 */ 1159 1162 if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 1160 - if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) 1163 + if (dev->dev_attrib.enforce_pr_isids) 1161 1164 continue; 1162 1165 } 1163 1166 atomic_inc(&pr_reg->pr_res_holders); ··· 1271 1274 { 1272 1275 struct target_core_fabric_ops *tfo = 1273 1276 pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; 1274 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1277 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 1275 1278 char i_buf[PR_REG_ISID_ID_LEN]; 1276 1279 int prf_isid; 1277 1280 ··· 1332 1335 struct se_device *dev, 1333 1336 struct se_node_acl *nacl) 1334 1337 { 1335 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1338 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 1336 1339 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; 1337 1340 /* 1338 1341 * If the passed se_node_acl matches the reservation holder, ··· 1362 1365 void core_scsi3_free_all_registrations( 1363 1366 struct se_device *dev) 1364 1367 { 1365 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 1368 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 1366 1369 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; 1367 1370 1368 1371 spin_lock(&dev->dev_reservation_lock); ··· 1896 1899 { 1897 1900 struct se_lun *lun; 1898 1901 struct se_portal_group *tpg; 1899 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 1900 1902 struct t10_pr_registration *pr_reg; 1901 1903 unsigned char tmp[512], isid_buf[32]; 1902 1904 ssize_t len = 0; ··· 1913 1917 /* 1914 1918 * Walk the registration list.. 
1915 1919 */ 1916 - spin_lock(&su_dev->t10_pr.registration_lock); 1917 - list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, 1920 + spin_lock(&dev->t10_pr.registration_lock); 1921 + list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, 1918 1922 pr_reg_list) { 1919 1923 1920 1924 tmp[0] = '\0'; ··· 1959 1963 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1960 1964 pr_err("Unable to update renaming" 1961 1965 " APTPL metadata\n"); 1962 - spin_unlock(&su_dev->t10_pr.registration_lock); 1966 + spin_unlock(&dev->t10_pr.registration_lock); 1963 1967 return -EMSGSIZE; 1964 1968 } 1965 1969 len += sprintf(buf+len, "%s", tmp); ··· 1977 1981 if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { 1978 1982 pr_err("Unable to update renaming" 1979 1983 " APTPL metadata\n"); 1980 - spin_unlock(&su_dev->t10_pr.registration_lock); 1984 + spin_unlock(&dev->t10_pr.registration_lock); 1981 1985 return -EMSGSIZE; 1982 1986 } 1983 1987 len += sprintf(buf+len, "%s", tmp); 1984 1988 reg_count++; 1985 1989 } 1986 - spin_unlock(&su_dev->t10_pr.registration_lock); 1990 + spin_unlock(&dev->t10_pr.registration_lock); 1987 1991 1988 1992 if (!reg_count) 1989 1993 len += sprintf(buf+len, "No Registrations or Reservations"); ··· 2015 2019 unsigned char *buf, 2016 2020 u32 pr_aptpl_buf_len) 2017 2021 { 2018 - struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; 2022 + struct t10_wwn *wwn = &dev->t10_wwn; 2019 2023 struct file *file; 2020 2024 struct iovec iov[1]; 2021 2025 mm_segment_t old_fs; ··· 2116 2120 struct se_lun *se_lun = cmd->se_lun; 2117 2121 struct se_portal_group *se_tpg; 2118 2122 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; 2119 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2123 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 2120 2124 /* Used for APTPL metadata w/ UNREGISTER */ 2121 2125 unsigned char *pr_aptpl_buf = NULL; 2122 2126 unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; ··· 2430 2434 struct se_session *se_sess 
= cmd->se_sess; 2431 2435 struct se_lun *se_lun = cmd->se_lun; 2432 2436 struct t10_pr_registration *pr_reg, *pr_res_holder; 2433 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2437 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 2434 2438 char i_buf[PR_REG_ISID_ID_LEN]; 2435 2439 int ret, prf_isid; 2436 2440 ··· 2663 2667 struct se_session *se_sess = cmd->se_sess; 2664 2668 struct se_lun *se_lun = cmd->se_lun; 2665 2669 struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; 2666 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2670 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 2667 2671 int ret, all_reg = 0; 2668 2672 2669 2673 if (!se_sess || !se_lun) { ··· 2832 2836 struct se_device *dev = cmd->se_dev; 2833 2837 struct se_node_acl *pr_reg_nacl; 2834 2838 struct se_session *se_sess = cmd->se_sess; 2835 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 2839 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 2836 2840 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; 2837 2841 u32 pr_res_mapped_lun = 0; 2838 2842 int calling_it_nexus = 0; ··· 3002 3006 struct se_session *se_sess = cmd->se_sess; 3003 3007 LIST_HEAD(preempt_and_abort_list); 3004 3008 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; 3005 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 3009 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 3006 3010 u32 pr_res_mapped_lun = 0; 3007 3011 int all_reg = 0, calling_it_nexus = 0, released_regs = 0; 3008 3012 int prh_type = 0, prh_scope = 0, ret; ··· 3354 3358 struct se_portal_group *se_tpg, *dest_se_tpg = NULL; 3355 3359 struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; 3356 3360 struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; 3357 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 3361 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 3358 3362 unsigned char *buf; 3359 3363 unsigned char 
*initiator_str; 3360 3364 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; ··· 3819 3823 * initiator or service action and shall terminate with a RESERVATION 3820 3824 * CONFLICT status. 3821 3825 */ 3822 - if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) { 3826 + if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) { 3823 3827 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 3824 3828 " SPC-2 reservation is held, returning" 3825 3829 " RESERVATION_CONFLICT\n"); ··· 3955 3959 */ 3956 3960 static int core_scsi3_pri_read_keys(struct se_cmd *cmd) 3957 3961 { 3958 - struct se_device *se_dev = cmd->se_dev; 3959 - struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; 3962 + struct se_device *dev = cmd->se_dev; 3960 3963 struct t10_pr_registration *pr_reg; 3961 3964 unsigned char *buf; 3962 3965 u32 add_len = 0, off = 8; ··· 3968 3973 } 3969 3974 3970 3975 buf = transport_kmap_data_sg(cmd); 3971 - buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 3972 - buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 3973 - buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 3974 - buf[3] = (su_dev->t10_pr.pr_generation & 0xff); 3976 + buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); 3977 + buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff); 3978 + buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); 3979 + buf[3] = (dev->t10_pr.pr_generation & 0xff); 3975 3980 3976 - spin_lock(&su_dev->t10_pr.registration_lock); 3977 - list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, 3981 + spin_lock(&dev->t10_pr.registration_lock); 3982 + list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, 3978 3983 pr_reg_list) { 3979 3984 /* 3980 3985 * Check for overflow of 8byte PRI READ_KEYS payload and ··· 3994 3999 3995 4000 add_len += 8; 3996 4001 } 3997 - spin_unlock(&su_dev->t10_pr.registration_lock); 4002 + spin_unlock(&dev->t10_pr.registration_lock); 3998 4003 3999 4004 buf[4] = ((add_len >> 24) & 0xff); 4000 4005 buf[5] = 
((add_len >> 16) & 0xff); ··· 4013 4018 */ 4014 4019 static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) 4015 4020 { 4016 - struct se_device *se_dev = cmd->se_dev; 4017 - struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; 4021 + struct se_device *dev = cmd->se_dev; 4018 4022 struct t10_pr_registration *pr_reg; 4019 4023 unsigned char *buf; 4020 4024 u64 pr_res_key; ··· 4027 4033 } 4028 4034 4029 4035 buf = transport_kmap_data_sg(cmd); 4030 - buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4031 - buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 4032 - buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 4033 - buf[3] = (su_dev->t10_pr.pr_generation & 0xff); 4036 + buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); 4037 + buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff); 4038 + buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); 4039 + buf[3] = (dev->t10_pr.pr_generation & 0xff); 4034 4040 4035 - spin_lock(&se_dev->dev_reservation_lock); 4036 - pr_reg = se_dev->dev_pr_res_holder; 4041 + spin_lock(&dev->dev_reservation_lock); 4042 + pr_reg = dev->dev_pr_res_holder; 4037 4043 if (pr_reg) { 4038 4044 /* 4039 4045 * Set the hardcoded Additional Length ··· 4084 4090 } 4085 4091 4086 4092 err: 4087 - spin_unlock(&se_dev->dev_reservation_lock); 4093 + spin_unlock(&dev->dev_reservation_lock); 4088 4094 transport_kunmap_data_sg(cmd); 4089 4095 4090 4096 return 0; ··· 4098 4104 static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) 4099 4105 { 4100 4106 struct se_device *dev = cmd->se_dev; 4101 - struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; 4107 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 4102 4108 unsigned char *buf; 4103 4109 u16 add_len = 8; /* Hardcoded to 8. 
*/ 4104 4110 ··· 4153 4159 */ 4154 4160 static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) 4155 4161 { 4156 - struct se_device *se_dev = cmd->se_dev; 4162 + struct se_device *dev = cmd->se_dev; 4157 4163 struct se_node_acl *se_nacl; 4158 - struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; 4159 4164 struct se_portal_group *se_tpg; 4160 4165 struct t10_pr_registration *pr_reg, *pr_reg_tmp; 4161 - struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; 4166 + struct t10_reservation *pr_tmpl = &dev->t10_pr; 4162 4167 unsigned char *buf; 4163 4168 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; 4164 4169 u32 off = 8; /* off into first Full Status descriptor */ ··· 4172 4179 4173 4180 buf = transport_kmap_data_sg(cmd); 4174 4181 4175 - buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); 4176 - buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); 4177 - buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); 4178 - buf[3] = (su_dev->t10_pr.pr_generation & 0xff); 4182 + buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); 4183 + buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff); 4184 + buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); 4185 + buf[3] = (dev->t10_pr.pr_generation & 0xff); 4179 4186 4180 4187 spin_lock(&pr_tmpl->registration_lock); 4181 4188 list_for_each_entry_safe(pr_reg, pr_reg_tmp, ··· 4309 4316 * initiator or service action and shall terminate with a RESERVATION 4310 4317 * CONFLICT status. 
4311 4318 */ 4312 - if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) { 4319 + if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) { 4313 4320 pr_err("Received PERSISTENT_RESERVE CDB while legacy" 4314 4321 " SPC-2 reservation is held, returning" 4315 4322 " RESERVATION_CONFLICT\n"); ··· 4356 4363 return 0; 4357 4364 } 4358 4365 4359 - int core_setup_reservations(struct se_device *dev, int force_pt) 4366 + void core_setup_reservations(struct se_device *dev) 4360 4367 { 4361 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 4362 - struct t10_reservation *rest = &su_dev->t10_pr; 4368 + struct t10_reservation *rest = &dev->t10_pr; 4369 + 4363 4370 /* 4364 4371 * If this device is from Target_Core_Mod/pSCSI, use the reservations 4365 4372 * of the Underlying SCSI hardware. In Linux/SCSI terms, this can 4366 4373 * cause a problem because libata and some SATA RAID HBAs appear 4367 4374 * under Linux/SCSI, but to emulate reservations themselves. 4368 4375 */ 4369 - if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && 4370 - !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) { 4376 + if ((dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) || 4377 + (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV && 4378 + !dev->dev_attrib.emulate_reservations)) { 4371 4379 rest->res_type = SPC_PASSTHROUGH; 4372 4380 rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; 4373 4381 rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; 4374 4382 pr_debug("%s: Using SPC_PASSTHROUGH, no reservation" 4375 4383 " emulation\n", dev->transport->name); 4376 - return 0; 4377 - } 4378 - /* 4379 - * If SPC-3 or above is reported by real or emulated struct se_device, 4380 - * use emulated Persistent Reservations. 
4381 - */ 4382 - if (dev->transport->get_device_rev(dev) >= SCSI_3) { 4384 + } else if (dev->transport->get_device_rev(dev) >= SCSI_3) { 4383 4385 rest->res_type = SPC3_PERSISTENT_RESERVATIONS; 4384 4386 rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; 4385 4387 rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; ··· 4388 4400 pr_debug("%s: Using SPC2_RESERVATIONS emulation\n", 4389 4401 dev->transport->name); 4390 4402 } 4391 - 4392 - return 0; 4393 4403 }
+1 -1
drivers/target/target_core_pr.h
··· 63 63 64 64 extern int target_scsi3_emulate_pr_in(struct se_cmd *); 65 65 extern int target_scsi3_emulate_pr_out(struct se_cmd *); 66 - extern int core_setup_reservations(struct se_device *, int); 66 + extern void core_setup_reservations(struct se_device *); 67 67 68 68 #endif /* TARGET_CORE_PR_H */
+98 -172
drivers/target/target_core_pscsi.c
··· 53 53 54 54 #define ISPRINT(a) ((a >= ' ') && (a <= '~')) 55 55 56 + static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev) 57 + { 58 + return container_of(dev, struct pscsi_dev_virt, dev); 59 + } 60 + 56 61 static struct se_subsystem_api pscsi_template; 57 62 58 63 static int pscsi_execute_cmd(struct se_cmd *cmd); ··· 224 219 225 220 snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]); 226 221 227 - wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL; 222 + wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL; 228 223 229 224 kfree(buf); 230 225 return 0; ··· 304 299 kfree(buf); 305 300 } 306 301 307 - /* pscsi_add_device_to_list(): 308 - * 309 - * 310 - */ 311 - static struct se_device *pscsi_add_device_to_list( 312 - struct se_hba *hba, 313 - struct se_subsystem_dev *se_dev, 314 - struct pscsi_dev_virt *pdv, 315 - struct scsi_device *sd, 316 - int dev_flags) 302 + static int pscsi_add_device_to_list(struct se_device *dev, 303 + struct scsi_device *sd) 317 304 { 318 - struct se_device *dev; 319 - struct se_dev_limits dev_limits; 320 - struct request_queue *q; 321 - struct queue_limits *limits; 305 + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 306 + struct request_queue *q = sd->request_queue; 322 307 323 - memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 308 + pdv->pdv_sd = sd; 324 309 325 310 if (!sd->queue_depth) { 326 311 sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; ··· 319 324 " queue_depth to %d\n", sd->channel, sd->id, 320 325 sd->lun, sd->queue_depth); 321 326 } 322 - /* 323 - * Setup the local scope queue_limits from struct request_queue->limits 324 - * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 
325 - */ 326 - q = sd->request_queue; 327 - limits = &dev_limits.limits; 328 - limits->logical_block_size = sd->sector_size; 329 - limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); 330 - limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q)); 331 - dev_limits.hw_queue_depth = sd->queue_depth; 332 - dev_limits.queue_depth = sd->queue_depth; 327 + 328 + dev->dev_attrib.hw_block_size = sd->sector_size; 329 + dev->dev_attrib.hw_max_sectors = 330 + min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); 331 + dev->dev_attrib.hw_queue_depth = sd->queue_depth; 332 + 333 333 /* 334 334 * Setup our standard INQUIRY info into se_dev->t10_wwn 335 335 */ 336 - pscsi_set_inquiry_info(sd, &se_dev->t10_wwn); 337 - 338 - /* 339 - * Set the pointer pdv->pdv_sd to from passed struct scsi_device, 340 - * which has already been referenced with Linux SCSI code with 341 - * scsi_device_get() in this file's pscsi_create_virtdevice(). 342 - * 343 - * The passthrough operations called by the transport_add_device_* 344 - * function below will require this pointer to be set for passthroug 345 - * ops. 346 - * 347 - * For the shutdown case in pscsi_free_device(), this struct 348 - * scsi_device reference is released with Linux SCSI code 349 - * scsi_device_put() and the pdv->pdv_sd cleared. 350 - */ 351 - pdv->pdv_sd = sd; 352 - dev = transport_add_device_to_core_hba(hba, &pscsi_template, 353 - se_dev, dev_flags, pdv, 354 - &dev_limits, NULL, NULL); 355 - if (!dev) { 356 - pdv->pdv_sd = NULL; 357 - return NULL; 358 - } 336 + pscsi_set_inquiry_info(sd, &dev->t10_wwn); 359 337 360 338 /* 361 339 * Locate VPD WWN Information used for various purposes within 362 340 * the Storage Engine. 363 341 */ 364 - if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) { 342 + if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) { 365 343 /* 366 344 * If VPD Unit Serial returned GOOD status, try 367 345 * VPD Device Identification page (0x83). 
368 346 */ 369 - pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn); 347 + pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn); 370 348 } 371 349 372 350 /* ··· 347 379 */ 348 380 if (sd->type == TYPE_TAPE) 349 381 pscsi_tape_read_blocksize(dev, sd); 350 - return dev; 382 + return 0; 351 383 } 352 384 353 - static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) 385 + static struct se_device *pscsi_alloc_device(struct se_hba *hba, 386 + const char *name) 354 387 { 355 388 struct pscsi_dev_virt *pdv; 356 389 ··· 360 391 pr_err("Unable to allocate memory for struct pscsi_dev_virt\n"); 361 392 return NULL; 362 393 } 363 - pdv->pdv_se_hba = hba; 364 394 365 395 pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name); 366 - return pdv; 396 + return &pdv->dev; 367 397 } 368 398 369 399 /* 370 400 * Called with struct Scsi_Host->host_lock called. 371 401 */ 372 - static struct se_device *pscsi_create_type_disk( 373 - struct scsi_device *sd, 374 - struct pscsi_dev_virt *pdv, 375 - struct se_subsystem_dev *se_dev, 376 - struct se_hba *hba) 402 + static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) 377 403 __releases(sh->host_lock) 378 404 { 379 - struct se_device *dev; 380 - struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; 405 + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 406 + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 381 407 struct Scsi_Host *sh = sd->host; 382 408 struct block_device *bd; 383 - u32 dev_flags = 0; 409 + int ret; 384 410 385 411 if (scsi_device_get(sd)) { 386 412 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", 387 413 sh->host_no, sd->channel, sd->id, sd->lun); 388 414 spin_unlock_irq(sh->host_lock); 389 - return NULL; 415 + return -EIO; 390 416 } 391 417 spin_unlock_irq(sh->host_lock); 392 418 /* 393 419 * Claim exclusive struct block_device access to struct scsi_device 394 420 * for TYPE_DISK using supplied udev_path 395 421 */ 396 - bd = 
blkdev_get_by_path(se_dev->se_dev_udev_path, 422 + bd = blkdev_get_by_path(dev->udev_path, 397 423 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); 398 424 if (IS_ERR(bd)) { 399 425 pr_err("pSCSI: blkdev_get_by_path() failed\n"); 400 426 scsi_device_put(sd); 401 - return NULL; 427 + return PTR_ERR(bd); 402 428 } 403 429 pdv->pdv_bd = bd; 404 430 405 - dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 406 - if (!dev) { 431 + ret = pscsi_add_device_to_list(dev, sd); 432 + if (ret) { 407 433 blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 408 434 scsi_device_put(sd); 409 - return NULL; 435 + return ret; 410 436 } 437 + 411 438 pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", 412 439 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); 413 - 414 - return dev; 440 + return 0; 415 441 } 416 442 417 443 /* 418 444 * Called with struct Scsi_Host->host_lock called. 419 445 */ 420 - static struct se_device *pscsi_create_type_rom( 421 - struct scsi_device *sd, 422 - struct pscsi_dev_virt *pdv, 423 - struct se_subsystem_dev *se_dev, 424 - struct se_hba *hba) 446 + static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) 425 447 __releases(sh->host_lock) 426 448 { 427 - struct se_device *dev; 428 - struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; 449 + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 429 450 struct Scsi_Host *sh = sd->host; 430 - u32 dev_flags = 0; 451 + int ret; 431 452 432 453 if (scsi_device_get(sd)) { 433 454 pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", 434 455 sh->host_no, sd->channel, sd->id, sd->lun); 435 456 spin_unlock_irq(sh->host_lock); 436 - return NULL; 457 + return -EIO; 437 458 } 438 459 spin_unlock_irq(sh->host_lock); 439 460 440 - dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 441 - if (!dev) { 461 + ret = pscsi_add_device_to_list(dev, sd); 462 + if (ret) { 442 463 scsi_device_put(sd); 443 - return NULL; 464 + return ret; 444 465 } 445 466 
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 446 467 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 447 468 sd->channel, sd->id, sd->lun); 448 469 449 - return dev; 470 + return 0; 450 471 } 451 472 452 473 /* 453 - *Called with struct Scsi_Host->host_lock called. 474 + * Called with struct Scsi_Host->host_lock called. 454 475 */ 455 - static struct se_device *pscsi_create_type_other( 456 - struct scsi_device *sd, 457 - struct pscsi_dev_virt *pdv, 458 - struct se_subsystem_dev *se_dev, 459 - struct se_hba *hba) 476 + static int pscsi_create_type_other(struct se_device *dev, 477 + struct scsi_device *sd) 460 478 __releases(sh->host_lock) 461 479 { 462 - struct se_device *dev; 463 - struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; 480 + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 464 481 struct Scsi_Host *sh = sd->host; 465 - u32 dev_flags = 0; 482 + int ret; 466 483 467 484 spin_unlock_irq(sh->host_lock); 468 - dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); 469 - if (!dev) 470 - return NULL; 485 + ret = pscsi_add_device_to_list(dev, sd); 486 + if (ret) 487 + return ret; 471 488 472 489 pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", 473 490 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 474 491 sd->channel, sd->id, sd->lun); 475 - 476 - return dev; 492 + return 0; 477 493 } 478 494 479 - static struct se_device *pscsi_create_virtdevice( 480 - struct se_hba *hba, 481 - struct se_subsystem_dev *se_dev, 482 - void *p) 495 + int pscsi_configure_device(struct se_device *dev) 483 496 { 484 - struct pscsi_dev_virt *pdv = p; 485 - struct se_device *dev; 497 + struct se_hba *hba = dev->se_hba; 498 + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 486 499 struct scsi_device *sd; 487 - struct pscsi_hba_virt *phv = hba->hba_ptr; 500 + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 488 501 struct Scsi_Host *sh = phv->phv_lld_host; 489 502 int legacy_mode_enable = 0; 503 + int ret; 490 504 491 
- if (!pdv) { 492 - pr_err("Unable to locate struct pscsi_dev_virt" 493 - " parameter\n"); 494 - return ERR_PTR(-EINVAL); 505 + if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || 506 + !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || 507 + !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { 508 + pr_err("Missing scsi_channel_id=, scsi_target_id= and" 509 + " scsi_lun_id= parameters\n"); 510 + return -EINVAL; 495 511 } 512 + 496 513 /* 497 514 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the 498 515 * struct Scsi_Host we will need to bring the TCM/pSCSI object online ··· 487 532 if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { 488 533 pr_err("pSCSI: Unable to locate struct" 489 534 " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); 490 - return ERR_PTR(-ENODEV); 535 + return -ENODEV; 491 536 } 492 537 /* 493 538 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device 494 539 * reference, we enforce that udev_path has been set 495 540 */ 496 - if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { 541 + if (!(dev->dev_flags & DF_USING_UDEV_PATH)) { 497 542 pr_err("pSCSI: udev_path attribute has not" 498 543 " been set before ENABLE=1\n"); 499 - return ERR_PTR(-EINVAL); 544 + return -EINVAL; 500 545 } 501 546 /* 502 547 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID, ··· 504 549 * and enable for PHV_LLD_SCSI_HOST_NO mode. 
505 550 */ 506 551 if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { 507 - spin_lock(&hba->device_lock); 508 - if (!list_empty(&hba->hba_dev_list)) { 552 + if (hba->dev_count) { 509 553 pr_err("pSCSI: Unable to set hba_mode" 510 554 " with active devices\n"); 511 - spin_unlock(&hba->device_lock); 512 - return ERR_PTR(-EEXIST); 555 + return -EEXIST; 513 556 } 514 - spin_unlock(&hba->device_lock); 515 557 516 558 if (pscsi_pmode_enable_hba(hba, 1) != 1) 517 - return ERR_PTR(-ENODEV); 559 + return -ENODEV; 518 560 519 561 legacy_mode_enable = 1; 520 562 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; ··· 521 569 if (IS_ERR(sh)) { 522 570 pr_err("pSCSI: Unable to locate" 523 571 " pdv_host_id: %d\n", pdv->pdv_host_id); 524 - return ERR_CAST(sh); 572 + return PTR_ERR(sh); 525 573 } 526 574 } 527 575 } else { 528 576 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { 529 577 pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while" 530 578 " struct Scsi_Host exists\n"); 531 - return ERR_PTR(-EEXIST); 579 + return -EEXIST; 532 580 } 533 581 } 534 582 ··· 545 593 */ 546 594 switch (sd->type) { 547 595 case TYPE_DISK: 548 - dev = pscsi_create_type_disk(sd, pdv, se_dev, hba); 596 + ret = pscsi_create_type_disk(dev, sd); 549 597 break; 550 598 case TYPE_ROM: 551 - dev = pscsi_create_type_rom(sd, pdv, se_dev, hba); 599 + ret = pscsi_create_type_rom(dev, sd); 552 600 break; 553 601 default: 554 - dev = pscsi_create_type_other(sd, pdv, se_dev, hba); 602 + ret = pscsi_create_type_other(dev, sd); 555 603 break; 556 604 } 557 605 558 - if (!dev) { 606 + if (ret) { 559 607 if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) 560 608 scsi_host_put(sh); 561 609 else if (legacy_mode_enable) { ··· 563 611 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 564 612 } 565 613 pdv->pdv_sd = NULL; 566 - return ERR_PTR(-ENODEV); 614 + return ret; 567 615 } 568 - return dev; 616 + return 0; 569 617 } 570 618 spin_unlock_irq(sh->host_lock); 571 619 ··· 579 627 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE; 580 628 } 581 629 582 - return 
ERR_PTR(-ENODEV); 630 + return -ENODEV; 583 631 } 584 632 585 - /* pscsi_free_device(): (Part of se_subsystem_api_t template) 586 - * 587 - * 588 - */ 589 - static void pscsi_free_device(void *p) 633 + static void pscsi_free_device(struct se_device *dev) 590 634 { 591 - struct pscsi_dev_virt *pdv = p; 592 - struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; 635 + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 636 + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 593 637 struct scsi_device *sd = pdv->pdv_sd; 594 638 595 639 if (sd) { ··· 618 670 static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, 619 671 unsigned char *sense_buffer) 620 672 { 621 - struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; 673 + struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); 622 674 struct scsi_device *sd = pdv->pdv_sd; 623 675 int result; 624 676 struct pscsi_plugin_task *pt = cmd->priv; ··· 718 770 {Opt_err, NULL} 719 771 }; 720 772 721 - static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, 722 - struct se_subsystem_dev *se_dev, 723 - const char *page, 724 - ssize_t count) 773 + static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev, 774 + const char *page, ssize_t count) 725 775 { 726 - struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; 727 - struct pscsi_hba_virt *phv = hba->hba_ptr; 776 + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 777 + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 728 778 char *orig, *ptr, *opts; 729 779 substring_t args[MAX_OPT_ARGS]; 730 780 int ret = 0, arg, token; ··· 787 841 return (!ret) ? 
count : ret; 788 842 } 789 843 790 - static ssize_t pscsi_check_configfs_dev_params( 791 - struct se_hba *hba, 792 - struct se_subsystem_dev *se_dev) 844 + static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b) 793 845 { 794 - struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; 795 - 796 - if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || 797 - !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || 798 - !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { 799 - pr_err("Missing scsi_channel_id=, scsi_target_id= and" 800 - " scsi_lun_id= parameters\n"); 801 - return -EINVAL; 802 - } 803 - 804 - return 0; 805 - } 806 - 807 - static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba, 808 - struct se_subsystem_dev *se_dev, 809 - char *b) 810 - { 811 - struct pscsi_hba_virt *phv = hba->hba_ptr; 812 - struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr; 846 + struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; 847 + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 813 848 struct scsi_device *sd = pdv->pdv_sd; 814 849 unsigned char host_id[16]; 815 850 ssize_t bl; ··· 860 933 u32 sgl_nents, enum dma_data_direction data_direction, 861 934 struct bio **hbio) 862 935 { 863 - struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; 936 + struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); 864 937 struct bio *bio = NULL, *tbio = NULL; 865 938 struct page *page; 866 939 struct scatterlist *sg; ··· 1031 1104 struct scatterlist *sgl = cmd->t_data_sg; 1032 1105 u32 sgl_nents = cmd->t_data_nents; 1033 1106 enum dma_data_direction data_direction = cmd->data_direction; 1034 - struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; 1107 + struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); 1035 1108 struct pscsi_plugin_task *pt; 1036 1109 struct request *req; 1037 1110 struct bio *hbio; ··· 1118 1191 */ 1119 1192 static u32 pscsi_get_device_rev(struct se_device *dev) 1120 1193 { 1121 - struct pscsi_dev_virt *pdv = dev->dev_ptr; 1194 + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 1122 1195 struct 
scsi_device *sd = pdv->pdv_sd; 1123 1196 1124 1197 return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1; ··· 1130 1203 */ 1131 1204 static u32 pscsi_get_device_type(struct se_device *dev) 1132 1205 { 1133 - struct pscsi_dev_virt *pdv = dev->dev_ptr; 1206 + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 1134 1207 struct scsi_device *sd = pdv->pdv_sd; 1135 1208 1136 1209 return sd->type; ··· 1138 1211 1139 1212 static sector_t pscsi_get_blocks(struct se_device *dev) 1140 1213 { 1141 - struct pscsi_dev_virt *pdv = dev->dev_ptr; 1214 + struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); 1142 1215 1143 1216 if (pdv->pdv_bd && pdv->pdv_bd->bd_part) 1144 1217 return pdv->pdv_bd->bd_part->nr_sects; ··· 1186 1259 .attach_hba = pscsi_attach_hba, 1187 1260 .detach_hba = pscsi_detach_hba, 1188 1261 .pmode_enable_hba = pscsi_pmode_enable_hba, 1189 - .allocate_virtdevice = pscsi_allocate_virtdevice, 1190 - .create_virtdevice = pscsi_create_virtdevice, 1262 + .alloc_device = pscsi_alloc_device, 1263 + .configure_device = pscsi_configure_device, 1191 1264 .free_device = pscsi_free_device, 1192 1265 .transport_complete = pscsi_transport_complete, 1193 1266 .parse_cdb = pscsi_parse_cdb, 1194 - .check_configfs_dev_params = pscsi_check_configfs_dev_params, 1195 1267 .set_configfs_dev_params = pscsi_set_configfs_dev_params, 1196 1268 .show_configfs_dev_params = pscsi_show_configfs_dev_params, 1197 1269 .get_device_rev = pscsi_get_device_rev,
+1 -1
drivers/target/target_core_pscsi.h
··· 37 37 #define PDF_HAS_VIRT_HOST_ID 0x20 38 38 39 39 struct pscsi_dev_virt { 40 + struct se_device dev; 40 41 int pdv_flags; 41 42 int pdv_host_id; 42 43 int pdv_channel_id; ··· 45 44 int pdv_lun_id; 46 45 struct block_device *pdv_bd; 47 46 struct scsi_device *pdv_sd; 48 - struct se_hba *pdv_se_hba; 49 47 } ____cacheline_aligned; 50 48 51 49 typedef enum phv_modes {
+35 -59
drivers/target/target_core_rd.c
··· 41 41 42 42 #include "target_core_rd.h" 43 43 44 - static struct se_subsystem_api rd_mcp_template; 44 + static inline struct rd_dev *RD_DEV(struct se_device *dev) 45 + { 46 + return container_of(dev, struct rd_dev, dev); 47 + } 45 48 46 49 /* rd_attach_hba(): (Part of se_subsystem_api_t template) 47 50 * ··· 199 196 return 0; 200 197 } 201 198 202 - static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name) 199 + static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name) 203 200 { 204 201 struct rd_dev *rd_dev; 205 202 struct rd_host *rd_host = hba->hba_ptr; ··· 212 209 213 210 rd_dev->rd_host = rd_host; 214 211 215 - return rd_dev; 212 + return &rd_dev->dev; 216 213 } 217 214 218 - static struct se_device *rd_create_virtdevice(struct se_hba *hba, 219 - struct se_subsystem_dev *se_dev, void *p) 215 + static int rd_configure_device(struct se_device *dev) 220 216 { 221 - struct se_device *dev; 222 - struct se_dev_limits dev_limits; 223 - struct rd_dev *rd_dev = p; 224 - struct rd_host *rd_host = hba->hba_ptr; 225 - int dev_flags = 0, ret; 226 - char prod[16], rev[4]; 217 + struct rd_dev *rd_dev = RD_DEV(dev); 218 + struct rd_host *rd_host = dev->se_hba->hba_ptr; 219 + int ret; 227 220 228 - memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 221 + if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { 222 + pr_debug("Missing rd_pages= parameter\n"); 223 + return -EINVAL; 224 + } 229 225 230 226 ret = rd_build_device_space(rd_dev); 231 227 if (ret < 0) 232 228 goto fail; 233 229 234 - snprintf(prod, 16, "RAMDISK-MCP"); 235 - snprintf(rev, 4, "%s", RD_MCP_VERSION); 236 - 237 - dev_limits.limits.logical_block_size = RD_BLOCKSIZE; 238 - dev_limits.limits.max_hw_sectors = UINT_MAX; 239 - dev_limits.limits.max_sectors = UINT_MAX; 240 - dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; 241 - dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; 242 - 243 - dev = transport_add_device_to_core_hba(hba, 244 - &rd_mcp_template, se_dev, dev_flags, 
rd_dev, 245 - &dev_limits, prod, rev); 246 - if (!dev) 247 - goto fail; 230 + dev->dev_attrib.hw_block_size = RD_BLOCKSIZE; 231 + dev->dev_attrib.hw_max_sectors = UINT_MAX; 232 + dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; 248 233 249 234 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 250 235 ··· 242 251 rd_dev->sg_table_count, 243 252 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE)); 244 253 245 - return dev; 254 + return 0; 246 255 247 256 fail: 248 257 rd_release_device_space(rd_dev); 249 - return ERR_PTR(ret); 258 + return ret; 250 259 } 251 260 252 - static void rd_free_device(void *p) 261 + static void rd_free_device(struct se_device *dev) 253 262 { 254 - struct rd_dev *rd_dev = p; 263 + struct rd_dev *rd_dev = RD_DEV(dev); 255 264 256 265 rd_release_device_space(rd_dev); 257 266 kfree(rd_dev); ··· 281 290 u32 sgl_nents = cmd->t_data_nents; 282 291 enum dma_data_direction data_direction = cmd->data_direction; 283 292 struct se_device *se_dev = cmd->se_dev; 284 - struct rd_dev *dev = se_dev->dev_ptr; 293 + struct rd_dev *dev = RD_DEV(se_dev); 285 294 struct rd_dev_sg_table *table; 286 295 struct scatterlist *rd_sg; 287 296 struct sg_mapping_iter m; ··· 291 300 u32 src_len; 292 301 u64 tmp; 293 302 294 - tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size; 303 + tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; 295 304 rd_offset = do_div(tmp, PAGE_SIZE); 296 305 rd_page = tmp; 297 306 rd_size = cmd->data_length; ··· 369 378 {Opt_err, NULL} 370 379 }; 371 380 372 - static ssize_t rd_set_configfs_dev_params( 373 - struct se_hba *hba, 374 - struct se_subsystem_dev *se_dev, 375 - const char *page, 376 - ssize_t count) 381 + static ssize_t rd_set_configfs_dev_params(struct se_device *dev, 382 + const char *page, ssize_t count) 377 383 { 378 - struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 384 + struct rd_dev *rd_dev = RD_DEV(dev); 379 385 char *orig, *ptr, *opts; 380 386 substring_t args[MAX_OPT_ARGS]; 381 387 int ret = 
0, arg, token; ··· 405 417 return (!ret) ? count : ret; 406 418 } 407 419 408 - static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) 420 + static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) 409 421 { 410 - struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 422 + struct rd_dev *rd_dev = RD_DEV(dev); 411 423 412 - if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { 413 - pr_debug("Missing rd_pages= parameter\n"); 414 - return -EINVAL; 415 - } 416 - 417 - return 0; 418 - } 419 - 420 - static ssize_t rd_show_configfs_dev_params( 421 - struct se_hba *hba, 422 - struct se_subsystem_dev *se_dev, 423 - char *b) 424 - { 425 - struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; 426 424 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", 427 425 rd_dev->rd_dev_id); 428 426 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" ··· 429 455 430 456 static sector_t rd_get_blocks(struct se_device *dev) 431 457 { 432 - struct rd_dev *rd_dev = dev->dev_ptr; 458 + struct rd_dev *rd_dev = RD_DEV(dev); 459 + 433 460 unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / 434 - dev->se_sub_dev->se_dev_attrib.block_size) - 1; 461 + dev->dev_attrib.block_size) - 1; 435 462 436 463 return blocks_long; 437 464 } ··· 448 473 449 474 static struct se_subsystem_api rd_mcp_template = { 450 475 .name = "rd_mcp", 476 + .inquiry_prod = "RAMDISK-MCP", 477 + .inquiry_rev = RD_MCP_VERSION, 451 478 .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, 452 479 .attach_hba = rd_attach_hba, 453 480 .detach_hba = rd_detach_hba, 454 - .allocate_virtdevice = rd_allocate_virtdevice, 455 - .create_virtdevice = rd_create_virtdevice, 481 + .alloc_device = rd_alloc_device, 482 + .configure_device = rd_configure_device, 456 483 .free_device = rd_free_device, 457 484 .parse_cdb = rd_parse_cdb, 458 - .check_configfs_dev_params = rd_check_configfs_dev_params, 459 485 .set_configfs_dev_params = rd_set_configfs_dev_params, 460 486 
.show_configfs_dev_params = rd_show_configfs_dev_params, 461 487 .get_device_rev = rd_get_device_rev,
+1
drivers/target/target_core_rd.h
··· 24 24 #define RDF_HAS_PAGE_COUNT 0x01 25 25 26 26 struct rd_dev { 27 + struct se_device dev; 27 28 u32 rd_flags; 28 29 /* Unique Ramdisk Device ID in Ramdisk HBA */ 29 30 u32 rd_dev_id;
+15 -16
drivers/target/target_core_sbc.c
··· 54 54 buf[1] = (blocks >> 16) & 0xff; 55 55 buf[2] = (blocks >> 8) & 0xff; 56 56 buf[3] = blocks & 0xff; 57 - buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; 58 - buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; 59 - buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; 60 - buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; 57 + buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff; 58 + buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff; 59 + buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff; 60 + buf[7] = dev->dev_attrib.block_size & 0xff; 61 61 62 62 rbuf = transport_kmap_data_sg(cmd); 63 63 if (rbuf) { ··· 85 85 buf[5] = (blocks >> 16) & 0xff; 86 86 buf[6] = (blocks >> 8) & 0xff; 87 87 buf[7] = blocks & 0xff; 88 - buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; 89 - buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; 90 - buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; 91 - buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; 88 + buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff; 89 + buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; 90 + buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; 91 + buf[11] = dev->dev_attrib.block_size & 0xff; 92 92 /* 93 93 * Set Thin Provisioning Enable bit following sbc3r22 in section 94 94 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 
95 95 */ 96 - if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 96 + if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 97 97 buf[14] = 0x80; 98 98 99 99 rbuf = transport_kmap_data_sg(cmd); ··· 143 143 144 144 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) 145 145 { 146 - return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors; 146 + return cmd->se_dev->dev_attrib.block_size * sectors; 147 147 } 148 148 149 149 static int sbc_check_valid_sectors(struct se_cmd *cmd) ··· 152 152 unsigned long long end_lba; 153 153 u32 sectors; 154 154 155 - sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size; 155 + sectors = cmd->data_length / dev->dev_attrib.block_size; 156 156 end_lba = dev->transport->get_blocks(dev) + 1; 157 157 158 158 if (cmd->t_task_lba + sectors > end_lba) { ··· 315 315 316 316 int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops) 317 317 { 318 - struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 319 318 struct se_device *dev = cmd->se_dev; 320 319 unsigned char *cdb = cmd->t_task_cdb; 321 320 unsigned int size; ··· 561 562 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 562 563 unsigned long long end_lba; 563 564 564 - if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) { 565 + if (sectors > dev->dev_attrib.fabric_max_sectors) { 565 566 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 566 567 " big sectors %u exceeds fabric_max_sectors:" 567 568 " %u\n", cdb[0], sectors, 568 - su_dev->se_dev_attrib.fabric_max_sectors); 569 + dev->dev_attrib.fabric_max_sectors); 569 570 goto out_invalid_cdb_field; 570 571 } 571 - if (sectors > su_dev->se_dev_attrib.hw_max_sectors) { 572 + if (sectors > dev->dev_attrib.hw_max_sectors) { 572 573 printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 573 574 " big sectors %u exceeds backend hw_max_sectors:" 574 575 " %u\n", cdb[0], sectors, 575 - su_dev->se_dev_attrib.hw_max_sectors); 576 + 
dev->dev_attrib.hw_max_sectors); 576 577 goto out_invalid_cdb_field; 577 578 } 578 579
+42 -49
drivers/target/target_core_spc.c
··· 95 95 /* 96 96 * Enable SCCS and TPGS fields for Emulated ALUA 97 97 */ 98 - if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) 98 + if (dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) 99 99 spc_fill_alua_data(lun->lun_sep, buf); 100 100 101 101 buf[7] = 0x2; /* CmdQue=1 */ 102 102 103 103 snprintf(&buf[8], 8, "LIO-ORG"); 104 - snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model); 105 - snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision); 104 + snprintf(&buf[16], 16, "%s", dev->t10_wwn.model); 105 + snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision); 106 106 buf[4] = 31; /* Set additional length to 31 */ 107 107 108 108 return 0; ··· 114 114 struct se_device *dev = cmd->se_dev; 115 115 u16 len = 0; 116 116 117 - if (dev->se_sub_dev->su_dev_flags & 118 - SDF_EMULATED_VPD_UNIT_SERIAL) { 117 + if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { 119 118 u32 unit_serial_len; 120 119 121 - unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial); 120 + unit_serial_len = strlen(dev->t10_wwn.unit_serial); 122 121 unit_serial_len++; /* For NULL Terminator */ 123 122 124 - len += sprintf(&buf[4], "%s", 125 - dev->se_sub_dev->t10_wwn.unit_serial); 123 + len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial); 126 124 len++; /* Extra Byte for NULL Terminator */ 127 125 buf[3] = len; 128 126 } ··· 130 132 static void spc_parse_naa_6h_vendor_specific(struct se_device *dev, 131 133 unsigned char *buf) 132 134 { 133 - unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; 135 + unsigned char *p = &dev->t10_wwn.unit_serial[0]; 134 136 int cnt; 135 137 bool next = true; 136 138 ··· 171 173 struct t10_alua_lu_gp_member *lu_gp_mem; 172 174 struct t10_alua_tg_pt_gp *tg_pt_gp; 173 175 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 174 - unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0]; 176 + unsigned char *prod = &dev->t10_wwn.model[0]; 175 177 u32 prod_len; 176 178 u32 unit_serial_len, off = 0; 177 179 u16 len = 0, id_len; ··· 186 
188 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial 187 189 * value in order to return the NAA id. 188 190 */ 189 - if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL)) 191 + if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL)) 190 192 goto check_t10_vend_desc; 191 193 192 194 /* CODE SET == Binary */ ··· 234 236 prod_len += strlen(prod); 235 237 prod_len++; /* For : */ 236 238 237 - if (dev->se_sub_dev->su_dev_flags & 238 - SDF_EMULATED_VPD_UNIT_SERIAL) { 239 - unit_serial_len = 240 - strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); 239 + if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { 240 + unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]); 241 241 unit_serial_len++; /* For NULL Terminator */ 242 242 243 243 id_len += sprintf(&buf[off+12], "%s:%s", prod, 244 - &dev->se_sub_dev->t10_wwn.unit_serial[0]); 244 + &dev->t10_wwn.unit_serial[0]); 245 245 } 246 246 buf[off] = 0x2; /* ASCII */ 247 247 buf[off+1] = 0x1; /* T10 Vendor ID */ ··· 294 298 * Get the PROTOCOL IDENTIFIER as defined by spc4r17 295 299 * section 7.5.1 Table 362 296 300 */ 297 - if (dev->se_sub_dev->t10_alua.alua_type != 298 - SPC3_ALUA_EMULATED) 301 + if (dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) 299 302 goto check_scsi_name; 300 303 301 304 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; ··· 417 422 buf[5] = 0x07; 418 423 419 424 /* If WriteCache emulation is enabled, set V_SUP */ 420 - if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) 425 + if (cmd->se_dev->dev_attrib.emulate_write_cache > 0) 421 426 buf[6] = 0x01; 422 427 return 0; 423 428 } ··· 434 439 * emulate_tpu=1 or emulate_tpws=1 we will be expect a 435 440 * different page length for Thin Provisioning. 
436 441 */ 437 - if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) 442 + if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) 438 443 have_tp = 1; 439 444 440 445 buf[0] = dev->transport->get_device_type(dev); ··· 451 456 /* 452 457 * Set MAXIMUM TRANSFER LENGTH 453 458 */ 454 - max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, 455 - dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 459 + max_sectors = min(dev->dev_attrib.fabric_max_sectors, 460 + dev->dev_attrib.hw_max_sectors); 456 461 put_unaligned_be32(max_sectors, &buf[8]); 457 462 458 463 /* 459 464 * Set OPTIMAL TRANSFER LENGTH 460 465 */ 461 - put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]); 466 + put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); 462 467 463 468 /* 464 469 * Exit now if we don't support TP. ··· 469 474 /* 470 475 * Set MAXIMUM UNMAP LBA COUNT 471 476 */ 472 - put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]); 477 + put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]); 473 478 474 479 /* 475 480 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT 476 481 */ 477 - put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count, 482 + put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count, 478 483 &buf[24]); 479 484 480 485 /* 481 486 * Set OPTIMAL UNMAP GRANULARITY 482 487 */ 483 - put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]); 488 + put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]); 484 489 485 490 /* 486 491 * UNMAP GRANULARITY ALIGNMENT 487 492 */ 488 - put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment, 493 + put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment, 489 494 &buf[32]); 490 - if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0) 495 + if (dev->dev_attrib.unmap_granularity_alignment != 0) 491 496 buf[32] |= 0x80; 
/* Set the UGAVALID bit */ 492 497 493 498 return 0; ··· 500 505 501 506 buf[0] = dev->transport->get_device_type(dev); 502 507 buf[3] = 0x3c; 503 - buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0; 508 + buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0; 504 509 505 510 return 0; 506 511 } ··· 541 546 * the UNMAP command (see 5.25). A TPU bit set to zero indicates 542 547 * that the device server does not support the UNMAP command. 543 548 */ 544 - if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0) 549 + if (dev->dev_attrib.emulate_tpu != 0) 545 550 buf[5] = 0x80; 546 551 547 552 /* ··· 550 555 * A TPWS bit set to zero indicates that the device server does not 551 556 * support the use of the WRITE SAME (16) command to unmap LBAs. 552 557 */ 553 - if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0) 558 + if (dev->dev_attrib.emulate_tpws != 0) 554 559 buf[5] |= 0x40; 555 560 556 561 return 0; ··· 581 586 * Registered Extended LUN WWN has been set via ConfigFS 582 587 * during device creation/restart. 583 588 */ 584 - if (cmd->se_dev->se_sub_dev->su_dev_flags & 585 - SDF_EMULATED_VPD_UNIT_SERIAL) { 589 + if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) { 586 590 buf[3] = ARRAY_SIZE(evpd_handlers); 587 591 for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) 588 592 buf[p + 4] = evpd_handlers[p].page; ··· 684 690 * command sequence order shall be explicitly handled by the application client 685 691 * through the selection of appropriate ommands and task attributes. 686 692 */ 687 - p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10; 693 + p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10; 688 694 /* 689 695 * From spc4r17, section 7.4.6 Control mode Page 690 696 * ··· 714 720 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless 715 721 * to the number of commands completed with one of those status codes. 716 722 */ 717 - p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 
0x30 : 718 - (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; 723 + p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 : 724 + (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; 719 725 /* 720 726 * From spc4r17, section 7.4.6 Control mode Page 721 727 * ··· 728 734 * which the command was received shall be completed with TASK ABORTED 729 735 * status (see SAM-4). 730 736 */ 731 - p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00; 737 + p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00; 732 738 p[8] = 0xff; 733 739 p[9] = 0xff; 734 740 p[11] = 30; ··· 740 746 { 741 747 p[0] = 0x08; 742 748 p[1] = 0x12; 743 - if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) 749 + if (dev->dev_attrib.emulate_write_cache > 0) 744 750 p[2] = 0x04; /* Write Cache Enable */ 745 751 p[12] = 0x20; /* Disabled Read Ahead */ 746 752 ··· 820 826 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) 821 827 spc_modesense_write_protect(&buf[3], type); 822 828 823 - if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && 824 - (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) 829 + if ((dev->dev_attrib.emulate_write_cache > 0) && 830 + (dev->dev_attrib.emulate_fua_write > 0)) 825 831 spc_modesense_dpofua(&buf[3], type); 826 832 } else { 827 833 offset -= 1; ··· 833 839 (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) 834 840 spc_modesense_write_protect(&buf[2], type); 835 841 836 - if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && 837 - (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) 842 + if ((dev->dev_attrib.emulate_write_cache > 0) && 843 + (dev->dev_attrib.emulate_fua_write > 0)) 838 844 spc_modesense_dpofua(&buf[2], type); 839 845 } 840 846 ··· 917 923 int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) 918 924 { 919 925 struct se_device *dev = cmd->se_dev; 920 - struct se_subsystem_dev *su_dev = dev->se_sub_dev; 921 926 unsigned char *cdb = cmd->t_task_cdb; 922 
927 923 928 switch (cdb[0]) { ··· 939 946 *size = (cdb[7] << 8) + cdb[8]; 940 947 break; 941 948 case PERSISTENT_RESERVE_IN: 942 - if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 949 + if (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 943 950 cmd->execute_cmd = target_scsi3_emulate_pr_in; 944 951 *size = (cdb[7] << 8) + cdb[8]; 945 952 break; 946 953 case PERSISTENT_RESERVE_OUT: 947 - if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 954 + if (dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 948 955 cmd->execute_cmd = target_scsi3_emulate_pr_out; 949 956 *size = (cdb[7] << 8) + cdb[8]; 950 957 break; ··· 955 962 else 956 963 *size = cmd->data_length; 957 964 958 - if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) 965 + if (dev->t10_pr.res_type != SPC_PASSTHROUGH) 959 966 cmd->execute_cmd = target_scsi2_reservation_release; 960 967 break; 961 968 case RESERVE: ··· 976 983 * is running in SPC_PASSTHROUGH, and wants reservations 977 984 * emulation disabled. 978 985 */ 979 - if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) 986 + if (dev->t10_pr.res_type != SPC_PASSTHROUGH) 980 987 cmd->execute_cmd = target_scsi2_reservation_reserve; 981 988 break; 982 989 case REQUEST_SENSE: ··· 1033 1040 * Check for emulated MI_REPORT_TARGET_PGS 1034 1041 */ 1035 1042 if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS && 1036 - su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 1043 + dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 1037 1044 cmd->execute_cmd = 1038 1045 target_emulate_report_target_port_groups; 1039 1046 } ··· 1052 1059 * Check for emulated MO_SET_TARGET_PGS. 1053 1060 */ 1054 1061 if (cdb[1] == MO_SET_TARGET_PGS && 1055 - su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 1062 + dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 1056 1063 cmd->execute_cmd = 1057 1064 target_emulate_set_target_port_groups; 1058 1065 }
+75 -232
drivers/target/target_core_stat.c
··· 80 80 static ssize_t target_stat_scsi_dev_show_attr_inst( 81 81 struct se_dev_stat_grps *sgrps, char *page) 82 82 { 83 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 84 - struct se_subsystem_dev, dev_stat_grps); 85 - struct se_hba *hba = se_subdev->se_dev_hba; 86 - struct se_device *dev = se_subdev->se_dev_ptr; 87 - 88 - if (!dev) 89 - return -ENODEV; 83 + struct se_device *dev = 84 + container_of(sgrps, struct se_device, dev_stat_grps); 85 + struct se_hba *hba = dev->se_hba; 90 86 91 87 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 92 88 } ··· 91 95 static ssize_t target_stat_scsi_dev_show_attr_indx( 92 96 struct se_dev_stat_grps *sgrps, char *page) 93 97 { 94 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 95 - struct se_subsystem_dev, dev_stat_grps); 96 - struct se_device *dev = se_subdev->se_dev_ptr; 97 - 98 - if (!dev) 99 - return -ENODEV; 98 + struct se_device *dev = 99 + container_of(sgrps, struct se_device, dev_stat_grps); 100 100 101 101 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 102 102 } ··· 101 109 static ssize_t target_stat_scsi_dev_show_attr_role( 102 110 struct se_dev_stat_grps *sgrps, char *page) 103 111 { 104 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 105 - struct se_subsystem_dev, dev_stat_grps); 106 - struct se_device *dev = se_subdev->se_dev_ptr; 107 - 108 - if (!dev) 109 - return -ENODEV; 110 - 111 112 return snprintf(page, PAGE_SIZE, "Target\n"); 112 113 } 113 114 DEV_STAT_SCSI_DEV_ATTR_RO(role); ··· 108 123 static ssize_t target_stat_scsi_dev_show_attr_ports( 109 124 struct se_dev_stat_grps *sgrps, char *page) 110 125 { 111 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 112 - struct se_subsystem_dev, dev_stat_grps); 113 - struct se_device *dev = se_subdev->se_dev_ptr; 114 - 115 - if (!dev) 116 - return -ENODEV; 126 + struct se_device *dev = 127 + container_of(sgrps, struct se_device, dev_stat_grps); 117 128 118 129 return snprintf(page, PAGE_SIZE, "%u\n", 
dev->dev_port_count); 119 130 } ··· 157 176 static ssize_t target_stat_scsi_tgt_dev_show_attr_inst( 158 177 struct se_dev_stat_grps *sgrps, char *page) 159 178 { 160 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 161 - struct se_subsystem_dev, dev_stat_grps); 162 - struct se_hba *hba = se_subdev->se_dev_hba; 163 - struct se_device *dev = se_subdev->se_dev_ptr; 164 - 165 - if (!dev) 166 - return -ENODEV; 179 + struct se_device *dev = 180 + container_of(sgrps, struct se_device, dev_stat_grps); 181 + struct se_hba *hba = dev->se_hba; 167 182 168 183 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 169 184 } ··· 168 191 static ssize_t target_stat_scsi_tgt_dev_show_attr_indx( 169 192 struct se_dev_stat_grps *sgrps, char *page) 170 193 { 171 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 172 - struct se_subsystem_dev, dev_stat_grps); 173 - struct se_device *dev = se_subdev->se_dev_ptr; 174 - 175 - if (!dev) 176 - return -ENODEV; 194 + struct se_device *dev = 195 + container_of(sgrps, struct se_device, dev_stat_grps); 177 196 178 197 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 179 198 } ··· 178 205 static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus( 179 206 struct se_dev_stat_grps *sgrps, char *page) 180 207 { 181 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 182 - struct se_subsystem_dev, dev_stat_grps); 183 - struct se_device *dev = se_subdev->se_dev_ptr; 184 - 185 - if (!dev) 186 - return -ENODEV; 187 - 188 208 return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT); 189 209 } 190 210 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus); ··· 185 219 static ssize_t target_stat_scsi_tgt_dev_show_attr_status( 186 220 struct se_dev_stat_grps *sgrps, char *page) 187 221 { 188 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 189 - struct se_subsystem_dev, dev_stat_grps); 190 - struct se_device *dev = se_subdev->se_dev_ptr; 191 - char status[16]; 222 + struct se_device *dev = 223 + container_of(sgrps, struct se_device, 
dev_stat_grps); 192 224 193 - if (!dev) 194 - return -ENODEV; 195 - 196 - switch (dev->dev_status) { 197 - case TRANSPORT_DEVICE_ACTIVATED: 198 - strcpy(status, "activated"); 199 - break; 200 - case TRANSPORT_DEVICE_DEACTIVATED: 201 - strcpy(status, "deactivated"); 202 - break; 203 - case TRANSPORT_DEVICE_SHUTDOWN: 204 - strcpy(status, "shutdown"); 205 - break; 206 - case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 207 - case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 208 - strcpy(status, "offline"); 209 - break; 210 - default: 211 - sprintf(status, "unknown(%d)", dev->dev_status); 212 - break; 213 - } 214 - 215 - return snprintf(page, PAGE_SIZE, "%s\n", status); 225 + if (dev->export_count) 226 + return snprintf(page, PAGE_SIZE, "activated"); 227 + else 228 + return snprintf(page, PAGE_SIZE, "deactivated"); 216 229 } 217 230 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status); 218 231 219 232 static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus( 220 233 struct se_dev_stat_grps *sgrps, char *page) 221 234 { 222 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 223 - struct se_subsystem_dev, dev_stat_grps); 224 - struct se_device *dev = se_subdev->se_dev_ptr; 235 + struct se_device *dev = 236 + container_of(sgrps, struct se_device, dev_stat_grps); 225 237 int non_accessible_lus; 226 238 227 - if (!dev) 228 - return -ENODEV; 229 - 230 - switch (dev->dev_status) { 231 - case TRANSPORT_DEVICE_ACTIVATED: 239 + if (dev->export_count) 232 240 non_accessible_lus = 0; 233 - break; 234 - case TRANSPORT_DEVICE_DEACTIVATED: 235 - case TRANSPORT_DEVICE_SHUTDOWN: 236 - case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 237 - case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 238 - default: 241 + else 239 242 non_accessible_lus = 1; 240 - break; 241 - } 242 243 243 244 return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus); 244 245 } ··· 214 281 static ssize_t target_stat_scsi_tgt_dev_show_attr_resets( 215 282 struct se_dev_stat_grps *sgrps, char *page) 216 283 { 217 - struct se_subsystem_dev 
*se_subdev = container_of(sgrps, 218 - struct se_subsystem_dev, dev_stat_grps); 219 - struct se_device *dev = se_subdev->se_dev_ptr; 220 - 221 - if (!dev) 222 - return -ENODEV; 284 + struct se_device *dev = 285 + container_of(sgrps, struct se_device, dev_stat_grps); 223 286 224 287 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); 225 288 } ··· 264 335 static ssize_t target_stat_scsi_lu_show_attr_inst( 265 336 struct se_dev_stat_grps *sgrps, char *page) 266 337 { 267 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 268 - struct se_subsystem_dev, dev_stat_grps); 269 - struct se_hba *hba = se_subdev->se_dev_hba; 270 - struct se_device *dev = se_subdev->se_dev_ptr; 271 - 272 - if (!dev) 273 - return -ENODEV; 338 + struct se_device *dev = 339 + container_of(sgrps, struct se_device, dev_stat_grps); 340 + struct se_hba *hba = dev->se_hba; 274 341 275 342 return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index); 276 343 } ··· 275 350 static ssize_t target_stat_scsi_lu_show_attr_dev( 276 351 struct se_dev_stat_grps *sgrps, char *page) 277 352 { 278 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 279 - struct se_subsystem_dev, dev_stat_grps); 280 - struct se_device *dev = se_subdev->se_dev_ptr; 281 - 282 - if (!dev) 283 - return -ENODEV; 353 + struct se_device *dev = 354 + container_of(sgrps, struct se_device, dev_stat_grps); 284 355 285 356 return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index); 286 357 } ··· 285 364 static ssize_t target_stat_scsi_lu_show_attr_indx( 286 365 struct se_dev_stat_grps *sgrps, char *page) 287 366 { 288 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 289 - struct se_subsystem_dev, dev_stat_grps); 290 - struct se_device *dev = se_subdev->se_dev_ptr; 291 - 292 - if (!dev) 293 - return -ENODEV; 294 - 295 367 return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX); 296 368 } 297 369 DEV_STAT_SCSI_LU_ATTR_RO(indx); ··· 292 378 static ssize_t target_stat_scsi_lu_show_attr_lun( 293 379 struct se_dev_stat_grps 
*sgrps, char *page) 294 380 { 295 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 296 - struct se_subsystem_dev, dev_stat_grps); 297 - struct se_device *dev = se_subdev->se_dev_ptr; 298 - 299 - if (!dev) 300 - return -ENODEV; 301 381 /* FIXME: scsiLuDefaultLun */ 302 382 return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0); 303 383 } ··· 300 392 static ssize_t target_stat_scsi_lu_show_attr_lu_name( 301 393 struct se_dev_stat_grps *sgrps, char *page) 302 394 { 303 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 304 - struct se_subsystem_dev, dev_stat_grps); 305 - struct se_device *dev = se_subdev->se_dev_ptr; 395 + struct se_device *dev = 396 + container_of(sgrps, struct se_device, dev_stat_grps); 306 397 307 - if (!dev) 308 - return -ENODEV; 309 398 /* scsiLuWwnName */ 310 399 return snprintf(page, PAGE_SIZE, "%s\n", 311 - (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ? 312 - dev->se_sub_dev->t10_wwn.unit_serial : "None"); 400 + (strlen(dev->t10_wwn.unit_serial)) ? 401 + dev->t10_wwn.unit_serial : "None"); 313 402 } 314 403 DEV_STAT_SCSI_LU_ATTR_RO(lu_name); 315 404 316 405 static ssize_t target_stat_scsi_lu_show_attr_vend( 317 406 struct se_dev_stat_grps *sgrps, char *page) 318 407 { 319 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 320 - struct se_subsystem_dev, dev_stat_grps); 321 - struct se_device *dev = se_subdev->se_dev_ptr; 408 + struct se_device *dev = 409 + container_of(sgrps, struct se_device, dev_stat_grps); 322 410 int i; 323 - char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1]; 324 - 325 - if (!dev) 326 - return -ENODEV; 411 + char str[sizeof(dev->t10_wwn.vendor)+1]; 327 412 328 413 /* scsiLuVendorId */ 329 - for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++) 330 - str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ? 331 - dev->se_sub_dev->t10_wwn.vendor[i] : ' '; 414 + for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) 415 + str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ? 
416 + dev->t10_wwn.vendor[i] : ' '; 332 417 str[i] = '\0'; 333 418 return snprintf(page, PAGE_SIZE, "%s\n", str); 334 419 } ··· 330 429 static ssize_t target_stat_scsi_lu_show_attr_prod( 331 430 struct se_dev_stat_grps *sgrps, char *page) 332 431 { 333 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 334 - struct se_subsystem_dev, dev_stat_grps); 335 - struct se_device *dev = se_subdev->se_dev_ptr; 432 + struct se_device *dev = 433 + container_of(sgrps, struct se_device, dev_stat_grps); 336 434 int i; 337 - char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1]; 338 - 339 - if (!dev) 340 - return -ENODEV; 435 + char str[sizeof(dev->t10_wwn.model)+1]; 341 436 342 437 /* scsiLuProductId */ 343 - for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++) 344 - str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ? 345 - dev->se_sub_dev->t10_wwn.model[i] : ' '; 438 + for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++) 439 + str[i] = ISPRINT(dev->t10_wwn.model[i]) ? 440 + dev->t10_wwn.model[i] : ' '; 346 441 str[i] = '\0'; 347 442 return snprintf(page, PAGE_SIZE, "%s\n", str); 348 443 } ··· 347 450 static ssize_t target_stat_scsi_lu_show_attr_rev( 348 451 struct se_dev_stat_grps *sgrps, char *page) 349 452 { 350 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 351 - struct se_subsystem_dev, dev_stat_grps); 352 - struct se_device *dev = se_subdev->se_dev_ptr; 453 + struct se_device *dev = 454 + container_of(sgrps, struct se_device, dev_stat_grps); 353 455 int i; 354 - char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1]; 355 - 356 - if (!dev) 357 - return -ENODEV; 456 + char str[sizeof(dev->t10_wwn.revision)+1]; 358 457 359 458 /* scsiLuRevisionId */ 360 - for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++) 361 - str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ? 362 - dev->se_sub_dev->t10_wwn.revision[i] : ' '; 459 + for (i = 0; i < sizeof(dev->t10_wwn.revision); i++) 460 + str[i] = ISPRINT(dev->t10_wwn.revision[i]) ? 
461 + dev->t10_wwn.revision[i] : ' '; 363 462 str[i] = '\0'; 364 463 return snprintf(page, PAGE_SIZE, "%s\n", str); 365 464 } ··· 364 471 static ssize_t target_stat_scsi_lu_show_attr_dev_type( 365 472 struct se_dev_stat_grps *sgrps, char *page) 366 473 { 367 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 368 - struct se_subsystem_dev, dev_stat_grps); 369 - struct se_device *dev = se_subdev->se_dev_ptr; 370 - 371 - if (!dev) 372 - return -ENODEV; 474 + struct se_device *dev = 475 + container_of(sgrps, struct se_device, dev_stat_grps); 373 476 374 477 /* scsiLuPeripheralType */ 375 478 return snprintf(page, PAGE_SIZE, "%u\n", ··· 376 487 static ssize_t target_stat_scsi_lu_show_attr_status( 377 488 struct se_dev_stat_grps *sgrps, char *page) 378 489 { 379 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 380 - struct se_subsystem_dev, dev_stat_grps); 381 - struct se_device *dev = se_subdev->se_dev_ptr; 382 - 383 - if (!dev) 384 - return -ENODEV; 490 + struct se_device *dev = 491 + container_of(sgrps, struct se_device, dev_stat_grps); 385 492 386 493 /* scsiLuStatus */ 387 494 return snprintf(page, PAGE_SIZE, "%s\n", 388 - (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? 389 - "available" : "notavailable"); 495 + (dev->export_count) ? 
"available" : "notavailable"); 390 496 } 391 497 DEV_STAT_SCSI_LU_ATTR_RO(status); 392 498 393 499 static ssize_t target_stat_scsi_lu_show_attr_state_bit( 394 500 struct se_dev_stat_grps *sgrps, char *page) 395 501 { 396 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 397 - struct se_subsystem_dev, dev_stat_grps); 398 - struct se_device *dev = se_subdev->se_dev_ptr; 399 - 400 - if (!dev) 401 - return -ENODEV; 402 - 403 502 /* scsiLuState */ 404 503 return snprintf(page, PAGE_SIZE, "exposed\n"); 405 504 } ··· 396 519 static ssize_t target_stat_scsi_lu_show_attr_num_cmds( 397 520 struct se_dev_stat_grps *sgrps, char *page) 398 521 { 399 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 400 - struct se_subsystem_dev, dev_stat_grps); 401 - struct se_device *dev = se_subdev->se_dev_ptr; 402 - 403 - if (!dev) 404 - return -ENODEV; 522 + struct se_device *dev = 523 + container_of(sgrps, struct se_device, dev_stat_grps); 405 524 406 525 /* scsiLuNumCommands */ 407 526 return snprintf(page, PAGE_SIZE, "%llu\n", ··· 408 535 static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( 409 536 struct se_dev_stat_grps *sgrps, char *page) 410 537 { 411 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 412 - struct se_subsystem_dev, dev_stat_grps); 413 - struct se_device *dev = se_subdev->se_dev_ptr; 414 - 415 - if (!dev) 416 - return -ENODEV; 538 + struct se_device *dev = 539 + container_of(sgrps, struct se_device, dev_stat_grps); 417 540 418 541 /* scsiLuReadMegaBytes */ 419 542 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); ··· 419 550 static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( 420 551 struct se_dev_stat_grps *sgrps, char *page) 421 552 { 422 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 423 - struct se_subsystem_dev, dev_stat_grps); 424 - struct se_device *dev = se_subdev->se_dev_ptr; 425 - 426 - if (!dev) 427 - return -ENODEV; 553 + struct se_device *dev = 554 + container_of(sgrps, struct 
se_device, dev_stat_grps); 428 555 429 556 /* scsiLuWrittenMegaBytes */ 430 557 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); ··· 430 565 static ssize_t target_stat_scsi_lu_show_attr_resets( 431 566 struct se_dev_stat_grps *sgrps, char *page) 432 567 { 433 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 434 - struct se_subsystem_dev, dev_stat_grps); 435 - struct se_device *dev = se_subdev->se_dev_ptr; 436 - 437 - if (!dev) 438 - return -ENODEV; 568 + struct se_device *dev = 569 + container_of(sgrps, struct se_device, dev_stat_grps); 439 570 440 571 /* scsiLuInResets */ 441 572 return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); ··· 441 580 static ssize_t target_stat_scsi_lu_show_attr_full_stat( 442 581 struct se_dev_stat_grps *sgrps, char *page) 443 582 { 444 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 445 - struct se_subsystem_dev, dev_stat_grps); 446 - struct se_device *dev = se_subdev->se_dev_ptr; 447 - 448 - if (!dev) 449 - return -ENODEV; 450 - 451 583 /* FIXME: scsiLuOutTaskSetFullStatus */ 452 584 return snprintf(page, PAGE_SIZE, "%u\n", 0); 453 585 } ··· 449 595 static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds( 450 596 struct se_dev_stat_grps *sgrps, char *page) 451 597 { 452 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 453 - struct se_subsystem_dev, dev_stat_grps); 454 - struct se_device *dev = se_subdev->se_dev_ptr; 455 - 456 - if (!dev) 457 - return -ENODEV; 458 - 459 598 /* FIXME: scsiLuHSInCommands */ 460 599 return snprintf(page, PAGE_SIZE, "%u\n", 0); 461 600 } ··· 457 610 static ssize_t target_stat_scsi_lu_show_attr_creation_time( 458 611 struct se_dev_stat_grps *sgrps, char *page) 459 612 { 460 - struct se_subsystem_dev *se_subdev = container_of(sgrps, 461 - struct se_subsystem_dev, dev_stat_grps); 462 - struct se_device *dev = se_subdev->se_dev_ptr; 463 - 464 - if (!dev) 465 - return -ENODEV; 613 + struct se_device *dev = 614 + container_of(sgrps, struct se_device, 
dev_stat_grps); 466 615 467 616 /* scsiLuCreationTime */ 468 617 return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time - ··· 505 662 * Called from target_core_configfs.c:target_core_make_subdev() to setup 506 663 * the target statistics groups + configfs CITs located in target_core_stat.c 507 664 */ 508 - void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) 665 + void target_stat_setup_dev_default_groups(struct se_device *dev) 509 666 { 510 - struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group; 667 + struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group; 511 668 512 - config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group, 669 + config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group, 513 670 "scsi_dev", &target_stat_scsi_dev_cit); 514 - config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group, 671 + config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group, 515 672 "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); 516 - config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group, 673 + config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group, 517 674 "scsi_lu", &target_stat_scsi_lu_cit); 518 675 519 - dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group; 520 - dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group; 521 - dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group; 676 + dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group; 677 + dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group; 678 + dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group; 522 679 dev_stat_grp->default_groups[3] = NULL; 523 680 } 524 681 ··· 1004 1161 return -ENODEV; 1005 1162 } 1006 1163 tpg = sep->sep_tpg; 1007 - wwn = &dev->se_sub_dev->t10_wwn; 1164 + wwn = &dev->t10_wwn; 1008 1165 /* scsiTransportDevName */ 1009 1166 ret = 
snprintf(page, PAGE_SIZE, "%s+%s\n", 1010 1167 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+3 -3
drivers/target/target_core_tmr.c
··· 371 371 * which the command was received shall be completed with TASK ABORTED 372 372 * status (see SAM-4). 373 373 */ 374 - tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; 374 + tas = dev->dev_attrib.emulate_tas; 375 375 /* 376 376 * Determine if this se_tmr is coming from a $FABRIC_MOD 377 377 * or struct se_device passthrough.. ··· 399 399 * LOGICAL UNIT RESET 400 400 */ 401 401 if (!preempt_and_abort_list && 402 - (dev->dev_flags & DF_SPC2_RESERVATIONS)) { 402 + (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) { 403 403 spin_lock(&dev->dev_reservation_lock); 404 404 dev->dev_reserved_node_acl = NULL; 405 - dev->dev_flags &= ~DF_SPC2_RESERVATIONS; 405 + dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS; 406 406 spin_unlock(&dev->dev_reservation_lock); 407 407 pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); 408 408 }
+12 -205
drivers/target/target_core_transport.c
··· 659 659 static void transport_write_pending_qf(struct se_cmd *cmd); 660 660 static void transport_complete_qf(struct se_cmd *cmd); 661 661 662 - static void target_qf_do_work(struct work_struct *work) 662 + void target_qf_do_work(struct work_struct *work) 663 663 { 664 664 struct se_device *dev = container_of(work, struct se_device, 665 665 qf_work_queue); ··· 712 712 int *bl) 713 713 { 714 714 *bl += sprintf(b + *bl, "Status: "); 715 - switch (dev->dev_status) { 716 - case TRANSPORT_DEVICE_ACTIVATED: 715 + if (dev->export_count) 717 716 *bl += sprintf(b + *bl, "ACTIVATED"); 718 - break; 719 - case TRANSPORT_DEVICE_DEACTIVATED: 717 + else 720 718 *bl += sprintf(b + *bl, "DEACTIVATED"); 721 - break; 722 - case TRANSPORT_DEVICE_SHUTDOWN: 723 - *bl += sprintf(b + *bl, "SHUTDOWN"); 724 - break; 725 - case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 726 - case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 727 - *bl += sprintf(b + *bl, "OFFLINE"); 728 - break; 729 - default: 730 - *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); 731 - break; 732 - } 733 719 734 720 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 735 721 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 736 - dev->se_sub_dev->se_dev_attrib.block_size, 737 - dev->se_sub_dev->se_dev_attrib.hw_max_sectors); 722 + dev->dev_attrib.block_size, 723 + dev->dev_attrib.hw_max_sectors); 738 724 *bl += sprintf(b + *bl, " "); 739 725 } 740 726 ··· 977 991 } 978 992 EXPORT_SYMBOL(transport_set_vpd_ident); 979 993 980 - static void core_setup_task_attr_emulation(struct se_device *dev) 981 - { 982 - /* 983 - * If this device is from Target_Core_Mod/pSCSI, disable the 984 - * SAM Task Attribute emulation. 985 - * 986 - * This is currently not available in upsream Linux/SCSI Target 987 - * mode code, and is assumed to be disabled while using TCM/pSCSI. 
988 - */ 989 - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 990 - dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; 991 - return; 992 - } 993 - 994 - dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; 995 - pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" 996 - " device\n", dev->transport->name, 997 - dev->transport->get_device_rev(dev)); 998 - } 999 - 1000 - static void scsi_dump_inquiry(struct se_device *dev) 1001 - { 1002 - struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; 1003 - char buf[17]; 1004 - int i, device_type; 1005 - /* 1006 - * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1007 - */ 1008 - for (i = 0; i < 8; i++) 1009 - if (wwn->vendor[i] >= 0x20) 1010 - buf[i] = wwn->vendor[i]; 1011 - else 1012 - buf[i] = ' '; 1013 - buf[i] = '\0'; 1014 - pr_debug(" Vendor: %s\n", buf); 1015 - 1016 - for (i = 0; i < 16; i++) 1017 - if (wwn->model[i] >= 0x20) 1018 - buf[i] = wwn->model[i]; 1019 - else 1020 - buf[i] = ' '; 1021 - buf[i] = '\0'; 1022 - pr_debug(" Model: %s\n", buf); 1023 - 1024 - for (i = 0; i < 4; i++) 1025 - if (wwn->revision[i] >= 0x20) 1026 - buf[i] = wwn->revision[i]; 1027 - else 1028 - buf[i] = ' '; 1029 - buf[i] = '\0'; 1030 - pr_debug(" Revision: %s\n", buf); 1031 - 1032 - device_type = dev->transport->get_device_type(dev); 1033 - pr_debug(" Type: %s ", scsi_device_type(device_type)); 1034 - pr_debug(" ANSI SCSI revision: %02x\n", 1035 - dev->transport->get_device_rev(dev)); 1036 - } 1037 - 1038 - struct se_device *transport_add_device_to_core_hba( 1039 - struct se_hba *hba, 1040 - struct se_subsystem_api *transport, 1041 - struct se_subsystem_dev *se_dev, 1042 - u32 device_flags, 1043 - void *transport_dev, 1044 - struct se_dev_limits *dev_limits, 1045 - const char *inquiry_prod, 1046 - const char *inquiry_rev) 1047 - { 1048 - int force_pt; 1049 - struct se_device *dev; 1050 - 1051 - dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); 1052 - if (!dev) { 1053 - pr_err("Unable to allocate 
memory for se_dev_t\n"); 1054 - return NULL; 1055 - } 1056 - 1057 - dev->dev_flags = device_flags; 1058 - dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; 1059 - dev->dev_ptr = transport_dev; 1060 - dev->se_hba = hba; 1061 - dev->se_sub_dev = se_dev; 1062 - dev->transport = transport; 1063 - INIT_LIST_HEAD(&dev->dev_list); 1064 - INIT_LIST_HEAD(&dev->dev_sep_list); 1065 - INIT_LIST_HEAD(&dev->dev_tmr_list); 1066 - INIT_LIST_HEAD(&dev->delayed_cmd_list); 1067 - INIT_LIST_HEAD(&dev->state_list); 1068 - INIT_LIST_HEAD(&dev->qf_cmd_list); 1069 - spin_lock_init(&dev->execute_task_lock); 1070 - spin_lock_init(&dev->delayed_cmd_lock); 1071 - spin_lock_init(&dev->dev_reservation_lock); 1072 - spin_lock_init(&dev->dev_status_lock); 1073 - spin_lock_init(&dev->se_port_lock); 1074 - spin_lock_init(&dev->se_tmr_lock); 1075 - spin_lock_init(&dev->qf_cmd_lock); 1076 - atomic_set(&dev->dev_ordered_id, 0); 1077 - 1078 - se_dev_set_default_attribs(dev, dev_limits); 1079 - 1080 - dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); 1081 - dev->creation_time = get_jiffies_64(); 1082 - spin_lock_init(&dev->stats_lock); 1083 - 1084 - spin_lock(&hba->device_lock); 1085 - list_add_tail(&dev->dev_list, &hba->hba_dev_list); 1086 - hba->dev_count++; 1087 - spin_unlock(&hba->device_lock); 1088 - /* 1089 - * Setup the SAM Task Attribute emulation for struct se_device 1090 - */ 1091 - core_setup_task_attr_emulation(dev); 1092 - /* 1093 - * Force PR and ALUA passthrough emulation with internal object use. 
1094 - */ 1095 - force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); 1096 - /* 1097 - * Setup the Reservations infrastructure for struct se_device 1098 - */ 1099 - core_setup_reservations(dev, force_pt); 1100 - /* 1101 - * Setup the Asymmetric Logical Unit Assignment for struct se_device 1102 - */ 1103 - if (core_setup_alua(dev, force_pt) < 0) 1104 - goto err_dev_list; 1105 - 1106 - /* 1107 - * Startup the struct se_device processing thread 1108 - */ 1109 - dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1, 1110 - dev->transport->name); 1111 - if (!dev->tmr_wq) { 1112 - pr_err("Unable to create tmr workqueue for %s\n", 1113 - dev->transport->name); 1114 - goto err_dev_list; 1115 - } 1116 - /* 1117 - * Setup work_queue for QUEUE_FULL 1118 - */ 1119 - INIT_WORK(&dev->qf_work_queue, target_qf_do_work); 1120 - /* 1121 - * Preload the initial INQUIRY const values if we are doing 1122 - * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI 1123 - * passthrough because this is being provided by the backend LLD. 1124 - * This is required so that transport_get_inquiry() copies these 1125 - * originals once back into DEV_T10_WWN(dev) for the virtual device 1126 - * setup. 
1127 - */ 1128 - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1129 - if (!inquiry_prod || !inquiry_rev) { 1130 - pr_err("All non TCM/pSCSI plugins require" 1131 - " INQUIRY consts\n"); 1132 - goto err_wq; 1133 - } 1134 - 1135 - strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); 1136 - strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); 1137 - strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); 1138 - } 1139 - scsi_dump_inquiry(dev); 1140 - 1141 - return dev; 1142 - 1143 - err_wq: 1144 - destroy_workqueue(dev->tmr_wq); 1145 - err_dev_list: 1146 - spin_lock(&hba->device_lock); 1147 - list_del(&dev->dev_list); 1148 - hba->dev_count--; 1149 - spin_unlock(&hba->device_lock); 1150 - 1151 - se_release_vpd_for_dev(dev); 1152 - 1153 - kfree(dev); 1154 - 1155 - return NULL; 1156 - } 1157 - EXPORT_SYMBOL(transport_add_device_to_core_hba); 1158 - 1159 994 int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1160 995 { 1161 996 struct se_device *dev = cmd->se_dev; ··· 998 1191 * Reject READ_* or WRITE_* with overflow/underflow for 999 1192 * type SCF_SCSI_DATA_CDB. 
1000 1193 */ 1001 - if (dev->se_sub_dev->se_dev_attrib.block_size != 512) { 1194 + if (dev->dev_attrib.block_size != 512) { 1002 1195 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1003 1196 " CDB on non 512-byte sector setup subsystem" 1004 1197 " plugin: %s\n", dev->transport->name); ··· 1100 1293 struct se_cmd *cmd, 1101 1294 unsigned char *cdb) 1102 1295 { 1103 - struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; 1296 + struct se_device *dev = cmd->se_dev; 1104 1297 u32 pr_reg_type = 0; 1105 1298 u8 alua_ascq = 0; 1106 1299 unsigned long flags; ··· 1152 1345 return -EINVAL; 1153 1346 } 1154 1347 1155 - ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); 1348 + ret = dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); 1156 1349 if (ret != 0) { 1157 1350 /* 1158 1351 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; ··· 1178 1371 /* 1179 1372 * Check status for SPC-3 Persistent Reservations 1180 1373 */ 1181 - if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) { 1182 - if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( 1374 + if (dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) { 1375 + if (dev->t10_pr.pr_ops.t10_seq_non_holder( 1183 1376 cmd, cdb, pr_reg_type) != 0) { 1184 1377 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1185 1378 cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; ··· 1194 1387 */ 1195 1388 } 1196 1389 1197 - ret = cmd->se_dev->transport->parse_cdb(cmd); 1390 + ret = dev->transport->parse_cdb(cmd); 1198 1391 if (ret < 0) 1199 1392 return ret; 1200 1393 ··· 1566 1759 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1567 1760 */ 1568 1761 if (cmd->se_sess && 1569 - cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) 1762 + cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) 1570 1763 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 1571 1764 cmd->orig_fe_lun, 0x2C, 1572 1765 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+3 -3
drivers/target/target_core_ua.c
··· 237 237 * highest priority UNIT_ATTENTION and ASC/ASCQ without 238 238 * clearing it. 239 239 */ 240 - if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) { 240 + if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) { 241 241 *asc = ua->ua_asc; 242 242 *ascq = ua->ua_ascq; 243 243 break; ··· 265 265 " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" 266 266 " reported ASC: 0x%02x, ASCQ: 0x%02x\n", 267 267 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), 268 - (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : 269 - "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, 268 + (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : 269 + "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl, 270 270 cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); 271 271 } 272 272
+11 -14
include/target/target_core_backend.h
··· 9 9 struct list_head sub_api_list; 10 10 11 11 char name[16]; 12 + char inquiry_prod[16]; 13 + char inquiry_rev[4]; 12 14 struct module *owner; 13 15 14 16 u8 transport_type; ··· 18 16 int (*attach_hba)(struct se_hba *, u32); 19 17 void (*detach_hba)(struct se_hba *); 20 18 int (*pmode_enable_hba)(struct se_hba *, unsigned long); 21 - void *(*allocate_virtdevice)(struct se_hba *, const char *); 22 - struct se_device *(*create_virtdevice)(struct se_hba *, 23 - struct se_subsystem_dev *, void *); 24 - void (*free_device)(void *); 19 + 20 + struct se_device *(*alloc_device)(struct se_hba *, const char *); 21 + int (*configure_device)(struct se_device *); 22 + void (*free_device)(struct se_device *device); 23 + 24 + ssize_t (*set_configfs_dev_params)(struct se_device *, 25 + const char *, ssize_t); 26 + ssize_t (*show_configfs_dev_params)(struct se_device *, char *); 27 + 25 28 void (*transport_complete)(struct se_cmd *cmd, 26 29 struct scatterlist *, 27 30 unsigned char *); 28 31 29 32 int (*parse_cdb)(struct se_cmd *cmd); 30 - ssize_t (*check_configfs_dev_params)(struct se_hba *, 31 - struct se_subsystem_dev *); 32 - ssize_t (*set_configfs_dev_params)(struct se_hba *, 33 - struct se_subsystem_dev *, const char *, ssize_t); 34 - ssize_t (*show_configfs_dev_params)(struct se_hba *, 35 - struct se_subsystem_dev *, char *); 36 33 u32 (*get_device_rev)(struct se_device *); 37 34 u32 (*get_device_type)(struct se_device *); 38 35 sector_t (*get_blocks)(struct se_device *); ··· 47 46 48 47 int transport_subsystem_register(struct se_subsystem_api *); 49 48 void transport_subsystem_release(struct se_subsystem_api *); 50 - 51 - struct se_device *transport_add_device_to_core_hba(struct se_hba *, 52 - struct se_subsystem_api *, struct se_subsystem_dev *, u32, 53 - void *, struct se_dev_limits *, const char *, const char *); 54 49 55 50 void target_complete_cmd(struct se_cmd *, u8); 56 51
+27 -77
include/target/target_core_base.h
··· 62 62 63 63 #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */ 64 64 65 - /* 66 - * struct se_subsystem_dev->su_dev_flags 67 - */ 68 - #define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001 69 - #define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002 70 - #define SDF_USING_UDEV_PATH 0x00000004 71 - #define SDF_USING_ALIAS 0x00000008 72 - 73 - /* 74 - * struct se_device->dev_flags 75 - */ 76 - #define DF_SPC2_RESERVATIONS 0x00000001 77 - #define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000002 78 - 79 65 /* struct se_dev_attrib sanity values */ 80 66 /* Default max_unmap_lba_count */ 81 67 #define DA_MAX_UNMAP_LBA_COUNT 0 ··· 168 182 TRANSPORT_LUNFLAGS_READ_WRITE = 0x04, 169 183 }; 170 184 171 - /* struct se_device->dev_status */ 172 - enum transport_device_status_table { 173 - TRANSPORT_DEVICE_ACTIVATED = 0x01, 174 - TRANSPORT_DEVICE_DEACTIVATED = 0x02, 175 - TRANSPORT_DEVICE_QUEUE_FULL = 0x04, 176 - TRANSPORT_DEVICE_SHUTDOWN = 0x08, 177 - TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10, 178 - TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20, 179 - }; 180 - 181 185 /* 182 186 * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason 183 187 * to signal which ASC/ASCQ sense payload should be built. 
··· 222 246 TMR_FUNCTION_REJECTED = 255, 223 247 }; 224 248 225 - struct se_obj { 226 - atomic_t obj_access_count; 227 - }; 228 - 229 249 /* 230 250 * Used by TCM Core internally to signal if ALUA emulation is enabled or 231 251 * disabled, or running in with TCM/pSCSI passthrough mode ··· 260 288 u16 alua_tg_pt_gps_counter; 261 289 u32 alua_tg_pt_gps_count; 262 290 spinlock_t tg_pt_gps_lock; 263 - struct se_subsystem_dev *t10_sub_dev; 291 + struct se_device *t10_dev; 264 292 /* Used for default ALUA Target Port Group */ 265 293 struct t10_alua_tg_pt_gp *default_tg_pt_gp; 266 294 /* Used for default ALUA Target Port Group ConfigFS group */ ··· 307 335 atomic_t tg_pt_gp_ref_cnt; 308 336 spinlock_t tg_pt_gp_lock; 309 337 struct mutex tg_pt_gp_md_mutex; 310 - struct se_subsystem_dev *tg_pt_gp_su_dev; 338 + struct se_device *tg_pt_gp_dev; 311 339 struct config_group tg_pt_gp_group; 312 340 struct list_head tg_pt_gp_list; 313 341 struct list_head tg_pt_gp_mem_list; ··· 338 366 char revision[4]; 339 367 char unit_serial[INQUIRY_VPD_SERIAL_LEN]; 340 368 spinlock_t t10_vpd_lock; 341 - struct se_subsystem_dev *t10_sub_dev; 369 + struct se_device *t10_dev; 342 370 struct config_group t10_wwn_group; 343 371 struct list_head t10_vpd_list; 344 372 }; ··· 634 662 struct list_head ua_list; 635 663 }; 636 664 637 - struct se_dev_limits { 638 - /* Max supported HW queue depth */ 639 - u32 hw_queue_depth; 640 - /* Max supported virtual queue depth */ 641 - u32 queue_depth; 642 - /* From include/linux/blkdev.h for the other HW/SW limits. 
*/ 643 - struct queue_limits limits; 644 - }; 645 - 646 665 struct se_dev_attrib { 647 666 int emulate_dpo; 648 667 int emulate_fua_write; ··· 659 696 u32 max_unmap_block_desc_count; 660 697 u32 unmap_granularity; 661 698 u32 unmap_granularity_alignment; 662 - struct se_subsystem_dev *da_sub_dev; 699 + struct se_device *da_dev; 663 700 struct config_group da_group; 664 701 }; 665 702 ··· 670 707 struct config_group scsi_lu_group; 671 708 }; 672 709 673 - struct se_subsystem_dev { 674 - /* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */ 675 - #define SE_DEV_ALIAS_LEN 512 676 - unsigned char se_dev_alias[SE_DEV_ALIAS_LEN]; 677 - /* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */ 678 - #define SE_UDEV_PATH_LEN 512 679 - unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN]; 680 - u32 su_dev_flags; 681 - struct se_hba *se_dev_hba; 682 - struct se_device *se_dev_ptr; 683 - struct se_dev_attrib se_dev_attrib; 684 - /* T10 Asymmetric Logical Unit Assignment for Target Ports */ 685 - struct t10_alua t10_alua; 686 - /* T10 Inquiry and VPD WWN Information */ 687 - struct t10_wwn t10_wwn; 688 - /* T10 SPC-2 + SPC-3 Reservations */ 689 - struct t10_reservation t10_pr; 690 - spinlock_t se_dev_lock; 691 - void *se_dev_su_ptr; 692 - struct config_group se_dev_group; 693 - /* For T10 Reservations */ 694 - struct config_group se_dev_pr_group; 695 - /* For target_core_stat.c groups */ 696 - struct se_dev_stat_grps dev_stat_grps; 697 - }; 698 - 699 710 struct se_device { 700 711 /* RELATIVE TARGET PORT IDENTIFER Counter */ 701 712 u16 dev_rpti_counter; 702 713 /* Used for SAM Task Attribute ordering */ 703 714 u32 dev_cur_ordered_id; 704 715 u32 dev_flags; 716 + #define DF_CONFIGURED 0x00000001 717 + #define DF_FIRMWARE_VPD_UNIT_SERIAL 0x00000002 718 + #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 719 + #define DF_USING_UDEV_PATH 0x00000008 720 + #define DF_USING_ALIAS 0x00000010 705 721 u32 dev_port_count; 706 - /* See 
transport_device_status_table */ 707 - u32 dev_status; 708 722 /* Physical device queue depth */ 709 723 u32 queue_depth; 710 724 /* Used for SPC-2 reservations enforce of ISIDs */ 711 725 u64 dev_res_bin_isid; 712 726 t10_task_attr_index_t dev_task_attr_type; 713 727 /* Pointer to transport specific device structure */ 714 - void *dev_ptr; 715 728 u32 dev_index; 716 729 u64 creation_time; 717 730 u32 num_resets; ··· 700 761 atomic_t dev_ordered_id; 701 762 atomic_t dev_ordered_sync; 702 763 atomic_t dev_qf_count; 703 - struct se_obj dev_obj; 704 - struct se_obj dev_access_obj; 705 - struct se_obj dev_export_obj; 764 + int export_count; 706 765 spinlock_t delayed_cmd_lock; 707 766 spinlock_t execute_task_lock; 708 767 spinlock_t dev_reservation_lock; 709 - spinlock_t dev_status_lock; 768 + unsigned int dev_reservation_flags; 769 + #define DRF_SPC2_RESERVATIONS 0x00000001 770 + #define DRF_SPC2_RESERVATIONS_WITH_ISID 0x00000002 710 771 spinlock_t se_port_lock; 711 772 spinlock_t se_tmr_lock; 712 773 spinlock_t qf_cmd_lock; ··· 725 786 struct list_head qf_cmd_list; 726 787 /* Pointer to associated SE HBA */ 727 788 struct se_hba *se_hba; 728 - struct se_subsystem_dev *se_sub_dev; 789 + /* T10 Inquiry and VPD WWN Information */ 790 + struct t10_wwn t10_wwn; 791 + /* T10 Asymmetric Logical Unit Assignment for Target Ports */ 792 + struct t10_alua t10_alua; 793 + /* T10 SPC-2 + SPC-3 Reservations */ 794 + struct t10_reservation t10_pr; 795 + struct se_dev_attrib dev_attrib; 796 + struct config_group dev_group; 797 + struct config_group dev_pr_group; 798 + struct se_dev_stat_grps dev_stat_grps; 799 + #define SE_DEV_ALIAS_LEN 512 /* must be less than PAGE_SIZE */ 800 + unsigned char dev_alias[SE_DEV_ALIAS_LEN]; 801 + #define SE_UDEV_PATH_LEN 512 /* must be less than PAGE_SIZE */ 802 + unsigned char udev_path[SE_UDEV_PATH_LEN]; 729 803 /* Pointer to template of function pointers for transport */ 730 804 struct se_subsystem_api *transport; 731 805 /* Linked list for struct 
se_hba struct se_device list */ ··· 755 803 u32 hba_index; 756 804 /* Pointer to transport specific host structure. */ 757 805 void *hba_ptr; 758 - /* Linked list for struct se_device */ 759 - struct list_head hba_dev_list; 760 806 struct list_head hba_node; 761 807 spinlock_t device_lock; 762 808 struct config_group hba_group;